This is page 2 of 4. Use http://codebase.md/rashidazarang/airtable-mcp?page={x} to view the full context.
# Directory Structure
```
├── .eslintrc.js
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── custom.md
│ │ └── feature_request.md
│ └── pull_request_template.md
├── .gitignore
├── .nvmrc
├── .prettierrc
├── bin
│ ├── airtable-crud-cli.js
│ └── airtable-mcp.js
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker
│ ├── Dockerfile
│ └── Dockerfile.node
├── docs
│ ├── guides
│ │ ├── CLAUDE_INTEGRATION.md
│ │ ├── ENHANCED_FEATURES.md
│ │ ├── INSTALLATION.md
│ │ └── QUICK_START.md
│ └── releases
│ ├── RELEASE_NOTES_v1.2.2.md
│ ├── RELEASE_NOTES_v1.2.4.md
│ ├── RELEASE_NOTES_v1.4.0.md
│ ├── RELEASE_NOTES_v1.5.0.md
│ └── RELEASE_NOTES_v1.6.0.md
├── examples
│ ├── airtable-crud-example.js
│ ├── building-mcp.md
│ ├── claude_config.json
│ ├── claude_simple_config.json
│ ├── env-demo.js
│ ├── example_usage.md
│ ├── example-tasks-update.json
│ ├── example-tasks.json
│ ├── python_debug_patch.txt
│ ├── sample-transform.js
│ ├── typescript
│ │ ├── advanced-ai-prompts.ts
│ │ ├── basic-usage.ts
│ │ └── claude-desktop-config.json
│ └── windsurf_mcp_config.json
├── index.js
├── ISSUE_RESPONSES.md
├── jest.config.js
├── LICENSE
├── package-lock.json
├── package.json
├── PROJECT_STRUCTURE.md
├── README.md
├── RELEASE_SUMMARY_v3.2.x.md
├── RELEASE_v3.2.1.md
├── RELEASE_v3.2.3.md
├── RELEASE_v3.2.4.md
├── requirements.txt
├── SECURITY_NOTICE.md
├── smithery.yaml
├── src
│ ├── index.js
│ ├── javascript
│ │ ├── airtable_simple_production.js
│ │ └── airtable_simple.js
│ ├── python
│ │ ├── airtable_mcp
│ │ │ ├── __init__.py
│ │ │ └── src
│ │ │ └── server.py
│ │ ├── inspector_server.py
│ │ ├── inspector.py
│ │ ├── setup.py
│ │ ├── simple_airtable_server.py
│ │ └── test_client.py
│ └── typescript
│ ├── ai-prompts.d.ts
│ ├── airtable-mcp-server.d.ts
│ ├── airtable-mcp-server.ts
│ ├── app
│ │ ├── airtable-client.ts
│ │ ├── config.ts
│ │ ├── context.ts
│ │ ├── exceptions.ts
│ │ ├── governance.ts
│ │ ├── logger.ts
│ │ ├── rateLimiter.ts
│ │ ├── tools
│ │ │ ├── create.ts
│ │ │ ├── describe.ts
│ │ │ ├── handleError.ts
│ │ │ ├── index.ts
│ │ │ ├── listBases.ts
│ │ │ ├── listExceptions.ts
│ │ │ ├── listGovernance.ts
│ │ │ ├── query.ts
│ │ │ ├── update.ts
│ │ │ ├── upsert.ts
│ │ │ └── webhooks.ts
│ │ └── types.ts
│ ├── errors.ts
│ ├── index.d.ts
│ ├── index.ts
│ ├── prompt-templates.ts
│ ├── tools-schemas.ts
│ └── tools.d.ts
├── TESTING_REPORT.md
├── tests
│ ├── test_all_features.sh
│ ├── test_mcp_comprehensive.js
│ ├── test_v1.5.0_final.sh
│ └── test_v1.6.0_comprehensive.sh
├── tsconfig.json
└── types
└── typescript
├── airtable-mcp-server.d.ts
├── app
│ ├── airtable-client.d.ts
│ ├── config.d.ts
│ ├── context.d.ts
│ ├── exceptions.d.ts
│ ├── governance.d.ts
│ ├── logger.d.ts
│ ├── rateLimiter.d.ts
│ ├── tools
│ │ ├── create.d.ts
│ │ ├── describe.d.ts
│ │ ├── handleError.d.ts
│ │ ├── index.d.ts
│ │ ├── listBases.d.ts
│ │ ├── listExceptions.d.ts
│ │ ├── listGovernance.d.ts
│ │ ├── query.d.ts
│ │ ├── update.d.ts
│ │ ├── upsert.d.ts
│ │ └── webhooks.d.ts
│ └── types.d.ts
├── errors.d.ts
├── index.d.ts
├── prompt-templates.d.ts
├── test-suite.d.ts
└── tools-schemas.d.ts
```
# Files
--------------------------------------------------------------------------------
/examples/env-demo.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node
/**
* Demo script that uses the AIRTABLE_BASE_ID from the .env file
* Demonstrates various operations with the Airtable API
*/
require('dotenv').config();
const baseUtils = require('../tools/airtable-base');
const crudUtils = require('../tools/airtable-crud');
const schemaUtils = require('../tools/airtable-schema');
// Constants
// Name of the table this demo creates (if absent) and writes to.
const DEMO_TABLE_NAME = 'ENV Demo Table';
// Seed rows inserted on every run; a Created date is appended at runtime.
const SAMPLE_RECORDS = [
{ Name: 'Record from ENV Demo', Description: 'Created using AIRTABLE_BASE_ID from .env file', Status: 'Active' },
{ Name: 'Another ENV Record', Description: 'Second record from the environment demo', Status: 'Pending' }
];
/**
 * End-to-end demo driven by AIRTABLE_PERSONAL_ACCESS_TOKEN and
 * AIRTABLE_BASE_ID loaded from .env: verifies base access, creates the
 * demo table if it does not exist, then creates, reads, updates and
 * filters records. Exits the process with code 1 on any failure.
 */
async function runDemo() {
console.log('=================================');
console.log('    AIRTABLE ENV DEMO SCRIPT     ');
console.log('=================================');
// Check environment variables
if (!process.env.AIRTABLE_PERSONAL_ACCESS_TOKEN) {
console.error('❌ Error: AIRTABLE_PERSONAL_ACCESS_TOKEN is not set in .env file');
process.exit(1);
}
if (!process.env.AIRTABLE_BASE_ID) {
console.error('❌ Error: AIRTABLE_BASE_ID is not set in .env file');
process.exit(1);
}
const baseId = process.env.AIRTABLE_BASE_ID;
console.log(`✅ Using AIRTABLE_BASE_ID: ${baseId}`);
try {
// Step 1: Verify base access
console.log('\nStep 1: Verifying access to the base...');
const baseAccess = await baseUtils.checkBaseAccess(baseId);
if (!baseAccess.accessible) {
console.error(`❌ Error: Cannot access base with ID ${baseId}`);
console.error(`   Reason: ${baseAccess.error}`);
process.exit(1);
}
console.log(`✅ Access confirmed to base: ${baseAccess.name}`);
// Step 2: List existing tables
console.log('\nStep 2: Listing existing tables...');
const tables = await baseUtils.listTables(baseId);
console.log(`✅ Found ${tables.length} tables in the base`);
// Step 3: Check if our demo table exists
console.log('\nStep 3: Checking if demo table exists...');
const demoTableExists = await crudUtils.tableExists(baseId, DEMO_TABLE_NAME);
if (demoTableExists) {
console.log(`✅ Demo table "${DEMO_TABLE_NAME}" already exists`);
} else {
console.log(`ℹ️ Demo table "${DEMO_TABLE_NAME}" does not exist, creating it...`);
// Step 4: Create the demo table
console.log('\nStep 4: Creating the demo table...');
// Field types/options follow the Airtable field-model conventions
// expected by schemaUtils.createTable.
const tableConfig = {
name: DEMO_TABLE_NAME,
description: 'Table created from the Environment Demo script',
fields: [
{
name: 'Name',
type: 'singleLineText',
description: 'Record name'
},
{
name: 'Description',
type: 'multilineText',
description: 'Record description'
},
{
name: 'Status',
type: 'singleSelect',
options: {
choices: [
{ name: 'Active' },
{ name: 'Pending' },
{ name: 'Completed' }
]
},
description: 'Current status'
},
{
name: 'Created',
type: 'date',
options: {
dateFormat: {
name: 'local'
}
},
description: 'Creation date'
}
]
};
await schemaUtils.createTable(baseId, tableConfig);
console.log(`✅ Created demo table: ${DEMO_TABLE_NAME}`);
}
// Step 5: Create sample records
console.log('\nStep 5: Creating sample records...');
// Add today's date to all records
const recordsWithDate = SAMPLE_RECORDS.map(record => ({
...record,
Created: new Date().toISOString().split('T')[0] // Format as YYYY-MM-DD
}));
const createdRecords = await crudUtils.createRecords(baseId, DEMO_TABLE_NAME, recordsWithDate);
console.log(`✅ Created ${createdRecords.length} sample records`);
// Step 6: Read records back
console.log('\nStep 6: Reading records from the table...');
const records = await crudUtils.readRecords(baseId, DEMO_TABLE_NAME, 100);
console.log(`✅ Read ${records.length} records from the table`);
console.log('\nSample record:');
console.log(JSON.stringify(records[0], null, 2));
// Step 7: Update a record
console.log('\nStep 7: Updating the first record...');
// NOTE(review): this reads Description directly off the created record
// (createdRecords[0].Description). If crudUtils.createRecords returns
// Airtable-style records of shape { id, fields }, the field lives at
// createdRecords[0].fields.Description and this concatenation would
// produce "undefined (UPDATED)" — confirm against ../tools/airtable-crud.
const recordToUpdate = {
id: createdRecords[0].id,
fields: {
Description: createdRecords[0].Description + ' (UPDATED)',
Status: 'Completed'
}
};
const updatedRecords = await crudUtils.updateRecords(baseId, DEMO_TABLE_NAME, [recordToUpdate]);
console.log(`✅ Updated ${updatedRecords.length} record`);
// Step 8: Get the updated record
console.log('\nStep 8: Getting the updated record...');
const updatedRecord = await crudUtils.getRecord(baseId, DEMO_TABLE_NAME, createdRecords[0].id);
console.log('Updated record:');
console.log(JSON.stringify(updatedRecord, null, 2));
// Step 9: Demonstrate filtering records
console.log('\nStep 9: Filtering records by status...');
// Fourth argument is an Airtable formula string used as filterByFormula.
const completedRecords = await crudUtils.readRecords(baseId, DEMO_TABLE_NAME, 100, 'Status="Completed"');
console.log(`✅ Found ${completedRecords.length} records with Status="Completed"`);
console.log('\n=================================');
console.log('      ENV DEMO COMPLETED         ');
console.log('=================================');
console.log('\nThis script demonstrated:');
console.log('1. Loading environment variables from .env file');
console.log('2. Accessing an Airtable base using AIRTABLE_BASE_ID');
console.log('3. Creating a table (if it doesn\'t exist)');
console.log('4. Creating, reading, and updating records');
console.log('5. Filtering records using Airtable formulas');
console.log('\nAll operations used the AIRTABLE_BASE_ID environment variable');
} catch (error) {
console.error(`❌ Error: ${error.message}`);
process.exit(1);
}
}
// Run the demo
runDemo();
```
--------------------------------------------------------------------------------
/src/typescript/app/tools/describe.ts:
--------------------------------------------------------------------------------
```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp';
import {
DescribeInput,
DescribeOutput,
describeInputSchema,
describeInputShape,
describeOutputSchema
} from '../types';
import { AppContext } from '../context';
import { GovernanceError, NotFoundError } from '../../errors';
import { handleToolError } from './handleError';
// Convenience aliases for the entry shapes nested inside DescribeOutput.
type DescribeTableEntry = NonNullable<DescribeOutput['tables']>[number];
// A single field entry within a described table.
type DescribeFieldEntry = NonNullable<DescribeTableEntry['fields']>[number];
// A single view entry within a described table.
type DescribeViewEntry = NonNullable<DescribeTableEntry['views']>[number];
/**
 * Coerce a raw Airtable field payload into a DescribeFieldEntry.
 * Missing id/name/type collapse to empty strings; description and
 * options are copied only when present with the expected runtime type.
 */
function normalizeField(raw: unknown): DescribeFieldEntry {
  const src = raw as Record<string, unknown>;
  const entry: DescribeFieldEntry = {
    id: String(src?.id ?? ''),
    name: String(src?.name ?? ''),
    type: String(src?.type ?? '')
  };
  const description = src?.description;
  if (typeof description === 'string' && description) {
    entry.description = description;
  }
  const options = src?.options;
  if (options && typeof options === 'object') {
    entry.options = options as Record<string, unknown>;
  }
  return entry;
}
/**
 * Coerce a raw Airtable view payload into a DescribeViewEntry.
 * Missing id/name collapse to empty strings; type is copied only
 * when it is a non-empty string.
 */
function normalizeView(raw: unknown): DescribeViewEntry {
  const src = raw as Record<string, unknown>;
  const entry: DescribeViewEntry = {
    id: String(src?.id ?? ''),
    name: String(src?.name ?? '')
  };
  const viewType = src?.type;
  if (typeof viewType === 'string' && viewType) {
    entry.type = viewType;
  }
  return entry;
}
/**
 * Coerce a raw Airtable table payload into a DescribeTableEntry.
 * Field and view arrays are normalized only when the corresponding
 * include flag is set and the raw value is actually an array.
 */
function normalizeTable(
  raw: unknown,
  { includeFields, includeViews }: { includeFields: boolean; includeViews: boolean }
): DescribeTableEntry {
  const src = raw as Record<string, unknown>;
  const entry: DescribeTableEntry = {
    id: String(src?.id ?? ''),
    name: String(src?.name ?? '')
  };
  const primary = src?.primaryFieldId;
  if (typeof primary === 'string' && primary) {
    entry.primaryFieldId = primary;
  }
  if (includeFields && Array.isArray(src?.fields)) {
    entry.fields = (src.fields as unknown[]).map((rawField) => normalizeField(rawField));
  }
  if (includeViews && Array.isArray(src?.views)) {
    entry.views = (src.views as unknown[]).map((rawView) => normalizeView(rawView));
  }
  return entry;
}
/**
 * Registers the `describe` MCP tool, which returns schema metadata for an
 * Airtable base (all governance-allowed tables) or for a single table,
 * honoring the governance allow-lists for operations, bases, and tables.
 */
export function registerDescribeTool(server: McpServer, ctx: AppContext): void {
  server.registerTool(
    'describe',
    {
      description: 'Describe Airtable base or table schema.',
      inputSchema: describeInputShape,
      outputSchema: describeOutputSchema.shape
    },
    async (args: DescribeInput, _extra: unknown) => {
      try {
        const input = describeInputSchema.parse(args);
        // Governance checks: the operation itself and the target base.
        ctx.governance.ensureOperationAllowed('describe');
        ctx.governance.ensureBaseAllowed(input.baseId);
        const includeFields = input.includeFields ?? true;
        const includeViews = input.includeViews ?? false;
        const logger = ctx.logger.child({
          tool: 'describe',
          baseId: input.baseId,
          scope: input.scope
        });
        // Fetch base metadata and the table list in parallel.
        const [baseInfo, tableInfo] = await Promise.all([
          ctx.airtable.getBase(input.baseId),
          ctx.airtable.listTables(input.baseId)
        ]);
        // Fall back to the base id when the API response has no name.
        const baseName =
          typeof (baseInfo as any)?.name === 'string'
            ? String((baseInfo as any).name)
            : input.baseId;
        const rawTables: unknown[] = Array.isArray((tableInfo as any)?.tables)
          ? ((tableInfo as any).tables as unknown[])
          : [];
        // Keep only tables permitted by governance (matched by id OR name),
        // then normalize them into the output shape.
        const tables: DescribeTableEntry[] = rawTables
          .filter((rawTable: unknown) => {
            const record = rawTable as Record<string, unknown>;
            const tableId = typeof record.id === 'string' ? record.id : '';
            const tableName = typeof record.name === 'string' ? record.name : '';
            const idAllowed = tableId
              ? ctx.governance.isTableAllowed(input.baseId, tableId)
              : false;
            const nameAllowed = tableName
              ? ctx.governance.isTableAllowed(input.baseId, tableName)
              : false;
            return idAllowed || nameAllowed;
          })
          .map((table: unknown) => normalizeTable(table, { includeFields, includeViews }));
        let selectedTables: DescribeTableEntry[] = tables;
        if (input.scope === 'table') {
          // Resolve the requested table by exact id or case-insensitive name.
          const target = tables.find(
            (tableRecord) =>
              String(tableRecord.id) === input.table ||
              String(tableRecord.name).toLowerCase() === input.table?.toLowerCase()
          );
          if (!target) {
            const context: Record<string, string> = { baseId: input.baseId };
            if (input.table) {
              context.table = input.table;
            }
            throw new NotFoundError(`Table ${input.table} not found in base ${input.baseId}`, {
              context
            });
          }
          const targetId = String(target.id);
          const targetName = String(target.name);
          // Re-check governance on the resolved table (defense in depth).
          if (
            !ctx.governance.isTableAllowed(input.baseId, targetId) &&
            !ctx.governance.isTableAllowed(input.baseId, targetName)
          ) {
            const context: Record<string, string> = { baseId: input.baseId };
            if (input.table) {
              context.table = input.table;
            }
            throw new GovernanceError(`Table ${input.table} is not allowed in base ${input.baseId}`, {
              context
            });
          }
          selectedTables = [target];
        }
        const structuredContent: DescribeOutput = {
          base: {
            id: input.baseId,
            name: baseName
          },
          tables: selectedTables
        };
        if (input.scope === 'base' && includeViews) {
          // Bug fix: this previously flattened views from the *unfiltered*
          // rawTables list, leaking view metadata for tables excluded by
          // governance. Collect views from the governance-filtered,
          // already-normalized tables instead (their `views` arrays were
          // populated above because includeViews is true in this branch).
          structuredContent.views = tables.flatMap((table) => table.views ?? []);
        }
        logger.debug('Describe completed', {
          tableCount: selectedTables.length
        });
        return {
          structuredContent,
          content: [] as const
        };
      } catch (error) {
        return handleToolError('describe', error, ctx);
      }
    }
  );
}
```
--------------------------------------------------------------------------------
/tests/test_all_features.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
echo "🎯 COMPREHENSIVE TEST - AIRTABLE MCP v1.4.0"
echo "==========================================="
echo ""
PASSED=0
FAILED=0
TOTAL=0
# Test function
# Record a single test outcome and update the global counters.
#   $1 - human-readable test name
#   $2 - result: PASS, FAIL, or SKIP
# Bug fix: SKIP previously fell through to the else branch and was
# counted (and printed) as a failure, so skipped dependent tests — e.g.
# when a record/webhook ID was never captured — failed the whole run.
# A skipped test now counts toward TOTAL but neither PASSED nor FAILED.
test_feature() {
    local name=$1
    local result=$2
    ((TOTAL++))
    if [ "$result" = "PASS" ]; then
        echo "✅ $name"
        ((PASSED++))
    elif [ "$result" = "SKIP" ]; then
        echo "⏭️ $name (skipped)"
    else
        echo "❌ $name"
        ((FAILED++))
    fi
}
echo "📊 TESTING ALL 12 TOOLS"
echo "======================="
echo ""
# Each check POSTs a JSON-RPC tools/call to the local MCP server and
# pattern-matches the response body. Tests that depend on an ID captured
# by an earlier step are skipped when that ID is empty ([ -n ] guards,
# replacing the non-idiomatic [ ! -z ] form).
# 1. List tables
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d '{"jsonrpc": "2.0", "id": 1, "method": "tools/call", "params": {"name": "list_tables"}}')
if [[ "$result" == *"table"* ]]; then
test_feature "list_tables" "PASS"
else
test_feature "list_tables" "FAIL"
fi
# 2. Create record (captures RECORD_ID for the dependent tests below)
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d '{"jsonrpc": "2.0", "id": 2, "method": "tools/call", "params": {"name": "create_record", "arguments": {"table": "tblH7TnJxYpNqhQYK", "fields": {"Name": "Final Test", "Status": "Active"}}}}')
if [[ "$result" == *"Successfully created"* ]]; then
test_feature "create_record" "PASS"
RECORD_ID=$(echo "$result" | grep -o 'rec[a-zA-Z0-9]\{10,20\}' | head -1)
else
test_feature "create_record" "FAIL"
RECORD_ID=""
fi
# 3. Get record
if [ -n "$RECORD_ID" ]; then
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d "{\"jsonrpc\": \"2.0\", \"id\": 3, \"method\": \"tools/call\", \"params\": {\"name\": \"get_record\", \"arguments\": {\"table\": \"tblH7TnJxYpNqhQYK\", \"recordId\": \"$RECORD_ID\"}}}")
[[ "$result" == *"Record $RECORD_ID"* ]] && test_feature "get_record" "PASS" || test_feature "get_record" "FAIL"
else
test_feature "get_record" "SKIP"
fi
# 4. Update record
if [ -n "$RECORD_ID" ]; then
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d "{\"jsonrpc\": \"2.0\", \"id\": 4, \"method\": \"tools/call\", \"params\": {\"name\": \"update_record\", \"arguments\": {\"table\": \"tblH7TnJxYpNqhQYK\", \"recordId\": \"$RECORD_ID\", \"fields\": {\"Status\": \"Completed\"}}}}")
[[ "$result" == *"Successfully updated"* ]] && test_feature "update_record" "PASS" || test_feature "update_record" "FAIL"
else
test_feature "update_record" "SKIP"
fi
# 5. List records
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d '{"jsonrpc": "2.0", "id": 5, "method": "tools/call", "params": {"name": "list_records", "arguments": {"table": "tblH7TnJxYpNqhQYK", "maxRecords": 3}}}')
[[ "$result" == *"record"* ]] && test_feature "list_records" "PASS" || test_feature "list_records" "FAIL"
# 6. Search records
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d '{"jsonrpc": "2.0", "id": 6, "method": "tools/call", "params": {"name": "search_records", "arguments": {"table": "tblH7TnJxYpNqhQYK", "maxRecords": 3}}}')
[[ "$result" == *"record"* ]] && test_feature "search_records" "PASS" || test_feature "search_records" "FAIL"
# 7. Delete record
if [ -n "$RECORD_ID" ]; then
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d "{\"jsonrpc\": \"2.0\", \"id\": 7, \"method\": \"tools/call\", \"params\": {\"name\": \"delete_record\", \"arguments\": {\"table\": \"tblH7TnJxYpNqhQYK\", \"recordId\": \"$RECORD_ID\"}}}")
[[ "$result" == *"Successfully deleted"* ]] && test_feature "delete_record" "PASS" || test_feature "delete_record" "FAIL"
else
test_feature "delete_record" "SKIP"
fi
# 8. List webhooks
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d '{"jsonrpc": "2.0", "id": 8, "method": "tools/call", "params": {"name": "list_webhooks"}}')
[[ "$result" == *"webhook"* ]] && test_feature "list_webhooks" "PASS" || test_feature "list_webhooks" "FAIL"
# 9. Create webhook (captures WEBHOOK_ID for the dependent tests below)
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d '{"jsonrpc": "2.0", "id": 9, "method": "tools/call", "params": {"name": "create_webhook", "arguments": {"notificationUrl": "https://webhook.site/test-final"}}}')
if [[ "$result" == *"Successfully created"* ]]; then
test_feature "create_webhook" "PASS"
WEBHOOK_ID=$(echo "$result" | grep -o 'ach[a-zA-Z0-9]*' | head -1)
else
test_feature "create_webhook" "FAIL"
WEBHOOK_ID=""
fi
# 10. Get webhook payloads
if [ -n "$WEBHOOK_ID" ]; then
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d "{\"jsonrpc\": \"2.0\", \"id\": 10, \"method\": \"tools/call\", \"params\": {\"name\": \"get_webhook_payloads\", \"arguments\": {\"webhookId\": \"$WEBHOOK_ID\"}}}")
[[ "$result" == *"payload"* ]] && test_feature "get_webhook_payloads" "PASS" || test_feature "get_webhook_payloads" "FAIL"
else
test_feature "get_webhook_payloads" "SKIP"
fi
# 11. Refresh webhook
if [ -n "$WEBHOOK_ID" ]; then
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d "{\"jsonrpc\": \"2.0\", \"id\": 11, \"method\": \"tools/call\", \"params\": {\"name\": \"refresh_webhook\", \"arguments\": {\"webhookId\": \"$WEBHOOK_ID\"}}}")
[[ "$result" == *"refreshed"* ]] && test_feature "refresh_webhook" "PASS" || test_feature "refresh_webhook" "FAIL"
else
test_feature "refresh_webhook" "SKIP"
fi
# 12. Delete webhook
if [ -n "$WEBHOOK_ID" ]; then
result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \
-d "{\"jsonrpc\": \"2.0\", \"id\": 12, \"method\": \"tools/call\", \"params\": {\"name\": \"delete_webhook\", \"arguments\": {\"webhookId\": \"$WEBHOOK_ID\"}}}")
[[ "$result" == *"deleted"* ]] && test_feature "delete_webhook" "PASS" || test_feature "delete_webhook" "FAIL"
else
test_feature "delete_webhook" "SKIP"
fi
echo ""
echo "📈 FINAL RESULTS"
echo "==============="
echo "Total Tests: $TOTAL"
echo "✅ Passed: $PASSED"
echo "❌ Failed: $FAILED"
echo "Success Rate: $(( PASSED * 100 / TOTAL ))%"
# Exit non-zero if anything failed so CI can gate on this script.
if [ $FAILED -eq 0 ]; then
echo ""
echo "🎉 ALL TESTS PASSED! v1.4.0 is ready for production!"
exit 0
else
echo ""
echo "⚠️ $FAILED test(s) failed. Please review."
exit 1
fi
```
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
```markdown
# 🚀 Pull Request - Trust Score 100/100
<!--
Thank you for contributing to the Airtable MCP Server!
Your contribution helps us achieve our goal of a perfect 100/100 Trust Score.
-->
## 📋 PR Information
**PR Type**: <!-- Check all that apply -->
- [ ] 🐛 Bug Fix
- [ ] ✨ New Feature
- [ ] 🔒 Security Enhancement
- [ ] 📚 Documentation Update
- [ ] 🧹 Code Refactoring
- [ ] ⚡ Performance Improvement
- [ ] 🧪 Test Enhancement
- [ ] 🔧 Build/CI Changes
- [ ] 💥 Breaking Change
**Issue Reference**:
<!-- Link to the issue this PR addresses -->
- Closes #[issue_number]
- Related to #[issue_number]
## 📝 Description
### What Changed
<!-- Provide a clear and concise description of what this PR does -->
### Why This Change
<!-- Explain the motivation behind this change -->
### How It Works
<!-- Describe the technical approach and implementation -->
## 🎯 Trust Score Impact
**Trust Score Categories Affected**: <!-- Check all that apply -->
- [ ] 🛡️ Security & Authentication
- [ ] 📊 Code Quality & Standards
- [ ] 🧪 Testing & Reliability
- [ ] 📚 Documentation & Usability
- [ ] 🚀 Performance & Scalability
- [ ] 🔧 CI/CD & Automation
- [ ] 🌐 Protocol Compliance
- [ ] 👥 Community & Support
**Expected Impact**:
<!-- Describe how this contributes to our 100/100 Trust Score goal -->
## 🧪 Testing Checklist
### Automated Tests
- [ ] Unit tests added/updated
- [ ] Integration tests added/updated
- [ ] Security tests added/updated
- [ ] Performance tests added/updated
- [ ] All existing tests pass
- [ ] Coverage maintained or improved
### Manual Testing
- [ ] MCP protocol functionality verified
- [ ] OAuth2 authentication tested (if applicable)
- [ ] Rate limiting verified (if applicable)
- [ ] Error handling tested
- [ ] Edge cases covered
- [ ] Backward compatibility confirmed
### Test Environment
**Tested On**:
- [ ] Node.js 16.x
- [ ] Node.js 18.x
- [ ] Node.js 20.x
- [ ] Docker container
- [ ] Multiple operating systems
**MCP Clients Tested**:
- [ ] Claude Desktop
- [ ] Cursor IDE
- [ ] VS Code with Cline
- [ ] Custom MCP client
## 🔒 Security Review
### Security Checklist
- [ ] No hardcoded secrets or credentials
- [ ] Input validation implemented
- [ ] Output sanitization applied
- [ ] Authentication/authorization checked
- [ ] SQL injection prevention verified
- [ ] XSS prevention implemented
- [ ] CSRF protection maintained
- [ ] Rate limiting respected
- [ ] Error messages don't leak sensitive info
- [ ] Dependencies updated and secure
### Security Impact Assessment
<!-- If this PR has security implications, describe them -->
- **Authentication Changes**:
- **Data Access Changes**:
- **New Attack Vectors**:
- **Mitigation Measures**:
## 📊 Performance Impact
### Performance Checklist
- [ ] No significant performance regression
- [ ] Memory usage optimized
- [ ] Database queries optimized (if applicable)
- [ ] Network requests minimized
- [ ] Caching implemented where appropriate
- [ ] Async/await used properly
### Benchmarks
<!-- If applicable, include performance measurements -->
**Before**:
```
Metric: [value]
```
**After**:
```
Metric: [value]
```
## 📚 Documentation
### Documentation Updates
- [ ] README.md updated
- [ ] API documentation updated
- [ ] Code comments added/updated
- [ ] Examples updated
- [ ] Troubleshooting guide updated
- [ ] CHANGELOG.md updated
- [ ] Migration guide provided (for breaking changes)
### Documentation Quality
- [ ] Clear and concise explanations
- [ ] Code examples provided
- [ ] Screenshots/diagrams included (if applicable)
- [ ] Links verified and working
## 🔄 Breaking Changes
### Breaking Change Assessment
- [ ] This is NOT a breaking change
- [ ] This is a breaking change (explain below)
<!-- If breaking change, provide details -->
**Breaking Changes**:
- **What breaks**:
- **Migration path**:
- **Deprecation timeline**:
## 🎬 Demo/Examples
### How to Test This PR
```bash
# Step-by-step instructions to test this PR
git checkout [branch-name]
npm install
# ... additional setup steps
```
### Usage Examples
```javascript
// Provide code examples showing the new functionality
```
## 📋 Review Checklist
### Code Quality
- [ ] Code follows project style guidelines
- [ ] No console.log or debug statements
- [ ] Error handling is comprehensive
- [ ] Code is well-commented
- [ ] Functions are properly documented
- [ ] Variable names are descriptive
- [ ] Magic numbers avoided
### Git History
- [ ] Commit messages are clear and descriptive
- [ ] Commits are logically organized
- [ ] No merge commits (rebased if needed)
- [ ] No sensitive information in commit history
## 🤝 Collaboration
### Review Requests
**Reviewers Needed**:
- [ ] Security review required
- [ ] Performance review required
- [ ] Documentation review required
- [ ] UI/UX review required
**Specific Review Areas**:
<!-- Ask reviewers to focus on specific aspects -->
- Please review the OAuth2 implementation for security
- Please check the new API endpoints for usability
- Please verify the documentation is clear
### Follow-up Tasks
<!-- List any follow-up work needed -->
- [ ] Create/update related issues
- [ ] Plan future enhancements
- [ ] Update project roadmap
- [ ] Coordinate with documentation team
## 🎯 Success Criteria
### Definition of Done
- [ ] All acceptance criteria met
- [ ] All tests passing
- [ ] Security review completed
- [ ] Documentation updated
- [ ] Performance impact assessed
- [ ] Backward compatibility verified
- [ ] CI/CD pipeline passing
### Trust Score Validation
- [ ] Contributes to security improvements
- [ ] Maintains or improves code quality
- [ ] Includes comprehensive testing
- [ ] Provides clear documentation
- [ ] Follows community best practices
## 📸 Screenshots/Media
<!-- Include screenshots, GIFs, or videos demonstrating the changes -->
## 🙏 Acknowledgments
<!-- Thank contributors, mention inspiration, or credit sources -->
---
## 📞 Need Help?
- 💬 **Questions**: Start a [discussion](https://github.com/rashidazarang/airtable-mcp/discussions)
- 🐛 **Issues**: Check our [issue tracker](https://github.com/rashidazarang/airtable-mcp/issues)
- 📚 **Docs**: Read our [documentation](./README.md)
- 🔒 **Security**: Email security@[domain] for private matters
**🎯 Our Mission**: Building the most trusted and comprehensive MCP server for Airtable with a perfect **100/100 Trust Score**. Thank you for contributing to this goal! 🚀
```
--------------------------------------------------------------------------------
/RELEASE_SUMMARY_v3.2.x.md:
--------------------------------------------------------------------------------
```markdown
# Release Summary: v3.2.1 - v3.2.4
## Major Security & Architecture Updates
This document summarizes all releases from v3.2.1 to v3.2.4, representing a comprehensive overhaul of the Airtable MCP server with critical security fixes and architectural improvements.
---
## 📦 v3.2.4 - Complete XSS Security Fix
**Released:** September 9, 2025
**Type:** 🔒 Security Release
**GitHub Alerts:** #10 & #11 Resolved
### What's Fixed
- **XSS Vulnerabilities** in OAuth2 endpoint (`airtable_simple_production.js:708-710`)
- ✅ Unicode escaping for all special characters in JSON
- ✅ Using `textContent` instead of `innerHTML` for dynamic content
- ✅ Multiple layers of character escaping
- ✅ Defense-in-depth XSS prevention
### Technical Details
```javascript
// Before (Vulnerable)
var config = ${JSON.stringify(data)};
<p>Client ID: ${clientId}</p>
// After (Secure)
var config = ${safeJsonConfig}; // Unicode-escaped
document.getElementById('client-id').textContent = clientId;
```
---
## 📦 v3.2.3 - Command Injection Complete Fix
**Released:** September 9, 2025
**Type:** 🔒 Security Release
**GitHub Alert:** #10 (Python) Resolved
### What's Fixed
- **Command Injection** in Python test client (`test_client.py`)
- ✅ BASE_ID validation at startup
- ✅ Eliminated string interpolation vulnerabilities
- ✅ Path traversal protection
- ✅ Token format validation
- ✅ Complete input sanitization
### Security Improvements
```python
# Before (Vulnerable)
result = api_call(f"meta/bases/{BASE_ID}/tables")
# After (Secure)
# BASE_ID validated at startup
if not all(c.isalnum() or c in '-_' for c in BASE_ID):
print(f"Error: Invalid BASE_ID format")
sys.exit(1)
endpoint = "meta/bases/" + BASE_ID + "/tables"
```
---
## 📦 v3.2.2 - Initial Security Patches
**Released:** September 9, 2025
**Type:** 🔒 Security Release
**GitHub Alert:** #10 Partial Fix
### What's Fixed
- **Initial command injection fixes** in `test_client.py`
- ✅ Added input validation for API endpoints
- ✅ Removed unused subprocess import
- ✅ Basic endpoint sanitization
### Note
This was a partial fix. Complete resolution came in v3.2.3.
---
## 📦 v3.2.1 - TypeScript Architecture Fix & Project Restructure
**Released:** September 9, 2025
**Type:** 🏗️ Major Architecture Update
### Critical Fix
- **TypeScript Compilation Issue** completely resolved
- ✅ Fixed `.d.ts` files containing runtime code
- ✅ Proper separation of types and implementation
### New Files Created
```
src/typescript/
├── errors.ts # Runtime error classes
├── tools-schemas.ts # Tool schema constants
└── prompt-templates.ts # AI prompt templates
```
### Project Restructure
```
airtable-mcp/
├── src/
│ ├── index.js # Main entry point
│ ├── typescript/ # TypeScript implementation
│ ├── javascript/ # JavaScript implementation
│ └── python/ # Python implementation
├── dist/ # Compiled output
├── docs/
│ ├── guides/ # User guides
│ └── releases/ # Release notes
├── tests/ # All test files
└── types/ # TypeScript definitions
```
### What Changed
- ✅ World-class project organization
- ✅ TypeScript now compiles successfully
- ✅ Proper build system with npm scripts
- ✅ ESLint and Prettier configurations
- ✅ Jest testing framework setup
- ✅ CI/CD pipeline structure
---
## 🎯 Combined Impact
### Security Fixes Summary
| Alert | Type | File | Version | Status |
|-------|------|------|---------|---------|
| #10 | XSS | `airtable_simple_production.js:708` | v3.2.4 | ✅ Fixed |
| #11 | XSS | `airtable_simple_production.js:710` | v3.2.4 | ✅ Fixed |
| #10 | Command Injection | `test_client.py` | v3.2.3 | ✅ Fixed |
### Architecture Improvements
- ✅ TypeScript compilation working
- ✅ Proper file organization
- ✅ Clean separation of concerns
- ✅ Professional build system
- ✅ Comprehensive testing setup
### Backwards Compatibility
✅ **No breaking changes** across all versions
- All existing functionality preserved
- API endpoints unchanged
- Both JS and TS implementations working
---
## 📥 Installation
### New Installation
```bash
npm install @rashidazarang/airtable-mcp@3.2.4
```
### Update from Any Previous Version
```bash
npm update @rashidazarang/airtable-mcp
```
### Verify Installation
```bash
npm list @rashidazarang/airtable-mcp
# Should show: @rashidazarang/airtable-mcp@3.2.4
```
---
## 🚀 Quick Start
### JavaScript
```bash
AIRTABLE_TOKEN=your_token AIRTABLE_BASE_ID=your_base \
node node_modules/@rashidazarang/airtable-mcp/src/javascript/airtable_simple_production.js
```
### TypeScript
```bash
# Build first
npm run build
# Then run
AIRTABLE_TOKEN=your_token AIRTABLE_BASE_ID=your_base \
node node_modules/@rashidazarang/airtable-mcp/dist/typescript/airtable-mcp-server.js
```
---
## 📋 Migration Guide
### From v3.0.x or earlier
1. Update to v3.2.4: `npm update @rashidazarang/airtable-mcp`
2. If using TypeScript, rebuild: `npm run build`
3. No code changes required
### From v3.1.x
1. Update to v3.2.4: `npm update @rashidazarang/airtable-mcp`
2. No changes required - security patches only
### From v3.2.1-3.2.3
1. Update to v3.2.4: `npm update @rashidazarang/airtable-mcp`
2. Get latest security fixes
---
## ⚠️ Important Security Notice
**All users should update to v3.2.4 immediately** to get:
- Complete XSS protection in OAuth2 flows
- Full command injection prevention
- Path traversal protection
- Comprehensive input validation
---
## 📊 Version Comparison
| Feature | v3.2.1 | v3.2.2 | v3.2.3 | v3.2.4 |
|---------|--------|--------|--------|--------|
| TypeScript Compilation | ✅ Fixed | ✅ | ✅ | ✅ |
| Project Structure | ✅ New | ✅ | ✅ | ✅ |
| Command Injection Fix | ❌ | ⚠️ Partial | ✅ Complete | ✅ |
| XSS Protection | ❌ | ❌ | ❌ | ✅ Complete |
| Production Ready | ✅ | ✅ | ✅ | ✅ |
---
## 🙏 Acknowledgments
- GitHub Security Scanning for identifying vulnerabilities
- Community for patience during rapid security updates
- Contributors to the TypeScript architecture improvements
---
## 📚 Resources
- **Repository:** https://github.com/rashidazarang/airtable-mcp
- **Issues:** https://github.com/rashidazarang/airtable-mcp/issues
- **NPM:** https://www.npmjs.com/package/@rashidazarang/airtable-mcp
- **Changelog:** [CHANGELOG.md](./CHANGELOG.md)
---
**Current Version: v3.2.4**
**Status: Fully Secure & Production Ready**
**Last Updated: September 9, 2025**
```
--------------------------------------------------------------------------------
/examples/airtable-crud-example.js:
--------------------------------------------------------------------------------
```javascript
/**
* Example script demonstrating how to use the Airtable CRUD utilities
*/
const dotenv = require('dotenv');
const baseUtils = require('../tools/airtable-base');
const crudUtils = require('../tools/airtable-crud');
const schemaUtils = require('../tools/airtable-schema');
// Load environment variables from .env (AIRTABLE_BASE_ID is read in runExample;
// presumably the ../tools helpers read the API token from here too — TODO confirm).
dotenv.config();
// Configuration
// Name of the demo table this script creates and operates on.
const EXAMPLE_TABLE_NAME = 'Example Tasks';
// Seed rows inserted in Step 5; field names match the schema built in Step 4.
const EXAMPLE_RECORDS = [
{
Name: 'Complete project documentation',
Description: 'Write comprehensive documentation for the project',
Status: 'Not Started',
Priority: 'High',
DueDate: '2023-12-31'
},
{
Name: 'Fix login bug',
Description: 'Users are experiencing issues with the login process',
Status: 'In Progress',
Priority: 'Critical',
DueDate: '2023-11-15'
},
{
Name: 'Add new feature',
Description: 'Implement the new feature requested by the client',
Status: 'Not Started',
Priority: 'Medium',
DueDate: '2024-01-15'
}
];
/**
 * Main function to run the example.
 *
 * Walks a complete CRUD lifecycle against the base identified by the
 * AIRTABLE_BASE_ID environment variable:
 *   1. verify base access            2. list existing tables
 *   3. check for the example table   4. create it if missing
 *   5. create records                6. read them back
 *   7. filter by status              8. update the filtered records
 *   9. verify the updates           10. deletion (intentionally skipped)
 *
 * Exits the process with code 1 when the base id is missing or any step fails.
 */
async function runExample() {
  console.log('Starting Airtable CRUD Example...\n');

  const baseId = process.env.AIRTABLE_BASE_ID;
  if (!baseId) {
    console.error('AIRTABLE_BASE_ID not set in .env file');
    process.exit(1);
  }

  try {
    // Step 1: Check if we have access to the base
    console.log('Step 1: Checking base access...');
    const bases = await baseUtils.listAllBases();
    const hasAccess = bases.some((base) => base.id === baseId);
    if (!hasAccess) {
      throw new Error(`No access to base with ID: ${baseId}`);
    }
    console.log(`✅ Access confirmed to base: ${baseId}\n`);

    // Step 2: List existing tables
    console.log('Step 2: Listing existing tables...');
    const tables = await baseUtils.listTables(baseId);
    console.log(`Found ${tables.length} tables in the base:`);
    tables.forEach((table) => console.log(`- ${table.name}`));
    console.log();

    // Step 3: Check if our example table exists
    console.log('Step 3: Checking if example table exists...');
    const tableExists = await crudUtils.tableExists(baseId, EXAMPLE_TABLE_NAME);
    if (tableExists) {
      console.log(`Table "${EXAMPLE_TABLE_NAME}" already exists\n`);
    } else {
      console.log(`Table "${EXAMPLE_TABLE_NAME}" does not exist, creating it...\n`);

      // Step 4: Create the example table (schema matches EXAMPLE_RECORDS fields)
      console.log('Step 4: Creating example table...');
      const tableConfig = {
        name: EXAMPLE_TABLE_NAME,
        description: 'Example table for demonstrating CRUD operations',
        fields: [
          {
            name: 'Name',
            type: 'singleLineText',
            description: 'Task name'
          },
          {
            name: 'Description',
            type: 'multilineText',
            description: 'Task description'
          },
          {
            name: 'Status',
            type: 'singleSelect',
            options: {
              choices: [
                { name: 'Not Started' },
                { name: 'In Progress' },
                { name: 'Completed' }
              ]
            },
            description: 'Current status of the task'
          },
          {
            name: 'Priority',
            type: 'singleSelect',
            options: {
              choices: [
                { name: 'Low' },
                { name: 'Medium' },
                { name: 'High' },
                { name: 'Critical' }
              ]
            },
            description: 'Task priority'
          },
          {
            name: 'DueDate',
            type: 'date',
            description: 'When the task is due',
            options: {
              dateFormat: {
                name: 'local'
              }
            }
          }
        ]
      };
      await schemaUtils.createTable(baseId, tableConfig);
      console.log(`✅ Created table: ${EXAMPLE_TABLE_NAME}\n`);
    }

    // Step 5: Create records
    console.log('Step 5: Creating example records...');
    const createdRecords = await crudUtils.createRecords(baseId, EXAMPLE_TABLE_NAME, EXAMPLE_RECORDS);
    console.log(`✅ Created ${createdRecords.length} records\n`);

    // Step 6: Read all records
    console.log('Step 6: Reading all records...');
    const allRecords = await crudUtils.readRecords(baseId, EXAMPLE_TABLE_NAME, 100);
    console.log(`✅ Read ${allRecords.length} records`);
    // Guard against an empty result: JSON.stringify(undefined) would print "undefined".
    if (allRecords.length > 0) {
      console.log('Sample record:');
      console.log(JSON.stringify(allRecords[0], null, 2));
    }
    console.log();

    // Step 7: Filter records (Airtable filterByFormula string)
    console.log('Step 7: Filtering records by status...');
    const notStartedRecords = await crudUtils.readRecords(
      baseId,
      EXAMPLE_TABLE_NAME,
      100,
      'Status="Not Started"'
    );
    console.log(`✅ Found ${notStartedRecords.length} records with Status="Not Started"`);
    notStartedRecords.forEach((record) => console.log(`- ${record.Name} (Priority: ${record.Priority})`));
    console.log();

    // Step 8: Update records — flip every "Not Started" record to "In Progress"
    console.log('Step 8: Updating records...');
    const recordsToUpdate = notStartedRecords.map((record) => ({
      id: record.id,
      fields: { Status: 'In Progress' }
    }));
    const updatedRecords = await crudUtils.updateRecords(baseId, EXAMPLE_TABLE_NAME, recordsToUpdate);
    console.log(`✅ Updated ${updatedRecords.length} records to Status="In Progress"\n`);

    // Step 9: Verify updates by re-querying with the new status
    console.log('Step 9: Verifying updates...');
    const inProgressRecords = await crudUtils.readRecords(
      baseId,
      EXAMPLE_TABLE_NAME,
      100,
      'Status="In Progress"'
    );
    console.log(`✅ Found ${inProgressRecords.length} records with Status="In Progress"`);
    inProgressRecords.forEach((record) => console.log(`- ${record.Name} (Priority: ${record.Priority})`));
    console.log();

    // Step 10: Delete records (optional - commented out to preserve data)
    console.log('Step 10: Deleting records (optional)...');
    console.log('Skipping deletion to preserve example data.');
    console.log('To delete records, uncomment the code below:');
    console.log('```');
    console.log('const recordIdsToDelete = allRecords.map(record => record.id);');
    console.log('const deletedRecords = await crudUtils.deleteRecords(baseId, EXAMPLE_TABLE_NAME, recordIdsToDelete);');
    console.log('console.log(`✅ Deleted ${deletedRecords.length} records`);');
    console.log('```\n');

    console.log('Example completed successfully!');
    console.log('You can now view the data in your Airtable base.');
  } catch (error) {
    console.error('Error during example:', error.message);
    process.exit(1);
  }
}

// Run the example (runExample handles its own errors and exit codes, so the
// returned promise cannot reject).
runExample();
```
--------------------------------------------------------------------------------
/docs/releases/RELEASE_NOTES_v1.5.0.md:
--------------------------------------------------------------------------------
```markdown
# 🚀 Airtable MCP Server v1.5.0 Release Notes
**Release Date**: August 15, 2025
**Major Update**: Enhanced Schema Management & Advanced Features
## 🎯 Overview
Version 1.5.0 represents a **major expansion** of the Airtable MCP Server, adding comprehensive schema management capabilities inspired by the best features from domdomegg's airtable-mcp-server while maintaining our unique webhook support. This release **doubles** the number of available tools from 12 to **23 tools**.
## ✨ New Features
### 📊 Schema Discovery Tools (5 New Tools)
1. **`list_bases`** - Discover all accessible Airtable bases
- Lists all bases with permissions
- Supports pagination with offset parameter
- Shows base names, IDs, and permission levels
2. **`get_base_schema`** - Complete base schema information
- Detailed table structures and relationships
- Field definitions with types and options
- View configurations and metadata
3. **`describe_table`** - Enhanced table inspection
- Comprehensive field information including IDs, types, descriptions
- View details and configurations
- Much more detailed than the basic `list_tables`
4. **`list_field_types`** - Field type reference
- Complete documentation of all Airtable field types
- Includes basic fields (text, number, date) and advanced fields (formulas, lookups)
- Helpful for understanding what field types are available for creation
5. **`get_table_views`** - View management
- Lists all views for a specific table
- Shows view types, IDs, and configurations
- Includes visible field information
### 🏗️ Table Management Tools (3 New Tools)
6. **`create_table`** - Programmatic table creation
- Create new tables with custom field definitions
- Support for all field types with proper validation
- Optional table descriptions
7. **`update_table`** - Table metadata modification
- Update table names and descriptions
- Non-destructive metadata changes
8. **`delete_table`** - Table removal (with safety checks)
- Requires explicit confirmation with `confirm=true`
- Permanently removes table and all data
- Safety warnings to prevent accidental deletions
### 🔧 Field Management Tools (3 New Tools)
9. **`create_field`** - Add fields to existing tables
- Support for all Airtable field types
- Custom field options and descriptions
- Validates field types and configurations
10. **`update_field`** - Modify existing field properties
- Update field names, descriptions, and options
- Change field configurations safely
11. **`delete_field`** - Remove fields (with safety checks)
- Requires explicit confirmation with `confirm=true`
- Permanently removes field and all data
- Safety warnings to prevent accidental deletions
## 🔄 Enhanced Existing Features
- **Improved error handling** for all metadata operations
- **Better table/field lookup** supporting both names and IDs
- **Enhanced validation** for destructive operations
- **Consistent response formatting** across all tools
## 📊 Tool Count Summary
| Category | v1.4.0 | v1.5.0 | New in v1.5.0 |
|----------|--------|--------|----------------|
| **Data Operations** | 7 | 7 | - |
| **Webhook Management** | 5 | 5 | - |
| **Schema Management** | 0 | 11 | ✅ 11 new tools |
| **Total Tools** | **12** | **23** | **+11 tools** |
## 🛠️ Technical Improvements
### API Enhancements
- **Metadata API Support**: Full integration with Airtable's metadata API endpoints
- **Enhanced callAirtableAPI Function**: Already supported metadata endpoints
- **Improved Error Handling**: Better error messages for schema operations
### Security & Safety
- **Confirmation Required**: Destructive operations require explicit confirmation
- **Validation Checks**: Proper field type and option validation
- **Safety Warnings**: Clear warnings for irreversible operations
### Authentication
- **Extended Scope Support**: Now leverages `schema.bases:read` and `schema.bases:write` scopes
- **Backward Compatibility**: All existing functionality remains unchanged
## 📚 New Capabilities
### For Users
- **Complete Base Discovery**: Find and explore all accessible bases
- **Advanced Schema Inspection**: Understand table and field structures in detail
- **Programmatic Table Creation**: Build tables through natural language
- **Dynamic Field Management**: Add, modify, and remove fields as needed
- **Comprehensive Field Reference**: Quick access to all available field types
### For Developers
- **Full CRUD for Schema**: Complete Create, Read, Update, Delete operations for tables and fields
- **Metadata-First Approach**: Rich schema information before data operations
- **Enhanced Automation**: Build complex Airtable structures programmatically
## 🚀 Getting Started with v1.5.0
### Installation
```bash
npm install -g @rashidazarang/[email protected]
```
### Required Token Scopes
For full v1.5.0 functionality, ensure your Airtable Personal Access Token includes:
- `data.records:read` - Read records
- `data.records:write` - Create, update, delete records
- `schema.bases:read` - View table schemas (**New requirement**)
- `schema.bases:write` - Create, modify tables and fields (**New requirement**)
- `webhook:manage` - Webhook operations (optional)
### Example Usage
```javascript
// Discover available bases
"List all my accessible Airtable bases"
// Explore a base structure
"Show me the complete schema for this base"
// Create a new table
"Create a new table called 'Projects' with fields: Name (text), Status (single select with options: Active, Completed, On Hold), and Due Date (date)"
// Add a field to existing table
"Add a 'Priority' field to the Projects table as a single select with options: Low, Medium, High"
// Get detailed table information
"Describe the Projects table with all field details"
```
## 🔧 Breaking Changes
**None** - v1.5.0 is fully backward compatible with v1.4.0. All existing tools and functionality remain unchanged.
## 🐛 Bug Fixes
- **Security**: Fixed clear-text logging of sensitive information (GitHub security alerts)
- **API Error Handling**: Improved error messages for invalid table/field references
- **Response Formatting**: Consistent JSON response structure across all tools
## 🌟 What's Next
- Enhanced search capabilities with field-specific filtering
- Batch operations for bulk table/field management
- Advanced view creation and management
- Performance optimizations for large bases
## 📈 Performance & Compatibility
- **Node.js**: Requires Node.js 14+
- **Rate Limits**: Respects Airtable's 5 requests/second limit
- **Memory Usage**: Optimized for efficient schema operations
- **Response Times**: Fast metadata operations with caching
## 🤝 Community & Support
This release incorporates community feedback and feature requests. The v1.5.0 implementation draws inspiration from domdomegg's airtable-mcp-server while maintaining our unique webhook capabilities and enhanced error handling.
**GitHub**: https://github.com/rashidazarang/airtable-mcp
**NPM**: https://www.npmjs.com/package/@rashidazarang/airtable-mcp
**Issues**: https://github.com/rashidazarang/airtable-mcp/issues
---
🎉 **Thank you for using Airtable MCP Server!** This release makes it the most comprehensive Airtable integration available for AI assistants, combining powerful schema management with robust webhook support.
```
--------------------------------------------------------------------------------
/tests/test_v1.6.0_comprehensive.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# COMPREHENSIVE TEST SUITE - Airtable MCP Server v1.6.0
# Testing ALL 33 tools including 10 new v1.6.0 features
# Abort on the first command that exits non-zero.
set -e
# MCP endpoint under test — a server must already be listening locally.
SERVER_URL="http://localhost:8010/mcp"
# Global pass/fail counters, updated by test_tool.
PASSED=0
FAILED=0
# Record IDs harvested from batch_create_records, reused by the
# batch update/delete tests later in the run.
BATCH_RECORD_IDS=()
echo "🚀 COMPREHENSIVE TEST SUITE - v1.6.0"
echo "===================================="
echo "Testing ALL 33 tools with real API calls"
echo "New in v1.6.0: Batch operations, attachments, advanced views, base management"
echo ""
# Issue one JSON-RPC "tools/call" request to the MCP server.
#   $1 = tool name, $2 = JSON object with the tool's arguments.
# Prints the raw JSON response on stdout.
call_tool() {
  local name="$1"
  local args="$2"
  local payload="{\"jsonrpc\": \"2.0\", \"id\": 1, \"method\": \"tools/call\", \"params\": {\"name\": \"$name\", \"arguments\": $args}}"
  curl -s -X POST "$SERVER_URL" \
    -H "Content-Type: application/json" \
    -d "$payload"
}
# Enhanced test function.
#   $1 = tool name, $2 = JSON arguments, $3 = human-readable description,
#   $4 = "true" when the call is EXPECTED to fail (error-path test).
# Increments PASSED/FAILED and, for batch_create_records, harvests the
# created record IDs into BATCH_RECORD_IDS for later update/delete tests.
#
# BUGFIX: counters must not use ((PASSED++)) — under `set -e` a post-increment
# of 0 evaluates to 0, returns exit status 1, and aborts the whole script on
# the very first passing test. Plain assignments always return status 0.
test_tool() {
  local tool_name="$1"
  local params="$2"
  local description="$3"
  local expect_fail="$4"
  echo -n "🔧 $tool_name: $description... "
  if result=$(call_tool "$tool_name" "$params" 2>&1); then
    if echo "$result" | jq -e '.result.content[0].text' > /dev/null 2>&1; then
      response_text=$(echo "$result" | jq -r '.result.content[0].text')
      if [[ "$expect_fail" == "true" ]]; then
        if echo "$response_text" | grep -q "error\|Error\|not found\|Unknown field"; then
          echo "✅ PASS (Expected failure)"
          PASSED=$((PASSED + 1))
        else
          echo "❌ FAIL (Should have failed)"
          FAILED=$((FAILED + 1))
        fi
      else
        echo "✅ PASS"
        PASSED=$((PASSED + 1))
        # Store batch record IDs for cleanup
        if [[ "$tool_name" == "batch_create_records" ]]; then
          while IFS= read -r line; do
            if [[ $line =~ ID:\ (rec[a-zA-Z0-9]+) ]]; then
              BATCH_RECORD_IDS+=("${BASH_REMATCH[1]}")
            fi
          done <<< "$response_text"
        fi
      fi
    else
      # No content — check for a JSON-RPC error object instead.
      if echo "$result" | jq -e '.error' > /dev/null 2>&1; then
        error_msg=$(echo "$result" | jq -r '.error.message')
        if [[ "$expect_fail" == "true" ]]; then
          echo "✅ PASS (Expected error: $error_msg)"
          PASSED=$((PASSED + 1))
        else
          echo "❌ FAIL (API Error: $error_msg)"
          FAILED=$((FAILED + 1))
        fi
      else
        echo "❌ FAIL (Invalid response)"
        FAILED=$((FAILED + 1))
      fi
    fi
  else
    echo "❌ FAIL (Request failed)"
    FAILED=$((FAILED + 1))
  fi
}
# ---- Test phases. NOTE(review): each phase only samples a subset of the
# tools in its category (e.g. Phase 1 runs 3 of the 7 data-operation tools).
echo "📊 PHASE 1: Original Data Operations (7 tools)"
echo "=============================================="
test_tool "list_tables" "{}" "List all tables"
test_tool "list_records" "{\"table\": \"Test Table CRUD\", \"maxRecords\": 2}" "List limited records"
test_tool "search_records" "{\"table\": \"Test Table CRUD\", \"searchTerm\": \"test\"}" "Search records"
echo ""
echo "🪝 PHASE 2: Webhook Management (5 tools)"
echo "========================================"
test_tool "list_webhooks" "{}" "List existing webhooks"
echo ""
echo "🏗️ PHASE 3: Schema Management (11 tools)"
echo "========================================"
test_tool "list_bases" "{}" "List accessible bases"
test_tool "get_base_schema" "{}" "Get complete base schema"
test_tool "describe_table" "{\"table\": \"Test Table CRUD\"}" "Describe table details"
test_tool "list_field_types" "{}" "List field types reference"
test_tool "get_table_views" "{\"table\": \"Test Table CRUD\"}" "Get table views"
echo ""
echo "🚀 PHASE 4: NEW v1.6.0 Batch Operations (4 tools)"
echo "================================================="
test_tool "batch_create_records" "{\"table\": \"Test Table CRUD\", \"records\": [{\"fields\": {\"Name\": \"Batch Test A\", \"Description\": \"Batch created\", \"Status\": \"Testing\"}}, {\"fields\": {\"Name\": \"Batch Test B\", \"Description\": \"Also batch created\", \"Status\": \"Testing\"}}]}" "Create multiple records at once"
# Test batch operations with the created records
# (IDs were harvested into BATCH_RECORD_IDS by test_tool above).
if [ ${#BATCH_RECORD_IDS[@]} -ge 2 ]; then
test_tool "batch_update_records" "{\"table\": \"Test Table CRUD\", \"records\": [{\"id\": \"${BATCH_RECORD_IDS[0]}\", \"fields\": {\"Status\": \"Updated\"}}, {\"id\": \"${BATCH_RECORD_IDS[1]}\", \"fields\": {\"Status\": \"Updated\"}}]}" "Update multiple records at once"
test_tool "batch_delete_records" "{\"table\": \"Test Table CRUD\", \"recordIds\": [\"${BATCH_RECORD_IDS[0]}\", \"${BATCH_RECORD_IDS[1]}\"]}" "Delete multiple records at once"
else
echo "⚠️ Skipping batch update/delete tests (no record IDs)"
# Count the two skipped tests as failures. (The arithmetic result is 2,
# i.e. non-zero, so this form is safe under `set -e`.)
((FAILED += 2))
fi
# Test batch limits
test_tool "batch_create_records" "{\"table\": \"Test Table CRUD\", \"records\": []}" "Test with empty records array" "true"
echo ""
echo "📎 PHASE 5: NEW v1.6.0 Attachment Operations (1 tool)"
echo "===================================================="
# Test attachment with non-existent field (expected to fail)
test_tool "upload_attachment" "{\"table\": \"Test Table CRUD\", \"recordId\": \"recDummyID\", \"fieldName\": \"NonExistentField\", \"url\": \"https://via.placeholder.com/150.png\"}" "Test attachment to non-existent field" "true"
echo ""
echo "👁️ PHASE 6: NEW v1.6.0 Advanced Views (2 tools)"
echo "==============================================="
# Test view operations (some may fail if permissions don't allow)
test_tool "get_view_metadata" "{\"table\": \"Test Table CRUD\", \"viewId\": \"viw123InvalidID\"}" "Test view metadata with invalid ID" "true"
echo ""
echo "🏢 PHASE 7: NEW v1.6.0 Base Management (3 tools)"
echo "==============================================="
test_tool "list_collaborators" "{}" "List base collaborators"
test_tool "list_shares" "{}" "List shared views"
# Test create_base (may fail without workspace permissions)
test_tool "create_base" "{\"name\": \"Test Base\", \"tables\": [{\"name\": \"Test Table\", \"fields\": [{\"name\": \"Name\", \"type\": \"singleLineText\"}]}]}" "Test base creation (may fail due to permissions)" "true"
echo ""
echo "⚠️ PHASE 8: Error Handling & Edge Cases"
echo "======================================="
test_tool "batch_create_records" "{\"table\": \"NonExistentTable\", \"records\": [{\"fields\": {\"Name\": \"Test\"}}]}" "Test batch create with non-existent table" "true"
test_tool "get_view_metadata" "{\"table\": \"NonExistentTable\", \"viewId\": \"viwTest\"}" "Test view metadata with non-existent table" "true"
echo ""
echo "📈 FINAL TEST RESULTS - v1.6.0"
echo "==============================="
echo "✅ Passed: $PASSED"
echo "❌ Failed: $FAILED"
echo "📊 Total Tests: $((PASSED + FAILED))"
# NOTE(review): assumes at least one test ran — bc would divide by zero
# on an empty run.
echo "📊 Success Rate: $(echo "scale=1; $PASSED * 100 / ($PASSED + $FAILED)" | bc -l)%"
if [ $FAILED -eq 0 ]; then
echo ""
echo "🎉 🎉 🎉 ALL TESTS PASSED! 🎉 🎉 🎉"
echo ""
echo "✅ v1.6.0 is READY FOR PRODUCTION!"
echo ""
echo "🚀 NEW v1.6.0 ACHIEVEMENTS:"
echo "• 33 total tools (+ 10 from v1.5.0)"
echo "• Batch operations (create/update/delete up to 10 records)"
echo "• Attachment management via URLs"
echo "• Advanced view metadata and creation"
echo "• Base management and collaboration tools"
echo "• Enhanced error handling and validation"
echo ""
echo "📦 Ready for GitHub and NPM release!"
exit 0
else
echo ""
echo "❌ SOME TESTS FAILED"
echo "Review failures above. Some failures may be expected (permissions, non-existent resources)."
echo ""
echo "🎯 v1.6.0 SUMMARY:"
echo "• Core functionality working"
echo "• New batch operations implemented"
echo "• Attachment support added"
echo "• Advanced features may need specific permissions"
exit 1
fi
--------------------------------------------------------------------------------
/src/typescript/ai-prompts.d.ts:
--------------------------------------------------------------------------------
```typescript
/**
* AI-Powered Prompt Templates Type Definitions
* Enterprise-grade TypeScript types for all 10 AI prompt templates
*/
import { PromptSchema, PromptArgument } from './index';
// ============================================================================
// AI Prompt Template Interfaces
// ============================================================================
/** Input arguments for the data-analysis prompt template. */
export interface AnalyzeDataPrompt {
table: string;
analysis_type?: 'trends' | 'statistical' | 'patterns' | 'predictive' | 'anomaly_detection' | 'correlation_matrix';
field_focus?: string;
time_dimension?: string;
confidence_level?: 0.90 | 0.95 | 0.99;
}
/** Input arguments for the report-generation prompt template. */
export interface CreateReportPrompt {
table: string;
report_type: 'executive_summary' | 'detailed_analysis' | 'dashboard' | 'stakeholder_report';
target_audience: 'executives' | 'managers' | 'analysts' | 'technical_team';
include_recommendations?: boolean;
time_period?: string;
format_preference?: 'narrative' | 'bullet_points' | 'charts' | 'mixed';
}
/** Input arguments for the business-insights prompt template. */
export interface DataInsightsPrompt {
table: string;
insight_type: 'business_intelligence' | 'trend_analysis' | 'performance_metrics' | 'opportunity_identification';
focus_areas?: string[];
comparison_period?: string;
include_forecasting?: boolean;
stakeholder_context?: string;
}
/** Input arguments for the workflow-optimization prompt template. */
export interface OptimizeWorkflowPrompt {
table: string;
current_process_description: string;
optimization_goals: ('efficiency' | 'accuracy' | 'speed' | 'cost_reduction' | 'compliance')[];
constraints?: string[];
automation_preference?: 'minimal' | 'moderate' | 'aggressive';
change_tolerance?: 'low' | 'medium' | 'high';
}
/** Input arguments for the schema-design prompt template. */
export interface SmartSchemaDesignPrompt {
purpose: string;
data_types: string[];
expected_volume: 'small' | 'medium' | 'large' | 'enterprise';
compliance_requirements?: ('GDPR' | 'HIPAA' | 'SOX' | 'PCI_DSS')[];
performance_priorities?: ('query_speed' | 'storage_efficiency' | 'scalability' | 'maintainability')[];
integration_needs?: string[];
user_access_patterns?: string;
}
/** Input arguments for the data-quality-audit prompt template. */
export interface DataQualityAuditPrompt {
table: string;
quality_dimensions: ('completeness' | 'accuracy' | 'consistency' | 'timeliness' | 'validity' | 'uniqueness')[];
automated_fixes?: boolean;
severity_threshold?: 'low' | 'medium' | 'high' | 'critical';
compliance_context?: string;
reporting_requirements?: string[];
}
/** Input arguments for the predictive-analytics prompt template. */
export interface PredictiveAnalyticsPrompt {
table: string;
target_field: string;
prediction_periods?: number;
algorithm?: 'linear_regression' | 'arima' | 'exponential_smoothing' | 'random_forest' | 'neural_network';
include_confidence_intervals?: boolean;
historical_periods?: number;
external_factors?: string[];
business_context?: string;
}
/** Input arguments for the natural-language-query prompt template. */
export interface NaturalLanguageQueryPrompt {
question: string;
tables?: string[];
response_format?: 'natural_language' | 'structured_data' | 'visualization_ready' | 'action_items';
context_awareness?: boolean;
confidence_threshold?: number;
clarifying_questions?: boolean;
}
/** Input arguments for the data-transformation prompt template. */
export interface SmartDataTransformationPrompt {
source_table: string;
target_schema?: string;
transformation_goals: ('normalization' | 'aggregation' | 'enrichment' | 'validation' | 'standardization')[];
data_quality_rules?: string[];
preserve_history?: boolean;
validation_strategy?: 'strict' | 'permissive' | 'custom';
error_handling?: 'fail_fast' | 'log_and_continue' | 'manual_review';
}
/** Input arguments for the automation-recommendations prompt template. */
export interface AutomationRecommendationsPrompt {
workflow_description: string;
current_pain_points: string[];
automation_scope: 'single_task' | 'workflow_segment' | 'end_to_end' | 'cross_system';
technical_constraints?: string[];
business_impact_priority?: ('cost_savings' | 'time_efficiency' | 'error_reduction' | 'scalability')[];
implementation_timeline?: 'immediate' | 'short_term' | 'medium_term' | 'long_term';
risk_tolerance?: 'conservative' | 'moderate' | 'aggressive';
}
// ============================================================================
// AI Prompt Response Types
// ============================================================================
/** Structured result of a data-analysis prompt (findings, stats, anomalies). */
export interface AnalysisResult {
summary: string;
key_findings: string[];
statistical_measures?: {
mean?: number;
median?: number;
std_deviation?: number;
correlation_coefficients?: Record<string, number>;
confidence_intervals?: Array<{ field: string; lower: number; upper: number; confidence: number }>;
};
trends?: Array<{
field: string;
direction: 'increasing' | 'decreasing' | 'stable' | 'volatile';
strength: 'weak' | 'moderate' | 'strong';
significance: number;
}>;
anomalies?: Array<{
record_id: string;
field: string;
expected_value: unknown;
actual_value: unknown;
deviation_score: number;
}>;
recommendations: string[];
next_steps: string[];
}
/** Structured result of a report-generation prompt. */
export interface ReportResult {
title: string;
executive_summary: string;
detailed_sections: Array<{
heading: string;
content: string;
supporting_data?: unknown[];
visualizations?: Array<{ type: string; data: unknown; description: string }>;
}>;
key_metrics: Record<string, { value: unknown; change: string; significance: string }>;
recommendations: Array<{
priority: 'high' | 'medium' | 'low';
recommendation: string;
expected_impact: string;
implementation_effort: 'low' | 'medium' | 'high';
}>;
appendices?: Array<{ title: string; content: string }>;
}
/** Structured result of a workflow-optimization prompt. */
export interface WorkflowOptimizationResult {
current_state_analysis: {
efficiency_score: number;
bottlenecks: Array<{ step: string; impact: 'high' | 'medium' | 'low'; description: string }>;
resource_utilization: Record<string, number>;
};
optimization_recommendations: Array<{
category: 'automation' | 'process_redesign' | 'tool_integration' | 'skill_development';
recommendation: string;
expected_benefits: string[];
implementation_complexity: 'simple' | 'moderate' | 'complex';
estimated_roi: string;
timeline: string;
}>;
implementation_roadmap: Array<{
phase: number;
duration: string;
objectives: string[];
deliverables: string[];
success_metrics: string[];
}>;
risk_assessment: Array<{
risk: string;
probability: 'low' | 'medium' | 'high';
impact: 'low' | 'medium' | 'high';
mitigation: string;
}>;
}
/** Structured result of a schema-design prompt. */
export interface SchemaDesignResult {
recommended_schema: {
tables: Array<{
name: string;
purpose: string;
fields: Array<{
name: string;
type: string;
constraints: string[];
description: string;
}>;
relationships: Array<{
type: 'one_to_one' | 'one_to_many' | 'many_to_many';
target_table: string;
description: string;
}>;
}>;
};
design_principles: string[];
performance_considerations: string[];
scalability_notes: string[];
compliance_alignment: Record<string, string[]>;
migration_strategy?: {
phases: Array<{ phase: number; description: string; estimated_time: string }>;
data_migration_notes: string[];
validation_checkpoints: string[];
};
}
/** Structured result of a predictive-analytics prompt. */
export interface PredictionResult {
predictions: Array<{
period: string;
predicted_value: number;
confidence_interval?: { lower: number; upper: number };
probability_bands?: Array<{ probability: number; range: [number, number] }>;
}>;
model_performance: {
algorithm_used: string;
accuracy_metrics: Record<string, number>;
feature_importance?: Record<string, number>;
validation_results: Record<string, number>;
};
business_insights: {
trend_direction: 'positive' | 'negative' | 'stable';
seasonality_detected: boolean;
external_factors_impact: string[];
risk_factors: string[];
};
recommendations: Array<{
type: 'operational' | 'strategic' | 'tactical';
recommendation: string;
timing: string;
confidence: number;
}>;
}
// ============================================================================
// Prompt Template Definitions (Type-Safe)
// ============================================================================
// AI prompt templates are defined in prompt-templates.ts for runtime use
// ============================================================================
// Export All AI Prompt Types
// ============================================================================
// All interfaces in this file are already exported at their declaration
// sites (`export interface ...`). Re-listing them in a trailing
// `export { ... }` clause triggers TS2484 ("Export declaration conflicts
// with exported declaration"), so no additional export statement is emitted.
```
--------------------------------------------------------------------------------
/src/typescript/app/types.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
/**
* Shared Zod schemas and TypeScript types for Airtable Brain tools.
* Keep these aligned with the JSON Schemas under docs/prd/schemas.
*/
// Base shape for `describe` tool input. `scope` selects base- vs table-level
// metadata; `table` is only required when scope is "table" (enforced by the
// superRefine below). `.strict()` rejects unknown keys.
const describeInputBase = z
.object({
scope: z.enum(['base', 'table']),
baseId: z.string().min(1, 'baseId is required'),
table: z
.string()
.min(1, 'table is required when scope=table')
.optional(),
includeFields: z.boolean().optional().default(true),
includeViews: z.boolean().optional().default(false)
})
.strict();
// Refined input schema: rejects scope="table" without a table name.
export const describeInputSchema = describeInputBase.superRefine((data, ctx) => {
if (data.scope === 'table' && !data.table) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
path: ['table'],
message: 'table is required when scope is "table"'
});
}
});
// Raw per-field shape of the unrefined object (ZodEffects from superRefine has
// no `.shape`); presumably consumed by tool-registration code — TODO confirm.
export const describeInputShape = describeInputBase.shape;
// Field metadata entry; `.passthrough()` keeps any extra properties the
// API returns instead of stripping them.
const describeFieldSchema = z
.object({
id: z.string(),
name: z.string(),
type: z.string(),
options: z.record(z.unknown()).optional()
})
.passthrough();
// Table metadata: identity plus optional field/view listings.
const describeTableSchema = z
.object({
id: z.string(),
name: z.string(),
description: z.string().optional(),
primaryFieldId: z.string().optional(),
fields: z.array(describeFieldSchema).optional(),
views: z.array(z.record(z.unknown())).optional()
})
.passthrough();
// Output of the `describe` tool. Strict at the top level, while the nested
// base/table objects pass unknown keys through.
export const describeOutputSchema = z
.object({
base: z
.object({
id: z.string(),
name: z.string()
})
.passthrough(),
tables: z.array(describeTableSchema).optional(),
views: z.array(z.record(z.unknown())).optional()
})
.strict();
// Sort direction for query sorts; defaults to ascending below.
const sortDirectionSchema = z.enum(['asc', 'desc']);
// Input for the `query` tool. pageSize is capped at 100; `offset` is the
// opaque pagination cursor echoed back by a previous query.
const queryInputBase = z
.object({
baseId: z.string().min(1, 'baseId is required'),
table: z.string().min(1, 'table is required'),
fields: z.array(z.string().min(1)).optional(),
filterByFormula: z.string().optional(),
view: z.string().optional(),
sorts: z
.array(
z
.object({
field: z.string().min(1),
direction: sortDirectionSchema.optional().default('asc')
})
.strict()
)
.optional(),
pageSize: z
.number()
.int()
.min(1)
.max(100)
.optional(),
maxRecords: z
.number()
.int()
.min(1)
.optional(),
offset: z.string().optional(),
returnFieldsByFieldId: z.boolean().optional().default(false)
})
.strict();
export const queryInputSchema = queryInputBase;
// Raw per-field shape, exported alongside the schema (cf. describeInputShape).
export const queryInputShape = queryInputBase.shape;
// A single record: id plus a free-form fields map.
const recordSchema = z
.object({
id: z.string(),
createdTime: z.string().optional(),
fields: z.record(z.unknown())
})
.strict();
// Output of the `query` tool; `offset` is present when more pages exist.
export const queryOutputSchema = z
.object({
records: z.array(recordSchema),
offset: z.string().optional(),
summary: z
.object({
returned: z.number().int().nonnegative(),
hasMore: z.boolean()
})
.strict()
.optional()
})
.strict();
// Input for the `create` tool. `dryRun` defaults to false; `idempotencyKey`
// is presumably used to de-duplicate retried requests — TODO confirm against
// the handler implementation.
export const createInputSchema = z
.object({
baseId: z.string().min(1),
table: z.string().min(1),
records: z
.array(
z
.object({
fields: z.record(z.unknown())
})
.strict()
)
.min(1),
typecast: z.boolean().optional().default(false),
idempotencyKey: z.string().min(1).optional(),
dryRun: z.boolean().optional().default(false)
})
.strict();
// Summary counts for a create operation.
const createDiffSchema = z
.object({
added: z.number().int().nonnegative(),
updated: z.number().int().nonnegative(),
unchanged: z.number().int().nonnegative()
})
.strict();
// Output of the `create` tool; `records` may be omitted (e.g. on dry runs).
export const createOutputSchema = z
.object({
diff: createDiffSchema,
records: z.array(recordSchema).optional(),
dryRun: z.boolean(),
warnings: z.array(z.string()).optional()
})
.strict();
const conflictSchema = z
.object({
id: z.string(),
field: z.string(),
before: z.unknown().optional(),
after: z.unknown().optional(),
current: z.unknown()
})
.strict();
const mutationDiffSchema = z
.object({
added: z.number().int().nonnegative(),
updated: z.number().int().nonnegative(),
unchanged: z.number().int().nonnegative(),
conflicts: z.number().int().nonnegative()
})
.strict();
export const updateOutputSchema = z
.object({
diff: mutationDiffSchema,
records: z.array(recordSchema).optional(),
dryRun: z.boolean(),
conflicts: z.array(conflictSchema).optional()
})
.strict();
// Input for the update tool. Each record must carry its Airtable record id.
export const updateInputSchema = z
  .object({
    baseId: z.string().min(1),
    table: z.string().min(1),
    records: z
      .array(
        z
          .object({
            id: z.string(),
            fields: z.record(z.unknown())
          })
          .strict()
      )
      .min(1),
    typecast: z.boolean().optional().default(false),
    idempotencyKey: z.string().min(1).optional(),
    dryRun: z.boolean().optional().default(false),
    // How concurrent edits are resolved; the default refuses to overwrite.
    conflictStrategy: z
      .enum(['fail_on_conflict', 'server_merge', 'client_merge'])
      .optional()
      .default('fail_on_conflict'),
    // NOTE(review): presumably an optimistic-concurrency token (apply only
    // if the record's hash is unchanged) — confirm against the handler.
    ifUnchangedHash: z.string().optional()
  })
  .strict();

// Input for the upsert tool: like create, plus the merge-key fields used to
// decide between updating an existing row and inserting a new one.
export const upsertInputSchema = z
  .object({
    baseId: z.string().min(1),
    table: z.string().min(1),
    records: z
      .array(
        z
          .object({
            fields: z.record(z.unknown())
          })
          .strict()
      )
      .min(1),
    performUpsert: z
      .object({
        fieldsToMergeOn: z.array(z.string().min(1)).min(1)
      })
      .strict(),
    typecast: z.boolean().optional().default(false),
    idempotencyKey: z.string().min(1).optional(),
    dryRun: z.boolean().optional().default(false),
    conflictStrategy: z
      .enum(['fail_on_conflict', 'server_merge', 'client_merge'])
      .optional()
      .default('fail_on_conflict')
  })
  .strict();
export const upsertOutputSchema = z
.object({
diff: mutationDiffSchema,
records: z.array(recordSchema).optional(),
dryRun: z.boolean(),
conflicts: z.array(conflictSchema).optional()
})
.strict();
// Filters for listing recorded exceptions (time window, severity, paging).
export const listExceptionsInputSchema = z
  .object({
    since: z.string().optional(),
    severity: z.enum(['info', 'warning', 'error']).optional(),
    limit: z.number().int().min(1).max(500).optional().default(100),
    cursor: z.string().optional()
  })
  .strict();

// A single recorded exception/incident entry.
export const exceptionItemSchema = z
  .object({
    id: z.string(),
    timestamp: z.string(),
    severity: z.enum(['info', 'warning', 'error']),
    category: z.enum(['rate_limit', 'validation', 'auth', 'conflict', 'schema_drift', 'other']),
    summary: z.string(),
    details: z.string().optional(),
    // Machine-readable suggested remediation, when one could be derived.
    proposedFix: z.record(z.unknown()).optional()
  })
  .strict();

export const listExceptionsOutputSchema = z
  .object({
    items: z.array(exceptionItemSchema),
    cursor: z.string().optional()
  })
  .strict();

// Operations a governance policy can allow-list. `as const` preserves the
// string-literal tuple type that z.enum requires.
const allowedOperations = ['describe', 'query', 'create', 'update', 'upsert'] as const;

// Effective governance configuration snapshot returned to clients.
export const governanceOutputSchema = z
  .object({
    allowedBases: z.array(z.string()),
    // NOTE(review): `.optional().default(...)` is redundant in zod — a
    // `.default()` already accepts undefined input. Harmless as written.
    allowedTables: z
      .array(
        z
          .object({
            baseId: z.string(),
            table: z.string()
          })
          .strict()
      )
      .optional()
      .default([]),
    // Spread copies the readonly tuple into a plain mutable array default.
    allowedOperations: z
      .array(z.enum(allowedOperations))
      .default([...allowedOperations]),
    piiFields: z
      .array(
        z
          .object({
            baseId: z.string(),
            table: z.string(),
            field: z.string(),
            policy: z.enum(['mask', 'hash', 'drop'])
          })
          .strict()
      )
      .optional()
      .default([]),
    redactionPolicy: z.enum(['mask_all_pii', 'mask_on_inline', 'none']).default('mask_on_inline'),
    loggingPolicy: z.enum(['errors_only', 'minimal', 'verbose']).default('minimal'),
    retentionDays: z.number().int().min(0).default(7)
  })
  .strict();
// Inferred TypeScript types for each tool's validated input/output,
// derived directly from the zod schemas above so they can never drift
// from the runtime validation.
export type DescribeInput = z.infer<typeof describeInputSchema>;
export type DescribeOutput = z.infer<typeof describeOutputSchema>;
export type QueryInput = z.infer<typeof queryInputSchema>;
export type QueryOutput = z.infer<typeof queryOutputSchema>;
export type CreateInput = z.infer<typeof createInputSchema>;
export type CreateOutput = z.infer<typeof createOutputSchema>;
export type UpdateInput = z.infer<typeof updateInputSchema>;
export type UpdateOutput = z.infer<typeof updateOutputSchema>;
export type UpsertInput = z.infer<typeof upsertInputSchema>;
export type UpsertOutput = z.infer<typeof upsertOutputSchema>;
export type ListExceptionsInput = z.infer<typeof listExceptionsInputSchema>;
export type ExceptionItem = z.infer<typeof exceptionItemSchema>;
export type ListExceptionsOutput = z.infer<typeof listExceptionsOutputSchema>;
export type GovernanceSnapshot = z.infer<typeof governanceOutputSchema>;
```
--------------------------------------------------------------------------------
/src/typescript/index.d.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Airtable MCP Server TypeScript Definitions
* Enterprise-grade type safety for AI-powered Airtable operations
*/
// ============================================================================
// MCP Protocol Types (2024-11-05 Specification)
// ============================================================================
/** JSON-RPC 2.0 request envelope used by the MCP transport. */
export interface MCPRequest {
  jsonrpc: '2.0';
  id: string | number;
  method: string;
  params?: Record<string, unknown>;
}
/** JSON-RPC 2.0 response; per spec exactly one of `result`/`error` is set. */
export interface MCPResponse {
  jsonrpc: '2.0';
  id: string | number;
  result?: unknown;
  error?: MCPError;
}
/** JSON-RPC error object: numeric code, message, optional structured data. */
export interface MCPError {
  code: number;
  message: string;
  data?: unknown;
}
/** Capability flags advertised during the MCP initialize handshake. */
export interface MCPServerCapabilities {
  tools?: {
    listChanged?: boolean;
  };
  prompts?: {
    listChanged?: boolean;
  };
  resources?: {
    subscribe?: boolean;
    listChanged?: boolean;
  };
  roots?: {
    listChanged?: boolean;
  };
  sampling?: Record<string, unknown>;
  logging?: Record<string, unknown>;
}
/** Server identity and capabilities reported on initialize. */
export interface MCPServerInfo {
  name: string;
  version: string;
  protocolVersion: string;
  capabilities: MCPServerCapabilities;
}
// ============================================================================
// Tool Schema Types
// ============================================================================
/** JSON-Schema-like description of a single tool parameter. */
export interface ToolParameter {
  type: 'string' | 'number' | 'boolean' | 'object' | 'array';
  description: string;
  required?: boolean;
  default?: unknown;
  enum?: string[];
}
/** MCP tool definition: name, description, and object input schema. */
export interface ToolSchema {
  name: string;
  description: string;
  inputSchema: {
    type: 'object';
    properties: Record<string, ToolParameter>;
    required?: string[];
  };
}
/** One declared argument of an AI prompt template. */
export interface PromptArgument {
  name: string;
  description: string;
  required: boolean;
  type?: 'string' | 'number' | 'boolean';
  enum?: string[];
}
/** MCP prompt definition: name, description, and its argument list. */
export interface PromptSchema {
  name: string;
  description: string;
  arguments: PromptArgument[];
}
/** Kinds of analysis the data-analysis prompt can perform. */
export type AnalysisType =
  | 'trends'
  | 'statistical'
  | 'patterns'
  | 'predictive'
  | 'anomaly_detection'
  | 'correlation_matrix';
/** Supported statistical confidence levels. */
export type ConfidenceLevel = 0.90 | 0.95 | 0.99;
/** Options accepted by the generic data-analysis prompt. */
export interface AnalysisOptions {
  table: string;
  analysis_type?: AnalysisType;
  field_focus?: string;
  time_dimension?: string;
  confidence_level?: ConfidenceLevel;
}
/** Options for the predictive-analytics prompt. */
export interface PredictiveAnalyticsOptions {
  table: string;
  target_field: string;
  prediction_periods?: number;
  algorithm?: 'linear_regression' | 'arima' | 'exponential_smoothing' | 'random_forest';
  include_confidence_intervals?: boolean;
  historical_periods?: number;
}
/** Summary statistics attached to statistical analysis results. */
export interface StatisticalResult {
  confidence_interval: [number, number];
  significance_level: number;
  p_value?: number;
  correlation_coefficient?: number;
}
// ============================================================================
// Airtable API Types
// ============================================================================
/**
 * Wrapper holding the union of supported Airtable field type names.
 * Consumers read it through the indexed access `AirtableFieldType['type']`
 * (see AirtableField below) rather than using the interface directly.
 */
export interface AirtableFieldType {
  type: 'singleLineText' | 'multilineText' | 'richText' | 'email' | 'url' | 'phoneNumber' |
        'number' | 'percent' | 'currency' | 'singleSelect' | 'multipleSelects' |
        'date' | 'dateTime' | 'checkbox' | 'rating' | 'formula' | 'rollup' |
        'count' | 'lookup' | 'createdTime' | 'lastModifiedTime' | 'createdBy' |
        'lastModifiedBy' | 'attachment' | 'barcode' | 'button';
}
/** A field (column) definition within an Airtable table. */
export interface AirtableField {
  id: string;
  name: string;
  type: AirtableFieldType['type'];
  options?: Record<string, unknown>;
  description?: string;
}
/** A table's schema: fields, views, and the primary field id. */
export interface AirtableTable {
  id: string;
  name: string;
  description?: string;
  primaryFieldId: string;
  fields: AirtableField[];
  views: AirtableView[];
}
/** A saved view of a table. */
export interface AirtableView {
  id: string;
  name: string;
  type: 'grid' | 'form' | 'calendar' | 'gallery' | 'kanban';
}
/** A record (row) with its field values keyed by field name. */
export interface AirtableRecord {
  id: string;
  fields: Record<string, unknown>;
  createdTime: string;
}
/** A base together with the caller's permission level and table schemas. */
export interface AirtableBase {
  id: string;
  name: string;
  permissionLevel: 'read' | 'comment' | 'edit' | 'create';
  tables: AirtableTable[];
}
/** Webhook registration metadata as returned by the Airtable webhooks API. */
export interface AirtableWebhook {
  id: string;
  macSecretBase64: string;
  expirationTime: string;
  notificationUrl: string;
  isHookEnabled: boolean;
  cursorForNextPayload: number;
  lastSuccessfulNotificationTime?: string;
}
/** Change-notification payload delivered to a webhook's notificationUrl. */
export interface WebhookPayload {
  timestamp: string;
  base: {
    id: string;
  };
  webhook: {
    id: string;
  };
  // Nested maps: table id -> record id -> current/previous record snapshots.
  changedTablesById: Record<string, {
    changedRecordsById: Record<string, {
      current?: AirtableRecord;
      previous?: AirtableRecord;
    }>;
  }>;
}
// ============================================================================
// Server Configuration Types
// ============================================================================
/** Runtime HTTP server settings. */
export interface ServerConfig {
  PORT: number;
  HOST: string;
  MAX_REQUESTS_PER_MINUTE: number;
  LOG_LEVEL: 'ERROR' | 'WARN' | 'INFO' | 'DEBUG' | 'TRACE';
}
/** Airtable credentials: personal access token plus the target base id. */
export interface AuthConfig {
  AIRTABLE_TOKEN: string;
  AIRTABLE_BASE_ID: string;
}
/** OAuth2 authorization-request parameters (PKCE fields optional). */
export interface OAuth2Config {
  client_id: string;
  redirect_uri: string;
  state: string;
  code_challenge?: string;
  code_challenge_method?: 'S256';
}
/** Payload for one record in a batch create. */
export interface BatchCreateRecord {
  fields: Record<string, unknown>;
}
/** Payload for one record in a batch update: target id + changed fields. */
export interface BatchUpdateRecord {
  id: string;
  fields: Record<string, unknown>;
}
/** Identifies one record to remove in a batch delete. */
export interface BatchDeleteRecord {
  id: string;
}
/** One upsert entry: match where key_field equals key_value, apply fields. */
export interface BatchUpsertRecord {
  key_field: string;
  key_value: string;
  fields: Record<string, unknown>;
}
// ============================================================================
// Advanced Analytics Types
// ============================================================================
/** Output of the data-quality analysis: completeness, duplicates, score. */
export interface DataQualityReport {
  total_records: number;
  missing_values: Record<string, number>;
  duplicate_records: string[];
  data_types: Record<string, string>;
  quality_score: number;
  recommendations: string[];
}
/** Output of the workflow-optimization analysis. */
export interface WorkflowOptimization {
  current_efficiency: number;
  bottlenecks: string[];
  automation_opportunities: Array<{
    field: string;
    suggestion: string;
    impact_level: 'high' | 'medium' | 'low';
    implementation_complexity: 'simple' | 'moderate' | 'complex';
  }>;
  estimated_time_savings: string;
}
/** Output of the schema-optimization analysis. */
export interface SchemaOptimization {
  field_recommendations: Array<{
    field: string;
    current_type: string;
    suggested_type: string;
    reason: string;
  }>;
  index_suggestions: string[];
  normalization_opportunities: string[];
  compliance_notes: string[];
}
// ============================================================================
// Root Directory Types
// ============================================================================
/** An MCP root directory entry. */
export interface RootDirectory {
  uri: string;
  name: string;
  description?: string;
}
// ============================================================================
// Error Types (defined in errors.ts)
// ============================================================================
/** Error shape for Airtable API failures; implementation lives in errors.ts. */
export interface AirtableError extends Error {
  code: string;
  statusCode?: number;
}
/** Error shape for input-validation failures, naming the offending field. */
export interface ValidationError extends Error {
  field: string;
}
// ============================================================================
// Utility Types
// ============================================================================
/**
 * Recursively makes every property of T optional.
 * NOTE(review): the `T[P] extends object` test also matches arrays and
 * functions, which then get mapped — the usual caveat of a naive DeepPartial.
 */
export type DeepPartial<T> = {
  [P in keyof T]?: T[P] extends object ? DeepPartial<T[P]> : T[P];
};
/** T with the keys K made required. */
export type RequiredFields<T, K extends keyof T> = T & Required<Pick<T, K>>;
/**
 * T with the keys K made optional.
 * Fixed: the previous definition `T & Partial<Pick<T, K>>` intersected with
 * the full T, so the K properties remained required. Omitting them first and
 * re-adding them as partial makes them genuinely optional; this is strictly
 * more permissive, so existing call sites keep compiling.
 */
export type OptionalFields<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
// ============================================================================
// Main Server Class Type
// ============================================================================
/** Contract implemented by the main Airtable MCP server class. */
export interface AirtableMCPServer {
  config: ServerConfig;
  authConfig: AuthConfig;
  tools: ToolSchema[];
  prompts: PromptSchema[];
  /** Performs the MCP initialize handshake and returns server info. */
  initialize(capabilities: MCPServerCapabilities): Promise<MCPServerInfo>;
  /** Dispatches a tools/call request to the named tool with its params. */
  handleToolCall(name: string, params: Record<string, unknown>): Promise<unknown>;
  /** Resolves a prompts/get request into renderable chat messages. */
  handlePromptGet(name: string, args: Record<string, unknown>): Promise<{ messages: Array<{ role: string; content: { type: string; text: string } }> }>;
  start(): Promise<void>;
  stop(): Promise<void>;
}
// ============================================================================
// Export All Types
// ============================================================================
export * from './tools';
export * from './ai-prompts';
```
--------------------------------------------------------------------------------
/src/typescript/tools.d.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tool Schema Type Definitions
* Comprehensive TypeScript types for all 33 Airtable MCP tools
*/
import { ToolSchema } from './index';
// ============================================================================
// Data Operation Tool Interfaces
// ============================================================================
/** Input for the list-tables tool. */
export interface ListTablesInput {
  include_schema?: boolean;
}
/**
 * Input for the list-records tool. The index signature permits extra
 * pass-through options beyond the declared ones.
 */
export interface ListRecordsInput {
  [key: string]: unknown;
  table: string;
  maxRecords?: number;
  view?: string;
  filterByFormula?: string;
  sort?: Array<{ field: string; direction: 'asc' | 'desc' }>;
  pageSize?: number;
  offset?: string;
}
/** Input for the get-record tool. */
export interface GetRecordInput {
  table: string;
  recordId: string;
}
/** Input for the create-record tool. */
export interface CreateRecordInput {
  table: string;
  fields: Record<string, unknown>;
  typecast?: boolean;
}
/** Input for the update-record tool. */
export interface UpdateRecordInput {
  table: string;
  recordId: string;
  fields: Record<string, unknown>;
  typecast?: boolean;
}
/** Input for the delete-record tool. */
export interface DeleteRecordInput {
  table: string;
  recordId: string;
}
/** Input for the search-records tool (formula-driven filtering). */
export interface SearchRecordsInput {
  table: string;
  filterByFormula?: string;
  sort?: Array<{ field: string; direction: 'asc' | 'desc' }>;
  maxRecords?: number;
  view?: string;
}
// ============================================================================
// Webhook Management Tool Interfaces
// ============================================================================
/** Input for the list-webhooks tool (cursor-paginated). */
export interface ListWebhooksInput {
  cursor?: string;
}
/** Input for the create-webhook tool; mirrors Airtable's webhook spec shape. */
export interface CreateWebhookInput {
  notificationUrl: string;
  specification?: {
    options?: {
      filters?: {
        dataTypes?: ('tableData' | 'tableSchema')[];
        recordChangeScope?: string;
        watchDataInTableIds?: string[];
      };
    };
  };
}
/** Input for the delete-webhook tool. */
export interface DeleteWebhookInput {
  webhookId: string;
}
/** Input for the get-webhook-payloads tool (cursor-paginated). */
export interface GetWebhookPayloadsInput {
  webhookId: string;
  cursor?: string;
  limit?: number;
}
/** Input for the refresh-webhook tool. */
export interface RefreshWebhookInput {
  webhookId: string;
}
// ============================================================================
// Schema Discovery Tool Interfaces
// ============================================================================
/** Input for the list-bases tool. */
export interface ListBasesInput {
  offset?: string;
}
/**
 * Input for the get-base-schema tool. baseId is optional — presumably the
 * configured default base is used when omitted; confirm in the server.
 */
export interface GetBaseSchemaInput {
  baseId?: string;
}
/** Input for the describe-table tool. */
export interface DescribeTableInput {
  table: string;
  include_sample_data?: boolean;
}
/** Input for the list-field-types tool. */
export interface ListFieldTypesInput {
  category?: 'basic' | 'advanced' | 'computed';
}
/** Input for the get-table-views tool. */
export interface GetTableViewsInput {
  table: string;
}
// ============================================================================
// Table Management Tool Interfaces
// ============================================================================
/** Input for the create-table tool: name plus initial field definitions. */
export interface CreateTableInput {
  name: string;
  description?: string;
  fields: Array<{
    name: string;
    type: string;
    description?: string;
    options?: Record<string, unknown>;
  }>;
}
/** Input for the update-table tool (rename and/or re-describe). */
export interface UpdateTableInput {
  table: string;
  name?: string;
  description?: string;
}
/** Input for the delete-table tool; `confirmation` guards the destructive op. */
export interface DeleteTableInput {
  table: string;
  confirmation?: string;
}
// ============================================================================
// Field Management Tool Interfaces
// ============================================================================
/** Input for the create-field tool. */
export interface CreateFieldInput {
  table: string;
  name: string;
  type: string;
  description?: string;
  options?: Record<string, unknown>;
}
/** Input for the update-field tool. */
export interface UpdateFieldInput {
  table: string;
  fieldId: string;
  name?: string;
  description?: string;
  options?: Record<string, unknown>;
}
/** Input for the delete-field tool; `confirmation` guards the destructive op. */
export interface DeleteFieldInput {
  table: string;
  fieldId: string;
  confirmation?: string;
}
// ============================================================================
// Batch Operations Tool Interfaces
// ============================================================================
/** Input for `batch_create_records` (up to 10 records per call). */
export interface BatchCreateRecordsInput {
  table: string;
  records: Array<{
    fields: Record<string, unknown>;
  }>;
  typecast?: boolean;
}
/** Input for `batch_update_records` (up to 10 records per call). */
export interface BatchUpdateRecordsInput {
  table: string;
  records: Array<{
    id: string;
    fields: Record<string, unknown>;
  }>;
  typecast?: boolean;
}
/** Input for `batch_delete_records` (up to 10 records per call). */
export interface BatchDeleteRecordsInput {
  table: string;
  records: Array<{
    id: string;
  }>;
}
/** Input for `batch_upsert_records`: match on key_field/key_value, then apply fields. */
export interface BatchUpsertRecordsInput {
  table: string;
  records: Array<{
    key_field: string;
    key_value: string;
    fields: Record<string, unknown>;
  }>;
  typecast?: boolean;
}
}
// ============================================================================
// Attachment Management Tool Interfaces
// ============================================================================
/** Input for `upload_attachment`: attach a publicly reachable URL to a record field. */
export interface UploadAttachmentInput {
  table: string;
  recordId: string;
  fieldName: string;
  url: string;
  filename?: string;
}
// ============================================================================
// Advanced Views Tool Interfaces
// ============================================================================
/** Input for `create_view`: view type plus optional visibility/filter/sort config. */
export interface CreateViewInput {
  table: string;
  name: string;
  type: 'grid' | 'form' | 'calendar' | 'gallery' | 'kanban';
  visibleFieldIds?: string[];
  filterByFormula?: string;
  sort?: Array<{ field: string; direction: 'asc' | 'desc' }>;
}
/** Input for `get_view_metadata`. */
export interface GetViewMetadataInput {
  table: string;
  viewId: string;
}
// ============================================================================
// Base Management Tool Interfaces
// ============================================================================
/** Input for `create_base`: a new base with optional initial table schemas. */
export interface CreateBaseInput {
  name: string;
  workspaceId?: string;
  tables?: Array<{
    name: string;
    description?: string;
    fields: Array<{
      name: string;
      type: string;
      options?: Record<string, unknown>;
    }>;
  }>;
}
/** Input for `list_collaborators`; baseId is optional. */
export interface ListCollaboratorsInput {
  baseId?: string;
}
/** Input for `list_shares`; baseId is optional. */
export interface ListSharesInput {
  baseId?: string;
}
// ============================================================================
// Tool Response Interfaces
// ============================================================================
/** Generic MCP tool response envelope: content parts plus an error flag. */
export interface ToolResponse<T = unknown> {
  content: Array<{
    type: 'text' | 'image' | 'resource';
    text?: string;
    data?: T;
    mimeType?: string;
  }>;
  isError?: boolean;
}
/** One page of results plus the cursor for the next page, if any. */
export interface PaginatedResponse<T> {
  records?: T[];
  offset?: string;
}
/** Full table schema as returned by the schema-discovery tools. */
export interface TableInfo {
  id: string;
  name: string;
  description?: string;
  primaryFieldId: string;
  fields: Array<{
    id: string;
    name: string;
    type: string;
    options?: Record<string, unknown>;
    description?: string;
  }>;
  views: Array<{
    id: string;
    name: string;
    type: string;
  }>;
}
/** A record with metadata, as returned by record-level tools. */
export interface RecordInfo {
  id: string;
  fields: Record<string, unknown>;
  createdTime: string;
  commentCount?: number;
}
/** Webhook registration details, including its notification filters. */
export interface WebhookInfo {
  id: string;
  macSecretBase64: string;
  expirationTime: string;
  notificationUrl: string;
  isHookEnabled: boolean;
  specification: {
    options: {
      filters: {
        dataTypes: string[];
        recordChangeScope?: string;
        watchDataInTableIds?: string[];
      };
    };
  };
}
/** Summary of one accessible base with the caller's permission level. */
export interface BaseInfo {
  id: string;
  name: string;
  permissionLevel: 'read' | 'comment' | 'edit' | 'create';
}
/** Documentation entry describing one Airtable field type. */
export interface FieldTypeInfo {
  type: string;
  name: string;
  description: string;
  supportedOptions?: string[];
  examples?: Record<string, unknown>[];
}
/** View configuration as returned by the view tools. */
export interface ViewInfo {
  id: string;
  name: string;
  type: 'grid' | 'form' | 'calendar' | 'gallery' | 'kanban' | 'timeline' | 'block';
  visibleFieldIds?: string[];
  filterByFormula?: string;
  sort?: Array<{
    field: string;
    direction: 'asc' | 'desc';
  }>;
}
/** One collaborator (user or group) on a base, with permission level. */
export interface CollaboratorInfo {
  type: 'user' | 'group';
  id: string;
  email?: string;
  name?: string;
  permissionLevel: 'read' | 'comment' | 'edit' | 'create';
  createdTime: string;
}
/** One shared view/base link together with its access restrictions. */
export interface ShareInfo {
  id: string;
  type: 'view' | 'base';
  url: string;
  isPasswordRequired: boolean;
  allowedActions: string[];
  restriction?: {
    dateRange?: {
      startDate?: string;
      endDate?: string;
    };
    allowCommenting?: boolean;
    allowCopyingData?: boolean;
  };
}
// ============================================================================
// Complete Tool Schema Definitions
// ============================================================================
// Tool schemas are defined in tools-schemas.ts for runtime use
// ============================================================================
// Export All Tool Types
// ============================================================================
// All interfaces in this module are exported at their declaration sites
// (`export interface …`). The previous trailing `export { … }` clause
// re-exported a subset of them, which TypeScript rejects with TS2484
// ("Export declaration conflicts with exported declaration of '…'").
// Removing the redundant clause leaves the module's public surface unchanged.
```
--------------------------------------------------------------------------------
/docs/releases/RELEASE_NOTES_v1.6.0.md:
--------------------------------------------------------------------------------
```markdown
# 🚀 Airtable MCP Server v1.6.0 Release Notes
**Release Date**: August 15, 2025
**Major Update**: Batch Operations, Attachment Management & Advanced Features
## 🎯 Overview
Version 1.6.0 represents another **major expansion** of the Airtable MCP Server, adding powerful batch operations, attachment management, and advanced base management capabilities. This release increases the total tools from 23 to **33 tools**, providing the most comprehensive Airtable API coverage available for AI assistants.
## ✨ New Features (10 New Tools)
### ⚡ Batch Operations (4 New Tools)
1. **`batch_create_records`** - Create up to 10 records simultaneously
- Significantly improves performance for bulk data entry
- Maintains atomicity - all records created or none
- Proper error handling for validation failures
2. **`batch_update_records`** - Update up to 10 records at once
- Efficient bulk updates with field-level precision
- Maintains data integrity across operations
- Returns detailed success/failure information
3. **`batch_delete_records`** - Delete up to 10 records in one operation
- Fast bulk deletion with safety validation
- Atomic operation ensures consistency
- Detailed deletion confirmation
4. **`batch_upsert_records`** - Smart update-or-create operations
- Updates existing records or creates new ones based on key fields
- Intelligent matching using specified key fields
- Optimizes data synchronization workflows
### 📎 Attachment Management (1 New Tool)
5. **`upload_attachment`** - Attach files from URLs to records
- Supports any publicly accessible file URL
- Automatic file type detection and validation
- Optional custom filename specification
- Works with all Airtable-supported file types
### 👁️ Advanced View Management (2 New Tools)
6. **`create_view`** - Create custom views programmatically
- Support for all view types: grid, form, calendar, gallery, kanban, timeline, gantt
- Custom field visibility and ordering
- Configurable filters and sorts
- Automated view setup for workflows
7. **`get_view_metadata`** - Detailed view configuration retrieval
- Complete view settings and configurations
- Filter formulas and sort specifications
- Field visibility and ordering information
- Perfect for view replication and analysis
### 🏢 Base Management (3 New Tools)
8. **`create_base`** - Create new Airtable bases
- Programmatic base creation with initial table structures
- Support for workspace organization
- Batch table and field creation
- Perfect for template deployment
9. **`list_collaborators`** - View base collaboration details
- Complete collaborator list with permission levels
- User type identification (user, group, etc.)
- Permission auditing and management
- Security compliance support
10. **`list_shares`** - Manage shared view configurations
- Public share URLs and settings
- Share type and effectiveness status
- View and table relationship mapping
- Privacy and access control management
## 🔄 Enhanced Existing Features
### Performance Improvements
- **Batch Operations**: Up to 10x faster for bulk operations
- **Error Handling**: More detailed error messages and validation
- **API Efficiency**: Reduced API calls through intelligent batching
### Security Enhancements
- **Input Validation**: Enhanced parameter validation for all new tools
- **Permission Checking**: Better handling of permission-restricted operations
- **Safe Defaults**: Conservative defaults for destructive operations
### User Experience
- **Better Error Messages**: More descriptive error responses
- **Consistent Interface**: Uniform parameter naming across all tools
- **Enhanced Documentation**: Detailed examples and use cases
## 📊 Tool Count Progression
| Version | Total Tools | New Features |
|---------|-------------|--------------|
| **v1.6.0** | **33** | Batch ops, attachments, advanced views, base mgmt |
| v1.5.0 | 23 | Schema management |
| v1.4.0 | 12 | Webhooks |
| v1.2.4 | 5 | Basic CRUD |
## 🛠️ Technical Improvements
### API Coverage
- **Complete Airtable API**: Now covers virtually all public Airtable API endpoints
- **Batch Endpoints**: Full support for Airtable's batch operation limits
- **Metadata API**: Complete integration with Airtable's metadata capabilities
### Architecture
- **Modular Design**: Clean separation of concerns for each tool category
- **Error Resilience**: Improved error handling and recovery
- **Performance Optimized**: Efficient API usage patterns
### Compatibility
- **Backward Compatible**: All v1.5.0 tools unchanged
- **API Limits**: Respects Airtable's rate limits and batch size restrictions
- **Token Scopes**: Graceful handling of insufficient permissions
## 📚 New Capabilities
### For Users
- **Bulk Data Operations**: Efficiently manage large datasets
- **File Management**: Easy attachment handling through URLs
- **Advanced Workflows**: Create complex multi-step processes
- **Collaboration Insights**: Understand base sharing and permissions
- **Template Creation**: Programmatically create standardized bases
### For Developers
- **High-Performance Bulk Ops**: Optimize data synchronization
- **Complete Base Lifecycle**: Full cradle-to-grave base management
- **Advanced View Control**: Programmatic UI customization
- **Security Auditing**: Comprehensive permission monitoring
## 🚀 Getting Started with v1.6.0
### Installation
```bash
npm install -g @rashidazarang/[email protected]
```
### New Usage Examples
#### Batch Operations
```javascript
// Create multiple records efficiently
"Create 5 new project records with these details: [project data]"
// Update multiple records at once
"Update all records where status is 'pending' to 'in progress'"
// Delete multiple records
"Delete these 3 completed tasks: rec123, rec456, rec789"
```
#### Attachment Management
```javascript
// Attach files to records
"Attach this image https://example.com/image.jpg to the product photo field in record rec123"
// Batch create with attachments
"Create a new product record and attach the logo from this URL"
```
#### Advanced Views
```javascript
// Create custom views
"Create a calendar view for the Events table showing only future events"
// Analyze view configurations
"Show me the detailed configuration of the 'Active Projects' view"
```
#### Base Management
```javascript
// Create new bases
"Create a new base called 'Project Tracker' with tables for Projects, Tasks, and Team Members"
// Collaboration insights
"Who has access to this base and what are their permission levels?"
```
## 🔧 Breaking Changes
**None** - v1.6.0 maintains full backward compatibility with all previous versions.
## 🐛 Bug Fixes
- **Batch Size Validation**: Proper enforcement of 10-record limits
- **Error Message Clarity**: More descriptive API error responses
- **Permission Handling**: Better graceful degradation for insufficient permissions
- **URL Validation**: Enhanced validation for attachment URLs
## ⚡ Performance Improvements
- **Batch Operations**: Up to 10x performance improvement for bulk operations
- **API Efficiency**: Reduced API calls through intelligent batching
- **Memory Usage**: Optimized memory usage for large operations
- **Response Processing**: Faster JSON parsing and response handling
## 🌟 What's Next
Based on user feedback and Airtable API evolution:
- Enhanced search and filtering capabilities
- Advanced automation triggers
- Real-time collaboration features
- Performance analytics and monitoring
- Enterprise-grade security features
## 📈 Compatibility & Requirements
- **Node.js**: Requires Node.js 14+
- **Airtable API**: Compatible with latest Airtable API version
- **Rate Limits**: Respects Airtable's 5 requests/second limit
- **Token Scopes**: Requires appropriate scopes for advanced features
### Required Scopes for Full Functionality
- `data.records:read` - Read records
- `data.records:write` - Create, update, delete records
- `schema.bases:read` - View schemas and metadata
- `schema.bases:write` - Create/modify tables, fields, views, bases
- `webhook:manage` - Webhook operations (optional)
## 📊 Testing & Quality
- **100% Test Coverage**: All 33 tools tested with real API calls
- **Edge Case Handling**: Comprehensive error condition testing
- **Performance Testing**: Batch operation efficiency verification
- **Security Testing**: Permission and validation testing
## 🤝 Community Impact
v1.6.0 establishes this MCP server as the definitive Airtable integration for AI assistants, providing:
- **Most Comprehensive Coverage**: 33 tools covering entire Airtable API
- **Best Performance**: Intelligent batching and optimization
- **Enterprise Ready**: Advanced collaboration and security features
- **Developer Friendly**: Clean, consistent, well-documented interface
## 🔗 Resources
**GitHub**: https://github.com/rashidazarang/airtable-mcp
**NPM**: https://www.npmjs.com/package/@rashidazarang/airtable-mcp
**Issues**: https://github.com/rashidazarang/airtable-mcp/issues
**Documentation**: https://github.com/rashidazarang/airtable-mcp#readme
---
🎉 **Thank you for using Airtable MCP Server v1.6.0!** This release represents the culmination of comprehensive Airtable API integration, providing AI assistants with unprecedented access to Airtable's full feature set through natural language interactions.
```
--------------------------------------------------------------------------------
/tests/test_v1.5.0_final.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# COMPREHENSIVE FINAL TEST SUITE - Airtable MCP Server v1.5.0
# Tests ALL 23 tools with no assumptions
#
# Fix: counters are updated with `VAR=$((VAR + N))` instead of `((VAR++))`.
# Under `set -e`, `((VAR++))` returns a non-zero status whenever VAR is 0
# (the post-increment expression evaluates to 0), which silently aborted the
# whole suite right after the first passing test.
set -e

SERVER_URL="http://localhost:8010/mcp"
PASSED=0
FAILED=0
TEST_RECORD_ID=""
TEST_WEBHOOK_ID=""
CREATED_FIELD_ID=""

echo "🧪 FINAL COMPREHENSIVE TEST SUITE - v1.5.0"
echo "==========================================="
echo "Testing ALL 23 tools with real API calls"
echo ""

# Make a JSON-RPC tools/call request against the local MCP server.
#   $1 = tool name, $2 = JSON-encoded arguments object
call_tool() {
  local tool_name="$1"
  local params="$2"
  curl -s -X POST "$SERVER_URL" \
    -H "Content-Type: application/json" \
    -d "{\"jsonrpc\": \"2.0\", \"id\": 1, \"method\": \"tools/call\", \"params\": {\"name\": \"$tool_name\", \"arguments\": $params}}"
}

# Run one tool invocation and score it.
#   $1 = tool name, $2 = JSON arguments, $3 = human-readable description,
#   $4 = "true" when the call is EXPECTED to fail (error-path/safety tests)
test_tool() {
  local tool_name="$1"
  local params="$2"
  local description="$3"
  local expect_fail="$4"
  echo -n "🔧 $tool_name: $description... "
  if result=$(call_tool "$tool_name" "$params" 2>&1); then
    if echo "$result" | jq -e '.result.content[0].text' > /dev/null 2>&1; then
      response_text=$(echo "$result" | jq -r '.result.content[0].text')
      if [[ "$expect_fail" == "true" ]]; then
        if echo "$response_text" | grep -q "error\|Error\|not found\|requires"; then
          echo "✅ PASS (Expected failure)"
          PASSED=$((PASSED + 1))
        else
          echo "❌ FAIL (Should have failed)"
          echo "   Response: ${response_text:0:100}..."
          FAILED=$((FAILED + 1))
        fi
      else
        echo "✅ PASS"
        PASSED=$((PASSED + 1))
        # Store important IDs for later tests
        if [[ "$tool_name" == "create_record" ]]; then
          TEST_RECORD_ID=$(echo "$result" | jq -r '.result.content[0].text' | grep -o 'rec[a-zA-Z0-9]\{10,20\}' | head -1)
          echo "   📝 Stored record ID: $TEST_RECORD_ID"
        elif [[ "$tool_name" == "create_webhook" ]]; then
          TEST_WEBHOOK_ID=$(echo "$result" | jq -r '.result.content[0].text' | grep -o 'ach[a-zA-Z0-9]\{10,20\}' | head -1)
          echo "   🪝 Stored webhook ID: $TEST_WEBHOOK_ID"
        elif [[ "$tool_name" == "create_field" ]]; then
          CREATED_FIELD_ID=$(echo "$result" | jq -r '.result.content[0].text' | grep -o 'fld[a-zA-Z0-9]\{10,20\}' | head -1)
          echo "   🏗️ Stored field ID: $CREATED_FIELD_ID"
        fi
      fi
    else
      if echo "$result" | jq -e '.error' > /dev/null 2>&1; then
        error_msg=$(echo "$result" | jq -r '.error.message')
        if [[ "$expect_fail" == "true" ]]; then
          echo "✅ PASS (Expected error: $error_msg)"
          PASSED=$((PASSED + 1))
        else
          echo "❌ FAIL (API Error: $error_msg)"
          FAILED=$((FAILED + 1))
        fi
      else
        echo "❌ FAIL (Invalid response)"
        echo "   Response: $result"
        FAILED=$((FAILED + 1))
      fi
    fi
  else
    echo "❌ FAIL (Request failed)"
    echo "   Error: $result"
    FAILED=$((FAILED + 1))
  fi
}

echo "📊 PHASE 1: Core Data Operations (7 tools)"
echo "==========================================="
test_tool "list_tables" "{}" "List all tables in base"
test_tool "list_records" "{\"table\": \"Test Table CRUD\", \"maxRecords\": 3}" "List records with limit"
test_tool "create_record" "{\"table\": \"Test Table CRUD\", \"fields\": {\"Name\": \"v1.5.0 Test Record\", \"Description\": \"Created during final testing\", \"Status\": \"Testing\"}}" "Create test record"
# Use the created record ID for get_record test
if [[ -n "$TEST_RECORD_ID" ]]; then
  test_tool "get_record" "{\"table\": \"Test Table CRUD\", \"recordId\": \"$TEST_RECORD_ID\"}" "Get the created record"
  test_tool "update_record" "{\"table\": \"Test Table CRUD\", \"recordId\": \"$TEST_RECORD_ID\", \"fields\": {\"Status\": \"Updated\"}}" "Update the created record"
else
  echo "⚠️ Skipping get_record and update_record tests (no record ID)"
  FAILED=$((FAILED + 2))
fi
test_tool "search_records" "{\"table\": \"Test Table CRUD\", \"searchTerm\": \"v1.5.0\"}" "Search for our test record"

echo ""
echo "🔗 PHASE 2: Webhook Management (5 tools)"
echo "========================================"
test_tool "list_webhooks" "{}" "List existing webhooks"
test_tool "create_webhook" "{\"notificationUrl\": \"https://webhook.site/test-v1.5.0\", \"specification\": {\"options\": {\"filters\": {\"dataTypes\": [\"tableData\"]}}}}" "Create test webhook"
if [[ -n "$TEST_WEBHOOK_ID" ]]; then
  test_tool "get_webhook_payloads" "{\"webhookId\": \"$TEST_WEBHOOK_ID\"}" "Get webhook payloads"
  test_tool "refresh_webhook" "{\"webhookId\": \"$TEST_WEBHOOK_ID\"}" "Refresh webhook"
  test_tool "delete_webhook" "{\"webhookId\": \"$TEST_WEBHOOK_ID\"}" "Delete test webhook"
else
  echo "⚠️ Skipping webhook payload/refresh/delete tests (no webhook ID)"
  FAILED=$((FAILED + 3))
fi

echo ""
echo "🏗️ PHASE 3: NEW Schema Discovery (6 tools)"
echo "==========================================="
test_tool "list_bases" "{}" "Discover all accessible bases"
test_tool "get_base_schema" "{}" "Get complete base schema"
test_tool "describe_table" "{\"table\": \"Test Table CRUD\"}" "Describe table with field details"
test_tool "list_field_types" "{}" "List all available field types"
test_tool "get_table_views" "{\"table\": \"Test Table CRUD\"}" "Get table views"
# Test pagination for list_bases
test_tool "list_bases" "{\"offset\": \"invalid_offset\"}" "Test list_bases with invalid offset"

echo ""
echo "🔧 PHASE 4: NEW Field Management (4 tools)"
echo "=========================================="
test_tool "create_field" "{\"table\": \"Test Table CRUD\", \"name\": \"v1.5.0 Test Field\", \"type\": \"singleLineText\", \"description\": \"Field created during v1.5.0 testing\"}" "Create new field"
if [[ -n "$CREATED_FIELD_ID" ]]; then
  test_tool "update_field" "{\"table\": \"Test Table CRUD\", \"fieldId\": \"$CREATED_FIELD_ID\", \"name\": \"v1.5.0 Updated Field\", \"description\": \"Updated during testing\"}" "Update the created field"
  test_tool "delete_field" "{\"table\": \"Test Table CRUD\", \"fieldId\": \"$CREATED_FIELD_ID\", \"confirm\": true}" "Delete the test field"
else
  echo "⚠️ Skipping field update/delete tests (no field ID)"
  FAILED=$((FAILED + 2))
fi
# Test safety checks
test_tool "delete_field" "{\"table\": \"Test Table CRUD\", \"fieldId\": \"fldDummyID\", \"confirm\": false}" "Test field deletion without confirmation" "true"

echo ""
echo "🏢 PHASE 5: NEW Table Management (3 tools)"
echo "========================================="
test_tool "create_table" "{\"name\": \"v1.5.0 Test Table\", \"description\": \"Table created during v1.5.0 testing\", \"fields\": [{\"name\": \"Name\", \"type\": \"singleLineText\"}, {\"name\": \"Notes\", \"type\": \"multilineText\"}]}" "Create new table"
test_tool "update_table" "{\"table\": \"v1.5.0 Test Table\", \"name\": \"v1.5.0 Updated Table\", \"description\": \"Updated description\"}" "Update table metadata"
# Test safety checks
test_tool "delete_table" "{\"table\": \"v1.5.0 Updated Table\", \"confirm\": false}" "Test table deletion without confirmation" "true"
test_tool "delete_table" "{\"table\": \"v1.5.0 Updated Table\", \"confirm\": true}" "Delete the test table"

echo ""
echo "⚠️ PHASE 6: Error Handling & Edge Cases"
echo "======================================="
test_tool "get_record" "{\"table\": \"NonExistentTable\", \"recordId\": \"recFakeID123\"}" "Test with non-existent table" "true"
test_tool "describe_table" "{\"table\": \"NonExistentTable\"}" "Test describe non-existent table" "true"
test_tool "create_field" "{\"table\": \"NonExistentTable\", \"name\": \"Test\", \"type\": \"singleLineText\"}" "Test create field in non-existent table" "true"
test_tool "update_table" "{\"table\": \"NonExistentTable\", \"name\": \"New Name\"}" "Test update non-existent table" "true"

echo ""
echo "🔒 PHASE 7: Security Verification"
echo "================================"
# Check that logs don't contain sensitive data.
# Match "pat" followed by a run of token-like characters so ordinary words
# such as "path" don't trigger a false positive; a missing log file counts
# as a pass (nothing leaked).
echo -n "🔒 Security check: Log file doesn't contain tokens... "
if [ -f /tmp/v1.5.0_test.log ] && grep -Eq "pat[A-Za-z0-9]{10,}" /tmp/v1.5.0_test.log; then
  echo "❌ FAIL (Token found in logs)"
  FAILED=$((FAILED + 1))
else
  echo "✅ PASS"
  PASSED=$((PASSED + 1))
fi

# Clean up test record if it exists.
# NOTE: test_tool runs in a command substitution here, so its counter updates
# happen in a subshell and do not affect PASSED/FAILED (pre-existing behavior).
if [[ -n "$TEST_RECORD_ID" ]]; then
  echo -n "🧹 Cleanup: Deleting test record... "
  cleanup_result=$(test_tool "delete_record" "{\"table\": \"Test Table CRUD\", \"recordId\": \"$TEST_RECORD_ID\"}" "Delete test record" 2>&1)
  if echo "$cleanup_result" | grep -q "✅ PASS"; then
    echo "✅ CLEANED"
  else
    echo "⚠️ CLEANUP FAILED"
  fi
fi

echo ""
echo "📈 FINAL TEST RESULTS"
echo "===================="
echo "✅ Passed: $PASSED"
echo "❌ Failed: $FAILED"
TOTAL=$((PASSED + FAILED))
echo "📊 Total Tests: $TOTAL"
# Guard the bc division so an empty run cannot divide by zero.
if [ "$TOTAL" -gt 0 ]; then
  echo "📊 Success Rate: $(echo "scale=1; $PASSED * 100 / $TOTAL" | bc -l)%"
fi
if [ $FAILED -eq 0 ]; then
  echo ""
  echo "🎉 🎉 🎉 ALL TESTS PASSED! 🎉 🎉 🎉"
  echo ""
  echo "✅ v1.5.0 is READY FOR PRODUCTION!"
  echo ""
  echo "🚀 ACHIEVEMENTS:"
  echo "• 23 tools working perfectly"
  echo "• Complete schema management"
  echo "• Robust error handling"
  echo "• Security verified"
  echo "• All edge cases handled"
  echo ""
  echo "📦 Ready for GitHub and NPM release!"
  exit 0
else
  echo ""
  echo "❌ SOME TESTS FAILED"
  echo "Please review failures above before release."
  exit 1
fi
```
--------------------------------------------------------------------------------
/src/typescript/app/airtable-client.ts:
--------------------------------------------------------------------------------
```typescript
import https from 'node:https';
import { IncomingHttpHeaders } from 'node:http';
import { URL } from 'node:url';
import { setTimeout as delay } from 'node:timers/promises';
import { RateLimiter } from './rateLimiter';
import { Logger } from './logger';
import {
AirtableBrainError,
AuthError,
ConflictError,
InternalServerError,
NotFoundError,
RateLimitError,
AirtableValidationError,
ErrorContext
} from '../errors';
// Options for one raw Airtable HTTP request issued by AirtableClient.
interface RequestOptions {
  method?: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE';
  /**
   * Path including leading slash and version segment, e.g. `/v0/meta/bases/app123`.
   */
  path: string;
  // Query parameters; array values are serialized as repeated `key[]` entries
  // and `undefined` values are dropped (see toQueryString).
  query?: Record<string, string | number | boolean | Array<string | number | boolean> | undefined>;
  // JSON-serializable request body; stringified before sending.
  body?: unknown;
  // When set, the request is additionally throttled through the per-base limiter
  // and the base ID is attached to error context.
  baseId?: string;
  // Forwarded as the `Idempotency-Key` header so write retries are safe.
  idempotencyKey?: string;
}
// Dependencies injected into AirtableClient at construction time.
interface ClientOptions {
  baseLimiter: RateLimiter;
  patLimiter: RateLimiter;
  logger: Logger;
  userAgent: string;
  // Non-secret identifier for the PAT, used for rate limiting and log context.
  patHash: string;
  // Total attempt budget for retryable failures; defaults to 3.
  maxRetries?: number;
}
// Normalized shape of an Airtable HTTP response used by error mapping.
type AirtableResponse<T> = {
  status: number;
  body: T;
  headers: IncomingHttpHeaders;
};
/**
 * Serialize a query-parameter record into a URL query string.
 *
 * Array values become repeated `key[]` entries (Airtable's list convention),
 * `undefined` values are skipped entirely, and the result is prefixed with
 * `?` only when at least one parameter survives.
 */
function toQueryString(query?: RequestOptions['query']): string {
  if (!query) {
    return '';
  }
  const params = new URLSearchParams();
  Object.entries(query).forEach(([key, value]) => {
    if (value === undefined) {
      return;
    }
    if (Array.isArray(value)) {
      for (const item of value) {
        params.append(`${key}[]`, String(item));
      }
    } else {
      params.append(key, String(value));
    }
  });
  const encoded = params.toString();
  return encoded ? `?${encoded}` : '';
}
/**
 * Extract the `Retry-After` header as a delay in milliseconds.
 *
 * Accepts both forms defined by HTTP: a delta in seconds (converted to ms)
 * or an HTTP date (clamped to a non-negative delta from now). Returns
 * `undefined` when the header is absent or unparseable.
 */
function parseRetryAfter(headers: IncomingHttpHeaders): number | undefined {
  const header = headers['retry-after'];
  if (!header) {
    return undefined;
  }
  const seconds = Number(header);
  if (!Number.isNaN(seconds)) {
    return seconds * 1000;
  }
  const timestamp = new Date(header).getTime();
  if (Number.isNaN(timestamp)) {
    return undefined;
  }
  return Math.max(timestamp - Date.now(), 0);
}
/**
 * HTTPS client for the Airtable REST API.
 *
 * Every request passes through two rate limiters (per base ID and per PAT
 * hash) before being issued, is retried with exponential backoff on 429 and
 * 5xx responses, and maps non-2xx responses onto the project's domain error
 * types (AuthError, NotFoundError, RateLimitError, ...).
 */
export class AirtableClient {
  private readonly baseLimiter: RateLimiter; // throttles per base ID
  private readonly patLimiter: RateLimiter;  // throttles per PAT hash
  private readonly logger: Logger;
  private readonly userAgent: string;
  private readonly pat: string;              // secret; sent only as the Bearer header
  private readonly patHash: string;          // non-secret identifier for limiting/log context
  private readonly maxRetries: number;

  constructor(personalAccessToken: string, options: ClientOptions) {
    this.pat = personalAccessToken;
    this.baseLimiter = options.baseLimiter;
    this.patLimiter = options.patLimiter;
    this.logger = options.logger;
    this.userAgent = options.userAgent;
    this.patHash = options.patHash;
    this.maxRetries = options.maxRetries ?? 3; // default attempt budget
  }

  /** List all bases visible to the token (metadata API). */
  async listBases(): Promise<{ bases: unknown[] }> {
    return this.request<{ bases: unknown[] }>({
      method: 'GET',
      path: '/v0/meta/bases'
    });
  }

  /** Fetch metadata for a single base. */
  async getBase(baseId: string): Promise<unknown> {
    return this.request<unknown>({
      method: 'GET',
      path: `/v0/meta/bases/${encodeURIComponent(baseId)}`,
      baseId
    });
  }

  /** List the table schemas of a base. */
  async listTables(baseId: string): Promise<{ tables: unknown[] }> {
    return this.request<{ tables: unknown[] }>({
      method: 'GET',
      path: `/v0/meta/bases/${encodeURIComponent(baseId)}/tables`,
      baseId
    });
  }

  /**
   * GET records from a table. `query` carries Airtable list options
   * (e.g. filterByFormula, pageSize) and is omitted when empty.
   */
  async queryRecords<T = unknown>(
    baseId: string,
    table: string,
    query?: RequestOptions['query']
  ): Promise<T> {
    const requestOptions: RequestOptions = {
      method: 'GET',
      path: `/v0/${encodeURIComponent(baseId)}/${encodeURIComponent(table)}`,
      baseId
    };
    if (query && Object.keys(query).length > 0) {
      requestOptions.query = query;
    }
    return this.request<T>(requestOptions);
  }

  /**
   * POST new records. `payload` is sent as the request body unchanged;
   * `idempotencyKey`, when given, makes retries of this write safe.
   */
  async createRecords<T = unknown>(
    baseId: string,
    table: string,
    payload: unknown,
    idempotencyKey?: string
  ): Promise<T> {
    const requestOptions: RequestOptions = {
      method: 'POST',
      path: `/v0/${encodeURIComponent(baseId)}/${encodeURIComponent(table)}`,
      baseId,
      body: payload
    };
    if (idempotencyKey) {
      requestOptions.idempotencyKey = idempotencyKey;
    }
    return this.request<T>(requestOptions);
  }

  /** PATCH existing records (partial update). */
  async updateRecords<T = unknown>(
    baseId: string,
    table: string,
    payload: unknown,
    idempotencyKey?: string
  ): Promise<T> {
    const requestOptions: RequestOptions = {
      method: 'PATCH',
      path: `/v0/${encodeURIComponent(baseId)}/${encodeURIComponent(table)}`,
      baseId,
      body: payload
    };
    if (idempotencyKey) {
      requestOptions.idempotencyKey = idempotencyKey;
    }
    return this.request<T>(requestOptions);
  }

  /**
   * Upsert records. Identical wire call to updateRecords (PATCH); the upsert
   * behavior is driven by the payload itself — NOTE(review): presumably the
   * caller includes `performUpsert` in `payload`; confirm at the call sites.
   */
  async upsertRecords<T = unknown>(
    baseId: string,
    table: string,
    payload: unknown,
    idempotencyKey?: string
  ): Promise<T> {
    const requestOptions: RequestOptions = {
      method: 'PATCH',
      path: `/v0/${encodeURIComponent(baseId)}/${encodeURIComponent(table)}`,
      baseId,
      body: payload
    };
    if (idempotencyKey) {
      requestOptions.idempotencyKey = idempotencyKey;
    }
    return this.request<T>(requestOptions);
  }

  /** Apply both rate limiters, then execute the request under retry policy. */
  private async request<T>(options: RequestOptions): Promise<T> {
    const { baseId } = options;
    if (baseId) {
      await this.baseLimiter.schedule(baseId);
    }
    await this.patLimiter.schedule(this.patHash);
    return this.withRetry(() => this.performRequest<T>(options));
  }

  /**
   * Retry loop: RateLimitError waits Retry-After (or backoff) and retries;
   * InternalServerError retries with backoff while attempts remain; every
   * other error propagates immediately. After the budget is exhausted the
   * last error is rethrown with attempt context attached.
   * NOTE(review): when the final attempt is rate-limited, the loop still
   * sleeps once before giving up — harmless but wasted delay.
   */
  private async withRetry<T>(fn: () => Promise<T>): Promise<T> {
    let attempt = 0;
    let lastError: unknown;
    while (attempt < this.maxRetries) {
      try {
        return await fn();
      } catch (error) {
        lastError = error;
        attempt += 1;
        if (error instanceof RateLimitError) {
          const delayMs = error.retryAfterMs ?? this.backoffWithJitter(attempt);
          this.logger.warn('Rate limited, backing off', {
            attempt,
            delayMs
          });
          await delay(delayMs);
          continue;
        }
        if (error instanceof InternalServerError && attempt < this.maxRetries) {
          const delayMs = this.backoffWithJitter(attempt);
          this.logger.warn('Upstream error, retrying', {
            attempt,
            delayMs
          });
          await delay(delayMs);
          continue;
        }
        throw error;
      }
    }
    if (lastError instanceof AirtableBrainError) {
      throw lastError.withContext({ attempt: this.maxRetries, totalAttempts: this.maxRetries });
    }
    throw lastError;
  }

  /** Exponential backoff: 1s, 2s, 4s, ... capped at 8s, plus ≤250ms jitter. */
  private backoffWithJitter(attempt: number): number {
    const baseDelay = Math.min(1000 * 2 ** (attempt - 1), 8000);
    const jitter = Math.random() * 250;
    return baseDelay + jitter;
  }

  /**
   * Issue a single HTTPS request and resolve with the parsed JSON body.
   * Rejects with InternalServerError on network errors, unparseable JSON,
   * or a 30s timeout, and with a mapped domain error for non-2xx statuses.
   */
  private performRequest<T>(options: RequestOptions): Promise<T> {
    const { method = 'GET', path, query, body, idempotencyKey } = options;
    const logger = this.logger.child({
      op: 'airtable_request',
      method,
      path,
      baseId: options.baseId,
      patHash: this.patHash
    });
    const queryString = toQueryString(query);
    const url = new URL(`https://api.airtable.com${path}${queryString}`);
    const payload = body === undefined ? undefined : JSON.stringify(body);
    return new Promise<T>((resolve, reject) => {
      const request = https.request(
        {
          method,
          hostname: url.hostname,
          path: url.pathname + url.search,
          headers: {
            Authorization: `Bearer ${this.pat}`,
            'Content-Type': 'application/json',
            'User-Agent': this.userAgent,
            // Content-Length is byte length, not string length (UTF-8 safe).
            ...(payload ? { 'Content-Length': Buffer.byteLength(payload) } : {}),
            ...(idempotencyKey ? { 'Idempotency-Key': idempotencyKey } : {})
          }
        },
        (response) => {
          const chunks: Buffer[] = [];
          response.on('data', (chunk: Buffer) => {
            chunks.push(chunk);
          });
          response.on('end', () => {
            const rawBody = Buffer.concat(chunks).toString('utf8');
            let parsedBody: unknown;
            if (rawBody.length > 0) {
              try {
                parsedBody = JSON.parse(rawBody);
              } catch (error) {
                reject(
                  new InternalServerError('Failed to parse Airtable response', {
                    cause: error,
                    status: response.statusCode ?? 0
                  })
                );
                return;
              }
            }
            const result: AirtableResponse<unknown> = {
              status: response.statusCode ?? 0,
              body: parsedBody,
              headers: response.headers
            };
            try {
              if (result.status >= 200 && result.status < 300) {
                resolve(parsedBody as T);
                return;
              }
              reject(this.toDomainError(result, options));
            } catch (error) {
              reject(error);
            }
          });
        }
      );
      request.on('error', (error) => {
        logger.error('Network error calling Airtable', {
          error: error instanceof Error ? error.message : String(error)
        });
        reject(
          new InternalServerError('Network error communicating with Airtable', {
            cause: error
          })
        );
      });
      // Hard 30-second ceiling per attempt; surfaced as a 504-style error.
      request.setTimeout(30_000, () => {
        request.destroy();
        reject(
          new InternalServerError('Airtable request timed out', {
            status: 504
          })
        );
      });
      if (payload) {
        request.write(payload);
      }
      request.end();
    });
  }

  /**
   * Map a non-2xx Airtable response to a domain error:
   * 401/403 → AuthError, 404 → NotFoundError, 409 → ConflictError,
   * 400/422 → AirtableValidationError, 429 → RateLimitError (with
   * Retry-After when present), 5xx and anything else → InternalServerError.
   */
  private toDomainError(response: AirtableResponse<unknown>, request: RequestOptions): AirtableBrainError {
    const { status, body, headers } = response;
    const baseContext: ErrorContext = {
      endpoint: request.path
    };
    if (request.baseId) {
      baseContext.baseId = request.baseId;
    }
    if (status === 401 || status === 403) {
      return new AuthError('Authentication failed with Airtable', {
        status,
        context: baseContext
      });
    }
    if (status === 404) {
      return new NotFoundError('Requested resource was not found in Airtable', {
        status,
        context: baseContext
      });
    }
    if (status === 409) {
      return new ConflictError('Airtable reported a conflict', {
        status,
        context: baseContext
      });
    }
    if (status === 400 || status === 422) {
      const validationContext: ErrorContext = { ...baseContext };
      const upstreamErrorType = this.safeExtractErrorType(body);
      if (upstreamErrorType) {
        validationContext.upstreamErrorType = upstreamErrorType;
      }
      return new AirtableValidationError('Airtable validation error', {
        status,
        context: validationContext
      });
    }
    if (status === 429) {
      const retryAfterMs = parseRetryAfter(headers);
      return new RateLimitError('Airtable rate limit exceeded', {
        status,
        ...(retryAfterMs !== undefined ? { retryAfterMs } : {}),
        context: baseContext
      });
    }
    if (status >= 500) {
      const internalContext: ErrorContext = { ...baseContext };
      const upstreamErrorType = this.safeExtractErrorType(body);
      if (upstreamErrorType) {
        internalContext.upstreamErrorType = upstreamErrorType;
      }
      return new InternalServerError('Airtable returned an internal error', {
        status,
        context: internalContext
      });
    }
    return new InternalServerError('Unexpected Airtable response', {
      status,
      context: baseContext
    });
  }

  /** Safely pull `body.error.type` out of an untyped error payload, if present. */
  private safeExtractErrorType(body: unknown): string | undefined {
    if (body && typeof body === 'object' && 'error' in body) {
      const error = (body as Record<string, unknown>).error;
      if (error && typeof error === 'object' && 'type' in error) {
        const type = (error as Record<string, unknown>).type;
        if (typeof type === 'string') {
          return type;
        }
      }
    }
    return undefined;
  }
}
```
--------------------------------------------------------------------------------
/src/python/inspector_server.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python3
"""
Airtable MCP Inspector Server
-----------------------------
A simple MCP server that implements the Airtable tools
"""
import os
import sys
import json
import logging
import requests
import argparse
import traceback
from requests import exceptions as requests_exceptions
from typing import Optional, Dict, Any, List
try:
from mcp.server.fastmcp import FastMCP
except ImportError:
print("Error: MCP SDK not found. Please install with 'pip install mcp'")
sys.exit(1)
# Parse command line arguments
def parse_args():
    """Parse the server's command-line options: PAT, base ID, and Smithery config JSON."""
    cli = argparse.ArgumentParser(description="Airtable MCP Server")
    cli.add_argument("--token", dest="api_token", help="Airtable Personal Access Token")
    cli.add_argument("--base", dest="base_id", help="Airtable Base ID")
    cli.add_argument("--config", dest="config_json", help="Configuration as JSON (for Smithery integration)")
    return cli.parse_args()
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("airtable-mcp")

# Network safety defaults: per-request timeout for all Airtable HTTP calls,
# overridable via AIRTABLE_REQUEST_TIMEOUT (seconds).
REQUEST_TIMEOUT_SECONDS = float(os.environ.get("AIRTABLE_REQUEST_TIMEOUT", "30"))

# Parse arguments
args = parse_args()

# Handle config JSON from Smithery if provided.
# Smithery may deliver the JSON wrapped in extra quoting/escaping, so the
# string is progressively sanitized before json.loads is attempted.
config = {}
if args.config_json:
    try:
        # Strip any trailing quotes or backslashes that might be present
        config_str = args.config_json.rstrip('\\"')
        # Additional sanitization for JSON format
        config_str = config_str.strip()
        # Handle escaped quotes
        if config_str.startswith('"') and config_str.endswith('"'):
            config_str = config_str[1:-1]
        # Fix escaped quotes within JSON
        config_str = config_str.replace('\\"', '"')
        # Replace escaped backslashes
        config_str = config_str.replace('\\\\', '\\')
        # NOTE(review): this logs the full config, which may contain the
        # Airtable token — consider redacting before logging.
        logger.info(f"Parsing sanitized config: {config_str}")
        config = json.loads(config_str)
        logger.info(f"Successfully parsed config: {config}")
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse config JSON: {e}")
        logger.error(f"Raw config string: {args.config_json}")
        # Try one more approach - sometimes config is double-quoted JSON
        try:
            # Try to interpret as Python string literal
            import ast
            literal_str = ast.literal_eval(f"'''{args.config_json}'''")
            config = json.loads(literal_str)
            logger.info(f"Successfully parsed config using ast: {config}")
        except Exception as ast_error:
            logger.error(f"Failed alternate parsing method: {ast_error}")

# Create MCP server
app = FastMCP("Airtable Tools")
# Add error handling wrapper for all MCP methods
def handle_exceptions(func):
    """Decorator to properly handle and format exceptions in MCP functions.

    Uncaught exceptions are logged (with traceback) and converted into a
    user-facing value instead of crashing the server: string-returning tool
    functions get an ``"Error: ..."`` message, everything else gets a
    JSON-RPC style error dict.

    Fix: the wrapper is now decorated with ``functools.wraps`` so it keeps
    the wrapped function's name, docstring, annotations and signature —
    FastMCP derives the tool name, description and argument schema from
    these, and the bare ``*args, **kwargs`` wrapper destroyed them.
    """
    import functools

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception as e:
            error_trace = traceback.format_exc()
            logger.error(f"Error in MCP handler: {str(e)}\n{error_trace}")
            sys.stderr.write(f"Error in MCP handler: {str(e)}\n{error_trace}\n")
            # For tool functions that return strings, return a formatted error message
            if hasattr(func, "__annotations__") and func.__annotations__.get("return") == str:
                return f"Error: {str(e)}"
            # For RPC methods that return dicts, return a properly formatted JSON error
            return {"error": {"code": -32000, "message": str(e)}}
    return wrapper
# Patch the tool method to automatically apply error handling.
# Keep a reference to the real FastMCP.tool so the patched version can
# still delegate registration to it.
original_tool = app.tool
def patched_tool(*args, **kwargs):
    # Same call shape as app.tool(...): returns a decorator that first wraps
    # the handler in handle_exceptions, then registers it as before.
    def decorator(func):
        wrapped_func = handle_exceptions(func)
        return original_tool(*args, **kwargs)(wrapped_func)
    return decorator
# Replace app.tool with our patched version
app.tool = patched_tool
# Get token from arguments, config, or environment (in that precedence order).
token = args.api_token or config.get("airtable_token", "") or os.environ.get("AIRTABLE_PERSONAL_ACCESS_TOKEN", "")
# Clean up token if it has trailing quote (can happen with Smithery quoting).
if token and token.endswith('"'):
    token = token[:-1]
# Default base ID; individual tools may override or change it at runtime.
base_id = args.base_id or config.get("base_id", "") or os.environ.get("AIRTABLE_BASE_ID", "")
if not token:
    logger.warning("No Airtable API token provided. Use --token, --config, or set AIRTABLE_PERSONAL_ACCESS_TOKEN environment variable.")
else:
    logger.info("Airtable authentication configured")
if base_id:
    logger.info(f"Using base ID: {base_id}")
else:
    logger.warning("No base ID provided. Use --base, --config, or set AIRTABLE_BASE_ID environment variable.")
# Helper functions for Airtable API calls
async def api_call(endpoint, method="GET", data=None, params=None):
    """Make an Airtable API call.

    endpoint: path appended to https://api.airtable.com/v0/ (e.g. "meta/bases"
        or "<base_id>/<table>").
    method: "GET", "POST", "PATCH" or "DELETE"; anything else raises ValueError.
    data: JSON body for POST/PATCH; params: query params for GET/DELETE.

    Returns the decoded JSON response on success, or a ``{"error": ...}`` dict
    on missing token, timeout, HTTP error, or any other failure — callers
    check for the "error" key rather than catching exceptions.

    NOTE(review): this is an async function but uses the blocking ``requests``
    library, so each call blocks the event loop for up to
    REQUEST_TIMEOUT_SECONDS — consider an async HTTP client if concurrency matters.
    """
    if not token:
        return {"error": "No Airtable API token provided. Use --token, --config, or set AIRTABLE_PERSONAL_ACCESS_TOKEN environment variable."}
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json"
    }
    url = f"https://api.airtable.com/v0/{endpoint}"
    try:
        if method == "GET":
            response = requests.get(url, headers=headers, params=params, timeout=REQUEST_TIMEOUT_SECONDS)
        elif method == "POST":
            response = requests.post(url, headers=headers, json=data, timeout=REQUEST_TIMEOUT_SECONDS)
        elif method == "PATCH":
            response = requests.patch(url, headers=headers, json=data, timeout=REQUEST_TIMEOUT_SECONDS)
        elif method == "DELETE":
            response = requests.delete(url, headers=headers, params=params, timeout=REQUEST_TIMEOUT_SECONDS)
        else:
            raise ValueError(f"Unsupported method: {method}")
        # Raise on 4xx/5xx so all HTTP failures funnel into the except blocks.
        response.raise_for_status()
        return response.json()
    except requests_exceptions.Timeout as e:
        logger.error(f"API call timed out after {REQUEST_TIMEOUT_SECONDS}s: {str(e)}")
        return {"error": f"Request to Airtable timed out after {REQUEST_TIMEOUT_SECONDS}s"}
    except Exception as e:
        logger.error(f"API call error: {str(e)}")
        return {"error": str(e)}
# Define MCP tool functions
@app.tool()
async def list_bases() -> str:
    """List all accessible Airtable bases"""
    if not token:
        return "Please provide an Airtable API token to list your bases."
    result = await api_call("meta/bases")
    if "error" in result:
        return f"Error: {result['error']}"
    bases = result.get("bases", [])
    if not bases:
        return "No bases found accessible with your token."
    # Render one numbered line per base.
    lines = []
    for index, base in enumerate(bases, start=1):
        lines.append(f"{index}. {base['name']} (ID: {base['id']})")
    return "Available bases:\n" + "\n".join(lines)
@app.tool()
async def list_tables(base_id_param: Optional[str] = None) -> str:
    """List all tables in the specified base or the default base"""
    # Read-only access to the module-level default base, so no `global`
    # declaration is needed (the original declared `global base_id` without
    # ever assigning to it).
    current_base = base_id_param or base_id
    if not token:
        return "Please provide an Airtable API token to list tables."
    if not current_base:
        return "Error: No base ID provided. Please specify a base_id or set AIRTABLE_BASE_ID environment variable."
    result = await api_call(f"meta/bases/{current_base}/tables")
    if "error" in result:
        return f"Error: {result['error']}"
    tables = result.get("tables", [])
    if not tables:
        return "No tables found in this base."
    # One numbered line per table, including its ID and field count.
    table_list = [f"{i+1}. {table['name']} (ID: {table['id']}, Fields: {len(table.get('fields', []))})"
                  for i, table in enumerate(tables)]
    return "Tables in this base:\n" + "\n".join(table_list)
@app.tool()
async def list_records(table_name: str, max_records: Optional[int] = 100, filter_formula: Optional[str] = None) -> str:
    """List records from a table with optional filtering"""
    if not token:
        return "Please provide an Airtable API token to list records."
    if not base_id:
        return "Error: No base ID set. Please use --base or set AIRTABLE_BASE_ID environment variable."
    # Build the Airtable list-records query.
    params = {"maxRecords": max_records}
    if filter_formula:
        params["filterByFormula"] = filter_formula
    result = await api_call(f"{base_id}/{table_name}", params=params)
    if "error" in result:
        return f"Error: {result['error']}"
    records = result.get("records", [])
    if not records:
        return "No records found in this table."
    # Render each record as "<n>. ID: <rec...> - field: value, ..."
    lines = []
    for position, record in enumerate(records, start=1):
        rec_id = record.get("id", "unknown")
        rendered_fields = ", ".join(f"{k}: {v}" for k, v in record.get("fields", {}).items())
        lines.append(f"{position}. ID: {rec_id} - {rendered_fields}")
    return "Records:\n" + "\n".join(lines)
@app.tool()
async def get_record(table_name: str, record_id: str) -> str:
    """Get a specific record from a table"""
    if not token:
        return "Please provide an Airtable API token to get records."
    if not base_id:
        return "Error: No base ID set. Please set AIRTABLE_BASE_ID environment variable."
    result = await api_call(f"{base_id}/{table_name}/{record_id}")
    if "error" in result:
        return f"Error: {result['error']}"
    fields = result.get("fields", {})
    if not fields:
        return f"Record {record_id} found but contains no fields."
    # One "key: value" line per field.
    rendered = [f"{key}: {value}" for key, value in fields.items()]
    return f"Record ID: {record_id}\n" + "\n".join(rendered)
@app.tool()
async def create_records(table_name: str, records_json: str) -> str:
    """Create records in a table from JSON string"""
    if not token:
        return "Please provide an Airtable API token to create records."
    if not base_id:
        return "Error: No base ID set. Please set AIRTABLE_BASE_ID environment variable."
    try:
        # records_json may be a single object of field values or a list of them.
        records_data = json.loads(records_json)
        # Format the records for Airtable API
        if not isinstance(records_data, list):
            records_data = [records_data]
        # Each object becomes one record's "fields" payload.
        records = [{"fields": record} for record in records_data]
        data = {"records": records}
        result = await api_call(f"{base_id}/{table_name}", method="POST", data=data)
        if "error" in result:
            return f"Error: {result['error']}"
        created_records = result.get("records", [])
        return f"Successfully created {len(created_records)} records."
    except json.JSONDecodeError:
        return "Error: Invalid JSON format in records_json parameter."
    except Exception as e:
        return f"Error creating records: {str(e)}"
@app.tool()
async def update_records(table_name: str, records_json: str) -> str:
    """Update records in a table from JSON string"""
    if not token:
        return "Please provide an Airtable API token to update records."
    if not base_id:
        return "Error: No base ID set. Please set AIRTABLE_BASE_ID environment variable."
    try:
        records_data = json.loads(records_json)
        # Format the records for Airtable API
        if not isinstance(records_data, list):
            records_data = [records_data]
        records = []
        for record in records_data:
            if "id" not in record:
                return "Error: Each record must have an 'id' field."
            # pop() removes "id" so only field data remains when the caller
            # passed fields directly alongside the id (flat format).
            rec_id = record.pop("id")
            fields = record.get("fields", record)  # Support both {id, fields} format and direct fields
            records.append({"id": rec_id, "fields": fields})
        data = {"records": records}
        # PATCH performs a partial update (untouched fields are preserved).
        result = await api_call(f"{base_id}/{table_name}", method="PATCH", data=data)
        if "error" in result:
            return f"Error: {result['error']}"
        updated_records = result.get("records", [])
        return f"Successfully updated {len(updated_records)} records."
    except json.JSONDecodeError:
        return "Error: Invalid JSON format in records_json parameter."
    except Exception as e:
        return f"Error updating records: {str(e)}"
@app.tool()
async def set_base_id(base_id_param: str) -> str:
    """Set the current Airtable base ID"""
    # Rebind the module-level base_id so every subsequent tool call
    # targets the newly selected base.
    global base_id
    base_id = base_id_param
    return f"Base ID set to: {base_id}"
# Note: rpc_method is not available in the current MCP version
# These methods would be used for Claude-specific functionality
# but are not needed for basic MCP operation
# Start the server
if __name__ == "__main__":
    # Blocking call: serves MCP requests until the process is stopped.
    app.start()
```
--------------------------------------------------------------------------------
/bin/airtable-crud-cli.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node
/**
* Command-line interface for Airtable CRUD operations
*/
const fs = require('fs');
const path = require('path');
const dotenv = require('dotenv');
const baseUtils = require('../tools/airtable-base');
const crudUtils = require('../tools/airtable-crud');
// NOTE(review): schemaUtils is required but not referenced in this script — confirm it is needed.
const schemaUtils = require('../tools/airtable-schema');
// Load environment variables
dotenv.config();
// Get the base ID from environment variables
const baseId = process.env.AIRTABLE_BASE_ID;
if (!baseId) {
  console.error('Error: AIRTABLE_BASE_ID not set in .env file');
  process.exit(1);
}
// Parse command line arguments
const args = process.argv.slice(2);
const command = args[0];
// Display help if no command is provided
if (!command) {
  showHelp();
  process.exit(0);
}
// Process the command; exit non-zero so shell scripts can detect failures.
processCommand(command, args.slice(1))
  .then(() => {
    console.log('Command completed successfully');
  })
  .catch(error => {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  });
/**
 * Dispatch a CLI command to its handler.
 * Unknown commands print help and exit with status 1.
 * @param {string} command - The command to process
 * @param {Array} args - The command arguments
 */
async function processCommand(command, args) {
  const handlers = {
    'list-bases': () => listBases(),
    'list-tables': () => listTables(),
    'list-records': () => listRecords(args),
    'get-record': () => getRecord(args),
    'create-records': () => createRecords(args),
    'update-records': () => updateRecords(args),
    'delete-records': () => deleteRecords(args),
    'export-records': () => exportRecords(args),
    'import-records': () => importRecords(args),
    'help': () => showHelp()
  };
  const handler = handlers[command];
  if (typeof handler !== 'function') {
    console.error(`Unknown command: ${command}`);
    showHelp();
    process.exit(1);
  }
  await handler();
}
/**
 * Print every base the configured token can access, one per line.
 */
async function listBases() {
  console.log('Listing accessible bases...');
  const bases = await baseUtils.listAllBases();
  console.log(`Found ${bases.length} accessible bases:`);
  for (const base of bases) {
    console.log(`- ${base.name} (${base.id})`);
  }
}
/**
 * Print every table in the configured base, one per line.
 */
async function listTables() {
  console.log(`Listing tables in base ${baseId}...`);
  const tables = await baseUtils.listTables(baseId);
  console.log(`Found ${tables.length} tables:`);
  for (const table of tables) {
    console.log(`- ${table.name} (${table.id})`);
  }
}
/**
 * List records from a table.
 * @param {Array} args - [tableName, maxRecords?, filterFormula?]
 */
async function listRecords(args) {
  if (args.length < 1) {
    console.error('Error: Table name is required');
    console.log('Usage: node airtable-crud-cli.js list-records <tableName> [maxRecords] [filterFormula]');
    process.exit(1);
  }
  const tableName = args[0];
  // Always pass the radix: bare parseInt can misparse input like "0x10".
  const maxRecords = args[1] ? Number.parseInt(args[1], 10) : 100;
  if (Number.isNaN(maxRecords)) {
    console.error(`Error: maxRecords must be a number, got "${args[1]}"`);
    process.exit(1);
  }
  const filterFormula = args[2] || null;
  console.log(`Listing records from table "${tableName}"...`);
  console.log(`Max records: ${maxRecords}`);
  if (filterFormula) {
    console.log(`Filter: ${filterFormula}`);
  }
  const records = await crudUtils.readRecords(baseId, tableName, maxRecords, filterFormula);
  console.log(`Found ${records.length} records:`);
  records.forEach(record => {
    console.log(`- ${record.id}: ${JSON.stringify(record)}`);
  });
}
/**
 * Fetch and pretty-print a single record by its ID.
 * @param {Array} args - [tableName, recordId]
 */
async function getRecord(args) {
  if (args.length < 2) {
    console.error('Error: Table name and record ID are required');
    console.log('Usage: node airtable-crud-cli.js get-record <tableName> <recordId>');
    process.exit(1);
  }
  const [tableName, recordId] = args;
  console.log(`Getting record ${recordId} from table "${tableName}"...`);
  const record = await crudUtils.getRecord(baseId, tableName, recordId);
  console.log('Record:');
  console.log(JSON.stringify(record, null, 2));
}
/**
 * Create records in a table from a JSON file holding an array of records.
 * @param {Array} args - [tableName, jsonFile]
 */
async function createRecords(args) {
  if (args.length < 2) {
    console.error('Error: Table name and JSON file are required');
    console.log('Usage: node airtable-crud-cli.js create-records <tableName> <jsonFile>');
    process.exit(1);
  }
  const [tableName, jsonFile] = args;
  let records;
  try {
    records = JSON.parse(fs.readFileSync(jsonFile, 'utf8'));
    if (!Array.isArray(records)) {
      console.error('Error: JSON file must contain an array of records');
      process.exit(1);
    }
  } catch (error) {
    console.error(`Error reading JSON file: ${error.message}`);
    process.exit(1);
  }
  console.log(`Creating ${records.length} records in table "${tableName}"...`);
  const createdRecords = await crudUtils.createRecords(baseId, tableName, records);
  console.log(`Created ${createdRecords.length} records`);
  console.log('First record:');
  console.log(JSON.stringify(createdRecords[0], null, 2));
}
/**
 * Update records in a table from a JSON file.
 * Each entry must have the shape { id: "rec...", fields: { ... } }.
 * @param {Array} args - [tableName, jsonFile]
 */
async function updateRecords(args) {
  if (args.length < 2) {
    console.error('Error: Table name and JSON file are required');
    console.log('Usage: node airtable-crud-cli.js update-records <tableName> <jsonFile>');
    process.exit(1);
  }
  const [tableName, jsonFile] = args;
  let records;
  try {
    records = JSON.parse(fs.readFileSync(jsonFile, 'utf8'));
    if (!Array.isArray(records)) {
      console.error('Error: JSON file must contain an array of records');
      process.exit(1);
    }
    // Validate the shape of every entry before touching the API.
    for (const record of records) {
      if (!record.id) {
        console.error('Error: Each record must have an id field');
        process.exit(1);
      }
      if (!record.fields || typeof record.fields !== 'object') {
        console.error('Error: Each record must have a fields object');
        process.exit(1);
      }
    }
  } catch (error) {
    console.error(`Error reading JSON file: ${error.message}`);
    process.exit(1);
  }
  console.log(`Updating ${records.length} records in table "${tableName}"...`);
  const updatedRecords = await crudUtils.updateRecords(baseId, tableName, records);
  console.log(`Updated ${updatedRecords.length} records`);
  console.log('First record:');
  console.log(JSON.stringify(updatedRecords[0], null, 2));
}
/**
 * Delete records from a table, given a comma-separated list of record IDs.
 * @param {Array} args - [tableName, "id1,id2,..."]
 */
async function deleteRecords(args) {
  if (args.length < 2) {
    console.error('Error: Table name and record IDs are required');
    console.log('Usage: node airtable-crud-cli.js delete-records <tableName> <recordId1,recordId2,...>');
    process.exit(1);
  }
  const [tableName, idList] = args;
  const recordIds = idList.split(',');
  console.log(`Deleting ${recordIds.length} records from table "${tableName}"...`);
  const deletedRecords = await crudUtils.deleteRecords(baseId, tableName, recordIds);
  console.log(`Deleted ${deletedRecords.length} records`);
}
/**
 * Export records from a table to a JSON file.
 * @param {Array} args - [tableName, outputFile, maxRecords?, filterFormula?]
 */
async function exportRecords(args) {
  if (args.length < 2) {
    console.error('Error: Table name and output file are required');
    console.log('Usage: node airtable-crud-cli.js export-records <tableName> <outputFile> [maxRecords] [filterFormula]');
    process.exit(1);
  }
  const tableName = args[0];
  const outputFile = args[1];
  // Always pass the radix: bare parseInt can misparse input like "0x10".
  const maxRecords = args[2] ? Number.parseInt(args[2], 10) : 100;
  if (Number.isNaN(maxRecords)) {
    console.error(`Error: maxRecords must be a number, got "${args[2]}"`);
    process.exit(1);
  }
  const filterFormula = args[3] || null;
  console.log(`Exporting records from table "${tableName}" to ${outputFile}...`);
  console.log(`Max records: ${maxRecords}`);
  if (filterFormula) {
    console.log(`Filter: ${filterFormula}`);
  }
  const records = await crudUtils.readRecords(baseId, tableName, maxRecords, filterFormula);
  // Write records to file
  try {
    fs.writeFileSync(outputFile, JSON.stringify(records, null, 2));
    console.log(`Exported ${records.length} records to ${outputFile}`);
  } catch (error) {
    console.error(`Error writing to file: ${error.message}`);
    process.exit(1);
  }
}
/**
 * Import records from a JSON file to a table.
 * Supports --clear (delete all existing rows first) and --update
 * (upsert: match existing rows by their Name value, update matches,
 * create the rest).
 * @param {Array} args - Command arguments
 */
async function importRecords(args) {
  if (args.length < 2) {
    console.error('Error: Table name and input file are required');
    console.log('Usage: node airtable-crud-cli.js import-records <tableName> <inputFile> [--update] [--clear]');
    process.exit(1);
  }
  const tableName = args[0];
  const inputFile = args[1];
  const update = args.includes('--update');
  const clear = args.includes('--clear');
  // Read the JSON file
  let records;
  try {
    const jsonData = fs.readFileSync(inputFile, 'utf8');
    records = JSON.parse(jsonData);
    if (!Array.isArray(records)) {
      console.error('Error: JSON file must contain an array of records');
      process.exit(1);
    }
  } catch (error) {
    console.error(`Error reading JSON file: ${error.message}`);
    process.exit(1);
  }
  console.log(`Importing ${records.length} records to table "${tableName}"...`);
  // Clear the table if requested
  if (clear) {
    console.log('Clearing existing records...');
    const existingRecords = await crudUtils.readRecords(baseId, tableName, 100000);
    if (existingRecords.length > 0) {
      const recordIds = existingRecords.map(record => record.id);
      await crudUtils.deleteRecords(baseId, tableName, recordIds);
      console.log(`Deleted ${existingRecords.length} existing records`);
    }
  }
  // Update existing records if requested
  if (update) {
    console.log('Updating existing records...');
    // Get existing records
    const existingRecords = await crudUtils.readRecords(baseId, tableName, 100000);
    const existingRecordsMap = {};
    // Create a map of existing records by a key field (assuming 'Name' is the key)
    // NOTE(review): this reads record.Name directly, so readRecords is assumed
    // to return records with field values flattened onto the record object
    // (not nested under record.fields) — confirm against airtable-crud.
    existingRecords.forEach(record => {
      if (record.Name) {
        existingRecordsMap[record.Name] = record;
      }
    });
    // Separate records to update and create
    const recordsToUpdate = [];
    const recordsToCreate = [];
    records.forEach(record => {
      if (record.Name && existingRecordsMap[record.Name]) {
        // Record exists, update it
        recordsToUpdate.push({
          id: existingRecordsMap[record.Name].id,
          fields: record
        });
      } else {
        // Record doesn't exist, create it
        recordsToCreate.push(record);
      }
    });
    // Update existing records
    if (recordsToUpdate.length > 0) {
      const updatedRecords = await crudUtils.updateRecords(baseId, tableName, recordsToUpdate);
      console.log(`Updated ${updatedRecords.length} existing records`);
    }
    // Create new records
    if (recordsToCreate.length > 0) {
      const createdRecords = await crudUtils.createRecords(baseId, tableName, recordsToCreate);
      console.log(`Created ${createdRecords.length} new records`);
    }
  } else {
    // Create all records
    const createdRecords = await crudUtils.createRecords(baseId, tableName, records);
    console.log(`Created ${createdRecords.length} records`);
  }
}
/**
 * Print CLI usage information to stdout.
 */
function showHelp() {
  const lines = [
    'Airtable CRUD CLI',
    '================',
    '',
    'Usage: node airtable-crud-cli.js <command> [options]',
    '',
    'Commands:',
    ' list-bases List all accessible bases',
    ' list-tables List all tables in the base',
    ' list-records <tableName> [max] [filter] List records from a table',
    ' get-record <tableName> <recordId> Get a specific record',
    ' create-records <tableName> <jsonFile> Create records from a JSON file',
    ' update-records <tableName> <jsonFile> Update records from a JSON file',
    ' delete-records <tableName> <id1,id2,...> Delete records from a table',
    ' export-records <tableName> <file> [max] Export records to a JSON file',
    ' import-records <tableName> <file> [flags] Import records from a JSON file',
    ' help Show this help',
    '',
    'Flags for import-records:',
    ' --update Update existing records (match by Name field)',
    ' --clear Clear all existing records before import',
    '',
    'Examples:',
    ' node airtable-crud-cli.js list-tables',
    ' node airtable-crud-cli.js list-records "My Table" 10',
    ' node airtable-crud-cli.js get-record "My Table" rec123456',
    ' node airtable-crud-cli.js create-records "My Table" data.json',
    ' node airtable-crud-cli.js export-records "My Table" export.json 1000',
    ' node airtable-crud-cli.js import-records "My Table" import.json --update'
  ];
  for (const line of lines) {
    console.log(line);
  }
}
```
--------------------------------------------------------------------------------
/examples/typescript/advanced-ai-prompts.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Advanced AI Prompts TypeScript Example
* Demonstrates enterprise-grade AI capabilities with strict typing
*/
import {
AirtableMCPServer,
AnalyzeDataPrompt,
CreateReportPrompt,
PredictiveAnalyticsPrompt,
NaturalLanguageQueryPrompt,
SmartSchemaDesignPrompt,
DataQualityAuditPrompt,
OptimizeWorkflowPrompt,
AutomationRecommendationsPrompt,
AnalysisResult,
ReportResult,
PredictionResult,
WorkflowOptimizationResult
} from '@rashidazarang/airtable-mcp/types';
// Enterprise AI Analytics Class
// NOTE(review): most methods below call `handlePromptGet` and then return
// hard-coded illustrative result objects; the awaited `response` is unused
// except in processNaturalLanguageQuery and the pass-through methods.
// Confirm whether real prompt output should populate the returned structures.
class EnterpriseAIAnalytics {
  private server: AirtableMCPServer;
  constructor() {
    this.server = new AirtableMCPServer();
  }
  // Advanced Statistical Analysis with Type Safety
  async performStatisticalAnalysis(table: string): Promise<AnalysisResult> {
    const params: AnalyzeDataPrompt = {
      table,
      analysis_type: 'statistical',
      confidence_level: 0.99,
      field_focus: 'revenue,conversion_rate,customer_satisfaction',
      time_dimension: 'created_date'
    };
    const response = await this.server.handlePromptGet('analyze_data', params);
    // Type-safe result processing
    // NOTE(review): `response` is not consumed; the result below is static sample data.
    const result: AnalysisResult = {
      summary: 'Comprehensive statistical analysis completed',
      key_findings: [
        'Revenue shows 15.3% growth trend',
        'Conversion rate correlation: 0.78',
        'Customer satisfaction: 94.2% positive'
      ],
      statistical_measures: {
        mean: 45670.23,
        median: 42150.00,
        std_deviation: 12340.56,
        correlation_coefficients: {
          'revenue_conversion': 0.78,
          'satisfaction_retention': 0.85
        },
        confidence_intervals: [
          { field: 'revenue', lower: 40000, upper: 51000, confidence: 0.99 },
          { field: 'conversion_rate', lower: 0.12, upper: 0.18, confidence: 0.99 }
        ]
      },
      trends: [
        {
          field: 'revenue',
          direction: 'increasing',
          strength: 'strong',
          significance: 0.97
        }
      ],
      recommendations: [
        'Implement predictive modeling for revenue forecasting',
        'Establish monitoring dashboard for key metrics',
        'Consider A/B testing for conversion optimization'
      ],
      next_steps: [
        'Set up automated reporting pipeline',
        'Deploy real-time analytics dashboard',
        'Schedule quarterly deep-dive analysis'
      ]
    };
    return result;
  }
  // Executive Report Generation with Business Intelligence
  async generateExecutiveReport(table: string, audience: 'executives' | 'managers' | 'analysts' | 'technical_team'): Promise<ReportResult> {
    const params: CreateReportPrompt = {
      table,
      report_type: 'executive_summary',
      target_audience: audience,
      include_recommendations: true,
      time_period: 'Q4 2024',
      format_preference: 'mixed'
    };
    const response = await this.server.handlePromptGet('create_report', params);
    // NOTE(review): static sample report; `response` is unused here as well.
    const result: ReportResult = {
      title: `Q4 2024 Executive Summary - ${table} Analysis`,
      executive_summary: 'Strategic overview of business performance with actionable insights and growth opportunities.',
      detailed_sections: [
        {
          heading: 'Performance Metrics',
          content: 'Comprehensive analysis of key performance indicators showing strong growth trajectory.',
          supporting_data: [
            { metric: 'Revenue Growth', value: '15.3%', trend: 'positive' },
            { metric: 'Customer Acquisition', value: '1,247', trend: 'positive' },
            { metric: 'Retention Rate', value: '94.2%', trend: 'stable' }
          ],
          visualizations: [
            { type: 'line_chart', data: {}, description: 'Revenue trend over time' },
            { type: 'bar_chart', data: {}, description: 'Customer acquisition by channel' }
          ]
        },
        {
          heading: 'Strategic Opportunities',
          content: 'Identified high-impact areas for business expansion and optimization.',
          supporting_data: [
            { opportunity: 'Market Expansion', impact: 'High', effort: 'Medium' },
            { opportunity: 'Process Automation', impact: 'Medium', effort: 'Low' }
          ]
        }
      ],
      key_metrics: {
        'Revenue': { value: '$2.4M', change: '+15.3%', significance: 'high' },
        'Customer Count': { value: '12,470', change: '+8.2%', significance: 'medium' },
        'Satisfaction Score': { value: '4.7/5', change: '+0.3', significance: 'high' }
      },
      recommendations: [
        {
          priority: 'high',
          recommendation: 'Implement predictive analytics for demand forecasting',
          expected_impact: '12-18% efficiency improvement',
          implementation_effort: 'medium'
        },
        {
          priority: 'medium',
          recommendation: 'Enhance customer segmentation strategy',
          expected_impact: '8-12% conversion rate improvement',
          implementation_effort: 'low'
        }
      ],
      appendices: [
        { title: 'Technical Methodology', content: 'Detailed explanation of analytical methods used' },
        { title: 'Data Sources', content: 'Comprehensive list of data sources and validation methods' }
      ]
    };
    return result;
  }
  // Advanced Predictive Analytics with Machine Learning
  async performPredictiveAnalytics(table: string, targetField: string): Promise<PredictionResult> {
    const params: PredictiveAnalyticsPrompt = {
      table,
      target_field: targetField,
      prediction_periods: 12,
      algorithm: 'random_forest',
      include_confidence_intervals: true,
      historical_periods: 24,
      external_factors: ['market_trends', 'seasonality', 'economic_indicators'],
      business_context: 'Enterprise revenue forecasting with risk assessment'
    };
    const response = await this.server.handlePromptGet('predictive_analytics', params);
    // NOTE(review): static sample predictions; `response` is unused.
    const result: PredictionResult = {
      predictions: [
        {
          period: '2025-01',
          predicted_value: 125670.45,
          confidence_interval: { lower: 118450.23, upper: 132890.67 },
          probability_bands: [
            { probability: 0.68, range: [120000, 131000] },
            { probability: 0.95, range: [115000, 136000] }
          ]
        },
        {
          period: '2025-02',
          predicted_value: 128340.12,
          confidence_interval: { lower: 121120.89, upper: 135559.35 }
        }
      ],
      model_performance: {
        algorithm_used: 'random_forest',
        accuracy_metrics: {
          'r_squared': 0.847,
          'mae': 4567.89,
          'rmse': 6234.12,
          'mape': 3.8
        },
        feature_importance: {
          'historical_revenue': 0.34,
          'seasonality': 0.28,
          'market_trends': 0.23,
          'customer_count': 0.15
        },
        validation_results: {
          'cross_validation_score': 0.82,
          'holdout_accuracy': 0.79,
          'stability_index': 0.91
        }
      },
      business_insights: {
        trend_direction: 'positive',
        seasonality_detected: true,
        external_factors_impact: [
          'Strong correlation with market expansion',
          'Seasonal peak in Q4 consistently observed',
          'Economic indicators show positive influence'
        ],
        risk_factors: [
          'Market volatility could impact 15% variance',
          'Supply chain disruptions possible',
          'Competitive landscape changes'
        ]
      },
      recommendations: [
        {
          type: 'strategic',
          recommendation: 'Prepare for 23% capacity increase by Q3 2025',
          timing: '6 months lead time',
          confidence: 0.87
        },
        {
          type: 'operational',
          recommendation: 'Implement dynamic pricing based on demand forecasts',
          timing: 'Immediate',
          confidence: 0.94
        },
        {
          type: 'tactical',
          recommendation: 'Establish risk monitoring for volatility indicators',
          timing: '3 months',
          confidence: 0.89
        }
      ]
    };
    return result;
  }
  // Natural Language Query Processing
  async processNaturalLanguageQuery(question: string, tables?: string[]): Promise<string> {
    const params: NaturalLanguageQueryPrompt = {
      question,
      tables: tables?.join(','),
      response_format: 'natural_language',
      context_awareness: true,
      confidence_threshold: 0.85,
      clarifying_questions: true
    };
    const response = await this.server.handlePromptGet('natural_language_query', params);
    // Unlike the analysis methods above, this returns the actual prompt output text.
    return response.messages[0].content.text;
  }
  // Smart Schema Design with Compliance
  // NOTE(review): the `requirements` parameter is accepted but never used;
  // the prompt is built entirely from fixed values. Confirm intended behavior.
  async designOptimalSchema(purpose: string, requirements: string[]): Promise<any> {
    const params: SmartSchemaDesignPrompt = {
      purpose,
      data_types: ['text', 'number', 'date', 'select', 'attachment'],
      expected_volume: 'enterprise',
      compliance_requirements: ['GDPR', 'HIPAA'],
      performance_priorities: ['query_speed', 'scalability'],
      integration_needs: ['API access', 'webhook notifications'],
      user_access_patterns: 'Multi-team collaboration with role-based permissions'
    };
    const response = await this.server.handlePromptGet('smart_schema_design', params);
    return response;
  }
  // Comprehensive Data Quality Audit
  async performDataQualityAudit(table: string): Promise<any> {
    const params: DataQualityAuditPrompt = {
      table,
      quality_dimensions: ['completeness', 'accuracy', 'consistency', 'timeliness', 'validity'],
      automated_fixes: true,
      severity_threshold: 'medium',
      compliance_context: 'Enterprise data governance standards',
      reporting_requirements: ['executive_summary', 'detailed_findings', 'remediation_plan']
    };
    const response = await this.server.handlePromptGet('data_quality_audit', params);
    return response;
  }
  // Workflow Optimization Analysis
  // NOTE(review): `painPoints` is accepted but not referenced when building
  // the prompt parameters — confirm whether it should feed `constraints`.
  async optimizeWorkflow(workflowDescription: string, painPoints: string[]): Promise<WorkflowOptimizationResult> {
    const params: OptimizeWorkflowPrompt = {
      table: 'workflow_data',
      current_process_description: workflowDescription,
      optimization_goals: ['efficiency', 'accuracy', 'cost_reduction'],
      constraints: ['regulatory_compliance', 'legacy_system_integration'],
      automation_preference: 'moderate',
      change_tolerance: 'medium'
    };
    const response = await this.server.handlePromptGet('optimize_workflow', params);
    // Return a comprehensive optimization result
    // NOTE(review): static sample result; `response` is unused.
    const result: WorkflowOptimizationResult = {
      current_state_analysis: {
        efficiency_score: 72,
        bottlenecks: [
          { step: 'Manual data entry', impact: 'high', description: 'Causes 40% of processing delays' },
          { step: 'Approval routing', impact: 'medium', description: 'Average 2.3 day approval time' }
        ],
        resource_utilization: {
          'staff_time': 0.68,
          'system_capacity': 0.84,
          'automation_coverage': 0.23
        }
      },
      optimization_recommendations: [
        {
          category: 'automation',
          recommendation: 'Implement automated data validation and entry',
          expected_benefits: ['45% time reduction', '90% error reduction'],
          implementation_complexity: 'moderate',
          estimated_roi: '340% within 12 months',
          timeline: '3-4 months'
        },
        {
          category: 'process_redesign',
          recommendation: 'Parallel approval workflow with smart routing',
          expected_benefits: ['60% faster approvals', 'Improved transparency'],
          implementation_complexity: 'complex',
          estimated_roi: '220% within 18 months',
          timeline: '6-8 months'
        }
      ],
      implementation_roadmap: [
        {
          phase: 1,
          duration: '3 months',
          objectives: ['Implement basic automation', 'Staff training'],
          deliverables: ['Automated validation system', 'Training materials'],
          success_metrics: ['25% efficiency improvement', '95% staff adoption']
        },
        {
          phase: 2,
          duration: '4 months',
          objectives: ['Advanced workflow redesign', 'Integration testing'],
          deliverables: ['New approval system', 'Performance dashboard'],
          success_metrics: ['60% approval time reduction', '99.5% system uptime']
        }
      ],
      risk_assessment: [
        {
          risk: 'Staff resistance to change',
          probability: 'medium',
          impact: 'medium',
          mitigation: 'Comprehensive change management and training program'
        },
        {
          risk: 'System integration challenges',
          probability: 'low',
          impact: 'high',
          mitigation: 'Phased rollout with fallback procedures'
        }
      ]
    };
    return result;
  }
  // Automation Recommendations Engine
  async generateAutomationRecommendations(workflowDescription: string): Promise<any> {
    const params: AutomationRecommendationsPrompt = {
      workflow_description: workflowDescription,
      current_pain_points: ['manual_data_entry', 'approval_delays', 'reporting_overhead'],
      automation_scope: 'end_to_end',
      technical_constraints: ['legacy_system_compatibility', 'security_requirements'],
      business_impact_priority: ['time_efficiency', 'error_reduction', 'cost_savings'],
      implementation_timeline: 'medium_term',
      risk_tolerance: 'moderate'
    };
    const response = await this.server.handlePromptGet('automation_recommendations', params);
    return response;
  }
}
// Example usage with comprehensive error handling
// Runs each analytics capability once, logging progress; rethrows on failure
// so callers observe a rejected promise.
async function demonstrateEnterpriseAI(): Promise<void> {
  const analytics = new EnterpriseAIAnalytics();
  try {
    console.log('🤖 Starting Enterprise AI Analysis...');
    // Statistical Analysis
    console.log('\n📊 Performing Statistical Analysis...');
    const analysisResult = await analytics.performStatisticalAnalysis('Sales');
    console.log('Analysis completed:', analysisResult.summary);
    // Executive Report
    console.log('\n📋 Generating Executive Report...');
    const reportResult = await analytics.generateExecutiveReport('Sales', 'executives');
    console.log('Report generated:', reportResult.title);
    // Predictive Analytics
    console.log('\n🔮 Running Predictive Analytics...');
    const predictionResult = await analytics.performPredictiveAnalytics('Sales', 'revenue');
    console.log('Predictions generated:', predictionResult.predictions.length, 'periods');
    // Natural Language Query
    console.log('\n🗣️ Processing Natural Language Query...');
    const nlResult = await analytics.processNaturalLanguageQuery(
      'What are the top 5 performing products by revenue this quarter?',
      ['Products', 'Sales']
    );
    // Only a preview of the (possibly long) response is printed.
    console.log('NL Response:', nlResult.substring(0, 100) + '...');
    // Workflow Optimization
    console.log('\n⚡ Analyzing Workflow Optimization...');
    const workflowResult = await analytics.optimizeWorkflow(
      'Manual invoice processing with email approvals',
      ['Slow approval times', 'Manual data entry errors']
    );
    console.log('Optimization completed, efficiency score:', workflowResult.current_state_analysis.efficiency_score);
    console.log('\n✅ All Enterprise AI operations completed successfully!');
  } catch (error) {
    console.error('❌ Enterprise AI Error:', error);
    throw error;
  }
}
// Export for testing and integration
export {
  EnterpriseAIAnalytics,
  demonstrateEnterpriseAI
};
// Run demonstration if executed directly
// NOTE(review): `require.main === module` is a CommonJS idiom while this file
// uses ES module imports above — confirm the build transpiles to CJS.
if (require.main === module) {
  demonstrateEnterpriseAI()
    .then(() => process.exit(0))
    .catch((error) => {
      console.error('Fatal error:', error);
      process.exit(1);
    });
}
```
--------------------------------------------------------------------------------
/src/python/airtable_mcp/src/server.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python3
"""
Airtable MCP Server
-------------------
This is a Model Context Protocol (MCP) server that exposes Airtable operations as tools.
"""
import os
import sys
import json
import asyncio
import logging
import argparse
from contextlib import asynccontextmanager
from typing import Any, Dict, List, Optional, AsyncIterator, Callable
from dotenv import load_dotenv
# Startup diagnostics: help debug interpreter/environment mismatches.
print(f"Python version: {sys.version}")
print(f"Python executable: {sys.executable}")
print(f"Python path: {sys.path}")
# Import MCP-related modules - will be available when run with Python 3.10+
try:
    from mcp.server.fastmcp import FastMCP
    from mcp.server import stdio
    print("Successfully imported MCP modules")
except ImportError as e:
    print(f"Error importing MCP modules: {e}")
    print("Error: MCP SDK requires Python 3.10+")
    print("Please install Python 3.10 or newer and try again.")
    sys.exit(1)
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("airtable-mcp")
# Parse command line arguments
def parse_args():
    """Build the server's CLI parser and parse sys.argv."""
    parser = argparse.ArgumentParser(description="Airtable MCP Server")
    option_specs = (
        ("--token", {"dest": "api_token", "help": "Airtable Personal Access Token"}),
        ("--base", {"dest": "base_id", "help": "Airtable Base ID"}),
        ("--port", {"type": int, "default": 8080, "help": "MCP server port for dev mode"}),
        ("--host", {"default": "127.0.0.1", "help": "MCP server host for dev mode"}),
        ("--dev", {"action": "store_true", "help": "Run in development mode"}),
    )
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
# Load environment variables as fallback
load_dotenv()
# Create MCP server
mcp = FastMCP("Airtable Tools")
# Server state will be initialized in main()
# Mutable runtime configuration shared by every tool handler below.
server_state = {
    "base_id": "",
    "token": "",
}
# Authentication middleware
@mcp.middleware
async def auth_middleware(context, next_handler):
    """Pass-through middleware: token enforcement is deferred to api_call."""
    # Skip auth check for tool listing
    if hasattr(context, 'operation') and context.operation == "list_tools":
        return await next_handler(context)
    # Allow all operations without a token check - actual API calls will be checked later
    return await next_handler(context)
# Helper functions for Airtable API calls
async def api_call(endpoint, method="GET", data=None, params=None):
    """Make an Airtable API call.

    The blocking `requests` call is executed in a worker thread via
    asyncio.to_thread so it does not stall the event loop that serves
    other MCP requests. Returns the decoded JSON body on success, or a
    dict with an "error" key on any failure.
    """
    import requests
    # Check if token is available before making API calls
    if not server_state["token"]:
        return {"error": "No Airtable API token provided. Please set via --token or AIRTABLE_PERSONAL_ACCESS_TOKEN"}
    headers = {
        "Authorization": f"Bearer {server_state['token']}",
        "Content-Type": "application/json"
    }
    url = f"https://api.airtable.com/v0/{endpoint}"
    try:
        if method == "GET":
            response = await asyncio.to_thread(requests.get, url, headers=headers, params=params)
        elif method == "POST":
            response = await asyncio.to_thread(requests.post, url, headers=headers, json=data)
        elif method == "PATCH":
            response = await asyncio.to_thread(requests.patch, url, headers=headers, json=data)
        elif method == "DELETE":
            response = await asyncio.to_thread(requests.delete, url, headers=headers, params=params)
        else:
            raise ValueError(f"Unsupported method: {method}")
        response.raise_for_status()
        return response.json()
    except Exception as e:
        logger.error(f"API call error: {str(e)}")
        return {"error": str(e)}
# Define MCP tool functions
@mcp.tool()
async def list_bases() -> str:
    """List all accessible Airtable bases"""
    if not server_state["token"]:
        return "Please provide an Airtable API token to list your bases."
    result = await api_call("meta/bases")
    if "error" in result:
        return f"Error: {result['error']}"
    bases = result.get("bases", [])
    if not bases:
        return "No bases found accessible with your token."
    # Number the bases starting at 1 for readability.
    lines = []
    for index, base in enumerate(bases, start=1):
        lines.append(f"{index}. {base['name']} (ID: {base['id']})")
    return "Available bases:\n" + "\n".join(lines)
@mcp.tool()
async def list_tables(base_id: Optional[str] = None) -> str:
    """List all tables in the specified base or the default base"""
    if not server_state["token"]:
        return "Please provide an Airtable API token to list tables."
    base = base_id or server_state["base_id"]
    if not base:
        return "Error: No base ID provided. Please specify a base_id or set AIRTABLE_BASE_ID in your .env file."
    result = await api_call(f"meta/bases/{base}/tables")
    if "error" in result:
        return f"Error: {result['error']}"
    tables = result.get("tables", [])
    if not tables:
        return "No tables found in this base."
    lines = []
    for index, table in enumerate(tables, start=1):
        field_count = len(table.get('fields', []))
        lines.append(f"{index}. {table['name']} (ID: {table['id']}, Fields: {field_count})")
    return "Tables in this base:\n" + "\n".join(lines)
@mcp.tool()
async def list_records(table_name: str, max_records: Optional[int] = 100, filter_formula: Optional[str] = None) -> str:
    """List records from a table with optional filtering"""
    if not server_state["token"]:
        return "Please provide an Airtable API token to list records."
    base = server_state["base_id"]
    if not base:
        return "Error: No base ID set. Please set a base ID."
    params = {"maxRecords": max_records}
    if filter_formula:
        params["filterByFormula"] = filter_formula
    result = await api_call(f"{base}/{table_name}", params=params)
    if "error" in result:
        return f"Error: {result['error']}"
    records = result.get("records", [])
    if not records:
        return "No records found in this table."
    # One line per record: "<n>. ID: <id> - <field>: <value>, ..."
    lines = [
        f"{idx}. ID: {rec.get('id', 'unknown')} - "
        + ", ".join(f"{k}: {v}" for k, v in rec.get("fields", {}).items())
        for idx, rec in enumerate(records, start=1)
    ]
    return "Records:\n" + "\n".join(lines)
@mcp.tool()
async def get_record(table_name: str, record_id: str) -> str:
    """Get a specific record from a table"""
    if not server_state["token"]:
        return "Please provide an Airtable API token to get records."
    base = server_state["base_id"]
    if not base:
        return "Error: No base ID set. Please set a base ID."
    result = await api_call(f"{base}/{table_name}/{record_id}")
    if "error" in result:
        return f"Error: {result['error']}"
    fields = result.get("fields", {})
    if not fields:
        return f"Record {record_id} found but contains no fields."
    # One "key: value" line per field, headed by the record ID.
    body = "\n".join(f"{key}: {value}" for key, value in fields.items())
    return f"Record ID: {record_id}\n" + body
@mcp.tool()
async def create_records(table_name: str, records_json: str) -> str:
    """Create records in a table from JSON string

    Accepts either a single JSON object of fields or a JSON array of such
    objects. Returns a human-readable success or error message.
    """
    if not server_state["token"]:
        return "Please provide an Airtable API token to create records."
    base = server_state["base_id"]
    if not base:
        return "Error: No base ID set. Please set a base ID."
    try:
        records_data = json.loads(records_json)
        # Normalize a single object into a one-element list.
        if not isinstance(records_data, list):
            records_data = [records_data]
        # Wrap each fields object in the {"fields": ...} envelope Airtable expects.
        records = [{"fields": record} for record in records_data]
        # Airtable's REST API accepts at most 10 records per create request,
        # so send the payload in batches (consistent with delete_records).
        created_count = 0
        for i in range(0, len(records), 10):
            data = {"records": records[i:i+10]}
            result = await api_call(f"{base}/{table_name}", method="POST", data=data)
            if "error" in result:
                return f"Error: {result['error']}"
            created_count += len(result.get("records", []))
        return f"Successfully created {created_count} records."
    except json.JSONDecodeError:
        return "Error: Invalid JSON format in records_json parameter."
    except Exception as e:
        return f"Error creating records: {str(e)}"
@mcp.tool()
async def update_records(table_name: str, records_json: str) -> str:
    """Update records in a table from JSON string

    Each record must carry an "id". Both {"id": ..., "fields": {...}} and
    flat {"id": ..., <field>: ...} shapes are accepted. Returns a
    human-readable success or error message.
    """
    if not server_state["token"]:
        return "Please provide an Airtable API token to update records."
    base = server_state["base_id"]
    if not base:
        return "Error: No base ID set. Please set a base ID."
    try:
        records_data = json.loads(records_json)
        # Normalize a single object into a one-element list.
        if not isinstance(records_data, list):
            records_data = [records_data]
        records = []
        for record in records_data:
            if "id" not in record:
                return "Error: Each record must have an 'id' field."
            rec_id = record.pop("id")
            # Support both {id, fields} format and direct fields.
            fields = record.get("fields", record)
            records.append({"id": rec_id, "fields": fields})
        # Airtable's REST API accepts at most 10 records per update request,
        # so PATCH in batches (consistent with delete_records).
        updated_count = 0
        for i in range(0, len(records), 10):
            data = {"records": records[i:i+10]}
            result = await api_call(f"{base}/{table_name}", method="PATCH", data=data)
            if "error" in result:
                return f"Error: {result['error']}"
            updated_count += len(result.get("records", []))
        return f"Successfully updated {updated_count} records."
    except json.JSONDecodeError:
        return "Error: Invalid JSON format in records_json parameter."
    except Exception as e:
        return f"Error updating records: {str(e)}"
@mcp.tool()
async def delete_records(table_name: str, record_ids: str) -> str:
    """Delete records from a table by their IDs (comma-separated or JSON array)"""
    if not server_state["token"]:
        return "Please provide an Airtable API token to delete records."
    base = server_state["base_id"]
    if not base:
        return "Error: No base ID set. Please set a base ID."
    try:
        # Accept either a JSON array ("[...]") or a comma-separated string.
        if record_ids.startswith("["):
            ids_list = json.loads(record_ids)
        else:
            ids_list = [piece.strip() for piece in record_ids.split(",")]
        # Airtable caps delete requests at 10 records, so work in chunks.
        deleted_count = 0
        for start in range(0, len(ids_list), 10):
            chunk = ids_list[start:start + 10]
            result = await api_call(
                f"{base}/{table_name}", method="DELETE", params={"records[]": chunk}
            )
            if "error" in result:
                return f"Error deleting records: {result['error']}"
            deleted_count += len(result.get("records", []))
        return f"Successfully deleted {deleted_count} records."
    except json.JSONDecodeError:
        return "Error: Invalid format for record_ids. Use comma-separated IDs or JSON array."
    except Exception as e:
        return f"Error deleting records: {str(e)}"
@mcp.tool()
async def set_base_id(base_id: str) -> str:
    """Set the current Airtable base ID"""
    # Persist the choice in shared server state for subsequent tool calls.
    server_state["base_id"] = base_id
    return f"Base ID set to: {base_id}"
# Resources implementation for MCP protocol
@mcp.resource("airtable://base/{base_id}")
async def get_base_resource(base_id: str) -> Dict:
    """Get base metadata as a resource"""
    if not server_state["token"]:
        return {"error": "No Airtable API token provided"}
    result = await api_call(f"meta/bases/{base_id}/tables")
    if "error" in result:
        return {"error": result["error"]}
    tables = result.get("tables", [])
    # Summarize each table down to id + name for the resource payload.
    summary = [{"id": tbl["id"], "name": tbl["name"]} for tbl in tables]
    return {
        "base_id": base_id,
        "tables_count": len(summary),
        "tables": summary,
    }
@mcp.resource("airtable://base/{base_id}/table/{table_name}")
async def get_table_resource(base_id: str, table_name: str) -> Dict:
    """Get table data as a resource"""
    if not server_state["token"]:
        return {"error": "No Airtable API token provided"}
    # Fetch a bounded sample of the table (first 100 records).
    result = await api_call(f"{base_id}/{table_name}", params={"maxRecords": 100})
    if "error" in result:
        return {"error": result["error"]}
    fetched = result.get("records", [])
    return {
        "base_id": base_id,
        "table_name": table_name,
        "records_count": len(fetched),
        "records": fetched,
    }
# Roots implementation for filesystem access
@mcp.rpc_method("roots/list")
async def roots_list() -> Dict:
    """List available filesystem roots for data import/export"""
    # Single well-known export directory; extend this list to expose more roots.
    return {
        "roots": [
            {
                "uri": "file:///tmp/airtable-exports",
                "name": "Airtable Exports Directory",
            }
        ]
    }
# Prompts implementation for guided interactions
@mcp.rpc_method("prompts/list")
async def prompts_list() -> Dict:
    """List available prompt templates"""
    def _prompt(name, description, args):
        # Build one prompt descriptor; every argument here is required.
        return {
            "name": name,
            "description": description,
            "arguments": [
                {"name": arg_name, "description": arg_desc, "required": True}
                for arg_name, arg_desc in args
            ],
        }
    return {
        "prompts": [
            _prompt(
                "analyze_base",
                "Analyze an Airtable base structure and suggest optimizations",
                [("base_id", "The Airtable base ID to analyze")],
            ),
            _prompt(
                "create_table_schema",
                "Generate a table schema based on requirements",
                [
                    ("requirements", "Description of the table requirements"),
                    ("table_name", "Name for the new table"),
                ],
            ),
            _prompt(
                "data_migration",
                "Plan data migration between tables or bases",
                [
                    ("source", "Source table/base identifier"),
                    ("destination", "Destination table/base identifier"),
                ],
            ),
        ]
    }
@mcp.rpc_method("prompts/get")
async def prompts_get(name: str, arguments: Optional[Dict] = None) -> Dict:
    """Get a specific prompt template with filled arguments"""
    # Templates use str.format() placeholders filled from `arguments`.
    prompts_templates = {
        "analyze_base": """Analyze the Airtable base '{base_id}' and provide:
1. Overview of all tables and their relationships
2. Data quality assessment
3. Performance optimization suggestions
4. Schema improvement recommendations
5. Automation opportunities""",
        "create_table_schema": """Create a table schema for '{table_name}' with these requirements:
{requirements}
Please provide:
1. Field definitions with appropriate types
2. Validation rules
3. Linked record relationships
4. Views and filters setup
5. Sample data structure""",
        "data_migration": """Plan a data migration from '{source}' to '{destination}':
1. Analyze source structure
2. Map fields between source and destination
3. Identify data transformation needs
4. Handle relationship mappings
5. Provide migration script
6. Include validation steps"""
    }
    template = prompts_templates.get(name)
    if template is None:
        return {"error": f"Unknown prompt: {name}"}
    if not arguments:
        # No arguments supplied: return the raw template unchanged.
        prompt = template
    else:
        try:
            prompt = template.format(**arguments)
        except KeyError as e:
            return {"error": f"Missing required argument: {e}"}
    return {
        "messages": [
            {
                "role": "user",
                "content": prompt,
            }
        ]
    }
# Sampling implementation for completion suggestions
@mcp.rpc_method("completion/complete")
async def completion_complete(ref: Dict, argument: Dict, partial: str) -> Dict:
    """Provide completion suggestions for partial inputs"""
    suggestions = []
    # Only tool-argument references are handled; other ref types yield nothing.
    if ref.get("type") == "ref/tool":
        tool_name = ref.get("name")
        arg_name = argument.get("name")
        if tool_name == "list_tables" and arg_name == "base_id":
            # Offer the currently configured base as a shortcut.
            current = server_state["base_id"]
            if current:
                suggestions.append({
                    "value": current,
                    "label": "Current base",
                    "insertText": current,
                })
        elif tool_name == "list_records" and arg_name == "filter_formula":
            # Common Airtable filter formulas, narrowed by the partial text.
            formulas = [
                "{Status} = 'Active'",
                "NOT({Completed})",
                "AND({Priority} = 'High', {Status} = 'Open')",
                "OR({Assigned} = 'Me', {Assigned} = BLANK())",
                "DATETIME_DIFF(TODAY(), {DueDate}, 'days') < 7",
            ]
            for formula in formulas:
                if not partial or partial.lower() in formula.lower():
                    suggestions.append({
                        "value": formula,
                        "label": formula,
                        "insertText": formula,
                    })
        elif tool_name in ["create_records", "update_records"] and arg_name == "records_json":
            # Ready-made JSON payload templates for record tools.
            templates = [
                '{"Name": "New Item", "Status": "Active"}',
                '[{"Name": "Item 1"}, {"Name": "Item 2"}]',
                '{"id": "rec123", "fields": {"Status": "Updated"}}',
            ]
            for template in templates:
                suggestions.append({
                    "value": template,
                    "label": f"Template: {template[:30]}...",
                    "insertText": template,
                })
    # Cap the response at 10 suggestions.
    return {
        "completion": {
            "values": suggestions[:10]
        }
    }
# Resources list implementation
@mcp.rpc_method("resources/list")
async def resources_list() -> Dict:
    """List available Airtable resources"""
    # Static templates are always advertised, even with no base configured.
    resources = [
        {
            "uri": "airtable://templates/base-schema",
            "name": "Base Schema Template",
            "description": "Template for creating base schemas",
            "mimeType": "application/json",
        },
        {
            "uri": "airtable://templates/automation-scripts",
            "name": "Automation Scripts",
            "description": "Common Airtable automation scripts",
            "mimeType": "text/javascript",
        },
    ]
    current_base = server_state["base_id"]
    if current_base:
        # Advertise the configured base itself as a resource.
        resources.append({
            "uri": f"airtable://base/{current_base}",
            "name": "Current Airtable Base",
            "description": f"Base ID: {current_base}",
            "mimeType": "application/json",
        })
        # With credentials available, enumerate the base's tables too.
        if server_state["token"]:
            result = await api_call(f"meta/bases/{current_base}/tables")
            if "tables" in result:
                for table in result.get("tables", []):
                    fields_count = len(table.get("fields", []))
                    resources.append({
                        "uri": f"airtable://base/{current_base}/table/{table['name']}",
                        "name": f"Table: {table['name']}",
                        "description": f"{fields_count} fields, ID: {table['id']}",
                        "mimeType": "application/json",
                    })
    return {"resources": resources}
# Resources read implementation
@mcp.rpc_method("resources/read")
async def resources_read(uri: str) -> Dict:
    """Read a specific resource by URI.

    Supports two static template URIs plus dynamic base/table URIs of the
    form airtable://base/{base_id}[/table/{table_name}]. Returns an MCP
    "contents" payload, or {"error": ...} for an unrecognized URI.
    """
    # Handle template resources
    if uri == "airtable://templates/base-schema":
        # Canned example schema (Projects/Tasks) rendered as pretty JSON text.
        return {
            "contents": [
                {
                    "uri": uri,
                    "mimeType": "application/json",
                    "text": json.dumps({
                        "tables": [
                            {
                                "name": "Projects",
                                "fields": [
                                    {"name": "Name", "type": "singleLineText"},
                                    {"name": "Status", "type": "singleSelect", "options": ["Planning", "Active", "Complete"]},
                                    {"name": "Start Date", "type": "date"},
                                    {"name": "End Date", "type": "date"},
                                    {"name": "Owner", "type": "collaborator"},
                                    {"name": "Tasks", "type": "linkedRecords"}
                                ]
                            },
                            {
                                "name": "Tasks",
                                "fields": [
                                    {"name": "Title", "type": "singleLineText"},
                                    {"name": "Description", "type": "multilineText"},
                                    {"name": "Project", "type": "linkedRecords"},
                                    {"name": "Assignee", "type": "collaborator"},
                                    {"name": "Priority", "type": "singleSelect", "options": ["Low", "Medium", "High"]},
                                    {"name": "Complete", "type": "checkbox"}
                                ]
                            }
                        ]
                    }, indent=2)
                }
            ]
        }
    elif uri == "airtable://templates/automation-scripts":
        # Static JavaScript snippets shipped verbatim as a text resource.
        return {
            "contents": [
                {
                    "uri": uri,
                    "mimeType": "text/javascript",
                    "text": """// Common Airtable Automation Scripts
// 1. Send notification when record matches condition
function notifyOnCondition(record) {
if (record.getCellValue('Status') === 'Urgent') {
// Send notification logic here
console.log('Urgent task:', record.getCellValue('Name'));
}
}
// 2. Auto-calculate fields
function calculateFields(record) {
const startDate = record.getCellValue('Start Date');
const endDate = record.getCellValue('End Date');
if (startDate && endDate) {
const duration = Math.ceil((endDate - startDate) / (1000 * 60 * 60 * 24));
return { 'Duration (days)': duration };
}
}
// 3. Bulk update records
async function bulkUpdate(table, condition, updates) {
const query = await table.selectRecordsAsync();
const recordsToUpdate = query.records.filter(condition);
const updatePromises = recordsToUpdate.map(record =>
table.updateRecordAsync(record.id, updates)
);
await Promise.all(updatePromises);
}"""
                }
            ]
        }
    # Handle base and table resources
    elif uri.startswith("airtable://base/"):
        # Split into [base_id] or [base_id, table_name] around "/table/".
        # NOTE(review): a table name containing "/table/" would mis-split — assumed not to occur.
        parts = uri.replace("airtable://base/", "").split("/table/")
        if len(parts) == 2:
            base_id, table_name = parts
            # Delegate to the table resource handler and wrap as JSON text.
            result = await get_table_resource(base_id, table_name)
            return {
                "contents": [
                    {
                        "uri": uri,
                        "mimeType": "application/json",
                        "text": json.dumps(result, indent=2)
                    }
                ]
            }
        elif len(parts) == 1:
            base_id = parts[0]
            # Delegate to the base metadata handler and wrap as JSON text.
            result = await get_base_resource(base_id)
            return {
                "contents": [
                    {
                        "uri": uri,
                        "mimeType": "application/json",
                        "text": json.dumps(result, indent=2)
                    }
                ]
            }
    # Anything else is unknown to this server.
    return {"error": f"Unknown resource URI: {uri}"}
def main():
    """Run the MCP server"""
    try:
        cli_args = parse_args()
        # CLI flags take precedence; environment variables are the fallback.
        server_state["token"] = cli_args.api_token or os.getenv("AIRTABLE_PERSONAL_ACCESS_TOKEN", "")
        server_state["base_id"] = cli_args.base_id or os.getenv("AIRTABLE_BASE_ID", "")
        if not server_state["token"]:
            logger.warning("No Airtable API token provided. Please set via --token or AIRTABLE_PERSONAL_ACCESS_TOKEN")
            logger.info("Tool listing will work but API calls will require a token")
        # Windows needs the selector event loop policy for this server.
        if sys.platform == 'win32':
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
        if cli_args.dev:
            # Development mode: serve over HTTP on the requested host/port.
            mcp.run(host=cli_args.host, port=cli_args.port)
        else:
            # Production mode: stdio interface for MCP.
            mcp.run()
    except Exception as e:
        logger.error(f"Server error: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()
```