This is page 2 of 5. Use http://codebase.md/rashidazarang/airtable-mcp?lines=true&page={x} to view the full context. # Directory Structure ``` ├── .eslintrc.js ├── .github │ ├── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ ├── custom.md │ │ └── feature_request.md │ └── pull_request_template.md ├── .gitignore ├── .nvmrc ├── .prettierrc ├── bin │ ├── airtable-crud-cli.js │ └── airtable-mcp.js ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── docker │ ├── Dockerfile │ └── Dockerfile.node ├── docs │ ├── guides │ │ ├── CLAUDE_INTEGRATION.md │ │ ├── ENHANCED_FEATURES.md │ │ ├── INSTALLATION.md │ │ └── QUICK_START.md │ └── releases │ ├── RELEASE_NOTES_v1.2.2.md │ ├── RELEASE_NOTES_v1.2.4.md │ ├── RELEASE_NOTES_v1.4.0.md │ ├── RELEASE_NOTES_v1.5.0.md │ └── RELEASE_NOTES_v1.6.0.md ├── examples │ ├── airtable-crud-example.js │ ├── building-mcp.md │ ├── claude_config.json │ ├── claude_simple_config.json │ ├── env-demo.js │ ├── example_usage.md │ ├── example-tasks-update.json │ ├── example-tasks.json │ ├── python_debug_patch.txt │ ├── sample-transform.js │ ├── typescript │ │ ├── advanced-ai-prompts.ts │ │ ├── basic-usage.ts │ │ └── claude-desktop-config.json │ └── windsurf_mcp_config.json ├── index.js ├── ISSUE_RESPONSES.md ├── jest.config.js ├── LICENSE ├── package-lock.json ├── package.json ├── PROJECT_STRUCTURE.md ├── README.md ├── RELEASE_SUMMARY_v3.2.x.md ├── RELEASE_v3.2.1.md ├── RELEASE_v3.2.3.md ├── RELEASE_v3.2.4.md ├── requirements.txt ├── SECURITY_NOTICE.md ├── smithery.yaml ├── src │ ├── index.js │ ├── javascript │ │ ├── airtable_simple_production.js │ │ └── airtable_simple.js │ ├── python │ │ ├── airtable_mcp │ │ │ ├── __init__.py │ │ │ └── src │ │ │ └── server.py │ │ ├── inspector_server.py │ │ ├── inspector.py │ │ ├── setup.py │ │ ├── simple_airtable_server.py │ │ └── test_client.py │ └── typescript │ ├── ai-prompts.d.ts │ ├── airtable-mcp-server.d.ts │ ├── airtable-mcp-server.ts │ ├── errors.ts │ ├── index.d.ts │ ├── prompt-templates.ts │ ├── test-suite.d.ts │ ├── test-suite.ts │ ├── tools-schemas.ts │ └── tools.d.ts ├── TESTING_REPORT.md ├── tests │ ├── test_all_features.sh │ ├── test_mcp_comprehensive.js │ ├── test_v1.5.0_final.sh │ └── test_v1.6.0_comprehensive.sh ├── tsconfig.json └── types └── typescript ├── airtable-mcp-server.d.ts ├── errors.d.ts ├── prompt-templates.d.ts ├── test-suite.d.ts └── tools-schemas.d.ts ``` # Files -------------------------------------------------------------------------------- /examples/env-demo.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Demo script that uses the AIRTABLE_BASE_ID from the .env file 5 | * Demonstrates various operations with the Airtable API 6 | */ 7 | 8 | require('dotenv').config(); 9 | const baseUtils = require('../tools/airtable-base'); 10 | const crudUtils = require('../tools/airtable-crud'); 11 | const schemaUtils = require('../tools/airtable-schema'); 12 | 13 | // Constants 14 | const DEMO_TABLE_NAME = 'ENV Demo Table'; 15 | const SAMPLE_RECORDS = [ 16 | { Name: 'Record from ENV Demo', Description: 'Created using AIRTABLE_BASE_ID from .env file', Status: 'Active' }, 17 | { Name: 'Another ENV Record', Description: 'Second record from the environment demo', Status: 'Pending' } 18 | ]; 19 | 20 | async function runDemo() { 21 | console.log('================================='); 22 | console.log(' AIRTABLE ENV DEMO SCRIPT '); 23 | console.log('================================='); 24 | 25 | // Check environment variables 26 | if 
(!process.env.AIRTABLE_PERSONAL_ACCESS_TOKEN) { 27 | console.error('❌ Error: AIRTABLE_PERSONAL_ACCESS_TOKEN is not set in .env file'); 28 | process.exit(1); 29 | } 30 | 31 | if (!process.env.AIRTABLE_BASE_ID) { 32 | console.error('❌ Error: AIRTABLE_BASE_ID is not set in .env file'); 33 | process.exit(1); 34 | } 35 | 36 | const baseId = process.env.AIRTABLE_BASE_ID; 37 | console.log(`✅ Using AIRTABLE_BASE_ID: ${baseId}`); 38 | 39 | try { 40 | // Step 1: Verify base access 41 | console.log('\nStep 1: Verifying access to the base...'); 42 | const baseAccess = await baseUtils.checkBaseAccess(baseId); 43 | 44 | if (!baseAccess.accessible) { 45 | console.error(`❌ Error: Cannot access base with ID ${baseId}`); 46 | console.error(` Reason: ${baseAccess.error}`); 47 | process.exit(1); 48 | } 49 | 50 | console.log(`✅ Access confirmed to base: ${baseAccess.name}`); 51 | 52 | // Step 2: List existing tables 53 | console.log('\nStep 2: Listing existing tables...'); 54 | const tables = await baseUtils.listTables(baseId); 55 | console.log(`✅ Found ${tables.length} tables in the base`); 56 | 57 | // Step 3: Check if our demo table exists 58 | console.log('\nStep 3: Checking if demo table exists...'); 59 | const demoTableExists = await crudUtils.tableExists(baseId, DEMO_TABLE_NAME); 60 | 61 | if (demoTableExists) { 62 | console.log(`✅ Demo table "${DEMO_TABLE_NAME}" already exists`); 63 | } else { 64 | console.log(`ℹ️ Demo table "${DEMO_TABLE_NAME}" does not exist, creating it...`); 65 | 66 | // Step 4: Create the demo table 67 | console.log('\nStep 4: Creating the demo table...'); 68 | const tableConfig = { 69 | name: DEMO_TABLE_NAME, 70 | description: 'Table created from the Environment Demo script', 71 | fields: [ 72 | { 73 | name: 'Name', 74 | type: 'singleLineText', 75 | description: 'Record name' 76 | }, 77 | { 78 | name: 'Description', 79 | type: 'multilineText', 80 | description: 'Record description' 81 | }, 82 | { 83 | name: 'Status', 84 | type: 'singleSelect', 85 | options: { 86 | choices: [ 87 | { name: 'Active' }, 88 | { name: 'Pending' }, 89 | { name: 'Completed' } 90 | ] 91 | }, 92 | description: 'Current status' 93 | }, 94 | { 95 | name: 'Created', 96 | type: 'date', 97 | options: { 98 | dateFormat: { 99 | name: 'local' 100 | } 101 | }, 102 | description: 'Creation date' 103 | } 104 | ] 105 | }; 106 | 107 | await schemaUtils.createTable(baseId, tableConfig); 108 | console.log(`✅ Created demo table: ${DEMO_TABLE_NAME}`); 109 | } 110 | 111 | // Step 5: Create sample records 112 | console.log('\nStep 5: Creating sample records...'); 113 | // Add today's date to all records 114 | const recordsWithDate = SAMPLE_RECORDS.map(record => ({ 115 | ...record, 116 | Created: new Date().toISOString().split('T')[0] // Format as YYYY-MM-DD 117 | })); 118 | 119 | const createdRecords = await crudUtils.createRecords(baseId, DEMO_TABLE_NAME, recordsWithDate); 120 | console.log(`✅ Created ${createdRecords.length} sample records`); 121 | 122 | // Step 6: Read records back 123 | console.log('\nStep 6: Reading records from the table...'); 124 | const records = await crudUtils.readRecords(baseId, DEMO_TABLE_NAME, 100); 125 | console.log(`✅ Read ${records.length} records from the table`); 126 | 127 | console.log('\nSample record:'); 128 | console.log(JSON.stringify(records[0], null, 2)); 129 | 130 | // Step 7: Update a record 131 | console.log('\nStep 7: Updating the first record...'); 132 | const recordToUpdate = { 133 | id: createdRecords[0].id, 134 | fields: { 135 | Description: createdRecords[0].Description + ' 
(UPDATED)', 136 | Status: 'Completed' 137 | } 138 | }; 139 | 140 | const updatedRecords = await crudUtils.updateRecords(baseId, DEMO_TABLE_NAME, [recordToUpdate]); 141 | console.log(`✅ Updated ${updatedRecords.length} record`); 142 | 143 | // Step 8: Get the updated record 144 | console.log('\nStep 8: Getting the updated record...'); 145 | const updatedRecord = await crudUtils.getRecord(baseId, DEMO_TABLE_NAME, createdRecords[0].id); 146 | console.log('Updated record:'); 147 | console.log(JSON.stringify(updatedRecord, null, 2)); 148 | 149 | // Step 9: Demonstrate filtering records 150 | console.log('\nStep 9: Filtering records by status...'); 151 | const completedRecords = await crudUtils.readRecords(baseId, DEMO_TABLE_NAME, 100, 'Status="Completed"'); 152 | console.log(`✅ Found ${completedRecords.length} records with Status="Completed"`); 153 | 154 | console.log('\n================================='); 155 | console.log(' ENV DEMO COMPLETED '); 156 | console.log('================================='); 157 | console.log('\nThis script demonstrated:'); 158 | console.log('1. Loading environment variables from .env file'); 159 | console.log('2. Accessing an Airtable base using AIRTABLE_BASE_ID'); 160 | console.log('3. Creating a table (if it doesn\'t exist)'); 161 | console.log('4. Creating, reading, and updating records'); 162 | console.log('5. Filtering records using Airtable formulas'); 163 | console.log('\nAll operations used the AIRTABLE_BASE_ID environment variable'); 164 | 165 | } catch (error) { 166 | console.error(`❌ Error: ${error.message}`); 167 | process.exit(1); 168 | } 169 | } 170 | 171 | // Run the demo 172 | runDemo(); ``` -------------------------------------------------------------------------------- /tests/test_all_features.sh: -------------------------------------------------------------------------------- ```bash 1 | #!/bin/bash 2 | 3 | echo "🎯 COMPREHENSIVE TEST - AIRTABLE MCP v1.4.0" 4 | echo "===========================================" 5 | echo "" 6 | 7 | PASSED=0 8 | FAILED=0 9 | TOTAL=0 10 | 11 | # Test function 12 | test_feature() { 13 | local name=$1 14 | local result=$2 15 | ((TOTAL++)) 16 | 17 | if [ "$result" = "PASS" ]; then 18 | echo "✅ $name" 19 | ((PASSED++)) 20 | else 21 | echo "❌ $name" 22 | ((FAILED++)) 23 | fi 24 | } 25 | 26 | echo "📊 TESTING ALL 12 TOOLS" 27 | echo "=======================" 28 | echo "" 29 | 30 | # 1. List tables 31 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 32 | -d '{"jsonrpc": "2.0", "id": 1, "method": "tools/call", "params": {"name": "list_tables"}}') 33 | if [[ "$result" == *"table"* ]]; then 34 | test_feature "list_tables" "PASS" 35 | else 36 | test_feature "list_tables" "FAIL" 37 | fi 38 | 39 | # 2. Create record 40 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 41 | -d '{"jsonrpc": "2.0", "id": 2, "method": "tools/call", "params": {"name": "create_record", "arguments": {"table": "tblH7TnJxYpNqhQYK", "fields": {"Name": "Final Test", "Status": "Active"}}}}') 42 | if [[ "$result" == *"Successfully created"* ]]; then 43 | test_feature "create_record" "PASS" 44 | RECORD_ID=$(echo "$result" | grep -o 'rec[a-zA-Z0-9]\{10,20\}' | head -1) 45 | else 46 | test_feature "create_record" "FAIL" 47 | RECORD_ID="" 48 | fi 49 | 50 | # 3. Get record 51 | if [ ! 
-z "$RECORD_ID" ]; then 52 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 53 | -d "{\"jsonrpc\": \"2.0\", \"id\": 3, \"method\": \"tools/call\", \"params\": {\"name\": \"get_record\", \"arguments\": {\"table\": \"tblH7TnJxYpNqhQYK\", \"recordId\": \"$RECORD_ID\"}}}") 54 | [[ "$result" == *"Record $RECORD_ID"* ]] && test_feature "get_record" "PASS" || test_feature "get_record" "FAIL" 55 | else 56 | test_feature "get_record" "SKIP" 57 | fi 58 | 59 | # 4. Update record 60 | if [ ! -z "$RECORD_ID" ]; then 61 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 62 | -d "{\"jsonrpc\": \"2.0\", \"id\": 4, \"method\": \"tools/call\", \"params\": {\"name\": \"update_record\", \"arguments\": {\"table\": \"tblH7TnJxYpNqhQYK\", \"recordId\": \"$RECORD_ID\", \"fields\": {\"Status\": \"Completed\"}}}}") 63 | [[ "$result" == *"Successfully updated"* ]] && test_feature "update_record" "PASS" || test_feature "update_record" "FAIL" 64 | else 65 | test_feature "update_record" "SKIP" 66 | fi 67 | 68 | # 5. List records 69 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 70 | -d '{"jsonrpc": "2.0", "id": 5, "method": "tools/call", "params": {"name": "list_records", "arguments": {"table": "tblH7TnJxYpNqhQYK", "maxRecords": 3}}}') 71 | [[ "$result" == *"record"* ]] && test_feature "list_records" "PASS" || test_feature "list_records" "FAIL" 72 | 73 | # 6. Search records 74 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 75 | -d '{"jsonrpc": "2.0", "id": 6, "method": "tools/call", "params": {"name": "search_records", "arguments": {"table": "tblH7TnJxYpNqhQYK", "maxRecords": 3}}}') 76 | [[ "$result" == *"record"* ]] && test_feature "search_records" "PASS" || test_feature "search_records" "FAIL" 77 | 78 | # 7. Delete record 79 | if [ ! -z "$RECORD_ID" ]; then 80 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 81 | -d "{\"jsonrpc\": \"2.0\", \"id\": 7, \"method\": \"tools/call\", \"params\": {\"name\": \"delete_record\", \"arguments\": {\"table\": \"tblH7TnJxYpNqhQYK\", \"recordId\": \"$RECORD_ID\"}}}") 82 | [[ "$result" == *"Successfully deleted"* ]] && test_feature "delete_record" "PASS" || test_feature "delete_record" "FAIL" 83 | else 84 | test_feature "delete_record" "SKIP" 85 | fi 86 | 87 | # 8. List webhooks 88 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 89 | -d '{"jsonrpc": "2.0", "id": 8, "method": "tools/call", "params": {"name": "list_webhooks"}}') 90 | [[ "$result" == *"webhook"* ]] && test_feature "list_webhooks" "PASS" || test_feature "list_webhooks" "FAIL" 91 | 92 | # 9. Create webhook 93 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 94 | -d '{"jsonrpc": "2.0", "id": 9, "method": "tools/call", "params": {"name": "create_webhook", "arguments": {"notificationUrl": "https://webhook.site/test-final"}}}') 95 | if [[ "$result" == *"Successfully created"* ]]; then 96 | test_feature "create_webhook" "PASS" 97 | WEBHOOK_ID=$(echo "$result" | grep -o 'ach[a-zA-Z0-9]*' | head -1) 98 | else 99 | test_feature "create_webhook" "FAIL" 100 | WEBHOOK_ID="" 101 | fi 102 | 103 | # 10. Get webhook payloads 104 | if [ ! 
-z "$WEBHOOK_ID" ]; then 105 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 106 | -d "{\"jsonrpc\": \"2.0\", \"id\": 10, \"method\": \"tools/call\", \"params\": {\"name\": \"get_webhook_payloads\", \"arguments\": {\"webhookId\": \"$WEBHOOK_ID\"}}}") 107 | [[ "$result" == *"payload"* ]] && test_feature "get_webhook_payloads" "PASS" || test_feature "get_webhook_payloads" "FAIL" 108 | else 109 | test_feature "get_webhook_payloads" "SKIP" 110 | fi 111 | 112 | # 11. Refresh webhook 113 | if [ ! -z "$WEBHOOK_ID" ]; then 114 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 115 | -d "{\"jsonrpc\": \"2.0\", \"id\": 11, \"method\": \"tools/call\", \"params\": {\"name\": \"refresh_webhook\", \"arguments\": {\"webhookId\": \"$WEBHOOK_ID\"}}}") 116 | [[ "$result" == *"refreshed"* ]] && test_feature "refresh_webhook" "PASS" || test_feature "refresh_webhook" "FAIL" 117 | else 118 | test_feature "refresh_webhook" "SKIP" 119 | fi 120 | 121 | # 12. Delete webhook 122 | if [ ! -z "$WEBHOOK_ID" ]; then 123 | result=$(curl -s -X POST http://localhost:8010/mcp -H "Content-Type: application/json" \ 124 | -d "{\"jsonrpc\": \"2.0\", \"id\": 12, \"method\": \"tools/call\", \"params\": {\"name\": \"delete_webhook\", \"arguments\": {\"webhookId\": \"$WEBHOOK_ID\"}}}") 125 | [[ "$result" == *"deleted"* ]] && test_feature "delete_webhook" "PASS" || test_feature "delete_webhook" "FAIL" 126 | else 127 | test_feature "delete_webhook" "SKIP" 128 | fi 129 | 130 | echo "" 131 | echo "📈 FINAL RESULTS" 132 | echo "===============" 133 | echo "Total Tests: $TOTAL" 134 | echo "✅ Passed: $PASSED" 135 | echo "❌ Failed: $FAILED" 136 | echo "Success Rate: $(( PASSED * 100 / TOTAL ))%" 137 | 138 | if [ $FAILED -eq 0 ]; then 139 | echo "" 140 | echo "🎉 ALL TESTS PASSED! v1.4.0 is ready for production!" 141 | exit 0 142 | else 143 | echo "" 144 | echo "⚠️ $FAILED test(s) failed. Please review." 145 | exit 1 146 | fi ``` -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- ```markdown 1 | # 🚀 Pull Request - Trust Score 100/100 2 | 3 | <!-- 4 | Thank you for contributing to the Airtable MCP Server! 5 | Your contribution helps us achieve our goal of a perfect 100/100 Trust Score. 
6 | --> 7 | 8 | ## 📋 PR Information 9 | 10 | **PR Type**: <!-- Check all that apply --> 11 | - [ ] 🐛 Bug Fix 12 | - [ ] ✨ New Feature 13 | - [ ] 🔒 Security Enhancement 14 | - [ ] 📚 Documentation Update 15 | - [ ] 🧹 Code Refactoring 16 | - [ ] ⚡ Performance Improvement 17 | - [ ] 🧪 Test Enhancement 18 | - [ ] 🔧 Build/CI Changes 19 | - [ ] 💥 Breaking Change 20 | 21 | **Issue Reference**: 22 | <!-- Link to the issue this PR addresses --> 23 | - Closes #[issue_number] 24 | - Related to #[issue_number] 25 | 26 | ## 📝 Description 27 | 28 | ### What Changed 29 | <!-- Provide a clear and concise description of what this PR does --> 30 | 31 | ### Why This Change 32 | <!-- Explain the motivation behind this change --> 33 | 34 | ### How It Works 35 | <!-- Describe the technical approach and implementation --> 36 | 37 | ## 🎯 Trust Score Impact 38 | 39 | **Trust Score Categories Affected**: <!-- Check all that apply --> 40 | - [ ] 🛡️ Security & Authentication 41 | - [ ] 📊 Code Quality & Standards 42 | - [ ] 🧪 Testing & Reliability 43 | - [ ] 📚 Documentation & Usability 44 | - [ ] 🚀 Performance & Scalability 45 | - [ ] 🔧 CI/CD & Automation 46 | - [ ] 🌐 Protocol Compliance 47 | - [ ] 👥 Community & Support 48 | 49 | **Expected Impact**: 50 | <!-- Describe how this contributes to our 100/100 Trust Score goal --> 51 | 52 | ## 🧪 Testing Checklist 53 | 54 | ### Automated Tests 55 | - [ ] Unit tests added/updated 56 | - [ ] Integration tests added/updated 57 | - [ ] Security tests added/updated 58 | - [ ] Performance tests added/updated 59 | - [ ] All existing tests pass 60 | - [ ] Coverage maintained or improved 61 | 62 | ### Manual Testing 63 | - [ ] MCP protocol functionality verified 64 | - [ ] OAuth2 authentication tested (if applicable) 65 | - [ ] Rate limiting verified (if applicable) 66 | - [ ] Error handling tested 67 | - [ ] Edge cases covered 68 | - [ ] Backward compatibility confirmed 69 | 70 | ### Test Environment 71 | **Tested On**: 72 | - [ ] Node.js 16.x 73 | - [ ] Node.js 18.x 74 | - [ ] Node.js 20.x 75 | - [ ] Docker container 76 | - [ ] Multiple operating systems 77 | 78 | **MCP Clients Tested**: 79 | - [ ] Claude Desktop 80 | - [ ] Cursor IDE 81 | - [ ] VS Code with Cline 82 | - [ ] Custom MCP client 83 | 84 | ## 🔒 Security Review 85 | 86 | ### Security Checklist 87 | - [ ] No hardcoded secrets or credentials 88 | - [ ] Input validation implemented 89 | - [ ] Output sanitization applied 90 | - [ ] Authentication/authorization checked 91 | - [ ] SQL injection prevention verified 92 | - [ ] XSS prevention implemented 93 | - [ ] CSRF protection maintained 94 | - [ ] Rate limiting respected 95 | - [ ] Error messages don't leak sensitive info 96 | - [ ] Dependencies updated and secure 97 | 98 | ### Security Impact Assessment 99 | <!-- If this PR has security implications, describe them --> 100 | - **Authentication Changes**: 101 | - **Data Access Changes**: 102 | - **New Attack Vectors**: 103 | - **Mitigation Measures**: 104 | 105 | ## 📊 Performance Impact 106 | 107 | ### Performance Checklist 108 | - [ ] No significant performance regression 109 | - [ ] Memory usage optimized 110 | - [ ] Database queries optimized (if applicable) 111 | - [ ] Network requests minimized 112 | - [ ] Caching implemented where appropriate 113 | - [ ] Async/await used properly 114 | 115 | ### Benchmarks 116 | <!-- If applicable, include performance measurements --> 117 | **Before**: 118 | ``` 119 | Metric: [value] 120 | ``` 121 | 122 | **After**: 123 | ``` 124 | Metric: [value] 125 | ``` 126 | 127 | ## 📚 
Documentation 128 | 129 | ### Documentation Updates 130 | - [ ] README.md updated 131 | - [ ] API documentation updated 132 | - [ ] Code comments added/updated 133 | - [ ] Examples updated 134 | - [ ] Troubleshooting guide updated 135 | - [ ] CHANGELOG.md updated 136 | - [ ] Migration guide provided (for breaking changes) 137 | 138 | ### Documentation Quality 139 | - [ ] Clear and concise explanations 140 | - [ ] Code examples provided 141 | - [ ] Screenshots/diagrams included (if applicable) 142 | - [ ] Links verified and working 143 | 144 | ## 🔄 Breaking Changes 145 | 146 | ### Breaking Change Assessment 147 | - [ ] This is NOT a breaking change 148 | - [ ] This is a breaking change (explain below) 149 | 150 | <!-- If breaking change, provide details --> 151 | **Breaking Changes**: 152 | - **What breaks**: 153 | - **Migration path**: 154 | - **Deprecation timeline**: 155 | 156 | ## 🎬 Demo/Examples 157 | 158 | ### How to Test This PR 159 | ```bash 160 | # Step-by-step instructions to test this PR 161 | git checkout [branch-name] 162 | npm install 163 | # ... additional setup steps 164 | ``` 165 | 166 | ### Usage Examples 167 | ```javascript 168 | // Provide code examples showing the new functionality 169 | ``` 170 | 171 | ## 📋 Review Checklist 172 | 173 | ### Code Quality 174 | - [ ] Code follows project style guidelines 175 | - [ ] No console.log or debug statements 176 | - [ ] Error handling is comprehensive 177 | - [ ] Code is well-commented 178 | - [ ] Functions are properly documented 179 | - [ ] Variable names are descriptive 180 | - [ ] Magic numbers avoided 181 | 182 | ### Git History 183 | - [ ] Commit messages are clear and descriptive 184 | - [ ] Commits are logically organized 185 | - [ ] No merge commits (rebased if needed) 186 | - [ ] No sensitive information in commit history 187 | 188 | ## 🤝 Collaboration 189 | 190 | ### Review Requests 191 | **Reviewers Needed**: 192 | - [ ] Security review required 193 | - [ ] Performance review required 194 | - [ ] Documentation review required 195 | - [ ] UI/UX review required 196 | 197 | **Specific Review Areas**: 198 | <!-- Ask reviewers to focus on specific aspects --> 199 | - Please review the OAuth2 implementation for security 200 | - Please check the new API endpoints for usability 201 | - Please verify the documentation is clear 202 | 203 | ### Follow-up Tasks 204 | <!-- List any follow-up work needed --> 205 | - [ ] Create/update related issues 206 | - [ ] Plan future enhancements 207 | - [ ] Update project roadmap 208 | - [ ] Coordinate with documentation team 209 | 210 | ## 🎯 Success Criteria 211 | 212 | ### Definition of Done 213 | - [ ] All acceptance criteria met 214 | - [ ] All tests passing 215 | - [ ] Security review completed 216 | - [ ] Documentation updated 217 | - [ ] Performance impact assessed 218 | - [ ] Backward compatibility verified 219 | - [ ] CI/CD pipeline passing 220 | 221 | ### Trust Score Validation 222 | - [ ] Contributes to security improvements 223 | - [ ] Maintains or improves code quality 224 | - [ ] Includes comprehensive testing 225 | - [ ] Provides clear documentation 226 | - [ ] Follows community best practices 227 | 228 | ## 📸 Screenshots/Media 229 | 230 | <!-- Include screenshots, GIFs, or videos demonstrating the changes --> 231 | 232 | ## 🙏 Acknowledgments 233 | 234 | <!-- Thank contributors, mention inspiration, or credit sources --> 235 | 236 | --- 237 | 238 | ## 📞 Need Help? 
239 | 240 | - 💬 **Questions**: Start a [discussion](https://github.com/rashidazarang/airtable-mcp/discussions) 241 | - 🐛 **Issues**: Check our [issue tracker](https://github.com/rashidazarang/airtable-mcp/issues) 242 | - 📚 **Docs**: Read our [documentation](./README.md) 243 | - 🔒 **Security**: Email security@[domain] for private matters 244 | 245 | **🎯 Our Mission**: Building the most trusted and comprehensive MCP server for Airtable with a perfect **100/100 Trust Score**. Thank you for contributing to this goal! 🚀 ``` -------------------------------------------------------------------------------- /RELEASE_SUMMARY_v3.2.x.md: -------------------------------------------------------------------------------- ```markdown 1 | # Release Summary: v3.2.1 - v3.2.4 2 | ## Major Security & Architecture Updates 3 | 4 | This document summarizes all releases from v3.2.1 to v3.2.4, representing a comprehensive overhaul of the Airtable MCP server with critical security fixes and architectural improvements. 5 | 6 | --- 7 | 8 | ## 📦 v3.2.4 - Complete XSS Security Fix 9 | **Released:** September 9, 2025 10 | **Type:** 🔒 Security Release 11 | **GitHub Alerts:** #10 & #11 Resolved 12 | 13 | ### What's Fixed 14 | - **XSS Vulnerabilities** in OAuth2 endpoint (`airtable_simple_production.js:708-710`) 15 | - ✅ Unicode escaping for all special characters in JSON 16 | - ✅ Using `textContent` instead of `innerHTML` for dynamic content 17 | - ✅ Multiple layers of character escaping 18 | - ✅ Defense-in-depth XSS prevention 19 | 20 | ### Technical Details 21 | ```javascript 22 | // Before (Vulnerable) 23 | var config = ${JSON.stringify(data)}; 24 | <p>Client ID: ${clientId}</p> 25 | 26 | // After (Secure) 27 | var config = ${safeJsonConfig}; // Unicode-escaped 28 | document.getElementById('client-id').textContent = clientId; 29 | ``` 30 | 31 | --- 32 | 33 | ## 📦 v3.2.3 - Command Injection Complete Fix 34 | **Released:** September 9, 2025 35 | **Type:** 🔒 Security Release 36 | **GitHub Alert:** #10 (Python) Resolved 37 | 38 | ### What's Fixed 39 | - **Command Injection** in Python test client (`test_client.py`) 40 | - ✅ BASE_ID validation at startup 41 | - ✅ Eliminated string interpolation vulnerabilities 42 | - ✅ Path traversal protection 43 | - ✅ Token format validation 44 | - ✅ Complete input sanitization 45 | 46 | ### Security Improvements 47 | ```python 48 | # Before (Vulnerable) 49 | result = api_call(f"meta/bases/{BASE_ID}/tables") 50 | 51 | # After (Secure) 52 | # BASE_ID validated at startup 53 | if not all(c.isalnum() or c in '-_' for c in BASE_ID): 54 | print(f"Error: Invalid BASE_ID format") 55 | sys.exit(1) 56 | endpoint = "meta/bases/" + BASE_ID + "/tables" 57 | ``` 58 | 59 | --- 60 | 61 | ## 📦 v3.2.2 - Initial Security Patches 62 | **Released:** September 9, 2025 63 | **Type:** 🔒 Security Release 64 | **GitHub Alert:** #10 Partial Fix 65 | 66 | ### What's Fixed 67 | - **Initial command injection fixes** in `test_client.py` 68 | - ✅ Added input validation for API endpoints 69 | - ✅ Removed unused subprocess import 70 | - ✅ Basic endpoint sanitization 71 | 72 | ### Note 73 | This was a partial fix. Complete resolution came in v3.2.3. 
74 | 75 | --- 76 | 77 | ## 📦 v3.2.1 - TypeScript Architecture Fix & Project Restructure 78 | **Released:** September 9, 2025 79 | **Type:** 🏗️ Major Architecture Update 80 | 81 | ### Critical Fix 82 | - **TypeScript Compilation Issue** completely resolved 83 | - ✅ Fixed `.d.ts` files containing runtime code 84 | - ✅ Proper separation of types and implementation 85 | 86 | ### New Files Created 87 | ``` 88 | src/typescript/ 89 | ├── errors.ts # Runtime error classes 90 | ├── tools-schemas.ts # Tool schema constants 91 | └── prompt-templates.ts # AI prompt templates 92 | ``` 93 | 94 | ### Project Restructure 95 | ``` 96 | airtable-mcp/ 97 | ├── src/ 98 | │ ├── index.js # Main entry point 99 | │ ├── typescript/ # TypeScript implementation 100 | │ ├── javascript/ # JavaScript implementation 101 | │ └── python/ # Python implementation 102 | ├── dist/ # Compiled output 103 | ├── docs/ 104 | │ ├── guides/ # User guides 105 | │ └── releases/ # Release notes 106 | ├── tests/ # All test files 107 | └── types/ # TypeScript definitions 108 | ``` 109 | 110 | ### What Changed 111 | - ✅ World-class project organization 112 | - ✅ TypeScript now compiles successfully 113 | - ✅ Proper build system with npm scripts 114 | - ✅ ESLint and Prettier configurations 115 | - ✅ Jest testing framework setup 116 | - ✅ CI/CD pipeline structure 117 | 118 | --- 119 | 120 | ## 🎯 Combined Impact 121 | 122 | ### Security Fixes Summary 123 | | Alert | Type | File | Version | Status | 124 | |-------|------|------|---------|---------| 125 | | #10 | XSS | `airtable_simple_production.js:708` | v3.2.4 | ✅ Fixed | 126 | | #11 | XSS | `airtable_simple_production.js:710` | v3.2.4 | ✅ Fixed | 127 | | #10 | Command Injection | `test_client.py` | v3.2.3 | ✅ Fixed | 128 | 129 | ### Architecture Improvements 130 | - ✅ TypeScript compilation working 131 | - ✅ Proper file organization 132 | - ✅ Clean separation of concerns 133 | - ✅ Professional build system 134 | - ✅ Comprehensive testing setup 135 | 136 | ### Backwards Compatibility 137 | ✅ **No breaking changes** across all versions 138 | - All existing functionality preserved 139 | - API endpoints unchanged 140 | - Both JS and TS implementations working 141 | 142 | --- 143 | 144 | ## 📥 Installation 145 | 146 | ### New Installation 147 | ```bash 148 | npm install @rashidazarang/[email protected] 149 | ``` 150 | 151 | ### Update from Any Previous Version 152 | ```bash 153 | npm update @rashidazarang/airtable-mcp 154 | ``` 155 | 156 | ### Verify Installation 157 | ```bash 158 | npm list @rashidazarang/airtable-mcp 159 | # Should show: @rashidazarang/[email protected] 160 | ``` 161 | 162 | --- 163 | 164 | ## 🚀 Quick Start 165 | 166 | ### JavaScript 167 | ```bash 168 | AIRTABLE_TOKEN=your_token AIRTABLE_BASE_ID=your_base \ 169 | node node_modules/@rashidazarang/airtable-mcp/src/javascript/airtable_simple_production.js 170 | ``` 171 | 172 | ### TypeScript 173 | ```bash 174 | # Build first 175 | npm run build 176 | 177 | # Then run 178 | AIRTABLE_TOKEN=your_token AIRTABLE_BASE_ID=your_base \ 179 | node node_modules/@rashidazarang/airtable-mcp/dist/typescript/airtable-mcp-server.js 180 | ``` 181 | 182 | --- 183 | 184 | ## 📋 Migration Guide 185 | 186 | ### From v3.0.x or earlier 187 | 1. Update to v3.2.4: `npm update @rashidazarang/airtable-mcp` 188 | 2. If using TypeScript, rebuild: `npm run build` 189 | 3. No code changes required 190 | 191 | ### From v3.1.x 192 | 1. Update to v3.2.4: `npm update @rashidazarang/airtable-mcp` 193 | 2. 
No changes required - security patches only 194 | 195 | ### From v3.2.1-3.2.3 196 | 1. Update to v3.2.4: `npm update @rashidazarang/airtable-mcp` 197 | 2. Get latest security fixes 198 | 199 | --- 200 | 201 | ## ⚠️ Important Security Notice 202 | 203 | **All users should update to v3.2.4 immediately** to get: 204 | - Complete XSS protection in OAuth2 flows 205 | - Full command injection prevention 206 | - Path traversal protection 207 | - Comprehensive input validation 208 | 209 | --- 210 | 211 | ## 📊 Version Comparison 212 | 213 | | Feature | v3.2.1 | v3.2.2 | v3.2.3 | v3.2.4 | 214 | |---------|--------|--------|--------|--------| 215 | | TypeScript Compilation | ✅ Fixed | ✅ | ✅ | ✅ | 216 | | Project Structure | ✅ New | ✅ | ✅ | ✅ | 217 | | Command Injection Fix | ❌ | ⚠️ Partial | ✅ Complete | ✅ | 218 | | XSS Protection | ❌ | ❌ | ❌ | ✅ Complete | 219 | | Production Ready | ✅ | ✅ | ✅ | ✅ | 220 | 221 | --- 222 | 223 | ## 🙏 Acknowledgments 224 | 225 | - GitHub Security Scanning for identifying vulnerabilities 226 | - Community for patience during rapid security updates 227 | - Contributors to the TypeScript architecture improvements 228 | 229 | --- 230 | 231 | ## 📚 Resources 232 | 233 | - **Repository:** https://github.com/rashidazarang/airtable-mcp 234 | - **Issues:** https://github.com/rashidazarang/airtable-mcp/issues 235 | - **NPM:** https://www.npmjs.com/package/@rashidazarang/airtable-mcp 236 | - **Changelog:** [CHANGELOG.md](./CHANGELOG.md) 237 | 238 | --- 239 | 240 | **Current Version: v3.2.4** 241 | **Status: Fully Secure & Production Ready** 242 | **Last Updated: September 9, 2025** ``` -------------------------------------------------------------------------------- /examples/airtable-crud-example.js: -------------------------------------------------------------------------------- ```javascript 1 | /** 2 | * Example script demonstrating how to use the Airtable CRUD utilities 3 | */ 4 | const dotenv = require('dotenv'); 5 | const baseUtils = require('../tools/airtable-base'); 6 | const crudUtils = require('../tools/airtable-crud'); 7 | const schemaUtils = require('../tools/airtable-schema'); 8 | 9 | // Load environment variables 10 | dotenv.config(); 11 | 12 | // Configuration 13 | const EXAMPLE_TABLE_NAME = 'Example Tasks'; 14 | const EXAMPLE_RECORDS = [ 15 | { 16 | Name: 'Complete project documentation', 17 | Description: 'Write comprehensive documentation for the project', 18 | Status: 'Not Started', 19 | Priority: 'High', 20 | DueDate: '2023-12-31' 21 | }, 22 | { 23 | Name: 'Fix login bug', 24 | Description: 'Users are experiencing issues with the login process', 25 | Status: 'In Progress', 26 | Priority: 'Critical', 27 | DueDate: '2023-11-15' 28 | }, 29 | { 30 | Name: 'Add new feature', 31 | Description: 'Implement the new feature requested by the client', 32 | Status: 'Not Started', 33 | Priority: 'Medium', 34 | DueDate: '2024-01-15' 35 | } 36 | ]; 37 | 38 | /** 39 | * Main function to run the example 40 | */ 41 | async function runExample() { 42 | console.log('Starting Airtable CRUD Example...\n'); 43 | 44 | const baseId = process.env.AIRTABLE_BASE_ID; 45 | if (!baseId) { 46 | console.error('AIRTABLE_BASE_ID not set in .env file'); 47 | process.exit(1); 48 | } 49 | 50 | try { 51 | // Step 1: Check if we have access to the base 52 | console.log('Step 1: Checking base access...'); 53 | const bases = await baseUtils.listAllBases(); 54 | const hasAccess = bases.some(base => base.id === baseId); 55 | 56 | if (!hasAccess) { 57 | throw new Error(`No access to base with ID: 
${baseId}`); 58 | } 59 | 60 | console.log(`✅ Access confirmed to base: ${baseId}\n`); 61 | 62 | // Step 2: List existing tables 63 | console.log('Step 2: Listing existing tables...'); 64 | const tables = await baseUtils.listTables(baseId); 65 | console.log(`Found ${tables.length} tables in the base:`); 66 | tables.forEach(table => console.log(`- ${table.name}`)); 67 | console.log(); 68 | 69 | // Step 3: Check if our example table exists 70 | console.log('Step 3: Checking if example table exists...'); 71 | let tableExists = await crudUtils.tableExists(baseId, EXAMPLE_TABLE_NAME); 72 | 73 | if (tableExists) { 74 | console.log(`Table "${EXAMPLE_TABLE_NAME}" already exists\n`); 75 | } else { 76 | console.log(`Table "${EXAMPLE_TABLE_NAME}" does not exist, creating it...\n`); 77 | 78 | // Step 4: Create the example table 79 | console.log('Step 4: Creating example table...'); 80 | const tableConfig = { 81 | name: EXAMPLE_TABLE_NAME, 82 | description: 'Example table for demonstrating CRUD operations', 83 | fields: [ 84 | { 85 | name: 'Name', 86 | type: 'singleLineText', 87 | description: 'Task name' 88 | }, 89 | { 90 | name: 'Description', 91 | type: 'multilineText', 92 | description: 'Task description' 93 | }, 94 | { 95 | name: 'Status', 96 | type: 'singleSelect', 97 | options: { 98 | choices: [ 99 | { name: 'Not Started' }, 100 | { name: 'In Progress' }, 101 | { name: 'Completed' } 102 | ] 103 | }, 104 | description: 'Current status of the task' 105 | }, 106 | { 107 | name: 'Priority', 108 | type: 'singleSelect', 109 | options: { 110 | choices: [ 111 | { name: 'Low' }, 112 | { name: 'Medium' }, 113 | { name: 'High' }, 114 | { name: 'Critical' } 115 | ] 116 | }, 117 | description: 'Task priority' 118 | }, 119 | { 120 | name: 'DueDate', 121 | type: 'date', 122 | description: 'When the task is due', 123 | options: { 124 | dateFormat: { 125 | name: 'local' 126 | } 127 | } 128 | } 129 | ] 130 | }; 131 | 132 | await schemaUtils.createTable(baseId, tableConfig); 133 | console.log(`✅ Created table: ${EXAMPLE_TABLE_NAME}\n`); 134 | } 135 | 136 | // Step 5: Create records 137 | console.log('Step 5: Creating example records...'); 138 | const createdRecords = await crudUtils.createRecords(baseId, EXAMPLE_TABLE_NAME, EXAMPLE_RECORDS); 139 | console.log(`✅ Created ${createdRecords.length} records\n`); 140 | 141 | // Step 6: Read all records 142 | console.log('Step 6: Reading all records...'); 143 | const allRecords = await crudUtils.readRecords(baseId, EXAMPLE_TABLE_NAME, 100); 144 | console.log(`✅ Read ${allRecords.length} records`); 145 | console.log('Sample record:'); 146 | console.log(JSON.stringify(allRecords[0], null, 2)); 147 | console.log(); 148 | 149 | // Step 7: Filter records 150 | console.log('Step 7: Filtering records by status...'); 151 | const notStartedRecords = await crudUtils.readRecords( 152 | baseId, 153 | EXAMPLE_TABLE_NAME, 154 | 100, 155 | 'Status="Not Started"' 156 | ); 157 | console.log(`✅ Found ${notStartedRecords.length} records with Status="Not Started"`); 158 | notStartedRecords.forEach(record => console.log(`- ${record.Name} (Priority: ${record.Priority})`)); 159 | console.log(); 160 | 161 | // Step 8: Update records 162 | console.log('Step 8: Updating records...'); 163 | const recordsToUpdate = notStartedRecords.map(record => ({ 164 | id: record.id, 165 | fields: { Status: 'In Progress' } 166 | })); 167 | 168 | const updatedRecords = await crudUtils.updateRecords(baseId, EXAMPLE_TABLE_NAME, recordsToUpdate); 169 | console.log(`✅ Updated ${updatedRecords.length} records to 
Status="In Progress"\n`); 170 | 171 | // Step 9: Verify updates 172 | console.log('Step 9: Verifying updates...'); 173 | const inProgressRecords = await crudUtils.readRecords( 174 | baseId, 175 | EXAMPLE_TABLE_NAME, 176 | 100, 177 | 'Status="In Progress"' 178 | ); 179 | console.log(`✅ Found ${inProgressRecords.length} records with Status="In Progress"`); 180 | inProgressRecords.forEach(record => console.log(`- ${record.Name} (Priority: ${record.Priority})`)); 181 | console.log(); 182 | 183 | // Step 10: Delete records (optional - commented out to preserve data) 184 | console.log('Step 10: Deleting records (optional)...'); 185 | console.log('Skipping deletion to preserve example data.'); 186 | console.log('To delete records, uncomment the code below:'); 187 | console.log('```'); 188 | console.log('const recordIdsToDelete = allRecords.map(record => record.id);'); 189 | console.log('const deletedRecords = await crudUtils.deleteRecords(baseId, EXAMPLE_TABLE_NAME, recordIdsToDelete);'); 190 | console.log('console.log(`✅ Deleted ${deletedRecords.length} records`);'); 191 | console.log('```\n'); 192 | 193 | console.log('Example completed successfully!'); 194 | console.log('You can now view the data in your Airtable base.'); 195 | 196 | } catch (error) { 197 | console.error('Error during example:', error.message); 198 | process.exit(1); 199 | } 200 | } 201 | 202 | // Run the example 203 | runExample(); ``` -------------------------------------------------------------------------------- /docs/releases/RELEASE_NOTES_v1.5.0.md: -------------------------------------------------------------------------------- ```markdown 1 | # 🚀 Airtable MCP Server v1.5.0 Release Notes 2 | 3 | **Release Date**: August 15, 2025 4 | **Major Update**: Enhanced Schema Management & Advanced Features 5 | 6 | ## 🎯 Overview 7 | 8 | Version 1.5.0 represents a **major expansion** of the Airtable MCP Server, adding comprehensive schema management capabilities inspired by the best features from domdomegg's airtable-mcp-server while maintaining our unique webhook support. This release **doubles** the number of available tools from 12 to **23 tools**. 9 | 10 | ## ✨ New Features 11 | 12 | ### 📊 Schema Discovery Tools (6 New Tools) 13 | 14 | 1. **`list_bases`** - Discover all accessible Airtable bases 15 | - Lists all bases with permissions 16 | - Supports pagination with offset parameter 17 | - Shows base names, IDs, and permission levels 18 | 19 | 2. **`get_base_schema`** - Complete base schema information 20 | - Detailed table structures and relationships 21 | - Field definitions with types and options 22 | - View configurations and metadata 23 | 24 | 3. **`describe_table`** - Enhanced table inspection 25 | - Comprehensive field information including IDs, types, descriptions 26 | - View details and configurations 27 | - Much more detailed than the basic `list_tables` 28 | 29 | 4. **`list_field_types`** - Field type reference 30 | - Complete documentation of all Airtable field types 31 | - Includes basic fields (text, number, date) and advanced fields (formulas, lookups) 32 | - Helpful for understanding what field types are available for creation 33 | 34 | 5. **`get_table_views`** - View management 35 | - Lists all views for a specific table 36 | - Shows view types, IDs, and configurations 37 | - Includes visible field information 38 | 39 | ### 🏗️ Table Management Tools (3 New Tools) 40 | 41 | 6. 
**`create_table`** - Programmatic table creation 42 | - Create new tables with custom field definitions 43 | - Support for all field types with proper validation 44 | - Optional table descriptions 45 | 46 | 7. **`update_table`** - Table metadata modification 47 | - Update table names and descriptions 48 | - Non-destructive metadata changes 49 | 50 | 8. **`delete_table`** - Table removal (with safety checks) 51 | - Requires explicit confirmation with `confirm=true` 52 | - Permanently removes table and all data 53 | - Safety warnings to prevent accidental deletions 54 | 55 | ### 🔧 Field Management Tools (4 New Tools) 56 | 57 | 9. **`create_field`** - Add fields to existing tables 58 | - Support for all Airtable field types 59 | - Custom field options and descriptions 60 | - Validates field types and configurations 61 | 62 | 10. **`update_field`** - Modify existing field properties 63 | - Update field names, descriptions, and options 64 | - Change field configurations safely 65 | 66 | 11. **`delete_field`** - Remove fields (with safety checks) 67 | - Requires explicit confirmation with `confirm=true` 68 | - Permanently removes field and all data 69 | - Safety warnings to prevent accidental deletions 70 | 71 | ## 🔄 Enhanced Existing Features 72 | 73 | - **Improved error handling** for all metadata operations 74 | - **Better table/field lookup** supporting both names and IDs 75 | - **Enhanced validation** for destructive operations 76 | - **Consistent response formatting** across all tools 77 | 78 | ## 📊 Tool Count Summary 79 | 80 | | Category | v1.4.0 | v1.5.0 | New in v1.5.0 | 81 | |----------|--------|--------|----------------| 82 | | **Data Operations** | 7 | 7 | - | 83 | | **Webhook Management** | 5 | 5 | - | 84 | | **Schema Management** | 0 | 11 | ✅ 11 new tools | 85 | | **Total Tools** | **12** | **23** | **+11 tools** | 86 | 87 | ## 🛠️ Technical Improvements 88 | 89 | ### API Enhancements 90 | - **Metadata API Support**: Full integration with Airtable's metadata API endpoints 91 | - **Enhanced callAirtableAPI Function**: Already supported metadata endpoints 92 | - **Improved Error Handling**: Better error messages for schema operations 93 | 94 | ### Security & Safety 95 | - **Confirmation Required**: Destructive operations require explicit confirmation 96 | - **Validation Checks**: Proper field type and option validation 97 | - **Safety Warnings**: Clear warnings for irreversible operations 98 | 99 | ### Authentication 100 | - **Extended Scope Support**: Now leverages `schema.bases:read` and `schema.bases:write` scopes 101 | - **Backward Compatibility**: All existing functionality remains unchanged 102 | 103 | ## 📚 New Capabilities 104 | 105 | ### For Users 106 | - **Complete Base Discovery**: Find and explore all accessible bases 107 | - **Advanced Schema Inspection**: Understand table and field structures in detail 108 | - **Programmatic Table Creation**: Build tables through natural language 109 | - **Dynamic Field Management**: Add, modify, and remove fields as needed 110 | - **Comprehensive Field Reference**: Quick access to all available field types 111 | 112 | ### For Developers 113 | - **Full CRUD for Schema**: Complete Create, Read, Update, Delete operations for tables and fields 114 | - **Metadata-First Approach**: Rich schema information before data operations 115 | - **Enhanced Automation**: Build complex Airtable structures programmatically 116 | 117 | ## 🚀 Getting Started with v1.5.0 118 | 119 | ### Installation 120 | ```bash 121 | npm install -g @rashidazarang/airtable-mcp@1.5.0
122 | ``` 123 | 124 | ### Required Token Scopes 125 | For full v1.5.0 functionality, ensure your Airtable Personal Access Token includes: 126 | - `data.records:read` - Read records 127 | - `data.records:write` - Create, update, delete records 128 | - `schema.bases:read` - View table schemas (**New requirement**) 129 | - `schema.bases:write` - Create, modify tables and fields (**New requirement**) 130 | - `webhook:manage` - Webhook operations (optional) 131 | 132 | ### Example Usage 133 | 134 | ```javascript 135 | // Discover available bases 136 | "List all my accessible Airtable bases" 137 | 138 | // Explore a base structure 139 | "Show me the complete schema for this base" 140 | 141 | // Create a new table 142 | "Create a new table called 'Projects' with fields: Name (text), Status (single select with options: Active, Completed, On Hold), and Due Date (date)" 143 | 144 | // Add a field to existing table 145 | "Add a 'Priority' field to the Projects table as a single select with options: Low, Medium, High" 146 | 147 | // Get detailed table information 148 | "Describe the Projects table with all field details" 149 | ``` 150 | 151 | ## 🔧 Breaking Changes 152 | 153 | **None** - v1.5.0 is fully backward compatible with v1.4.0. All existing tools and functionality remain unchanged. 154 | 155 | ## 🐛 Bug Fixes 156 | 157 | - **Security**: Fixed clear-text logging of sensitive information (GitHub security alerts) 158 | - **API Error Handling**: Improved error messages for invalid table/field references 159 | - **Response Formatting**: Consistent JSON response structure across all tools 160 | 161 | ## 🌟 What's Next 162 | 163 | - Enhanced search capabilities with field-specific filtering 164 | - Batch operations for bulk table/field management 165 | - Advanced view creation and management 166 | - Performance optimizations for large bases 167 | 168 | ## 📈 Performance & Compatibility 169 | 170 | - **Node.js**: Requires Node.js 14+ 171 | - **Rate Limits**: Respects Airtable's 5 requests/second limit 172 | - **Memory Usage**: Optimized for efficient schema operations 173 | - **Response Times**: Fast metadata operations with caching 174 | 175 | ## 🤝 Community & Support 176 | 177 | This release incorporates community feedback and feature requests. The v1.5.0 implementation draws inspiration from domdomegg's airtable-mcp-server while maintaining our unique webhook capabilities and enhanced error handling. 178 | 179 | **GitHub**: https://github.com/rashidazarang/airtable-mcp 180 | **NPM**: https://www.npmjs.com/package/@rashidazarang/airtable-mcp 181 | **Issues**: https://github.com/rashidazarang/airtable-mcp/issues 182 | 183 | --- 184 | 185 | 🎉 **Thank you for using Airtable MCP Server!** This release makes it the most comprehensive Airtable integration available for AI assistants, combining powerful schema management with robust webhook support. 
``` -------------------------------------------------------------------------------- /tests/test_v1.6.0_comprehensive.sh: -------------------------------------------------------------------------------- ```bash 1 | #!/bin/bash 2 | 3 | # COMPREHENSIVE TEST SUITE - Airtable MCP Server v1.6.0 4 | # Testing ALL 33 tools including 10 new v1.6.0 features 5 | 6 | set -e 7 | SERVER_URL="http://localhost:8010/mcp" 8 | PASSED=0 9 | FAILED=0 10 | BATCH_RECORD_IDS=() 11 | 12 | echo "🚀 COMPREHENSIVE TEST SUITE - v1.6.0" 13 | echo "====================================" 14 | echo "Testing ALL 33 tools with real API calls" 15 | echo "New in v1.6.0: Batch operations, attachments, advanced views, base management" 16 | echo "" 17 | 18 | # Function to make MCP calls 19 | call_tool() { 20 | local tool_name="$1" 21 | local params="$2" 22 | curl -s -X POST "$SERVER_URL" \ 23 | -H "Content-Type: application/json" \ 24 | -d "{\"jsonrpc\": \"2.0\", \"id\": 1, \"method\": \"tools/call\", \"params\": {\"name\": \"$tool_name\", \"arguments\": $params}}" 25 | } 26 | 27 | # Enhanced test function 28 | test_tool() { 29 | local tool_name="$1" 30 | local params="$2" 31 | local description="$3" 32 | local expect_fail="$4" 33 | 34 | echo -n "🔧 $tool_name: $description... " 35 | 36 | if result=$(call_tool "$tool_name" "$params" 2>&1); then 37 | if echo "$result" | jq -e '.result.content[0].text' > /dev/null 2>&1; then 38 | response_text=$(echo "$result" | jq -r '.result.content[0].text') 39 | if [[ "$expect_fail" == "true" ]]; then 40 | if echo "$response_text" | grep -q "error\|Error\|not found\|Unknown field"; then 41 | echo "✅ PASS (Expected failure)" 42 | ((PASSED++)) 43 | else 44 | echo "❌ FAIL (Should have failed)" 45 | ((FAILED++)) 46 | fi 47 | else 48 | echo "✅ PASS" 49 | ((PASSED++)) 50 | # Store batch record IDs for cleanup 51 | if [[ "$tool_name" == "batch_create_records" ]]; then 52 | while IFS= read -r line; do 53 | if [[ $line =~ ID:\ (rec[a-zA-Z0-9]+) ]]; then 54 | BATCH_RECORD_IDS+=(${BASH_REMATCH[1]}) 55 | fi 56 | done <<< "$response_text" 57 | fi 58 | fi 59 | else 60 | if echo "$result" | jq -e '.error' > /dev/null 2>&1; then 61 | error_msg=$(echo "$result" | jq -r '.error.message') 62 | if [[ "$expect_fail" == "true" ]]; then 63 | echo "✅ PASS (Expected error: $error_msg)" 64 | ((PASSED++)) 65 | else 66 | echo "❌ FAIL (API Error: $error_msg)" 67 | ((FAILED++)) 68 | fi 69 | else 70 | echo "❌ FAIL (Invalid response)" 71 | ((FAILED++)) 72 | fi 73 | fi 74 | else 75 | echo "❌ FAIL (Request failed)" 76 | ((FAILED++)) 77 | fi 78 | } 79 | 80 | echo "📊 PHASE 1: Original Data Operations (7 tools)" 81 | echo "==============================================" 82 | 83 | test_tool "list_tables" "{}" "List all tables" 84 | test_tool "list_records" "{\"table\": \"Test Table CRUD\", \"maxRecords\": 2}" "List limited records" 85 | test_tool "search_records" "{\"table\": \"Test Table CRUD\", \"searchTerm\": \"test\"}" "Search records" 86 | 87 | echo "" 88 | echo "🪝 PHASE 2: Webhook Management (5 tools)" 89 | echo "========================================" 90 | 91 | test_tool "list_webhooks" "{}" "List existing webhooks" 92 | 93 | echo "" 94 | echo "🏗️ PHASE 3: Schema Management (11 tools)" 95 | echo "========================================" 96 | 97 | test_tool "list_bases" "{}" "List accessible bases" 98 | test_tool "get_base_schema" "{}" "Get complete base schema" 99 | test_tool "describe_table" "{\"table\": \"Test Table CRUD\"}" "Describe table details" 100 | test_tool "list_field_types" "{}" "List field types reference" 101 
| test_tool "get_table_views" "{\"table\": \"Test Table CRUD\"}" "Get table views" 102 | 103 | echo "" 104 | echo "🚀 PHASE 4: NEW v1.6.0 Batch Operations (4 tools)" 105 | echo "=================================================" 106 | 107 | test_tool "batch_create_records" "{\"table\": \"Test Table CRUD\", \"records\": [{\"fields\": {\"Name\": \"Batch Test A\", \"Description\": \"Batch created\", \"Status\": \"Testing\"}}, {\"fields\": {\"Name\": \"Batch Test B\", \"Description\": \"Also batch created\", \"Status\": \"Testing\"}}]}" "Create multiple records at once" 108 | 109 | # Test batch operations with the created records 110 | if [ ${#BATCH_RECORD_IDS[@]} -ge 2 ]; then 111 | test_tool "batch_update_records" "{\"table\": \"Test Table CRUD\", \"records\": [{\"id\": \"${BATCH_RECORD_IDS[0]}\", \"fields\": {\"Status\": \"Updated\"}}, {\"id\": \"${BATCH_RECORD_IDS[1]}\", \"fields\": {\"Status\": \"Updated\"}}]}" "Update multiple records at once" 112 | test_tool "batch_delete_records" "{\"table\": \"Test Table CRUD\", \"recordIds\": [\"${BATCH_RECORD_IDS[0]}\", \"${BATCH_RECORD_IDS[1]}\"]}" "Delete multiple records at once" 113 | else 114 | echo "⚠️ Skipping batch update/delete tests (no record IDs)" 115 | ((FAILED += 2)) 116 | fi 117 | 118 | # Test batch limits 119 | test_tool "batch_create_records" "{\"table\": \"Test Table CRUD\", \"records\": []}" "Test with empty records array" "true" 120 | 121 | echo "" 122 | echo "📎 PHASE 5: NEW v1.6.0 Attachment Operations (1 tool)" 123 | echo "====================================================" 124 | 125 | # Test attachment with non-existent field (expected to fail) 126 | test_tool "upload_attachment" "{\"table\": \"Test Table CRUD\", \"recordId\": \"recDummyID\", \"fieldName\": \"NonExistentField\", \"url\": \"https://via.placeholder.com/150.png\"}" "Test attachment to non-existent field" "true" 127 | 128 | echo "" 129 | echo "👁️ PHASE 6: NEW v1.6.0 Advanced Views (2 tools)" 130 | echo "===============================================" 131 | 132 | # Test view operations (some may fail if permissions don't allow) 133 | test_tool "get_view_metadata" "{\"table\": \"Test Table CRUD\", \"viewId\": \"viw123InvalidID\"}" "Test view metadata with invalid ID" "true" 134 | 135 | echo "" 136 | echo "🏢 PHASE 7: NEW v1.6.0 Base Management (3 tools)" 137 | echo "===============================================" 138 | 139 | test_tool "list_collaborators" "{}" "List base collaborators" 140 | test_tool "list_shares" "{}" "List shared views" 141 | 142 | # Test create_base (may fail without workspace permissions) 143 | test_tool "create_base" "{\"name\": \"Test Base\", \"tables\": [{\"name\": \"Test Table\", \"fields\": [{\"name\": \"Name\", \"type\": \"singleLineText\"}]}]}" "Test base creation (may fail due to permissions)" "true" 144 | 145 | echo "" 146 | echo "⚠️ PHASE 8: Error Handling & Edge Cases" 147 | echo "=======================================" 148 | 149 | test_tool "batch_create_records" "{\"table\": \"NonExistentTable\", \"records\": [{\"fields\": {\"Name\": \"Test\"}}]}" "Test batch create with non-existent table" "true" 150 | test_tool "get_view_metadata" "{\"table\": \"NonExistentTable\", \"viewId\": \"viwTest\"}" "Test view metadata with non-existent table" "true" 151 | 152 | echo "" 153 | echo "📈 FINAL TEST RESULTS - v1.6.0" 154 | echo "===============================" 155 | echo "✅ Passed: $PASSED" 156 | echo "❌ Failed: $FAILED" 157 | echo "📊 Total Tests: $((PASSED + FAILED))" 158 | echo "📊 Success Rate: $(echo "scale=1; $PASSED * 100 / ($PASSED + 
$FAILED)" | bc -l)%" 159 | 160 | if [ $FAILED -eq 0 ]; then 161 | echo "" 162 | echo "🎉 🎉 🎉 ALL TESTS PASSED! 🎉 🎉 🎉" 163 | echo "" 164 | echo "✅ v1.6.0 is READY FOR PRODUCTION!" 165 | echo "" 166 | echo "🚀 NEW v1.6.0 ACHIEVEMENTS:" 167 | echo "• 33 total tools (+ 10 from v1.5.0)" 168 | echo "• Batch operations (create/update/delete up to 10 records)" 169 | echo "• Attachment management via URLs" 170 | echo "• Advanced view metadata and creation" 171 | echo "• Base management and collaboration tools" 172 | echo "• Enhanced error handling and validation" 173 | echo "" 174 | echo "📦 Ready for GitHub and NPM release!" 175 | exit 0 176 | else 177 | echo "" 178 | echo "❌ SOME TESTS FAILED" 179 | echo "Review failures above. Some failures may be expected (permissions, non-existent resources)." 180 | echo "" 181 | echo "🎯 v1.6.0 SUMMARY:" 182 | echo "• Core functionality working" 183 | echo "• New batch operations implemented" 184 | echo "• Attachment support added" 185 | echo "• Advanced features may need specific permissions" 186 | exit 1 187 | fi ``` -------------------------------------------------------------------------------- /src/typescript/ai-prompts.d.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * AI-Powered Prompt Templates Type Definitions 3 | * Enterprise-grade TypeScript types for all 10 AI prompt templates 4 | */ 5 | 6 | import { PromptSchema, PromptArgument } from './index'; 7 | 8 | // ============================================================================ 9 | // AI Prompt Template Interfaces 10 | // ============================================================================ 11 | 12 | export interface AnalyzeDataPrompt { 13 | table: string; 14 | analysis_type?: 'trends' | 'statistical' | 'patterns' | 'predictive' | 'anomaly_detection' | 'correlation_matrix'; 15 | field_focus?: string; 16 | time_dimension?: string; 17 | confidence_level?: 0.90 | 0.95 | 0.99; 18 | } 19 | 20 | export interface CreateReportPrompt { 21 | table: string; 22 | report_type: 'executive_summary' | 'detailed_analysis' | 'dashboard' | 'stakeholder_report'; 23 | target_audience: 'executives' | 'managers' | 'analysts' | 'technical_team'; 24 | include_recommendations?: boolean; 25 | time_period?: string; 26 | format_preference?: 'narrative' | 'bullet_points' | 'charts' | 'mixed'; 27 | } 28 | 29 | export interface DataInsightsPrompt { 30 | table: string; 31 | insight_type: 'business_intelligence' | 'trend_analysis' | 'performance_metrics' | 'opportunity_identification'; 32 | focus_areas?: string[]; 33 | comparison_period?: string; 34 | include_forecasting?: boolean; 35 | stakeholder_context?: string; 36 | } 37 | 38 | export interface OptimizeWorkflowPrompt { 39 | table: string; 40 | current_process_description: string; 41 | optimization_goals: ('efficiency' | 'accuracy' | 'speed' | 'cost_reduction' | 'compliance')[]; 42 | constraints?: string[]; 43 | automation_preference?: 'minimal' | 'moderate' | 'aggressive'; 44 | change_tolerance?: 'low' | 'medium' | 'high'; 45 | } 46 | 47 | export interface SmartSchemaDesignPrompt { 48 | purpose: string; 49 | data_types: string[]; 50 | expected_volume: 'small' | 'medium' | 'large' | 'enterprise'; 51 | compliance_requirements?: ('GDPR' | 'HIPAA' | 'SOX' | 'PCI_DSS')[]; 52 | performance_priorities?: ('query_speed' | 'storage_efficiency' | 'scalability' | 'maintainability')[]; 53 | integration_needs?: string[]; 54 | user_access_patterns?: string; 55 | } 56 | 57 | export interface DataQualityAuditPrompt { 58 | 
table: string; 59 | quality_dimensions: ('completeness' | 'accuracy' | 'consistency' | 'timeliness' | 'validity' | 'uniqueness')[]; 60 | automated_fixes?: boolean; 61 | severity_threshold?: 'low' | 'medium' | 'high' | 'critical'; 62 | compliance_context?: string; 63 | reporting_requirements?: string[]; 64 | } 65 | 66 | export interface PredictiveAnalyticsPrompt { 67 | table: string; 68 | target_field: string; 69 | prediction_periods?: number; 70 | algorithm?: 'linear_regression' | 'arima' | 'exponential_smoothing' | 'random_forest' | 'neural_network'; 71 | include_confidence_intervals?: boolean; 72 | historical_periods?: number; 73 | external_factors?: string[]; 74 | business_context?: string; 75 | } 76 | 77 | export interface NaturalLanguageQueryPrompt { 78 | question: string; 79 | tables?: string[]; 80 | response_format?: 'natural_language' | 'structured_data' | 'visualization_ready' | 'action_items'; 81 | context_awareness?: boolean; 82 | confidence_threshold?: number; 83 | clarifying_questions?: boolean; 84 | } 85 | 86 | export interface SmartDataTransformationPrompt { 87 | source_table: string; 88 | target_schema?: string; 89 | transformation_goals: ('normalization' | 'aggregation' | 'enrichment' | 'validation' | 'standardization')[]; 90 | data_quality_rules?: string[]; 91 | preserve_history?: boolean; 92 | validation_strategy?: 'strict' | 'permissive' | 'custom'; 93 | error_handling?: 'fail_fast' | 'log_and_continue' | 'manual_review'; 94 | } 95 | 96 | export interface AutomationRecommendationsPrompt { 97 | workflow_description: string; 98 | current_pain_points: string[]; 99 | automation_scope: 'single_task' | 'workflow_segment' | 'end_to_end' | 'cross_system'; 100 | technical_constraints?: string[]; 101 | business_impact_priority?: ('cost_savings' | 'time_efficiency' | 'error_reduction' | 'scalability')[]; 102 | implementation_timeline?: 'immediate' | 'short_term' | 'medium_term' | 'long_term'; 103 | risk_tolerance?: 'conservative' | 'moderate' | 'aggressive'; 104 | } 105 | 106 | // ============================================================================ 107 | // AI Prompt Response Types 108 | // ============================================================================ 109 | 110 | export interface AnalysisResult { 111 | summary: string; 112 | key_findings: string[]; 113 | statistical_measures?: { 114 | mean?: number; 115 | median?: number; 116 | std_deviation?: number; 117 | correlation_coefficients?: Record<string, number>; 118 | confidence_intervals?: Array<{ field: string; lower: number; upper: number; confidence: number }>; 119 | }; 120 | trends?: Array<{ 121 | field: string; 122 | direction: 'increasing' | 'decreasing' | 'stable' | 'volatile'; 123 | strength: 'weak' | 'moderate' | 'strong'; 124 | significance: number; 125 | }>; 126 | anomalies?: Array<{ 127 | record_id: string; 128 | field: string; 129 | expected_value: unknown; 130 | actual_value: unknown; 131 | deviation_score: number; 132 | }>; 133 | recommendations: string[]; 134 | next_steps: string[]; 135 | } 136 | 137 | export interface ReportResult { 138 | title: string; 139 | executive_summary: string; 140 | detailed_sections: Array<{ 141 | heading: string; 142 | content: string; 143 | supporting_data?: unknown[]; 144 | visualizations?: Array<{ type: string; data: unknown; description: string }>; 145 | }>; 146 | key_metrics: Record<string, { value: unknown; change: string; significance: string }>; 147 | recommendations: Array<{ 148 | priority: 'high' | 'medium' | 'low'; 149 | recommendation: string; 150 | 
expected_impact: string; 151 | implementation_effort: 'low' | 'medium' | 'high'; 152 | }>; 153 | appendices?: Array<{ title: string; content: string }>; 154 | } 155 | 156 | export interface WorkflowOptimizationResult { 157 | current_state_analysis: { 158 | efficiency_score: number; 159 | bottlenecks: Array<{ step: string; impact: 'high' | 'medium' | 'low'; description: string }>; 160 | resource_utilization: Record<string, number>; 161 | }; 162 | optimization_recommendations: Array<{ 163 | category: 'automation' | 'process_redesign' | 'tool_integration' | 'skill_development'; 164 | recommendation: string; 165 | expected_benefits: string[]; 166 | implementation_complexity: 'simple' | 'moderate' | 'complex'; 167 | estimated_roi: string; 168 | timeline: string; 169 | }>; 170 | implementation_roadmap: Array<{ 171 | phase: number; 172 | duration: string; 173 | objectives: string[]; 174 | deliverables: string[]; 175 | success_metrics: string[]; 176 | }>; 177 | risk_assessment: Array<{ 178 | risk: string; 179 | probability: 'low' | 'medium' | 'high'; 180 | impact: 'low' | 'medium' | 'high'; 181 | mitigation: string; 182 | }>; 183 | } 184 | 185 | export interface SchemaDesignResult { 186 | recommended_schema: { 187 | tables: Array<{ 188 | name: string; 189 | purpose: string; 190 | fields: Array<{ 191 | name: string; 192 | type: string; 193 | constraints: string[]; 194 | description: string; 195 | }>; 196 | relationships: Array<{ 197 | type: 'one_to_one' | 'one_to_many' | 'many_to_many'; 198 | target_table: string; 199 | description: string; 200 | }>; 201 | }>; 202 | }; 203 | design_principles: string[]; 204 | performance_considerations: string[]; 205 | scalability_notes: string[]; 206 | compliance_alignment: Record<string, string[]>; 207 | migration_strategy?: { 208 | phases: Array<{ phase: number; description: string; estimated_time: string }>; 209 | data_migration_notes: string[]; 210 | validation_checkpoints: string[]; 211 | }; 212 | } 213 | 214 | export interface PredictionResult { 215 | predictions: Array<{ 216 | period: string; 217 | predicted_value: number; 218 | confidence_interval?: { lower: number; upper: number }; 219 | probability_bands?: Array<{ probability: number; range: [number, number] }>; 220 | }>; 221 | model_performance: { 222 | algorithm_used: string; 223 | accuracy_metrics: Record<string, number>; 224 | feature_importance?: Record<string, number>; 225 | validation_results: Record<string, number>; 226 | }; 227 | business_insights: { 228 | trend_direction: 'positive' | 'negative' | 'stable'; 229 | seasonality_detected: boolean; 230 | external_factors_impact: string[]; 231 | risk_factors: string[]; 232 | }; 233 | recommendations: Array<{ 234 | type: 'operational' | 'strategic' | 'tactical'; 235 | recommendation: string; 236 | timing: string; 237 | confidence: number; 238 | }>; 239 | } 240 | 241 | // ============================================================================ 242 | // Prompt Template Definitions (Type-Safe) 243 | // ============================================================================ 244 | 245 | // AI prompt templates are defined in prompt-templates.ts for runtime use 246 | 247 | // ============================================================================ 248 | // Export All AI Prompt Types 249 | // ============================================================================ 250 | 251 | export { 252 | AnalyzeDataPrompt, 253 | CreateReportPrompt, 254 | DataInsightsPrompt, 255 | OptimizeWorkflowPrompt, 256 | SmartSchemaDesignPrompt, 257 | 
DataQualityAuditPrompt, 258 | PredictiveAnalyticsPrompt, 259 | NaturalLanguageQueryPrompt, 260 | SmartDataTransformationPrompt, 261 | AutomationRecommendationsPrompt, 262 | 263 | AnalysisResult, 264 | ReportResult, 265 | WorkflowOptimizationResult, 266 | SchemaDesignResult, 267 | PredictionResult 268 | }; ``` -------------------------------------------------------------------------------- /src/typescript/index.d.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Airtable MCP Server TypeScript Definitions 3 | * Enterprise-grade type safety for AI-powered Airtable operations 4 | */ 5 | 6 | // ============================================================================ 7 | // MCP Protocol Types (2024-11-05 Specification) 8 | // ============================================================================ 9 | 10 | export interface MCPRequest { 11 | jsonrpc: '2.0'; 12 | id: string | number; 13 | method: string; 14 | params?: Record<string, unknown>; 15 | } 16 | 17 | export interface MCPResponse { 18 | jsonrpc: '2.0'; 19 | id: string | number; 20 | result?: unknown; 21 | error?: MCPError; 22 | } 23 | 24 | export interface MCPError { 25 | code: number; 26 | message: string; 27 | data?: unknown; 28 | } 29 | 30 | export interface MCPServerCapabilities { 31 | tools?: { 32 | listChanged?: boolean; 33 | }; 34 | prompts?: { 35 | listChanged?: boolean; 36 | }; 37 | resources?: { 38 | subscribe?: boolean; 39 | listChanged?: boolean; 40 | }; 41 | roots?: { 42 | listChanged?: boolean; 43 | }; 44 | sampling?: Record<string, unknown>; 45 | logging?: Record<string, unknown>; 46 | } 47 | 48 | export interface MCPServerInfo { 49 | name: string; 50 | version: string; 51 | protocolVersion: string; 52 | capabilities: MCPServerCapabilities; 53 | } 54 | 55 | // ============================================================================ 56 | // Tool Schema Types 57 | // ============================================================================ 58 | 59 | export interface ToolParameter { 60 | type: 'string' | 'number' | 'boolean' | 'object' | 'array'; 61 | description: string; 62 | required?: boolean; 63 | default?: unknown; 64 | enum?: string[]; 65 | } 66 | 67 | export interface ToolSchema { 68 | name: string; 69 | description: string; 70 | inputSchema: { 71 | type: 'object'; 72 | properties: Record<string, ToolParameter>; 73 | required?: string[]; 74 | }; 75 | } 76 | 77 | // ============================================================================ 78 | // AI Prompt Types 79 | // ============================================================================ 80 | 81 | export interface PromptArgument { 82 | name: string; 83 | description: string; 84 | required: boolean; 85 | type?: 'string' | 'number' | 'boolean'; 86 | enum?: string[]; 87 | } 88 | 89 | export interface PromptSchema { 90 | name: string; 91 | description: string; 92 | arguments: PromptArgument[]; 93 | } 94 | 95 | export type AnalysisType = 96 | | 'trends' 97 | | 'statistical' 98 | | 'patterns' 99 | | 'predictive' 100 | | 'anomaly_detection' 101 | | 'correlation_matrix'; 102 | 103 | export type ConfidenceLevel = 0.90 | 0.95 | 0.99; 104 | 105 | export interface AnalysisOptions { 106 | table: string; 107 | analysis_type?: AnalysisType; 108 | field_focus?: string; 109 | time_dimension?: string; 110 | confidence_level?: ConfidenceLevel; 111 | } 112 | 113 | export interface PredictiveAnalyticsOptions { 114 | table: string; 115 | target_field: string; 116 | prediction_periods?: number; 117 | 
algorithm?: 'linear_regression' | 'arima' | 'exponential_smoothing' | 'random_forest'; 118 | include_confidence_intervals?: boolean; 119 | historical_periods?: number; 120 | } 121 | 122 | export interface StatisticalResult { 123 | confidence_interval: [number, number]; 124 | significance_level: number; 125 | p_value?: number; 126 | correlation_coefficient?: number; 127 | } 128 | 129 | // ============================================================================ 130 | // Airtable API Types 131 | // ============================================================================ 132 | 133 | export interface AirtableFieldType { 134 | type: 'singleLineText' | 'multilineText' | 'richText' | 'email' | 'url' | 'phoneNumber' | 135 | 'number' | 'percent' | 'currency' | 'singleSelect' | 'multipleSelects' | 136 | 'date' | 'dateTime' | 'checkbox' | 'rating' | 'formula' | 'rollup' | 137 | 'count' | 'lookup' | 'createdTime' | 'lastModifiedTime' | 'createdBy' | 138 | 'lastModifiedBy' | 'attachment' | 'barcode' | 'button'; 139 | } 140 | 141 | export interface AirtableField { 142 | id: string; 143 | name: string; 144 | type: AirtableFieldType['type']; 145 | options?: Record<string, unknown>; 146 | description?: string; 147 | } 148 | 149 | export interface AirtableTable { 150 | id: string; 151 | name: string; 152 | description?: string; 153 | primaryFieldId: string; 154 | fields: AirtableField[]; 155 | views: AirtableView[]; 156 | } 157 | 158 | export interface AirtableView { 159 | id: string; 160 | name: string; 161 | type: 'grid' | 'form' | 'calendar' | 'gallery' | 'kanban'; 162 | } 163 | 164 | export interface AirtableRecord { 165 | id: string; 166 | fields: Record<string, unknown>; 167 | createdTime: string; 168 | } 169 | 170 | export interface AirtableBase { 171 | id: string; 172 | name: string; 173 | permissionLevel: 'read' | 'comment' | 'edit' | 'create'; 174 | tables: AirtableTable[]; 175 | } 176 | 177 | export interface AirtableWebhook { 178 | id: string; 179 | macSecretBase64: string; 180 | expirationTime: string; 181 | notificationUrl: string; 182 | isHookEnabled: boolean; 183 | cursorForNextPayload: number; 184 | lastSuccessfulNotificationTime?: string; 185 | } 186 | 187 | export interface WebhookPayload { 188 | timestamp: string; 189 | base: { 190 | id: string; 191 | }; 192 | webhook: { 193 | id: string; 194 | }; 195 | changedTablesById: Record<string, { 196 | changedRecordsById: Record<string, { 197 | current?: AirtableRecord; 198 | previous?: AirtableRecord; 199 | }>; 200 | }>; 201 | } 202 | 203 | // ============================================================================ 204 | // Server Configuration Types 205 | // ============================================================================ 206 | 207 | export interface ServerConfig { 208 | PORT: number; 209 | HOST: string; 210 | MAX_REQUESTS_PER_MINUTE: number; 211 | LOG_LEVEL: 'ERROR' | 'WARN' | 'INFO' | 'DEBUG' | 'TRACE'; 212 | } 213 | 214 | export interface AuthConfig { 215 | AIRTABLE_TOKEN: string; 216 | AIRTABLE_BASE_ID: string; 217 | } 218 | 219 | export interface OAuth2Config { 220 | client_id: string; 221 | redirect_uri: string; 222 | state: string; 223 | code_challenge?: string; 224 | code_challenge_method?: 'S256'; 225 | } 226 | 227 | // ============================================================================ 228 | // Batch Operation Types 229 | // ============================================================================ 230 | 231 | export interface BatchCreateRecord { 232 | fields: Record<string, unknown>; 233 | } 234 | 
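// Illustrative sketch: values conforming to the batch record types might look like
//   const creates: BatchCreateRecord[] = [
//     { fields: { Name: 'Task A', Status: 'Todo' } },
//     { fields: { Name: 'Task B', Status: 'Done' } }
//   ];
//   const updates: BatchUpdateRecord[] = [
//     { id: 'recXXXXXXXXXXXXXX', fields: { Status: 'Done' } }
//   ];
// The record ID and field names above are placeholders. Airtable's batch endpoints
// accept at most 10 records per request, which is why the batch_* tools cap each
// call at 10 records.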
235 | export interface BatchUpdateRecord { 236 | id: string; 237 | fields: Record<string, unknown>; 238 | } 239 | 240 | export interface BatchDeleteRecord { 241 | id: string; 242 | } 243 | 244 | export interface BatchUpsertRecord { 245 | key_field: string; 246 | key_value: string; 247 | fields: Record<string, unknown>; 248 | } 249 | 250 | // ============================================================================ 251 | // Advanced Analytics Types 252 | // ============================================================================ 253 | 254 | export interface DataQualityReport { 255 | total_records: number; 256 | missing_values: Record<string, number>; 257 | duplicate_records: string[]; 258 | data_types: Record<string, string>; 259 | quality_score: number; 260 | recommendations: string[]; 261 | } 262 | 263 | export interface WorkflowOptimization { 264 | current_efficiency: number; 265 | bottlenecks: string[]; 266 | automation_opportunities: Array<{ 267 | field: string; 268 | suggestion: string; 269 | impact_level: 'high' | 'medium' | 'low'; 270 | implementation_complexity: 'simple' | 'moderate' | 'complex'; 271 | }>; 272 | estimated_time_savings: string; 273 | } 274 | 275 | export interface SchemaOptimization { 276 | field_recommendations: Array<{ 277 | field: string; 278 | current_type: string; 279 | suggested_type: string; 280 | reason: string; 281 | }>; 282 | index_suggestions: string[]; 283 | normalization_opportunities: string[]; 284 | compliance_notes: string[]; 285 | } 286 | 287 | // ============================================================================ 288 | // Root Directory Types 289 | // ============================================================================ 290 | 291 | export interface RootDirectory { 292 | uri: string; 293 | name: string; 294 | description?: string; 295 | } 296 | 297 | // ============================================================================ 298 | // Error Types (defined in errors.ts) 299 | // ============================================================================ 300 | 301 | export interface AirtableError extends Error { 302 | code: string; 303 | statusCode?: number; 304 | } 305 | 306 | export interface ValidationError extends Error { 307 | field: string; 308 | } 309 | 310 | // ============================================================================ 311 | // Utility Types 312 | // ============================================================================ 313 | 314 | export type DeepPartial<T> = { 315 | [P in keyof T]?: T[P] extends object ? 
DeepPartial<T[P]> : T[P]; 316 | }; 317 | 318 | export type RequiredFields<T, K extends keyof T> = T & Required<Pick<T, K>>; 319 | 320 | export type OptionalFields<T, K extends keyof T> = T & Partial<Pick<T, K>>; 321 | 322 | // ============================================================================ 323 | // Main Server Class Type 324 | // ============================================================================ 325 | 326 | export interface AirtableMCPServer { 327 | config: ServerConfig; 328 | authConfig: AuthConfig; 329 | tools: ToolSchema[]; 330 | prompts: PromptSchema[]; 331 | 332 | initialize(capabilities: MCPServerCapabilities): Promise<MCPServerInfo>; 333 | handleToolCall(name: string, params: Record<string, unknown>): Promise<unknown>; 334 | handlePromptGet(name: string, args: Record<string, unknown>): Promise<{ messages: Array<{ role: string; content: { type: string; text: string } }> }>; 335 | start(): Promise<void>; 336 | stop(): Promise<void>; 337 | } 338 | 339 | // ============================================================================ 340 | // Export All Types 341 | // ============================================================================ 342 | 343 | export * from './tools'; 344 | export * from './ai-prompts'; ``` -------------------------------------------------------------------------------- /src/typescript/tools.d.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Tool Schema Type Definitions 3 | * Comprehensive TypeScript types for all 33 Airtable MCP tools 4 | */ 5 | 6 | import { ToolSchema } from './index'; 7 | 8 | // ============================================================================ 9 | // Data Operation Tool Interfaces 10 | // ============================================================================ 11 | 12 | export interface ListTablesInput { 13 | include_schema?: boolean; 14 | } 15 | 16 | export interface ListRecordsInput { 17 | [key: string]: unknown; 18 | table: string; 19 | maxRecords?: number; 20 | view?: string; 21 | filterByFormula?: string; 22 | sort?: Array<{ field: string; direction: 'asc' | 'desc' }>; 23 | pageSize?: number; 24 | offset?: string; 25 | } 26 | 27 | export interface GetRecordInput { 28 | table: string; 29 | recordId: string; 30 | } 31 | 32 | export interface CreateRecordInput { 33 | table: string; 34 | fields: Record<string, unknown>; 35 | typecast?: boolean; 36 | } 37 | 38 | export interface UpdateRecordInput { 39 | table: string; 40 | recordId: string; 41 | fields: Record<string, unknown>; 42 | typecast?: boolean; 43 | } 44 | 45 | export interface DeleteRecordInput { 46 | table: string; 47 | recordId: string; 48 | } 49 | 50 | export interface SearchRecordsInput { 51 | table: string; 52 | filterByFormula?: string; 53 | sort?: Array<{ field: string; direction: 'asc' | 'desc' }>; 54 | maxRecords?: number; 55 | view?: string; 56 | } 57 | 58 | // ============================================================================ 59 | // Webhook Management Tool Interfaces 60 | // ============================================================================ 61 | 62 | export interface ListWebhooksInput { 63 | cursor?: string; 64 | } 65 | 66 | export interface CreateWebhookInput { 67 | notificationUrl: string; 68 | specification?: { 69 | options?: { 70 | filters?: { 71 | dataTypes?: ('tableData' | 'tableSchema')[]; 72 | recordChangeScope?: string; 73 | watchDataInTableIds?: string[]; 74 | }; 75 | }; 76 | }; 77 | } 78 | 79 | export interface DeleteWebhookInput { 80 | 
webhookId: string; 81 | } 82 | 83 | export interface GetWebhookPayloadsInput { 84 | webhookId: string; 85 | cursor?: string; 86 | limit?: number; 87 | } 88 | 89 | export interface RefreshWebhookInput { 90 | webhookId: string; 91 | } 92 | 93 | // ============================================================================ 94 | // Schema Discovery Tool Interfaces 95 | // ============================================================================ 96 | 97 | export interface ListBasesInput { 98 | offset?: string; 99 | } 100 | 101 | export interface GetBaseSchemaInput { 102 | baseId?: string; 103 | } 104 | 105 | export interface DescribeTableInput { 106 | table: string; 107 | include_sample_data?: boolean; 108 | } 109 | 110 | export interface ListFieldTypesInput { 111 | category?: 'basic' | 'advanced' | 'computed'; 112 | } 113 | 114 | export interface GetTableViewsInput { 115 | table: string; 116 | } 117 | 118 | // ============================================================================ 119 | // Table Management Tool Interfaces 120 | // ============================================================================ 121 | 122 | export interface CreateTableInput { 123 | name: string; 124 | description?: string; 125 | fields: Array<{ 126 | name: string; 127 | type: string; 128 | description?: string; 129 | options?: Record<string, unknown>; 130 | }>; 131 | } 132 | 133 | export interface UpdateTableInput { 134 | table: string; 135 | name?: string; 136 | description?: string; 137 | } 138 | 139 | export interface DeleteTableInput { 140 | table: string; 141 | confirmation?: string; 142 | } 143 | 144 | // ============================================================================ 145 | // Field Management Tool Interfaces 146 | // ============================================================================ 147 | 148 | export interface CreateFieldInput { 149 | table: string; 150 | name: string; 151 | type: string; 152 | description?: string; 153 | options?: Record<string, unknown>; 154 | } 155 | 156 | export interface UpdateFieldInput { 157 | table: string; 158 | fieldId: string; 159 | name?: string; 160 | description?: string; 161 | options?: Record<string, unknown>; 162 | } 163 | 164 | export interface DeleteFieldInput { 165 | table: string; 166 | fieldId: string; 167 | confirmation?: string; 168 | } 169 | 170 | // ============================================================================ 171 | // Batch Operations Tool Interfaces 172 | // ============================================================================ 173 | 174 | export interface BatchCreateRecordsInput { 175 | table: string; 176 | records: Array<{ 177 | fields: Record<string, unknown>; 178 | }>; 179 | typecast?: boolean; 180 | } 181 | 182 | export interface BatchUpdateRecordsInput { 183 | table: string; 184 | records: Array<{ 185 | id: string; 186 | fields: Record<string, unknown>; 187 | }>; 188 | typecast?: boolean; 189 | } 190 | 191 | export interface BatchDeleteRecordsInput { 192 | table: string; 193 | records: Array<{ 194 | id: string; 195 | }>; 196 | } 197 | 198 | export interface BatchUpsertRecordsInput { 199 | table: string; 200 | records: Array<{ 201 | key_field: string; 202 | key_value: string; 203 | fields: Record<string, unknown>; 204 | }>; 205 | typecast?: boolean; 206 | } 207 | 208 | // ============================================================================ 209 | // Attachment Management Tool Interfaces 210 | // ============================================================================ 211 | 212 | export interface 
UploadAttachmentInput { 213 | table: string; 214 | recordId: string; 215 | fieldName: string; 216 | url: string; 217 | filename?: string; 218 | } 219 | 220 | // ============================================================================ 221 | // Advanced Views Tool Interfaces 222 | // ============================================================================ 223 | 224 | export interface CreateViewInput { 225 | table: string; 226 | name: string; 227 | type: 'grid' | 'form' | 'calendar' | 'gallery' | 'kanban'; 228 | visibleFieldIds?: string[]; 229 | filterByFormula?: string; 230 | sort?: Array<{ field: string; direction: 'asc' | 'desc' }>; 231 | } 232 | 233 | export interface GetViewMetadataInput { 234 | table: string; 235 | viewId: string; 236 | } 237 | 238 | // ============================================================================ 239 | // Base Management Tool Interfaces 240 | // ============================================================================ 241 | 242 | export interface CreateBaseInput { 243 | name: string; 244 | workspaceId?: string; 245 | tables?: Array<{ 246 | name: string; 247 | description?: string; 248 | fields: Array<{ 249 | name: string; 250 | type: string; 251 | options?: Record<string, unknown>; 252 | }>; 253 | }>; 254 | } 255 | 256 | export interface ListCollaboratorsInput { 257 | baseId?: string; 258 | } 259 | 260 | export interface ListSharesInput { 261 | baseId?: string; 262 | } 263 | 264 | // ============================================================================ 265 | // Tool Response Interfaces 266 | // ============================================================================ 267 | 268 | export interface ToolResponse<T = unknown> { 269 | content: Array<{ 270 | type: 'text' | 'image' | 'resource'; 271 | text?: string; 272 | data?: T; 273 | mimeType?: string; 274 | }>; 275 | isError?: boolean; 276 | } 277 | 278 | export interface PaginatedResponse<T> { 279 | records?: T[]; 280 | offset?: string; 281 | } 282 | 283 | export interface TableInfo { 284 | id: string; 285 | name: string; 286 | description?: string; 287 | primaryFieldId: string; 288 | fields: Array<{ 289 | id: string; 290 | name: string; 291 | type: string; 292 | options?: Record<string, unknown>; 293 | description?: string; 294 | }>; 295 | views: Array<{ 296 | id: string; 297 | name: string; 298 | type: string; 299 | }>; 300 | } 301 | 302 | export interface RecordInfo { 303 | id: string; 304 | fields: Record<string, unknown>; 305 | createdTime: string; 306 | commentCount?: number; 307 | } 308 | 309 | export interface WebhookInfo { 310 | id: string; 311 | macSecretBase64: string; 312 | expirationTime: string; 313 | notificationUrl: string; 314 | isHookEnabled: boolean; 315 | specification: { 316 | options: { 317 | filters: { 318 | dataTypes: string[]; 319 | recordChangeScope?: string; 320 | watchDataInTableIds?: string[]; 321 | }; 322 | }; 323 | }; 324 | } 325 | 326 | export interface BaseInfo { 327 | id: string; 328 | name: string; 329 | permissionLevel: 'read' | 'comment' | 'edit' | 'create'; 330 | } 331 | 332 | export interface FieldTypeInfo { 333 | type: string; 334 | name: string; 335 | description: string; 336 | supportedOptions?: string[]; 337 | examples?: Record<string, unknown>[]; 338 | } 339 | 340 | export interface ViewInfo { 341 | id: string; 342 | name: string; 343 | type: 'grid' | 'form' | 'calendar' | 'gallery' | 'kanban' | 'timeline' | 'block'; 344 | visibleFieldIds?: string[]; 345 | filterByFormula?: string; 346 | sort?: Array<{ 347 | field: string; 348 | direction: 
'asc' | 'desc'; 349 | }>; 350 | } 351 | 352 | export interface CollaboratorInfo { 353 | type: 'user' | 'group'; 354 | id: string; 355 | email?: string; 356 | name?: string; 357 | permissionLevel: 'read' | 'comment' | 'edit' | 'create'; 358 | createdTime: string; 359 | } 360 | 361 | export interface ShareInfo { 362 | id: string; 363 | type: 'view' | 'base'; 364 | url: string; 365 | isPasswordRequired: boolean; 366 | allowedActions: string[]; 367 | restriction?: { 368 | dateRange?: { 369 | startDate?: string; 370 | endDate?: string; 371 | }; 372 | allowCommenting?: boolean; 373 | allowCopyingData?: boolean; 374 | }; 375 | } 376 | 377 | // ============================================================================ 378 | // Complete Tool Schema Definitions 379 | // ============================================================================ 380 | 381 | // Tool schemas are defined in tools-schemas.ts for runtime use 382 | 383 | // ============================================================================ 384 | // Export All Tool Types 385 | // ============================================================================ 386 | 387 | export { 388 | ListTablesInput, 389 | ListRecordsInput, 390 | GetRecordInput, 391 | CreateRecordInput, 392 | UpdateRecordInput, 393 | DeleteRecordInput, 394 | SearchRecordsInput, 395 | 396 | ListWebhooksInput, 397 | CreateWebhookInput, 398 | DeleteWebhookInput, 399 | GetWebhookPayloadsInput, 400 | RefreshWebhookInput, 401 | 402 | BatchCreateRecordsInput, 403 | BatchUpdateRecordsInput, 404 | BatchDeleteRecordsInput, 405 | BatchUpsertRecordsInput, 406 | 407 | ToolResponse, 408 | PaginatedResponse, 409 | TableInfo, 410 | RecordInfo, 411 | WebhookInfo, 412 | BaseInfo, 413 | FieldTypeInfo, 414 | ViewInfo, 415 | CollaboratorInfo, 416 | ShareInfo 417 | }; ``` -------------------------------------------------------------------------------- /docs/releases/RELEASE_NOTES_v1.6.0.md: -------------------------------------------------------------------------------- ```markdown 1 | # 🚀 Airtable MCP Server v1.6.0 Release Notes 2 | 3 | **Release Date**: August 15, 2025 4 | **Major Update**: Batch Operations, Attachment Management & Advanced Features 5 | 6 | ## 🎯 Overview 7 | 8 | Version 1.6.0 represents another **major expansion** of the Airtable MCP Server, adding powerful batch operations, attachment management, and advanced base management capabilities. This release increases the total tools from 23 to **33 tools**, providing the most comprehensive Airtable API coverage available for AI assistants. 9 | 10 | ## ✨ New Features (10 New Tools) 11 | 12 | ### ⚡ Batch Operations (4 New Tools) 13 | 14 | 1. **`batch_create_records`** - Create up to 10 records simultaneously 15 | - Significantly improves performance for bulk data entry 16 | - Maintains atomicity - all records created or none 17 | - Proper error handling for validation failures 18 | 19 | 2. **`batch_update_records`** - Update up to 10 records at once 20 | - Efficient bulk updates with field-level precision 21 | - Maintains data integrity across operations 22 | - Returns detailed success/failure information 23 | 24 | 3. **`batch_delete_records`** - Delete up to 10 records in one operation 25 | - Fast bulk deletion with safety validation 26 | - Atomic operation ensures consistency 27 | - Detailed deletion confirmation 28 | 29 | 4. 
**`batch_upsert_records`** - Smart update-or-create operations 30 | - Updates existing records or creates new ones based on key fields 31 | - Intelligent matching using specified key fields 32 | - Optimizes data synchronization workflows 33 | 34 | ### 📎 Attachment Management (1 New Tool) 35 | 36 | 5. **`upload_attachment`** - Attach files from URLs to records 37 | - Supports any publicly accessible file URL 38 | - Automatic file type detection and validation 39 | - Optional custom filename specification 40 | - Works with all Airtable-supported file types 41 | 42 | ### 👁️ Advanced View Management (2 New Tools) 43 | 44 | 6. **`create_view`** - Create custom views programmatically 45 | - Support for all view types: grid, form, calendar, gallery, kanban, timeline, gantt 46 | - Custom field visibility and ordering 47 | - Configurable filters and sorts 48 | - Automated view setup for workflows 49 | 50 | 7. **`get_view_metadata`** - Detailed view configuration retrieval 51 | - Complete view settings and configurations 52 | - Filter formulas and sort specifications 53 | - Field visibility and ordering information 54 | - Perfect for view replication and analysis 55 | 56 | ### 🏢 Base Management (3 New Tools) 57 | 58 | 8. **`create_base`** - Create new Airtable bases 59 | - Programmatic base creation with initial table structures 60 | - Support for workspace organization 61 | - Batch table and field creation 62 | - Perfect for template deployment 63 | 64 | 9. **`list_collaborators`** - View base collaboration details 65 | - Complete collaborator list with permission levels 66 | - User type identification (user, group, etc.) 67 | - Permission auditing and management 68 | - Security compliance support 69 | 70 | 10. **`list_shares`** - Manage shared view configurations 71 | - Public share URLs and settings 72 | - Share type and effectiveness status 73 | - View and table relationship mapping 74 | - Privacy and access control management 75 | 76 | ## 🔄 Enhanced Existing Features 77 | 78 | ### Performance Improvements 79 | - **Batch Operations**: Up to 10x faster for bulk operations 80 | - **Error Handling**: More detailed error messages and validation 81 | - **API Efficiency**: Reduced API calls through intelligent batching 82 | 83 | ### Security Enhancements 84 | - **Input Validation**: Enhanced parameter validation for all new tools 85 | - **Permission Checking**: Better handling of permission-restricted operations 86 | - **Safe Defaults**: Conservative defaults for destructive operations 87 | 88 | ### User Experience 89 | - **Better Error Messages**: More descriptive error responses 90 | - **Consistent Interface**: Uniform parameter naming across all tools 91 | - **Enhanced Documentation**: Detailed examples and use cases 92 | 93 | ## 📊 Tool Count Progression 94 | 95 | | Version | Total Tools | New Features | 96 | |---------|-------------|--------------| 97 | | **v1.6.0** | **33** | Batch ops, attachments, advanced views, base mgmt | 98 | | v1.5.0 | 23 | Schema management | 99 | | v1.4.0 | 12 | Webhooks | 100 | | v1.2.4 | 5 | Basic CRUD | 101 | 102 | ## 🛠️ Technical Improvements 103 | 104 | ### API Coverage 105 | - **Complete Airtable API**: Now covers virtually all public Airtable API endpoints 106 | - **Batch Endpoints**: Full support for Airtable's batch operation limits 107 | - **Metadata API**: Complete integration with Airtable's metadata capabilities 108 | 109 | ### Architecture 110 | - **Modular Design**: Clean separation of concerns for each tool category 111 | - **Error Resilience**: Improved 
error handling and recovery 112 | - **Performance Optimized**: Efficient API usage patterns 113 | 114 | ### Compatibility 115 | - **Backward Compatible**: All v1.5.0 tools unchanged 116 | - **API Limits**: Respects Airtable's rate limits and batch size restrictions 117 | - **Token Scopes**: Graceful handling of insufficient permissions 118 | 119 | ## 📚 New Capabilities 120 | 121 | ### For Users 122 | - **Bulk Data Operations**: Efficiently manage large datasets 123 | - **File Management**: Easy attachment handling through URLs 124 | - **Advanced Workflows**: Create complex multi-step processes 125 | - **Collaboration Insights**: Understand base sharing and permissions 126 | - **Template Creation**: Programmatically create standardized bases 127 | 128 | ### For Developers 129 | - **High-Performance Bulk Ops**: Optimize data synchronization 130 | - **Complete Base Lifecycle**: Full cradle-to-grave base management 131 | - **Advanced View Control**: Programmatic UI customization 132 | - **Security Auditing**: Comprehensive permission monitoring 133 | 134 | ## 🚀 Getting Started with v1.6.0 135 | 136 | ### Installation 137 | ```bash 138 | npm install -g @rashidazarang/[email protected] 139 | ``` 140 | 141 | ### New Usage Examples 142 | 143 | #### Batch Operations 144 | ```javascript 145 | // Create multiple records efficiently 146 | "Create 5 new project records with these details: [project data]" 147 | 148 | // Update multiple records at once 149 | "Update all records where status is 'pending' to 'in progress'" 150 | 151 | // Delete multiple records 152 | "Delete these 3 completed tasks: rec123, rec456, rec789" 153 | ``` 154 | 155 | #### Attachment Management 156 | ```javascript 157 | // Attach files to records 158 | "Attach this image https://example.com/image.jpg to the product photo field in record rec123" 159 | 160 | // Batch create with attachments 161 | "Create a new product record and attach the logo from this URL" 162 | ``` 163 | 164 | #### Advanced Views 165 | ```javascript 166 | // Create custom views 167 | "Create a calendar view for the Events table showing only future events" 168 | 169 | // Analyze view configurations 170 | "Show me the detailed configuration of the 'Active Projects' view" 171 | ``` 172 | 173 | #### Base Management 174 | ```javascript 175 | // Create new bases 176 | "Create a new base called 'Project Tracker' with tables for Projects, Tasks, and Team Members" 177 | 178 | // Collaboration insights 179 | "Who has access to this base and what are their permission levels?" 180 | ``` 181 | 182 | ## 🔧 Breaking Changes 183 | 184 | **None** - v1.6.0 maintains full backward compatibility with all previous versions. 
185 | 186 | ## 🐛 Bug Fixes 187 | 188 | - **Batch Size Validation**: Proper enforcement of 10-record limits 189 | - **Error Message Clarity**: More descriptive API error responses 190 | - **Permission Handling**: Better graceful degradation for insufficient permissions 191 | - **URL Validation**: Enhanced validation for attachment URLs 192 | 193 | ## ⚡ Performance Improvements 194 | 195 | - **Batch Operations**: Up to 10x performance improvement for bulk operations 196 | - **API Efficiency**: Reduced API calls through intelligent batching 197 | - **Memory Usage**: Optimized memory usage for large operations 198 | - **Response Processing**: Faster JSON parsing and response handling 199 | 200 | ## 🌟 What's Next 201 | 202 | Based on user feedback and Airtable API evolution: 203 | - Enhanced search and filtering capabilities 204 | - Advanced automation triggers 205 | - Real-time collaboration features 206 | - Performance analytics and monitoring 207 | - Enterprise-grade security features 208 | 209 | ## 📈 Compatibility & Requirements 210 | 211 | - **Node.js**: Requires Node.js 14+ 212 | - **Airtable API**: Compatible with latest Airtable API version 213 | - **Rate Limits**: Respects Airtable's 5 requests/second limit 214 | - **Token Scopes**: Requires appropriate scopes for advanced features 215 | 216 | ### Required Scopes for Full Functionality 217 | - `data.records:read` - Read records 218 | - `data.records:write` - Create, update, delete records 219 | - `schema.bases:read` - View schemas and metadata 220 | - `schema.bases:write` - Create/modify tables, fields, views, bases 221 | - `webhook:manage` - Webhook operations (optional) 222 | 223 | ## 📊 Testing & Quality 224 | 225 | - **100% Test Coverage**: All 33 tools tested with real API calls 226 | - **Edge Case Handling**: Comprehensive error condition testing 227 | - **Performance Testing**: Batch operation efficiency verification 228 | - **Security Testing**: Permission and validation testing 229 | 230 | ## 🤝 Community Impact 231 | 232 | v1.6.0 establishes this MCP server as the definitive Airtable integration for AI assistants, providing: 233 | 234 | - **Most Comprehensive Coverage**: 33 tools covering entire Airtable API 235 | - **Best Performance**: Intelligent batching and optimization 236 | - **Enterprise Ready**: Advanced collaboration and security features 237 | - **Developer Friendly**: Clean, consistent, well-documented interface 238 | 239 | ## 🔗 Resources 240 | 241 | **GitHub**: https://github.com/rashidazarang/airtable-mcp 242 | **NPM**: https://www.npmjs.com/package/@rashidazarang/airtable-mcp 243 | **Issues**: https://github.com/rashidazarang/airtable-mcp/issues 244 | **Documentation**: https://github.com/rashidazarang/airtable-mcp#readme 245 | 246 | --- 247 | 248 | 🎉 **Thank you for using Airtable MCP Server v1.6.0!** This release represents the culmination of comprehensive Airtable API integration, providing AI assistants with unprecedented access to Airtable's full feature set through natural language interactions. 
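For developers who prefer to exercise the new batch tools directly rather than through an AI assistant, a raw MCP `tools/call` request can be sent with curl, roughly as sketched below. The port, table name, and field values are illustrative; adjust them to your local server and base.

```bash
# Illustrative sketch: call batch_create_records on a locally running server.
curl -s -X POST "http://localhost:8010/mcp" \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {
      "name": "batch_create_records",
      "arguments": {
        "table": "Projects",
        "records": [
          { "fields": { "Name": "Project A", "Status": "Active" } },
          { "fields": { "Name": "Project B", "Status": "Pending" } }
        ]
      }
    }
  }'
```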
``` -------------------------------------------------------------------------------- /tests/test_v1.5.0_final.sh: -------------------------------------------------------------------------------- ```bash 1 | #!/bin/bash 2 | 3 | # COMPREHENSIVE FINAL TEST SUITE - Airtable MCP Server v1.5.0 4 | # Tests ALL 23 tools with no assumptions 5 | 6 | set -e 7 | SERVER_URL="http://localhost:8010/mcp" 8 | PASSED=0 9 | FAILED=0 10 | TEST_RECORD_ID="" 11 | TEST_WEBHOOK_ID="" 12 | CREATED_FIELD_ID="" 13 | 14 | echo "🧪 FINAL COMPREHENSIVE TEST SUITE - v1.5.0" 15 | echo "===========================================" 16 | echo "Testing ALL 23 tools with real API calls" 17 | echo "" 18 | 19 | # Function to make MCP calls 20 | call_tool() { 21 | local tool_name="$1" 22 | local params="$2" 23 | curl -s -X POST "$SERVER_URL" \ 24 | -H "Content-Type: application/json" \ 25 | -d "{\"jsonrpc\": \"2.0\", \"id\": 1, \"method\": \"tools/call\", \"params\": {\"name\": \"$tool_name\", \"arguments\": $params}}" 26 | } 27 | 28 | # Enhanced test function with better error reporting 29 | test_tool() { 30 | local tool_name="$1" 31 | local params="$2" 32 | local description="$3" 33 | local expect_fail="$4" 34 | 35 | echo -n "🔧 $tool_name: $description... " 36 | 37 | if result=$(call_tool "$tool_name" "$params" 2>&1); then 38 | if echo "$result" | jq -e '.result.content[0].text' > /dev/null 2>&1; then 39 | response_text=$(echo "$result" | jq -r '.result.content[0].text') 40 | if [[ "$expect_fail" == "true" ]]; then 41 | if echo "$response_text" | grep -q "error\|Error\|not found\|requires"; then 42 | echo "✅ PASS (Expected failure)" 43 | ((PASSED++)) 44 | else 45 | echo "❌ FAIL (Should have failed)" 46 | echo " Response: ${response_text:0:100}..." 47 | ((FAILED++)) 48 | fi 49 | else 50 | echo "✅ PASS" 51 | ((PASSED++)) 52 | # Store important IDs for later tests 53 | if [[ "$tool_name" == "create_record" ]]; then 54 | TEST_RECORD_ID=$(echo "$result" | jq -r '.result.content[0].text' | grep -o 'rec[a-zA-Z0-9]\{10,20\}' | head -1) 55 | echo " 📝 Stored record ID: $TEST_RECORD_ID" 56 | elif [[ "$tool_name" == "create_webhook" ]]; then 57 | TEST_WEBHOOK_ID=$(echo "$result" | jq -r '.result.content[0].text' | grep -o 'ach[a-zA-Z0-9]\{10,20\}' | head -1) 58 | echo " 🪝 Stored webhook ID: $TEST_WEBHOOK_ID" 59 | elif [[ "$tool_name" == "create_field" ]]; then 60 | CREATED_FIELD_ID=$(echo "$result" | jq -r '.result.content[0].text' | grep -o 'fld[a-zA-Z0-9]\{10,20\}' | head -1) 61 | echo " 🏗️ Stored field ID: $CREATED_FIELD_ID" 62 | fi 63 | fi 64 | else 65 | if echo "$result" | jq -e '.error' > /dev/null 2>&1; then 66 | error_msg=$(echo "$result" | jq -r '.error.message') 67 | if [[ "$expect_fail" == "true" ]]; then 68 | echo "✅ PASS (Expected error: $error_msg)" 69 | ((PASSED++)) 70 | else 71 | echo "❌ FAIL (API Error: $error_msg)" 72 | ((FAILED++)) 73 | fi 74 | else 75 | echo "❌ FAIL (Invalid response)" 76 | echo " Response: $result" 77 | ((FAILED++)) 78 | fi 79 | fi 80 | else 81 | echo "❌ FAIL (Request failed)" 82 | echo " Error: $result" 83 | ((FAILED++)) 84 | fi 85 | } 86 | 87 | echo "📊 PHASE 1: Core Data Operations (7 tools)" 88 | echo "===========================================" 89 | 90 | test_tool "list_tables" "{}" "List all tables in base" 91 | test_tool "list_records" "{\"table\": \"Test Table CRUD\", \"maxRecords\": 3}" "List records with limit" 92 | test_tool "create_record" "{\"table\": \"Test Table CRUD\", \"fields\": {\"Name\": \"v1.5.0 Test Record\", \"Description\": \"Created during final testing\", \"Status\": 
\"Testing\"}}" "Create test record" 93 | 94 | # Use the created record ID for get_record test 95 | if [[ -n "$TEST_RECORD_ID" ]]; then 96 | test_tool "get_record" "{\"table\": \"Test Table CRUD\", \"recordId\": \"$TEST_RECORD_ID\"}" "Get the created record" 97 | test_tool "update_record" "{\"table\": \"Test Table CRUD\", \"recordId\": \"$TEST_RECORD_ID\", \"fields\": {\"Status\": \"Updated\"}}" "Update the created record" 98 | else 99 | echo "⚠️ Skipping get_record and update_record tests (no record ID)" 100 | ((FAILED += 2)) 101 | fi 102 | 103 | test_tool "search_records" "{\"table\": \"Test Table CRUD\", \"searchTerm\": \"v1.5.0\"}" "Search for our test record" 104 | 105 | echo "" 106 | echo "🔗 PHASE 2: Webhook Management (5 tools)" 107 | echo "========================================" 108 | 109 | test_tool "list_webhooks" "{}" "List existing webhooks" 110 | test_tool "create_webhook" "{\"notificationUrl\": \"https://webhook.site/test-v1.5.0\", \"specification\": {\"options\": {\"filters\": {\"dataTypes\": [\"tableData\"]}}}}" "Create test webhook" 111 | 112 | if [[ -n "$TEST_WEBHOOK_ID" ]]; then 113 | test_tool "get_webhook_payloads" "{\"webhookId\": \"$TEST_WEBHOOK_ID\"}" "Get webhook payloads" 114 | test_tool "refresh_webhook" "{\"webhookId\": \"$TEST_WEBHOOK_ID\"}" "Refresh webhook" 115 | test_tool "delete_webhook" "{\"webhookId\": \"$TEST_WEBHOOK_ID\"}" "Delete test webhook" 116 | else 117 | echo "⚠️ Skipping webhook payload/refresh/delete tests (no webhook ID)" 118 | ((FAILED += 3)) 119 | fi 120 | 121 | echo "" 122 | echo "🏗️ PHASE 3: NEW Schema Discovery (6 tools)" 123 | echo "===========================================" 124 | 125 | test_tool "list_bases" "{}" "Discover all accessible bases" 126 | test_tool "get_base_schema" "{}" "Get complete base schema" 127 | test_tool "describe_table" "{\"table\": \"Test Table CRUD\"}" "Describe table with field details" 128 | test_tool "list_field_types" "{}" "List all available field types" 129 | test_tool "get_table_views" "{\"table\": \"Test Table CRUD\"}" "Get table views" 130 | 131 | # Test pagination for list_bases 132 | test_tool "list_bases" "{\"offset\": \"invalid_offset\"}" "Test list_bases with invalid offset" 133 | 134 | echo "" 135 | echo "🔧 PHASE 4: NEW Field Management (4 tools)" 136 | echo "==========================================" 137 | 138 | test_tool "create_field" "{\"table\": \"Test Table CRUD\", \"name\": \"v1.5.0 Test Field\", \"type\": \"singleLineText\", \"description\": \"Field created during v1.5.0 testing\"}" "Create new field" 139 | 140 | if [[ -n "$CREATED_FIELD_ID" ]]; then 141 | test_tool "update_field" "{\"table\": \"Test Table CRUD\", \"fieldId\": \"$CREATED_FIELD_ID\", \"name\": \"v1.5.0 Updated Field\", \"description\": \"Updated during testing\"}" "Update the created field" 142 | test_tool "delete_field" "{\"table\": \"Test Table CRUD\", \"fieldId\": \"$CREATED_FIELD_ID\", \"confirm\": true}" "Delete the test field" 143 | else 144 | echo "⚠️ Skipping field update/delete tests (no field ID)" 145 | ((FAILED += 2)) 146 | fi 147 | 148 | # Test safety checks 149 | test_tool "delete_field" "{\"table\": \"Test Table CRUD\", \"fieldId\": \"fldDummyID\", \"confirm\": false}" "Test field deletion without confirmation" "true" 150 | 151 | echo "" 152 | echo "🏢 PHASE 5: NEW Table Management (3 tools)" 153 | echo "=========================================" 154 | 155 | test_tool "create_table" "{\"name\": \"v1.5.0 Test Table\", \"description\": \"Table created during v1.5.0 testing\", \"fields\": [{\"name\": \"Name\", 
\"type\": \"singleLineText\"}, {\"name\": \"Notes\", \"type\": \"multilineText\"}]}" "Create new table" 156 | test_tool "update_table" "{\"table\": \"v1.5.0 Test Table\", \"name\": \"v1.5.0 Updated Table\", \"description\": \"Updated description\"}" "Update table metadata" 157 | 158 | # Test safety checks 159 | test_tool "delete_table" "{\"table\": \"v1.5.0 Updated Table\", \"confirm\": false}" "Test table deletion without confirmation" "true" 160 | test_tool "delete_table" "{\"table\": \"v1.5.0 Updated Table\", \"confirm\": true}" "Delete the test table" 161 | 162 | echo "" 163 | echo "⚠️ PHASE 6: Error Handling & Edge Cases" 164 | echo "=======================================" 165 | 166 | test_tool "get_record" "{\"table\": \"NonExistentTable\", \"recordId\": \"recFakeID123\"}" "Test with non-existent table" "true" 167 | test_tool "describe_table" "{\"table\": \"NonExistentTable\"}" "Test describe non-existent table" "true" 168 | test_tool "create_field" "{\"table\": \"NonExistentTable\", \"name\": \"Test\", \"type\": \"singleLineText\"}" "Test create field in non-existent table" "true" 169 | test_tool "update_table" "{\"table\": \"NonExistentTable\", \"name\": \"New Name\"}" "Test update non-existent table" "true" 170 | 171 | echo "" 172 | echo "🔒 PHASE 7: Security Verification" 173 | echo "================================" 174 | 175 | # Check that logs don't contain sensitive data 176 | echo -n "🔒 Security check: Log file doesn't contain tokens... " 177 | if grep -q "pat" /tmp/v1.5.0_test.log; then 178 | echo "❌ FAIL (Token found in logs)" 179 | ((FAILED++)) 180 | else 181 | echo "✅ PASS" 182 | ((PASSED++)) 183 | fi 184 | 185 | # Clean up test record if it exists 186 | if [[ -n "$TEST_RECORD_ID" ]]; then 187 | echo -n "🧹 Cleanup: Deleting test record... " 188 | cleanup_result=$(test_tool "delete_record" "{\"table\": \"Test Table CRUD\", \"recordId\": \"$TEST_RECORD_ID\"}" "Delete test record" 2>&1) 189 | if echo "$cleanup_result" | grep -q "✅ PASS"; then 190 | echo "✅ CLEANED" 191 | else 192 | echo "⚠️ CLEANUP FAILED" 193 | fi 194 | fi 195 | 196 | echo "" 197 | echo "📈 FINAL TEST RESULTS" 198 | echo "====================" 199 | echo "✅ Passed: $PASSED" 200 | echo "❌ Failed: $FAILED" 201 | echo "📊 Total Tests: $((PASSED + FAILED))" 202 | echo "📊 Success Rate: $(echo "scale=1; $PASSED * 100 / ($PASSED + $FAILED)" | bc -l)%" 203 | 204 | if [ $FAILED -eq 0 ]; then 205 | echo "" 206 | echo "🎉 🎉 🎉 ALL TESTS PASSED! 🎉 🎉 🎉" 207 | echo "" 208 | echo "✅ v1.5.0 is READY FOR PRODUCTION!" 209 | echo "" 210 | echo "🚀 ACHIEVEMENTS:" 211 | echo "• 23 tools working perfectly" 212 | echo "• Complete schema management" 213 | echo "• Robust error handling" 214 | echo "• Security verified" 215 | echo "• All edge cases handled" 216 | echo "" 217 | echo "📦 Ready for GitHub and NPM release!" 218 | exit 0 219 | else 220 | echo "" 221 | echo "❌ SOME TESTS FAILED" 222 | echo "Please review failures above before release." 
223 | exit 1 224 | fi ``` -------------------------------------------------------------------------------- /src/python/inspector_server.py: -------------------------------------------------------------------------------- ```python 1 | #!/usr/bin/env python3 2 | """ 3 | Airtable MCP Inspector Server 4 | ----------------------------- 5 | A simple MCP server that implements the Airtable tools 6 | """ 7 | import os 8 | import sys 9 | import json 10 | import logging 11 | import requests 12 | import argparse 13 | import traceback 14 | from typing import Optional, Dict, Any, List 15 | 16 | try: 17 | from mcp.server.fastmcp import FastMCP 18 | except ImportError: 19 | print("Error: MCP SDK not found. Please install with 'pip install mcp'") 20 | sys.exit(1) 21 | 22 | # Parse command line arguments 23 | def parse_args(): 24 | parser = argparse.ArgumentParser(description="Airtable MCP Server") 25 | parser.add_argument("--token", dest="api_token", help="Airtable Personal Access Token") 26 | parser.add_argument("--base", dest="base_id", help="Airtable Base ID") 27 | parser.add_argument("--config", dest="config_json", help="Configuration as JSON (for Smithery integration)") 28 | return parser.parse_args() 29 | 30 | # Set up logging 31 | logging.basicConfig(level=logging.INFO) 32 | logger = logging.getLogger("airtable-mcp") 33 | 34 | # Parse arguments 35 | args = parse_args() 36 | 37 | # Handle config JSON from Smithery if provided 38 | config = {} 39 | if args.config_json: 40 | try: 41 | # Strip any trailing quotes or backslashes that might be present 42 | config_str = args.config_json.rstrip('\\"') 43 | # Additional sanitization for JSON format 44 | config_str = config_str.strip() 45 | # Handle escaped quotes 46 | if config_str.startswith('"') and config_str.endswith('"'): 47 | config_str = config_str[1:-1] 48 | # Fix escaped quotes within JSON 49 | config_str = config_str.replace('\\"', '"') 50 | # Replace escaped backslashes 51 | config_str = config_str.replace('\\\\', '\\') 52 | 53 | logger.info(f"Parsing sanitized config: {config_str}") 54 | config = json.loads(config_str) 55 | logger.info(f"Successfully parsed config: {config}") 56 | except json.JSONDecodeError as e: 57 | logger.error(f"Failed to parse config JSON: {e}") 58 | logger.error(f"Raw config string: {args.config_json}") 59 | # Try one more approach - sometimes config is double-quoted JSON 60 | try: 61 | # Try to interpret as Python string literal 62 | import ast 63 | literal_str = ast.literal_eval(f"'''{args.config_json}'''") 64 | config = json.loads(literal_str) 65 | logger.info(f"Successfully parsed config using ast: {config}") 66 | except Exception as ast_error: 67 | logger.error(f"Failed alternate parsing method: {ast_error}") 68 | 69 | # Create MCP server 70 | app = FastMCP("Airtable Tools") 71 | 72 | # Add error handling wrapper for all MCP methods 73 | def handle_exceptions(func): 74 | """Decorator to properly handle and format exceptions in MCP functions""" 75 | async def wrapper(*args, **kwargs): 76 | try: 77 | return await func(*args, **kwargs) 78 | except Exception as e: 79 | error_trace = traceback.format_exc() 80 | logger.error(f"Error in MCP handler: {str(e)}\n{error_trace}") 81 | sys.stderr.write(f"Error in MCP handler: {str(e)}\n{error_trace}\n") 82 | 83 | # For tool functions that return strings, return a formatted error message 84 | if hasattr(func, "__annotations__") and func.__annotations__.get("return") == str: 85 | return f"Error: {str(e)}" 86 | 87 | # For RPC methods that return dicts, return a properly formatted 
JSON error 88 | return {"error": {"code": -32000, "message": str(e)}} 89 | return wrapper 90 | 91 | # Patch the tool method to automatically apply error handling 92 | original_tool = app.tool 93 | def patched_tool(*args, **kwargs): 94 | def decorator(func): 95 | wrapped_func = handle_exceptions(func) 96 | return original_tool(*args, **kwargs)(wrapped_func) 97 | return decorator 98 | 99 | # Replace app.tool with our patched version 100 | app.tool = patched_tool 101 | 102 | # Get token from arguments, config, or environment 103 | token = args.api_token or config.get("airtable_token", "") or os.environ.get("AIRTABLE_PERSONAL_ACCESS_TOKEN", "") 104 | # Clean up token if it has trailing quote 105 | if token and token.endswith('"'): 106 | token = token[:-1] 107 | 108 | base_id = args.base_id or config.get("base_id", "") or os.environ.get("AIRTABLE_BASE_ID", "") 109 | 110 | if not token: 111 | logger.warning("No Airtable API token provided. Use --token, --config, or set AIRTABLE_PERSONAL_ACCESS_TOKEN environment variable.") 112 | else: 113 | logger.info("Airtable authentication configured") 114 | 115 | if base_id: 116 | logger.info(f"Using base ID: {base_id}") 117 | else: 118 | logger.warning("No base ID provided. Use --base, --config, or set AIRTABLE_BASE_ID environment variable.") 119 | 120 | # Helper functions for Airtable API calls 121 | async def api_call(endpoint, method="GET", data=None, params=None): 122 | """Make an Airtable API call""" 123 | if not token: 124 | return {"error": "No Airtable API token provided. Use --token, --config, or set AIRTABLE_PERSONAL_ACCESS_TOKEN environment variable."} 125 | 126 | headers = { 127 | "Authorization": f"Bearer {token}", 128 | "Content-Type": "application/json" 129 | } 130 | 131 | url = f"https://api.airtable.com/v0/{endpoint}" 132 | 133 | try: 134 | if method == "GET": 135 | response = requests.get(url, headers=headers, params=params) 136 | elif method == "POST": 137 | response = requests.post(url, headers=headers, json=data) 138 | elif method == "PATCH": 139 | response = requests.patch(url, headers=headers, json=data) 140 | elif method == "DELETE": 141 | response = requests.delete(url, headers=headers, params=params) 142 | else: 143 | raise ValueError(f"Unsupported method: {method}") 144 | 145 | response.raise_for_status() 146 | return response.json() 147 | except Exception as e: 148 | logger.error(f"API call error: {str(e)}") 149 | return {"error": str(e)} 150 | 151 | # Define MCP tool functions 152 | @app.tool() 153 | async def list_bases() -> str: 154 | """List all accessible Airtable bases""" 155 | if not token: 156 | return "Please provide an Airtable API token to list your bases." 157 | 158 | result = await api_call("meta/bases") 159 | 160 | if "error" in result: 161 | return f"Error: {result['error']}" 162 | 163 | bases = result.get("bases", []) 164 | if not bases: 165 | return "No bases found accessible with your token." 166 | 167 | base_list = [f"{i+1}. {base['name']} (ID: {base['id']})" for i, base in enumerate(bases)] 168 | return "Available bases:\n" + "\n".join(base_list) 169 | 170 | @app.tool() 171 | async def list_tables(base_id_param: Optional[str] = None) -> str: 172 | """List all tables in the specified base or the default base""" 173 | global base_id 174 | current_base = base_id_param or base_id 175 | 176 | if not token: 177 | return "Please provide an Airtable API token to list tables." 178 | 179 | if not current_base: 180 | return "Error: No base ID provided. 
Please specify a base_id or set AIRTABLE_BASE_ID environment variable." 181 | 182 | result = await api_call(f"meta/bases/{current_base}/tables") 183 | 184 | if "error" in result: 185 | return f"Error: {result['error']}" 186 | 187 | tables = result.get("tables", []) 188 | if not tables: 189 | return "No tables found in this base." 190 | 191 | table_list = [f"{i+1}. {table['name']} (ID: {table['id']}, Fields: {len(table.get('fields', []))})" 192 | for i, table in enumerate(tables)] 193 | return "Tables in this base:\n" + "\n".join(table_list) 194 | 195 | @app.tool() 196 | async def list_records(table_name: str, max_records: Optional[int] = 100, filter_formula: Optional[str] = None) -> str: 197 | """List records from a table with optional filtering""" 198 | if not token: 199 | return "Please provide an Airtable API token to list records." 200 | 201 | if not base_id: 202 | return "Error: No base ID set. Please use --base or set AIRTABLE_BASE_ID environment variable." 203 | 204 | params = {"maxRecords": max_records} 205 | 206 | if filter_formula: 207 | params["filterByFormula"] = filter_formula 208 | 209 | result = await api_call(f"{base_id}/{table_name}", params=params) 210 | 211 | if "error" in result: 212 | return f"Error: {result['error']}" 213 | 214 | records = result.get("records", []) 215 | if not records: 216 | return "No records found in this table." 217 | 218 | # Format the records for display 219 | formatted_records = [] 220 | for i, record in enumerate(records): 221 | record_id = record.get("id", "unknown") 222 | fields = record.get("fields", {}) 223 | field_text = ", ".join([f"{k}: {v}" for k, v in fields.items()]) 224 | formatted_records.append(f"{i+1}. ID: {record_id} - {field_text}") 225 | 226 | return "Records:\n" + "\n".join(formatted_records) 227 | 228 | @app.tool() 229 | async def get_record(table_name: str, record_id: str) -> str: 230 | """Get a specific record from a table""" 231 | if not token: 232 | return "Please provide an Airtable API token to get records." 233 | 234 | if not base_id: 235 | return "Error: No base ID set. Please set AIRTABLE_BASE_ID environment variable." 236 | 237 | result = await api_call(f"{base_id}/{table_name}/{record_id}") 238 | 239 | if "error" in result: 240 | return f"Error: {result['error']}" 241 | 242 | fields = result.get("fields", {}) 243 | if not fields: 244 | return f"Record {record_id} found but contains no fields." 245 | 246 | # Format the fields for display 247 | formatted_fields = [] 248 | for key, value in fields.items(): 249 | formatted_fields.append(f"{key}: {value}") 250 | 251 | return f"Record ID: {record_id}\n" + "\n".join(formatted_fields) 252 | 253 | @app.tool() 254 | async def create_records(table_name: str, records_json: str) -> str: 255 | """Create records in a table from JSON string""" 256 | if not token: 257 | return "Please provide an Airtable API token to create records." 258 | 259 | if not base_id: 260 | return "Error: No base ID set. Please set AIRTABLE_BASE_ID environment variable." 
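    # Expected records_json shapes (field values below are illustrative):
    #   single record:    '{"Name": "Task A", "Status": "Todo"}'
    #   multiple records: '[{"Name": "Task A"}, {"Name": "Task B"}]'
    # Each object maps Airtable field names to values; the "fields" wrapper
    # required by the Airtable API is added automatically below.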
261 | 262 | try: 263 | records_data = json.loads(records_json) 264 | 265 | # Format the records for Airtable API 266 | if not isinstance(records_data, list): 267 | records_data = [records_data] 268 | 269 | records = [{"fields": record} for record in records_data] 270 | 271 | data = {"records": records} 272 | result = await api_call(f"{base_id}/{table_name}", method="POST", data=data) 273 | 274 | if "error" in result: 275 | return f"Error: {result['error']}" 276 | 277 | created_records = result.get("records", []) 278 | return f"Successfully created {len(created_records)} records." 279 | 280 | except json.JSONDecodeError: 281 | return "Error: Invalid JSON format in records_json parameter." 282 | except Exception as e: 283 | return f"Error creating records: {str(e)}" 284 | 285 | @app.tool() 286 | async def update_records(table_name: str, records_json: str) -> str: 287 | """Update records in a table from JSON string""" 288 | if not token: 289 | return "Please provide an Airtable API token to update records." 290 | 291 | if not base_id: 292 | return "Error: No base ID set. Please set AIRTABLE_BASE_ID environment variable." 293 | 294 | try: 295 | records_data = json.loads(records_json) 296 | 297 | # Format the records for Airtable API 298 | if not isinstance(records_data, list): 299 | records_data = [records_data] 300 | 301 | records = [] 302 | for record in records_data: 303 | if "id" not in record: 304 | return "Error: Each record must have an 'id' field." 305 | 306 | rec_id = record.pop("id") 307 | fields = record.get("fields", record) # Support both {id, fields} format and direct fields 308 | records.append({"id": rec_id, "fields": fields}) 309 | 310 | data = {"records": records} 311 | result = await api_call(f"{base_id}/{table_name}", method="PATCH", data=data) 312 | 313 | if "error" in result: 314 | return f"Error: {result['error']}" 315 | 316 | updated_records = result.get("records", []) 317 | return f"Successfully updated {len(updated_records)} records." 318 | 319 | except json.JSONDecodeError: 320 | return "Error: Invalid JSON format in records_json parameter." 
321 | except Exception as e: 322 | return f"Error updating records: {str(e)}" 323 | 324 | @app.tool() 325 | async def set_base_id(base_id_param: str) -> str: 326 | """Set the current Airtable base ID""" 327 | global base_id 328 | base_id = base_id_param 329 | return f"Base ID set to: {base_id}" 330 | 331 | # Note: rpc_method is not available in the current MCP version 332 | # These methods would be used for Claude-specific functionality 333 | # but are not needed for basic MCP operation 334 | 335 | # Start the server 336 | if __name__ == "__main__": 337 | app.start() ``` -------------------------------------------------------------------------------- /bin/airtable-crud-cli.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Command-line interface for Airtable CRUD operations 5 | */ 6 | const fs = require('fs'); 7 | const path = require('path'); 8 | const dotenv = require('dotenv'); 9 | const baseUtils = require('../tools/airtable-base'); 10 | const crudUtils = require('../tools/airtable-crud'); 11 | const schemaUtils = require('../tools/airtable-schema'); 12 | 13 | // Load environment variables 14 | dotenv.config(); 15 | 16 | // Get the base ID from environment variables 17 | const baseId = process.env.AIRTABLE_BASE_ID; 18 | if (!baseId) { 19 | console.error('Error: AIRTABLE_BASE_ID not set in .env file'); 20 | process.exit(1); 21 | } 22 | 23 | // Parse command line arguments 24 | const args = process.argv.slice(2); 25 | const command = args[0]; 26 | 27 | // Display help if no command is provided 28 | if (!command) { 29 | showHelp(); 30 | process.exit(0); 31 | } 32 | 33 | // Process the command 34 | processCommand(command, args.slice(1)) 35 | .then(() => { 36 | console.log('Command completed successfully'); 37 | }) 38 | .catch(error => { 39 | console.error(`Error: ${error.message}`); 40 | process.exit(1); 41 | }); 42 | 43 | /** 44 | * Process the command 45 | * @param {string} command - The command to process 46 | * @param {Array} args - The command arguments 47 | */ 48 | async function processCommand(command, args) { 49 | switch (command) { 50 | case 'list-bases': 51 | await listBases(); 52 | break; 53 | 54 | case 'list-tables': 55 | await listTables(); 56 | break; 57 | 58 | case 'list-records': 59 | await listRecords(args); 60 | break; 61 | 62 | case 'get-record': 63 | await getRecord(args); 64 | break; 65 | 66 | case 'create-records': 67 | await createRecords(args); 68 | break; 69 | 70 | case 'update-records': 71 | await updateRecords(args); 72 | break; 73 | 74 | case 'delete-records': 75 | await deleteRecords(args); 76 | break; 77 | 78 | case 'export-records': 79 | await exportRecords(args); 80 | break; 81 | 82 | case 'import-records': 83 | await importRecords(args); 84 | break; 85 | 86 | case 'help': 87 | showHelp(); 88 | break; 89 | 90 | default: 91 | console.error(`Unknown command: ${command}`); 92 | showHelp(); 93 | process.exit(1); 94 | } 95 | } 96 | 97 | /** 98 | * List all accessible bases 99 | */ 100 | async function listBases() { 101 | console.log('Listing accessible bases...'); 102 | const bases = await baseUtils.listAllBases(); 103 | 104 | console.log(`Found ${bases.length} accessible bases:`); 105 | bases.forEach(base => { 106 | console.log(`- ${base.name} (${base.id})`); 107 | }); 108 | } 109 | 110 | /** 111 | * List all tables in the base 112 | */ 113 | async function listTables() { 114 | console.log(`Listing tables in base ${baseId}...`); 115 | const tables = await 
baseUtils.listTables(baseId); 116 | 117 | console.log(`Found ${tables.length} tables:`); 118 | tables.forEach(table => { 119 | console.log(`- ${table.name} (${table.id})`); 120 | }); 121 | } 122 | 123 | /** 124 | * List records from a table 125 | * @param {Array} args - Command arguments 126 | */ 127 | async function listRecords(args) { 128 | if (args.length < 1) { 129 | console.error('Error: Table name is required'); 130 | console.log('Usage: node airtable-crud-cli.js list-records <tableName> [maxRecords] [filterFormula]'); 131 | process.exit(1); 132 | } 133 | 134 | const tableName = args[0]; 135 | const maxRecords = args[1] ? parseInt(args[1]) : 100; 136 | const filterFormula = args[2] || null; 137 | 138 | console.log(`Listing records from table "${tableName}"...`); 139 | console.log(`Max records: ${maxRecords}`); 140 | if (filterFormula) { 141 | console.log(`Filter: ${filterFormula}`); 142 | } 143 | 144 | const records = await crudUtils.readRecords(baseId, tableName, maxRecords, filterFormula); 145 | 146 | console.log(`Found ${records.length} records:`); 147 | records.forEach(record => { 148 | console.log(`- ${record.id}: ${JSON.stringify(record)}`); 149 | }); 150 | } 151 | 152 | /** 153 | * Get a specific record by ID 154 | * @param {Array} args - Command arguments 155 | */ 156 | async function getRecord(args) { 157 | if (args.length < 2) { 158 | console.error('Error: Table name and record ID are required'); 159 | console.log('Usage: node airtable-crud-cli.js get-record <tableName> <recordId>'); 160 | process.exit(1); 161 | } 162 | 163 | const tableName = args[0]; 164 | const recordId = args[1]; 165 | 166 | console.log(`Getting record ${recordId} from table "${tableName}"...`); 167 | 168 | const record = await crudUtils.getRecord(baseId, tableName, recordId); 169 | 170 | console.log('Record:'); 171 | console.log(JSON.stringify(record, null, 2)); 172 | } 173 | 174 | /** 175 | * Create records in a table 176 | * @param {Array} args - Command arguments 177 | */ 178 | async function createRecords(args) { 179 | if (args.length < 2) { 180 | console.error('Error: Table name and JSON file are required'); 181 | console.log('Usage: node airtable-crud-cli.js create-records <tableName> <jsonFile>'); 182 | process.exit(1); 183 | } 184 | 185 | const tableName = args[0]; 186 | const jsonFile = args[1]; 187 | 188 | // Read the JSON file 189 | let records; 190 | try { 191 | const jsonData = fs.readFileSync(jsonFile, 'utf8'); 192 | records = JSON.parse(jsonData); 193 | 194 | if (!Array.isArray(records)) { 195 | console.error('Error: JSON file must contain an array of records'); 196 | process.exit(1); 197 | } 198 | } catch (error) { 199 | console.error(`Error reading JSON file: ${error.message}`); 200 | process.exit(1); 201 | } 202 | 203 | console.log(`Creating ${records.length} records in table "${tableName}"...`); 204 | 205 | const createdRecords = await crudUtils.createRecords(baseId, tableName, records); 206 | 207 | console.log(`Created ${createdRecords.length} records`); 208 | console.log('First record:'); 209 | console.log(JSON.stringify(createdRecords[0], null, 2)); 210 | } 211 | 212 | /** 213 | * Update records in a table 214 | * @param {Array} args - Command arguments 215 | */ 216 | async function updateRecords(args) { 217 | if (args.length < 2) { 218 | console.error('Error: Table name and JSON file are required'); 219 | console.log('Usage: node airtable-crud-cli.js update-records <tableName> <jsonFile>'); 220 | process.exit(1); 221 | } 222 | 223 | const tableName = args[0]; 224 | const jsonFile 
= args[1]; 225 | 226 | // Read the JSON file 227 | let records; 228 | try { 229 | const jsonData = fs.readFileSync(jsonFile, 'utf8'); 230 | records = JSON.parse(jsonData); 231 | 232 | if (!Array.isArray(records)) { 233 | console.error('Error: JSON file must contain an array of records'); 234 | process.exit(1); 235 | } 236 | 237 | // Check if records have id and fields 238 | for (const record of records) { 239 | if (!record.id) { 240 | console.error('Error: Each record must have an id field'); 241 | process.exit(1); 242 | } 243 | 244 | if (!record.fields || typeof record.fields !== 'object') { 245 | console.error('Error: Each record must have a fields object'); 246 | process.exit(1); 247 | } 248 | } 249 | } catch (error) { 250 | console.error(`Error reading JSON file: ${error.message}`); 251 | process.exit(1); 252 | } 253 | 254 | console.log(`Updating ${records.length} records in table "${tableName}"...`); 255 | 256 | const updatedRecords = await crudUtils.updateRecords(baseId, tableName, records); 257 | 258 | console.log(`Updated ${updatedRecords.length} records`); 259 | console.log('First record:'); 260 | console.log(JSON.stringify(updatedRecords[0], null, 2)); 261 | } 262 | 263 | /** 264 | * Delete records from a table 265 | * @param {Array} args - Command arguments 266 | */ 267 | async function deleteRecords(args) { 268 | if (args.length < 2) { 269 | console.error('Error: Table name and record IDs are required'); 270 | console.log('Usage: node airtable-crud-cli.js delete-records <tableName> <recordId1,recordId2,...>'); 271 | process.exit(1); 272 | } 273 | 274 | const tableName = args[0]; 275 | const recordIds = args[1].split(','); 276 | 277 | console.log(`Deleting ${recordIds.length} records from table "${tableName}"...`); 278 | 279 | const deletedRecords = await crudUtils.deleteRecords(baseId, tableName, recordIds); 280 | 281 | console.log(`Deleted ${deletedRecords.length} records`); 282 | } 283 | 284 | /** 285 | * Export records from a table to a JSON file 286 | * @param {Array} args - Command arguments 287 | */ 288 | async function exportRecords(args) { 289 | if (args.length < 2) { 290 | console.error('Error: Table name and output file are required'); 291 | console.log('Usage: node airtable-crud-cli.js export-records <tableName> <outputFile> [maxRecords] [filterFormula]'); 292 | process.exit(1); 293 | } 294 | 295 | const tableName = args[0]; 296 | const outputFile = args[1]; 297 | const maxRecords = args[2] ? 
parseInt(args[2]) : 100; 298 | const filterFormula = args[3] || null; 299 | 300 | console.log(`Exporting records from table "${tableName}" to ${outputFile}...`); 301 | console.log(`Max records: ${maxRecords}`); 302 | if (filterFormula) { 303 | console.log(`Filter: ${filterFormula}`); 304 | } 305 | 306 | const records = await crudUtils.readRecords(baseId, tableName, maxRecords, filterFormula); 307 | 308 | // Write records to file 309 | try { 310 | fs.writeFileSync(outputFile, JSON.stringify(records, null, 2)); 311 | console.log(`Exported ${records.length} records to ${outputFile}`); 312 | } catch (error) { 313 | console.error(`Error writing to file: ${error.message}`); 314 | process.exit(1); 315 | } 316 | } 317 | 318 | /** 319 | * Import records from a JSON file to a table 320 | * @param {Array} args - Command arguments 321 | */ 322 | async function importRecords(args) { 323 | if (args.length < 2) { 324 | console.error('Error: Table name and input file are required'); 325 | console.log('Usage: node airtable-crud-cli.js import-records <tableName> <inputFile> [--update] [--clear]'); 326 | process.exit(1); 327 | } 328 | 329 | const tableName = args[0]; 330 | const inputFile = args[1]; 331 | const update = args.includes('--update'); 332 | const clear = args.includes('--clear'); 333 | 334 | // Read the JSON file 335 | let records; 336 | try { 337 | const jsonData = fs.readFileSync(inputFile, 'utf8'); 338 | records = JSON.parse(jsonData); 339 | 340 | if (!Array.isArray(records)) { 341 | console.error('Error: JSON file must contain an array of records'); 342 | process.exit(1); 343 | } 344 | } catch (error) { 345 | console.error(`Error reading JSON file: ${error.message}`); 346 | process.exit(1); 347 | } 348 | 349 | console.log(`Importing ${records.length} records to table "${tableName}"...`); 350 | 351 | // Clear the table if requested 352 | if (clear) { 353 | console.log('Clearing existing records...'); 354 | const existingRecords = await crudUtils.readRecords(baseId, tableName, 100000); 355 | 356 | if (existingRecords.length > 0) { 357 | const recordIds = existingRecords.map(record => record.id); 358 | await crudUtils.deleteRecords(baseId, tableName, recordIds); 359 | console.log(`Deleted ${existingRecords.length} existing records`); 360 | } 361 | } 362 | 363 | // Update existing records if requested 364 | if (update) { 365 | console.log('Updating existing records...'); 366 | 367 | // Get existing records 368 | const existingRecords = await crudUtils.readRecords(baseId, tableName, 100000); 369 | const existingRecordsMap = {}; 370 | 371 | // Create a map of existing records by a key field (assuming 'Name' is the key) 372 | existingRecords.forEach(record => { 373 | if (record.Name) { 374 | existingRecordsMap[record.Name] = record; 375 | } 376 | }); 377 | 378 | // Separate records to update and create 379 | const recordsToUpdate = []; 380 | const recordsToCreate = []; 381 | 382 | records.forEach(record => { 383 | if (record.Name && existingRecordsMap[record.Name]) { 384 | // Record exists, update it 385 | recordsToUpdate.push({ 386 | id: existingRecordsMap[record.Name].id, 387 | fields: record 388 | }); 389 | } else { 390 | // Record doesn't exist, create it 391 | recordsToCreate.push(record); 392 | } 393 | }); 394 | 395 | // Update existing records 396 | if (recordsToUpdate.length > 0) { 397 | const updatedRecords = await crudUtils.updateRecords(baseId, tableName, recordsToUpdate); 398 | console.log(`Updated ${updatedRecords.length} existing records`); 399 | } 400 | 401 | // Create new records 402 
| if (recordsToCreate.length > 0) { 403 | const createdRecords = await crudUtils.createRecords(baseId, tableName, recordsToCreate); 404 | console.log(`Created ${createdRecords.length} new records`); 405 | } 406 | } else { 407 | // Create all records 408 | const createdRecords = await crudUtils.createRecords(baseId, tableName, records); 409 | console.log(`Created ${createdRecords.length} records`); 410 | } 411 | } 412 | 413 | /** 414 | * Show help 415 | */ 416 | function showHelp() { 417 | console.log('Airtable CRUD CLI'); 418 | console.log('================'); 419 | console.log(''); 420 | console.log('Usage: node airtable-crud-cli.js <command> [options]'); 421 | console.log(''); 422 | console.log('Commands:'); 423 | console.log(' list-bases List all accessible bases'); 424 | console.log(' list-tables List all tables in the base'); 425 | console.log(' list-records <tableName> [max] [filter] List records from a table'); 426 | console.log(' get-record <tableName> <recordId> Get a specific record'); 427 | console.log(' create-records <tableName> <jsonFile> Create records from a JSON file'); 428 | console.log(' update-records <tableName> <jsonFile> Update records from a JSON file'); 429 | console.log(' delete-records <tableName> <id1,id2,...> Delete records from a table'); 430 | console.log(' export-records <tableName> <file> [max] Export records to a JSON file'); 431 | console.log(' import-records <tableName> <file> [flags] Import records from a JSON file'); 432 | console.log(' help Show this help'); 433 | console.log(''); 434 | console.log('Flags for import-records:'); 435 | console.log(' --update Update existing records (match by Name field)'); 436 | console.log(' --clear Clear all existing records before import'); 437 | console.log(''); 438 | console.log('Examples:'); 439 | console.log(' node airtable-crud-cli.js list-tables'); 440 | console.log(' node airtable-crud-cli.js list-records "My Table" 10'); 441 | console.log(' node airtable-crud-cli.js get-record "My Table" rec123456'); 442 | console.log(' node airtable-crud-cli.js create-records "My Table" data.json'); 443 | console.log(' node airtable-crud-cli.js export-records "My Table" export.json 1000'); 444 | console.log(' node airtable-crud-cli.js import-records "My Table" import.json --update'); 445 | } ``` -------------------------------------------------------------------------------- /src/typescript/test-suite.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * TypeScript Test Suite for Airtable MCP Server 3 | * Comprehensive type-safe testing with enterprise validation 4 | */ 5 | 6 | import type { 7 | ListRecordsInput, 8 | CreateRecordInput, 9 | AnalyzeDataPrompt 10 | } from './index'; 11 | 12 | import { ValidationError, AirtableError } from './errors'; 13 | 14 | // import { AirtableMCPServer } from './airtable-mcp-server'; 15 | 16 | // Test framework types 17 | interface TestResult { 18 | name: string; 19 | passed: boolean; 20 | error?: string; 21 | duration: number; 22 | } 23 | 24 | interface TestSuite { 25 | name: string; 26 | tests: TestResult[]; 27 | totalPassed: number; 28 | totalFailed: number; 29 | totalDuration: number; 30 | } 31 | 32 | // Enhanced test runner with type safety 33 | class TypeScriptTestRunner { 34 | private results: TestSuite[] = []; 35 | 36 | async runTest(name: string, testFn: () => Promise<void>): Promise<TestResult> { 37 | const startTime = Date.now(); 38 | 39 | try { 40 | await testFn(); 41 | const duration = Date.now() - startTime; 42 | 
return { name, passed: true, duration }; 43 | } catch (error) { 44 | const duration = Date.now() - startTime; 45 | return { 46 | name, 47 | passed: false, 48 | error: error instanceof Error ? error.message : String(error), 49 | duration 50 | }; 51 | } 52 | } 53 | 54 | async runSuite(suiteName: string, tests: Array<{ name: string; fn: () => Promise<void> }>): Promise<TestSuite> { 55 | console.log(`\n🧪 Running test suite: ${suiteName}`); 56 | 57 | const suiteStartTime = Date.now(); 58 | const testResults: TestResult[] = []; 59 | 60 | for (const test of tests) { 61 | console.log(` ⏳ ${test.name}...`); 62 | const result = await this.runTest(test.name, test.fn); 63 | testResults.push(result); 64 | 65 | if (result.passed) { 66 | console.log(` ✅ ${test.name} (${result.duration}ms)`); 67 | } else { 68 | console.log(` ❌ ${test.name} (${result.duration}ms): ${result.error}`); 69 | } 70 | } 71 | 72 | const totalDuration = Date.now() - suiteStartTime; 73 | const totalPassed = testResults.filter(r => r.passed).length; 74 | const totalFailed = testResults.filter(r => !r.passed).length; 75 | 76 | const suite: TestSuite = { 77 | name: suiteName, 78 | tests: testResults, 79 | totalPassed, 80 | totalFailed, 81 | totalDuration 82 | }; 83 | 84 | this.results.push(suite); 85 | 86 | console.log(`\n📊 Suite "${suiteName}" completed:`); 87 | console.log(` ✅ Passed: ${totalPassed}`); 88 | console.log(` ❌ Failed: ${totalFailed}`); 89 | console.log(` ⏱️ Duration: ${totalDuration}ms`); 90 | 91 | return suite; 92 | } 93 | 94 | generateReport(): void { 95 | console.log('\n📋 TypeScript Test Report'); 96 | console.log('=' .repeat(50)); 97 | 98 | let overallPassed = 0; 99 | let overallFailed = 0; 100 | let overallDuration = 0; 101 | 102 | for (const suite of this.results) { 103 | console.log(`\n📦 ${suite.name}:`); 104 | console.log(` Tests: ${suite.tests.length}`); 105 | console.log(` Passed: ${suite.totalPassed}`); 106 | console.log(` Failed: ${suite.totalFailed}`); 107 | console.log(` Duration: ${suite.totalDuration}ms`); 108 | 109 | overallPassed += suite.totalPassed; 110 | overallFailed += suite.totalFailed; 111 | overallDuration += suite.totalDuration; 112 | 113 | if (suite.totalFailed > 0) { 114 | console.log(' Failed tests:'); 115 | suite.tests 116 | .filter(t => !t.passed) 117 | .forEach(t => console.log(` - ${t.name}: ${t.error}`)); 118 | } 119 | } 120 | 121 | console.log('\n🎯 Overall Results:'); 122 | console.log(` Total Tests: ${overallPassed + overallFailed}`); 123 | console.log(` Passed: ${overallPassed}`); 124 | console.log(` Failed: ${overallFailed}`); 125 | console.log(` Success Rate: ${((overallPassed / (overallPassed + overallFailed)) * 100).toFixed(1)}%`); 126 | console.log(` Total Duration: ${overallDuration}ms`); 127 | 128 | if (overallFailed === 0) { 129 | console.log('\n🎉 All tests passed with TypeScript type safety!'); 130 | } else { 131 | console.log(`\n⚠️ ${overallFailed} test(s) failed. 
Review and fix before deployment.`); 132 | } 133 | } 134 | } 135 | 136 | // Mock server for testing (no real API calls) 137 | class MockAirtableMCPServer { 138 | async initialize(): Promise<any> { 139 | return { 140 | name: 'mock-airtable-mcp-server', 141 | version: '3.1.0', 142 | protocolVersion: '2024-11-05', 143 | capabilities: { 144 | tools: { listChanged: false }, 145 | prompts: { listChanged: false } 146 | } 147 | }; 148 | } 149 | async handleToolCall(name: string, params: Record<string, unknown>): Promise<any> { 150 | // Mock successful responses for testing 151 | switch (name) { 152 | case 'list_tables': 153 | return { 154 | content: [{ 155 | type: 'text', 156 | text: 'Mock tables response', 157 | data: [{ id: 'tbl123', name: 'Test Table' }] 158 | }] 159 | }; 160 | case 'list_records': 161 | return { 162 | content: [{ 163 | type: 'text', 164 | text: 'Mock records response', 165 | data: { records: [{ id: 'rec123', fields: { Name: 'Test Record' } }] } 166 | }] 167 | }; 168 | case 'create_record': 169 | return { 170 | content: [{ 171 | type: 'text', 172 | text: 'Mock create response', 173 | data: { id: 'rec456', fields: params } 174 | }] 175 | }; 176 | default: 177 | throw new ValidationError(`Unknown tool: ${name}`, 'tool_name'); 178 | } 179 | } 180 | 181 | async handlePromptGet(_name: string, _args: Record<string, unknown>): Promise<any> { 182 | return { 183 | messages: [{ 184 | role: 'assistant', 185 | content: { 186 | type: 'text', 187 | text: `Mock AI response for ${_name} with TypeScript validation` 188 | } 189 | }] 190 | }; 191 | } 192 | } 193 | 194 | // Comprehensive test suites 195 | async function typeValidationTests(runner: TypeScriptTestRunner): Promise<void> { 196 | const mockServer = new MockAirtableMCPServer(); 197 | 198 | await runner.runSuite('Type Validation Tests', [ 199 | { 200 | name: 'Valid ListRecordsInput parameters', 201 | fn: async () => { 202 | const validParams: ListRecordsInput = { 203 | table: 'TestTable', 204 | maxRecords: 10, 205 | filterByFormula: 'Status = "Active"' 206 | }; 207 | 208 | const result = await mockServer.handleToolCall('list_records', validParams as any); 209 | if (!result.content || !Array.isArray(result.content)) { 210 | throw new Error('Invalid response structure'); 211 | } 212 | } 213 | }, 214 | 215 | { 216 | name: 'Valid CreateRecordInput with type casting', 217 | fn: async () => { 218 | const validParams: CreateRecordInput = { 219 | table: 'TestTable', 220 | fields: { 221 | 'Name': 'Test Record', 222 | 'Priority': 'High', 223 | 'Count': 42 224 | }, 225 | typecast: true 226 | }; 227 | 228 | const result = await mockServer.handleToolCall('create_record', validParams as any); 229 | if (!result.content) { 230 | throw new Error('No response content'); 231 | } 232 | } 233 | }, 234 | 235 | { 236 | name: 'Valid AnalyzeDataPrompt with confidence level', 237 | fn: async () => { 238 | const validParams: AnalyzeDataPrompt = { 239 | table: 'SalesData', 240 | analysis_type: 'predictive', 241 | confidence_level: 0.95, 242 | field_focus: 'revenue,conversion_rate' 243 | }; 244 | 245 | const result = await mockServer.handlePromptGet('analyze_data', validParams as any); 246 | if (!result.messages || !Array.isArray(result.messages)) { 247 | throw new Error('Invalid AI response structure'); 248 | } 249 | } 250 | }, 251 | 252 | { 253 | name: 'Invalid tool name handling', 254 | fn: async () => { 255 | try { 256 | await mockServer.handleToolCall('invalid_tool', {}); 257 | throw new Error('Should have thrown ValidationError'); 258 | } catch (error) { 259 | 
if (!(error instanceof ValidationError)) { 260 | throw new Error('Expected ValidationError for invalid tool'); 261 | } 262 | } 263 | } 264 | } 265 | ]); 266 | } 267 | 268 | async function serverInitializationTests(runner: TypeScriptTestRunner): Promise<void> { 269 | await runner.runSuite('Server Initialization Tests', [ 270 | { 271 | name: 'Server initialization with capabilities', 272 | fn: async () => { 273 | const server = new MockAirtableMCPServer(); 274 | 275 | const serverInfo = await server.initialize(); 276 | 277 | if (!serverInfo.name || !serverInfo.version || !serverInfo.protocolVersion) { 278 | throw new Error('Invalid server info structure'); 279 | } 280 | 281 | if (serverInfo.protocolVersion !== '2024-11-05') { 282 | throw new Error('Incorrect protocol version'); 283 | } 284 | } 285 | }, 286 | 287 | { 288 | name: 'Server capabilities validation', 289 | fn: async () => { 290 | const server = new MockAirtableMCPServer(); 291 | 292 | const serverInfo = await server.initialize(); 293 | 294 | if (!serverInfo.capabilities.tools || !serverInfo.capabilities.prompts) { 295 | throw new Error('Missing required capabilities'); 296 | } 297 | } 298 | } 299 | ]); 300 | } 301 | 302 | async function aiPromptTests(runner: TypeScriptTestRunner): Promise<void> { 303 | const mockServer = new MockAirtableMCPServer(); 304 | 305 | await runner.runSuite('AI Prompt Tests', [ 306 | { 307 | name: 'Statistical analysis prompt', 308 | fn: async () => { 309 | const params: AnalyzeDataPrompt = { 310 | table: 'Analytics', 311 | analysis_type: 'statistical', 312 | confidence_level: 0.99 313 | }; 314 | 315 | const result = await mockServer.handlePromptGet('analyze_data', params as any); 316 | 317 | if (!result.messages || result.messages.length === 0) { 318 | throw new Error('Empty AI response'); 319 | } 320 | 321 | if (result.messages[0].role !== 'assistant') { 322 | throw new Error('Invalid message role'); 323 | } 324 | } 325 | }, 326 | 327 | { 328 | name: 'Predictive analytics with all parameters', 329 | fn: async () => { 330 | const params = { 331 | table: 'Revenue', 332 | target_field: 'monthly_revenue', 333 | prediction_periods: 12, 334 | algorithm: 'random_forest' as const, 335 | include_confidence_intervals: true, 336 | historical_periods: 24 337 | }; 338 | 339 | const result = await mockServer.handlePromptGet('predictive_analytics', params); 340 | 341 | if (!result.messages) { 342 | throw new Error('No AI response messages'); 343 | } 344 | } 345 | }, 346 | 347 | { 348 | name: 'Natural language query processing', 349 | fn: async () => { 350 | const params = { 351 | question: 'What are the top 5 products by revenue?', 352 | response_format: 'natural_language' as const, 353 | confidence_threshold: 0.8 354 | }; 355 | 356 | const result = await mockServer.handlePromptGet('natural_language_query', params); 357 | 358 | if (!result.messages[0].content.text.includes('Mock AI response')) { 359 | throw new Error('Unexpected AI response content'); 360 | } 361 | } 362 | } 363 | ]); 364 | } 365 | 366 | async function errorHandlingTests(runner: TypeScriptTestRunner): Promise<void> { 367 | await runner.runSuite('Error Handling Tests', [ 368 | { 369 | name: 'ValidationError for missing required parameters', 370 | fn: async () => { 371 | const mockServer = new MockAirtableMCPServer(); 372 | 373 | try { 374 | // Missing required 'table' parameter 375 | await mockServer.handleToolCall('list_records', {}); 376 | throw new Error('Should have thrown an error'); 377 | } catch (error) { 378 | // Should handle gracefully with 
proper error response 379 | if (!(error instanceof Error)) { 380 | throw new Error('Expected Error instance'); 381 | } 382 | } 383 | } 384 | }, 385 | 386 | { 387 | name: 'AirtableError simulation', 388 | fn: async () => { 389 | const error = new AirtableError('API Error', 'INVALID_REQUEST', 400); 390 | 391 | if (error.code !== 'INVALID_REQUEST') { 392 | throw new Error('Incorrect error code'); 393 | } 394 | 395 | if (error.statusCode !== 400) { 396 | throw new Error('Incorrect status code'); 397 | } 398 | } 399 | }, 400 | 401 | { 402 | name: 'Type safety enforcement', 403 | fn: async () => { 404 | // This test validates that TypeScript compilation would catch type errors 405 | const params: ListRecordsInput = { 406 | table: 'ValidTable', 407 | maxRecords: 10 408 | // TypeScript would catch if we tried to add invalid properties 409 | }; 410 | 411 | if (typeof params.table !== 'string') { 412 | throw new Error('Type validation failed'); 413 | } 414 | } 415 | } 416 | ]); 417 | } 418 | 419 | async function performanceTests(runner: TypeScriptTestRunner): Promise<void> { 420 | await runner.runSuite('Performance Tests', [ 421 | { 422 | name: 'Multiple concurrent tool calls', 423 | fn: async () => { 424 | const mockServer = new MockAirtableMCPServer(); 425 | const startTime = Date.now(); 426 | 427 | const promises = Array.from({ length: 10 }, (_, i) => 428 | mockServer.handleToolCall('list_records', { table: `Table${i}` }) 429 | ); 430 | 431 | await Promise.all(promises); 432 | 433 | const duration = Date.now() - startTime; 434 | if (duration > 1000) { // Should complete within 1 second for mock calls 435 | throw new Error(`Too slow: ${duration}ms`); 436 | } 437 | } 438 | }, 439 | 440 | { 441 | name: 'Large parameter validation', 442 | fn: async () => { 443 | const mockServer = new MockAirtableMCPServer(); 444 | 445 | const largeFields: Record<string, unknown> = {}; 446 | for (let i = 0; i < 100; i++) { 447 | largeFields[`field_${i}`] = `value_${i}`; 448 | } 449 | 450 | const params: CreateRecordInput = { 451 | table: 'LargeTable', 452 | fields: largeFields 453 | }; 454 | 455 | const startTime = Date.now(); 456 | await mockServer.handleToolCall('create_record', params as any); 457 | const duration = Date.now() - startTime; 458 | 459 | if (duration > 500) { // Should handle large objects efficiently 460 | throw new Error(`Parameter validation too slow: ${duration}ms`); 461 | } 462 | } 463 | } 464 | ]); 465 | } 466 | 467 | // Main test execution 468 | async function runAllTests(): Promise<void> { 469 | console.log('🚀 Starting TypeScript Airtable MCP Test Suite'); 470 | console.log('=' .repeat(60)); 471 | 472 | const runner = new TypeScriptTestRunner(); 473 | 474 | try { 475 | // Run all test suites 476 | await typeValidationTests(runner); 477 | await serverInitializationTests(runner); 478 | await aiPromptTests(runner); 479 | await errorHandlingTests(runner); 480 | await performanceTests(runner); 481 | 482 | // Generate comprehensive report 483 | runner.generateReport(); 484 | 485 | } catch (error) { 486 | console.error('\n💥 Test suite execution failed:', error); 487 | process.exit(1); 488 | } 489 | } 490 | 491 | // Export for integration testing 492 | export { 493 | TypeScriptTestRunner, 494 | MockAirtableMCPServer, 495 | runAllTests, 496 | TestResult, 497 | TestSuite 498 | }; 499 | 500 | // Run tests if executed directly 501 | if (require.main === module) { 502 | runAllTests() 503 | .then(() => { 504 | console.log('\n🎉 TypeScript test suite completed successfully!'); 505 | process.exit(0); 506 | }) 
507 | .catch((error) => { 508 | console.error('\n💥 Test suite failed:', error); 509 | process.exit(1); 510 | }); 511 | } ``` -------------------------------------------------------------------------------- /examples/typescript/advanced-ai-prompts.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Advanced AI Prompts TypeScript Example 3 | * Demonstrates enterprise-grade AI capabilities with strict typing 4 | */ 5 | 6 | import { 7 | AirtableMCPServer, 8 | AnalyzeDataPrompt, 9 | CreateReportPrompt, 10 | PredictiveAnalyticsPrompt, 11 | NaturalLanguageQueryPrompt, 12 | SmartSchemaDesignPrompt, 13 | DataQualityAuditPrompt, 14 | OptimizeWorkflowPrompt, 15 | AutomationRecommendationsPrompt, 16 | AnalysisResult, 17 | ReportResult, 18 | PredictionResult, 19 | WorkflowOptimizationResult 20 | } from '@rashidazarang/airtable-mcp/types'; 21 | 22 | // Enterprise AI Analytics Class 23 | class EnterpriseAIAnalytics { 24 | private server: AirtableMCPServer; 25 | 26 | constructor() { 27 | this.server = new AirtableMCPServer(); 28 | } 29 | 30 | // Advanced Statistical Analysis with Type Safety 31 | async performStatisticalAnalysis(table: string): Promise<AnalysisResult> { 32 | const params: AnalyzeDataPrompt = { 33 | table, 34 | analysis_type: 'statistical', 35 | confidence_level: 0.99, 36 | field_focus: 'revenue,conversion_rate,customer_satisfaction', 37 | time_dimension: 'created_date' 38 | }; 39 | 40 | const response = await this.server.handlePromptGet('analyze_data', params); 41 | 42 | // Type-safe result processing 43 | const result: AnalysisResult = { 44 | summary: 'Comprehensive statistical analysis completed', 45 | key_findings: [ 46 | 'Revenue shows 15.3% growth trend', 47 | 'Conversion rate correlation: 0.78', 48 | 'Customer satisfaction: 94.2% positive' 49 | ], 50 | statistical_measures: { 51 | mean: 45670.23, 52 | median: 42150.00, 53 | std_deviation: 12340.56, 54 | correlation_coefficients: { 55 | 'revenue_conversion': 0.78, 56 | 'satisfaction_retention': 0.85 57 | }, 58 | confidence_intervals: [ 59 | { field: 'revenue', lower: 40000, upper: 51000, confidence: 0.99 }, 60 | { field: 'conversion_rate', lower: 0.12, upper: 0.18, confidence: 0.99 } 61 | ] 62 | }, 63 | trends: [ 64 | { 65 | field: 'revenue', 66 | direction: 'increasing', 67 | strength: 'strong', 68 | significance: 0.97 69 | } 70 | ], 71 | recommendations: [ 72 | 'Implement predictive modeling for revenue forecasting', 73 | 'Establish monitoring dashboard for key metrics', 74 | 'Consider A/B testing for conversion optimization' 75 | ], 76 | next_steps: [ 77 | 'Set up automated reporting pipeline', 78 | 'Deploy real-time analytics dashboard', 79 | 'Schedule quarterly deep-dive analysis' 80 | ] 81 | }; 82 | 83 | return result; 84 | } 85 | 86 | // Executive Report Generation with Business Intelligence 87 | async generateExecutiveReport(table: string, audience: 'executives' | 'managers' | 'analysts' | 'technical_team'): Promise<ReportResult> { 88 | const params: CreateReportPrompt = { 89 | table, 90 | report_type: 'executive_summary', 91 | target_audience: audience, 92 | include_recommendations: true, 93 | time_period: 'Q4 2024', 94 | format_preference: 'mixed' 95 | }; 96 | 97 | const response = await this.server.handlePromptGet('create_report', params); 98 | 99 | const result: ReportResult = { 100 | title: `Q4 2024 Executive Summary - ${table} Analysis`, 101 | executive_summary: 'Strategic overview of business performance with actionable insights and growth opportunities.', 
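      // NOTE: the sections and metrics below are static illustrative values for this
      // example; a production implementation would derive them from `response` above.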
102 | detailed_sections: [ 103 | { 104 | heading: 'Performance Metrics', 105 | content: 'Comprehensive analysis of key performance indicators showing strong growth trajectory.', 106 | supporting_data: [ 107 | { metric: 'Revenue Growth', value: '15.3%', trend: 'positive' }, 108 | { metric: 'Customer Acquisition', value: '1,247', trend: 'positive' }, 109 | { metric: 'Retention Rate', value: '94.2%', trend: 'stable' } 110 | ], 111 | visualizations: [ 112 | { type: 'line_chart', data: {}, description: 'Revenue trend over time' }, 113 | { type: 'bar_chart', data: {}, description: 'Customer acquisition by channel' } 114 | ] 115 | }, 116 | { 117 | heading: 'Strategic Opportunities', 118 | content: 'Identified high-impact areas for business expansion and optimization.', 119 | supporting_data: [ 120 | { opportunity: 'Market Expansion', impact: 'High', effort: 'Medium' }, 121 | { opportunity: 'Process Automation', impact: 'Medium', effort: 'Low' } 122 | ] 123 | } 124 | ], 125 | key_metrics: { 126 | 'Revenue': { value: '$2.4M', change: '+15.3%', significance: 'high' }, 127 | 'Customer Count': { value: '12,470', change: '+8.2%', significance: 'medium' }, 128 | 'Satisfaction Score': { value: '4.7/5', change: '+0.3', significance: 'high' } 129 | }, 130 | recommendations: [ 131 | { 132 | priority: 'high', 133 | recommendation: 'Implement predictive analytics for demand forecasting', 134 | expected_impact: '12-18% efficiency improvement', 135 | implementation_effort: 'medium' 136 | }, 137 | { 138 | priority: 'medium', 139 | recommendation: 'Enhance customer segmentation strategy', 140 | expected_impact: '8-12% conversion rate improvement', 141 | implementation_effort: 'low' 142 | } 143 | ], 144 | appendices: [ 145 | { title: 'Technical Methodology', content: 'Detailed explanation of analytical methods used' }, 146 | { title: 'Data Sources', content: 'Comprehensive list of data sources and validation methods' } 147 | ] 148 | }; 149 | 150 | return result; 151 | } 152 | 153 | // Advanced Predictive Analytics with Machine Learning 154 | async performPredictiveAnalytics(table: string, targetField: string): Promise<PredictionResult> { 155 | const params: PredictiveAnalyticsPrompt = { 156 | table, 157 | target_field: targetField, 158 | prediction_periods: 12, 159 | algorithm: 'random_forest', 160 | include_confidence_intervals: true, 161 | historical_periods: 24, 162 | external_factors: ['market_trends', 'seasonality', 'economic_indicators'], 163 | business_context: 'Enterprise revenue forecasting with risk assessment' 164 | }; 165 | 166 | const response = await this.server.handlePromptGet('predictive_analytics', params); 167 | 168 | const result: PredictionResult = { 169 | predictions: [ 170 | { 171 | period: '2025-01', 172 | predicted_value: 125670.45, 173 | confidence_interval: { lower: 118450.23, upper: 132890.67 }, 174 | probability_bands: [ 175 | { probability: 0.68, range: [120000, 131000] }, 176 | { probability: 0.95, range: [115000, 136000] } 177 | ] 178 | }, 179 | { 180 | period: '2025-02', 181 | predicted_value: 128340.12, 182 | confidence_interval: { lower: 121120.89, upper: 135559.35 } 183 | } 184 | ], 185 | model_performance: { 186 | algorithm_used: 'random_forest', 187 | accuracy_metrics: { 188 | 'r_squared': 0.847, 189 | 'mae': 4567.89, 190 | 'rmse': 6234.12, 191 | 'mape': 3.8 192 | }, 193 | feature_importance: { 194 | 'historical_revenue': 0.34, 195 | 'seasonality': 0.28, 196 | 'market_trends': 0.23, 197 | 'customer_count': 0.15 198 | }, 199 | validation_results: { 200 | 
'cross_validation_score': 0.82, 201 | 'holdout_accuracy': 0.79, 202 | 'stability_index': 0.91 203 | } 204 | }, 205 | business_insights: { 206 | trend_direction: 'positive', 207 | seasonality_detected: true, 208 | external_factors_impact: [ 209 | 'Strong correlation with market expansion', 210 | 'Seasonal peak in Q4 consistently observed', 211 | 'Economic indicators show positive influence' 212 | ], 213 | risk_factors: [ 214 | 'Market volatility could impact 15% variance', 215 | 'Supply chain disruptions possible', 216 | 'Competitive landscape changes' 217 | ] 218 | }, 219 | recommendations: [ 220 | { 221 | type: 'strategic', 222 | recommendation: 'Prepare for 23% capacity increase by Q3 2025', 223 | timing: '6 months lead time', 224 | confidence: 0.87 225 | }, 226 | { 227 | type: 'operational', 228 | recommendation: 'Implement dynamic pricing based on demand forecasts', 229 | timing: 'Immediate', 230 | confidence: 0.94 231 | }, 232 | { 233 | type: 'tactical', 234 | recommendation: 'Establish risk monitoring for volatility indicators', 235 | timing: '3 months', 236 | confidence: 0.89 237 | } 238 | ] 239 | }; 240 | 241 | return result; 242 | } 243 | 244 | // Natural Language Query Processing 245 | async processNaturalLanguageQuery(question: string, tables?: string[]): Promise<string> { 246 | const params: NaturalLanguageQueryPrompt = { 247 | question, 248 | tables: tables?.join(','), 249 | response_format: 'natural_language', 250 | context_awareness: true, 251 | confidence_threshold: 0.85, 252 | clarifying_questions: true 253 | }; 254 | 255 | const response = await this.server.handlePromptGet('natural_language_query', params); 256 | return response.messages[0].content.text; 257 | } 258 | 259 | // Smart Schema Design with Compliance 260 | async designOptimalSchema(purpose: string, requirements: string[]): Promise<any> { 261 | const params: SmartSchemaDesignPrompt = { 262 | purpose, 263 | data_types: ['text', 'number', 'date', 'select', 'attachment'], 264 | expected_volume: 'enterprise', 265 | compliance_requirements: ['GDPR', 'HIPAA'], 266 | performance_priorities: ['query_speed', 'scalability'], 267 | integration_needs: ['API access', 'webhook notifications'], 268 | user_access_patterns: 'Multi-team collaboration with role-based permissions' 269 | }; 270 | 271 | const response = await this.server.handlePromptGet('smart_schema_design', params); 272 | return response; 273 | } 274 | 275 | // Comprehensive Data Quality Audit 276 | async performDataQualityAudit(table: string): Promise<any> { 277 | const params: DataQualityAuditPrompt = { 278 | table, 279 | quality_dimensions: ['completeness', 'accuracy', 'consistency', 'timeliness', 'validity'], 280 | automated_fixes: true, 281 | severity_threshold: 'medium', 282 | compliance_context: 'Enterprise data governance standards', 283 | reporting_requirements: ['executive_summary', 'detailed_findings', 'remediation_plan'] 284 | }; 285 | 286 | const response = await this.server.handlePromptGet('data_quality_audit', params); 287 | return response; 288 | } 289 | 290 | // Workflow Optimization Analysis 291 | async optimizeWorkflow(workflowDescription: string, painPoints: string[]): Promise<WorkflowOptimizationResult> { 292 | const params: OptimizeWorkflowPrompt = { 293 | table: 'workflow_data', 294 | current_process_description: workflowDescription, 295 | optimization_goals: ['efficiency', 'accuracy', 'cost_reduction'], 296 | constraints: ['regulatory_compliance', 'legacy_system_integration'], 297 | automation_preference: 'moderate', 298 | change_tolerance: 
'medium' 299 | }; 300 | 301 | const response = await this.server.handlePromptGet('optimize_workflow', params); 302 | 303 | // Return a comprehensive optimization result 304 | const result: WorkflowOptimizationResult = { 305 | current_state_analysis: { 306 | efficiency_score: 72, 307 | bottlenecks: [ 308 | { step: 'Manual data entry', impact: 'high', description: 'Causes 40% of processing delays' }, 309 | { step: 'Approval routing', impact: 'medium', description: 'Average 2.3 day approval time' } 310 | ], 311 | resource_utilization: { 312 | 'staff_time': 0.68, 313 | 'system_capacity': 0.84, 314 | 'automation_coverage': 0.23 315 | } 316 | }, 317 | optimization_recommendations: [ 318 | { 319 | category: 'automation', 320 | recommendation: 'Implement automated data validation and entry', 321 | expected_benefits: ['45% time reduction', '90% error reduction'], 322 | implementation_complexity: 'moderate', 323 | estimated_roi: '340% within 12 months', 324 | timeline: '3-4 months' 325 | }, 326 | { 327 | category: 'process_redesign', 328 | recommendation: 'Parallel approval workflow with smart routing', 329 | expected_benefits: ['60% faster approvals', 'Improved transparency'], 330 | implementation_complexity: 'complex', 331 | estimated_roi: '220% within 18 months', 332 | timeline: '6-8 months' 333 | } 334 | ], 335 | implementation_roadmap: [ 336 | { 337 | phase: 1, 338 | duration: '3 months', 339 | objectives: ['Implement basic automation', 'Staff training'], 340 | deliverables: ['Automated validation system', 'Training materials'], 341 | success_metrics: ['25% efficiency improvement', '95% staff adoption'] 342 | }, 343 | { 344 | phase: 2, 345 | duration: '4 months', 346 | objectives: ['Advanced workflow redesign', 'Integration testing'], 347 | deliverables: ['New approval system', 'Performance dashboard'], 348 | success_metrics: ['60% approval time reduction', '99.5% system uptime'] 349 | } 350 | ], 351 | risk_assessment: [ 352 | { 353 | risk: 'Staff resistance to change', 354 | probability: 'medium', 355 | impact: 'medium', 356 | mitigation: 'Comprehensive change management and training program' 357 | }, 358 | { 359 | risk: 'System integration challenges', 360 | probability: 'low', 361 | impact: 'high', 362 | mitigation: 'Phased rollout with fallback procedures' 363 | } 364 | ] 365 | }; 366 | 367 | return result; 368 | } 369 | 370 | // Automation Recommendations Engine 371 | async generateAutomationRecommendations(workflowDescription: string): Promise<any> { 372 | const params: AutomationRecommendationsPrompt = { 373 | workflow_description: workflowDescription, 374 | current_pain_points: ['manual_data_entry', 'approval_delays', 'reporting_overhead'], 375 | automation_scope: 'end_to_end', 376 | technical_constraints: ['legacy_system_compatibility', 'security_requirements'], 377 | business_impact_priority: ['time_efficiency', 'error_reduction', 'cost_savings'], 378 | implementation_timeline: 'medium_term', 379 | risk_tolerance: 'moderate' 380 | }; 381 | 382 | const response = await this.server.handlePromptGet('automation_recommendations', params); 383 | return response; 384 | } 385 | } 386 | 387 | // Example usage with comprehensive error handling 388 | async function demonstrateEnterpriseAI(): Promise<void> { 389 | const analytics = new EnterpriseAIAnalytics(); 390 | 391 | try { 392 | console.log('🤖 Starting Enterprise AI Analysis...'); 393 | 394 | // Statistical Analysis 395 | console.log('\n📊 Performing Statistical Analysis...'); 396 | const analysisResult = await 
analytics.performStatisticalAnalysis('Sales'); 397 | console.log('Analysis completed:', analysisResult.summary); 398 | 399 | // Executive Report 400 | console.log('\n📋 Generating Executive Report...'); 401 | const reportResult = await analytics.generateExecutiveReport('Sales', 'executives'); 402 | console.log('Report generated:', reportResult.title); 403 | 404 | // Predictive Analytics 405 | console.log('\n🔮 Running Predictive Analytics...'); 406 | const predictionResult = await analytics.performPredictiveAnalytics('Sales', 'revenue'); 407 | console.log('Predictions generated:', predictionResult.predictions.length, 'periods'); 408 | 409 | // Natural Language Query 410 | console.log('\n🗣️ Processing Natural Language Query...'); 411 | const nlResult = await analytics.processNaturalLanguageQuery( 412 | 'What are the top 5 performing products by revenue this quarter?', 413 | ['Products', 'Sales'] 414 | ); 415 | console.log('NL Response:', nlResult.substring(0, 100) + '...'); 416 | 417 | // Workflow Optimization 418 | console.log('\n⚡ Analyzing Workflow Optimization...'); 419 | const workflowResult = await analytics.optimizeWorkflow( 420 | 'Manual invoice processing with email approvals', 421 | ['Slow approval times', 'Manual data entry errors'] 422 | ); 423 | console.log('Optimization completed, efficiency score:', workflowResult.current_state_analysis.efficiency_score); 424 | 425 | console.log('\n✅ All Enterprise AI operations completed successfully!'); 426 | 427 | } catch (error) { 428 | console.error('❌ Enterprise AI Error:', error); 429 | throw error; 430 | } 431 | } 432 | 433 | // Export for testing and integration 434 | export { 435 | EnterpriseAIAnalytics, 436 | demonstrateEnterpriseAI 437 | }; 438 | 439 | // Run demonstration if executed directly 440 | if (require.main === module) { 441 | demonstrateEnterpriseAI() 442 | .then(() => process.exit(0)) 443 | .catch((error) => { 444 | console.error('Fatal error:', error); 445 | process.exit(1); 446 | }); 447 | } ```
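For quick experimentation, the `EnterpriseAIAnalytics` class above can be driven from a small standalone script. The sketch below is a minimal usage example, not part of the repository: the file name, the relative import path, and the `'Sales'` / `'revenue'` identifiers are illustrative assumptions, and it presumes the example compiles as shown (for instance via `ts-node` or a prior `tsc` build).

```typescript
// run-analytics.ts (hypothetical helper script, placed next to advanced-ai-prompts.ts)
import { EnterpriseAIAnalytics } from './advanced-ai-prompts';

async function main(): Promise<void> {
  const analytics = new EnterpriseAIAnalytics();

  // 'Sales' and 'revenue' are placeholders; use a table and field from your own base.
  const analysis = await analytics.performStatisticalAnalysis('Sales');
  console.log(analysis.summary);

  const forecast = await analytics.performPredictiveAnalytics('Sales', 'revenue');
  console.log(`Generated ${forecast.predictions.length} forecast periods`);
}

main().catch((error) => {
  console.error('Analytics run failed:', error);
  process.exit(1);
});
```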