# Directory Structure

```
├── .gitignore
├── .python-version
├── LICENSE
├── pyproject.toml
├── README.md
├── src
│   └── dash_mcp_server
│       ├── __init__.py
│       └── server.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------

```
3.12
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
.DS_Store
.vscode/
.ruff_cache/
.cursor/
.cursorrules
.cursorignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# UV
#   Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#uv.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# PyPI configuration file
.pypirc

# Claude
CLAUDE.md
.claude
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

````markdown
# mcp-server-dash

A Model Context Protocol (MCP) server that provides tools to interact with the [Dash](https://kapeli.com/dash) documentation browser API.

Dash 8 is required; you can download it at https://blog.kapeli.com/dash-8.

<a href="https://glama.ai/mcp/servers/@Kapeli/dash-mcp-server">
  <img width="380" height="200" src="https://glama.ai/mcp/servers/@Kapeli/dash-mcp-server/badge" alt="Dash MCP server" />
</a>

## Overview

The Dash MCP server provides tools for accessing and searching documentation directly from Dash, the macOS documentation browser. MCP clients can:

- List installed docsets
- Search across docsets and code snippets
- Enable full-text search for specific docsets

### Notice

This is a work in progress. Any suggestions are welcome!

## Tools

1. **list_installed_docsets**
   - Lists all installed documentation sets in Dash
2. **search_documentation**
   - Searches across docsets and snippets
3. **enable_docset_fts**
   - Enables full-text search for a specific docset

## Requirements

- macOS (required for the Dash app)
- [Dash](https://kapeli.com/dash) installed
- Python 3.12 or higher (matching `requires-python` in `pyproject.toml`)
- uv

## Configuration

### Using uvx

```bash
brew install uv
```

#### In `claude_desktop_config.json`

```json
{
  "mcpServers": {
      "dash-api": {
          "command": "uvx",
          "args": [
              "--from",
              "git+https://github.com/Kapeli/dash-mcp-server.git",
              "dash-mcp-server"
          ]
      }
  }
}
```

#### In Claude Code

```bash
claude mcp add dash-api -- uvx --from "git+https://github.com/Kapeli/dash-mcp-server.git" "dash-mcp-server"
```
````
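
For reference, the Dash HTTP API that this server wraps can also be queried directly. Below is a minimal sketch based on the endpoints used in `src/dash_mcp_server/server.py` further down; it assumes Dash is running with the API server enabled, and the docset identifier is a placeholder (real identifiers come from `list_installed_docsets`):

```python
# Minimal sketch: query the Dash API directly, bypassing MCP.
import json
from pathlib import Path

import httpx

# Dash writes the API server's port to this status file (see server.py).
status_file = (
    Path.home() / "Library" / "Application Support" / "Dash"
    / ".dash_api_server" / "status.json"
)
port = json.loads(status_file.read_text())["port"]
base_url = f"http://127.0.0.1:{port}"

response = httpx.get(
    f"{base_url}/search",
    params={
        "query": "URLSession",
        "docset_identifiers": "apple",  # placeholder; use list_installed_docsets
        "search_snippets": True,
        "max_results": 5,
    },
    timeout=30.0,
)
response.raise_for_status()
for item in response.json().get("results", []):
    print(item["name"], item["load_url"])
```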

--------------------------------------------------------------------------------
/src/dash_mcp_server/__init__.py:
--------------------------------------------------------------------------------

```python
from . import server


def main():
    """Main entry point for the package."""
    server.main()


if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
[project]
name = "dash-mcp-server"
version = "1.0.0"
description = "MCP server for Dash, the macOS documentation browser"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "mcp>=1.13.1",
    "pydantic>=2.11.7",
    "httpx>=0.28.1",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src/dash_mcp_server"]

[project.scripts]
dash-mcp-server = "dash_mcp_server:main"
```
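
The `[project.scripts]` entry maps the `dash-mcp-server` command to `dash_mcp_server.main()`, so the console script and a plain Python import are equivalent; a trivial sketch:

```python
# Equivalent to invoking the `dash-mcp-server` console script:
from dash_mcp_server import main

main()  # calls server.main(), which runs mcp.run() (stdio transport by default)
```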

--------------------------------------------------------------------------------
/src/dash_mcp_server/server.py:
--------------------------------------------------------------------------------

```python
import asyncio
import json
import subprocess
from pathlib import Path
from typing import Optional

import httpx
from mcp.server.fastmcp import Context, FastMCP
from pydantic import BaseModel, Field

mcp = FastMCP("Dash Documentation API")


async def check_api_health(ctx: Context, port: int) -> bool:
    """Check if the Dash API server is responding at the given port."""
    base_url = f"http://127.0.0.1:{port}"
    try:
        # Use an async client so the health check doesn't block the event loop.
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(f"{base_url}/health")
            response.raise_for_status()
        await ctx.debug(f"Successfully connected to Dash API at {base_url}")
        return True
    except Exception as e:
        await ctx.debug(f"Health check failed for {base_url}: {e}")
        return False


async def working_api_base_url(ctx: Context) -> Optional[str]:
    """Return the base URL of a responding Dash API server, enabling it if needed."""
    dash_running = await ensure_dash_running(ctx)
    if not dash_running:
        return None

    port = await get_dash_api_port(ctx)
    if port is None:
        # Try to automatically enable the Dash API Server
        await ctx.info("The Dash API Server is not enabled. Attempting to enable it automatically...")
        try:
            subprocess.run(
                ["defaults", "write", "com.kapeli.dashdoc", "DHAPIServerEnabled", "YES"],
                check=True,
                timeout=10
            )
            # Wait a moment for Dash to pick up the change
            await asyncio.sleep(2)

            # Try to get the port again
            port = await get_dash_api_port(ctx)
            if port is None:
                await ctx.error("Failed to enable Dash API Server automatically. Please enable it manually in Dash Settings > Integration")
                return None
            else:
                await ctx.info("Successfully enabled Dash API Server")
        except Exception as e:
            await ctx.error(f"Failed to enable Dash API Server automatically ({e}). Please enable it manually in Dash Settings > Integration")
            return None

    return f"http://127.0.0.1:{port}"


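# Note: the status file read below is written by Dash when its API server
# starts. Its exact schema is not documented here; the code assumes only
# that it is a JSON object containing a "port" key, e.g. {"port": 56789}.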
async def get_dash_api_port(ctx: Context) -> Optional[int]:
    """Get the Dash API port from the status.json file and verify the API server is responding."""
    status_file = Path.home() / "Library" / "Application Support" / "Dash" / ".dash_api_server" / "status.json"

    try:
        with open(status_file, 'r') as f:
            status_data = json.load(f)
            port = status_data.get('port')
            if port is None:
                return None

        # Check if the API server is actually responding
        if await check_api_health(ctx, port):
            return port
        else:
            return None

    except (FileNotFoundError, json.JSONDecodeError, KeyError):
        return None


def check_dash_running() -> bool:
    """Check if Dash app is running by looking for the process."""
    try:
        # Use pgrep to check for Dash process
        result = subprocess.run(
            ["pgrep", "-f", "Dash"],
            capture_output=True,
            timeout=5
        )
        return result.returncode == 0
    except Exception:
        return False


async def ensure_dash_running(ctx: Context) -> bool:
    """Ensure Dash is running, launching it if necessary."""
    if not check_dash_running():
        await ctx.info("Dash is not running. Launching Dash...")
        try:
            # Launch Dash using the bundle identifier
            subprocess.run(
                ["open", "-g", "-j", "-b", "com.kapeli.dashdoc"],
                check=True,
                timeout=10
            )
            # Wait a moment for Dash to start
            await asyncio.sleep(4)

            # Check again if Dash is now running
            if not check_dash_running():
                await ctx.error("Failed to launch Dash application")
                return False
            else:
                await ctx.info("Dash launched successfully")
                return True
        except subprocess.CalledProcessError:
            await ctx.error("Failed to launch Dash application")
            return False
        except Exception as e:
            await ctx.error(f"Error launching Dash: {e}")
            return False
    else:
        return True


class DocsetResult(BaseModel):
    """Information about a docset."""
    name: str = Field(description="Display name of the docset")
    identifier: str = Field(description="Unique identifier")
    platform: str = Field(description="Platform/type of the docset")
    full_text_search: str = Field(description="Full-text search status: 'not supported', 'disabled', 'indexing', or 'enabled'")
    notice: Optional[str] = Field(description="Optional notice about the docset status", default=None)


class DocsetResults(BaseModel):
    """Result from listing docsets."""
    docsets: list[DocsetResult] = Field(description="List of installed docsets", default_factory=list)
    error: Optional[str] = Field(description="Error message if there was an issue", default=None)


class SearchResult(BaseModel):
    """A search result from documentation."""
    name: str = Field(description="Name of the documentation entry")
    type: str = Field(description="Type of result (Function, Class, etc.)")
    platform: Optional[str] = Field(description="Platform of the result", default=None)
    load_url: str = Field(description="URL to load the documentation")
    docset: Optional[str] = Field(description="Name of the docset", default=None)
    description: Optional[str] = Field(description="Additional description", default=None)
    language: Optional[str] = Field(description="Programming language (snippet results only)", default=None)
    tags: Optional[str] = Field(description="Tags (snippet results only)", default=None)


class SearchResults(BaseModel):
    """Result from searching documentation."""
    results: list[SearchResult] = Field(description="List of search results", default_factory=list)
    error: Optional[str] = Field(description="Error message if there was an issue", default=None)


def estimate_tokens(obj) -> int:
    """Estimate token count for a serialized object. Rough approximation: 1 token ≈ 4 characters."""
    if isinstance(obj, str):
        return max(1, len(obj) // 4)
    elif isinstance(obj, (list, tuple)):
        return sum(estimate_tokens(item) for item in obj)
    elif isinstance(obj, dict):
        return sum(estimate_tokens(k) + estimate_tokens(v) for k, v in obj.items())
    elif hasattr(obj, 'model_dump'):  # Pydantic model
        return estimate_tokens(obj.model_dump())
    else:
        return max(1, len(str(obj)) // 4)


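# Illustrative example: estimate_tokens("Hello, world!") -> 13 // 4 = 3 tokens;
# a Pydantic model is estimated by summing the estimates of every key and
# value in its model_dump().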
@mcp.tool()
async def list_installed_docsets(ctx: Context) -> DocsetResults:
    """List all installed documentation sets in Dash. An empty list is returned if the user has no docsets installed.
    Results are automatically truncated if they would exceed 25,000 tokens."""
    try:
        base_url = await working_api_base_url(ctx)
        if base_url is None:
            return DocsetResults(error="Failed to connect to Dash API Server. Please ensure Dash is running and the API server is enabled (Settings > Integration, or run open -b com.kapeli.dashdoc, followed by defaults write com.kapeli.dashdoc DHAPIServerEnabled YES).")
        await ctx.debug("Fetching installed docsets from Dash API")

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.get(f"{base_url}/docsets/list")
            response.raise_for_status()
            result = response.json()

        docsets = result.get("docsets", [])
        await ctx.info(f"Found {len(docsets)} installed docsets")

        # Build result list with token limit checking
        token_limit = 25000
        current_tokens = 100  # Base overhead for response structure
        limited_docsets = []

        for docset in docsets:
            docset_info = DocsetResult(
                name=docset["name"],
                identifier=docset["identifier"],
                platform=docset["platform"],
                full_text_search=docset["full_text_search"],
                notice=docset.get("notice")
            )

            # Estimate tokens for this docset
            docset_tokens = estimate_tokens(docset_info)

            if current_tokens + docset_tokens > token_limit:
                await ctx.warning(f"Token limit reached. Returning {len(limited_docsets)} of {len(docsets)} docsets to stay under 25k token limit.")
                break

            limited_docsets.append(docset_info)
            current_tokens += docset_tokens

        if len(limited_docsets) < len(docsets):
            await ctx.info(f"Returned {len(limited_docsets)} docsets (truncated from {len(docsets)} due to token limit)")

        return DocsetResults(docsets=limited_docsets)

    except httpx.HTTPStatusError as e:
        if e.response.status_code == 404:
            await ctx.warning("No docsets found. Install some in Settings > Downloads.")
            return DocsetResults(error="No docsets found. Instruct the user to install some docsets in Settings > Downloads.")
        return DocsetResults(error=f"HTTP error: {e}")
    except Exception as e:
        await ctx.error(f"Failed to get installed docsets: {e}")
        return DocsetResults(error=f"Failed to get installed docsets: {e}")


@mcp.tool()
async def search_documentation(
    ctx: Context,
    query: str,
    docset_identifiers: str,
    search_snippets: bool = True,
    max_results: int = 100,
) -> SearchResults:
    """
    Search for documentation across docset identifiers and snippets.

    Args:
        query: The search query string
        docset_identifiers: Comma-separated list of docset identifiers to search in (from list_installed_docsets)
        search_snippets: Whether to include snippets in search results
        max_results: Maximum number of results to return (1-1000)

    Results are automatically truncated if they would exceed 25,000 tokens.
    """
    if not query.strip():
        await ctx.error("Query cannot be empty")
        return SearchResults(error="Query cannot be empty")

    if not docset_identifiers.strip():
        await ctx.error("docset_identifiers cannot be empty. Get the docset identifiers using list_installed_docsets")
        return SearchResults(error="docset_identifiers cannot be empty. Get the docset identifiers using list_installed_docsets")

    if max_results < 1 or max_results > 1000:
        await ctx.error("max_results must be between 1 and 1000")
        return SearchResults(error="max_results must be between 1 and 1000")

    try:
        base_url = await working_api_base_url(ctx)
        if base_url is None:
            return SearchResults(error="Failed to connect to Dash API Server. Please ensure Dash is running and the API server is enabled (Settings > Integration, or run open -b com.kapeli.dashdoc, followed by defaults write com.kapeli.dashdoc DHAPIServerEnabled YES).")

        params = {
            "query": query,
            "docset_identifiers": docset_identifiers,
            "search_snippets": search_snippets,
            "max_results": max_results,
        }

        await ctx.debug(f"Searching Dash API with query: '{query}'")

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.get(f"{base_url}/search", params=params)
            response.raise_for_status()
            result = response.json()

        # Check for warning message in response
        warning_message = None
        if "message" in result:
            warning_message = result["message"]
            await ctx.warning(warning_message)

        results = result.get("results", [])
        await ctx.info(f"Found {len(results)} results")

        # Build result list with token limit checking
        token_limit = 25000
        current_tokens = 100  # Base overhead for response structure
        limited_results = []

        for item in results:
            search_result = SearchResult(
                name=item["name"],
                type=item["type"],
                platform=item.get("platform"),
                load_url=item["load_url"],
                docset=item.get("docset"),
                description=item.get("description"),
                language=item.get("language"),
                tags=item.get("tags")
            )

            # Estimate tokens for this result
            result_tokens = estimate_tokens(search_result)

            if current_tokens + result_tokens > token_limit:
                await ctx.warning(f"Token limit reached. Returning {len(limited_results)} of {len(results)} results to stay under 25k token limit.")
                break

            limited_results.append(search_result)
            current_tokens += result_tokens

        if len(limited_results) < len(results):
            await ctx.info(f"Returned {len(limited_results)} results (truncated from {len(results)} due to token limit)")

        return SearchResults(results=limited_results, error=warning_message)
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 400:
            error_text = e.response.text
            if "Docset with identifier" in error_text and "not found" in error_text:
                await ctx.error("Invalid docset identifier. Run list_installed_docsets to see available docsets.")
                return SearchResults(error="Invalid docset identifier. Run list_installed_docsets to see available docsets, then use the exact identifier from that list.")
            elif "No docsets found" in error_text:
                await ctx.error("No valid docsets found for search.")
                return SearchResults(error="No valid docsets found for search. Either provide valid docset identifiers from list_installed_docsets, or set search_snippets=true to search snippets only.")
            else:
                await ctx.error(f"Bad request: {error_text}")
                return SearchResults(error=f"Bad request: {error_text}. Please ensure Dash is running and the API server is enabled (Settings > Integration, or run open -b com.kapeli.dashdoc, followed by defaults write com.kapeli.dashdoc DHAPIServerEnabled YES).")
        elif e.response.status_code == 403:
            error_text = e.response.text
            if "API access blocked due to Dash trial expiration" in error_text:
                await ctx.error("Dash trial expired. Purchase Dash to continue using the API.")
                return SearchResults(error="Your Dash trial has expired. Purchase Dash at https://kapeli.com/dash to continue using the API. During trial expiration, API access is blocked.")
            else:
                await ctx.error(f"Forbidden: {error_text}")
                return SearchResults(error=f"Forbidden: {error_text}. Please ensure Dash is running and the API server is enabled (Settings > Integration, or run open -b com.kapeli.dashdoc, followed by defaults write com.kapeli.dashdoc DHAPIServerEnabled YES).")
        await ctx.error(f"HTTP error: {e}")
        return SearchResults(error=f"HTTP error: {e}. Please ensure Dash is running and the API server is enabled (Settings > Integration, or run open -b com.kapeli.dashdoc, followed by defaults write com.kapeli.dashdoc DHAPIServerEnabled YES).")
    except Exception as e:
        await ctx.error(f"Search failed: {e}")
        return SearchResults(error=f"Search failed: {e}. Please ensure Dash is running and the API server is enabled (Settings > Integration, or run open -b com.kapeli.dashdoc, followed by defaults write com.kapeli.dashdoc DHAPIServerEnabled YES).")


@mcp.tool()
async def enable_docset_fts(ctx: Context, identifier: str) -> bool:
    """
    Enable full-text search for a specific docset.

    Args:
        identifier: The docset identifier (from list_installed_docsets)

    Returns:
        True if FTS was successfully enabled, False otherwise
    """
    if not identifier.strip():
        await ctx.error("Docset identifier cannot be empty")
        return False

    try:
        base_url = await working_api_base_url(ctx)
        if base_url is None:
            return False

        await ctx.debug(f"Enabling FTS for docset: {identifier}")

        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.get(f"{base_url}/docsets/enable_fts", params={"identifier": identifier})
            response.raise_for_status()
            # Response body is parsed only to confirm it is valid JSON.
            response.json()

    except httpx.HTTPStatusError as e:
        if e.response.status_code == 400:
            await ctx.error(f"Bad request: {e.response.text}")
            return False
        elif e.response.status_code == 404:
            await ctx.error(f"Docset not found: {identifier}")
            return False
        await ctx.error(f"HTTP error: {e}")
        return False
    except Exception as e:
        await ctx.error(f"Failed to enable FTS: {e}")
        return False
    return True


def main():
    mcp.run()


if __name__ == "__main__":
    main()
```
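
To exercise the server end to end, an MCP client can launch it over stdio and call its tools. A minimal sketch using the `mcp` Python SDK's client API (the `uvx` invocation mirrors the README configuration; tool names match the `@mcp.tool()` functions above):

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def demo() -> None:
    # Launch the server the same way the claude_desktop_config.json example does.
    params = StdioServerParameters(
        command="uvx",
        args=[
            "--from",
            "git+https://github.com/Kapeli/dash-mcp-server.git",
            "dash-mcp-server",
        ],
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # Discover the three tools exposed by server.py.
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])

            # Call a tool; the payload mirrors the DocsetResults model.
            result = await session.call_tool("list_installed_docsets", {})
            print(result.content)


asyncio.run(demo())
```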