# duckduckgo-mcp-server — packed repository snapshot
# Directory Structure

```
├── .github
│   └── workflows
│       └── python-publish.yml
├── .gitignore
├── .python-version
├── Dockerfile
├── LICENSE
├── pyproject.toml
├── README.md
├── smithery.yaml
├── src
│   └── duckduckgo_mcp_server
│       ├── __init__.py
│       └── server.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------

```
1 | 3.13.2
2 | 
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
  1 | # Byte-compiled / optimized / DLL files
  2 | __pycache__/
  3 | *.py[cod]
  4 | *$py.class
  5 | 
  6 | # C extensions
  7 | *.so
  8 | 
  9 | # Distribution / packaging
 10 | .Python
 11 | build/
 12 | develop-eggs/
 13 | dist/
 14 | downloads/
 15 | eggs/
 16 | .eggs/
 17 | lib/
 18 | lib64/
 19 | parts/
 20 | sdist/
 21 | var/
 22 | wheels/
 23 | share/python-wheels/
 24 | *.egg-info/
 25 | .installed.cfg
 26 | *.egg
 27 | MANIFEST
 28 | 
 29 | # PyInstaller
 30 | #  Usually these files are written by a python script from a template
 31 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
 32 | *.manifest
 33 | *.spec
 34 | 
 35 | # Installer logs
 36 | pip-log.txt
 37 | pip-delete-this-directory.txt
 38 | 
 39 | # Unit test / coverage reports
 40 | htmlcov/
 41 | .tox/
 42 | .nox/
 43 | .coverage
 44 | .coverage.*
 45 | .cache
 46 | nosetests.xml
 47 | coverage.xml
 48 | *.cover
 49 | *.py,cover
 50 | .hypothesis/
 51 | .pytest_cache/
 52 | cover/
 53 | 
 54 | # Translations
 55 | *.mo
 56 | *.pot
 57 | 
 58 | # Django stuff:
 59 | *.log
 60 | local_settings.py
 61 | db.sqlite3
 62 | db.sqlite3-journal
 63 | 
 64 | # Flask stuff:
 65 | instance/
 66 | .webassets-cache
 67 | 
 68 | # Scrapy stuff:
 69 | .scrapy
 70 | 
 71 | # Sphinx documentation
 72 | docs/_build/
 73 | 
 74 | # PyBuilder
 75 | .pybuilder/
 76 | target/
 77 | 
 78 | # Jupyter Notebook
 79 | .ipynb_checkpoints
 80 | 
 81 | # IPython
 82 | profile_default/
 83 | ipython_config.py
 84 | 
 85 | # pyenv
 86 | #   For a library or package, you might want to ignore these files since the code is
 87 | #   intended to run in multiple environments; otherwise, check them in:
 88 | # .python-version
 89 | 
 90 | # pipenv
 91 | #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
 92 | #   However, in case of collaboration, if having platform-specific dependencies or dependencies
 93 | #   having no cross-platform support, pipenv may install dependencies that don't work, or not
 94 | #   install all needed dependencies.
 95 | #Pipfile.lock
 96 | 
 97 | # UV
 98 | #   Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
 99 | #   This is especially recommended for binary packages to ensure reproducibility, and is more
100 | #   commonly ignored for libraries.
101 | #uv.lock
102 | 
103 | # poetry
104 | #   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105 | #   This is especially recommended for binary packages to ensure reproducibility, and is more
106 | #   commonly ignored for libraries.
107 | #   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108 | #poetry.lock
109 | 
110 | # pdm
111 | #   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | #pdm.lock
113 | #   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114 | #   in version control.
115 | #   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116 | .pdm.toml
117 | .pdm-python
118 | .pdm-build/
119 | 
120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121 | __pypackages__/
122 | 
123 | # Celery stuff
124 | celerybeat-schedule
125 | celerybeat.pid
126 | 
127 | # SageMath parsed files
128 | *.sage.py
129 | 
130 | # Environments
131 | .env
132 | .venv
133 | env/
134 | venv/
135 | ENV/
136 | env.bak/
137 | venv.bak/
138 | 
139 | # Spyder project settings
140 | .spyderproject
141 | .spyproject
142 | 
143 | # Rope project settings
144 | .ropeproject
145 | 
146 | # mkdocs documentation
147 | /site
148 | 
149 | # mypy
150 | .mypy_cache/
151 | .dmypy.json
152 | dmypy.json
153 | 
154 | # Pyre type checker
155 | .pyre/
156 | 
157 | # pytype static type analyzer
158 | .pytype/
159 | 
160 | # Cython debug symbols
161 | cython_debug/
162 | 
163 | # PyCharm
164 | #  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165 | #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166 | #  and can be added to the global gitignore or merged into this file.  For a more nuclear
167 | #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
168 | #.idea/
169 | 
170 | # PyPI configuration file
171 | .pypirc
172 | 
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # DuckDuckGo Search MCP Server
  2 | 
  3 | [![smithery badge](https://smithery.ai/badge/@nickclyde/duckduckgo-mcp-server)](https://smithery.ai/server/@nickclyde/duckduckgo-mcp-server)
  4 | 
  5 | A Model Context Protocol (MCP) server that provides web search capabilities through DuckDuckGo, with additional features for content fetching and parsing.
  6 | 
  7 | <a href="https://glama.ai/mcp/servers/phcus2gcpn">
  8 |   <img width="380" height="200" src="https://glama.ai/mcp/servers/phcus2gcpn/badge" alt="DuckDuckGo Server MCP server" />
  9 | </a>
 10 | 
 11 | ## Features
 12 | 
 13 | - **Web Search**: Search DuckDuckGo with advanced rate limiting and result formatting
 14 | - **Content Fetching**: Retrieve and parse webpage content with intelligent text extraction
 15 | - **Rate Limiting**: Built-in protection against rate limits for both search and content fetching
 16 | - **Error Handling**: Comprehensive error handling and logging
 17 | - **LLM-Friendly Output**: Results formatted specifically for large language model consumption
 18 | 
 19 | ## Installation
 20 | 
 21 | ### Installing via Smithery
 22 | 
 23 | To install DuckDuckGo Search Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@nickclyde/duckduckgo-mcp-server):
 24 | 
 25 | ```bash
 26 | npx -y @smithery/cli install @nickclyde/duckduckgo-mcp-server --client claude
 27 | ```
 28 | 
 29 | ### Installing via `uv`
 30 | 
 31 | Install directly from PyPI using `uv`:
 32 | 
 33 | ```bash
 34 | uv pip install duckduckgo-mcp-server
 35 | ```
 36 | 
 37 | ## Usage
 38 | 
 39 | ### Running with Claude Desktop
 40 | 
 41 | 1. Download [Claude Desktop](https://claude.ai/download)
 42 | 2. Create or edit your Claude Desktop configuration:
 43 |    - On macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
 44 |    - On Windows: `%APPDATA%\Claude\claude_desktop_config.json`
 45 | 
 46 | Add the following configuration:
 47 | 
 48 | ```json
 49 | {
 50 |     "mcpServers": {
 51 |         "ddg-search": {
 52 |             "command": "uvx",
 53 |             "args": ["duckduckgo-mcp-server"]
 54 |         }
 55 |     }
 56 | }
 57 | ```
 58 | 
 59 | 3. Restart Claude Desktop
 60 | 
 61 | ### Development
 62 | 
 63 | For local development, you can use the MCP CLI:
 64 | 
 65 | ```bash
 66 | # Run with the MCP Inspector
 67 | mcp dev server.py
 68 | 
 69 | # Install locally for testing with Claude Desktop
 70 | mcp install server.py
 71 | ```
 72 | ## Available Tools
 73 | 
 74 | ### 1. Search Tool
 75 | 
 76 | ```python
 77 | async def search(query: str, max_results: int = 10) -> str
 78 | ```
 79 | 
 80 | Performs a web search on DuckDuckGo and returns formatted results.
 81 | 
 82 | **Parameters:**
 83 | - `query`: Search query string
 84 | - `max_results`: Maximum number of results to return (default: 10)
 85 | 
 86 | **Returns:**
 87 | Formatted string containing search results with titles, URLs, and snippets.
 88 | 
 89 | ### 2. Content Fetching Tool
 90 | 
 91 | ```python
 92 | async def fetch_content(url: str) -> str
 93 | ```
 94 | 
 95 | Fetches and parses content from a webpage.
 96 | 
 97 | **Parameters:**
 98 | - `url`: The webpage URL to fetch content from
 99 | 
100 | **Returns:**
101 | Cleaned and formatted text content from the webpage.
102 | 
103 | ## Features in Detail
104 | 
105 | ### Rate Limiting
106 | 
107 | - Search: Limited to 30 requests per minute
108 | - Content Fetching: Limited to 20 requests per minute
109 | - Automatic queue management and wait times
110 | 
111 | ### Result Processing
112 | 
113 | - Removes ads and irrelevant content
114 | - Cleans up DuckDuckGo redirect URLs
115 | - Formats results for optimal LLM consumption
116 | - Truncates long content appropriately
117 | 
118 | ### Error Handling
119 | 
120 | - Comprehensive error catching and reporting
121 | - Detailed logging through MCP context
122 | - Graceful degradation on rate limits or timeouts
123 | 
124 | ## Contributing
125 | 
126 | Issues and pull requests are welcome! Some areas for potential improvement:
127 | 
128 | - Additional search parameters (region, language, etc.)
129 | - Enhanced content parsing options
130 | - Caching layer for frequently accessed content
131 | - Additional rate limiting strategies
132 | 
133 | ## License
134 | 
135 | This project is licensed under the MIT License.
```

--------------------------------------------------------------------------------
/src/duckduckgo_mcp_server/__init__.py:
--------------------------------------------------------------------------------

```python
__version__ = "0.1.1"  # NOTE(review): duplicated in pyproject.toml `version` — keep the two in sync when releasing
2 | 
```

--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
 2 | 
 3 | startCommand:
 4 |   type: stdio
 5 |   configSchema:
 6 |     # JSON Schema defining the configuration options for the MCP.
 7 |     {}
 8 |   commandFunction:
 9 |     # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
10 |     |-
11 |     (config) => ({ command: 'python', args: ['-m', 'duckduckgo_mcp_server.server'] })
12 |   exampleConfig: {}
13 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
 2 | FROM python:3.11-alpine
 3 | 
 4 | # Install system dependencies
 5 | RUN apk add --no-cache gcc musl-dev linux-headers
 6 | 
 7 | # Set working directory
 8 | WORKDIR /app
 9 | 
10 | # Copy all files
11 | COPY . /app
12 | 
13 | # Install Python dependencies
14 | RUN pip install --upgrade pip \
15 |     && pip install --no-cache-dir .
16 | 
17 | # Expose port if needed (MCP uses stdio, so not required)
18 | 
19 | # Run the MCP server
20 | CMD ["python", "-m", "duckduckgo_mcp_server.server"]
21 | 
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
 1 | [project]
 2 | name = "duckduckgo-mcp-server"
 3 | version = "0.1.1"
 4 | description = "MCP Server for searching via DuckDuckGo"
 5 | readme = "README.md"
 6 | authors = [{ name = "Nick Clyde", email = "[email protected]" }]
 7 | requires-python = ">=3.10"
 8 | dependencies = ["beautifulsoup4>=4.13.3", "httpx>=0.28.1", "mcp[cli]>=1.3.0"]
 9 | classifiers = [
10 |     "Development Status :: 3 - Alpha",
11 |     "Intended Audience :: Developers",
12 |     "License :: OSI Approved :: MIT License",
13 |     "Programming Language :: Python :: 3",
14 |     "Programming Language :: Python :: 3.10",
15 |     "Programming Language :: Python :: 3.11",
16 |     "Programming Language :: Python :: 3.12",
17 |     "Programming Language :: Python :: 3.13",
18 | ]
19 | 
20 | [project.urls]
21 | Homepage = "https://github.com/nickclyde/duckduckgo-mcp-server"
22 | Issues = "https://github.com/nickclyde/duckduckgo-mcp-server/issues"
23 | 
24 | [project.scripts]
25 | duckduckgo-mcp-server = "duckduckgo_mcp_server.server:main"
26 | 
27 | [build-system]
28 | requires = ["hatchling"]
29 | build-backend = "hatchling.build"
30 | 
```

--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------

```yaml
 1 | # This workflow will upload a Python Package to PyPI when a release is created
 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
 3 | 
 4 | # This workflow uses actions that are not certified by GitHub.
 5 | # They are provided by a third-party and are governed by
 6 | # separate terms of service, privacy policy, and support
 7 | # documentation.
 8 | 
 9 | name: Upload Python Package
10 | 
11 | on:
12 |   release:
13 |     types: [published]
14 | 
15 | permissions:
16 |   contents: read
17 | 
18 | jobs:
19 |   release-build:
20 |     runs-on: ubuntu-latest
21 | 
22 |     steps:
23 |       - uses: actions/checkout@v4
24 | 
25 |       - uses: actions/setup-python@v5
26 |         with:
27 |           python-version: "3.x"
28 | 
29 |       - name: Build release distributions
30 |         run: |
31 |           # NOTE: put your own distribution build steps here.
32 |           python -m pip install build
33 |           python -m build
34 | 
35 |       - name: Upload distributions
36 |         uses: actions/upload-artifact@v4
37 |         with:
38 |           name: release-dists
39 |           path: dist/
40 | 
41 |   pypi-publish:
42 |     runs-on: ubuntu-latest
43 |     needs:
44 |       - release-build
45 |     permissions:
46 |       # IMPORTANT: this permission is mandatory for trusted publishing
47 |       id-token: write
48 | 
49 |     # Dedicated environments with protections for publishing are strongly recommended.
50 |     # For more information, see: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules
51 |     environment:
52 |       name: pypi
53 |       # OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status:
54 |       # url: https://pypi.org/p/YOURPROJECT
55 |       #
56 |       # ALTERNATIVE: if your GitHub Release name is the PyPI project version string
57 |       # ALTERNATIVE: exactly, uncomment the following line instead:
58 |       # url: https://pypi.org/project/YOURPROJECT/${{ github.event.release.name }}
59 | 
60 |     steps:
61 |       - name: Retrieve release distributions
62 |         uses: actions/download-artifact@v4
63 |         with:
64 |           name: release-dists
65 |           path: dist/
66 | 
67 |       - name: Publish release distributions to PyPI
68 |         uses: pypa/gh-action-pypi-publish@release/v1
69 |         with:
70 |           packages-dir: dist/
71 | 
```

--------------------------------------------------------------------------------
/src/duckduckgo_mcp_server/server.py:
--------------------------------------------------------------------------------

```python
  1 | from mcp.server.fastmcp import FastMCP, Context
  2 | import httpx
  3 | from bs4 import BeautifulSoup
  4 | from typing import List, Dict, Optional, Any
  5 | from dataclasses import dataclass
  6 | import urllib.parse
  7 | import sys
  8 | import traceback
  9 | import asyncio
 10 | from datetime import datetime, timedelta
 11 | import time
 12 | import re
 13 | 
 14 | 
@dataclass
class SearchResult:
    """A single parsed DuckDuckGo search hit."""

    title: str  # link text of the result
    link: str  # destination URL (DuckDuckGo redirect wrappers are unwrapped by the searcher)
    snippet: str  # short summary text; empty string when the page had no snippet element
    position: int  # 1-based rank within the returned result list
 22 | 
 23 | class RateLimiter:
 24 |     def __init__(self, requests_per_minute: int = 30):
 25 |         self.requests_per_minute = requests_per_minute
 26 |         self.requests = []
 27 | 
 28 |     async def acquire(self):
 29 |         now = datetime.now()
 30 |         # Remove requests older than 1 minute
 31 |         self.requests = [
 32 |             req for req in self.requests if now - req < timedelta(minutes=1)
 33 |         ]
 34 | 
 35 |         if len(self.requests) >= self.requests_per_minute:
 36 |             # Wait until we can make another request
 37 |             wait_time = 60 - (now - self.requests[0]).total_seconds()
 38 |             if wait_time > 0:
 39 |                 await asyncio.sleep(wait_time)
 40 | 
 41 |         self.requests.append(now)
 42 | 
 43 | 
 44 | class DuckDuckGoSearcher:
 45 |     BASE_URL = "https://html.duckduckgo.com/html"
 46 |     HEADERS = {
 47 |         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
 48 |     }
 49 | 
 50 |     def __init__(self):
 51 |         self.rate_limiter = RateLimiter()
 52 | 
 53 |     def format_results_for_llm(self, results: List[SearchResult]) -> str:
 54 |         """Format results in a natural language style that's easier for LLMs to process"""
 55 |         if not results:
 56 |             return "No results were found for your search query. This could be due to DuckDuckGo's bot detection or the query returned no matches. Please try rephrasing your search or try again in a few minutes."
 57 | 
 58 |         output = []
 59 |         output.append(f"Found {len(results)} search results:\n")
 60 | 
 61 |         for result in results:
 62 |             output.append(f"{result.position}. {result.title}")
 63 |             output.append(f"   URL: {result.link}")
 64 |             output.append(f"   Summary: {result.snippet}")
 65 |             output.append("")  # Empty line between results
 66 | 
 67 |         return "\n".join(output)
 68 | 
 69 |     async def search(
 70 |         self, query: str, ctx: Context, max_results: int = 10
 71 |     ) -> List[SearchResult]:
 72 |         try:
 73 |             # Apply rate limiting
 74 |             await self.rate_limiter.acquire()
 75 | 
 76 |             # Create form data for POST request
 77 |             data = {
 78 |                 "q": query,
 79 |                 "b": "",
 80 |                 "kl": "",
 81 |             }
 82 | 
 83 |             await ctx.info(f"Searching DuckDuckGo for: {query}")
 84 | 
 85 |             async with httpx.AsyncClient() as client:
 86 |                 response = await client.post(
 87 |                     self.BASE_URL, data=data, headers=self.HEADERS, timeout=30.0
 88 |                 )
 89 |                 response.raise_for_status()
 90 | 
 91 |             # Parse HTML response
 92 |             soup = BeautifulSoup(response.text, "html.parser")
 93 |             if not soup:
 94 |                 await ctx.error("Failed to parse HTML response")
 95 |                 return []
 96 | 
 97 |             results = []
 98 |             for result in soup.select(".result"):
 99 |                 title_elem = result.select_one(".result__title")
100 |                 if not title_elem:
101 |                     continue
102 | 
103 |                 link_elem = title_elem.find("a")
104 |                 if not link_elem:
105 |                     continue
106 | 
107 |                 title = link_elem.get_text(strip=True)
108 |                 link = link_elem.get("href", "")
109 | 
110 |                 # Skip ad results
111 |                 if "y.js" in link:
112 |                     continue
113 | 
114 |                 # Clean up DuckDuckGo redirect URLs
115 |                 if link.startswith("//duckduckgo.com/l/?uddg="):
116 |                     link = urllib.parse.unquote(link.split("uddg=")[1].split("&")[0])
117 | 
118 |                 snippet_elem = result.select_one(".result__snippet")
119 |                 snippet = snippet_elem.get_text(strip=True) if snippet_elem else ""
120 | 
121 |                 results.append(
122 |                     SearchResult(
123 |                         title=title,
124 |                         link=link,
125 |                         snippet=snippet,
126 |                         position=len(results) + 1,
127 |                     )
128 |                 )
129 | 
130 |                 if len(results) >= max_results:
131 |                     break
132 | 
133 |             await ctx.info(f"Successfully found {len(results)} results")
134 |             return results
135 | 
136 |         except httpx.TimeoutException:
137 |             await ctx.error("Search request timed out")
138 |             return []
139 |         except httpx.HTTPError as e:
140 |             await ctx.error(f"HTTP error occurred: {str(e)}")
141 |             return []
142 |         except Exception as e:
143 |             await ctx.error(f"Unexpected error during search: {str(e)}")
144 |             traceback.print_exc(file=sys.stderr)
145 |             return []
146 | 
147 | 
class WebContentFetcher:
    """Downloads a web page and reduces it to plain text suitable for an LLM."""

    def __init__(self):
        # Fetching is throttled more tightly than search: 20 requests/minute.
        self.rate_limiter = RateLimiter(requests_per_minute=20)

    async def fetch_and_parse(self, url: str, ctx: Context) -> str:
        """Fetch and parse content from a webpage"""
        try:
            await self.rate_limiter.acquire()

            await ctx.info(f"Fetching content from: {url}")

            async with httpx.AsyncClient() as client:
                response = await client.get(
                    url,
                    headers={
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                    },
                    follow_redirects=True,
                    timeout=30.0,
                )
                response.raise_for_status()

            soup = BeautifulSoup(response.text, "html.parser")

            # Drop markup that carries no readable content.
            for noise in soup(["script", "style", "nav", "header", "footer"]):
                noise.decompose()

            # Flatten the remaining text: strip each line, split on runs of
            # double spaces, drop empty fragments, then collapse whitespace.
            fragments = []
            for raw_line in soup.get_text().splitlines():
                for piece in raw_line.strip().split("  "):
                    piece = piece.strip()
                    if piece:
                        fragments.append(piece)
            text = re.sub(r"\s+", " ", " ".join(fragments)).strip()

            # Cap the payload so a huge page cannot blow up the context window.
            if len(text) > 8000:
                text = text[:8000] + "... [content truncated]"

            await ctx.info(
                f"Successfully fetched and parsed content ({len(text)} characters)"
            )
            return text

        except httpx.TimeoutException:
            await ctx.error(f"Request timed out for URL: {url}")
            return "Error: The request timed out while trying to fetch the webpage."
        except httpx.HTTPError as e:
            await ctx.error(f"HTTP error occurred while fetching {url}: {str(e)}")
            return f"Error: Could not access the webpage ({str(e)})"
        except Exception as e:
            await ctx.error(f"Error fetching content from {url}: {str(e)}")
            return f"Error: An unexpected error occurred while fetching the webpage ({str(e)})"
206 | 
207 | 
# Initialize FastMCP server.
# Module-level singletons: one searcher/fetcher per process so their
# RateLimiter state is shared across every tool invocation.
mcp = FastMCP("ddg-search")
searcher = DuckDuckGoSearcher()
fetcher = WebContentFetcher()
212 | 
213 | 
@mcp.tool()
async def search(query: str, ctx: Context, max_results: int = 10) -> str:
    """
    Search DuckDuckGo and return formatted results.

    Args:
        query: The search query string
        ctx: MCP context for logging
        max_results: Maximum number of results to return (default: 10)
    """
    try:
        results = await searcher.search(query, ctx, max_results)
        return searcher.format_results_for_llm(results)
    except Exception as e:
        # searcher.search already reports errors via ctx; this is a last-resort
        # guard so the tool always returns a string instead of raising.
        traceback.print_exc(file=sys.stderr)
        return f"An error occurred while searching: {str(e)}"
230 | 
231 | 
@mcp.tool()
async def fetch_content(url: str, ctx: Context) -> str:
    """
    Fetch and parse content from a webpage URL.

    Args:
        url: The webpage URL to fetch content from
        ctx: MCP context for logging

    Returns:
        Cleaned page text (truncated after 8000 characters), or an
        "Error: ..." message string on failure — this tool never raises.
    """
    return await fetcher.fetch_and_parse(url, ctx)
242 | 
243 | 
def main():
    # Console-script entry point (see [project.scripts] in pyproject.toml):
    # serves the MCP server over stdio until the client disconnects.
    mcp.run()


if __name__ == "__main__":
    main()
250 | 
```