This is page 1 of 4. Use http://codebase.md/disler/just-prompt?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── commands
│   │   ├── context_prime_eza.md
│   │   ├── context_prime_w_lead.md
│   │   ├── context_prime.md
│   │   ├── jprompt_ultra_diff_review.md
│   │   ├── project_hello_w_name.md
│   │   └── project_hello.md
│   └── settings.json
├── .env.sample
├── .gitignore
├── .mcp.json
├── .python-version
├── ai_docs
│   ├── extending_thinking_sonny.md
│   ├── google-genai-api-update.md
│   ├── llm_providers_details.xml
│   ├── openai-reasoning-effort.md
│   └── pocket-pick-mcp-server-example.xml
├── example_outputs
│   ├── countdown_component
│   │   ├── countdown_component_groq_qwen-qwq-32b.md
│   │   ├── countdown_component_o_gpt-4.5-preview.md
│   │   ├── countdown_component_openai_o3-mini.md
│   │   ├── countdown_component_q_deepseek-r1-distill-llama-70b-specdec.md
│   │   └── diff.md
│   └── decision_openai_vs_anthropic_vs_google
│       ├── ceo_decision.md
│       ├── ceo_medium_decision_openai_vs_anthropic_vs_google_anthropic_claude-3-7-sonnet-20250219_4k.md
│       ├── ceo_medium_decision_openai_vs_anthropic_vs_google_gemini_gemini-2.5-flash-preview-04-17.md
│       ├── ceo_medium_decision_openai_vs_anthropic_vs_google_gemini_gemini-2.5-pro-preview-03-25.md
│       ├── ceo_medium_decision_openai_vs_anthropic_vs_google_openai_o3_high.md
│       ├── ceo_medium_decision_openai_vs_anthropic_vs_google_openai_o4-mini_high.md
│       └── ceo_prompt.xml
├── images
│   ├── just-prompt-logo.png
│   └── o3-as-a-ceo.png
├── list_models.py
├── prompts
│   ├── ceo_medium_decision_openai_vs_anthropic_vs_google.txt
│   ├── ceo_small_decision_python_vs_typescript.txt
│   ├── ceo_small_decision_rust_vs_prompt_eng.txt
│   ├── countdown_component.txt
│   ├── mock_bin_search.txt
│   └── mock_ui_component.txt
├── pyproject.toml
├── README.md
├── specs
│   ├── gemini-2-5-flash-reasoning.md
│   ├── init-just-prompt.md
│   ├── new-tool-llm-as-a-ceo.md
│   ├── oai-reasoning-levels.md
│   └── prompt_from_file_to_file_w_context.md
├── src
│   └── just_prompt
│       ├── __init__.py
│       ├── __main__.py
│       ├── atoms
│       │   ├── __init__.py
│       │   ├── llm_providers
│       │   │   ├── __init__.py
│       │   │   ├── anthropic.py
│       │   │   ├── deepseek.py
│       │   │   ├── gemini.py
│       │   │   ├── groq.py
│       │   │   ├── ollama.py
│       │   │   └── openai.py
│       │   └── shared
│       │       ├── __init__.py
│       │       ├── data_types.py
│       │       ├── model_router.py
│       │       ├── utils.py
│       │       └── validator.py
│       ├── molecules
│       │   ├── __init__.py
│       │   ├── ceo_and_board_prompt.py
│       │   ├── list_models.py
│       │   ├── list_providers.py
│       │   ├── prompt_from_file_to_file.py
│       │   ├── prompt_from_file.py
│       │   └── prompt.py
│       ├── server.py
│       └── tests
│           ├── __init__.py
│           ├── atoms
│           │   ├── __init__.py
│           │   ├── llm_providers
│           │   │   ├── __init__.py
│           │   │   ├── test_anthropic.py
│           │   │   ├── test_deepseek.py
│           │   │   ├── test_gemini.py
│           │   │   ├── test_groq.py
│           │   │   ├── test_ollama.py
│           │   │   └── test_openai.py
│           │   └── shared
│           │       ├── __init__.py
│           │       ├── test_model_router.py
│           │       ├── test_utils.py
│           │       └── test_validator.py
│           └── molecules
│               ├── __init__.py
│               ├── test_ceo_and_board_prompt.py
│               ├── test_list_models.py
│               ├── test_list_providers.py
│               ├── test_prompt_from_file_to_file.py
│               ├── test_prompt_from_file.py
│               └── test_prompt.py
├── ultra_diff_review
│   ├── diff_anthropic_claude-3-7-sonnet-20250219_4k.md
│   ├── diff_gemini_gemini-2.0-flash-thinking-exp.md
│   ├── diff_openai_o3-mini.md
│   └── fusion_ultra_diff_review.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------

```
1 | 3.12
2 | 
```

--------------------------------------------------------------------------------
/.env.sample:
--------------------------------------------------------------------------------

```
 1 | # Environment Variables for just-prompt
 2 | 
 3 | # OpenAI API Key
 4 | OPENAI_API_KEY=your_openai_api_key_here
 5 | 
 6 | # Anthropic API Key
 7 | ANTHROPIC_API_KEY=your_anthropic_api_key_here
 8 | 
 9 | # Gemini API Key
10 | GEMINI_API_KEY=your_gemini_api_key_here
11 | 
12 | # Groq API Key
13 | GROQ_API_KEY=your_groq_api_key_here
14 | 
15 | # DeepSeek API Key
16 | DEEPSEEK_API_KEY=your_deepseek_api_key_here
17 | 
18 | # Ollama endpoint (if not default)
19 | OLLAMA_HOST=http://localhost:11434
```

--------------------------------------------------------------------------------
/.mcp.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "mcpServers": {
 3 |     "just-prompt": {
 4 |       "type": "stdio",
 5 |       "command": "uv",
 6 |       "args": [
 7 |         "--directory",
 8 |         ".",
 9 |         "run",
10 |         "just-prompt",
11 |         "--default-models",
12 |         "openai:gpt-5:high,openai:gpt-5-mini:high,openai:gpt-5-nano:high,openai:o3:high,anthropic:claude-opus-4-1-20250805,anthropic:claude-opus-4-20250514,anthropic:claude-sonnet-4-20250514,gemini:gemini-2.5-pro,gemini:gemini-2.5-flash"
13 |       ],
14 |       "env": {}
15 |     }
16 |   }
17 | }
18 | 
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
 1 | # Python-generated files
 2 | __pycache__/
 3 | *.py[oc]
 4 | build/
 5 | dist/
 6 | wheels/
 7 | *.egg-info
 8 | 
 9 | # Virtual environments
10 | .venv
11 | 
12 | .env
13 | 
14 | # Byte-compiled / optimized / DLL files
15 | __pycache__/
16 | *.py[cod]
17 | *$py.class
18 | 
19 | # Distribution / packaging
20 | dist/
21 | build/
22 | *.egg-info/
23 | *.egg
24 | 
25 | # Unit test / coverage reports
26 | htmlcov/
27 | .tox/
28 | .nox/
29 | .coverage
30 | .coverage.*
31 | .cache
32 | nosetests.xml
33 | coverage.xml
34 | *.cover
35 | .hypothesis/
36 | .pytest_cache/
37 | 
38 | # Jupyter Notebook
39 | .ipynb_checkpoints
40 | 
41 | # Environments
42 | .env
43 | .venv
44 | env/
45 | venv/
46 | ENV/
47 | env.bak/
48 | venv.bak/
49 | 
50 | # mypy
51 | .mypy_cache/
52 | .dmypy.json
53 | dmypy.json
54 | 
55 | # IDE specific files
56 | .idea/
57 | .vscode/
58 | *.swp
59 | *.swo
60 | .DS_Store
61 | 
62 | 
63 | prompts/responses
64 | .aider*
65 | 
66 | focus_output/
67 | 
68 | # Git worktrees
69 | trees/
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Just Prompt - A lightweight MCP server for LLM providers
  2 | 
  3 | `just-prompt` is a Model Control Protocol (MCP) server that provides a unified interface to various Large Language Model (LLM) providers including OpenAI, Anthropic, Google Gemini, Groq, DeepSeek, and Ollama. See how we use the `ceo_and_board` tool to make [hard decisions easy with o3 here](https://youtu.be/LEMLntjfihA).
  4 | 
  5 | <img src="images/just-prompt-logo.png" alt="Just Prompt Logo" width="700" height="auto">
  6 | 
  7 | <img src="images/o3-as-a-ceo.png" alt="o3 as a CEO" width="700" height="auto">
  8 | 
  9 | 
 10 | ## Tools
 11 | 
 12 | The following MCP tools are available in the server:
 13 | 
 14 | - **`prompt`**: Send a prompt to multiple LLM models
 15 |   - Parameters:
 16 |     - `text`: The prompt text
 17 |     - `models_prefixed_by_provider` (optional): List of models with provider prefixes. If not provided, uses default models.
 18 | 
 19 | - **`prompt_from_file`**: Send a prompt from a file to multiple LLM models
 20 |   - Parameters:
 21 |     - `abs_file_path`: Absolute path to the file containing the prompt (must be an absolute path, not relative)
 22 |     - `models_prefixed_by_provider` (optional): List of models with provider prefixes. If not provided, uses default models.
 23 | 
 24 | - **`prompt_from_file_to_file`**: Send a prompt from a file to multiple LLM models and save responses as markdown files
 25 |   - Parameters:
 26 |     - `abs_file_path`: Absolute path to the file containing the prompt (must be an absolute path, not relative)
 27 |     - `models_prefixed_by_provider` (optional): List of models with provider prefixes. If not provided, uses default models.
 28 |     - `abs_output_dir` (default: "."): Absolute directory path to save the response markdown files to (must be an absolute path, not relative)
 29 | 
 30 | - **`ceo_and_board`**: Send a prompt to multiple 'board member' models and have a 'CEO' model make a decision based on their responses
 31 |   - Parameters:
 32 |     - `abs_file_path`: Absolute path to the file containing the prompt (must be an absolute path, not relative)
 33 |     - `models_prefixed_by_provider` (optional): List of models with provider prefixes to act as board members. If not provided, uses default models.
 34 |     - `abs_output_dir` (default: "."): Absolute directory path to save the response files and CEO decision (must be an absolute path, not relative)
 35 |     - `ceo_model` (default: "openai:o3"): Model to use for the CEO decision in format "provider:model"
 36 | 
 37 | - **`list_providers`**: List all available LLM providers
 38 |   - Parameters: None
 39 | 
 40 | - **`list_models`**: List all available models for a specific LLM provider
 41 |   - Parameters:
 42 |     - `provider`: Provider to list models for (e.g., 'openai' or 'o')
 43 | 
 44 | ## Provider Prefixes
 45 | > every model must be prefixed with the provider name
 46 | >
 47 | > use the short name for faster referencing
 48 | 
 49 | - `o` or `openai`: OpenAI 
 50 |   - `o:gpt-4o-mini`
 51 |   - `openai:gpt-4o-mini`
 52 | - `a` or `anthropic`: Anthropic 
 53 |   - `a:claude-3-5-haiku`
 54 |   - `anthropic:claude-3-5-haiku`
 55 | - `g` or `gemini`: Google Gemini 
 56 |   - `g:gemini-2.5-pro-exp-03-25`
 57 |   - `gemini:gemini-2.5-pro-exp-03-25`
 58 | - `q` or `groq`: Groq 
 59 |   - `q:llama-3.1-70b-versatile`
 60 |   - `groq:llama-3.1-70b-versatile`
 61 | - `d` or `deepseek`: DeepSeek 
 62 |   - `d:deepseek-coder`
 63 |   - `deepseek:deepseek-coder`
 64 | - `l` or `ollama`: Ollama 
 65 |   - `l:llama3.1`
 66 |   - `ollama:llama3.1`
 67 | 
 68 | ## Features
 69 | 
 70 | - Unified API for multiple LLM providers
 71 | - Support for text prompts from strings or files
 72 | - Run multiple models in parallel
 73 | - Automatic model name correction using the first model in the `--default-models` list
 74 | - Ability to save responses to files
 75 | - Easy listing of available providers and models
 76 | 
 77 | ## Installation
 78 | 
 79 | ```bash
 80 | # Clone the repository
 81 | git clone https://github.com/yourusername/just-prompt.git
 82 | cd just-prompt
 83 | 
 84 | # Install dependencies with uv
 85 | uv sync
 86 | ```
 87 | 
 88 | ### Environment Variables
 89 | 
 90 | Create a `.env` file with your API keys (you can copy the `.env.sample` file):
 91 | 
 92 | ```bash
 93 | cp .env.sample .env
 94 | ```
 95 | 
 96 | Then edit the `.env` file to add your API keys (or export them in your shell):
 97 | 
 98 | ```
 99 | OPENAI_API_KEY=your_openai_api_key_here
100 | ANTHROPIC_API_KEY=your_anthropic_api_key_here
101 | GEMINI_API_KEY=your_gemini_api_key_here
102 | GROQ_API_KEY=your_groq_api_key_here
103 | DEEPSEEK_API_KEY=your_deepseek_api_key_here
104 | OLLAMA_HOST=http://localhost:11434
105 | ```
106 | 
107 | ## Claude Code Installation
108 | > In all these examples, replace the directory with the path to the just-prompt directory.
109 | 
110 | Default models are set to `openai:gpt-5:high`, `openai:gpt-5-mini:high`, `openai:gpt-5-nano:high`, `openai:o3:high`, `anthropic:claude-opus-4-1-20250805`, `anthropic:claude-opus-4-20250514`, `anthropic:claude-sonnet-4-20250514`, `gemini:gemini-2.5-pro`, and `gemini:gemini-2.5-flash`.
111 | 
112 | If you use Claude Code directly from this repository, you can see in the `.mcp.json` file that we set the default models to:
113 | 
114 | ```
115 | {
116 |   "mcpServers": {
117 |     "just-prompt": {
118 |       "type": "stdio",
119 |       "command": "uv",
120 |       "args": [
121 |         "--directory",
122 |         ".",
123 |         "run",
124 |         "just-prompt",
125 |         "--default-models",
126 |         "openai:gpt-5:high,openai:gpt-5-mini:high,openai:gpt-5-nano:high,openai:o3:high,anthropic:claude-opus-4-1-20250805,anthropic:claude-opus-4-20250514,anthropic:claude-sonnet-4-20250514,gemini:gemini-2.5-pro,gemini:gemini-2.5-flash"
127 |       ],
128 |       "env": {}
129 |     }
130 |   }
131 | }
132 | ```
133 | 
134 | The `--default-models` parameter sets the models to use when none are explicitly provided to the API endpoints. It accepts a comma-separated list of models, and the first model in the list is also used for model name correction when needed.
135 | 
136 | When starting the server, it will automatically check which API keys are available in your environment and inform you which providers you can use. If a key is missing, the provider will be listed as unavailable, but the server will still start and can be used with the providers that are available.
137 | 
138 | ### Using `mcp add-json`
139 | 
140 | Paste this command into Claude Code, but don't run it until you've copied the JSON below (the command reads it from your clipboard via `pbpaste`):
141 | 
142 | ```
143 | claude mcp add just-prompt "$(pbpaste)"
144 | ```
145 | 
146 | JSON to copy
147 | 
148 | ```
149 | {
150 |     "command": "uv",
151 |     "args": ["--directory", ".", "run", "just-prompt"]
152 | }
153 | ```
154 | 
155 | With a custom default model set to `openai:gpt-4o`.
156 | 
157 | ```
158 | {
159 |     "command": "uv",
160 |     "args": ["--directory", ".", "run", "just-prompt", "--default-models", "openai:gpt-4o"]
161 | }
162 | ```
163 | 
164 | With multiple default models:
165 | 
166 | ```
167 | {
168 |     "command": "uv",
169 |     "args": ["--directory", ".", "run", "just-prompt", "--default-models", "openai:o3:high,openai:o4-mini:high,anthropic:claude-opus-4-20250514,anthropic:claude-sonnet-4-20250514,gemini:gemini-2.5-pro-preview-03-25,gemini:gemini-2.5-flash-preview-04-17"]
170 | }
171 | ```
172 | 
173 | ### Using `mcp add` with project scope
174 | 
175 | ```bash
176 | # With default models
177 | claude mcp add just-prompt -s project \
178 |   -- \
179 |     uv --directory . \
180 |     run just-prompt
181 | 
182 | # With custom default model
183 | claude mcp add just-prompt -s project \
184 |   -- \
185 |   uv --directory . \
186 |   run just-prompt --default-models "openai:gpt-4o"
187 | 
188 | # With multiple default models
189 | claude mcp add just-prompt -s user \
190 |   -- \
191 |   uv --directory . \
192 |   run just-prompt --default-models "openai:o3:high,openai:o4-mini:high,anthropic:claude-opus-4-20250514,anthropic:claude-sonnet-4-20250514,gemini:gemini-2.5-pro-preview-03-25,gemini:gemini-2.5-flash-preview-04-17"
193 | ```
194 | 
195 | 
196 | ### Using `mcp remove`
197 | 
198 | `claude mcp remove just-prompt`
199 | 
200 | ## Running Tests
201 | 
202 | ```bash
203 | uv run pytest
204 | ```
205 | 
206 | ## Codebase Structure
207 | 
208 | ```
209 | .
210 | ├── ai_docs/                   # Documentation for AI model details
211 | │   ├── extending_thinking_sonny.md
212 | │   ├── llm_providers_details.xml
213 | │   ├── openai-reasoning-effort.md
214 | │   └── pocket-pick-mcp-server-example.xml
215 | ├── example_outputs/           # Example outputs from different models
216 | ├── list_models.py             # Script to list available LLM models
217 | ├── prompts/                   # Example prompt files
218 | ├── pyproject.toml             # Python project configuration
219 | ├── specs/                     # Project specifications
220 | │   ├── init-just-prompt.md
221 | │   ├── new-tool-llm-as-a-ceo.md
222 | │   └── oai-reasoning-levels.md
223 | ├── src/                       # Source code directory
224 | │   └── just_prompt/
225 | │       ├── __init__.py
226 | │       ├── __main__.py
227 | │       ├── atoms/             # Core components
228 | │       │   ├── llm_providers/ # Individual provider implementations
229 | │       │   │   ├── anthropic.py
230 | │       │   │   ├── deepseek.py
231 | │       │   │   ├── gemini.py
232 | │       │   │   ├── groq.py
233 | │       │   │   ├── ollama.py
234 | │       │   │   └── openai.py
235 | │       │   └── shared/        # Shared utilities and data types
236 | │       │       ├── data_types.py
237 | │       │       ├── model_router.py
238 | │       │       ├── utils.py
239 | │       │       └── validator.py
240 | │       ├── molecules/         # Higher-level functionality
241 | │       │   ├── ceo_and_board_prompt.py
242 | │       │   ├── list_models.py
243 | │       │   ├── list_providers.py
244 | │       │   ├── prompt.py
245 | │       │   ├── prompt_from_file.py
246 | │       │   └── prompt_from_file_to_file.py
247 | │       ├── server.py          # MCP server implementation
248 | │       └── tests/             # Test directory
249 | │           ├── atoms/         # Tests for atoms
250 | │           │   ├── llm_providers/
251 | │           │   └── shared/
252 | │           └── molecules/     # Tests for molecules
253 | │               ├── test_ceo_and_board_prompt.py
254 | │               ├── test_list_models.py
255 | │               ├── test_list_providers.py
256 | │               ├── test_prompt.py
257 | │               ├── test_prompt_from_file.py
258 | │               └── test_prompt_from_file_to_file.py
259 | └── ultra_diff_review/         # Diff review outputs
260 | ```
261 | 
262 | ## Context Priming
263 | READ README.md, pyproject.toml, then run git ls-files, and 'eza --git-ignore --tree' to understand the context of the project.
264 | 
265 | # Reasoning Effort with OpenAI o‑Series
266 | 
267 | For OpenAI o‑series reasoning models (`o4-mini`, `o3-mini`, `o3`) you can
268 | control how much *internal* reasoning the model performs before producing a
269 | visible answer.
270 | 
271 | Append one of the following suffixes to the model name (after the *provider*
272 | prefix):
273 | 
274 | * `:low`   – minimal internal reasoning (faster, cheaper)
275 | * `:medium` – balanced (default if omitted)
276 | * `:high`  – thorough reasoning (slower, more tokens)
277 | 
278 | Examples:
279 | 
280 | * `openai:o4-mini:low`
281 | * `o:o4-mini:high`
282 | 
283 | When a reasoning suffix is present, **just‑prompt** automatically switches to
284 | the OpenAI *Responses* API (when available) and sets the corresponding
285 | `reasoning.effort` parameter.  If the installed OpenAI SDK is older, it
286 | gracefully falls back to the Chat Completions endpoint and embeds an internal
287 | system instruction to approximate the requested effort level.
288 | 
289 | # Thinking Tokens with Claude
290 | 
291 | The Anthropic Claude models `claude-opus-4-20250514` and `claude-sonnet-4-20250514` support extended thinking capabilities using thinking tokens. This allows Claude to work through a more thorough thought process before answering.
292 | 
293 | You can enable thinking tokens by adding a suffix to the model name in this format:
294 | - `anthropic:claude-opus-4-20250514:1k` - Use 1024 thinking tokens for Opus 4
295 | - `anthropic:claude-sonnet-4-20250514:4k` - Use 4096 thinking tokens for Sonnet 4
296 | - `anthropic:claude-opus-4-20250514:8000` - Use 8000 thinking tokens for Opus 4
297 | 
298 | Notes:
299 | - Thinking tokens are supported for `claude-opus-4-20250514`, `claude-sonnet-4-20250514`, and `claude-3-7-sonnet-20250219` models
300 | - Valid thinking token budgets range from 1024 to 16000
301 | - Values outside this range will be automatically adjusted to be within range
302 | - You can specify the budget with k notation (1k, 4k, etc.) or with exact numbers (1024, 4096, etc.)
303 | 
304 | # Thinking Budget with Gemini
305 | 
306 | The Google Gemini model `gemini-2.5-flash-preview-04-17` supports extended thinking capabilities using thinking budget. This allows Gemini to perform more thorough reasoning before providing a response.
307 | 
308 | You can enable thinking budget by adding a suffix to the model name in this format:
309 | - `gemini:gemini-2.5-flash-preview-04-17:1k` - Use 1024 thinking budget
310 | - `gemini:gemini-2.5-flash-preview-04-17:4k` - Use 4096 thinking budget
311 | - `gemini:gemini-2.5-flash-preview-04-17:8000` - Use 8000 thinking budget
312 | 
313 | Notes:
314 | - Thinking budget is only supported for the `gemini-2.5-flash-preview-04-17` model
315 | - Valid thinking budget values range from 0 to 24576
316 | - Values outside this range will be automatically adjusted to be within range
317 | - You can specify the budget with k notation (1k, 4k, etc.) or with exact numbers (1024, 4096, etc.)
318 | 
319 | ## Resources
320 | - https://docs.anthropic.com/en/api/models-list?q=list+models
321 | - https://github.com/googleapis/python-genai
322 | - https://platform.openai.com/docs/api-reference/models/list
323 | - https://api-docs.deepseek.com/api/list-models
324 | - https://github.com/ollama/ollama-python
325 | - https://github.com/openai/openai-python
326 | 
327 | ## Master AI Coding 
328 | Learn to code with AI with foundational [Principles of AI Coding](https://agenticengineer.com/principled-ai-coding?y=jprompt)
329 | 
330 | Follow the [IndyDevDan youtube channel](https://www.youtube.com/@indydevdan) for more AI coding tips and tricks.
```
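
As a rough illustration of the reasoning-effort behaviour described in the README above, here is a minimal sketch of how a `:high` suffix could translate into an OpenAI Responses API call. This is not the repository's implementation; `ask_with_effort` is a hypothetical helper, and the sketch assumes `openai>=1.68` plus an `OPENAI_API_KEY` in the environment.

```python
from openai import OpenAI

client = OpenAI()


def ask_with_effort(text: str, model_spec: str) -> str:
    # e.g. "openai:o4-mini:high" -> model "o4-mini", effort "high"
    _provider, model, effort = model_spec.split(":")
    response = client.responses.create(
        model=model,
        reasoning={"effort": effort},  # "low" | "medium" | "high"
        input=text,
    )
    return response.output_text


print(ask_with_effort("What is the capital of France?", "openai:o4-mini:high"))
```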

--------------------------------------------------------------------------------
/.claude/commands/project_hello.md:
--------------------------------------------------------------------------------

```markdown
1 | hi how are you
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Tests package
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Atoms tests package
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/shared/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Shared tests package
2 | 
```

--------------------------------------------------------------------------------
/.claude/commands/project_hello_w_name.md:
--------------------------------------------------------------------------------

```markdown
1 | hi how are you $ARGUMENTS
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/molecules/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Molecules tests package
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # LLM Providers tests package
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Atoms package - basic building blocks
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/shared/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Shared package - common utilities and data types
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # LLM Providers package - interfaces for various LLM APIs
2 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/molecules/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # Molecules package - higher-level functionality built from atoms
2 | 
```

--------------------------------------------------------------------------------
/.claude/commands/context_prime.md:
--------------------------------------------------------------------------------

```markdown
1 | READ README.md, THEN run git ls-files to understand the context of the project.
```

--------------------------------------------------------------------------------
/prompts/mock_bin_search.txt:
--------------------------------------------------------------------------------

```
1 | python: return code exclusively: def binary_search(arr, target) -> Optional[int]:
```

--------------------------------------------------------------------------------
/.claude/commands/context_prime_eza.md:
--------------------------------------------------------------------------------

```markdown
1 | READ README.md, THEN run eza . --git-ignore --tree to understand the context of the project.
```

--------------------------------------------------------------------------------
/src/just_prompt/__init__.py:
--------------------------------------------------------------------------------

```python
1 | # just-prompt - A lightweight wrapper MCP server for various LLM providers
2 | 
3 | __version__ = "0.1.0"
4 | 
```

--------------------------------------------------------------------------------
/.claude/settings.json:
--------------------------------------------------------------------------------

```json
1 | {
2 |   "permissions": {
3 |     "allow": [
4 |       "Bash(npm run lint)",
5 |       "Bash(npm run test:*)"
6 |     ]
7 |   }
8 | }
```

--------------------------------------------------------------------------------
/.claude/commands/context_prime_w_lead.md:
--------------------------------------------------------------------------------

```markdown
1 | READ README.md, THEN run git ls-files to understand the context of the project.
2 | 
3 | Be sure to also READ: $ARGUMENTS and nothing else.
```

--------------------------------------------------------------------------------
/prompts/ceo_small_decision_rust_vs_prompt_eng.txt:
--------------------------------------------------------------------------------

```
 1 | <purpose>
 2 |     I want to decide if I should spend time learning Rust or Prompt Engineering.
 3 |     Help me decide between these two options. 
 4 | </purpose>
 5 | 
 6 | <option-1>
 7 |     Rust
 8 | </option-1>
 9 | 
10 | <option-2>
11 |     Prompt Engineering
12 | </option-2>
13 | 
```

--------------------------------------------------------------------------------
/prompts/ceo_small_decision_python_vs_typescript.txt:
--------------------------------------------------------------------------------

```
1 | <purpose>
2 |     I want to decide if I should spend time learning Python or TypeScript.
3 |     Help me decide between these two options. Given that I want to train ai models and build a fullstack website to host them, which language should I use?
4 | </purpose>
5 | 
```

--------------------------------------------------------------------------------
/ai_docs/extending_thinking_sonny.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Code snippet of using thinking tokens
 2 | 
 3 | response = client.messages.create(
 4 |     model="claude-3-7-sonnet-20250219",
 5 |     max_tokens=8192,
 6 |     thinking={
 7 |         "type": "enabled",
 8 |         "budget_tokens": 4000,
 9 |     },
10 |     messages=[{"role": "user", "content": args.prompt}],
11 | )
```
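
The snippet above omits imports and client setup. A self-contained sketch, assuming the `anthropic` SDK and an `ANTHROPIC_API_KEY` in the environment (a literal question stands in for `args.prompt`), might look like this:

```python
import os

import anthropic
from dotenv import load_dotenv

load_dotenv()
client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))

response = client.messages.create(
    model="claude-3-7-sonnet-20250219",
    max_tokens=8192,  # must be larger than the thinking budget
    thinking={
        "type": "enabled",
        "budget_tokens": 4000,
    },
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)

# The response contains thinking blocks followed by text blocks; print only the text.
print("".join(block.text for block in response.content if block.type == "text"))
```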

--------------------------------------------------------------------------------
/prompts/mock_ui_component.txt:
--------------------------------------------------------------------------------

```
 1 | Build vue, react, and svelte components for this component definition:
 2 | 
 3 | <TableOfContents :tree="tree" />
 4 | 
 5 | The tree is a json object that looks like this:
 6 | 
 7 | ```json
 8 | {
 9 |     "name": "TableOfContents",
10 |     "children": [
11 |         {
12 |             "name": "Item",
13 |             "children": [
14 |                 {
15 |                     "name": "Item",
16 |                     "children": []
17 |                 }
18 |             ]
19 |         },
20 |         {
21 |             "name": "Item 2",
22 |             "children": []
23 |         }
24 |     ]
25 | }
26 | ```
```

--------------------------------------------------------------------------------
/src/just_prompt/molecules/list_models.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | List models functionality for just-prompt.
 3 | """
 4 | 
 5 | from typing import List
 6 | import logging
 7 | from ..atoms.shared.validator import validate_provider
 8 | from ..atoms.shared.model_router import ModelRouter
 9 | 
10 | logger = logging.getLogger(__name__)
11 | 
12 | 
13 | def list_models(provider: str) -> List[str]:
14 |     """
15 |     List available models for a provider.
16 |     
17 |     Args:
18 |         provider: Provider name (full or short)
19 |         
20 |     Returns:
21 |         List of model names
22 |     """
23 |     # Validate provider
24 |     validate_provider(provider)
25 |     
26 |     # Get models from provider
27 |     return ModelRouter.route_list_models(provider)
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
 1 | [project]
 2 | name = "just-prompt"
 3 | version = "0.1.0"
 4 | description = "A lightweight MCP server for various LLM providers"
 5 | readme = "README.md"
 6 | requires-python = ">=3.10"
 7 | dependencies = [
 8 |     "anthropic>=0.49.0",
 9 |     "google-genai>=1.22.0",
10 |     "groq>=0.20.0",
11 |     "ollama>=0.4.7",
12 |     "openai>=1.68.0",
13 |     "python-dotenv>=1.0.1",
14 |     "pydantic>=2.0.0",
15 |     "mcp>=0.1.5",
16 | ]
17 | 
18 | [project.scripts]
19 | just-prompt = "just_prompt.__main__:main"
20 | 
21 | [project.optional-dependencies]
22 | test = [
23 |     "pytest>=7.3.1",
24 |     "pytest-asyncio>=0.20.3",
25 | ]
26 | 
27 | [build-system]
28 | requires = ["setuptools>=61.0"]
29 | build-backend = "setuptools.build_meta"
```

--------------------------------------------------------------------------------
/src/just_prompt/molecules/list_providers.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | List providers functionality for just-prompt.
 3 | """
 4 | 
 5 | from typing import List, Dict
 6 | import logging
 7 | from ..atoms.shared.data_types import ModelProviders
 8 | 
 9 | logger = logging.getLogger(__name__)
10 | 
11 | 
12 | def list_providers() -> List[Dict[str, str]]:
13 |     """
14 |     List all available providers with their full and short names.
15 |     
16 |     Returns:
17 |         List of dictionaries with provider information
18 |     """
19 |     providers = []
20 |     for provider in ModelProviders:
21 |         providers.append({
22 |             "name": provider.name,
23 |             "full_name": provider.full_name,
24 |             "short_name": provider.short_name
25 |         })
26 |     
27 |     return providers
```

--------------------------------------------------------------------------------
/prompts/countdown_component.txt:
--------------------------------------------------------------------------------

```
 1 | Create a countdown timer component that satisfies these requirements:
 2 | 
 3 | 1. Framework implementations:
 4 |    - Vue.js
 5 |    - Svelte
 6 |    - React
 7 |    - Vanilla JavaScript
 8 | 
 9 | 2. Component interface:
10 |    - :start-time: number (starting time in seconds)
11 |    - :format: number (display format, 0 = MM:SS, 1 = HH:MM:SS)
12 | 
13 | 3. Features:
14 |    - Count down from start-time to zero
15 |    - Display remaining time in specified format
16 |    - Stop counting when reaching zero
17 |    - Emit/callback 'finished' event when countdown completes
18 |    - Provide a visual indication when time is running low (< 10% of total)
19 | 
20 | 4. Include:
21 |    - Component implementation
22 |    - Sample usage
23 |    - Clear comments explaining key parts
24 | 
25 | Provide clean, well-structured code for each framework version.
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/test_ollama.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for Ollama provider.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from dotenv import load_dotenv
 8 | from just_prompt.atoms.llm_providers import ollama
 9 | 
10 | # Load environment variables
11 | load_dotenv()
12 | 
13 | 
14 | def test_list_models():
15 |     """Test listing Ollama models."""
16 |     models = ollama.list_models()
17 |     assert isinstance(models, list)
18 |     assert isinstance(models[0], str)
19 |     assert len(models) > 0
20 | 
21 | 
22 | def test_prompt():
23 |     """Test sending prompt to Ollama."""
24 |     # Using gemma3:12b as the test model - adjust if needed based on your environment
25 | 
26 |     response = ollama.prompt("What is the capital of France?", "gemma3:12b")
27 | 
28 |     # Assertions
29 |     assert isinstance(response, str)
30 |     assert len(response) > 0
31 |     assert "paris" in response.lower() or "Paris" in response
32 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/test_groq.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for Groq provider.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from dotenv import load_dotenv
 8 | from just_prompt.atoms.llm_providers import groq
 9 | 
10 | # Load environment variables
11 | load_dotenv()
12 | 
13 | # Skip tests if API key not available
14 | if not os.environ.get("GROQ_API_KEY"):
15 |     pytest.skip("Groq API key not available", allow_module_level=True)
16 | 
17 | 
18 | def test_list_models():
19 |     """Test listing Groq models."""
20 |     models = groq.list_models()
21 |     assert isinstance(models, list)
22 |     assert len(models) > 0
23 |     assert all(isinstance(model, str) for model in models)
24 | 
25 | 
26 | def test_prompt():
27 |     """Test sending prompt to Groq."""
28 |     response = groq.prompt("What is the capital of France?", "qwen-qwq-32b")
29 |     assert isinstance(response, str)
30 |     assert len(response) > 0
31 |     assert "paris" in response.lower() or "Paris" in response
32 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/test_deepseek.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for DeepSeek provider.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from dotenv import load_dotenv
 8 | from just_prompt.atoms.llm_providers import deepseek
 9 | 
10 | # Load environment variables
11 | load_dotenv()
12 | 
13 | # Skip tests if API key not available
14 | if not os.environ.get("DEEPSEEK_API_KEY"):
15 |     pytest.skip("DeepSeek API key not available", allow_module_level=True)
16 | 
17 | 
18 | def test_list_models():
19 |     """Test listing DeepSeek models."""
20 |     models = deepseek.list_models()
21 |     assert isinstance(models, list)
22 |     assert len(models) > 0
23 |     assert all(isinstance(model, str) for model in models)
24 | 
25 | 
26 | def test_prompt():
27 |     """Test sending prompt to DeepSeek."""
28 |     response = deepseek.prompt("What is the capital of France?", "deepseek-coder")
29 |     assert isinstance(response, str)
30 |     assert len(response) > 0
31 |     assert "paris" in response.lower() or "Paris" in response
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/shared/data_types.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Data types and models for just-prompt MCP server.
 3 | """
 4 | 
 5 | from enum import Enum
 6 | 
 7 | 
 8 | class ModelProviders(Enum):
 9 |     """
10 |     Enum of supported model providers with their full and short names.
11 |     """
12 |     OPENAI = ("openai", "o")
13 |     ANTHROPIC = ("anthropic", "a")
14 |     GEMINI = ("gemini", "g") 
15 |     GROQ = ("groq", "q")
16 |     DEEPSEEK = ("deepseek", "d")
17 |     OLLAMA = ("ollama", "l")
18 |     
19 |     def __init__(self, full_name, short_name):
20 |         self.full_name = full_name
21 |         self.short_name = short_name
22 |         
23 |     @classmethod
24 |     def from_name(cls, name):
25 |         """
26 |         Get provider enum from full or short name.
27 |         
28 |         Args:
29 |             name: The provider name (full or short)
30 |             
31 |         Returns:
32 |             ModelProviders: The corresponding provider enum, or None if not found
33 |         """
34 |         for provider in cls:
35 |             if provider.full_name == name or provider.short_name == name:
36 |                 return provider
37 |         return None
```
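
A quick usage sketch of the `ModelProviders` enum above; the attribute names come straight from the file, and the asserts are purely illustrative:

```python
from just_prompt.atoms.shared.data_types import ModelProviders

# Full and short names resolve to the same provider; unknown names return None.
assert ModelProviders.from_name("openai") is ModelProviders.OPENAI
assert ModelProviders.from_name("o") is ModelProviders.OPENAI
assert ModelProviders.OPENAI.full_name == "openai"
assert ModelProviders.OPENAI.short_name == "o"
assert ModelProviders.from_name("unknown") is None
```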

--------------------------------------------------------------------------------
/src/just_prompt/tests/molecules/test_prompt_from_file.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for prompt_from_file functionality.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | import tempfile
 8 | from dotenv import load_dotenv
 9 | from just_prompt.molecules.prompt_from_file import prompt_from_file
10 | 
11 | # Load environment variables
12 | load_dotenv()
13 | 
14 | 
15 | def test_nonexistent_file():
16 |     """Test with non-existent file."""
17 |     with pytest.raises(FileNotFoundError):
18 |         prompt_from_file("/non/existent/file.txt", ["o:gpt-4o-mini"])
19 | 
20 | 
21 | def test_file_read():
22 |     """Test that the file is read correctly and processes with real API call."""
23 |     # Create temporary file with a simple question
24 |     with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp:
25 |         temp.write("What is the capital of France?")
26 |         temp_path = temp.name
27 |     
28 |     try:
29 |         # Make real API call
30 |         response = prompt_from_file(temp_path, ["o:gpt-4o-mini"])
31 |         
32 |         # Assertions
33 |         assert isinstance(response, list)
34 |         assert len(response) == 1
35 |         assert "paris" in response[0].lower() or "Paris" in response[0]
36 |     finally:
37 |         # Clean up
38 |         os.unlink(temp_path)
```

--------------------------------------------------------------------------------
/.claude/commands/jprompt_ultra_diff_review.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Ultra Diff Review
 2 | > Execute each task in the order given to conduct a thorough code review.
 3 | 
 4 | ## Task 1: Create diff.md
 5 | 
 6 | Create a new file called diff.md.
 7 | 
 8 | At the top of the file, add the following markdown:
 9 | 
10 | ```md
11 | # Code Review
12 | - Review the diff, report on issues, bugs, and improvements. 
13 | - End with a concise markdown table of any issues found, their solutions, and a risk assessment for each issue if applicable.
14 | - Use emojis to convey the severity of each issue.
15 | 
16 | ## Diff
17 | 
18 | ```
19 | 
20 | ## Task 2: git diff and append
21 | 
22 | Then run git diff and append the output to the file.
23 | 
24 | ## Task 3: just-prompt multi-llm tool call
25 | 
26 | Then use that file as the input to this just-prompt tool call.
27 | 
28 | prompt_from_file_to_file(
29 |     from_file = diff.md,
30 |     models = "openai:o3-mini, anthropic:claude-3-7-sonnet-20250219:4k, gemini:gemini-2.0-flash-thinking-exp",
31 |     output_dir = ultra_diff_review/
32 | )
33 | 
34 | ## Task 4: Read the output files and synthesize
35 | 
36 | Then read the output files and think hard to synthesize the results into a new single file called `ultra_diff_review/fusion_ultra_diff_review.md` following the original instructions plus any additional instructions or callouts you think are needed to create the best possible review.
37 | 
38 | ## Task 5: Present the results
39 | 
40 | Then let me know which issues you think are worth resolving and we'll proceed from there.
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/ollama.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Ollama provider implementation.
 3 | """
 4 | 
 5 | import os
 6 | from typing import List
 7 | import logging
 8 | import ollama
 9 | from dotenv import load_dotenv
10 | 
11 | # Load environment variables
12 | load_dotenv()
13 | 
14 | # Configure logging
15 | logger = logging.getLogger(__name__)
16 | 
17 | 
18 | def prompt(text: str, model: str) -> str:
19 |     """
20 |     Send a prompt to Ollama and get a response.
21 | 
22 |     Args:
23 |         text: The prompt text
24 |         model: The model name
25 | 
26 |     Returns:
27 |         Response string from the model
28 |     """
29 |     try:
30 |         logger.info(f"Sending prompt to Ollama model: {model}")
31 | 
32 |         # Create chat completion
33 |         response = ollama.chat(
34 |             model=model,
35 |             messages=[
36 |                 {
37 |                     "role": "user",
38 |                     "content": text,
39 |                 },
40 |             ],
41 |         )
42 | 
43 |         # Extract response content
44 |         return response.message.content
45 |     except Exception as e:
46 |         logger.error(f"Error sending prompt to Ollama: {e}")
47 |         raise ValueError(f"Failed to get response from Ollama: {str(e)}")
48 | 
49 | 
50 | def list_models() -> List[str]:
51 |     """
52 |     List available Ollama models.
53 | 
54 |     Returns:
55 |         List of model names
56 |     """
57 |     logger.info("Listing Ollama models")
58 |     response = ollama.list()
59 | 
60 |     # Extract model names from the models attribute
61 |     models = [model.model for model in response.models]
62 | 
63 |     return models
64 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/molecules/prompt_from_file.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Prompt from file functionality for just-prompt.
 3 | """
 4 | 
 5 | from typing import List
 6 | import logging
 7 | import os
 8 | from pathlib import Path
 9 | from .prompt import prompt
10 | 
11 | logger = logging.getLogger(__name__)
12 | 
13 | 
14 | def prompt_from_file(abs_file_path: str, models_prefixed_by_provider: List[str] = None) -> List[str]:
15 |     """
16 |     Read text from a file and send it as a prompt to multiple models.
17 |     
18 |     Args:
19 |         abs_file_path: Absolute path to the text file (must be an absolute path, not relative)
20 |         models_prefixed_by_provider: List of model strings in format "provider:model"
21 |                                     If None, uses the DEFAULT_MODELS environment variable
22 |         
23 |     Returns:
24 |         List of responses from the models
25 |     """
26 |     file_path = Path(abs_file_path)
27 |     
28 |     # Validate file
29 |     if not file_path.exists():
30 |         raise FileNotFoundError(f"File not found: {abs_file_path}")
31 |     
32 |     if not file_path.is_file():
33 |         raise ValueError(f"Not a file: {abs_file_path}")
34 |     
35 |     # Read file content
36 |     try:
37 |         with open(file_path, 'r', encoding='utf-8') as f:
38 |             text = f.read()
39 |     except Exception as e:
40 |         logger.error(f"Error reading file {abs_file_path}: {e}")
41 |         raise ValueError(f"Error reading file: {str(e)}")
42 |     
43 |     # Send prompt with file content
44 |     return prompt(text, models_prefixed_by_provider)
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/molecules/test_prompt.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for prompt functionality.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from dotenv import load_dotenv
 8 | from just_prompt.molecules.prompt import prompt
 9 | 
10 | # Load environment variables
11 | load_dotenv()
12 | 
13 | def test_prompt_basic():
14 |     """Test basic prompt functionality with a real API call."""
15 |     # Define a simple test case
16 |     test_prompt = "What is the capital of France?"
17 |     test_models = ["openai:gpt-4o-mini"]
18 | 
19 |     # Call the prompt function with a real model
20 |     response = prompt(test_prompt, test_models)
21 | 
22 |     # Assertions
23 |     assert isinstance(response, list)
24 |     assert len(response) == 1
25 |     assert "paris" in response[0].lower() or "Paris" in response[0]
26 | 
27 | def test_prompt_multiple_models():
28 |     """Test prompt with multiple models."""
29 |     # Skip if API keys aren't available
30 |     if not os.environ.get("OPENAI_API_KEY") or not os.environ.get("ANTHROPIC_API_KEY"):
31 |         pytest.skip("Required API keys not available")
32 |         
33 |     # Define a simple test case
34 |     test_prompt = "What is the capital of France?"
35 |     test_models = ["openai:gpt-4o-mini", "anthropic:claude-3-5-haiku-20241022"]
36 | 
37 |     # Call the prompt function with multiple models
38 |     response = prompt(test_prompt, test_models)
39 | 
40 |     # Assertions
41 |     assert isinstance(response, list)
42 |     assert len(response) == 2
43 |     # Check all responses contain Paris
44 |     for r in response:
45 |         assert "paris" in r.lower() or "Paris" in r
46 | 
```

--------------------------------------------------------------------------------
/specs/oai-reasoning-levels.md:
--------------------------------------------------------------------------------

```markdown
 1 | Feature Request: Add low, medium, high reasoning levels to the OpenAI o-series reasoning models
 2 | > Models; o3-mini, o4-mini, o3
 3 | >
 4 | > Implement every detail below end to end and validate your work with tests.
 5 | 
 6 | ## Implementation Notes
 7 | 
 8 | - Just like how claude-3-7-sonnet has budget tokens in src/just_prompt/atoms/llm_providers/anthropic.py, OpenAI has a similar feature with the low, medium, high suffix. We want to support o4-mini:low, o4-mini:medium, o4-mini:high, ...repeat for o3-mini and o3.
 9 | - If this suffix is present, we should trigger a prompt_with_thinking function in src/just_prompt/atoms/llm_providers/openai.py. Use the example code in ai_docs/openai-reasoning-effort.md. If suffix is not present, use the existing prompt function.
10 | - Update tests to verify the feature works, specifically in test_openai.py. Test with o4-mini:low, o4-mini:medium, o4-mini:high on a simple puzzle.
11 | - After you implement and test, update the README.md file to detail the new feature.
12 | - We're using 'uv' to run code and tests, so you won't need to install anything before testing.
13 | 
14 | ## Relevant Files (Context)
15 | > Read these files before implementing the feature.
16 | README.md
17 | pyproject.toml
18 | src/just_prompt/molecules/prompt.py
19 | src/just_prompt/atoms/llm_providers/anthropic.py
20 | src/just_prompt/atoms/llm_providers/openai.py
21 | src/just_prompt/tests/atoms/llm_providers/test_openai.py
22 | 
23 | ## Self Validation (Close the loop)
24 | > After implementing the feature, run the tests to verify it works.
25 | >
26 | > All env variables are in place - run tests against real apis.
27 | - uv run pytest src/just_prompt/tests/atoms/llm_providers/test_openai.py
28 | - uv run pytest src/just_prompt/tests/molecules/test_prompt.py
```
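
To make the suffix handling in the notes above concrete, here is one possible shape of the parsing step. This is a hypothetical sketch; `parse_reasoning_suffix` is not necessarily the name used in the codebase:

```python
REASONING_EFFORTS = {"low", "medium", "high"}


def parse_reasoning_suffix(model: str) -> tuple[str, str | None]:
    """Split e.g. 'o4-mini:high' into ('o4-mini', 'high'); return (model, None) if no suffix."""
    base, sep, suffix = model.rpartition(":")
    if sep and suffix.lower() in REASONING_EFFORTS:
        return base, suffix.lower()
    return model, None  # no recognised suffix -> fall back to the existing prompt function


assert parse_reasoning_suffix("o4-mini:high") == ("o4-mini", "high")
assert parse_reasoning_suffix("o3-mini") == ("o3-mini", None)
```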

--------------------------------------------------------------------------------
/src/just_prompt/tests/molecules/test_prompt_from_file_to_file.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for prompt_from_file_to_file functionality.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | import tempfile
 8 | import shutil
 9 | from dotenv import load_dotenv
10 | from just_prompt.molecules.prompt_from_file_to_file import prompt_from_file_to_file
11 | 
12 | # Load environment variables
13 | load_dotenv()
14 | 
15 | 
16 | def test_directory_creation_and_file_writing():
17 |     """Test that the output directory is created and files are written with real API responses."""
18 |     # Create temporary input file with a simple question
19 |     with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file:
20 |         temp_file.write("What is the capital of France?")
21 |         input_path = temp_file.name
22 |     
23 |     # Create a deep non-existent directory path
24 |     temp_dir = os.path.join(tempfile.gettempdir(), "just_prompt_test_dir", "output")
25 |     
26 |     try:
27 |         # Make real API call
28 |         file_paths = prompt_from_file_to_file(
29 |             input_path, 
30 |             ["o:gpt-4o-mini"],
31 |             temp_dir
32 |         )
33 |         
34 |         # Assertions
35 |         assert isinstance(file_paths, list)
36 |         assert len(file_paths) == 1
37 |         
38 |         # Check that the file exists
39 |         assert os.path.exists(file_paths[0])
40 |         
41 |         # Check that the file has a .md extension
42 |         assert file_paths[0].endswith('.md')
43 |         
44 |         # Check file content contains the expected response
45 |         with open(file_paths[0], 'r') as f:
46 |             content = f.read()
47 |             assert "paris" in content.lower() or "Paris" in content
48 |     finally:
49 |         # Clean up
50 |         os.unlink(input_path)
51 |         # Remove the created directory and all its contents
52 |         if os.path.exists(os.path.dirname(temp_dir)):
53 |             shutil.rmtree(os.path.dirname(temp_dir))
```

--------------------------------------------------------------------------------
/src/just_prompt/__main__.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Main entry point for just-prompt.
 3 | """
 4 | 
 5 | import argparse
 6 | import asyncio
 7 | import logging
 8 | import sys
 9 | from dotenv import load_dotenv
10 | from .server import serve
11 | from .atoms.shared.utils import DEFAULT_MODEL
12 | from .atoms.shared.validator import print_provider_availability
13 | 
14 | # Load environment variables
15 | load_dotenv()
16 | 
17 | # Configure logging
18 | logging.basicConfig(
19 |     level=logging.INFO,
20 |     format='%(asctime)s [%(levelname)s] %(message)s',
21 |     datefmt='%Y-%m-%d %H:%M:%S'
22 | )
23 | logger = logging.getLogger(__name__)
24 | 
25 | 
26 | def main():
27 |     """
28 |     Main entry point for just-prompt.
29 |     """
30 |     parser = argparse.ArgumentParser(description="just-prompt - A lightweight MCP server for various LLM providers")
31 |     parser.add_argument(
32 |         "--default-models", 
33 |         default=DEFAULT_MODEL,
34 |         help="Comma-separated list of default models to use for prompts and model name correction, in format provider:model"
35 |     )
36 |     parser.add_argument(
37 |         "--log-level", 
38 |         choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
39 |         default="INFO",
40 |         help="Logging level"
41 |     )
42 |     parser.add_argument(
43 |         "--show-providers",
44 |         action="store_true",
45 |         help="Show available providers and exit"
46 |     )
47 |     
48 |     args = parser.parse_args()
49 |     
50 |     # Set logging level
51 |     logging.getLogger().setLevel(getattr(logging, args.log_level))
52 | 
53 |     # Show provider availability and optionally exit
54 |     if args.show_providers:
55 |         print_provider_availability()
56 |         sys.exit(0)
57 |     
58 |     try:
59 |         # Start server (asyncio)
60 |         asyncio.run(serve(args.default_models))
61 |     except Exception as e:
62 |         logger.error(f"Error starting server: {e}")
63 |         sys.exit(1)
64 | 
65 | 
66 | if __name__ == "__main__":
67 |     main()
68 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/shared/test_utils.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for utility functions.
 3 | """
 4 | 
 5 | import pytest
 6 | from just_prompt.atoms.shared.utils import split_provider_and_model, get_provider_from_prefix
 7 | 
 8 | 
 9 | def test_split_provider_and_model():
10 |     """Test splitting provider and model from string."""
11 |     # Test basic splitting
12 |     provider, model = split_provider_and_model("openai:gpt-4")
13 |     assert provider == "openai"
14 |     assert model == "gpt-4"
15 |     
16 |     # Test short provider name
17 |     provider, model = split_provider_and_model("o:gpt-4")
18 |     assert provider == "o"
19 |     assert model == "gpt-4"
20 |     
21 |     # Test model with colons
22 |     provider, model = split_provider_and_model("ollama:llama3:latest")
23 |     assert provider == "ollama"
24 |     assert model == "llama3:latest"
25 |     
26 |     # Test invalid format
27 |     with pytest.raises(ValueError):
28 |         split_provider_and_model("invalid-model-string")
29 | 
30 | 
31 | def test_get_provider_from_prefix():
32 |     """Test getting provider from prefix."""
33 |     # Test full names
34 |     assert get_provider_from_prefix("openai") == "openai"
35 |     assert get_provider_from_prefix("anthropic") == "anthropic"
36 |     assert get_provider_from_prefix("gemini") == "gemini"
37 |     assert get_provider_from_prefix("groq") == "groq"
38 |     assert get_provider_from_prefix("deepseek") == "deepseek"
39 |     assert get_provider_from_prefix("ollama") == "ollama"
40 |     
41 |     # Test short names
42 |     assert get_provider_from_prefix("o") == "openai"
43 |     assert get_provider_from_prefix("a") == "anthropic"
44 |     assert get_provider_from_prefix("g") == "gemini"
45 |     assert get_provider_from_prefix("q") == "groq"
46 |     assert get_provider_from_prefix("d") == "deepseek"
47 |     assert get_provider_from_prefix("l") == "ollama"
48 |     
49 |     # Test invalid prefix
50 |     with pytest.raises(ValueError):
51 |         get_provider_from_prefix("unknown")
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/molecules/test_list_providers.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for list_providers functionality.
 3 | """
 4 | 
 5 | import pytest
 6 | from just_prompt.molecules.list_providers import list_providers
 7 | 
 8 | 
 9 | def test_list_providers():
10 |     """Test listing providers."""
11 |     providers = list_providers()
12 |     
13 |     # Check basic structure
14 |     assert isinstance(providers, list)
15 |     assert len(providers) > 0
16 |     assert all(isinstance(p, dict) for p in providers)
17 |     
18 |     # Check expected providers are present
19 |     provider_names = [p["name"] for p in providers]
20 |     assert "OPENAI" in provider_names
21 |     assert "ANTHROPIC" in provider_names
22 |     assert "GEMINI" in provider_names
23 |     assert "GROQ" in provider_names
24 |     assert "DEEPSEEK" in provider_names
25 |     assert "OLLAMA" in provider_names
26 |     
27 |     # Check each provider has required fields
28 |     for provider in providers:
29 |         assert "name" in provider
30 |         assert "full_name" in provider
31 |         assert "short_name" in provider
32 |         
33 |         # Check full_name and short_name values
34 |         if provider["name"] == "OPENAI":
35 |             assert provider["full_name"] == "openai"
36 |             assert provider["short_name"] == "o"
37 |         elif provider["name"] == "ANTHROPIC":
38 |             assert provider["full_name"] == "anthropic"
39 |             assert provider["short_name"] == "a"
40 |         elif provider["name"] == "GEMINI":
41 |             assert provider["full_name"] == "gemini"
42 |             assert provider["short_name"] == "g"
43 |         elif provider["name"] == "GROQ":
44 |             assert provider["full_name"] == "groq"
45 |             assert provider["short_name"] == "q"
46 |         elif provider["name"] == "DEEPSEEK":
47 |             assert provider["full_name"] == "deepseek"
48 |             assert provider["short_name"] == "d"
49 |         elif provider["name"] == "OLLAMA":
50 |             assert provider["full_name"] == "ollama"
51 |             assert provider["short_name"] == "l"
52 | 
```

--------------------------------------------------------------------------------
/list_models.py:
--------------------------------------------------------------------------------

```python
 1 | def list_openai_models():
 2 |     from openai import OpenAI
 3 | 
 4 |     client = OpenAI()
 5 | 
 6 |     print(client.models.list())
 7 | 
 8 | 
 9 | def list_groq_models():
10 |     import os
11 |     from groq import Groq
12 | 
13 |     client = Groq(
14 |         api_key=os.environ.get("GROQ_API_KEY"),
15 |     )
16 | 
17 |     chat_completion = client.models.list()
18 | 
19 |     print(chat_completion)
20 | 
21 | 
22 | def list_anthropic_models():
23 |     import anthropic
24 |     import os
25 |     from dotenv import load_dotenv
26 | 
27 |     load_dotenv()
28 | 
29 |     client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
30 |     models = client.models.list()
31 |     print("Available Anthropic models:")
32 |     for model in models.data:
33 |         print(f"- {model.id}")
34 | 
35 | 
36 | def list_gemini_models():
37 |     import os
38 |     from google import genai
39 |     from dotenv import load_dotenv
40 | 
41 |     load_dotenv()
42 | 
43 |     client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
44 | 
45 |     print("List of models that support generateContent:\n")
46 |     for m in client.models.list():
47 |         for action in m.supported_actions:
48 |             if action == "generateContent":
49 |                 print(m.name)
50 | 
51 |     print("List of models that support embedContent:\n")
52 |     for m in client.models.list():
53 |         for action in m.supported_actions:
54 |             if action == "embedContent":
55 |                 print(m.name)
56 | 
57 | 
58 | def list_deepseek_models():
59 |     from openai import OpenAI
60 | 
61 |     # for backward compatibility, you can still use `https://api.deepseek.com/v1` as `base_url`.
62 |     client = OpenAI(
63 |         api_key="sk-ds-3f422175ff114212a42d7107c3efd1e4",  # fake
64 |         base_url="https://api.deepseek.com",
65 |     )
66 |     print(client.models.list())
67 | 
68 | 
69 | def list_ollama_models():
70 |     import ollama
71 | 
72 |     print(ollama.list())
73 | 
74 | 
75 | # Uncomment to run the functions
76 | # list_openai_models()
77 | # list_groq_models()
78 | # list_anthropic_models()
79 | # list_gemini_models()
80 | # list_deepseek_models()
81 | # list_ollama_models()
82 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/groq.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Groq provider implementation.
 3 | """
 4 | 
 5 | import os
 6 | from typing import List
 7 | import logging
 8 | from groq import Groq
 9 | from dotenv import load_dotenv
10 | 
11 | # Load environment variables
12 | load_dotenv()
13 | 
14 | # Configure logging
15 | logger = logging.getLogger(__name__)
16 | 
17 | # Initialize Groq client
18 | client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
19 | 
20 | 
21 | def prompt(text: str, model: str) -> str:
22 |     """
23 |     Send a prompt to Groq and get a response.
24 |     
25 |     Args:
26 |         text: The prompt text
27 |         model: The model name
28 |         
29 |     Returns:
30 |         Response string from the model
31 |     """
32 |     try:
33 |         logger.info(f"Sending prompt to Groq model: {model}")
34 |         
35 |         # Create chat completion
36 |         chat_completion = client.chat.completions.create(
37 |             messages=[{"role": "user", "content": text}],
38 |             model=model,
39 |         )
40 |         
41 |         # Extract response content
42 |         return chat_completion.choices[0].message.content
43 |     except Exception as e:
44 |         logger.error(f"Error sending prompt to Groq: {e}")
45 |         raise ValueError(f"Failed to get response from Groq: {str(e)}")
46 | 
47 | 
48 | def list_models() -> List[str]:
49 |     """
50 |     List available Groq models.
51 |     
52 |     Returns:
53 |         List of model names
54 |     """
55 |     try:
56 |         logger.info("Listing Groq models")
57 |         response = client.models.list()
58 |         
59 |         # Extract model IDs
60 |         models = [model.id for model in response.data]
61 |         
62 |         return models
63 |     except Exception as e:
64 |         logger.error(f"Error listing Groq models: {e}")
65 |         # Return some known models if API fails
66 |         logger.info("Returning hardcoded list of known Groq models")
67 |         return [
68 |             "llama-3.3-70b-versatile",
69 |             "llama-3.1-70b-versatile",
70 |             "llama-3.1-8b-versatile",
71 |             "mixtral-8x7b-32768",
72 |             "gemma-7b-it",
73 |             "qwen-2.5-32b"
74 |         ]
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/deepseek.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | DeepSeek provider implementation.
 3 | """
 4 | 
 5 | import os
 6 | from typing import List
 7 | import logging
 8 | from openai import OpenAI
 9 | from dotenv import load_dotenv
10 | 
11 | # Load environment variables
12 | load_dotenv()
13 | 
14 | # Configure logging
15 | logger = logging.getLogger(__name__)
16 | 
17 | # Initialize DeepSeek client with OpenAI-compatible interface
18 | client = OpenAI(
19 |     api_key=os.environ.get("DEEPSEEK_API_KEY"),
20 |     base_url="https://api.deepseek.com"
21 | )
22 | 
23 | 
24 | def prompt(text: str, model: str) -> str:
25 |     """
26 |     Send a prompt to DeepSeek and get a response.
27 |     
28 |     Args:
29 |         text: The prompt text
30 |         model: The model name
31 |         
32 |     Returns:
33 |         Response string from the model
34 |     """
35 |     try:
36 |         logger.info(f"Sending prompt to DeepSeek model: {model}")
37 |         
38 |         # Create chat completion
39 |         response = client.chat.completions.create(
40 |             model=model,
41 |             messages=[{"role": "user", "content": text}],
42 |             stream=False,
43 |         )
44 |         
45 |         # Extract response content
46 |         return response.choices[0].message.content
47 |     except Exception as e:
48 |         logger.error(f"Error sending prompt to DeepSeek: {e}")
49 |         raise ValueError(f"Failed to get response from DeepSeek: {str(e)}")
50 | 
51 | 
52 | def list_models() -> List[str]:
53 |     """
54 |     List available DeepSeek models.
55 |     
56 |     Returns:
57 |         List of model names
58 |     """
59 |     try:
60 |         logger.info("Listing DeepSeek models")
61 |         response = client.models.list()
62 |         
63 |         # Extract model IDs
64 |         models = [model.id for model in response.data]
65 |         
66 |         return models
67 |     except Exception as e:
68 |         logger.error(f"Error listing DeepSeek models: {e}")
69 |         # Return some known models if API fails
70 |         logger.info("Returning hardcoded list of known DeepSeek models")
71 |         return [
72 |             "deepseek-coder",
73 |             "deepseek-chat",
74 |             "deepseek-reasoner",
75 |             "deepseek-coder-v2",
76 |             "deepseek-reasoner-lite"
77 |         ]
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/test_openai.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for OpenAI provider.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from dotenv import load_dotenv
 8 | from just_prompt.atoms.llm_providers import openai
 9 | 
10 | # Load environment variables
11 | load_dotenv()
12 | 
13 | # Skip tests if API key not available
14 | if not os.environ.get("OPENAI_API_KEY"):
15 |     pytest.skip("OpenAI API key not available", allow_module_level=True)
16 | 
17 | 
18 | def test_list_models():
19 |     """Test listing OpenAI models."""
20 |     models = openai.list_models()
21 |     
22 |     # Assertions
23 |     assert isinstance(models, list)
24 |     assert len(models) > 0
25 |     assert all(isinstance(model, str) for model in models)
26 |     
27 |     # Check for at least one expected model
28 |     gpt_models = [model for model in models if "gpt" in model.lower()]
29 |     assert len(gpt_models) > 0, "No GPT models found"
30 | 
31 | 
32 | def test_prompt():
33 |     """Test sending prompt to OpenAI with a regular model."""
34 |     response = openai.prompt("What is the capital of France?", "gpt-4o-mini")
35 | 
36 |     # Assertions
37 |     assert isinstance(response, str)
38 |     assert len(response) > 0
39 |     assert "paris" in response.lower() or "Paris" in response
40 | 
41 | 
42 | def test_parse_reasoning_suffix():
43 |     """Test parsing reasoning effort suffix from model names."""
44 | 
45 |     # No suffix
46 |     assert openai.parse_reasoning_suffix("o4-mini") == ("o4-mini", "")
47 |     assert openai.parse_reasoning_suffix("o3") == ("o3", "")
48 | 
49 |     # Supported suffixes
50 |     assert openai.parse_reasoning_suffix("o4-mini:low") == ("o4-mini", "low")
51 |     assert openai.parse_reasoning_suffix("o4-mini:medium") == ("o4-mini", "medium")
52 |     assert openai.parse_reasoning_suffix("o4-mini:high") == ("o4-mini", "high")
53 |     assert openai.parse_reasoning_suffix("o3-mini:LOW") == ("o3-mini", "low")  # case insensitive
54 | 
55 |     # Unsupported model – suffix ignored
56 |     assert openai.parse_reasoning_suffix("gpt-4o-mini:low") == ("gpt-4o-mini:low", "")
57 | 
58 | 
59 | @pytest.mark.parametrize("model_suffix", ["o4-mini:low", "o4-mini:medium", "o4-mini:high"])
60 | def test_prompt_with_reasoning(model_suffix):
61 |     """Test sending prompt with reasoning effort enabled."""
62 | 
63 |     response = openai.prompt("What is the capital of Spain?", model_suffix)
64 | 
65 |     # Assertions
66 |     assert isinstance(response, str)
67 |     assert len(response) > 0
68 |     assert "madrid" in response.lower() or "Madrid" in response
```

--------------------------------------------------------------------------------
/ultra_diff_review/diff_gemini_gemini-2.0-flash-thinking-exp.md:
--------------------------------------------------------------------------------

```markdown
 1 | ## Code Review
 2 | 
 3 | The diff introduces modularity and improves the structure of the script by encapsulating the model listing logic for each provider into separate functions. However, there are a few issues and areas for improvement.
 4 | 
 5 | **Issues, Bugs, and Improvements:**
 6 | 
 7 | 1.  **🚨 Hardcoded API Key (DeepSeek):** The `list_deepseek_models` function includes a hardcoded API key for DeepSeek. This is a major security vulnerability as API keys should be kept secret and managed securely, preferably through environment variables.
 8 | 
 9 | 2.  **⚠️ Lack of Error Handling:** The script lacks error handling. If API calls fail due to network issues, invalid API keys, or other reasons, the script will likely crash or produce uninformative error messages.  Robust error handling is crucial for production-ready code.
10 | 
11 | 3.  **ℹ️ Inconsistent API Key Loading (Minor):** While `dotenv` is used for Anthropic and Gemini API keys, OpenAI, Groq, and DeepSeek (partially) rely directly on environment variables.  While functional, consistent use of `dotenv` for all API keys would enhance maintainability and project consistency.
12 | 
13 | 4.  **ℹ️ Missing Function Docstrings (Minor):** The functions lack docstrings explaining their purpose, parameters (if any), and return values. Docstrings enhance code readability and make it easier to understand the function's role.
14 | 
15 | 5.  **ℹ️ No Centralized Configuration (Minor):**  While using environment variables is good, having a more centralized configuration mechanism (even if it's just a `.env` file loaded by `dotenv`) could be beneficial for managing various settings in the future.
16 | 
17 | **Markdown Table of Issues:**
18 | 
19 | | Issue                      | Solution                                                    | Risk Assessment |
20 | |----------------------------|-------------------------------------------------------------|-----------------|
21 | | 🚨 **Hardcoded API Key (DeepSeek)** | Use environment variables to store and access the DeepSeek API key. | High            |
22 | | ⚠️ **Lack of Error Handling**    | Implement `try-except` blocks to handle potential API errors. | Medium          |
23 | | ℹ️ **Inconsistent API Key Loading** | Use `dotenv` consistently for all API keys.               | Low             |
24 | | ℹ️ **Missing Function Docstrings** | Add docstrings to each function explaining its purpose.   | Low             |
25 | | ℹ️ **No Centralized Config**    | Consider a more centralized configuration approach if needed. | Low             |
```
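
The two highest-risk findings above (the hardcoded DeepSeek key and the missing error handling) map to a small change in `list_models.py`. A minimal sketch of that fix, assuming the key lives in the `DEEPSEEK_API_KEY` environment variable this repository already uses elsewhere:

```python
# Sketch only: read the DeepSeek key from the environment and wrap the API
# call in error handling, as the review recommends. Names are illustrative.
import os

from dotenv import load_dotenv
from openai import OpenAI


def list_deepseek_models():
    load_dotenv()
    api_key = os.environ.get("DEEPSEEK_API_KEY")
    if not api_key:
        raise EnvironmentError("DEEPSEEK_API_KEY is not set")

    client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
    try:
        for model in client.models.list().data:
            print(f"- {model.id}")
    except Exception as e:
        print(f"Failed to list DeepSeek models: {e}")
```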

--------------------------------------------------------------------------------
/src/just_prompt/molecules/prompt_from_file_to_file.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Prompt from file to file functionality for just-prompt.
 3 | """
 4 | 
 5 | from typing import List
 6 | import logging
 7 | import os
 8 | from pathlib import Path
 9 | from .prompt_from_file import prompt_from_file
10 | from ..atoms.shared.utils import DEFAULT_MODEL
11 | 
12 | logger = logging.getLogger(__name__)
13 | 
14 | 
15 | def prompt_from_file_to_file(
16 |     abs_file_path: str, models_prefixed_by_provider: List[str] = None, abs_output_dir: str = "."
17 | ) -> List[str]:
18 |     """
19 |     Read text from a file, send it as prompt to multiple models, and save responses to files.
20 | 
21 |     Args:
22 |         abs_file_path: Absolute path to the text file (must be an absolute path, not relative)
23 |         models_prefixed_by_provider: List of model strings in format "provider:model"
24 |                                     If None, uses the DEFAULT_MODELS environment variable
25 |         abs_output_dir: Absolute directory path to save response files (must be an absolute path, not relative)
26 | 
27 |     Returns:
28 |         List of paths to the output files
29 |     """
30 |     # Validate output directory
31 |     output_path = Path(abs_output_dir)
32 |     if not output_path.exists():
33 |         output_path.mkdir(parents=True, exist_ok=True)
34 | 
35 |     if not output_path.is_dir():
36 |         raise ValueError(f"Not a directory: {abs_output_dir}")
37 | 
38 |     # Get the base name of the input file
39 |     input_file_name = Path(abs_file_path).stem
40 | 
41 |     # Get responses
42 |     responses = prompt_from_file(abs_file_path, models_prefixed_by_provider)
43 | 
44 |     # Save responses to files
45 |     output_files = []
46 | 
47 |     # Get the models that were actually used
48 |     models_used = models_prefixed_by_provider
49 |     if not models_used:
50 |         default_models = os.environ.get("DEFAULT_MODELS", DEFAULT_MODEL)
51 |         models_used = [model.strip() for model in default_models.split(",")]
52 | 
53 |     for i, (model_string, response) in enumerate(zip(models_used, responses)):
54 |         # Sanitize model string for filename (replace colons with underscores)
55 |         safe_model_name = model_string.replace(":", "_")
56 | 
57 |         # Create output filename with .md extension
58 |         output_file = output_path / f"{input_file_name}_{safe_model_name}.md"
59 | 
60 |         # Write response to file as markdown
61 |         try:
62 |             with open(output_file, "w", encoding="utf-8") as f:
63 |                 f.write(response)
64 |             output_files.append(str(output_file))
65 |         except Exception as e:
66 |             logger.error(f"Error writing response to {output_file}: {e}")
67 |             output_files.append(f"Error: {str(e)}")
68 | 
69 |     return output_files
70 | 
```
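
For orientation, a hypothetical call to the function above; the paths and model string are placeholders. Because colons are replaced with underscores, this example would write its response to `/tmp/out/my_prompt_openai_gpt-4o-mini.md`.

```python
# Hypothetical usage; paths and model are placeholders, not project defaults.
from just_prompt.molecules.prompt_from_file_to_file import prompt_from_file_to_file

output_files = prompt_from_file_to_file(
    abs_file_path="/tmp/my_prompt.txt",
    models_prefixed_by_provider=["openai:gpt-4o-mini"],
    abs_output_dir="/tmp/out",
)
print(output_files)  # ["/tmp/out/my_prompt_openai_gpt-4o-mini.md"]
```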

--------------------------------------------------------------------------------
/ultra_diff_review/diff_anthropic_claude-3-7-sonnet-20250219_4k.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Code Review
 2 | 
 3 | I've analyzed the changes made to the `list_models.py` file. The diff shows a complete refactoring of the file that organizes model listing functionality into separate functions for different AI providers.
 4 | 
 5 | ## Key Changes
 6 | 
 7 | 1. **Code Organization:** The code has been restructured from a series of commented blocks into organized functions for each AI provider.
 8 | 2. **Function Implementation:** Each provider now has a dedicated function for listing their available models.
 9 | 3. **DeepSeek API Key:** A hardcoded API key is now present in the DeepSeek function.
10 | 4. **Function Execution:** All functions are defined but commented out at the bottom of the file.
11 | 
12 | ## Issues and Improvements
13 | 
14 | ### 1. Hardcoded API Key
15 | The `list_deepseek_models()` function contains a hardcoded API key: `"sk-ds-3f422175ff114212a42d7107c3efd1e4"`. This is a significant security risk as API keys should never be stored in source code.
16 | 
17 | ### 2. Inconsistent Environment Variable Usage
18 | Most functions use environment variables for API keys, but the DeepSeek function does not follow this pattern.
19 | 
20 | ### 3. Error Handling
21 | None of the functions include error handling for API failures, network issues, or missing API keys.
22 | 
23 | ### 4. Import Organization
24 | Import statements are scattered throughout the functions instead of being consolidated at the top of the file.
25 | 
26 | ### 5. No Main Function
27 | There's no main function or entrypoint that would allow users to select which model list they want to see.
28 | 
29 | ## Issue Summary
30 | 
31 | | Issue | Solution | Risk Assessment |
32 | |-------|----------|-----------------|
33 | | 🚨 Hardcoded API key in DeepSeek function | Replace with environment variable: `api_key=os.environ.get("DEEPSEEK_API_KEY")` | High - Security risk, potential unauthorized API usage and charges |
34 | | ⚠️ No error handling | Add try/except blocks to handle API errors, network issues, and missing credentials | Medium - Code will fail without clear error messages |
35 | | 🔧 Inconsistent environment variable usage | Standardize API key access across all providers | Low - Maintenance and consistency issue |
36 | | 🔧 Scattered imports | Consolidate common imports at the top of the file | Low - Code organization issue |
37 | | 💡 No main function or CLI | Add a main function with argument parsing to run specific provider functions | Low - Usability enhancement |
38 | | 💡 Missing API key validation | Add checks to validate API keys are present before making API calls | Medium - Prevents unclear errors when keys are missing |
39 | 
40 | The most critical issue is the hardcoded API key which should be addressed immediately to prevent security risks.
```
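
The "no main function or CLI" suggestion is the only item above that is more than a one-line change. A rough sketch of what it could look like, assuming the `list_*_models()` functions defined in `list_models.py`; the argparse wiring itself is illustrative, not part of the repository:

```python
# Sketch of a CLI entrypoint that dispatches to the provider listing
# functions already defined in list_models.py.
import argparse

import list_models


def main():
    providers = {
        "openai": list_models.list_openai_models,
        "groq": list_models.list_groq_models,
        "anthropic": list_models.list_anthropic_models,
        "gemini": list_models.list_gemini_models,
        "deepseek": list_models.list_deepseek_models,
        "ollama": list_models.list_ollama_models,
    }
    parser = argparse.ArgumentParser(description="List models for one provider")
    parser.add_argument("provider", choices=sorted(providers))
    args = parser.parse_args()
    providers[args.provider]()


if __name__ == "__main__":
    main()
```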

--------------------------------------------------------------------------------
/src/just_prompt/atoms/shared/utils.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Utility functions for just-prompt.
  3 | """
  4 | 
  5 | from typing import Tuple, List, Optional
  6 | import os
  7 | from dotenv import load_dotenv
  8 | import logging
  9 | 
 10 | # Set up logging
 11 | logging.basicConfig(
 12 |     level=logging.INFO,
 13 |     format='%(asctime)s [%(levelname)s] %(message)s',
 14 |     datefmt='%Y-%m-%d %H:%M:%S'
 15 | )
 16 | 
 17 | # Load environment variables
 18 | load_dotenv()
 19 | 
 20 | # Default model constants
 21 | DEFAULT_MODEL = "anthropic:claude-3-7-sonnet-20250219"
 22 | 
 23 | 
 24 | def split_provider_and_model(model_string: str) -> Tuple[str, str]:
 25 |     """
 26 |     Split a model string into provider and model name.
 27 |     
 28 |     Note: This splits on the first colon only and keeps the rest of the string
 29 |     as the model name. Model names may themselves contain colons (e.g. "ollama:llama3:latest"),
 30 |     which are preserved as part of the model name.
 31 |     
 32 |     Args:
 33 |         model_string: String in format "provider:model"
 34 |         
 35 |     Returns:
 36 |         Tuple containing (provider, model)
 37 |     """
 38 |     parts = model_string.split(":", 1)
 39 |     if len(parts) != 2:
 40 |         raise ValueError(f"Invalid model string format: {model_string}. Expected format: 'provider:model'")
 41 |     
 42 |     provider, model = parts
 43 |     return provider, model
 44 | 
 45 | 
 46 | def get_provider_from_prefix(prefix: str) -> str:
 47 |     """
 48 |     Get the full provider name from a prefix.
 49 |     
 50 |     Args:
 51 |         prefix: Provider prefix (short or full name)
 52 |         
 53 |     Returns:
 54 |         Full provider name
 55 |     """
 56 |     from .data_types import ModelProviders
 57 |     
 58 |     provider = ModelProviders.from_name(prefix)
 59 |     if provider is None:
 60 |         raise ValueError(f"Unknown provider prefix: {prefix}")
 61 |     
 62 |     return provider.full_name
 63 | 
 64 | 
 65 | def get_models_prefixed_by_provider(provider_prefix: str, model_name: str) -> str:
 66 |     """
 67 |     Format a model string with provider prefix.
 68 |     
 69 |     Args:
 70 |         provider_prefix: The provider prefix (short or full name)
 71 |         model_name: The model name
 72 |         
 73 |     Returns:
 74 |         Formatted string in "provider:model" format
 75 |     """
 76 |     provider = get_provider_from_prefix(provider_prefix)
 77 |     return f"{provider}:{model_name}"
 78 | 
 79 | 
 80 | def get_api_key(provider: str) -> Optional[str]:
 81 |     """
 82 |     Get the API key for a provider from environment variables.
 83 |     
 84 |     Args:
 85 |         provider: Provider name (full name)
 86 |         
 87 |     Returns:
 88 |         API key as string or ``None`` if the provider is unsupported or no
 89 |         environment variable is set
 90 |     """
 91 |     key_mapping = {
 92 |         "openai": "OPENAI_API_KEY",
 93 |         "anthropic": "ANTHROPIC_API_KEY",
 94 |         "gemini": "GEMINI_API_KEY",
 95 |         "groq": "GROQ_API_KEY",
 96 |         "deepseek": "DEEPSEEK_API_KEY"
 97 |     }
 98 |     
 99 |     env_var = key_mapping.get(provider)
100 |     if not env_var:
101 |         return None
102 |     
103 |     return os.environ.get(env_var)
```

--------------------------------------------------------------------------------
/example_outputs/decision_openai_vs_anthropic_vs_google/ceo_medium_decision_openai_vs_anthropic_vs_google_openai_o4-mini_high.md:
--------------------------------------------------------------------------------

```markdown
 1 | Here’s a focused comparison of OpenAI, Anthropic and Google along your three axes—Model Performance, Model Tool Use and Model Cost—plus a bottom‑line “which to pick” based on different priorities.
 2 | 
 3 | 1. Model Performance (Raw Intelligence)  
 4 |    • OpenAI (o3 / GPT‑4o): leads most public benchmarks, best vision‑reasoning, continuous frontier releases.  
 5 |    • Google (Gemini 2.5 Pro): at parity on reasoning & code benchmarks, unrivaled context windows (1M→2M tokens soon).  
 6 |    • Anthropic (Claude 3.5 Sonnet): very strong in free‑form reasoning, matches or beats GPT‑4‑Turbo in text tasks but lags on vision/speech.  
 7 | 
 8 | 2. Model Tool Use (Ability to orchestrate APIs, plug‑ins, agents)  
 9 |    • OpenAI: richest ecosystem—Assistants API with built‑in tool discovery, function‑calls, vision+generation APIs out of the box.  
10 |    • Anthropic: clean, safety‑centric JSON tool schema; coming tooling ecosystem but fewer first‑party connectors (no vision yet).  
11 |    • Google: Vertex AI + AI Studio pipelines, good SDKs and open‑weight Gemma for on‑prem, but less mature “agent” layer than OpenAI.  
12 | 
13 | 3. Model Cost (Price / Performance at scale)  
14 |    • Anthropic (Sonnet tier): cheapest per token for GPT‑4‑level quality today.  
15 |    • Google (Vertex discounts & Gemma open models): aggressive pricing and on‑device options with Gemma 3.  
16 |    • OpenAI: steadily falling prices, but top‑end O‑series still carries a premium vs Sonnet/Gemini mid‑tiers.  
17 | 
18 | Summary “Bet” Recommendations  
19 | 
20 |  • If you care most about **bleeding‑edge capabilities + seamless, production‑ready tool/agent support**, lean into **OpenAI**. You get top scores, the largest third‑party connector ecosystem and Microsoft’s enterprise muscle—at a premium price.  
21 |  • If **unit economics** (cost‑performance) is your #1 driver and you value a safety‑first alignment ethos, **Anthropic** is the sweet spot. You give up some multimodal/speech features but gain the lowest cost for GPT‑4‑class chat and clean tool integration.  
22 |  • If you prize **distribution scale, open‑weight fallbacks and full control over compute**, **Google** stands out. You’ll trade a slightly slower release cadence and less “agent magic” for unrivaled throughput (TPUs + 1M+ token contexts), built‑in Workspace/Android reach and on‑prem options.  
23 | All three are competitive on raw intelligence. Your choice really comes down to your biggest lever:  
24 |   – Performance & tooling ⇒ OpenAI  
25 |   – Cost‑performance & alignment ⇒ Anthropic  
26 |   – Distribution & compute sovereignty ⇒ Google  
27 | 
28 | Whichever you pick, pilot a real workload (with rate limits, enterprise features, support SLAs) before you commit multi‑year spend. This space is evolving so rapidly that today’s “win” can shift next quarter.
```

--------------------------------------------------------------------------------
/specs/gemini-2-5-flash-reasoning.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Gemini 2.5 Flash Reasoning
 2 | > Implement reasoning for Gemini 2.5 Flash.
 3 | >
 4 | > Implement every detail below end to end and validate your work with tests.
 5 | 
 6 | ## Implementation Notes
 7 | 
 8 | - We're adding support for `gemini-2.5-flash-preview-04-17` with thinking_budget for gemini.
 9 | - Just like how claude-3-7-sonnet has budget tokens in src/just_prompt/atoms/llm_providers/anthropic.py, Gemini has a similar feature with the thinking_budget. We want to support this.
10 | - If this parameter is present, we should trigger a prompt_with_thinking function in src/just_prompt/atoms/llm_providers/gemini.py. Use the example code in ai_docs/gemini-2-5-flash-reasoning.md. If the parameter is not present, use the existing prompt function.
11 | - Update tests to verify the feature works, specifically in test_gemini.py. Test with gemini-2.5-flash-preview-04-17 with and without the thinking_budget parameter.
12 | - This currently only works with the gemini-2.5-flash-preview-04-17 model, but assume more models like this will be added in the future and check the model name against a list so we can easily add them later.
13 | - After you implement and test, update the README.md file to detail the new feature.
14 | - We're using 'uv run pytest <file>' to run tests. You won't need to run any other commands or install anything; only run the tests.
15 | - Keep all the essential logic surrounding this change in gemini.py, just like how anthropic.py sets this up for its version (thinking_budget).
16 | - No need to update any libraries or packages.
17 | - So if we pass in something like: `gemini:gemini-2.5-flash-preview-04-17`, run the normal prompt function. If we pass in: `gemini:gemini-2.5-flash-preview-04-17:4k`, run the prompt_with_thinking function with 4000 thinking budget. Mirror anthropic.py's logic.
18 | - Update gemini.py to use the new import and client setup via `from google import genai` and `client = genai.Client(api_key="GEMINI_API_KEY")`.
19 | 
20 | ## Relevant Files (Context)
21 | > Read these files before implementing the feature.
22 | README.md
23 | pyproject.toml
24 | src/just_prompt/molecules/prompt.py
25 | src/just_prompt/atoms/llm_providers/anthropic.py
26 | src/just_prompt/atoms/llm_providers/gemini.py
27 | src/just_prompt/tests/atoms/llm_providers/test_gemini.py
28 | 
29 | ## Example Reasoning Code
30 | 
31 | ```python
32 | from google import genai
33 | 
34 | client = genai.Client(api_key="GEMINI_API_KEY")
35 | 
36 | response = client.models.generate_content(
37 |   model="gemini-2.5-flash-preview-04-17",
38 |   contents="You roll two dice. What’s the probability they add up to 7?",
39 |   config=genai.types.GenerateContentConfig(
40 |     thinking_config=genai.types.ThinkingConfig(
41 |       thinking_budget=1024 # 0 - 24576
42 |     )
43 |   )
44 | )
45 | 
46 | print(response.text)
47 | ```
48 | 
49 | ## Self Validation (Close the loop)
50 | > After implementing the feature, run the tests to verify it works.
51 | >
52 | > All env variables are in place - run tests against real apis.
53 | - uv run pytest src/just_prompt/tests/atoms/llm_providers/test_gemini.py
54 | - uv run pytest src/just_prompt/tests/molecules/test_prompt.py
55 | 
```
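
For reference, one possible shape of the suffix handling the notes above describe. The function name, the allowed-model list, and the parsing rules here are assumptions drawn from the spec, not the actual implementation:

```python
# Illustrative sketch of ":4k"-style suffix parsing for gemini.py, mirroring
# the anthropic.py budget-token convention described in the spec.
import re
from typing import Tuple

THINKING_ENABLED_MODELS = ["gemini-2.5-flash-preview-04-17"]


def parse_thinking_suffix(model: str) -> Tuple[str, int]:
    """Split 'model:4k' into ('model', 4000); budget is 0 when no suffix applies."""
    parts = model.split(":", 1)
    if len(parts) == 1:
        return model, 0

    base_model, suffix = parts
    match = re.fullmatch(r"(\d+)k?", suffix.lower())
    if base_model not in THINKING_ENABLED_MODELS or not match:
        return model, 0

    budget = int(match.group(1))
    if suffix.lower().endswith("k"):
        budget *= 1000
    return base_model, budget
```

Under this sketch, `gemini:gemini-2.5-flash-preview-04-17:4k` would yield a 4000-token thinking budget and route to the thinking path, while the bare model name falls through to the existing `prompt` function.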

--------------------------------------------------------------------------------
/example_outputs/countdown_component/countdown_component_o_gpt-4.5-preview.md:
--------------------------------------------------------------------------------

```markdown
  1 | Here's how you would implement a `<Countdown :start-time="number" />` component in Vanilla JavaScript/EcmaScript (no framework) and popular frameworks/libraries Vue, Svelte, and React.
  2 | 
  3 | ---
  4 | 
  5 | ## 1\. Vanilla JavaScript (Web component)
  6 | 
  7 | ### HTML
  8 | ```html
  9 | <count-down start-time="60"></count-down>
 10 | ```
 11 | 
 12 | ### JavaScript (countdown.js)
 13 | ```js
 14 | class CountDown extends HTMLElement {
 15 |   constructor() {
 16 |     super();
 17 |     this.attachShadow({ mode: 'open' });
 18 |     this.count = parseInt(this.getAttribute('start-time'), 10) || 0;
 19 | 
 20 |     this.shadowRoot.innerHTML = `<div id="counter">${this.count}</div>`;
 21 |   }
 22 | 
 23 |   connectedCallback() {
 24 |     this.interval = setInterval(() => {
 25 |       if (this.count > 0) {
 26 |         this.count--;
 27 |         this.shadowRoot.querySelector('#counter').textContent = this.count;
 28 |       } else {
 29 |         clearInterval(this.interval);
 30 |       }
 31 |     }, 1000);
 32 |   }
 33 | 
 34 |   disconnectedCallback() {
 35 |     clearInterval(this.interval);
 36 |   }
 37 | }
 38 | 
 39 | customElements.define('count-down', CountDown);
 40 | ```
 41 | 
 42 | ### Usage
 43 | ```html
 44 | <script src="countdown.js"></script>
 45 | <count-down start-time="60"></count-down>
 46 | ```
 47 | 
 48 | ---
 49 | 
 50 | ## 2\. Vue 3 (Composition API):
 51 | 
 52 | ### Countdown.vue
 53 | ```vue
 54 | <script setup>
 55 | import { ref, onMounted, onUnmounted, defineProps } from 'vue';
 56 | 
 57 | const props = defineProps({ startTime: { type: Number, default: 60 } });
 58 | const count = ref(props.startTime);
 59 | let timer;
 60 | 
 61 | onMounted(() => {
 62 |   timer = setInterval(() => {
 63 |     if (count.value > 0) {
 64 |       count.value--;
 65 |     } else {
 66 |       clearInterval(timer);
 67 |     }
 68 |   }, 1000);
 69 | });
 70 | 
 71 | onUnmounted(() => {
 72 |   clearInterval(timer);
 73 | });
 74 | </script>
 75 | 
 76 | <template>
 77 |   <div>{{ count }}</div>
 78 | </template>
 79 | ```
 80 | 
 81 | ### Usage:
 82 | ```vue
 83 | <Countdown :start-time="60" />
 84 | ```
 85 | 
 86 | ---
 87 | 
 88 | ## 3\. Svelte
 89 | 
 90 | ### Countdown.svelte
 91 | ```svelte
 92 | <script>
 93 |   export let startTime = 60;
 94 |   let count = startTime;
 95 |   let interval = null;
 96 | 
 97 |   interval = setInterval(() => {
 98 |     if (count > 0) {
 99 |       count -= 1;
100 |     } else {
101 |       clearInterval(interval);
102 |     }
103 |   }, 1000);
104 | 
105 |   // cleanup
106 |   import { onDestroy } from 'svelte';
107 |   onDestroy(() => clearInterval(interval));
108 | </script>
109 | 
110 | <div>{count}</div>
111 | ```
112 | 
113 | ### Usage:
114 | ```svelte
115 | <Countdown startTime={60} />
116 | ```
117 | 
118 | ---
119 | 
120 | ## 4\. React
121 | 
122 | ### Countdown.jsx
123 | ```jsx
124 | import React, { useState, useEffect } from 'react';
125 | 
126 | export default function Countdown({ startTime = 60 }) {
127 |   const [count, setCount] = useState(startTime);
128 | 
129 |   useEffect(() => {
130 |     if (count <= 0) return;
131 |     const timer = setInterval(() => setCount(prev => prev - 1), 1000);
132 |     return () => clearInterval(timer);
133 |   }, [count]);
134 | 
135 |   return <div>{count}</div>;
136 | }
137 | ```
138 | 
139 | ### Usage:
140 | ```jsx
141 | <Countdown startTime={60} />
142 | ```
143 | 
144 | ---  
145 | 
146 | ### Summary:
147 | 
148 | | Implementation | Syntax & Concepts                           |
149 | |----------------|---------------------------------------------|
150 | | Vanilla        | Web Components, Shadow DOM, setInterval()   |
151 | | Vue            | Composition API, reactivity, lifecycle hooks|
152 | | Svelte         | Direct reactivity, lifecycle hooks          |
153 | | React          | JSX, hooks, useEffect, useState             |
```

--------------------------------------------------------------------------------
/example_outputs/decision_openai_vs_anthropic_vs_google/ceo_medium_decision_openai_vs_anthropic_vs_google_anthropic_claude-3-7-sonnet-20250219_4k.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Comparing OpenAI, Anthropic, and Google for Your Major Investment
 2 | 
 3 | Based on your three key factors, I'll help you evaluate which AI company might best deserve your significant investment of time, money, and resources.
 4 | 
 5 | ## Factor 1: Model Performance (Raw Intelligence)
 6 | 
 7 | **OpenAI**: Currently leads with o3 and GPT-4o models, demonstrating superior reasoning capabilities and consistent top benchmark scores. Their frontier research pipeline maintains a performance edge.
 8 | 
 9 | **Anthropic**: Claude 3.5 Sonnet is competitive, with Claude 3 Opus showing excellent long-form reasoning and 200k context window support. While impressive, they typically follow OpenAI's innovations.
10 | 
11 | **Google**: Gemini 2.5 Pro leads specifically in math and code benchmarks with native 1M token context (soon 2M). Their models excel in certain specialized domains but lack OpenAI's consistent lead across all dimensions.
12 | 
13 | **Edge**: OpenAI, though the gap has narrowed considerably in 2025.
14 | 
15 | ## Factor 2: Model Tool Use
16 | 
17 | **OpenAI**: Most mature with native tool-use API, Assistants & Tools API for agent orchestration, and a universal function-calling schema. Their ecosystem encourages extensive third-party integration.
18 | 
19 | **Anthropic**: Has an elegant JSON-based tool-use schema but lacks first-party vision capabilities. Their approach is solid but more limited in scope.
20 | 
21 | **Google**: Good tool integration within their ecosystem, particularly with Workspace AI and Workspace Flows. They offer a blend of closed models and open weights (Gemma), providing flexibility.
22 | 
23 | **Edge**: OpenAI has the richest tool ecosystem and integration capabilities.
24 | 
25 | ## Factor 3: Model Cost
26 | 
27 | **OpenAI**: Historically premium-priced, though they've implemented consistent quarterly price drops. GPT-4o is roughly 8× cheaper than previous generation models.
28 | 
29 | **Anthropic**: Offers the most cost-effective options at the Sonnet tier, undercutting GPT-4-Turbo in many cases. Their mid-tier models (Claude 3 Haiku) are explicitly designed for cost efficiency.
30 | 
31 | **Google**: Provides aggressive Vertex AI discounts and offers Gemma as open-weight models for on-premises deployment, potentially reducing costs for certain use cases.
32 | 
33 | **Edge**: Anthropic for pure API cost efficiency, with Google offering compelling total cost of ownership advantages.
34 | 
35 | ## Overall Assessment
36 | 
37 | If you're prioritizing raw intelligence and cutting-edge capabilities, **OpenAI** would be your bet, though you'll pay a premium.
38 | 
39 | If cost-efficiency with very competitive performance is your priority, **Anthropic** offers the best value, especially with Claude 3.5 Sonnet.
40 | 
41 | If you need deep integration with existing productivity tools, desire open-weight options, or want the backing of the most financially stable company, **Google** is your best choice.
42 | 
43 | ## Recommendation
44 | 
45 | Based on your three factors weighted equally, I would recommend **Anthropic** as your primary bet, as they offer the best balance of performance and cost, while maintaining competitive tool use capabilities. Their Constitutional AI approach also suggests a more stable and responsible trajectory.
46 | 
47 | However, if raw performance and extensive tool integration outweigh cost considerations for your specific needs, OpenAI would be the better choice despite the higher costs.
```

--------------------------------------------------------------------------------
/ultra_diff_review/diff_openai_o3-mini.md:
--------------------------------------------------------------------------------

```markdown
 1 | Below is a review of the changes with observations on potential issues, bugs, and improvements:
 2 | 
 3 | 1. Hardcoded API key in list_deepseek_models  
 4 |  • Issue: The Deepseek function uses a hardcoded API key ("sk-ds-3f422175ff114212a42d7107c3efd1e4"). This exposes sensitive credentials in the source code.  
 5 |  • Recommendation: Retrieve the key from an environment variable (or a secure vault) as is done for other models.  
 6 |  • Severity: 🚨 Critical
 7 | 
 8 | 2. Repeated load_dotenv calls  
 9 |  • Issue: Both list_anthropic_models and list_gemini_models call load_dotenv() even if they might be used in the same run.  
10 |  • Recommendation: Consider loading environment variables once in a main entry point or in a shared initialization function.  
11 |  • Severity: ⚠️ Moderate
12 | 
13 | 3. Redundant API calls in list_gemini_models  
14 |  • Issue: The Gemini function calls client.models.list() twice (once for generateContent and again for embedContent). This might be inefficient if each call performs network I/O.  
15 |  • Recommendation: Cache the result of client.models.list() into a variable and reuse it for both loops.  
16 |  • Severity: ⚠️ Low
17 | 
18 | 4. Inconsistent variable naming and potential confusion  
19 |  • Observation: In list_groq_models, the result of client.models.list() is stored in a variable named chat_completion even though the function is about listing models.  
20 |  • Recommendation: Use a name such as models or model_list for clarity.  
21 |  • Severity: ℹ️ Low
22 | 
23 | 5. Lack of error handling for API calls  
24 |  • Observation: All functions simply print the results of API calls without handling potential exceptions (e.g., network errors, invalid credentials).  
25 |  • Recommendation: Wrap API calls in try-except blocks and add meaningful error messages.  
26 |  • Severity: ⚠️ Moderate
27 | 
28 | 6. Consistency in output formatting  
29 |  • Observation: While some functions print header messages (like list_anthropic_models and list_gemini_models), others (like list_openai_models or list_deepseek_models) simply print the raw result.  
30 |  • Recommendation: Add consistent formatting or output messages for clarity.  
31 |  • Severity: ℹ️ Low
32 | 
33 | Below is a concise summary in a markdown table:
34 | 
35 | | Issue                                | Solution                                                                                 | Risk Assessment          |
36 | |--------------------------------------|------------------------------------------------------------------------------------------|--------------------------|
37 | | Hardcoded API key in Deepseek        | Use an environment variable (e.g., os.environ.get("DEEPSEEK_API_KEY"))                     | 🚨 Critical              |
38 | | Multiple load_dotenv() calls         | Load environment variables once at program start instead of in each function               | ⚠️ Moderate             |
39 | | Redundant API call in Gemini models  | Cache client.models.list() in a variable and reuse it for looping through supported actions | ⚠️ Low                  |
40 | | Inconsistent variable naming (Groq)  | Rename variables (e.g., change "chat_completion" to "models" in list_groq_models)            | ℹ️ Low (cosmetic)       |
41 | | Lack of error handling               | Wrap API calls in try-except blocks and log errors or provide user-friendly error messages  | ⚠️ Moderate             |
42 | 
43 | This review should help in making the code more secure, efficient, and maintainable.
```
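
Issue 3 above (the duplicated `client.models.list()` call) has a direct fix: list once, then filter the cached result for each capability. A sketch using the same `google-genai` client calls that `list_models.py` already makes:

```python
# Sketch: cache the model listing in a local variable and reuse it for both
# the generateContent and embedContent checks (one network call instead of two).
import os

from dotenv import load_dotenv
from google import genai


def list_gemini_models():
    load_dotenv()
    client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))

    models = list(client.models.list())  # single API call, cached locally

    print("List of models that support generateContent:\n")
    for m in models:
        if "generateContent" in m.supported_actions:
            print(m.name)

    print("List of models that support embedContent:\n")
    for m in models:
        if "embedContent" in m.supported_actions:
            print(m.name)
```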

--------------------------------------------------------------------------------
/src/just_prompt/molecules/prompt.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Prompt functionality for just-prompt.
  3 | """
  4 | 
  5 | from typing import List
  6 | import logging
  7 | import concurrent.futures
  8 | import os
  9 | from ..atoms.shared.validator import validate_models_prefixed_by_provider
 10 | from ..atoms.shared.utils import split_provider_and_model, DEFAULT_MODEL
 11 | from ..atoms.shared.model_router import ModelRouter
 12 | 
 13 | logger = logging.getLogger(__name__)
 14 | 
 15 | 
 16 | def _process_model_prompt(model_string: str, text: str) -> str:
 17 |     """
 18 |     Process a single model prompt.
 19 |     
 20 |     Args:
 21 |         model_string: String in format "provider:model"
 22 |         text: The prompt text
 23 |         
 24 |     Returns:
 25 |         Response from the model
 26 |     """
 27 |     try:
 28 |         return ModelRouter.route_prompt(model_string, text)
 29 |     except Exception as e:
 30 |         logger.error(f"Error processing prompt for {model_string}: {e}")
 31 |         return f"Error ({model_string}): {str(e)}"
 32 | 
 33 | 
 34 | def _correct_model_name(provider: str, model: str, correction_model: str) -> str:
 35 |     """
 36 |     Correct a model name using the correction model.
 37 |     
 38 |     Args:
 39 |         provider: Provider name
 40 |         model: Model name
 41 |         correction_model: Model to use for correction
 42 |         
 43 |     Returns:
 44 |         Corrected model name
 45 |     """
 46 |     try:
 47 |         return ModelRouter.magic_model_correction(provider, model, correction_model)
 48 |     except Exception as e:
 49 |         logger.error(f"Error correcting model name {provider}:{model}: {e}")
 50 |         return model
 51 | 
 52 | 
 53 | def prompt(text: str, models_prefixed_by_provider: List[str] = None) -> List[str]:
 54 |     """
 55 |     Send a prompt to multiple models using parallel processing.
 56 |     
 57 |     Args:
 58 |         text: The prompt text
 59 |         models_prefixed_by_provider: List of model strings in format "provider:model"
 60 |                                     If None, uses the DEFAULT_MODELS environment variable
 61 |         
 62 |     Returns:
 63 |         List of responses from the models
 64 |     """
 65 |     # Use default models if no models provided
 66 |     if not models_prefixed_by_provider:
 67 |         default_models = os.environ.get("DEFAULT_MODELS", DEFAULT_MODEL)
 68 |         models_prefixed_by_provider = [model.strip() for model in default_models.split(",")]
 69 |     # Validate model strings
 70 |     validate_models_prefixed_by_provider(models_prefixed_by_provider)
 71 |     
 72 |     # Prepare corrected model strings
 73 |     corrected_models = []
 74 |     for model_string in models_prefixed_by_provider:
 75 |         provider, model = split_provider_and_model(model_string)
 76 |         
 77 |         # Get correction model from environment
 78 |         correction_model = os.environ.get("CORRECTION_MODEL", DEFAULT_MODEL)
 79 |         
 80 |         # Check if model needs correction
 81 |         corrected_model = _correct_model_name(provider, model, correction_model)
 82 |         
 83 |         # Use corrected model
 84 |         if corrected_model != model:
 85 |             model_string = f"{provider}:{corrected_model}"
 86 |         
 87 |         corrected_models.append(model_string)
 88 |     
 89 |     # Process each model in parallel using ThreadPoolExecutor
 90 |     responses = []
 91 |     with concurrent.futures.ThreadPoolExecutor() as executor:
 92 |         # Submit all tasks
 93 |         future_to_model = {
 94 |             executor.submit(_process_model_prompt, model_string, text): model_string
 95 |             for model_string in corrected_models
 96 |         }
 97 |         
 98 |         # Collect results in order
 99 |         for model_string in corrected_models:
100 |             for future, future_model in future_to_model.items():
101 |                 if future_model == model_string:
102 |                     responses.append(future.result())
103 |                     break
104 |     
105 |     return responses
```

--------------------------------------------------------------------------------
/src/just_prompt/atoms/shared/validator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Validation utilities for just-prompt.
  3 | """
  4 | 
  5 | from typing import List, Dict, Optional, Tuple
  6 | import logging
  7 | import os
  8 | from .data_types import ModelProviders
  9 | from .utils import split_provider_and_model, get_api_key
 10 | 
 11 | logger = logging.getLogger(__name__)
 12 | 
 13 | 
 14 | def validate_models_prefixed_by_provider(models_prefixed_by_provider: List[str]) -> bool:
 15 |     """
 16 |     Validate that provider prefixes in model strings are valid.
 17 |     
 18 |     Args:
 19 |         models_prefixed_by_provider: List of model strings in format "provider:model"
 20 |         
 21 |     Returns:
 22 |         True if all valid, raises ValueError otherwise
 23 |     """
 24 |     if not models_prefixed_by_provider:
 25 |         raise ValueError("No models provided")
 26 |     
 27 |     for model_string in models_prefixed_by_provider:
 28 |         try:
 29 |             provider_prefix, model_name = split_provider_and_model(model_string)
 30 |             provider = ModelProviders.from_name(provider_prefix)
 31 |             if provider is None:
 32 |                 raise ValueError(f"Unknown provider prefix: {provider_prefix}")
 33 |         except Exception as e:
 34 |             logger.error(f"Validation error for model string '{model_string}': {str(e)}")
 35 |             raise
 36 |     
 37 |     return True
 38 | 
 39 | 
 40 | def validate_provider(provider: str) -> bool:
 41 |     """
 42 |     Validate that a provider name is valid.
 43 |     
 44 |     Args:
 45 |         provider: Provider name (full or short)
 46 |         
 47 |     Returns:
 48 |         True if valid, raises ValueError otherwise
 49 |     """
 50 |     provider_enum = ModelProviders.from_name(provider)
 51 |     if provider_enum is None:
 52 |         raise ValueError(f"Unknown provider: {provider}")
 53 |     
 54 |     return True
 55 | 
 56 | 
 57 | def validate_provider_api_keys() -> Dict[str, bool]:
 58 |     """
 59 |     Validate that API keys are available for each provider.
 60 |     
 61 |     Returns:
 62 |         Dictionary mapping provider names to availability status (True if available, False otherwise)
 63 |     """
 64 |     available_providers = {}
 65 |     
 66 |     # Check API keys for each provider
 67 |     for provider in ModelProviders:
 68 |         provider_name = provider.full_name
 69 |         
 70 |         # Special case for Ollama which uses OLLAMA_HOST instead of an API key
 71 |         if provider_name == "ollama":
 72 |             host = os.environ.get("OLLAMA_HOST")
 73 |             is_available = host is not None and host.strip() != ""
 74 |             available_providers[provider_name] = is_available
 75 |         else:
 76 |             # Get API key
 77 |             api_key = get_api_key(provider_name)
 78 |             is_available = api_key is not None and api_key.strip() != ""
 79 |             available_providers[provider_name] = is_available
 80 |     
 81 |     return available_providers
 82 | 
 83 | 
 84 | def print_provider_availability(detailed: bool = True) -> None:
 85 |     """
 86 |     Print information about which providers are available based on API keys.
 87 |     
 88 |     Args:
 89 |         detailed: Whether to print detailed information about missing keys
 90 |     """
 91 |     availability = validate_provider_api_keys()
 92 |     
 93 |     available = [p for p, status in availability.items() if status]
 94 |     unavailable = [p for p, status in availability.items() if not status]
 95 |     
 96 |     # Print availability information
 97 |     logger.info(f"Available LLM providers: {', '.join(available)}")
 98 |     
 99 |     if detailed and unavailable:
100 |         env_vars = {
101 |             "openai": "OPENAI_API_KEY",
102 |             "anthropic": "ANTHROPIC_API_KEY",
103 |             "gemini": "GEMINI_API_KEY", 
104 |             "groq": "GROQ_API_KEY",
105 |             "deepseek": "DEEPSEEK_API_KEY",
106 |             "ollama": "OLLAMA_HOST"
107 |         }
108 |         
109 |         logger.warning(f"The following providers are unavailable due to missing API keys:")
110 |         for provider in unavailable:
111 |             env_var = env_vars.get(provider)
112 |             if env_var:
113 |                 logger.warning(f"  - {provider}: Missing environment variable {env_var}")
114 |             else:
115 |                 logger.warning(f"  - {provider}: Missing configuration")
116 | 
```

--------------------------------------------------------------------------------
/example_outputs/countdown_component/diff.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Code Review
  2 | - Review the diff, report on issues, bugs, and improvements. 
  3 | - End with a concise markdown table of any issues found, their solutions, and a risk assessment for each issue if applicable.
  4 | - Use emojis to convey the severity of each issue.
  5 | 
  6 | ## Diff
  7 | diff --git a/list_models.py b/list_models.py
  8 | index aebb141..0c11e9b 100644
  9 | --- a/list_models.py
 10 | +++ b/list_models.py
 11 | @@ -1,69 +1,81 @@
 12 | -# from openai import OpenAI
 13 | +def list_openai_models():
 14 | +    from openai import OpenAI
 15 |  
 16 | -# client = OpenAI()
 17 | +    client = OpenAI()
 18 |  
 19 | -# print(client.models.list())
 20 | +    print(client.models.list())
 21 |  
 22 | -# --------------------------------
 23 |  
 24 | -# import os
 25 | +def list_groq_models():
 26 | +    import os
 27 | +    from groq import Groq
 28 |  
 29 | -# from groq import Groq
 30 | +    client = Groq(
 31 | +        api_key=os.environ.get("GROQ_API_KEY"),
 32 | +    )
 33 |  
 34 | -# client = Groq(
 35 | -#     api_key=os.environ.get("GROQ_API_KEY"),
 36 | -# )
 37 | +    chat_completion = client.models.list()
 38 |  
 39 | -# chat_completion = client.models.list()
 40 | +    print(chat_completion)
 41 |  
 42 | -# print(chat_completion)
 43 |  
 44 | -# --------------------------------
 45 | +def list_anthropic_models():
 46 | +    import anthropic
 47 | +    import os
 48 | +    from dotenv import load_dotenv
 49 |  
 50 | -import anthropic
 51 | -import os
 52 | -from dotenv import load_dotenv
 53 | +    load_dotenv()
 54 |  
 55 | -load_dotenv()
 56 | +    client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
 57 | +    models = client.models.list()
 58 | +    print("Available Anthropic models:")
 59 | +    for model in models.data:
 60 | +        print(f"- {model.id}")
 61 |  
 62 | -client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
 63 | -models = client.models.list()
 64 | -print("Available Anthropic models:")
 65 | -for model in models.data:
 66 | -    print(f"- {model.id}")
 67 |  
 68 | -# --------------------------------
 69 | +def list_gemini_models():
 70 | +    import os
 71 | +    from google import genai
 72 | +    from dotenv import load_dotenv
 73 |  
 74 | -# import os
 75 | -# from google import genai
 76 | -# from dotenv import load_dotenv
 77 | +    load_dotenv()
 78 |  
 79 | -# load_dotenv()
 80 | +    client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
 81 |  
 82 | -# client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
 83 | +    print("List of models that support generateContent:\n")
 84 | +    for m in client.models.list():
 85 | +        for action in m.supported_actions:
 86 | +            if action == "generateContent":
 87 | +                print(m.name)
 88 |  
 89 | -# print("List of models that support generateContent:\n")
 90 | -# for m in client.models.list():
 91 | -#     for action in m.supported_actions:
 92 | -#         if action == "generateContent":
 93 | -#             print(m.name)
 94 | +    print("List of models that support embedContent:\n")
 95 | +    for m in client.models.list():
 96 | +        for action in m.supported_actions:
 97 | +            if action == "embedContent":
 98 | +                print(m.name)
 99 |  
100 | -# print("List of models that support embedContent:\n")
101 | -# for m in client.models.list():
102 | -#     for action in m.supported_actions:
103 | -#         if action == "embedContent":
104 | -#             print(m.name)
105 |  
106 | -# -------------------------------- deepseek
107 | +def list_deepseek_models():
108 | +    from openai import OpenAI
109 |  
110 | -# from openai import OpenAI
111 | +    # for backward compatibility, you can still use `https://api.deepseek.com/v1` as `base_url`.
112 | +    client = OpenAI(
113 | +        api_key="sk-ds-3f422175ff114212a42d7107c3efd1e4",
114 | +        base_url="https://api.deepseek.com",
115 | +    )
116 | +    print(client.models.list())
117 |  
118 | -# # for backward compatibility, you can still use `https://api.deepseek.com/v1` as `base_url`.
119 | -# client = OpenAI(api_key="<your API key>", base_url="https://api.deepseek.com")
120 | -# print(client.models.list())
121 |  
122 | -# -------------------------------- ollama
123 | +def list_ollama_models():
124 | +    import ollama
125 |  
126 | -import ollama
127 | +    print(ollama.list())
128 |  
129 | -print(ollama.list())
130 | +
131 | +# Uncomment to run the functions
132 | +# list_openai_models()
133 | +# list_groq_models()
134 | +# list_anthropic_models()
135 | +# list_gemini_models()
136 | +# list_deepseek_models()
137 | +# list_ollama_models()
138 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/shared/test_model_router.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for model router.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from unittest.mock import patch, MagicMock
 8 | import importlib
 9 | from just_prompt.atoms.shared.model_router import ModelRouter
10 | from just_prompt.atoms.shared.data_types import ModelProviders
11 | 
12 | 
13 | @patch('importlib.import_module')
14 | def test_route_prompt(mock_import_module):
15 |     """Test routing prompts to the appropriate provider."""
16 |     # Set up mock
17 |     mock_module = MagicMock()
18 |     mock_module.prompt.return_value = "Paris is the capital of France."
19 |     mock_import_module.return_value = mock_module
20 |     
21 |     # Test with full provider name
22 |     response = ModelRouter.route_prompt("openai:gpt-4o-mini", "What is the capital of France?")
23 |     assert response == "Paris is the capital of France."
24 |     mock_import_module.assert_called_with("just_prompt.atoms.llm_providers.openai")
25 |     mock_module.prompt.assert_called_with("What is the capital of France?", "gpt-4o-mini")
26 |     
27 |     # Test with short provider name
28 |     response = ModelRouter.route_prompt("o:gpt-4o-mini", "What is the capital of France?")
29 |     assert response == "Paris is the capital of France."
30 |     
31 |     # Test invalid provider
32 |     with pytest.raises(ValueError):
33 |         ModelRouter.route_prompt("unknown:model", "What is the capital of France?")
34 | 
35 | 
36 | @patch('importlib.import_module')
37 | def test_route_list_models(mock_import_module):
38 |     """Test routing list_models requests to the appropriate provider."""
39 |     # Set up mock
40 |     mock_module = MagicMock()
41 |     mock_module.list_models.return_value = ["model1", "model2"]
42 |     mock_import_module.return_value = mock_module
43 |     
44 |     # Test with full provider name
45 |     models = ModelRouter.route_list_models("openai")
46 |     assert models == ["model1", "model2"]
47 |     mock_import_module.assert_called_with("just_prompt.atoms.llm_providers.openai")
48 |     mock_module.list_models.assert_called_once()
49 |     
50 |     # Test with short provider name
51 |     models = ModelRouter.route_list_models("o")
52 |     assert models == ["model1", "model2"]
53 |     
54 |     # Test invalid provider
55 |     with pytest.raises(ValueError):
56 |         ModelRouter.route_list_models("unknown")
57 | 
58 | 
59 | def test_validate_and_correct_model_shorthand():
60 |     """Test validation and correction of shorthand model names like a:sonnet.3.7."""
61 |     try:
62 |         # Test with shorthand notation a:sonnet.3.7
63 |         # This should be corrected to claude-3-7-sonnet-20250219
64 |         # First, use the split_provider_and_model to get the provider and model
65 |         from just_prompt.atoms.shared.utils import split_provider_and_model
66 |         provider_prefix, model = split_provider_and_model("a:sonnet.3.7")
67 |         
68 |         # Get the provider enum
69 |         provider = ModelProviders.from_name(provider_prefix)
70 |         
71 |         # Call validate_and_correct_model
72 |         result = ModelRouter.magic_model_correction(provider.full_name, model, "anthropic:claude-sonnet-4-20250514")
73 |         
74 |         # The magic_model_correction method should correct sonnet.3.7 to a claude model
75 |         assert "claude" in result, f"Expected sonnet.3.7 to be corrected to a claude model, got {result}"
76 |         print(f"Shorthand model 'sonnet.3.7' was corrected to '{result}'")
77 |     except Exception as e:
78 |         pytest.fail(f"Test failed with error: {e}")
79 | 
80 | 
81 | def test_validate_and_correct_claude4_models():
82 |     """Test validation bypass for claude-4 models with thinking tokens."""
83 |     # Test claude-4 models bypass validation
84 |     result = ModelRouter.validate_and_correct_model("anthropic", "claude-opus-4-20250514:4k")
85 |     assert result == "claude-opus-4-20250514:4k", f"Expected bypass for claude-4 model, got {result}"
86 |     
87 |     result = ModelRouter.validate_and_correct_model("anthropic", "claude-sonnet-4-20250514:1k") 
88 |     assert result == "claude-sonnet-4-20250514:1k", f"Expected bypass for claude-4 model, got {result}"
89 |     
90 |     result = ModelRouter.validate_and_correct_model("anthropic", "claude-opus-4-20250514")
91 |     assert result == "claude-opus-4-20250514", f"Expected bypass for claude-4 model, got {result}"
92 | 
93 | 
94 | 
```
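
The mocks above pin down the dispatch contract: resolve the provider prefix, import `just_prompt.atoms.llm_providers.<provider>`, and forward the bare model name. Below is a minimal sketch of that flow using the helpers these tests already import; how `from_name` signals an unknown prefix is an assumption here, not something the tests confirm.

```python
import importlib

from just_prompt.atoms.shared.data_types import ModelProviders
from just_prompt.atoms.shared.utils import split_provider_and_model


def route_prompt_sketch(model_string: str, text: str) -> str:
    # "openai:gpt-4o-mini" or "o:gpt-4o-mini" -> provider prefix + bare model name
    provider_prefix, model = split_provider_and_model(model_string)

    provider = ModelProviders.from_name(provider_prefix)
    if provider is None:  # assumption: from_name returns None for unknown prefixes
        raise ValueError(f"Unknown provider: {provider_prefix}")

    # Lazy import -- exactly the call the @patch('importlib.import_module') mock intercepts
    module = importlib.import_module(f"just_prompt.atoms.llm_providers.{provider.full_name}")
    return module.prompt(text, model)
```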

--------------------------------------------------------------------------------
/specs/prompt_from_file_to_file_w_context.md:
--------------------------------------------------------------------------------

```markdown
 1 | Feature Request: Prompt from File to File with Context Files
 2 | 
 3 | ## Implementation Notes
 4 | 
 5 | - Create a new tool 'prompt_from_file_to_file_w_context' in src/just_prompt/molecules/prompt_from_file_to_file_w_context.py
 6 | - Definition: prompt_from_file_to_file_w_context(from_file: str, context_files: List[str], models_prefixed_by_provider: List[str] = None, output_dir: str = ".") -> None:
 7 | - This tool extends the existing prompt_from_file_to_file functionality by injecting context files into the prompt before sending to LLMs
 8 | - The tool will read the from_file and search for the placeholder `{{context_files}}`
 9 | - If `{{context_files}}` is not found in the from_file, throw an error requiring this placeholder to be present
10 | - Replace `{{context_files}}` with an XML block containing all context files:
11 |   ```xml
12 |   <context_files>
13 |       <file name="absolute/path/to/file1.py">
14 |           ... file1 content ...
15 |       </file>
16 |       <file name="absolute/path/to/file2.md"> 
17 |           ... file2 content ...
18 |       </file>
19 |       ... repeat for all context_files ...
20 |   </context_files>
21 |   ```
22 | - Read each file in context_files (using absolute paths) and inject their contents into the XML structure
23 | - After context injection, use the existing prompt_from_file_to_file logic to send the enhanced prompt to all specified models
24 | - Each context file should be wrapped in a `<file name="...">content</file>` tag within the `<context_files>` block
25 | - Handle file reading errors gracefully with descriptive error messages
26 | - Validate that all context_files exist and are readable before processing
27 | - The enhanced prompt (with context files injected) should be sent to all models specified in models_prefixed_by_provider
28 | - Output files follow the same naming convention as prompt_from_file_to_file: `{output_dir}/{sanitized_filename}_{provider}_{model}.md`
29 | 
30 | ## Relevant Files
31 | - src/just_prompt/server.py (add new MCP tool endpoint)
32 | - src/just_prompt/molecules/prompt_from_file_to_file_w_context.py (new file)
33 | - src/just_prompt/molecules/prompt_from_file_to_file.py (reference existing logic)
34 | - src/just_prompt/atoms/shared/utils.py (for file operations and validation)
35 | - src/just_prompt/atoms/shared/validator.py (for input validation)
36 | - src/just_prompt/tests/molecules/test_prompt_from_file_to_file_w_context.py (new test file)
37 | 
38 | ## Validation (Close the Loop)
39 | > Be sure to test this new capability with uv run pytest.
40 | 
41 | - Create comprehensive tests in test_prompt_from_file_to_file_w_context.py covering:
42 |   - Normal operation with valid context files
43 |   - Error when {{context_files}} placeholder is missing
44 |   - Error when context files don't exist or aren't readable
45 |   - Proper XML formatting of context files
46 |   - Integration with existing prompt_from_file_to_file workflow
47 | - `uv run pytest src/just_prompt/tests/molecules/test_prompt_from_file_to_file_w_context.py`
48 | - `uv run just-prompt --help` to validate the tool works as expected
49 | - Test end-to-end functionality by creating a sample prompt file with {{context_files}} placeholder and sample context files
50 | - After implementation, update README.md with the new tool's functionality and parameters
51 | - Run `git ls-files` to update the directory tree in the README with the new files
52 | 
53 | ## Error Handling Requirements
54 | - Validate that from_file exists and is readable
55 | - Validate that all files in context_files list exist and are readable  
56 | - Require {{context_files}} placeholder to be present in from_file content
57 | - Provide clear error messages for missing files, permission issues, or missing placeholder
58 | - Handle large context files gracefully (consider file size limits if needed)
59 | 
60 | ## Example Usage
61 | ```python
62 | # Prompt file content (example.txt):
63 | """
64 | Please analyze the following codebase files:
65 | 
66 | {{context_files}}
67 | 
68 | Based on the code above, suggest improvements for better performance.
69 | """
70 | 
71 | # Tool call:
72 | prompt_from_file_to_file_w_context(
73 |     from_file="prompts/example.txt",
74 |     context_files=[
75 |         "/absolute/path/to/src/main.py",
76 |         "/absolute/path/to/src/utils.py", 
77 |         "/absolute/path/to/README.md"
78 |     ],
79 |     models_prefixed_by_provider=["openai:gpt-4o", "anthropic:claude-3-5-sonnet"],
80 |     output_dir="analysis_results"
81 | )
82 | ```
```
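
A minimal sketch of the context-injection step the notes above describe; the helper names and error messages are illustrative, and only the `{{context_files}}` placeholder and the `<context_files>` XML shape come from this spec.

```python
from pathlib import Path
from typing import List


def build_context_block(context_files: List[str]) -> str:
    # Wrap each context file in <file name="...">...</file> inside a <context_files> block
    parts = ["<context_files>"]
    for path in context_files:
        file_path = Path(path)
        if not file_path.is_file():
            raise FileNotFoundError(f"Context file not found or not readable: {path}")
        content = file_path.read_text(encoding="utf-8")
        parts.append(f'    <file name="{file_path}">\n{content}\n    </file>')
    parts.append("</context_files>")
    return "\n".join(parts)


def inject_context(from_file: str, context_files: List[str]) -> str:
    # Read the prompt, require the placeholder, then substitute the XML block
    prompt_text = Path(from_file).read_text(encoding="utf-8")
    if "{{context_files}}" not in prompt_text:
        raise ValueError("Prompt file must contain the {{context_files}} placeholder")
    return prompt_text.replace("{{context_files}}", build_context_block(context_files))
```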

--------------------------------------------------------------------------------
/specs/new-tool-llm-as-a-ceo.md:
--------------------------------------------------------------------------------

```markdown
 1 | Feature Request: LLM as a CEO
 2 | 
 3 | ## Implementation Notes
 4 | 
 5 | - Create a new tool 'ceo_and_board' in src/just_prompt/molecules/ceo_and_board_prompt.py
 6 | - Definition: ceo_and_board_prompt(from_file: str, output_dir: str = ".", models_prefixed_by_provider: List[str] = None, ceo_model: str = DEFAULT_CEO_MODEL, ceo_decision_prompt: str = DEFAULT_CEO_DECISION_PROMPT) -> None:
 7 | - Use the existing prompt_from_file_to_file function to generate responses from 'board' aka models_prefixed_by_provider.
 8 | - Then run the ceo_decision_prompt (xml style prompt) with the board's responses, and the original question prompt to get a decision.
 9 | - DEFAULT_CEO_DECISION_PROMPT is
10 |   ```xml
11 |         <purpose>
12 |             You are a CEO of a company. You are given a list of responses from your board of directors. Your job is to take in the original question prompt, and each of the board members' responses, and choose the best direction for your company.
13 |         </purpose>
14 |         <instructions>
15 |             <instruction>Each board member has proposed an answer to the question posed in the prompt.</instruction>
16 |             <instruction>Given the original question prompt, and each of the board members' responses, choose the best answer.</instruction>
17 |             <instruction>Tally the votes of the board members, choose the best direction, and explain why you chose it.</instruction>
18 |             <instruction>To preserve anonymity, we will use model names instead of real names of your board members. When responding, use the model names in your response.</instruction>
19 |             <instruction>As a CEO, you break down the decision into several categories, including risk, reward, timeline, and resources. In addition to these guiding categories, you also consider the board members' expertise and experience. As a bleeding-edge CEO, you also invent new dimensions of decision making to help you make the best decision for your company.</instruction>
20 |             <instruction>Your final CEO response should be in markdown format with a comprehensive explanation of your decision. Start the top of the file with a title that says "CEO Decision", include a table of contents, briefly describe the question/problem at hand, then dive into several sections. One of your first sections should be a quick summary of your decision; then break down each of the board's decisions into sections with your commentary on each. From there, lead into the categories of your decision-making process, and then into your final decision.</instruction>
21 |         </instructions>
22 |         
23 |         <original-question>{original_prompt}</original-question>
24 |         
25 |         <board-decisions>
26 |             <board-response>
27 |                 <model-name>...</model-name>
28 |                 <response>...</response>
29 |             </board-response>
30 |             <board-response>
31 |                 <model-name>...</model-name>
32 |                 <response>...</response>
33 |             </board-response>
34 |             ...
35 |         </board-decisions>
36 |     ```
37 | - DEFAULT_CEO_MODEL is openai:o3
38 | - The prompt_from_file_to_file will output a file for each board member's response in the output_dir.
39 | - Once they've been created, the ceo_and_board_prompt will read the board members' responses and the original question prompt into the ceo_decision_prompt, then make another call with the ceo_model to get a decision. Write the decision to a file at output_dir/ceo_decision.md.
40 | - Be sure to validate this functionality with uv run pytest <path-to-test-file>
41 | - After you implement, update the README.md with the new tool's functionality and run `git ls-files` to update the directory tree in the README with the new files.
42 | - Make sure this functionality works end to end. This functionality will be exposed as an MCP tool in the server.py file.
43 | 
44 | ## Relevant Files
45 | - src/just_prompt/server.py
46 | - src/just_prompt/molecules/ceo_and_board_prompt.py
47 | - src/just_prompt/molecules/prompt_from_file_to_file.py
48 | - src/just_prompt/molecules/prompt_from_file.py
49 | - src/just_prompt/molecules/prompt.py
50 | - src/just_prompt/atoms/llm_providers/openai.py
51 | - src/just_prompt/atoms/shared/utils.py
52 | - src/just_prompt/tests/molecules/test_ceo_and_board_prompt.py
53 | 
54 | ## Validation (Close the Loop)
55 | > Be sure to test this new capability with uv run pytest.
56 | 
57 | - `uv run pytest src/just_prompt/tests/molecules/test_ceo_and_board_prompt.py`
58 | - `uv run just-prompt --help` to validate the tool works as expected.
```
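
For orientation, a minimal sketch of the final step described above (read the board files back, fill the decision template, ask the CEO model, write `ceo_decision.md`). It assumes the molecule-level `prompt()` returns one response string per requested model and uses the file stem as a stand-in for the model name; it is not the shipped implementation.

```python
from pathlib import Path
from typing import List

from just_prompt.molecules.ceo_and_board_prompt import DEFAULT_CEO_DECISION_PROMPT
from just_prompt.molecules.prompt import prompt


def ceo_decision_sketch(original_prompt: str, board_files: List[str],
                        ceo_model: str = "openai:o3", output_dir: str = ".") -> Path:
    # Wrap each board member's saved response in the XML the template expects
    board_responses = ""
    for path in board_files:
        response = Path(path).read_text(encoding="utf-8")
        board_responses += (
            "    <board-response>\n"
            f"        <model-name>{Path(path).stem}</model-name>\n"
            f"        <response>{response}</response>\n"
            "    </board-response>\n"
        )

    decision_prompt = DEFAULT_CEO_DECISION_PROMPT.format(
        original_prompt=original_prompt, board_responses=board_responses
    )
    decision = prompt(decision_prompt, [ceo_model])[0]  # assumption: one response per model

    decision_file = Path(output_dir) / "ceo_decision.md"
    decision_file.write_text(decision, encoding="utf-8")
    return decision_file
```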

--------------------------------------------------------------------------------
/ai_docs/google-genai-api-update.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Google GenAI SDK v1.22.0 Documentation
  2 | 
  3 | ## Overview
  4 | 
  5 | The Google Gen AI SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports both the Gemini Developer API and Vertex AI APIs.
  6 | 
  7 | **Latest Version:** 1.22.0 (Released: about 23 hours ago)
  8 | 
  9 | ## Installation
 10 | 
 11 | ```bash
 12 | pip install google-genai
 13 | ```
 14 | 
 15 | ## Key Features
 16 | 
 17 | ### 1. Client Creation
 18 | 
 19 | **For Gemini Developer API:**
 20 | ```python
 21 | from google import genai
 22 | client = genai.Client(api_key='GEMINI_API_KEY')
 23 | ```
 24 | 
 25 | **For Vertex AI:**
 26 | ```python
 27 | from google import genai
 28 | client = genai.Client(
 29 |     vertexai=True, 
 30 |     project='your-project-id', 
 31 |     location='us-central1'
 32 | )
 33 | ```
 34 | 
 35 | ### 2. Model Support
 36 | 
 37 | The SDK supports various models including:
 38 | - **Gemini 2.0 Flash**: `gemini-2.0-flash-001`
 39 | - **Text Embedding**: `text-embedding-004`
 40 | - **Imagen 3.0**: `imagen-3.0-generate-002` (image generation)
 41 | - **Veo 2.0**: `veo-2.0-generate-001` (video generation)
 42 | 
 43 | ### 3. Core Capabilities
 44 | 
 45 | #### Generate Content
 46 | ```python
 47 | response = client.models.generate_content(
 48 |     model='gemini-2.0-flash-001', 
 49 |     contents='Why is the sky blue?'
 50 | )
 51 | print(response.text)
 52 | ```
 53 | 
 54 | #### Chat Sessions
 55 | ```python
 56 | chat = client.chats.create(model='gemini-2.0-flash-001')
 57 | response = chat.send_message('tell me a story')
 58 | print(response.text)
 59 | ```
 60 | 
 61 | #### Function Calling
 62 | The SDK supports automatic Python function calling:
 63 | ```python
 64 | def get_current_weather(location: str) -> str:
 65 |     """Returns the current weather."""
 66 |     return 'sunny'
 67 | 
 68 | response = client.models.generate_content(
 69 |     model='gemini-2.0-flash-001',
 70 |     contents='What is the weather like in Boston?',
 71 |     config=types.GenerateContentConfig(tools=[get_current_weather]),
 72 | )
 73 | ```
 74 | 
 75 | #### JSON Response Schema
 76 | Supports Pydantic models for structured output:
 77 | ```python
 78 | from pydantic import BaseModel
 79 | 
 80 | class CountryInfo(BaseModel):
 81 |     name: str
 82 |     population: int
 83 |     capital: str
 84 | 
 85 | response = client.models.generate_content(
 86 |     model='gemini-2.0-flash-001',
 87 |     contents='Give me information for the United States.',
 88 |     config=types.GenerateContentConfig(
 89 |         response_mime_type='application/json',
 90 |         response_schema=CountryInfo,
 91 |     ),
 92 | )
 93 | ```
 94 | 
 95 | ### 4. Advanced Features
 96 | 
 97 | #### Streaming Support
 98 | ```python
 99 | for chunk in client.models.generate_content_stream(
100 |     model='gemini-2.0-flash-001', 
101 |     contents='Tell me a story in 300 words.'
102 | ):
103 |     print(chunk.text, end='')
104 | ```
105 | 
106 | #### Async Support
107 | ```python
108 | response = await client.aio.models.generate_content(
109 |     model='gemini-2.0-flash-001', 
110 |     contents='Tell me a story in 300 words.'
111 | )
112 | ```
113 | 
114 | #### Caching
115 | ```python
116 | cached_content = client.caches.create(
117 |     model='gemini-2.0-flash-001',
118 |     config=types.CreateCachedContentConfig(
119 |         contents=[...],
120 |         system_instruction='What is the sum of the two pdfs?',
121 |         display_name='test cache',
122 |         ttl='3600s',
123 |     ),
124 | )
125 | ```
126 | 
127 | #### Fine-tuning
128 | Supports supervised fine-tuning with different approaches for Vertex AI (GCS) and Gemini Developer API (inline examples).
129 | 
130 | ### 5. API Configuration
131 | 
132 | #### API Version Selection
133 | ```python
134 | from google.genai import types
135 | 
136 | # For stable API endpoints
137 | client = genai.Client(
138 |     vertexai=True,
139 |     project='your-project-id',
140 |     location='us-central1',
141 |     http_options=types.HttpOptions(api_version='v1')
142 | )
143 | ```
144 | 
145 | #### Proxy Support
146 | ```bash
147 | export HTTPS_PROXY='http://username:password@proxy_uri:port'
148 | export SSL_CERT_FILE='client.pem'
149 | ```
150 | 
151 | ### 6. Error Handling
152 | 
153 | ```python
154 | from google.genai import errors
155 | 
156 | try:
157 |     client.models.generate_content(
158 |         model="invalid-model-name",
159 |         contents="What is your name?",
160 |     )
161 | except errors.APIError as e:
162 |     print(e.code)  # 404
163 |     print(e.message)
164 | ```
165 | 
166 | ## Platform Support
167 | 
168 | - **Python Version:** >=3.9
169 | - **Supported Python Versions:** 3.9, 3.10, 3.11, 3.12, 3.13
170 | - **License:** Apache Software License (Apache-2.0)
171 | - **Operating System:** OS Independent
172 | 
173 | ## Additional Resources
174 | 
175 | - **Homepage:** https://github.com/googleapis/python-genai
176 | - **Documentation:** https://googleapis.github.io/python-genai/
177 | - **PyPI Page:** https://pypi.org/project/google-genai/
178 | 
179 | ## Recent Updates
180 | 
181 | The v1.22.0 release continues to support the latest Gemini models and maintains compatibility with both Gemini Developer API and Vertex AI platforms. The SDK provides comprehensive support for generative AI tasks including text generation, image generation, video generation, embeddings, and more.
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/test_gemini.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for Gemini provider.
  3 | """
  4 | 
  5 | import pytest
  6 | import os
  7 | import re
  8 | from dotenv import load_dotenv
  9 | from just_prompt.atoms.llm_providers import gemini
 10 | 
 11 | # Load environment variables
 12 | load_dotenv()
 13 | 
 14 | # Skip tests if API key not available
 15 | if not os.environ.get("GEMINI_API_KEY"):
 16 |     pytest.skip("Gemini API key not available", allow_module_level=True)
 17 | 
 18 | 
 19 | def test_list_models():
 20 |     """Test listing Gemini models."""
 21 |     models = gemini.list_models()
 22 |     
 23 |     # Assertions
 24 |     assert isinstance(models, list)
 25 |     assert len(models) > 0
 26 |     assert all(isinstance(model, str) for model in models)
 27 |     
 28 |     # Check for at least one expected model containing gemini
 29 |     gemini_models = [model for model in models if "gemini" in model.lower()]
 30 |     assert len(gemini_models) > 0, "No Gemini models found"
 31 | 
 32 | 
 33 | def test_prompt():
 34 |     """Test sending prompt to Gemini."""
 35 |     # Using gemini-1.5-flash as the model for testing
 36 |     response = gemini.prompt("What is the capital of France?", "gemini-1.5-flash")
 37 |     
 38 |     # Assertions
 39 |     assert isinstance(response, str)
 40 |     assert len(response) > 0
 41 |     assert "paris" in response.lower() or "Paris" in response
 42 | 
 43 | 
 44 | def test_parse_thinking_suffix():
 45 |     """Test parsing thinking suffix from model name."""
 46 |     # Test cases with valid formats
 47 |     assert gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:1k") == ("gemini-2.5-flash-preview-04-17", 1024)
 48 |     assert gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:4k") == ("gemini-2.5-flash-preview-04-17", 4096)
 49 |     assert gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:2048") == ("gemini-2.5-flash-preview-04-17", 2048)
 50 |     
 51 |     # Test cases with invalid models (should ignore suffix)
 52 |     assert gemini.parse_thinking_suffix("gemini-1.5-flash:4k") == ("gemini-1.5-flash", 0)
 53 |     
 54 |     # Test cases with invalid suffix format
 55 |     base_model, budget = gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:invalid")
 56 |     assert base_model == "gemini-2.5-flash-preview-04-17"
 57 |     assert budget == 0
 58 |     
 59 |     # Test case with no suffix
 60 |     assert gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17") == ("gemini-2.5-flash-preview-04-17", 0)
 61 |     
 62 |     # Test case with out-of-range values (should be clamped)
 63 |     assert gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:25000")[1] == 24576
 64 |     assert gemini.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:-1000")[1] == 0
 65 | 
 66 | 
 67 | @pytest.mark.skipif(
 68 |     "gemini-2.5-flash-preview-04-17" not in gemini.list_models(),
 69 |     reason="gemini-2.5-flash-preview-04-17 model not available"
 70 | )
 71 | def test_prompt_with_thinking():
 72 |     """Test sending prompt to Gemini with thinking enabled."""
 73 |     # Using the gemini-2.5-flash-preview-04-17 model with thinking budget
 74 |     model_name = "gemini-2.5-flash-preview-04-17:1k"
 75 |     response = gemini.prompt("What is the square root of 144?", model_name)
 76 |     
 77 |     # Assertions
 78 |     assert isinstance(response, str)
 79 |     assert len(response) > 0
 80 |     assert "12" in response.lower(), f"Expected '12' in response: {response}"
 81 | 
 82 | 
 83 | @pytest.mark.skipif(
 84 |     "gemini-2.5-flash-preview-04-17" not in gemini.list_models(),
 85 |     reason="gemini-2.5-flash-preview-04-17 model not available"
 86 | )
 87 | def test_prompt_without_thinking():
 88 |     """Test sending prompt to Gemini without thinking enabled."""
 89 |     # Using the gemini-2.5-flash-preview-04-17 model without thinking budget
 90 |     model_name = "gemini-2.5-flash-preview-04-17"
 91 |     response = gemini.prompt("What is the capital of Germany?", model_name)
 92 |     
 93 |     # Assertions
 94 |     assert isinstance(response, str)
 95 |     assert len(response) > 0
 96 |     assert "berlin" in response.lower() or "Berlin" in response, f"Expected 'Berlin' in response: {response}"
 97 | 
 98 | 
 99 | def test_gemini_2_5_pro_availability():
100 |     """Test if Gemini 2.5 Pro model is available."""
101 |     models = gemini.list_models()
102 |     
103 |     # Print all available models for debugging
104 |     print("\nAvailable Gemini models:")
105 |     for model in sorted(models):
106 |         print(f"  - {model}")
107 |     
108 |     # Check if any Gemini 2.5 Pro variant is available
109 |     gemini_2_5_pro_models = [model for model in models if "gemini-2.5-pro" in model.lower()]
110 |     
111 |     if gemini_2_5_pro_models:
112 |         print(f"\nFound Gemini 2.5 Pro models: {gemini_2_5_pro_models}")
113 |     else:
114 |         print("\nNo Gemini 2.5 Pro models found!")
115 |         print("You may need to update the google-genai library")
116 |     
117 |     # This assertion will fail if no Gemini 2.5 Pro is found
118 |     assert len(gemini_2_5_pro_models) > 0, "Gemini 2.5 Pro model not found - may need to update google-genai library"
```
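
The assertions above fully determine the suffix-parsing behavior. The following is a minimal sketch that satisfies them, assuming only the 2.5 flash preview model accepts a thinking budget and that the budget is capped at 24576; it is not the provider module itself.

```python
import re
from typing import Tuple

# Models that accept a thinking budget, per the tests above (assumption)
THINKING_ENABLED_MODELS = {"gemini-2.5-flash-preview-04-17"}
MAX_THINKING_BUDGET = 24576


def parse_thinking_suffix_sketch(model: str) -> Tuple[str, int]:
    """Return (base_model, thinking_budget) consistent with the assertions above."""
    if ":" not in model:
        return model, 0

    base, suffix = model.rsplit(":", 1)

    # ":4k" -> 4096, ":2048" -> 2048; anything else (e.g. ":invalid", ":-1000") is ignored
    match = re.fullmatch(r"(\d+)(k?)", suffix.lower())
    if not match or base not in THINKING_ENABLED_MODELS:
        return base, 0

    budget = int(match.group(1)) * (1024 if match.group(2) else 1)
    return base, min(budget, MAX_THINKING_BUDGET)
```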

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/llm_providers/test_anthropic.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for Anthropic provider.
 3 | """
 4 | 
 5 | import pytest
 6 | import os
 7 | from dotenv import load_dotenv
 8 | from just_prompt.atoms.llm_providers import anthropic
 9 | 
10 | # Load environment variables
11 | load_dotenv()
12 | 
13 | # Skip tests if API key not available
14 | if not os.environ.get("ANTHROPIC_API_KEY"):
15 |     pytest.skip("Anthropic API key not available", allow_module_level=True)
16 | 
17 | 
18 | def test_list_models():
19 |     """Test listing Anthropic models."""
20 |     models = anthropic.list_models()
21 |     
22 |     # Assertions
23 |     assert isinstance(models, list)
24 |     assert len(models) > 0
25 |     assert all(isinstance(model, str) for model in models)
26 |     
27 |     # Check for at least one expected model
28 |     claude_models = [model for model in models if "claude" in model.lower()]
29 |     assert len(claude_models) > 0, "No Claude models found"
30 | 
31 | 
32 | def test_prompt():
33 |     """Test sending prompt to Anthropic."""
34 |     # Use the correct model name from the available models
35 |     response = anthropic.prompt("What is the capital of France?", "claude-3-5-haiku-20241022")
36 |     
37 |     # Assertions
38 |     assert isinstance(response, str)
39 |     assert len(response) > 0
40 |     assert "paris" in response.lower() or "Paris" in response
41 | 
42 | 
43 | def test_parse_thinking_suffix():
44 |     """Test parsing thinking suffix from model names."""
45 |     # Test cases with no suffix
46 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet") == ("claude-3-7-sonnet", 0)
47 |     assert anthropic.parse_thinking_suffix("claude-3-5-haiku-20241022") == ("claude-3-5-haiku-20241022", 0)
48 |     
49 |     # Test cases with supported claude-3-7 model and k suffixes
50 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet-20250219:1k") == ("claude-3-7-sonnet-20250219", 1024)
51 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet-20250219:4k") == ("claude-3-7-sonnet-20250219", 4096)
52 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet-20250219:15k") == ("claude-3-7-sonnet-20250219", 15360)  # 15*1024=15360 < 16000
53 |     
54 |     # Test cases with supported claude-4 models and k suffixes  
55 |     assert anthropic.parse_thinking_suffix("claude-opus-4-20250514:1k") == ("claude-opus-4-20250514", 1024)
56 |     assert anthropic.parse_thinking_suffix("claude-opus-4-20250514:4k") == ("claude-opus-4-20250514", 4096)
57 |     assert anthropic.parse_thinking_suffix("claude-sonnet-4-20250514:1k") == ("claude-sonnet-4-20250514", 1024)
58 |     assert anthropic.parse_thinking_suffix("claude-sonnet-4-20250514:8k") == ("claude-sonnet-4-20250514", 8192)
59 |     
60 |     # Test cases with supported models and numeric suffixes
61 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet-20250219:1024") == ("claude-3-7-sonnet-20250219", 1024)
62 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet-20250219:4096") == ("claude-3-7-sonnet-20250219", 4096)
63 |     assert anthropic.parse_thinking_suffix("claude-opus-4-20250514:8000") == ("claude-opus-4-20250514", 8000)
64 |     assert anthropic.parse_thinking_suffix("claude-sonnet-4-20250514:2048") == ("claude-sonnet-4-20250514", 2048)
65 |     
66 |     # Test cases with non-supported model
67 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet:1k") == ("claude-3-7-sonnet", 0)
68 |     assert anthropic.parse_thinking_suffix("claude-3-5-haiku:4k") == ("claude-3-5-haiku", 0)
69 |     
70 |     # Test cases with out-of-range values (should adjust to valid range)
71 |     assert anthropic.parse_thinking_suffix("claude-3-7-sonnet-20250219:500") == ("claude-3-7-sonnet-20250219", 1024)  # Below min 1024, should use 1024
72 |     assert anthropic.parse_thinking_suffix("claude-opus-4-20250514:20000") == ("claude-opus-4-20250514", 16000)  # Above max 16000, should use 16000
73 | 
74 | 
75 | def test_prompt_with_thinking():
76 |     """Test sending prompt with thinking enabled."""
77 |     # Test with 1k thinking tokens on the supported model
78 |     response = anthropic.prompt("What is the capital of Spain?", "claude-3-7-sonnet-20250219:1k")
79 |     
80 |     # Assertions
81 |     assert isinstance(response, str)
82 |     assert len(response) > 0
83 |     assert "madrid" in response.lower() or "Madrid" in response
84 |     
85 |     # Test with 2k thinking tokens on the supported model
86 |     response = anthropic.prompt("What is the capital of Germany?", "claude-3-7-sonnet-20250219:2k")
87 |     
88 |     # Assertions
89 |     assert isinstance(response, str)
90 |     assert len(response) > 0
91 |     assert "berlin" in response.lower() or "Berlin" in response
92 |     
93 |     # Test with out-of-range but auto-corrected thinking tokens
94 |     response = anthropic.prompt("What is the capital of Italy?", "claude-3-7-sonnet-20250219:500")
95 |     
96 |     # Assertions (should still work with a corrected budget of 1024)
97 |     assert isinstance(response, str)
98 |     assert len(response) > 0
99 |     assert "rome" in response.lower() or "Rome" in response
```
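
For context on what the thinking suffix buys, this is roughly how a parsed budget gets applied on the Anthropic side: a minimal sketch using the standard extended-thinking parameters of the `anthropic` SDK, with the `max_tokens` headroom chosen arbitrarily; it is not a copy of the provider module.

```python
import os

import anthropic

client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))


def prompt_with_thinking_sketch(text: str, model: str, budget_tokens: int) -> str:
    # Extended thinking requires budget_tokens >= 1024 and max_tokens > budget_tokens
    message = client.messages.create(
        model=model,
        max_tokens=budget_tokens + 1024,
        thinking={"type": "enabled", "budget_tokens": budget_tokens},
        messages=[{"role": "user", "content": text}],
    )
    # The reply interleaves thinking blocks and text blocks; keep only the visible text
    return "".join(block.text for block in message.content if block.type == "text")
```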

--------------------------------------------------------------------------------
/ultra_diff_review/fusion_ultra_diff_review.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Ultra Diff Review - Fusion Analysis
 2 | 
 3 | ## Overview
 4 | This is a synthesized analysis combining insights from multiple LLM reviews of the changes made to `list_models.py`. The code has been refactored to organize model listing functionality into separate functions for different AI providers.
 5 | 
 6 | ## Critical Issues
 7 | 
 8 | ### 1. 🚨 Hardcoded API Key (DeepSeek)
 9 | **Description**: The `list_deepseek_models()` function contains a hardcoded API key (`"sk-ds-3f422175ff114212a42d7107c3efd1e4"`).
10 | **Impact**: Major security vulnerability that could lead to unauthorized API usage and charges.
11 | **Solution**: Use environment variables instead:
12 | ```python
13 | api_key=os.environ.get("DEEPSEEK_API_KEY")
14 | ```
15 | 
16 | ### 2. ⚠️ Lack of Error Handling
17 | **Description**: None of the functions include error handling for API failures, network issues, or missing credentials.
18 | **Impact**: Code will crash or produce uninformative errors with actual usage.
19 | **Solution**: Implement try-except blocks for all API calls:
20 | ```python
21 | try:
22 |     client = OpenAI(api_key=os.environ.get("DEEPSEEK_API_KEY"), base_url="https://api.deepseek.com")
23 |     models = client.models.list()
24 |     # Process models
25 | except Exception as e:
26 |     print(f"Error fetching DeepSeek models: {e}")
27 | ```
28 | 
29 | ## Medium Priority Issues
30 | 
31 | ### 3. ⚠️ Multiple load_dotenv() Calls
32 | **Description**: Both `list_anthropic_models()` and `list_gemini_models()` call `load_dotenv()` independently.
33 | **Impact**: Redundant operations if multiple functions are called in the same run.
34 | **Solution**: Move `load_dotenv()` to a single location at the top of the file.
35 | 
36 | ### 4. ⚠️ Inconsistent API Key Access Patterns
37 | **Description**: Different functions use different methods to access API keys.
38 | **Impact**: Reduces code maintainability and consistency.
39 | **Solution**: Standardize API key access patterns across all providers.
40 | 
41 | ### 5. ⚠️ Redundant API Call in Gemini Function
42 | **Description**: `list_gemini_models()` calls `client.models.list()` twice for different filtering operations.
43 | **Impact**: Potential performance issue - may make unnecessary network calls.
44 | **Solution**: Store results in a variable and reuse:
45 | ```python
46 | models = list(client.models.list())  # materialize once so both passes can reuse it
47 | print("List of models that support generateContent:\n")
48 | for m in models:
49 |     # Filter for generateContent
50 |     
51 | print("List of models that support embedContent:\n")
52 | for m in models:
53 |     # Filter for embedContent
54 | ```
55 | 
56 | ## Low Priority Issues
57 | 
58 | ### 6. ℹ️ Inconsistent Variable Naming
59 | **Description**: In `list_groq_models()`, the result of `client.models.list()` is stored in a variable named `chat_completion`.
60 | **Impact**: Low - could cause confusion during maintenance.
61 | **Solution**: Use a more appropriate variable name like `models` or `model_list`.
62 | 
63 | ### 7. ℹ️ Inconsistent Output Formatting
64 | **Description**: Some functions include descriptive print statements, while others just print raw results.
65 | **Impact**: Low - user experience inconsistency.
66 | **Solution**: Standardize output formatting across all functions.
67 | 
68 | ### 8. ℹ️ Scattered Imports
69 | **Description**: Import statements are scattered throughout functions rather than at the top of the file.
70 | **Impact**: Low - code organization issue.
71 | **Solution**: Consolidate imports at the top of the file.
72 | 
73 | ### 9. ℹ️ Missing Function Docstrings
74 | **Description**: Functions lack documentation describing their purpose and usage.
75 | **Impact**: Low - reduces code readability and maintainability.
76 | **Solution**: Add docstrings to all functions.
77 | 
78 | ### 10. 💡 No Main Function
79 | **Description**: There's no main function to coordinate the execution of different provider functions.
80 | **Impact**: Low - usability enhancement needed.
81 | **Solution**: Add a main function with argument parsing to run specific provider functions.
82 | 
83 | ## Summary Table
84 | 
85 | | ID | Issue | Solution | Risk Assessment |
86 | |----|-------|----------|-----------------|
87 | | 1 | 🚨 Hardcoded API key (DeepSeek) | Use environment variables | High |
88 | | 2 | ⚠️ No error handling | Add try/except blocks for API calls | Medium |
89 | | 3 | ⚠️ Multiple load_dotenv() calls | Move to single location at file top | Medium |
90 | | 4 | ⚠️ Inconsistent API key access | Standardize patterns across providers | Medium |
91 | | 5 | ⚠️ Redundant API call (Gemini) | Cache API response in variable | Medium |
92 | | 6 | ℹ️ Inconsistent variable naming | Rename variables appropriately | Low |
93 | | 7 | ℹ️ Inconsistent output formatting | Standardize output format | Low |
94 | | 8 | ℹ️ Scattered imports | Consolidate imports at file top | Low |
95 | | 9 | ℹ️ Missing function docstrings | Add documentation to functions | Low |
96 | | 10 | 💡 No main function | Add main() with argument parsing | Low |
97 | 
98 | ## Recommendation
99 | The hardcoded API key issue (#1) should be addressed immediately as it poses a significant security risk. Following that, implementing proper error handling (#2) would greatly improve the reliability of the code.
```
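
Putting issues 1–3 together, the recommended shape for each provider function looks roughly like this; a sketch assuming the DeepSeek endpoint stays OpenAI-compatible, not the final refactor.

```python
import os

from dotenv import load_dotenv
from openai import OpenAI

# One load_dotenv() at module level instead of one per function (issue 3)
load_dotenv()


def list_deepseek_models() -> None:
    """List DeepSeek models using an env-based key and basic error handling (issues 1 and 2)."""
    try:
        client = OpenAI(
            api_key=os.environ.get("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com",
        )
        models = client.models.list()
        print("Available DeepSeek models:")
        for model in models.data:
            print(f"- {model.id}")
    except Exception as e:
        print(f"Error fetching DeepSeek models: {e}")
```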

--------------------------------------------------------------------------------
/src/just_prompt/tests/molecules/test_list_models.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for list_models functionality for all providers.
  3 | """
  4 | 
  5 | import pytest
  6 | import os
  7 | from dotenv import load_dotenv
  8 | from just_prompt.molecules.list_models import list_models
  9 | 
 10 | # Load environment variables
 11 | load_dotenv()
 12 | 
 13 | def test_list_models_openai():
 14 |     """Test listing OpenAI models with real API call."""
 15 |     # Skip if API key isn't available
 16 |     if not os.environ.get("OPENAI_API_KEY"):
 17 |         pytest.skip("OpenAI API key not available")
 18 |         
 19 |     # Test with full provider name
 20 |     models = list_models("openai")
 21 |     
 22 |     # Assertions
 23 |     assert isinstance(models, list)
 24 |     assert len(models) > 0
 25 |     
 26 |     # Check for specific model patterns that should exist
 27 |     assert any("gpt" in model.lower() for model in models)
 28 |     
 29 | def test_list_models_anthropic():
 30 |     """Test listing Anthropic models with real API call."""
 31 |     # Skip if API key isn't available
 32 |     if not os.environ.get("ANTHROPIC_API_KEY"):
 33 |         pytest.skip("Anthropic API key not available")
 34 |         
 35 |     # Test with full provider name
 36 |     models = list_models("anthropic")
 37 |     
 38 |     # Assertions
 39 |     assert isinstance(models, list)
 40 |     assert len(models) > 0
 41 |     
 42 |     # Check for specific model patterns that should exist
 43 |     assert any("claude" in model.lower() for model in models)
 44 | 
 45 | def test_list_models_gemini():
 46 |     """Test listing Gemini models with real API call."""
 47 |     # Skip if API key isn't available
 48 |     if not os.environ.get("GEMINI_API_KEY"):
 49 |         pytest.skip("Gemini API key not available")
 50 |         
 51 |     # Test with full provider name
 52 |     models = list_models("gemini")
 53 |     
 54 |     # Assertions
 55 |     assert isinstance(models, list)
 56 |     assert len(models) > 0
 57 |     
 58 |     # Check for specific model patterns that should exist
 59 |     assert any("gemini" in model.lower() for model in models)
 60 | 
 61 | def test_list_models_groq():
 62 |     """Test listing Groq models with real API call."""
 63 |     # Skip if API key isn't available
 64 |     if not os.environ.get("GROQ_API_KEY"):
 65 |         pytest.skip("Groq API key not available")
 66 |         
 67 |     # Test with full provider name
 68 |     models = list_models("groq")
 69 |     
 70 |     # Assertions
 71 |     assert isinstance(models, list)
 72 |     assert len(models) > 0
 73 |     
 74 |     # Check for specific model patterns (llama or mixtral are common in Groq)
 75 |     assert any(("llama" in model.lower() or "mixtral" in model.lower()) for model in models)
 76 | 
 77 | def test_list_models_deepseek():
 78 |     """Test listing DeepSeek models with real API call."""
 79 |     # Skip if API key isn't available
 80 |     if not os.environ.get("DEEPSEEK_API_KEY"):
 81 |         pytest.skip("DeepSeek API key not available")
 82 |         
 83 |     # Test with full provider name
 84 |     models = list_models("deepseek")
 85 |     
 86 |     # Assertions
 87 |     assert isinstance(models, list)
 88 |     assert len(models) > 0
 89 |     
 90 |     # Check for basic list return (no specific pattern needed)
 91 |     assert all(isinstance(model, str) for model in models)
 92 | 
 93 | def test_list_models_ollama():
 94 |     """Test listing Ollama models with real API call."""
 95 |     # Test with full provider name
 96 |     models = list_models("ollama")
 97 |     
 98 |     # Assertions
 99 |     assert isinstance(models, list)
100 |     assert len(models) > 0
101 |     
102 |     # Check for basic list return (model entries could be anything)
103 |     assert all(isinstance(model, str) for model in models)
104 | 
105 | def test_list_models_with_short_names():
106 |     """Test listing models using short provider names."""
107 |     # Test each provider with short name (only if API key available)
108 |     
109 |     # OpenAI - short name "o"
110 |     if os.environ.get("OPENAI_API_KEY"):
111 |         models = list_models("o")
112 |         assert isinstance(models, list)
113 |         assert len(models) > 0
114 |         assert any("gpt" in model.lower() for model in models)
115 |     
116 |     # Anthropic - short name "a"
117 |     if os.environ.get("ANTHROPIC_API_KEY"):
118 |         models = list_models("a")
119 |         assert isinstance(models, list)
120 |         assert len(models) > 0
121 |         assert any("claude" in model.lower() for model in models)
122 |     
123 |     # Gemini - short name "g"
124 |     if os.environ.get("GEMINI_API_KEY"):
125 |         models = list_models("g")
126 |         assert isinstance(models, list)
127 |         assert len(models) > 0
128 |         assert any("gemini" in model.lower() for model in models)
129 |     
130 |     # Groq - short name "q"
131 |     if os.environ.get("GROQ_API_KEY"):
132 |         models = list_models("q")
133 |         assert isinstance(models, list)
134 |         assert len(models) > 0
135 |     
136 |     # DeepSeek - short name "d"
137 |     if os.environ.get("DEEPSEEK_API_KEY"):
138 |         models = list_models("d")
139 |         assert isinstance(models, list)
140 |         assert len(models) > 0
141 |     
142 |     # Ollama - short name "l"
143 |     models = list_models("l")
144 |     assert isinstance(models, list)
145 |     assert len(models) > 0
146 | 
147 | def test_list_models_invalid_provider():
148 |     """Test with invalid provider name."""
149 |     # Test invalid provider
150 |     with pytest.raises(ValueError):
151 |         list_models("unknown_provider")
```

--------------------------------------------------------------------------------
/src/just_prompt/tests/atoms/shared/test_validator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for validator functions.
  3 | """
  4 | 
  5 | import pytest
  6 | import os
  7 | from unittest.mock import patch
  8 | from just_prompt.atoms.shared.validator import (
  9 |     validate_models_prefixed_by_provider, 
 10 |     validate_provider,
 11 |     validate_provider_api_keys,
 12 |     print_provider_availability
 13 | )
 14 | 
 15 | 
 16 | def test_validate_models_prefixed_by_provider():
 17 |     """Test validating model strings."""
 18 |     # Valid model strings
 19 |     assert validate_models_prefixed_by_provider(["openai:gpt-4o-mini"]) == True
 20 |     assert validate_models_prefixed_by_provider(["anthropic:claude-3-5-haiku"]) == True
 21 |     assert validate_models_prefixed_by_provider(["o:gpt-4o-mini", "a:claude-3-5-haiku"]) == True
 22 |     
 23 |     # Invalid model strings
 24 |     with pytest.raises(ValueError):
 25 |         validate_models_prefixed_by_provider([])
 26 |     
 27 |     with pytest.raises(ValueError):
 28 |         validate_models_prefixed_by_provider(["unknown:model"])
 29 |     
 30 |     with pytest.raises(ValueError):
 31 |         validate_models_prefixed_by_provider(["invalid-format"])
 32 | 
 33 | 
 34 | def test_validate_provider():
 35 |     """Test validating provider names."""
 36 |     # Valid providers
 37 |     assert validate_provider("openai") == True
 38 |     assert validate_provider("anthropic") == True
 39 |     assert validate_provider("o") == True
 40 |     assert validate_provider("a") == True
 41 |     
 42 |     # Invalid providers
 43 |     with pytest.raises(ValueError):
 44 |         validate_provider("unknown")
 45 |         
 46 |     with pytest.raises(ValueError):
 47 |         validate_provider("")
 48 | 
 49 | 
 50 | def test_validate_provider_api_keys():
 51 |     """Test validating provider API keys."""
 52 |     # Use mocked environment variables with a mix of valid, empty, and missing keys
 53 |     with patch.dict(os.environ, {
 54 |         "OPENAI_API_KEY": "test-key",
 55 |         "ANTHROPIC_API_KEY": "test-key",
 56 |         "GROQ_API_KEY": "test-key",  
 57 |         # GEMINI_API_KEY not defined
 58 |         "DEEPSEEK_API_KEY": "test-key",
 59 |         "OLLAMA_HOST": "http://localhost:11434"
 60 |     }):
 61 |         # Call the function to validate provider API keys
 62 |         availability = validate_provider_api_keys()
 63 |         
 64 |         # Check that each provider has the correct availability status
 65 |         assert availability["openai"] is True
 66 |         assert availability["anthropic"] is True
 67 |         assert availability["groq"] is True
 68 |         
 69 |         # This depends on the actual implementation. Since we're mocking the environment,
 70 |         # let's just assert that the keys exist rather than specific values
 71 |         assert "gemini" in availability
 72 |         assert "deepseek" in availability
 73 |         assert "ollama" in availability
 74 |         
 75 |         # Make sure all providers are included in the result
 76 |         assert set(availability.keys()) == {"openai", "anthropic", "gemini", "groq", "deepseek", "ollama"}
 77 | 
 78 | 
 79 | def test_validate_provider_api_keys_none():
 80 |     """Test validating provider API keys when none are available."""
 81 |     # Use mocked environment variables with no API keys
 82 |     with patch.dict(os.environ, {}, clear=True):
 83 |         # Call the function to validate provider API keys
 84 |         availability = validate_provider_api_keys()
 85 |         
 86 |         # Check that all providers are marked as unavailable
 87 |         assert all(status is False for status in availability.values())
 88 |         assert set(availability.keys()) == {"openai", "anthropic", "gemini", "groq", "deepseek", "ollama"}
 89 | 
 90 | 
 91 | def test_print_provider_availability():
 92 |     """Test printing provider availability."""
 93 |     # Mock the validate_provider_api_keys function to return a controlled result
 94 |     mock_availability = {
 95 |         "openai": True,
 96 |         "anthropic": False,
 97 |         "gemini": True,
 98 |         "groq": False,
 99 |         "deepseek": True,
100 |         "ollama": False
101 |     }
102 |     
103 |     with patch('just_prompt.atoms.shared.validator.validate_provider_api_keys', 
104 |               return_value=mock_availability):
105 |         
106 |         # Mock the logger to verify the log messages
107 |         with patch('just_prompt.atoms.shared.validator.logger') as mock_logger:
108 |             # Call the function to print provider availability
109 |             print_provider_availability(detailed=True)
110 |             
111 |             # Verify that info was called with a message about available providers
112 |             mock_logger.info.assert_called_once()
113 |             info_call_args = mock_logger.info.call_args[0][0]
114 |             assert "Available LLM providers:" in info_call_args
115 |             assert "openai" in info_call_args
116 |             assert "gemini" in info_call_args
117 |             assert "deepseek" in info_call_args
118 |             
119 |             # Check that warning was called multiple times
120 |             assert mock_logger.warning.call_count >= 2
121 |             
122 |             # Check that the first warning is about missing API keys
123 |             warning_calls = [call[0][0] for call in mock_logger.warning.call_args_list]
124 |             assert "The following providers are unavailable due to missing API keys:" in warning_calls
125 | 
```
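
The expected behavior here reduces to one environment lookup per provider. A minimal sketch consistent with these tests, assuming the environment variable names shown above are the only gates; the real validator may do more.

```python
import os
from typing import Dict

# Env var that gates each provider (assumption based on the tests above)
PROVIDER_ENV_VARS = {
    "openai": "OPENAI_API_KEY",
    "anthropic": "ANTHROPIC_API_KEY",
    "gemini": "GEMINI_API_KEY",
    "groq": "GROQ_API_KEY",
    "deepseek": "DEEPSEEK_API_KEY",
    "ollama": "OLLAMA_HOST",
}


def validate_provider_api_keys_sketch() -> Dict[str, bool]:
    """Report which providers have credentials configured, mirroring the keys the tests expect."""
    return {
        provider: bool(os.environ.get(env_var))
        for provider, env_var in PROVIDER_ENV_VARS.items()
    }
```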

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/openai.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | OpenAI provider implementation.
  3 | """
  4 | 
  5 | """OpenAI provider implementation with support for o‑series *reasoning effort* suffixes.
  6 | 
  7 | Supported suffixes (case‑insensitive): ``:low``, ``:medium``, ``:high`` on the
  8 | reasoning models ``o4-mini``, ``o3-mini`` and ``o3``.  When such a suffix is
  9 | present we use OpenAI's *Responses* API with the corresponding
 10 | ``reasoning={"effort": <level>}`` parameter (if the SDK supports it).  If the
 11 | installed ``openai`` SDK is older and does not expose the ``responses``
 12 | resource, we gracefully fall back to the Chat Completions endpoint so that the
 13 | basic functionality (and our tests) still work.
 14 | """
 15 | 
 16 | import os
 17 | import re
 18 | import logging
 19 | from typing import List, Tuple
 20 | 
 21 | from dotenv import load_dotenv
 22 | 
 23 | # Third‑party import guarded so that static analysis still works when the SDK
 24 | # is absent.
 25 | from openai import OpenAI  # type: ignore
 26 | import logging
 27 | from dotenv import load_dotenv
 28 | 
 29 | # Load environment variables
 30 | load_dotenv()
 31 | 
 32 | # Configure logging
 33 | logger = logging.getLogger(__name__)
 34 | 
 35 | # Initialize OpenAI client once – reused across calls.
 36 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
 37 | 
 38 | # ---------------------------------------------------------------------------
 39 | # Internal helpers
 40 | # ---------------------------------------------------------------------------
 41 | 
 42 | 
 43 | _REASONING_ELIGIBLE_MODELS = {"o4-mini", "o3-mini", "o3", "gpt-5", "gpt-5-mini", "gpt-5-nano"}
 44 | _REASONING_LEVELS = {"low", "medium", "high"}
 45 | 
 46 | 
 47 | # Public so that tests can import.
 48 | 
 49 | def parse_reasoning_suffix(model: str) -> Tuple[str, str]:
 50 |     """Return (base_model, effort_level).
 51 | 
 52 |     If *model* is something like ``o4-mini:high`` (case‑insensitive) we return
 53 |     ("o4-mini", "high").  For all other inputs we return (_model_, "").
 54 |     """
 55 | 
 56 |     # Split once from the right so additional colons inside the *provider* part
 57 |     # are untouched (the caller already stripped the provider prefix).
 58 |     if ":" not in model:
 59 |         return model, ""
 60 | 
 61 |     base, suffix = model.rsplit(":", 1)
 62 | 
 63 |     suffix_lower = suffix.lower()
 64 | 
 65 |     if base in _REASONING_ELIGIBLE_MODELS and suffix_lower in _REASONING_LEVELS:
 66 |         return base, suffix_lower
 67 | 
 68 |     # Not a recognised reasoning pattern; treat the whole string as the model
 69 |     return model, ""
 70 | 
 71 | 
 72 | def _prompt_with_reasoning(text: str, model: str, effort: str) -> str:  # pragma: no cover – hits network
 73 |     """Call OpenAI *Responses* API with reasoning effort.
 74 | 
 75 |     Falls back transparently to chat completions if the installed SDK does not
 76 |     yet expose the *responses* resource.
 77 |     """
 78 | 
 79 |     if not effort:
 80 |         raise ValueError("effort must be 'low', 'medium', or 'high'")
 81 | 
 82 |     logger.info(
 83 |         "Sending prompt to OpenAI reasoning model %s with effort '%s'", model, effort
 84 |     )
 85 | 
 86 |     # Prefer the official Responses endpoint when present.
 87 |     if hasattr(client, "responses"):
 88 |         try:
 89 |             response = client.responses.create(
 90 |                 model=model,
 91 |                 reasoning={"effort": effort},
 92 |                 input=[{"role": "user", "content": text}],
 93 |             )
 94 | 
 95 |             # The modern SDK returns .output_text
 96 |             output_text = getattr(response, "output_text", None)
 97 |             if output_text is not None:
 98 |                 return output_text
 99 | 
100 |             # Fallback path: maybe same shape as chat completions.
101 |             if hasattr(response, "choices") and response.choices:
102 |                 return response.choices[0].message.content  # type: ignore[attr-defined]
103 | 
104 |             raise ValueError("Unexpected response format from OpenAI responses API")
105 |         except Exception as exc:  # pragma: no cover – keep behaviour consistent
106 |             logger.warning("Responses API failed (%s); falling back to chat", exc)
107 | 
108 |     # Fallback to chat completions – pass the reasoning level as a system
109 |     # message so that, even without official support, the model can try to act
110 |     # accordingly.  This keeps tests functional if the Responses API is not
111 |     # available in the runtime environment.
112 |     try:
113 |         response = client.chat.completions.create(
114 |             model=model,
115 |             messages=[
116 |                 {
117 |                     "role": "system",
118 |                     "content": f"Use {effort} reasoning effort before answering.",
119 |                 },
120 |                 {"role": "user", "content": text},
121 |             ],
122 |         )
123 | 
124 |         return response.choices[0].message.content  # type: ignore[attr-defined]
125 |     except Exception as exc:
126 |         logger.error("Error sending prompt to OpenAI (fallback chat): %s", exc)
127 |         raise ValueError(f"Failed to get response from OpenAI: {exc}")
128 | 
129 | 
130 | def prompt(text: str, model: str) -> str:
131 |     """Main prompt entry‑point for the OpenAI provider.
132 | 
133 |     Handles the optional ``:low|:medium|:high`` suffix on reasoning models.
134 |     Falls back to regular chat completions when no suffix is detected.
135 |     """
136 | 
137 |     base_model, effort = parse_reasoning_suffix(model)
138 | 
139 |     if effort:
140 |         return _prompt_with_reasoning(text, base_model, effort)
141 | 
142 |     # Regular chat completion path
143 |     try:
144 |         logger.info("Sending prompt to OpenAI model: %s", base_model)
145 |         response = client.chat.completions.create(
146 |             model=base_model,
147 |             messages=[{"role": "user", "content": text}],
148 |         )
149 | 
150 |         return response.choices[0].message.content  # type: ignore[attr-defined]
151 |     except Exception as exc:
152 |         logger.error("Error sending prompt to OpenAI: %s", exc)
153 |         raise ValueError(f"Failed to get response from OpenAI: {exc}")
154 | 
155 | 
156 | def list_models() -> List[str]:
157 |     """
158 |     List available OpenAI models.
159 | 
160 |     Returns:
161 |         List of model names
162 |     """
163 |     try:
164 |         logger.info("Listing OpenAI models")
165 |         response = client.models.list()
166 | 
167 |         # Return all models without filtering
168 |         models = [model.id for model in response.data]
169 | 
170 |         return models
171 |     except Exception as exc:
172 |         # Networking errors shouldn't break the caller – return a minimal hard‑coded list.
173 |         logger.warning("Error listing OpenAI models via API (%s). Returning fallback list.", exc)
174 |         return [
175 |             "gpt-4o-mini",
176 |             "o4-mini",
177 |             "o3-mini",
178 |             "o3",
179 |             "text-davinci-003",
180 |         ]
181 | 
```

--------------------------------------------------------------------------------
/src/just_prompt/molecules/ceo_and_board_prompt.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | CEO and Board prompt functionality for just-prompt.
  3 | """
  4 | 
  5 | from typing import List
  6 | import logging
  7 | import os
  8 | from pathlib import Path
  9 | from .prompt_from_file_to_file import prompt_from_file_to_file
 10 | from .prompt import prompt
 11 | from ..atoms.shared.utils import DEFAULT_MODEL
 12 | 
 13 | logger = logging.getLogger(__name__)
 14 | 
 15 | # Default CEO model
 16 | DEFAULT_CEO_MODEL = "openai:o3"
 17 | 
 18 | # Default CEO decision prompt template
 19 | DEFAULT_CEO_DECISION_PROMPT = """
 20 | <purpose>
 21 |     You are a CEO of a company. You are given a list of responses from your board of directors. Your job is to take in the original question prompt, and each of the board members' responses, and choose the best direction for your company.
 22 | </purpose>
 23 | <instructions>
 24 |     <instruction>Each board member has proposed an answer to the question posed in the prompt.</instruction>
 25 |     <instruction>Given the original question prompt, and each of the board members' responses, choose the best answer.</instruction>
 26 |     <instruction>Tally the votes of the board members, choose the best direction, and explain why you chose it.</instruction>
 27 |     <instruction>To preserve anonymity, we will use model names instead of real names of your board members. When responding, use the model names in your response.</instruction>
 28 |     <instruction>As a CEO, you break down the decision into several categories, including risk, reward, timeline, and resources. In addition to these guiding categories, you also consider the board members' expertise and experience. As a bleeding-edge CEO, you also invent new dimensions of decision making to help you make the best decision for your company.</instruction>
 29 |     <instruction>Your final CEO response should be in markdown format with a comprehensive explanation of your decision. Start the top of the file with a title that says "CEO Decision", include a table of contents, briefly describe the question/problem at hand, then dive into several sections. One of your first sections should be a quick summary of your decision; then break down each of the board's decisions into sections with your commentary on each. From there, lead into the categories of your decision-making process, and then into your final decision.</instruction>
 30 | </instructions>
 31 | 
 32 | <original-question>{original_prompt}</original-question>
 33 | 
 34 | <board-decisions>
 35 | {board_responses}
 36 | </board-decisions>
 37 | """
 38 | 
 39 | 
 40 | def ceo_and_board_prompt(
 41 |     abs_from_file: str,
 42 |     abs_output_dir: str = ".",
 43 |     models_prefixed_by_provider: List[str] = None,
 44 |     ceo_model: str = DEFAULT_CEO_MODEL,
 45 |     ceo_decision_prompt: str = DEFAULT_CEO_DECISION_PROMPT
 46 | ) -> str:
 47 |     """
 48 |     Read text from a file, send it as prompt to multiple 'board member' models,
 49 |     and then have a 'CEO' model make a decision based on the responses.
 50 | 
 51 |     Args:
 52 |         abs_from_file: Absolute path to the text file containing the original prompt (must be an absolute path, not relative)
 53 |         abs_output_dir: Absolute directory path to save response files (must be an absolute path, not relative)
 54 |         models_prefixed_by_provider: List of model strings in format "provider:model"
 55 |                                    to act as the board members
 56 |         ceo_model: Model to use for the CEO decision in format "provider:model"
 57 |         ceo_decision_prompt: Template for the CEO decision prompt
 58 | 
 59 |     Returns:
 60 |         Path to the CEO decision file
 61 |     """
 62 |     # Validate output directory
 63 |     output_path = Path(abs_output_dir)
 64 |     if not output_path.exists():
 65 |         output_path.mkdir(parents=True, exist_ok=True)
 66 | 
 67 |     if not output_path.is_dir():
 68 |         raise ValueError(f"Not a directory: {abs_output_dir}")
 69 | 
 70 |     # Get the original prompt from the file
 71 |     try:
 72 |         with open(abs_from_file, 'r', encoding='utf-8') as f:
 73 |             original_prompt = f.read()
 74 |     except Exception as e:
 75 |         logger.error(f"Error reading file {abs_from_file}: {e}")
 76 |         raise ValueError(f"Error reading file: {str(e)}")
 77 | 
 78 |     # Step 1: Get board members' responses
 79 |     board_response_files = prompt_from_file_to_file(
 80 |         abs_file_path=abs_from_file,
 81 |         models_prefixed_by_provider=models_prefixed_by_provider,
 82 |         abs_output_dir=abs_output_dir
 83 |     )
 84 | 
 85 |     # Get the models that were actually used
 86 |     models_used = models_prefixed_by_provider
 87 |     if not models_used:
 88 |         default_models = os.environ.get("DEFAULT_MODELS", DEFAULT_MODEL)
 89 |         models_used = [model.strip() for model in default_models.split(",")]
 90 | 
 91 |     # Step 2: Read in the board responses
 92 |     board_responses_text = ""
 93 |     for i, file_path in enumerate(board_response_files):
 94 |         model_name = models_used[i].replace(":", "_")
 95 |         try:
 96 |             with open(file_path, 'r', encoding='utf-8') as f:
 97 |                 response_content = f.read()
 98 |                 board_responses_text += f"""
 99 | <board-response>
100 |     <model-name>{models_used[i]}</model-name>
101 |     <response>{response_content}</response>
102 | </board-response>
103 | """
104 |         except Exception as e:
105 |             logger.error(f"Error reading board response file {file_path}: {e}")
106 |             board_responses_text += f"""
107 | <board-response>
108 |     <model-name>{models_used[i]}</model-name>
109 |     <response>Error reading response: {str(e)}</response>
110 | </board-response>
111 | """
112 | 
113 |     # Step 3: Prepare the CEO decision prompt
114 |     final_ceo_prompt = ceo_decision_prompt.format(
115 |         original_prompt=original_prompt,
116 |         board_responses=board_responses_text
117 |     )
118 | 
119 |     # Step 4: Save the CEO prompt to a file
120 |     ceo_prompt_file = output_path / "ceo_prompt.xml"
121 |     try:
122 |         with open(ceo_prompt_file, "w", encoding="utf-8") as f:
123 |             f.write(final_ceo_prompt)
124 |     except Exception as e:
125 |         logger.error(f"Error writing CEO prompt to {ceo_prompt_file}: {e}")
126 |         raise ValueError(f"Error writing CEO prompt: {str(e)}")
127 |     
128 |     # Step 5: Get the CEO decision
129 |     ceo_response = prompt(final_ceo_prompt, [ceo_model])[0]
130 | 
131 |     # Step 6: Write the CEO decision to a file
132 |     ceo_output_file = output_path / "ceo_decision.md"
133 |     try:
134 |         with open(ceo_output_file, "w", encoding="utf-8") as f:
135 |             f.write(ceo_response)
136 |     except Exception as e:
137 |         logger.error(f"Error writing CEO decision to {ceo_output_file}: {e}")
138 |         raise ValueError(f"Error writing CEO decision: {str(e)}")
139 | 
140 |     return str(ceo_output_file)
```
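
The function above fans a prompt file out to the board models via `prompt_from_file_to_file`, stitches their responses into the XML template, and asks the CEO model for a decision. A minimal usage sketch; the paths and model ids below are placeholders, not values from the repository, and the relevant provider API keys must be set:

```python
# Placeholder paths and model ids; requires the corresponding provider API keys.
from just_prompt.molecules.ceo_and_board_prompt import ceo_and_board_prompt

decision_file = ceo_and_board_prompt(
    abs_from_file="/abs/path/to/question.txt",      # original prompt (absolute path)
    abs_output_dir="/abs/path/to/output",           # board responses, ceo_prompt.xml, ceo_decision.md
    models_prefixed_by_provider=[
        "openai:o4-mini",
        "anthropic:claude-3-7-sonnet-20250219:4k",  # thinking suffix handled by the anthropic provider
        "gemini:gemini-2.5-pro-preview-03-25",
    ],
    ceo_model="openai:o3",                          # same as DEFAULT_CEO_MODEL
)
print(decision_file)                                # .../ceo_decision.md
```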

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/anthropic.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Anthropic provider implementation.
  3 | """
  4 | 
  5 | import os
  6 | import re
  7 | import anthropic
  8 | from typing import List, Tuple
  9 | import logging
 10 | from dotenv import load_dotenv
 11 | 
 12 | # Load environment variables
 13 | load_dotenv()
 14 | 
 15 | # Configure logging
 16 | logger = logging.getLogger(__name__)
 17 | 
 18 | # Initialize Anthropic client
 19 | client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
 20 | 
 21 | 
 22 | def parse_thinking_suffix(model: str) -> Tuple[str, int]:
 23 |     """
 24 |     Parse a model name to check for thinking token budget suffixes.
 25 |     Only works with Claude models that support extended thinking (claude-3-7-sonnet-20250219, claude-opus-4-20250514, claude-sonnet-4-20250514).
 26 |     
 27 |     Supported formats:
 28 |     - model:1k, model:4k, model:16k
 29 |     - model:1000, model:1054, model:1333, etc. (any value between 1024-16000)
 30 |     
 31 |     Args:
 32 |         model: The model name potentially with a thinking suffix
 33 |         
 34 |     Returns:
 35 |         Tuple of (base_model_name, thinking_budget)
 36 |         If no thinking suffix is found, thinking_budget will be 0
 37 |     """
 38 |     # Look for patterns like ":1k", ":4k", ":16k" or ":1000", ":1054", etc.
 39 |     pattern = r'^(.+?)(?::(\d+)k?)?$'
 40 |     match = re.match(pattern, model)
 41 |     
 42 |     if not match:
 43 |         return model, 0
 44 |     
 45 |     base_model = match.group(1)
 46 |     thinking_suffix = match.group(2)
 47 |     
 48 |     # Validate the model - only specific Claude models support thinking
 49 |     supported_thinking_models = [
 50 |         "claude-3-7-sonnet-20250219",
 51 |         "claude-opus-4-20250514", 
 52 |         "claude-sonnet-4-20250514"
 53 |     ]
 54 |     if base_model not in supported_thinking_models:
 55 |         logger.warning(f"Model {base_model} does not support thinking, ignoring thinking suffix")
 56 |         return base_model, 0
 57 |     
 58 |     if not thinking_suffix:
 59 |         return model, 0
 60 |     
 61 |     # Convert to integer
 62 |     try:
 63 |         thinking_budget = int(thinking_suffix)
 64 |         # If a small number like 1, 4, 16 is provided, assume it's in "k" (multiply by 1024)
 65 |         if thinking_budget < 100:
 66 |             thinking_budget *= 1024
 67 |             
 68 |         # Adjust values outside the range
 69 |         if thinking_budget < 1024:
 70 |             logger.warning(f"Thinking budget {thinking_budget} below minimum (1024), using 1024 instead")
 71 |             thinking_budget = 1024
 72 |         elif thinking_budget > 16000:
 73 |             logger.warning(f"Thinking budget {thinking_budget} above maximum (16000), using 16000 instead")
 74 |             thinking_budget = 16000
 75 |             
 76 |         logger.info(f"Using thinking budget of {thinking_budget} tokens for model {base_model}")
 77 |         return base_model, thinking_budget
 78 |     except ValueError:
 79 |         logger.warning(f"Invalid thinking budget format: {thinking_suffix}, ignoring")
 80 |         return base_model, 0
 81 | 
 82 | 
 83 | def prompt_with_thinking(text: str, model: str, thinking_budget: int) -> str:
 84 |     """
 85 |     Send a prompt to Anthropic Claude with thinking enabled and get a response.
 86 |     
 87 |     Args:
 88 |         text: The prompt text
 89 |         model: The base model name (without thinking suffix)
 90 |         thinking_budget: The token budget for thinking
 91 |         
 92 |     Returns:
 93 |         Response string from the model
 94 |     """
 95 |     try:
 96 |         # Ensure max_tokens is greater than thinking_budget
 97 |         # Documentation requires this: https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#max-tokens-and-context-window-size
 98 |         max_tokens = thinking_budget + 1000  # Adding 1000 tokens for the response
 99 |         
100 |         logger.info(f"Sending prompt to Anthropic model {model} with thinking budget {thinking_budget}")
101 |         message = client.messages.create(
102 |             model=model,
103 |             max_tokens=max_tokens,
104 |             thinking={
105 |                 "type": "enabled",
106 |                 "budget_tokens": thinking_budget,
107 |             },
108 |             messages=[{"role": "user", "content": text}]
109 |         )
110 |         
111 |         # Extract the response from the message content
112 |         # Filter out thinking blocks and only get text blocks
113 |         text_blocks = [block for block in message.content if block.type == "text"]
114 |         
115 |         if not text_blocks:
116 |             raise ValueError("No text content found in response")
117 |             
118 |         return text_blocks[0].text
119 |     except Exception as e:
120 |         logger.error(f"Error sending prompt with thinking to Anthropic: {e}")
121 |         raise ValueError(f"Failed to get response from Anthropic with thinking: {str(e)}")
122 | 
123 | 
124 | def prompt(text: str, model: str) -> str:
125 |     """
126 |     Send a prompt to Anthropic Claude and get a response.
127 |     
128 |     Automatically handles thinking suffixes in the model name (e.g., claude-3-7-sonnet-20250219:4k)
129 |     
130 |     Args:
131 |         text: The prompt text
132 |         model: The model name, optionally with thinking suffix
133 |         
134 |     Returns:
135 |         Response string from the model
136 |     """
137 |     # Parse the model name to check for thinking suffixes
138 |     base_model, thinking_budget = parse_thinking_suffix(model)
139 |     
140 |     # If thinking budget is specified, use prompt_with_thinking
141 |     if thinking_budget > 0:
142 |         return prompt_with_thinking(text, base_model, thinking_budget)
143 |     
144 |     # Otherwise, use regular prompt
145 |     try:
146 |         logger.info(f"Sending prompt to Anthropic model: {base_model}")
147 |         message = client.messages.create(
148 |             model=base_model, max_tokens=4096, messages=[{"role": "user", "content": text}]
149 |         )
150 | 
151 |         # Extract the response from the message content
152 |         # Get only text blocks
153 |         text_blocks = [block for block in message.content if block.type == "text"]
154 |         
155 |         if not text_blocks:
156 |             raise ValueError("No text content found in response")
157 |             
158 |         return text_blocks[0].text
159 |     except Exception as e:
160 |         logger.error(f"Error sending prompt to Anthropic: {e}")
161 |         raise ValueError(f"Failed to get response from Anthropic: {str(e)}")
162 | 
163 | 
164 | def list_models() -> List[str]:
165 |     """
166 |     List available Anthropic models.
167 |     
168 |     Returns:
169 |         List of model names
170 |     """
171 |     try:
172 |         logger.info("Listing Anthropic models")
173 |         response = client.models.list()
174 | 
175 |         models = [model.id for model in response.data]
176 |         return models
177 |     except Exception as e:
178 |         logger.error(f"Error listing Anthropic models: {e}")
179 |         # Return some known models if API fails
180 |         logger.info("Returning hardcoded list of known Anthropic models")
181 |         return [
182 |             "claude-3-7-sonnet",
183 |             "claude-3-5-sonnet",
184 |             "claude-3-5-sonnet-20240620",
185 |             "claude-3-opus-20240229",
186 |             "claude-3-sonnet-20240229",
187 |             "claude-3-haiku-20240307",
188 |             "claude-3-5-haiku",
189 |         ]
```
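
Two illustrative calls against the functions above (requires `ANTHROPIC_API_KEY`; the prompt text is a placeholder):

```python
from just_prompt.atoms.llm_providers import anthropic as anthropic_provider

# "4k" is expanded to a 4096-token thinking budget and clamped to 1024-16000;
# suffixes on models outside the supported list are ignored with a warning.
print(anthropic_provider.parse_thinking_suffix("claude-3-7-sonnet-20250219:4k"))
# -> ('claude-3-7-sonnet-20250219', 4096)

# prompt() routes to prompt_with_thinking() when a budget is present,
# otherwise it sends a plain messages.create call with max_tokens=4096.
text = anthropic_provider.prompt("List three uses of binary search.", "claude-3-7-sonnet-20250219:4k")
print(text)
```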

--------------------------------------------------------------------------------
/src/just_prompt/atoms/llm_providers/gemini.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Google Gemini provider implementation.
  3 | """
  4 | 
  5 | import os
  6 | import re
  7 | from typing import List, Tuple
  8 | import logging
  9 | from dotenv import load_dotenv
 10 | from google import genai
 11 | 
 12 | # Load environment variables
 13 | load_dotenv()
 14 | 
 15 | # Configure logging
 16 | logger = logging.getLogger(__name__)
 17 | 
 18 | # Initialize Gemini client
 19 | client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
 20 | 
 21 | # Models that support thinking_budget
 22 | THINKING_ENABLED_MODELS = ["gemini-2.5-flash-preview-04-17"]
 23 | 
 24 | 
 25 | def parse_thinking_suffix(model: str) -> Tuple[str, int]:
 26 |     """
 27 |     Parse a model name to check for thinking token budget suffixes.
 28 |     Only works with the models in THINKING_ENABLED_MODELS.
 29 |     
 30 |     Supported formats:
 31 |     - model:1k, model:4k, model:24k
 32 |     - model:1000, model:1054, model:24576, etc. (any value between 0-24576)
 33 |     
 34 |     Args:
 35 |         model: The model name potentially with a thinking suffix
 36 |         
 37 |     Returns:
 38 |         Tuple of (base_model_name, thinking_budget)
 39 |         If no thinking suffix is found, thinking_budget will be 0
 40 |     """
 41 |     # First check if the model name contains a colon
 42 |     if ":" not in model:
 43 |         return model, 0
 44 |         
 45 |     # Split the model name on the first colon to handle models with multiple colons
 46 |     parts = model.split(":", 1)
 47 |     base_model = parts[0]
 48 |     suffix = parts[1] if len(parts) > 1 else ""
 49 |     
 50 |     # Check if the base model is in the supported models list
 51 |     if base_model not in THINKING_ENABLED_MODELS:
 52 |         logger.warning(f"Model {base_model} does not support thinking, ignoring thinking suffix")
 53 |         return base_model, 0
 54 |     
 55 |     # If there's no suffix or it's empty, return default values
 56 |     if not suffix:
 57 |         return base_model, 0
 58 |     
 59 |     # Check if the suffix is a valid number (with optional 'k' suffix)
 60 |     if re.match(r'^\d+k?$', suffix):
 61 |         # Extract the numeric part and handle 'k' multiplier
 62 |         if suffix.endswith('k'):
 63 |             try:
 64 |                 thinking_budget = int(suffix[:-1]) * 1024
 65 |             except ValueError:
 66 |                 logger.warning(f"Invalid thinking budget format: {suffix}, ignoring")
 67 |                 return base_model, 0
 68 |         else:
 69 |             try:
 70 |                 thinking_budget = int(suffix)
 71 |                 # If a small number like 1, 4, 24 is provided, assume it's in "k" (multiply by 1024)
 72 |                 if thinking_budget < 100:
 73 |                     thinking_budget *= 1024
 74 |             except ValueError:
 75 |                 logger.warning(f"Invalid thinking budget format: {suffix}, ignoring")
 76 |                 return base_model, 0
 77 |         
 78 |         # Adjust values outside the range
 79 |         if thinking_budget < 0:
 80 |             logger.warning(f"Thinking budget {thinking_budget} below minimum (0), using 0 instead")
 81 |             thinking_budget = 0
 82 |         elif thinking_budget > 24576:
 83 |             logger.warning(f"Thinking budget {thinking_budget} above maximum (24576), using 24576 instead")
 84 |             thinking_budget = 24576
 85 |             
 86 |         logger.info(f"Using thinking budget of {thinking_budget} tokens for model {base_model}")
 87 |         return base_model, thinking_budget
 88 |     else:
 89 |         # If suffix is not a valid number format, ignore it
 90 |         logger.warning(f"Invalid thinking budget format: {suffix}, ignoring")
 91 |         return base_model, 0
 92 | 
 93 | 
 94 | def prompt_with_thinking(text: str, model: str, thinking_budget: int) -> str:
 95 |     """
 96 |     Send a prompt to Google Gemini with thinking enabled and get a response.
 97 |     
 98 |     Args:
 99 |         text: The prompt text
100 |         model: The base model name (without thinking suffix)
101 |         thinking_budget: The token budget for thinking
102 |         
103 |     Returns:
104 |         Response string from the model
105 |     """
106 |     try:
107 |         logger.info(f"Sending prompt to Gemini model {model} with thinking budget {thinking_budget}")
108 |         
109 |         response = client.models.generate_content(
110 |             model=model,
111 |             contents=text,
112 |             config=genai.types.GenerateContentConfig(
113 |                 thinking_config=genai.types.ThinkingConfig(
114 |                     thinking_budget=thinking_budget
115 |                 )
116 |             )
117 |         )
118 |         
119 |         return response.text
120 |     except Exception as e:
121 |         logger.error(f"Error sending prompt with thinking to Gemini: {e}")
122 |         raise ValueError(f"Failed to get response from Gemini with thinking: {str(e)}")
123 | 
124 | 
125 | def prompt(text: str, model: str) -> str:
126 |     """
127 |     Send a prompt to Google Gemini and get a response.
128 |     
129 |     Automatically handles thinking suffixes in the model name (e.g., gemini-2.5-flash-preview-04-17:4k)
130 |     
131 |     Args:
132 |         text: The prompt text
133 |         model: The model name, optionally with thinking suffix
134 |         
135 |     Returns:
136 |         Response string from the model
137 |     """
138 |     # Parse the model name to check for thinking suffixes
139 |     base_model, thinking_budget = parse_thinking_suffix(model)
140 |     
141 |     # If thinking budget is specified, use prompt_with_thinking
142 |     if thinking_budget > 0:
143 |         return prompt_with_thinking(text, base_model, thinking_budget)
144 |     
145 |     # Otherwise, use regular prompt
146 |     try:
147 |         logger.info(f"Sending prompt to Gemini model: {base_model}")
148 |         
149 |         response = client.models.generate_content(
150 |             model=base_model,
151 |             contents=text
152 |         )
153 |         
154 |         return response.text
155 |     except Exception as e:
156 |         logger.error(f"Error sending prompt to Gemini: {e}")
157 |         raise ValueError(f"Failed to get response from Gemini: {str(e)}")
158 | 
159 | 
160 | def list_models() -> List[str]:
161 |     """
162 |     List available Google Gemini models.
163 |     
164 |     Returns:
165 |         List of model names
166 |     """
167 |     try:
168 |         logger.info("Listing Gemini models")
169 |         
170 |         # Get the list of models using the correct API method
171 |         models = []
172 |         available_models = client.models.list()
173 |         for m in available_models:
174 |             # Prefer models that advertise "generateContent" support
175 |             if hasattr(m, 'supported_generation_methods') and "generateContent" in m.supported_generation_methods:
176 |                 models.append(m.name)
177 |             else:
178 |                 # Attribute missing or method not listed: include the model anyway
179 |                 models.append(m.name)
180 |                 
181 |         # Format model names - strip the "models/" prefix if present
182 |         formatted_models = [model.replace("models/", "") for model in models]
183 |         
184 |         return formatted_models
185 |     except Exception as e:
186 |         logger.error(f"Error listing Gemini models: {e}")
187 |         # Throw the error instead of returning hardcoded list
188 |         raise ValueError(f"Failed to list Gemini models: {str(e)}")
```
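
As with the Anthropic provider, the thinking suffix is parsed off the model name before the request is built. An illustrative sketch (requires `GEMINI_API_KEY`; the prompt text is a placeholder):

```python
from just_prompt.atoms.llm_providers import gemini as gemini_provider

# "8k" becomes an 8192-token budget, clamped to 0-24576 and only honored for
# models listed in THINKING_ENABLED_MODELS.
print(gemini_provider.parse_thinking_suffix("gemini-2.5-flash-preview-04-17:8k"))
# -> ('gemini-2.5-flash-preview-04-17', 8192)

# With a budget, prompt() passes a ThinkingConfig via GenerateContentConfig;
# without one, it issues a plain generate_content call.
reply = gemini_provider.prompt("Explain idempotency in one paragraph.", "gemini-2.5-flash-preview-04-17:8k")
print(reply)
```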