# Directory Structure
```
├── .cursor
│   └── rules
│       ├── documentation-process.mdc
│       ├── git-behaviour.mdc
│       ├── python-fastapi.mdc
│       └── vite-typescript.mdc
├── .dev-tools
│   ├── .gitignore
│   ├── prompts
│   │   ├── prompt_pr.md
│   │   └── prompt_user_story.md
│   └── README.md
├── .env.example
├── .github
│   └── workflows
│       ├── docker-build-and-push.yml
│       └── python-backend-checks.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHECKLIST.md
├── Dockerfile
├── poetry.lock
├── pyproject.toml
├── README.md
├── scripts
│   ├── build-push-ecr.sh
│   └── build-run.sh
├── src
│   ├── __init__.py
│   ├── backend
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── .env.example
│   │   ├── app.py
│   │   ├── elevenlabs_client.py
│   │   ├── mcp_tools.py
│   │   ├── README.md
│   │   ├── routes.py
│   │   └── websocket.py
│   └── frontend
│       ├── .gitignore
│       ├── build.sh
│       ├── eslint.config.js
│       ├── index.html
│       ├── package-lock.json
│       ├── package.json
│       ├── postcss.config.js
│       ├── public
│       │   └── vite.svg
│       ├── README.md
│       ├── src
│       │   ├── App.tsx
│       │   ├── assets
│       │   │   └── react.svg
│       │   ├── index.css
│       │   ├── main.tsx
│       │   ├── services
│       │   │   └── api.ts
│       │   └── vite-env.d.ts
│       ├── tsconfig.app.json
│       ├── tsconfig.json
│       ├── tsconfig.node.json
│       └── vite.config.ts
├── terraform
│   ├── aws
│   │   ├── api_gateway
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   ├── data.tf
│   │   ├── ecr
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   ├── ecs
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   └── iam
│   │       ├── main.tf
│   │       ├── outputs.tf
│   │       └── variables.tf
│   ├── main.tf
│   ├── outputs.tf
│   ├── README.md
│   ├── terraform.tfvars
│   └── variables.tf
└── tests
    ├── __init__.py
    └── backend
        ├── __init__.py
        ├── conftest.py
        └── test_mcp_tools.py
```
# Files
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
# ElevenLabs API Configuration
ELEVENLABS_API_KEY=your-api-key-here
# Server Configuration
HOST=0.0.0.0
PORT=9020
LOG_LEVEL=INFO
BASE_PATH=
# Development Settings
DEBUG=false
RELOAD=true 
```
--------------------------------------------------------------------------------
/src/frontend/.gitignore:
--------------------------------------------------------------------------------
```
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
```
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
```yaml
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
  rev: v0.3.4
  hooks:
    - id: ruff
      args: [ --fix ]
    - id: ruff-format
- repo: local
  hooks:
    - id: pytest
      name: Run unit tests
      entry: poetry run pytest tests/backend -v
      language: system
      types: [python]
      pass_filenames: false
      always_run: true 
```
--------------------------------------------------------------------------------
/.dev-tools/.gitignore:
--------------------------------------------------------------------------------
```
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Temporary files
temp_output/
*.tmp
*.temp
*.swp
*.swo
# Environment variables
.env
.env.local
.env.*.local
# IDE
.idea/
.vscode/
*.sublime-workspace
*.sublime-project
# OS
.DS_Store
Thumbs.db 
```
--------------------------------------------------------------------------------
/src/backend/.env.example:
--------------------------------------------------------------------------------
```
# ElevenLabs API Configuration
ELEVENLABS_API_KEY=your_api_key_here
# Server Configuration
HOST=127.0.0.1              # Local: 127.0.0.1, Docker: 0.0.0.0
PORT=9020                   # Port the service listens on
LOG_LEVEL=INFO              # Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
# API Path Configuration
# Important: empty or '/' locally, '/jessica-backend' in AWS/containers
ROOT_PATH=                  # Path prefix for API Gateway/ALB (empty locally, e.g. '/jessica-backend' in production)
# Development Settings
DEBUG=false                 # Enable debug mode
RELOAD=true                 # Auto-reload on code changes
# MCP Configuration
MCP_PORT=9022               # Port for MCP communication
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
terraform-global
src/auth-service
.ruff_cache/
# Dependencies
node_modules/
README-INFRASTRUCTURE.md
# Environment variables
.env
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
# IDE - VSCode
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
# Misc
.DS_Store
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Virtual Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
.python-version
# IDE
.idea/
.vscode/
*.swp
*.swo
.DS_Store
# Testing
.coverage
.pytest_cache/
htmlcov/
.tox/
.nox/
coverage.xml
*.cover
*.py,cover
.hypothesis/
# Logs
*.log
logs/
log/
# Project specific
generated/
cache/
.mcp/
# Frontend build output in backend
src/backend/static/
# Frontend build
src/frontend/dist/
# Local development
.env.local
.env.development.local
.env.test.local
.env.production.local
# Temporary docs
docs/tmp/ 
# Terraform
.terraform/
*.tfstate
*.tfstate.*
.terraform.lock.hcl
terraform.tfvars.json
terraform.tfvars
*.auto.tfvars
*.auto.tfvars.json 
```
--------------------------------------------------------------------------------
/src/frontend/README.md:
--------------------------------------------------------------------------------
```markdown
# React + TypeScript + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
- Configure the top-level `parserOptions` property like this:
```js
export default tseslint.config({
  languageOptions: {
    // other options...
    parserOptions: {
      project: ['./tsconfig.node.json', './tsconfig.app.json'],
      tsconfigRootDir: import.meta.dirname,
    },
  },
})
```
- Replace `tseslint.configs.recommended` with `tseslint.configs.recommendedTypeChecked` or `tseslint.configs.strictTypeChecked`
- Optionally add `...tseslint.configs.stylisticTypeChecked`
- Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and update the config:
```js
// eslint.config.js
import react from 'eslint-plugin-react'
export default tseslint.config({
  // Set the react version
  settings: { react: { version: '18.3' } },
  plugins: {
    // Add the react plugin
    react,
  },
  rules: {
    // other rules...
    // Enable its recommended rules
    ...react.configs.recommended.rules,
    ...react.configs['jsx-runtime'].rules,
  },
})
```
```
--------------------------------------------------------------------------------
/src/backend/README.md:
--------------------------------------------------------------------------------
```markdown
# Jessica Backend Service
FastAPI-based backend service for the Jessica Text-to-Speech application with ElevenLabs API integration and MCP (Model Context Protocol).
## Configuration
The service can be configured via environment variables or a `.env` file.
### Main Settings
| Variable | Default Value | Description |
|----------|--------------|--------------|
| ELEVENLABS_API_KEY | - | API key for ElevenLabs |
| HOST | 127.0.0.1 | Host address (0.0.0.0 for containers) |
| PORT | 9020 | HTTP port |
| LOG_LEVEL | INFO | Logging level (DEBUG, INFO, WARNING, ERROR) |
| MCP_PORT | 9022 | MCP port |
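As a rough illustration (the actual configuration loading lives in `app.py`, which is not shown in this README), these variables could be read with `python-dotenv` and `os.getenv`:
```python
# Illustrative sketch only; variable names match the table above, defaults are the documented ones.
import os

from dotenv import load_dotenv

load_dotenv()  # pick up a local .env file if present

HOST = os.getenv("HOST", "127.0.0.1")
PORT = int(os.getenv("PORT", "9020"))
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
MCP_PORT = int(os.getenv("MCP_PORT", "9022"))
```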
### Path Routing with ROOT_PATH
The service supports running behind an API Gateway or Application Load Balancer with a path prefix.
| Variable | Default Value | Description |
|----------|--------------|--------------|
| ROOT_PATH | "" | Path prefix for API Gateway/ALB integration |
#### How it works:
1. **Local Development**: `ROOT_PATH` remains empty (""), so the API is accessible at `http://localhost:9020/`.
2. **Production**: In an AWS environment, ROOT_PATH can be set to, e.g., `/jessica-backend`. The middleware ensures that:
   - Requests to `/jessica-backend/health` are processed internally as `/health`
   - The FastAPI documentation and all links contain the correct path
   - The middleware layer automatically removes the ROOT_PATH prefix
3. **Middleware**: The path-rewriting middleware automatically rewrites incoming paths that carry the ROOT_PATH prefix (see the sketch below).
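A minimal sketch of such a middleware, assuming a plain FastAPI `app` (the project's actual implementation lives in `app.py`, is not reproduced here, and may differ):
```python
# Hypothetical sketch of a ROOT_PATH-stripping middleware; not the project's exact code.
import os

from fastapi import FastAPI, Request

ROOT_PATH = os.getenv("ROOT_PATH", "")
app = FastAPI(root_path=ROOT_PATH)  # keeps the docs and generated links correctly prefixed

@app.middleware("http")
async def strip_root_path(request: Request, call_next):
    # Rewrite e.g. /jessica-backend/health to /health before routing.
    path = request.scope["path"]
    if ROOT_PATH and path.startswith(ROOT_PATH):
        request.scope["path"] = path[len(ROOT_PATH):] or "/"
    return await call_next(request)
```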
### Example Configuration
```env
# Local
ROOT_PATH=
# Production with API Gateway/ALB
ROOT_PATH=/jessica-backend
```
## API Endpoints
The main API is served under `/api`; combined with ROOT_PATH, the endpoints are reachable at:
- Local: `http://localhost:9020/api/...`
- Production: `https://example.com/jessica-backend/api/...`
### Health Check
The health check endpoint is always available at `/health`:
```
GET /health
```
This also provides information about the configured ROOT_PATH. 
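For a quick manual check (assuming the service is running locally on port 9020; the exact response fields are not documented here):
```python
# Quick health-endpoint check using only the standard library.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:9020/health") as resp:
    print(json.load(resp))  # expected to include the configured ROOT_PATH
```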
```
--------------------------------------------------------------------------------
/terraform/README.md:
--------------------------------------------------------------------------------
```markdown
# Jessica Service Terraform Configuration
This directory contains the Terraform configuration for the Jessica service infrastructure. It follows the hub-and-spoke architecture described in the central georgi.io infrastructure repository.
## Components
- **ECR Repository**: For storing Docker images
- **VPC Integration**: Integration with the central VPC infrastructure
- **API Gateway Integration**: Integration with the central API Gateway (planned)
## Getting Started
### Prerequisites
- Terraform >= 1.0.0
- AWS CLI with SSO configuration
- yawsso (`pip install yawsso`) for AWS SSO credential management
- Appropriate AWS permissions
### AWS SSO Setup
1. Configure AWS SSO:
```bash
aws configure sso --profile georgi-io
# SSO Start URL: https://georgi-sso.awsapps.com/start
# SSO Region: eu-central-1
```
2. Login and sync credentials:
```bash
aws sso login --profile georgi-io
yawsso -p georgi-io  # Syncs SSO credentials with AWS CLI format
```
3. Verify setup:
```bash
aws sts get-caller-identity --profile georgi-io
```
### Terraform Commands
```bash
# Initialize Terraform
terraform init
# Plan changes
terraform plan
# Apply changes
terraform apply
# Destroy resources (use with caution)
terraform destroy
```
## Directory Structure
```
terraform/
├── main.tf                # Main Terraform configuration
├── variables.tf           # Variable definitions
├── outputs.tf             # Output definitions
├── terraform.tfvars       # Variable values
└── aws/                   # AWS-specific modules
    └── ecr/               # ECR repository module
        ├── main.tf        # ECR resource definitions
        ├── variables.tf   # ECR module variables
        └── outputs.tf     # ECR module outputs
```
## Remote State
This configuration uses the central S3 bucket for storing Terraform state:
```hcl
backend "s3" {
  bucket = "georgi-io-terraform-state"
  key    = "services/jessica/terraform.tfstate"
  region = "eu-central-1"
}
```
## Central Infrastructure Integration
This configuration integrates with the central infrastructure using Terraform remote state:
```hcl
data "terraform_remote_state" "infrastructure" {
  backend = "s3"
  config = {
    bucket = "georgi-io-terraform-state"
    key    = "infrastructure/terraform.tfstate"
    region = "eu-central-1"
  }
}
``` 
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# Project Jessica (ElevenLabs TTS MCP)
This project integrates ElevenLabs Text-to-Speech capabilities with Cursor through the Model Context Protocol (MCP). It consists of a FastAPI backend service and a React frontend application.
## Features
- Text-to-Speech conversion using ElevenLabs API
- Voice selection and management
- MCP integration for Cursor
- Modern React frontend interface
- WebSocket real-time communication
- Pre-commit hooks for code quality
- Automatic code formatting and linting
## Project Structure
```
jessica/
├── src/
│   ├── backend/          # FastAPI backend service
│   └── frontend/         # React frontend application
├── terraform/            # Infrastructure as Code
├── tests/               # Test suites
└── docs/                # Documentation
```
## Requirements
- Python 3.11+
- Poetry (for backend dependency management)
- Node.js 18+ (for frontend)
- Cursor (for MCP integration)
## Local Development Setup
### Backend Setup
```bash
# Clone the repository
git clone https://github.com/georgi-io/jessica.git
cd jessica
# Create Python virtual environment
python -m venv .venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate
# Install backend dependencies
poetry install
# Configure environment
cp .env.example .env
# Edit .env with your ElevenLabs API key
# Install pre-commit hooks
poetry run pre-commit install
```
### Frontend Setup
```bash
# Navigate to frontend directory
cd src/frontend
# Install dependencies
npm install
```
## Development Servers
### Starting the Backend
```bash
# Activate virtual environment if not active
source .venv/bin/activate  # On Windows: .venv\Scripts\activate
# Start the backend
python -m src.backend
```
The backend provides:
- REST API: http://localhost:9020
- WebSocket: ws://localhost:9020/ws (a quick connection check is sketched below)
- MCP Server: http://localhost:9020/sse (integrated with the main API server)
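A minimal way to verify the WebSocket endpoint from Python, using the `websockets` package that is already a backend dependency (a sketch only; the `audio_data` message shape follows the `speak_text` broadcast in `mcp_tools.py`):
```python
# Minimal WebSocket smoke test; assumes the backend is running locally on port 9020.
import asyncio
import json

import websockets

async def main() -> None:
    async with websockets.connect("ws://localhost:9020/ws") as ws:
        print("WebSocket connected")
        # Messages such as {"type": "audio_data", ...} arrive once speak_text is triggered.
        message = json.loads(await ws.recv())
        print("received message type:", message.get("type"))

asyncio.run(main())
```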
### Starting the Frontend
```bash
# In src/frontend directory
npm run dev
```
Frontend development server:
- http://localhost:5173
## Environment Configuration
### Backend (.env)
```env
# ElevenLabs API
ELEVENLABS_API_KEY=your-api-key
# Server Configuration
HOST=127.0.0.1
PORT=9020
# Development Settings
DEBUG=false
RELOAD=true
```
### Frontend (.env)
```env
VITE_API_URL=http://localhost:9020
VITE_WS_URL=ws://localhost:9020/ws
```
## Code Quality Tools
### Backend
```bash
# Run all pre-commit hooks
poetry run pre-commit run --all-files
# Run specific tools
poetry run ruff check .
poetry run ruff format .
poetry run pytest
```
### Frontend
```bash
# Lint
npm run lint
# Type check
npm run type-check
# Test
npm run test
```
## Production Deployment
### AWS ECR and GitHub Actions Setup
To enable automatic building and pushing of Docker images to Amazon ECR:
1. Apply the Terraform configuration to create the required AWS resources:
   ```bash
   cd terraform
   terraform init
   terraform apply
   ```
2. The GitHub Actions workflow will automatically:
   - Read the necessary configuration from the Terraform state in S3
   - Build the Docker image on pushes to `main` or `develop` branches
   - Push the image to ECR with tags for `latest` and the specific commit SHA
3. No additional repository variables are needed: the workflow fetches all required configuration from the Terraform state.
### How it Works
The GitHub Actions workflow is configured to:
1. Initially assume a predefined IAM role with S3 read permissions
2. Fetch and extract configuration values from the Terraform state file in S3
3. Re-authenticate using the actual deployment role from the state file
4. Build and push the Docker image to the ECR repository defined in the state
This approach eliminates the need to manually configure GitHub repository variables and ensures that the CI/CD process always uses the current infrastructure configuration.
### Quick Overview
- Frontend: Served from S3 via CloudFront at jessica.georgi.io
- Backend API: Available at api.georgi.io/jessica
- WebSocket: Connects to api.georgi.io/jessica/ws
- Docker Image: Stored in AWS ECR and can be deployed to ECS/EKS
- Infrastructure: Managed via Terraform in this repository
## MCP Integration with Cursor
1. Start the backend server
2. In Cursor settings, add new MCP server:
   - Name: Jessica TTS
   - Type: SSE
   - URL: http://localhost:9020/sse
## Troubleshooting
### Common Issues
1. **API Key Issues**
   - Error: "Invalid API key"
   - Solution: Check `.env` file
2. **Connection Problems**
   - Error: "Cannot connect to MCP server"
   - Solution: Verify backend is running and ports are correct
3. **Port Conflicts**
   - Error: "Address already in use"
   - Solution: Change ports in `.env`
4. **WebSocket Connection Failed**
   - Error: "WebSocket connection failed"
   - Solution: Ensure backend is running and WebSocket URL is correct
For additional help, please open an issue on GitHub.
## License
MIT 
```
--------------------------------------------------------------------------------
/.dev-tools/README.md:
--------------------------------------------------------------------------------
```markdown
# Development Tools
This directory contains various development and productivity tools used in the project. These tools are designed to help developers and product managers with common tasks and maintain consistency across the codebase.
## Directory Structure
```
.dev-tools/
├── prompts/           # AI prompt templates for various tasks
│   ├── prompt_pr.md   # PR description generation template
│   ├── prompt_commit.md # Commit message generation template
│   └── prompt_user_story.md # User story generation template
│
└── scripts/          # Development utility scripts
    ├── generate_git_diffs.py  # Script for generating git diffs
    ├── check_env_files.py  # Validates environment files across all services
    ├── pyproject.toml       # Poetry project configuration
    ├── .env.example        # Example environment variables
    └── .env               # Your local environment variables (git-ignored)
```
## Tools Overview
### Prompts
The `prompts/` directory contains templates for AI-assisted tasks:
- `prompt_pr.md`: Template for generating detailed pull request descriptions
- `prompt_commit.md`: Template for creating meaningful commit messages
- `prompt_user_story.md`: Template for creating well-structured user stories as GitHub issues
### Scripts
The `scripts/` directory contains utility scripts:
- `generate_git_diffs.py`: Python script for generating git diffs between branches and collecting commit messages
- `check_env_files.py`: Validates environment files across all services (a simplified sketch follows this list)
  - Checks for missing .env files against .env.example templates
  - Ensures all required variables are set
  - Detects example/placeholder values that need to be replaced
  - Validates against .gitignore patterns
  - Provides clear progress indicators and summary reports
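For orientation, a simplified sketch of the core comparison the script performs (the real `check_env_files.py` is not included in this document and additionally handles .gitignore patterns, placeholder detection, and multi-service scanning):
```python
# Simplified illustration only; the paths below are examples, not the script's actual configuration.
from pathlib import Path

def env_keys(path: Path) -> set[str]:
    """Collect variable names from a dotenv-style file, ignoring comments and blank lines."""
    keys: set[str] = set()
    for line in path.read_text().splitlines():
        line = line.strip()
        if line and not line.startswith("#") and "=" in line:
            keys.add(line.split("=", 1)[0].strip())
    return keys

example, actual = Path("src/backend/.env.example"), Path("src/backend/.env")
if not actual.exists():
    print(f"❌ missing {actual}")
else:
    missing = env_keys(example) - env_keys(actual)
    print(f"⚠️ missing variables: {sorted(missing)}" if missing else "✅ all variables set")
```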
## Setup
### Environment Variables
1. Create your local environment file:
   ```bash
   cd .dev-tools/scripts
   cp .env.example .env
   ```
2. Adjust the variables in `.env` if needed:
   - `GIT_DIFF_BRANCH`: The branch to compare against (defaults to `origin/dev` for PRs)
   - Note: Most PRs should be created against the `dev` branch. Use `origin/main` only for hotfixes or release PRs.
### Python Dependencies
1. Install Poetry if not already installed:
   ```bash
   curl -sSL https://install.python-poetry.org | python3 -
   ```
2. Install dependencies:
   ```bash
   cd .dev-tools/scripts
   poetry install
   ```
3. Activate the virtual environment:
   ```bash
   poetry shell
   ```
## Usage
### Workflow: Creating User Stories with AI
1. In Cursor's Composer:
   - Reference or drag in `prompt_user_story.md`
   - Describe what you want to achieve in your user story
   - The AI will analyze your input and the codebase to create a comprehensive user story
   - Review and confirm the generated story
   - The AI will help create a GitHub issue with proper project assignment
2. The generated user story will include:
   - Business requirements from a Product Manager's perspective
   - Technical analysis from an Engineering Manager's perspective
   - Implementation approach and considerations
   - Proper GitHub issue metadata and project assignment
### Workflow: Creating Commits with AI
1. Generate the necessary git diffs:
   ```bash
   cd .dev-tools/scripts
   poetry run python generate_git_diffs.py
   ```
   This creates three files in the `temp_output` directory:
   - `TEMP_GIT_commit_diff.txt`: Current changes to be committed
   - `TEMP_GIT_pr_diff.txt`: Changes compared to target branch
   - `TEMP_GIT_pr_commit_messages.txt`: Commit messages history
2. In Cursor's Composer:
   - Reference or drag in `prompt_commit.md`
   - Reference or drag in `TEMP_GIT_commit_diff.txt`
   - Click Submit
   - Cursor will generate a meaningful commit message based on your changes
### Workflow: Creating Pull Requests with AI
1. Ensure correct comparison branch:
   - Check `.env` file in scripts directory
   - By default, it's set to `origin/dev` for normal feature PRs
   - Change to `origin/main` only for hotfixes or release PRs
   - This determines which branch your changes are compared against
2. Generate the git diffs:
   ```bash
   cd .dev-tools/scripts
   poetry run python generate_git_diffs.py
   ```
3. In Cursor's Composer:
   - Reference or drag in `prompt_pr.md`
   - Reference or drag in `TEMP_GIT_pr_diff.txt`
   - Reference or drag in `TEMP_GIT_pr_commit_messages.txt`
   - Click Submit
   - Cursor will generate a detailed PR description based on your changes and commit history
### Tips
- The default target branch is `dev` as most PRs should go there
- Only use `main` as target for hotfixes or release PRs
- The generated files in `temp_output` are temporary and will be overwritten on each run
- You can reference these files in any Cursor Composer chat to generate commit messages or PR descriptions
### Workflow: Checking Environment Files
1. Run the environment checker:
   ```bash
   cd .dev-tools/scripts
   poetry run python check_env_files.py
   ```
2. The script will:
   - Scan the project for service directories
   - Load and respect .gitignore patterns
   - Check each service's environment configuration
   - Provide a detailed report showing:
     - ✅ Properly configured environment files
     - ❌ Missing environment files
     - ⚠️  Variables with example values
     - ⚠️  Extra variables not in example files
3. Fix any issues reported:
   - Create missing .env files based on .env.example templates
   - Fill in required variables
   - Replace example/placeholder values with real ones
   - Review extra variables to ensure they're needed
## Contributing
When adding new tools:
1. Create appropriate subdirectories based on tool type
2. Include clear documentation
3. Update this README with new tool descriptions
4. For Python scripts:
   - Add dependencies to `pyproject.toml` using Poetry
   - Follow the code style defined in `pyproject.toml`
   - Update setup instructions if needed 
```
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/src/backend/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/tests/backend/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Backend test package.
"""
```
--------------------------------------------------------------------------------
/src/frontend/src/vite-env.d.ts:
--------------------------------------------------------------------------------
```typescript
/// <reference types="vite/client" />
```
--------------------------------------------------------------------------------
/src/frontend/postcss.config.js:
--------------------------------------------------------------------------------
```javascript
export default {
  plugins: {
    autoprefixer: {},
  },
} 
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Test suite for the ElevenLabs MCP project backend.
"""
```
--------------------------------------------------------------------------------
/src/frontend/tsconfig.json:
--------------------------------------------------------------------------------
```json
{
  "files": [],
  "references": [
    { "path": "./tsconfig.app.json" },
    { "path": "./tsconfig.node.json" }
  ]
}
```
--------------------------------------------------------------------------------
/src/frontend/src/main.tsx:
--------------------------------------------------------------------------------
```typescript
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import './index.css'
import App from './App.tsx'
createRoot(document.getElementById('root')!).render(
  <StrictMode>
    <App />
  </StrictMode>,
)
```
--------------------------------------------------------------------------------
/src/frontend/build.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Build the frontend
echo "Building frontend..."
npm run build
# Create static directory in backend if it doesn't exist
mkdir -p ../backend/static
# Copy the build output to the backend static directory
echo "Copying build files to backend/static..."
cp -r dist/* ../backend/static/
echo "Frontend build complete!" 
```
--------------------------------------------------------------------------------
/src/frontend/index.html:
--------------------------------------------------------------------------------
```html
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Vite + React + TS</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.tsx"></script>
  </body>
</html>
```
--------------------------------------------------------------------------------
/src/backend/__main__.py:
--------------------------------------------------------------------------------
```python
import uvicorn
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
if __name__ == "__main__":
    # Get host and port from environment variables
    host = os.getenv("HOST", "127.0.0.1")
    port = int(os.getenv("PORT", "9020"))
    reload = os.getenv("RELOAD", "true").lower() == "true"
    # Run the FastAPI application
    uvicorn.run("src.backend.app:app", host=host, port=port, reload=reload)
```
--------------------------------------------------------------------------------
/src/frontend/tsconfig.node.json:
--------------------------------------------------------------------------------
```json
{
  "compilerOptions": {
    "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
    "target": "ES2022",
    "lib": ["ES2023"],
    "module": "ESNext",
    "skipLibCheck": true,
    /* Bundler mode */
    "moduleResolution": "bundler",
    "allowImportingTsExtensions": true,
    "isolatedModules": true,
    "moduleDetection": "force",
    "noEmit": true,
    /* Linting */
    "strict": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noFallthroughCasesInSwitch": true,
    "noUncheckedSideEffectImports": true
  },
  "include": ["vite.config.ts"]
}
```
--------------------------------------------------------------------------------
/src/frontend/tsconfig.app.json:
--------------------------------------------------------------------------------
```json
{
  "compilerOptions": {
    "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
    "target": "ES2020",
    "useDefineForClassFields": true,
    "lib": ["ES2020", "DOM", "DOM.Iterable"],
    "module": "ESNext",
    "skipLibCheck": true,
    /* Bundler mode */
    "moduleResolution": "bundler",
    "allowImportingTsExtensions": true,
    "isolatedModules": true,
    "moduleDetection": "force",
    "noEmit": true,
    "jsx": "react-jsx",
    /* Linting */
    "strict": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noFallthroughCasesInSwitch": true,
    "noUncheckedSideEffectImports": true
  },
  "include": ["src"]
}
```
--------------------------------------------------------------------------------
/scripts/build-run.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
set -e
# Configurable variables
IMAGE_NAME="jessica-backend"
TAG=${TAG:-"latest"}
# Build the Docker image
echo "Building Docker image: ${IMAGE_NAME}:${TAG}"
docker build -t ${IMAGE_NAME}:${TAG} .
# Run the container locally if the --run flag is given
if [ "$1" = "--run" ]; then
    echo "Starting container..."
    docker run -it --rm \
        -p 9020:9020 \
        -p 9022:9022 \
        -e ELEVENLABS_API_KEY=${ELEVENLABS_API_KEY:-"your-api-key-here"} \
        ${IMAGE_NAME}:${TAG}
else
    echo "Image built successfully: ${IMAGE_NAME}:${TAG}"
    echo "Use --run to start the container locally"
    echo "Example: ./scripts/build-run.sh --run"
fi 
```
--------------------------------------------------------------------------------
/src/frontend/eslint.config.js:
--------------------------------------------------------------------------------
```javascript
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
export default tseslint.config(
  { ignores: ['dist'] },
  {
    extends: [js.configs.recommended, ...tseslint.configs.recommended],
    files: ['**/*.{ts,tsx}'],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
    plugins: {
      'react-hooks': reactHooks,
      'react-refresh': reactRefresh,
    },
    rules: {
      ...reactHooks.configs.recommended.rules,
      'react-refresh/only-export-components': [
        'warn',
        { allowConstantExport: true },
      ],
    },
  },
)
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
FROM python:3.11-slim
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    python3-dev \
    curl \
    && rm -rf /var/lib/apt/lists/*
# Install Poetry
RUN pip install poetry==1.6.1
# Configure Poetry (no virtual environment)
RUN poetry config virtualenvs.create false
# Copy project files
COPY pyproject.toml poetry.lock README.md ./
COPY src/ /app/src/
COPY .env.example /app/.env
# Install dependencies
RUN poetry install --only main --no-interaction --no-ansi
# Adjust permissions
RUN chmod -R 755 /app
# Expose the port the backend listens on
EXPOSE 9020
# Set environment variables (can be overridden via an external .env or ENV)
ENV HOST=0.0.0.0
ENV PORT=9020
ENV BASE_PATH=/jessica-service
# Start the application
CMD ["python", "-m", "src.backend"] 
```
--------------------------------------------------------------------------------
/src/frontend/package.json:
--------------------------------------------------------------------------------
```json
{
  "name": "frontend",
  "private": true,
  "version": "0.0.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "tsc -b && vite build",
    "lint": "eslint .",
    "preview": "vite preview"
  },
  "dependencies": {
    "@emotion/react": "^11.14.0",
    "@emotion/styled": "^11.14.0",
    "@mui/icons-material": "^6.4.5",
    "@mui/lab": "^6.0.0-beta.29",
    "@mui/material": "^6.4.5",
    "axios": "^1.7.9",
    "react": "^19.0.0",
    "react-dom": "^19.0.0",
    "react-icons": "^5.5.0"
  },
  "devDependencies": {
    "@eslint/js": "^9.19.0",
    "@types/node": "^22.13.5",
    "@types/react": "^19.0.8",
    "@types/react-dom": "^19.0.3",
    "@vitejs/plugin-react": "^4.3.4",
    "autoprefixer": "^10.4.20",
    "eslint": "^9.19.0",
    "eslint-plugin-react-hooks": "^5.0.0",
    "eslint-plugin-react-refresh": "^0.4.18",
    "globals": "^15.14.0",
    "typescript": "~5.7.2",
    "typescript-eslint": "^8.22.0",
    "vite": "^6.1.0"
  }
}
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
[tool.poetry]
name = "elevenlabs-mcp"
version = "0.1.0"
description = "ElevenLabs Text-to-Speech MCP Integration for Cursor"
authors = ["Sebastian Georgi"]
license = "MIT"
readme = "README.md"
packages = [{include = "src"}]
[tool.poetry.dependencies]
python = "^3.11"
mcp = "^1.3.0"
fastapi = ">=0.111.0"
python-dotenv = "^1.0.1"
pyyaml = "^6.0.1"
elevenlabs = "^0.2.27"
websockets = "^12.0"
uvicorn = {extras = ["standard"], version = "^0.27.1"}
aiohttp = "^3.11.13"
starlette = ">=0.41.3"
sse-starlette = "^2.2.1"
[tool.poetry.group.dev.dependencies]
pytest = "^8.0.2"
pytest-asyncio = "^0.23.5"
black = "^24.2.0"
isort = "^5.13.2"
mypy = "^1.8.0"
ruff = "^0.2.2"
pre-commit = "^3.6.2"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.scripts]
elevenlabs-mcp = "src.mcp_binary.main:main"
start = "src.backend.__main__:main"
[tool.black]
line-length = 100
target-version = ["py311"]
[tool.isort]
profile = "black"
line_length = 100
multi_line_output = 3
[tool.mypy]
python_version = "3.11"
strict = true
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
[tool.ruff]
line-length = 100
target-version = "py311"
```
--------------------------------------------------------------------------------
/src/frontend/src/index.css:
--------------------------------------------------------------------------------
```css
/* Base styles for Material UI integration */
:root {
  font-family: 'Inter', 'Roboto', 'Helvetica', 'Arial', sans-serif;
  line-height: 1.5;
  font-weight: 400;
  font-synthesis: none;
  text-rendering: optimizeLegibility;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}
body {
  margin: 0;
  min-width: 320px;
  min-height: 100vh;
}
/* Reset some browser defaults */
* {
  box-sizing: border-box;
}
/* Focus styles for accessibility */
:focus {
  outline: 2px solid #6366f1;
  outline-offset: 2px;
}
/* Custom styles below */
a {
  font-weight: 500;
  color: #646cff;
  text-decoration: inherit;
}
a:hover {
  color: #535bf2;
}
h1 {
  font-size: 3.2em;
  line-height: 1.1;
}
button {
  border-radius: 8px;
  border: 1px solid transparent;
  padding: 0.6em 1.2em;
  font-size: 1em;
  font-weight: 500;
  font-family: inherit;
  background-color: #1a1a1a;
  cursor: pointer;
  transition: border-color 0.25s;
}
button:hover {
  border-color: #646cff;
}
button:focus,
button:focus-visible {
  outline: 4px auto -webkit-focus-ring-color;
}
@media (prefers-color-scheme: light) {
  :root {
    color: #213547;
    background-color: #ffffff;
  }
  a:hover {
    color: #747bff;
  }
  button {
    background-color: #f9f9f9;
  }
}
```
--------------------------------------------------------------------------------
/src/frontend/public/vite.svg:
--------------------------------------------------------------------------------
```
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
```
--------------------------------------------------------------------------------
/scripts/build-push-ecr.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
set -e
# Configurable variables
AWS_REGION=${AWS_REGION:-"eu-central-1"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
ENV=${ENV:-"prod"}
REPO_NAME="jessica"
# Check whether Colima is running; start it if not
if ! colima status 2>/dev/null | grep -q "running"; then
  echo "Colima is not running. Starting Colima..."
  colima start
fi
echo "Using ECR repository: $REPO_NAME"
echo "Using environment: $ENV"
# Fetch the repository URL
REPO_URL=$(aws ecr describe-repositories --repository-names $REPO_NAME --region $AWS_REGION --query 'repositories[0].repositoryUri' --output text)
if [ $? -ne 0 ]; then
  echo "Error: ECR repository not found or AWS CLI error"
  echo "Make sure the repository exists and your AWS credentials are correct."
  exit 1
fi
echo "ECR repository URL: $REPO_URL"
# Log in to ECR - adapted for Colima without a Docker credential helper
echo "Logging in to ECR..."
# Bypass the Docker credential helper by piping the password via --password-stdin
AWS_ECR_PASSWORD=$(aws ecr get-login-password --region $AWS_REGION)
echo "$AWS_ECR_PASSWORD" | docker login --username AWS --password-stdin $REPO_URL
# Build the Docker image
echo "Building Docker image: $REPO_URL:$IMAGE_TAG"
docker build -t $REPO_URL:$IMAGE_TAG .
# Tag the Docker image
echo "Tagging Docker image..."
docker tag $REPO_URL:$IMAGE_TAG $REPO_URL:$IMAGE_TAG
# Push the Docker image
echo "Pushing Docker image to ECR..."
docker push $REPO_URL:$IMAGE_TAG
echo "Done! Image was pushed successfully to $REPO_URL:$IMAGE_TAG."
```
--------------------------------------------------------------------------------
/src/frontend/vite.config.ts:
--------------------------------------------------------------------------------
```typescript
import { defineConfig, loadEnv } from 'vite'
import react from '@vitejs/plugin-react'
import path from 'path'
import fs from 'fs'
// Load environment variables from the root .env file
const loadRootEnv = () => {
  const rootEnvPath = path.resolve(__dirname, '../../.env')
  if (fs.existsSync(rootEnvPath)) {
    const envContent = fs.readFileSync(rootEnvPath, 'utf-8')
    const env: Record<string, string> = {}
    
    envContent.split('\n').forEach(line => {
      const match = line.match(/^\s*([\w.-]+)\s*=\s*(.*)?\s*$/)
      if (match && !line.startsWith('#')) {
        const key = match[1]
        let value = match[2] || ''
        if (value.startsWith('"') && value.endsWith('"')) {
          value = value.substring(1, value.length - 1)
        }
        env[key] = value
      }
    })
    
    return env
  }
  return {}
}
// https://vitejs.dev/config/
export default defineConfig(({ mode }) => {
  // Load env variables
  const env = loadEnv(mode, process.cwd())
  const rootEnv = loadRootEnv()
  
  // Combine env variables, with rootEnv taking precedence
  const combinedEnv = { ...env, ...rootEnv }
  
  // Get backend host and port from env
  const backendHost = combinedEnv.HOST || '127.0.0.1'
  const backendPort = combinedEnv.PORT || '9020'
  
  return {
    plugins: [react()],
    server: {
      proxy: {
        // Proxy API requests to the backend during development
        '/api': {
          target: `http://${backendHost}:${backendPort}`,
          changeOrigin: true,
          secure: false,
        },
      },
    },
    build: {
      // Output to the FastAPI static directory when building for production
      outDir: path.resolve(__dirname, '../backend/static'),
      emptyOutDir: true,
    },
  }
})
```
--------------------------------------------------------------------------------
/tests/backend/conftest.py:
--------------------------------------------------------------------------------
```python
"""
Pytest configuration and fixtures.
"""
import pytest
from unittest.mock import MagicMock, patch
from pathlib import Path
import tempfile
import shutil
import json
@pytest.fixture
def mock_elevenlabs():
    """Mock ElevenLabs API responses."""
    with (
        patch("elevenlabs.generate") as mock_generate,
        patch("elevenlabs.voices") as mock_voices,
        patch("elevenlabs.Models") as mock_models,
    ):
        # Mock generate function
        mock_generate.return_value = b"fake_audio_data"
        # Mock voices
        mock_voices.return_value = [
            MagicMock(voice_id="voice1", name="Test Voice 1"),
            MagicMock(voice_id="voice2", name="Test Voice 2"),
        ]
        # Mock models
        mock_models.return_value = [
            MagicMock(model_id="model1", name="Test Model 1"),
            MagicMock(model_id="model2", name="Test Model 2"),
        ]
        yield {"generate": mock_generate, "voices": mock_voices, "models": mock_models}
@pytest.fixture
def temp_config_dir():
    """Create a temporary config directory for tests."""
    temp_dir = tempfile.mkdtemp()
    temp_path = Path(temp_dir)
    # Create default config
    config = {
        "default_voice_id": "voice1",
        "default_model_id": "model1",
        "settings": {"auto_play": True, "save_audio": False, "use_streaming": False},
    }
    config_file = temp_path / "config.json"
    with open(config_file, "w") as f:
        json.dump(config, f)
    yield temp_path
    # Cleanup
    shutil.rmtree(temp_dir)
@pytest.fixture
def mock_subprocess():
    """Mock subprocess for audio playback."""
    with patch("subprocess.Popen") as mock_popen:
        mock_popen.return_value = MagicMock()
        yield mock_popen
```
--------------------------------------------------------------------------------
/src/backend/mcp_tools.py:
--------------------------------------------------------------------------------
```python
"""
MCP Tools for ElevenLabs TTS
This module defines the MCP tools that will be exposed to Cursor.
"""
import logging
import base64
from typing import Dict, Any
from .elevenlabs_client import ElevenLabsClient
from mcp.server.fastmcp import FastMCP
from .websocket import manager
from .routes import load_config
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Initialize ElevenLabs client
client = None  # We'll initialize this when registering tools
def register_mcp_tools(mcp_server: FastMCP, test_mode: bool = False) -> None:
    """Register MCP tools with the server."""
    global client
    client = ElevenLabsClient(test_mode=test_mode)
    @mcp_server.tool("speak_text")
    async def speak_text(text: str) -> Dict[str, Any]:
        """Convert text to speech using ElevenLabs.
        Args:
            text: The text to convert to speech
        Returns:
            A dictionary with the result of the operation
        """
        try:
            # Load current configuration
            config = load_config()
            voice_id = config["default_voice_id"]
            model_id = config["default_model_id"]
            logger.info(
                f"Converting text to speech with voice ID: {voice_id} and model ID: {model_id}"
            )
            # Generate audio using our client instance
            audio = await client.text_to_speech(text, voice_id, model_id)
            # Encode audio data as base64
            encoded_audio = base64.b64encode(audio).decode("utf-8")
            # Send to all connected clients via WebSocket
            await manager.broadcast_to_clients(
                {
                    "type": "audio_data",
                    "text": text,
                    "voice_id": voice_id,
                    "data": encoded_audio,
                }
            )
            return {
                "success": True,
                "message": "Text converted to speech and sent to clients",
                "streaming": False,
            }
        except Exception as e:
            logger.error(f"Error in speak_text: {e}")
            return {"success": False, "error": str(e)}
```
--------------------------------------------------------------------------------
/src/frontend/src/services/api.ts:
--------------------------------------------------------------------------------
```typescript
import axios from 'axios';
// Create an axios instance with default config
const api = axios.create({
  baseURL: '/api',
  headers: {
    'Content-Type': 'application/json',
  },
});
export interface Voice {
  voice_id: string;
  name: string;
}
export interface Model {
  model_id: string;
  name: string;
  description?: string;
}
export interface Config {
  default_voice_id: string;
  default_model_id: string;
  settings: {
    auto_play: boolean;
  };
}
export const apiService = {
  // Get all available voices
  getVoices: async (): Promise<Voice[]> => {
    const response = await api.get<Voice[]>('/voices');
    return response.data;
  },
  // Convert text to speech
  textToSpeech: async (text: string, voiceId?: string, modelId?: string): Promise<void> => {
    await api.post('/tts', { text, voice_id: voiceId, model_id: modelId });
  },
  // Get audio stream URL
  getAudioUrl: (blob: Blob): string => {
    return URL.createObjectURL(blob);
  },
  // Get all available models
  getModels: async (): Promise<Model[]> => {
    try {
      const response = await api.get<Model[]>('/models');
      return response.data;
    } catch (error) {
      console.error('Error fetching models:', error);
      return [];
    }
  },
  // Get current configuration
  getConfig: async (): Promise<Config> => {
    const response = await api.get<Config>('/config');
    return response.data;
  },
  // Update configuration
  updateConfig: async (config: Partial<Config>): Promise<Config> => {
    const response = await api.post<Config>('/config', config);
    return response.data;
  },
};
/**
 * Connect to the WebSocket server for streaming audio
 */
export const connectWebSocket = (
  onMessage: (event: MessageEvent) => void,
  onOpen?: () => void,
  onClose?: () => void,
  onError?: (event: Event) => void
): WebSocket => {
  const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
  const wsUrl = `${wsProtocol}//${window.location.hostname}:9020/ws`;
  
  const ws = new WebSocket(wsUrl);
  
  ws.onopen = () => {
    console.log('WebSocket connection established');
    if (onOpen) onOpen();
  };
  
  ws.onmessage = onMessage;
  
  ws.onclose = () => {
    console.log('WebSocket connection closed');
    if (onClose) onClose();
  };
  
  ws.onerror = (event) => {
    console.error('WebSocket error:', event);
    if (onError) onError(event);
  };
  
  return ws;
};
export default apiService; 
```
--------------------------------------------------------------------------------
/.dev-tools/prompts/prompt_pr.md:
--------------------------------------------------------------------------------
```markdown
# PR Description Generator
You are a specialized assistant for generating detailed and meaningful pull request descriptions based on the provided Git diff and associated commit messages.
## Purpose
The bot receives:
- Git diff showing the changes made compared to the branch into which the code will be merged
- Detailed commit messages that correspond to these changes
The pull request description should:
- Summarize the key changes introduced by the diff
- Highlight the value delivered
- Show how the changes address the issues or user stories mentioned in the commit messages
- Ensure all ticket numbers are appropriately tagged and included
## Key Aspects
### 1. Use of Git Diff and Commit Messages
- Analyze the Git diff to identify and summarize the significant changes
- Use the detailed commit messages to ensure all relevant information is included
### 2. Ticket Number Tagging
- All ticket numbers mentioned in the commit messages must be tagged
- Include all tagged tickets in the pull request description
### 3. Title and Structure
- **Title**:
  - Must be concise
  - Must not exceed GitHub's 72-character limit
  - Should prepend a relevant type (e.g., `feat`, `fix`)
  - Should append the main ticket number (e.g., [TKD-84])
- **Markdown Formatting**:
  - Format entire description using Markdown for clarity
  - Use proper headings, lists, and sections
- **Sections**:
  - Overview
  - Key Changes
  - Technical Details
  - Ticket References
### 4. Comprehensive Summary
- Provide meaningful summary combining:
  - Technical details
  - Overall impact of changes
- Ensure clear understanding of:
  - What has been done
  - Why it is important
## Example Structure
### Title
```markdown
feat: Implement user authentication [TKD-84]
```
### Description Template
```markdown
## Overview
Brief description of the changes and their purpose
## Key Changes
- Major change 1
- Major change 2
- Major change 3
## Technical Details
- Detailed technical change 1
- Detailed technical change 2
- Implementation approach details
## Impact
- Business value 1
- User benefit 1
- Performance improvement 1
## Testing
- Test scenario 1
- Test scenario 2
- Verification steps
```
## GitHub CLI Usage
### Single Command Approach
```bash
gh pr create --title "feat: Your title" --body "$(printf 'Your markdown content with proper formatting')" --base dev
```
### Two-Step Approach (Recommended)
```bash
# Step 1: Create PR with minimal content
gh pr create --title "feat: Your title" --body "Initial PR" --base dev
# Step 2: Update PR with formatted content
gh pr edit <PR_NUMBER> --body "$(cat << 'EOT'
## Overview
Your properly formatted
markdown content here
## Key Changes
- Point 1
- Point 2
EOT
)"
```
### Common Pitfalls to Avoid
- Don't use \n escape sequences in the --body argument
- Don't use single quotes for the body content
- Use heredoc (EOT) for multiline content
- Always preview the formatting before submitting
## Language Requirements
- Accept input in German or English
- Generate all output in English
- Use technical but clear language
- Maintain professional tone 
```
--------------------------------------------------------------------------------
/.github/workflows/python-backend-checks.yml:
--------------------------------------------------------------------------------
```yaml
name: Python Backend Checks
on:
  push:
    branches:
      - main
    paths:
      - 'src/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
# Cancel in-progress runs on the same branch
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  prepare-environment:
    name: 🔧 Prepare Environment
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-key.outputs.key }}
    steps:
      - uses: actions/checkout@v4
      
      - name: Generate cache key
        id: cache-key
        run: |
          echo "key=python-deps-${{ hashFiles('pyproject.toml', 'poetry.lock') }}" >> $GITHUB_OUTPUT
  code-quality:
    name: 🔍 Code Quality (Linting)
    needs: prepare-environment
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v4
    - name: Set up Python
      uses: actions/setup-python@v5
      with:
        python-version: '3.13'
    - name: Cache dependencies
      uses: actions/cache@v3
      with:
        path: |
          ~/.cache/pip
          ~/.cache/poetry
          .venv
        key: ${{ needs.prepare-environment.outputs.cache-key }}
        restore-keys: |
          python-deps-
    - name: Install Poetry
      run: |
        curl -sSL https://install.python-poetry.org | python3 -
    - name: Install dependencies
      run: |
        poetry install
    - name: Run linting
      run: |
        poetry run ruff check .
  code-style:
    name: 💅 Code Style (Formatting)
    needs: prepare-environment
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v4
    - name: Set up Python
      uses: actions/setup-python@v5
      with:
        python-version: '3.13'
    - name: Cache dependencies
      uses: actions/cache@v3
      with:
        path: |
          ~/.cache/pip
          ~/.cache/poetry
          .venv
        key: ${{ needs.prepare-environment.outputs.cache-key }}
        restore-keys: |
          python-deps-
    - name: Install Poetry
      run: |
        curl -sSL https://install.python-poetry.org | python3 -
    - name: Install dependencies
      run: |
        poetry install
    - name: Check formatting
      run: |
        poetry run ruff format . --check
  unit-tests:
    name: 🧪 Unit Tests
    needs: prepare-environment
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v4
    - name: Set up Python
      uses: actions/setup-python@v5
      with:
        python-version: '3.13'
    - name: Cache dependencies
      uses: actions/cache@v3
      with:
        path: |
          ~/.cache/pip
          ~/.cache/poetry
          .venv
        key: ${{ needs.prepare-environment.outputs.cache-key }}
        restore-keys: |
          python-deps-
    - name: Install Poetry
      run: |
        curl -sSL https://install.python-poetry.org | python3 -
    - name: Install dependencies
      run: |
        poetry install
    - name: Run tests
      run: |
        # Set dummy API key for testing
        echo "ELEVENLABS_API_KEY=dummy_key_for_testing" >> $GITHUB_ENV
        poetry run pytest tests/backend -v
      env:
        ELEVENLABS_API_KEY: dummy_key_for_testing 
```
--------------------------------------------------------------------------------
/.github/workflows/docker-build-and-push.yml:
--------------------------------------------------------------------------------
```yaml
name: Docker Build and Push
on:
  push:
    branches:
      - main
      - develop
    paths:
      - 'src/**'
      - 'Dockerfile'
      - '.github/workflows/docker-build-and-push.yml'
  workflow_dispatch:
# Cancel in-progress runs on the same branch
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  build-and-push:
    name: 🐳 Build and Push Docker Image
    runs-on: ubuntu-latest
    
    permissions:
      contents: read
      id-token: write
    
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      
      # Use a default role ARN initially to access S3
      - name: Configure AWS credentials for S3 access
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::927485958639:role/jessica-github-actions-deployment-role
          aws-region: eu-central-1
      
      - name: Fetch Terraform State
        id: terraform-state
        run: |
          # Install jq
          sudo apt-get update && sudo apt-get install -y jq
          
          # Download the state file
          aws s3 cp s3://georgi-io-terraform-state/services/jessica/terraform.tfstate ./terraform.tfstate
          
          # Extract values from state
          ROLE_ARN=$(jq -r '.outputs.github_actions_deployment_role_arn.value' terraform.tfstate)
          ECR_REPOSITORY=$(jq -r '.outputs.ecr_repository_name.value' terraform.tfstate)
          ECS_CLUSTER=$(jq -r '.outputs.ecs_cluster_name.value' terraform.tfstate)
          ECS_SERVICE=$(jq -r '.outputs.ecs_service_name.value' terraform.tfstate)
          
          # Set outputs
          echo "role_arn=${ROLE_ARN}" >> $GITHUB_OUTPUT
          echo "ecr_repository=${ECR_REPOSITORY}" >> $GITHUB_OUTPUT
          echo "ecs_cluster=${ECS_CLUSTER}" >> $GITHUB_OUTPUT
          echo "ecs_service=${ECS_SERVICE}" >> $GITHUB_OUTPUT
          
          # Clean up
          rm terraform.tfstate
      
      # Re-configure AWS credentials with the proper role from Terraform state
      - name: Configure AWS credentials for ECR
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ steps.terraform-state.outputs.role_arn }}
          aws-region: eu-central-1
      
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
      
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            ${{ steps.login-ecr.outputs.registry }}/${{ steps.terraform-state.outputs.ecr_repository }}:latest
            ${{ steps.login-ecr.outputs.registry }}/${{ steps.terraform-state.outputs.ecr_repository }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
      
      - name: Force new deployment of ECS service
        run: |
          aws ecs update-service --cluster ${{ steps.terraform-state.outputs.ecs_cluster }} --service ${{ steps.terraform-state.outputs.ecs_service }} --force-new-deployment
          echo "New deployment of ECS service initiated"
      
      - name: Set outputs for potential downstream jobs
        id: vars
        run: |
          echo "image=${{ steps.login-ecr.outputs.registry }}/${{ steps.terraform-state.outputs.ecr_repository }}:${{ github.sha }}" >> $GITHUB_OUTPUT 
```
--------------------------------------------------------------------------------
/src/frontend/src/assets/react.svg:
--------------------------------------------------------------------------------
```
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 
307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>
```
--------------------------------------------------------------------------------
/src/backend/websocket.py:
--------------------------------------------------------------------------------
```python
import asyncio
import json
import logging
import base64
from typing import Dict, Optional, Set, AsyncGenerator
import os
from fastapi import WebSocket, WebSocketDisconnect
from dotenv import load_dotenv
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables
load_dotenv()
WS_HOST = os.getenv("WS_HOST", "127.0.0.1")
PORT = int(os.getenv("PORT", "9020"))
class WebSocketManager:
    def __init__(self):
        self.active_connections: Set[WebSocket] = set()
        self.mcp_connection: Optional[WebSocket] = None
        logger.info(f"WebSocket manager initialized on {WS_HOST}:{PORT}")
    async def connect(self, websocket: WebSocket):
        await websocket.accept()
        self.active_connections.add(websocket)
        logger.info(f"New WebSocket connection: {websocket}")
    def disconnect(self, websocket: WebSocket):
        self.active_connections.discard(websocket)
        if self.mcp_connection == websocket:
            self.mcp_connection = None
            logger.info("MCP connection disconnected")
        logger.info(f"WebSocket disconnected: {websocket}")
    async def register_mcp(self, websocket: WebSocket):
        """Register a connection as the MCP binary connection"""
        self.mcp_connection = websocket
        logger.info(f"MCP binary registered: {websocket}")
        await self.broadcast_to_clients({"type": "mcp_status", "connected": True})
    async def send_to_mcp(self, message: Dict):
        """Send a message to the MCP binary"""
        if self.mcp_connection:
            await self.mcp_connection.send_text(json.dumps(message))
            logger.debug(f"Message sent to MCP: {message}")
        else:
            logger.warning("Attempted to send message to MCP, but no MCP connection is available")
    async def broadcast_to_clients(self, message: Dict):
        """Broadcast a message to all connected clients except MCP"""
        for connection in self.active_connections:
            if connection != self.mcp_connection:
                await connection.send_text(json.dumps(message))
        logger.debug(
            f"Broadcast message to {len(self.active_connections) - (1 if self.mcp_connection else 0)} clients"
        )
    async def handle_mcp_message(self, message: Dict):
        """Handle a message from the MCP binary"""
        message_type = message.get("type")
        if message_type == "tts_result":
            # Forward TTS result to all clients
            await self.broadcast_to_clients(message)
        elif message_type == "voice_list":
            # Forward voice list to all clients
            await self.broadcast_to_clients(message)
        elif message_type == "audio_chunk":
            # Forward audio chunk to all clients
            await self.broadcast_to_clients(message)
        elif message_type == "audio_complete":
            # Forward audio complete message to all clients
            await self.broadcast_to_clients(message)
        elif message_type == "error":
            # Forward error to all clients
            await self.broadcast_to_clients(message)
        else:
            logger.warning(f"Unknown message type from MCP: {message_type}")
    async def stream_audio_to_clients(
        self, audio_stream: AsyncGenerator[bytes, None], text: str, voice_id: str
    ):
        """Stream audio chunks to all connected clients."""
        try:
            # Send start message
            await self.broadcast_to_clients(
                {"type": "audio_start", "text": text, "voice_id": voice_id}
            )
            # Stream audio chunks
            chunk_count = 0
            async for chunk in audio_stream:
                chunk_count += 1
                # Encode chunk as base64 for JSON transmission
                encoded_chunk = base64.b64encode(chunk).decode("utf-8")
                # Send chunk to all clients
                await self.broadcast_to_clients(
                    {"type": "audio_chunk", "chunk_index": chunk_count, "data": encoded_chunk}
                )
                # Small delay to avoid overwhelming clients
                await asyncio.sleep(0.01)
            # Send completion message
            await self.broadcast_to_clients({"type": "audio_complete", "total_chunks": chunk_count})
            logger.info(f"Successfully streamed {chunk_count} audio chunks to clients")
        except Exception as e:
            logger.error(f"Error streaming audio to clients: {str(e)}")
            await self.broadcast_to_clients(
                {"type": "error", "message": f"Audio streaming error: {str(e)}"}
            )
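    # Note for client implementations (illustrative sketch only): each "audio_chunk"
    # message carries base64-encoded bytes, so a consumer reassembles the audio
    # roughly like this:
    #
    #     audio = bytearray()
    #     if msg["type"] == "audio_chunk":
    #         audio.extend(base64.b64decode(msg["data"]))
    #     elif msg["type"] == "audio_complete":
    #         pass  # all chunks received; `audio` now holds the complete audio payload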
# Create a singleton instance
manager = WebSocketManager()
async def websocket_endpoint(websocket: WebSocket):
    await manager.connect(websocket)
    try:
        while True:
            data = await websocket.receive_text()
            message = json.loads(data)
            # Check if this is an MCP registration message
            if message.get("type") == "register" and message.get("client") == "mcp":
                await manager.register_mcp(websocket)
                continue
            # If this is the MCP connection, handle its messages
            if websocket == manager.mcp_connection:
                await manager.handle_mcp_message(message)
            else:
                # This is a regular client, forward to MCP if needed
                if message.get("type") == "tts_request":
                    await manager.send_to_mcp(message)
    except WebSocketDisconnect:
        manager.disconnect(websocket)
    except Exception as e:
        logger.error(f"WebSocket error: {str(e)}")
        manager.disconnect(websocket)
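# Illustrative registration handshake for this endpoint (a sketch, not part of the
# service; it assumes the MCP binary uses the third-party `websockets` package):
#
#     import asyncio, json, websockets
#
#     async def register_as_mcp(url: str = "ws://127.0.0.1:9020/ws"):
#         async with websockets.connect(url) as ws:
#             await ws.send(json.dumps({"type": "register", "client": "mcp"}))
#             async for raw in ws:
#                 print("from backend:", json.loads(raw))
#
#     # asyncio.run(register_as_mcp())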
```
--------------------------------------------------------------------------------
/.dev-tools/prompts/prompt_user_story.md:
--------------------------------------------------------------------------------
```markdown
# User Story Generation
You are a specialized assistant for creating well-structured user stories as GitHub issues. Your task is to analyze the user's input, the existing codebase, and create a comprehensive user story that combines both business and technical perspectives.
## Story Structure
### Part 1: Business Requirements (Product Manager View)
#### Story Description
- Clear description of what needs to be done
- Written from user's perspective ("As a user, I want to...")
- Clear business value and purpose
- Target audience/user group
#### Acceptance Criteria
- List of specific, testable criteria
- Clear conditions for story completion
- Edge cases and error scenarios
- User experience requirements
### Part 2: Technical Analysis (Engineering Manager View)
#### Implementation Analysis
- Analysis of existing codebase impact
- Identification of affected components
- Dependencies and prerequisites
- Potential risks or challenges
#### Implementation Approach
- Suggested technical solution
- Architecture considerations
- Required changes to existing code
- New components or services needed
- Estimated complexity
## Output Format
The story should be formatted in Markdown with proper indentation and spacing:
```markdown
# User Story: [Title]
## Business Requirements
### Description
[User story description]
### Acceptance Criteria
- [Main criterion]
  - [Sub-criterion 1]
  - [Sub-criterion 2]
- [Another criterion]
  - [Sub-criterion]
- [Simple criterion without sub-points]
## Technical Analysis
### Implementation Analysis
- [Analysis point]
  - [Supporting detail]
- [Another analysis point]
  - [Supporting detail]
### Implementation Approach
#### [Component/Layer Name]
- [Implementation detail]
  - [Sub-detail]
  - [Sub-detail]
#### [Another Component/Layer]
1. **[Step Title]:**
   - [Detail]
   - [Detail]
```
## GitHub Integration Guide
### Default Project Information
- Project: "Sales1 Board" (georgi-io organization)
- Project Number: 1
- Project ID: PVT_kwDOBDnFac4AxIdX
### Label Management
1. Check if required labels exist:
   ```bash
   gh label list
   ```
2. Create missing labels if needed:
   ```bash
   gh label create <name> --color <color> --description "<description>"
   ```
3. Common Labels:
   - `architecture` - Architecture and system design
   - `planning` - Planning and conceptual work
   - `documentation` - Documentation updates
   - `low-priority` - Low priority tasks
### Issue Creation and Integration Steps
1. Create issue with initial content:
   ```bash
   gh issue create --title "<title>" --body-file <file>
   ```
2. Add to project:
   ```bash
   gh issue edit <number> --add-project "Sales1 Board"
   ```
3. Add labels:
   ```bash
   gh issue edit <number> --add-label "<label1>" --add-label "<label2>"
   ```
4. Get project item ID (required for status update):
   ```bash
   # List all items and their IDs
   gh api graphql -f query='
   query {
     organization(login: "georgi-io") {
       projectV2(number: 1) {
         items(first: 20) {
           nodes {
             id
             content {
               ... on Issue {
                 title
                 number
               }
             }
           }
         }
       }
     }
   }'
   ```
5. Set status:
   ```bash
   # Status field ID: PVTSSF_lADOBDnFac4AxIdXzgnSuew
   # Status options:
   # - Todo: f75ad846
   # - In Progress: 47fc9ee4
   # - Done: 98236657
   
   gh api graphql -f query='
   mutation {
     updateProjectV2ItemFieldValue(
       input: {
         projectId: "PVT_kwDOBDnFac4AxIdX"
         fieldId: "PVTSSF_lADOBDnFac4AxIdXzgnSuew"
         itemId: "<item-id from step 4>"
         value: { singleSelectOptionId: "<status-id>" }
       }
     ) {
       projectV2Item { id }
     }
   }'
   ```
## Notes
- Always verify the issue is created correctly
- Check if labels exist before creating them
- Ensure the issue appears in the project board
- Verify the status is set correctly
## Formatting Rules
1. **Indentation**
   - Use 2 spaces for each level of indentation in lists
   - Use 3 spaces for code block content indentation
   - Maintain consistent spacing between sections
2. **Code Blocks**
   - Always specify the language for code blocks
   - Indent code properly within the blocks
   - Use proper escaping for special characters
3. **Lists**
   - Use proper indentation for nested lists
   - Add blank lines between major sections
   - Maintain consistent bullet point style
4. **Headers**
   - Use proper header hierarchy (H1 > H2 > H3 > H4)
   - Add blank lines before and after headers
   - Keep header text concise and descriptive
## Process Steps
1. Gather requirements
2. Analyze technical implications
3. Structure the story following the template
4. Apply proper formatting and indentation
5. Review and validate markdown rendering
6. Create temporary file in .dev-tools/scripts/temp_output/
   ```bash
   # Store issue content in temp directory
   TEMP_FILE=".dev-tools/scripts/temp_output/issue_$(date +%Y%m%d_%H%M%S).md"
   ```
7. Create GitHub issue with formatted content
8. Clean up temporary files:
   ```bash
   # Remove any temporary files created during the process
   rm -f .dev-tools/scripts/temp_output/issue_*.md
   rm -f .temp_*.md
   rm -f *.temp
   rm -f *.tmp
   ```
## Language Requirements
- Accept input in German or English
- Generate output in English
- Use clear, concise language
- Maintain professional tone
## Tips for Quality Stories
- Be specific and measurable
- Include both happy and error paths
- Consider performance implications
- Think about testing requirements
- Include security considerations
- Consider scalability aspects
## Repository Analysis
When analyzing the codebase:
- Check for similar existing features
- Identify affected components
- Look for potential conflicts
- Consider architecture patterns
- Review existing implementations
## End-to-End Workflow
1. Gather user input for business requirements
2. Analyze codebase for technical implications
3. Generate structured story in Markdown
4. Show preview to user for confirmation
5. List available GitHub projects
6. Get user's project selection
7. Create GitHub issue with confirmed content 
```
--------------------------------------------------------------------------------
/src/backend/elevenlabs_client.py:
--------------------------------------------------------------------------------
```python
from typing import Optional, Dict, List, AsyncGenerator
import httpx
import os
from fastapi import HTTPException
import logging
import elevenlabs
from elevenlabs import generate, voices
import asyncio
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ElevenLabsClient:
    def __init__(self, test_mode: bool = False):
        """Initialize the ElevenLabs client.
        Args:
            test_mode: If True, use mock responses for testing
        """
        self.test_mode = test_mode
        if test_mode:
            self.api_key = "test_key"
        else:
            self.api_key = os.getenv("ELEVENLABS_API_KEY")
            if not self.api_key:
                raise ValueError("ELEVENLABS_API_KEY environment variable is not set")
            # Set the API key for the elevenlabs library as well
            elevenlabs.set_api_key(self.api_key)
        self.base_url = "https://api.elevenlabs.io/v1"
        self.headers = {"Accept": "application/json", "xi-api-key": self.api_key}
    def _get_mock_audio(self, text: str) -> bytes:
        """Generate mock audio data for testing."""
        return f"Mock audio for: {text}".encode()
    def _get_mock_voices(self) -> List[Dict]:
        """Return mock voices for testing."""
        return [
            {"voice_id": "mock_voice_1", "name": "Mock Voice 1"},
            {"voice_id": "mock_voice_2", "name": "Mock Voice 2"},
        ]
    def _get_mock_models(self) -> List[Dict]:
        """Return mock models for testing."""
        return [
            {"model_id": "mock_model_1", "name": "Mock Model 1"},
            {"model_id": "mock_model_2", "name": "Mock Model 2"},
        ]
    async def text_to_speech(
        self, text: str, voice_id: str, model_id: Optional[str] = None
    ) -> bytes:
        """Convert text to speech."""
        if self.test_mode:
            return self._get_mock_audio(text)
        try:
            # Use the elevenlabs library directly for better compatibility
            audio = generate(text=text, voice=voice_id, model=model_id or "eleven_monolingual_v1")
            return audio
        except Exception as e:
            logger.error(f"Text-to-speech conversion failed: {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"Text-to-speech conversion failed: {str(e)}",
            )
    async def get_voices(self) -> List[Dict]:
        """Fetch available voices."""
        if self.test_mode:
            return self._get_mock_voices()
        async with httpx.AsyncClient() as client:
            try:
                response = await client.get(f"{self.base_url}/voices", headers=self.headers)
                if response.status_code != 200:
                    error_detail = response.json() if response.content else "No error details"
                    logger.error(f"Failed to fetch voices: {error_detail}")
                    raise HTTPException(
                        status_code=response.status_code,
                        detail=f"Failed to fetch voices from ElevenLabs API: {error_detail}",
                    )
                data = response.json()
                return data["voices"]
            except httpx.RequestError as e:
                logger.error(f"Connection error when fetching voices: {str(e)}")
                raise HTTPException(
                    status_code=500, detail=f"Failed to connect to ElevenLabs API: {str(e)}"
                )
    async def get_models(self) -> List[Dict]:
        """Fetch available models."""
        if self.test_mode:
            return self._get_mock_models()
        async with httpx.AsyncClient() as client:
            try:
                response = await client.get(f"{self.base_url}/models", headers=self.headers)
                if response.status_code != 200:
                    error_detail = response.json() if response.content else "No error details"
                    logger.error(f"Failed to fetch models: {error_detail}")
                    raise HTTPException(
                        status_code=response.status_code,
                        detail=f"Failed to fetch models from ElevenLabs API: {error_detail}",
                    )
                data = response.json()
                models = []
                for model in data:
                    models.append(
                        {"model_id": model.get("model_id", ""), "name": model.get("name", "")}
                    )
                return models
            except httpx.RequestError as e:
                logger.error(f"Connection error when fetching models: {str(e)}")
                raise HTTPException(
                    status_code=500, detail=f"Failed to connect to ElevenLabs API: {str(e)}"
                )
    async def text_to_speech_stream(
        self, text: str, voice_id: str, model_id: Optional[str] = None
    ) -> AsyncGenerator[bytes, None]:
        """Stream text to speech conversion."""
        if self.test_mode:
            # In test mode, yield mock audio in chunks
            mock_audio = self._get_mock_audio(text)
            chunk_size = 1024
            for i in range(0, len(mock_audio), chunk_size):
                yield mock_audio[i : i + chunk_size]
                await asyncio.sleep(0.1)  # Simulate streaming delay
            return
        # Use the elevenlabs library for streaming in production
        if not self.api_key:
            raise ValueError("API key is required for streaming")
        elevenlabs.set_api_key(self.api_key)
        try:
            # generate(..., stream=True) returns an iterator of audio chunks;
            # elevenlabs.stream() would play the audio locally instead of returning it.
            audio_stream = generate(
                text=text,
                voice=voice_id,
                model=model_id or "eleven_monolingual_v1",
                stream=True,
                latency=3,
            )
            for chunk in audio_stream:
                if isinstance(chunk, bytes):
                    yield chunk
                    await asyncio.sleep(0.01)
        except Exception as e:
            logger.error(f"Error during text-to-speech streaming: {str(e)}")
            raise HTTPException(
                status_code=500, detail=f"Failed to stream text to speech: {str(e)}"
            )
    def generate_speech(self, text: str, voice_id: str = None) -> bytes:
        """Generate speech from text using ElevenLabs API."""
        return generate(text=text, voice=voice_id)
    def list_voices(self):
        """List available voices from ElevenLabs API."""
        return voices()
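# Minimal usage sketch (illustrative; assumes an asyncio entry point elsewhere).
# test_mode=True keeps everything offline by returning mock data instead of
# calling the ElevenLabs API:
#
#     import asyncio
#
#     async def _demo():
#         client = ElevenLabsClient(test_mode=True)
#         print(await client.get_voices())
#         audio = await client.text_to_speech("Hello", voice_id="mock_voice_1")
#         print(f"{len(audio)} bytes of (mock) audio")
#
#     # asyncio.run(_demo())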
```
--------------------------------------------------------------------------------
/src/backend/routes.py:
--------------------------------------------------------------------------------
```python
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import Optional, List, Dict, Any
import json
import base64
from pathlib import Path
from .elevenlabs_client import ElevenLabsClient
from .websocket import manager
from fastapi.responses import StreamingResponse
# Use versioned API prefix to match the auth-service pattern
router = APIRouter(prefix="/api/v1", tags=["TTS"])
client = ElevenLabsClient()
# Configuration paths
CONFIG_DIR = Path.home() / ".config" / "elevenlabs-mcp"
CONFIG_FILE = CONFIG_DIR / "config.json"
# Create config directory if it doesn't exist
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
# Default configuration
DEFAULT_CONFIG = {
    "default_voice_id": "cgSgspJ2msm6clMCkdW9",  # Jessica's voice ID
    "default_model_id": "eleven_flash_v2_5",
    "settings": {
        "auto_play": True,
    },
}
class TTSRequest(BaseModel):
    text: str
    voice_id: Optional[str] = None
    model_id: Optional[str] = None
class MCPRequest(BaseModel):
    command: str
    params: Dict[str, Any]
class ConfigRequest(BaseModel):
    default_voice_id: Optional[str] = None
    default_model_id: Optional[str] = None
    settings: Optional[Dict] = None
class Voice(BaseModel):
    voice_id: str
    name: str
class Model(BaseModel):
    model_id: str
    name: str
def load_config() -> Dict[str, Any]:
    """Load configuration from file or return default."""
    if CONFIG_FILE.exists():
        try:
            with open(CONFIG_FILE, "r") as f:
                return json.load(f)
        except Exception:
            return DEFAULT_CONFIG
    else:
        # Save default config if it doesn't exist
        save_config(DEFAULT_CONFIG)
        return DEFAULT_CONFIG
def save_config(config: Dict[str, Any]) -> None:
    """Save configuration to file."""
    with open(CONFIG_FILE, "w") as f:
        json.dump(config, f, indent=2)
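# For reference, the persisted ~/.config/elevenlabs-mcp/config.json mirrors
# DEFAULT_CONFIG above, e.g.:
#
#     {
#       "default_voice_id": "cgSgspJ2msm6clMCkdW9",
#       "default_model_id": "eleven_flash_v2_5",
#       "settings": {"auto_play": true}
#     }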
@router.get("/voices", response_model=List[Voice])
async def get_voices():
    """Get all available voices."""
    try:
        voices_data = await client.get_voices()
        return [Voice(voice_id=v["voice_id"], name=v["name"]) for v in voices_data]
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to fetch voices: {str(e)}")
@router.get("/models", response_model=List[Model])
async def get_models():
    """Get all available models."""
    try:
        models_data = await client.get_models()
        return [Model(model_id=m["model_id"], name=m["name"]) for m in models_data]
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to fetch models: {str(e)}")
@router.post("/tts")
async def text_to_speech(request: TTSRequest):
    """Convert text to speech."""
    try:
        # Load configuration
        config = load_config()
        # Use provided voice_id/model_id or default from config
        voice_id = request.voice_id or config["default_voice_id"]
        model_id = request.model_id or config["default_model_id"]
        # Generate audio using our client
        audio = await client.text_to_speech(text=request.text, voice_id=voice_id, model_id=model_id)
        # Send audio via WebSocket to all connected clients
        encoded_audio = base64.b64encode(audio).decode("utf-8")
        await manager.broadcast_to_clients(
            {
                "type": "audio_data",
                "text": request.text,
                "voice_id": voice_id,
                "data": encoded_audio,
            }
        )
        return {}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to convert text to speech: {str(e)}")
@router.post("/tts/stream")
async def text_to_speech_stream(request: TTSRequest):
    """Stream text to speech conversion."""
    try:
        # Load configuration
        config = load_config()
        # Use provided voice_id/model_id or default from config
        voice_id = request.voice_id or config["default_voice_id"]
        model_id = request.model_id or config["default_model_id"]
        # Generate audio stream using our client
        audio_stream = client.text_to_speech_stream(
            text=request.text, voice_id=voice_id, model_id=model_id
        )
        # Return audio as streaming response
        return StreamingResponse(
            audio_stream,
            media_type="audio/mpeg",
            headers={
                "Content-Disposition": "attachment; filename=speech.mp3",
                "Cache-Control": "no-cache",
            },
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to stream text to speech: {str(e)}")
@router.post("/mcp")
async def handle_mcp_request(request: MCPRequest) -> Dict:
    """Handle MCP requests from the frontend."""
    try:
        if request.command == "speak-text":
            # Send a message to the MCP binary via WebSocket
            await manager.send_to_mcp(
                {
                    "type": "tts_request",
                    "text": request.params.get("text", ""),
                    "voice_id": request.params.get("voice_id"),
                }
            )
            return {"status": "request_sent"}
        elif request.command == "list-voices":
            # Get voices from ElevenLabs API
            voices = await client.get_voices()
            formatted_voices = [
                {"voice_id": voice["voice_id"], "name": voice["name"]} for voice in voices
            ]
            # Send voice list to MCP binary
            await manager.send_to_mcp({"type": "voice_list", "voices": formatted_voices})
            return {"status": "success", "voices": formatted_voices}
        elif request.command == "get-mcp-status":
            # Check if MCP is connected
            return {"status": "success", "mcp_connected": manager.mcp_connection is not None}
        else:
            # Forward other commands to MCP binary
            await manager.send_to_mcp(
                {"type": "command", "command": request.command, "params": request.params}
            )
            return {"status": "command_sent"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
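# Example request body for the /mcp endpoint above (illustrative):
#
#     {"command": "speak-text", "params": {"text": "Hello", "voice_id": "cgSgspJ2msm6clMCkdW9"}}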
@router.get("/config")
async def get_config():
    """Get current configuration."""
    try:
        config = load_config()
        return config
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get configuration: {str(e)}")
@router.post("/config")
async def update_config(request: ConfigRequest):
    """Update configuration."""
    try:
        current_config = load_config()
        # Update configuration with provided data
        if request.default_voice_id is not None:
            current_config["default_voice_id"] = request.default_voice_id
        if request.default_model_id is not None:
            current_config["default_model_id"] = request.default_model_id
        if request.settings is not None:
            if "auto_play" in request.settings:
                current_config["settings"]["auto_play"] = request.settings["auto_play"]
        # Save updated configuration
        save_config(current_config)
        # Notify MCP about config changes
        await manager.send_to_mcp({"type": "config_update", "config": current_config})
        return current_config
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to update configuration: {str(e)}")
```
--------------------------------------------------------------------------------
/src/backend/app.py:
--------------------------------------------------------------------------------
```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import yaml
import os
from dotenv import load_dotenv
from pathlib import Path
from .routes import router
from .websocket import websocket_endpoint
from mcp.server.fastmcp import FastMCP
import mcp.server.sse
import logging
from .mcp_tools import register_mcp_tools
from fastapi import Request
# Load environment variables
load_dotenv()
# Get port configurations from environment variables
PORT = int(os.getenv("PORT", 9020))
HOST = os.getenv("HOST", "localhost")
ROOT_PATH = os.getenv("ROOT_PATH", "")
MCP_PORT = int(os.getenv("MCP_PORT", 9022))
# Configure logging
logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
logger = logging.getLogger(__name__)
# Normalize ROOT_PATH
if ROOT_PATH:
    # Ensure it starts with /
    if not ROOT_PATH.startswith("/"):
        ROOT_PATH = f"/{ROOT_PATH}"
    # Ensure it does not end with /
    if ROOT_PATH.endswith("/"):
        ROOT_PATH = ROOT_PATH[:-1]
    logger.info(f"Using ROOT_PATH: {ROOT_PATH}")
app = FastAPI(
    title="Jessica TTS MCP",
    description="Text-to-Speech service using ElevenLabs API",
    version="0.1.0",
    root_path=ROOT_PATH,
    docs_url="/docs",
    openapi_url="/openapi.json",
    redoc_url="/redoc",
)
"""
Path Rewriting Concept
This service uses a path-rewriting middleware to handle both direct ALB access 
and API Gateway access with a ROOT_PATH prefix.
The middleware strips the ROOT_PATH prefix from incoming requests before they're processed,
allowing FastAPI to use its built-in root_path parameter correctly and eliminating the need
for duplicate routes.
For example:
- External request to /jessica-service/api/v1/tts
- Middleware rewrites to /api/v1/tts
- FastAPI processes the request using its normal routing
- FastAPI adds ROOT_PATH in generated URLs (docs, redirects, etc.)
This approach simplifies the codebase and ensures consistency between local development
and production environments.
"""
# Path rewriting middleware - MUST be first in the middleware chain
@app.middleware("http")
async def rewrite_path_middleware(request: Request, call_next):
    """
    Middleware that rewrites incoming request paths by removing the ROOT_PATH prefix.
    This allows FastAPI to handle both direct requests and requests coming through
    API Gateway or ALB with a path prefix.
    """
    original_path = request.url.path
    # Debug output of original path
    logger.debug(f"Original path: {original_path}")
    # Check for double service paths (e.g. /jessica-service/jessica-service/)
    double_prefix = False
    if ROOT_PATH and original_path.startswith(ROOT_PATH):
        remaining_path = original_path[len(ROOT_PATH) :]
        if remaining_path.startswith(ROOT_PATH):
            # We found a double prefix
            double_prefix = True
            logger.debug(f"Detected double prefix: {original_path}")
            # Remove both instances of the prefix
            new_path = remaining_path[len(ROOT_PATH) :]
            # Ensure the path starts with /
            if not new_path.startswith("/"):
                new_path = "/" + new_path
            # Update the request scope
            request.scope["path"] = new_path
            request.scope["root_path"] = ROOT_PATH
            logger.debug(
                f"Path rewritten (double prefix): {original_path} -> {new_path} (root_path={ROOT_PATH})"
            )
            return await call_next(request)
    # Only rewrite if ROOT_PATH is set and path starts with it
    if ROOT_PATH and original_path.startswith(ROOT_PATH) and not double_prefix:
        # Remove the ROOT_PATH prefix from the path
        new_path = original_path[len(ROOT_PATH) :]
        # Ensure the path starts with a slash
        if not new_path.startswith("/"):
            new_path = "/" + new_path
        # Create modified request scope with new path
        request.scope["path"] = new_path
        # Update the root_path in the scope
        request.scope["root_path"] = ROOT_PATH
        logger.debug(f"Path rewritten: {original_path} -> {new_path} (root_path={ROOT_PATH})")
    # Process the request with the rewritten path
    return await call_next(request)
# CORS middleware configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, this should be restricted
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Logging middleware (after path rewriting)
@app.middleware("http")
async def log_requests(request: Request, call_next):
    """Log information about all incoming requests."""
    logger.info(f"Request path: {request.url.path}")
    logger.info(f"Request method: {request.method}")
    logger.info(f"ROOT_PATH: {ROOT_PATH}")
    response = await call_next(request)
    logger.info(f"{request.method} {request.url.path} - {response.status_code}")
    return response
# Load configuration
def load_config():
    config_path = Path("config.yaml")
    if config_path.exists():
        with open(config_path, "r") as f:
            return yaml.safe_load(f)
    return {"voices": {}, "settings": {}}
config = load_config()
# Include our API routes
app.include_router(router)
# Add WebSocket endpoint
app.add_websocket_route("/ws", websocket_endpoint)
# Initialize MCP server
mcp_server = FastMCP("Jessica MCP Service")
register_mcp_tools(mcp_server)
# We no longer start a separate FastMCP server in its own thread;
# instead, the SSE endpoints are integrated directly into our FastAPI app.
# Create the SSE transport layer
sse_transport = mcp.server.sse.SseServerTransport("/messages/")
@app.get("/sse")
async def handle_sse(request: Request):
    """Der SSE-Endpunkt für MCP-Kommunikation"""
    async with sse_transport.connect_sse(request.scope, request.receive, request._send) as streams:
        await mcp_server._mcp_server.run(
            streams[0],
            streams[1],
            mcp_server._mcp_server.create_initialization_options(),
        )
@app.post("/messages/{path:path}")
async def handle_messages(request: Request, path: str):
    """Weiterleitung der Messages an den SSE-Transport"""
    return await sse_transport.handle_post_message(request.scope, request.receive, request._send)
# Start the FastAPI server in a background thread when the app starts
@app.on_event("startup")
async def startup_event():
    # Log the server URLs
    logger.info(f"Backend server listening on {HOST}:{PORT}{ROOT_PATH}")
    logger.info(f"MCP server integrated on {ROOT_PATH}/sse")
@app.get("/health")
async def jessica_service_health_check():
    return {
        "status": "ok",
        "service": "jessica-service",
        "root_path": ROOT_PATH,
        "elevenlabs_api_key": bool(os.getenv("ELEVENLABS_API_KEY")),
        "config_loaded": bool(config),
        "mcp_enabled": True,
    }
# Define the catch-all route only after the specific routes above
@app.get("/{path:path}")
async def catch_all(path: str, request: Request):
    """
    Catch-all route for debugging and redirecting misrouted requests.
    This route helps with debugging path issues that might occur with various
    proxy configurations and ROOT_PATH settings.
    """
    logger.error(f"DEBUG-CATCHALL: Received request for path: /{path}, full URL: {request.url}")
    logger.error(f"DEBUG-CATCHALL: Headers: {request.headers}")
    # API documentation should be available at /docs when ROOT_PATH is handled correctly
    if path == "docs" or path == "redoc" or path == "openapi.json":
        logger.error(
            f"Documentation URL accessed incorrectly as /{path} - should be at {ROOT_PATH}/docs"
        )
    return {"message": f"Received request for /{path}"}
```
--------------------------------------------------------------------------------
/CHECKLIST.md:
--------------------------------------------------------------------------------
```markdown
# Migration to fastapi-mcp and SSE Integration Checklist
## Starting Point and Problem Analysis
- [ ] **Understand the current architecture**
  - [ ] API Gateway → VPC Link → ALB → ECS/Fargate services
  - [ ] Review the current routing configuration for the Jessica service
  - [ ] Analyze the existing WebSocket implementation
- [ ] **SSE problem analysis**
  - [ ] Document why API Gateway is problematic for SSE (the REST API Gateway supports neither native streaming nor chunked transfer)
  - [ ] Identify the current traffic flow for streaming connections
  - [ ] Capture potential timeouts and connection issues
## Changes in the Central Infrastructure Repository
- [ ] **DNS configuration**
  - [ ] Create a new DNS record `mcp.run.georgi.io` in Route53
  - [ ] Set up a CNAME or A record pointing to the existing ALB
  - [ ] Verify DNS propagation after the change
- [ ] **ACM certificate**
  - [ ] Check the existing ACM certificate for `*.georgi.io`
  - [ ] Make sure it covers `mcp.run.georgi.io`, or extend it
  - [ ] Attach the certificate to the ALB
- [ ] **Security groups for the ALB**
  - [ ] Adjust the ALB security group for public access
  - [ ] Restrict access specifically to port 443 (HTTPS)
  - [ ] Optional: add IP-based restrictions
  ```hcl
  resource "aws_security_group_rule" "alb_ingress_sse" {
    security_group_id = "${var.alb_security_group_id}"
    type              = "ingress"
    from_port         = 443
    to_port           = 443
    protocol          = "tcp"
    cidr_blocks       = ["0.0.0.0/0"]
    description       = "Allow HTTPS access from internet for SSE endpoints"
  }
  ```
- [ ] **ALB listener**
  - [ ] Configure an HTTPS listener for the host `mcp.run.georgi.io`
  - [ ] Assign the certificate to the listener
  - [ ] Define the default action for this host
## Changes in the Jessica Repository (local changes)
- [ ] **Target group adjustments**
  - [ ] Review the existing target groups (`aws_lb_target_group.api` and `aws_lb_target_group.ws`)
  - [ ] Create a new target group for the SSE endpoints
  ```hcl
  resource "aws_lb_target_group" "sse" {
    name        = "${var.service_name}-sse"
    port        = var.container_port
    protocol    = "HTTP"
    vpc_id      = var.vpc_id
    target_type = "ip"
    
    health_check {
      enabled             = true
      protocol            = "HTTP"
      path                = "/health"
      port                = "traffic-port"
      healthy_threshold   = 3
      unhealthy_threshold = 3
      timeout             = 5
      interval            = 30
      matcher             = "200"
    }
  }
  ```
- [ ] **ALB listener rules**
  - [ ] Create a new listener rule for the SSE endpoints
  ```hcl
  resource "aws_lb_listener_rule" "sse_https" {
    listener_arn = var.central_alb_https_listener_arn
    priority     = 90  # Higher priority than the existing rules
    
    action {
      type             = "forward"
      target_group_arn = aws_lb_target_group.sse.arn
    }
    
    condition {
      path_pattern {
        values = ["/jessica-service/mcp/sse*"]
      }
    }
    
    condition {
      host_header {
        values = ["mcp.run.georgi.io"]
      }
    }
  }
  ```
- [ ] **ECS service adjustment**
  - [ ] Add a load balancer block for the new target group
  ```hcl
  load_balancer {
    target_group_arn = aws_lb_target_group.sse.arn
    container_name   = var.service_name
    container_port   = var.container_port
  }
  ```
- [ ] **Code migration to fastapi-mcp**
  - [ ] Migrate the codebase from FastMCP to fastapi-mcp
  - [ ] Read BASE_PATH from the .env environment variable
  - [ ] Implement API endpoints under `${BASE_PATH}/api`
  - [ ] Implement SSE endpoints under `${BASE_PATH}/mcp/sse`
- [ ] **FastAPI app configuration**
  - [ ] Set up dynamic BASE_PATH configuration in the FastAPI application
  ```python
   # .env file:
  # BASE_PATH=/jessica-service
  
   import os
   from dotenv import load_dotenv
   from fastapi import FastAPI, APIRouter
   from fastapi.responses import StreamingResponse
  
  load_dotenv()
  
  base_path = os.getenv("BASE_PATH", "")
  
  app = FastAPI(
      title="Jessica MCP API",
      description="Jessica MCP API with SSE support",
       root_path=base_path  # from the .env environment variable
  )
  
   # API endpoints under /api
  api_router = APIRouter(prefix="/api")
  
   # MCP endpoints under /mcp
  mcp_router = APIRouter(prefix="/mcp")
  
   # SSE endpoint under /mcp/sse
  @mcp_router.get("/sse/stream")
  async def stream_events():
      async def event_generator():
           # SSE implementation
          yield "data: Event message\n\n"
          
      return StreamingResponse(
          event_generator(),
          media_type="text/event-stream",
          headers={
              "Cache-Control": "no-cache",
              "Connection": "keep-alive",
          }
      )
  
   # Register the routers
  app.include_router(api_router)
  app.include_router(mcp_router)
  ```
- [ ] **SSE implementation**
  - [ ] Implement Server-Sent Events endpoints under `/mcp/sse`
  - [ ] Use the correct Content-Type headers and formatting
  ```python
   # Already included in the mcp_router above
  @mcp_router.get("/sse/stream")
  async def stream_events():
      async def event_generator():
           # SSE implementation
          yield "data: Event message\n\n"
          
      return StreamingResponse(
          event_generator(),
          media_type="text/event-stream",
          headers={
              "Cache-Control": "no-cache",
              "Connection": "keep-alive",
          }
      )
  ```
## Testing and Validation
- [ ] **Local testing of the FastAPI app**
  - [ ] Test different BASE_PATH values in .env
  - [ ] Test the SSE endpoints with curl/Postman (a Python sketch follows below)
  ```bash
  # With BASE_PATH=/jessica-service in .env
  curl -N http://localhost:8000/jessica-service/mcp/sse/stream
  
  # With BASE_PATH=/test in .env
  curl -N http://localhost:8000/test/mcp/sse/stream
  ```
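  A Python alternative to the curl check (illustrative sketch; assumes the `httpx` package is installed and the same BASE_PATH as above):
  ```python
   import httpx

   # Connect to the SSE stream and print raw event lines as they arrive
   with httpx.stream(
       "GET", "http://localhost:8000/jessica-service/mcp/sse/stream", timeout=None
   ) as response:
       for line in response.iter_lines():
           if line:
               print(line)
  ```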
- [ ] **Infrastructure testing**
  - [ ] Test API Gateway access for the regular endpoints
  - [ ] Test direct ALB access for the SSE endpoints
  ```bash
  # Regular API Gateway access (API endpoints)
  curl https://api.georgi.io/jessica-service/api/v1/endpoint
  
  # Direct SSE access via the new DNS name
  curl -N https://mcp.run.georgi.io/jessica-service/mcp/sse/stream
  ```
- [ ] **End-to-end validation**
  - [ ] Frontend integration with the SSE endpoints
  - [ ] Verify connection stability and timeout behaviour
  - [ ] Load and performance tests
## Documentation and Monitoring
- [ ] **Update the infrastructure documentation**
  - [ ] Extend README-INFRASTRUCTURE.md
  - [ ] Update the architecture diagrams
  - [ ] Document the Terraform modules
- [ ] **Monitoring and alerting**
  - [ ] Set up CloudWatch alarms for the new endpoints
  - [ ] Monitor SSE connection drops
  - [ ] Create a dashboard for SSE performance
## Rollback Plan
- [ ] **Document the rollback strategy**
  - [ ] Define the conditions for a rollback
  - [ ] Steps to roll back the infrastructure
  - [ ] Client-side fallback mechanisms
## Resources and References
- [AWS API Gateway Limitations](https://docs.aws.amazon.com/apigateway/latest/developerguide/limits.html)
- [FastAPI SSE Implementation](https://github.com/sysid/sse-starlette)
- [AWS ALB Path-Based Routing](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#path-conditions)
- [FastAPI root_path Configuration](https://fastapi.tiangolo.com/advanced/behind-a-proxy/)
- [AWS ALB Host-Based Routing](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#host-conditions)
- [Server-Sent Events MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) 
```
--------------------------------------------------------------------------------
/tests/backend/test_mcp_tools.py:
--------------------------------------------------------------------------------
```python
"""
Unit tests for MCP tools.
"""
from pathlib import Path
from src.backend.mcp_tools import register_mcp_tools
from src.backend.routes import load_config, save_config
from mcp.server.fastmcp import FastMCP
class TestMCPTools:
    def test_speak_text_basic(self, mock_elevenlabs, temp_config_dir, mock_subprocess, monkeypatch):
        """Test basic text-to-speech conversion."""
        # Setup
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Execute
        @mcp_server.tool("speak_text")
        def speak_text(text: str, voice_id: str = None):
            mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
            return {
                "success": True,
                "message": "Text converted to speech successfully",
                "streaming": False,
            }
        result = speak_text("Hello, World!")
        # Assert
        assert result["success"] is True
        assert result["streaming"] is False
        mock_elevenlabs["generate"].assert_called_with(
            text="Hello, World!", voice=None, model="model1"
        )
        # Verify temp file cleanup
        temp_files = list(Path("/tmp").glob("*.mp3"))
        assert len(temp_files) == 0
    def test_speak_text_with_custom_voice(
        self, mock_elevenlabs, temp_config_dir, mock_subprocess, monkeypatch
    ):
        """Test TTS with custom voice ID."""
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Execute with custom voice
        @mcp_server.tool("speak_text")
        def speak_text(text: str, voice_id: str = None):
            mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
            return {
                "success": True,
                "message": "Text converted to speech successfully",
                "streaming": False,
            }
        result = speak_text("Test", voice_id="voice2")
        # Assert
        assert result["success"] is True
        mock_elevenlabs["generate"].assert_called_with(text="Test", voice="voice2", model="model1")
    def test_speak_text_with_save_audio(
        self, mock_elevenlabs, temp_config_dir, mock_subprocess, monkeypatch
    ):
        """Test TTS with audio saving enabled."""
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        # Enable audio saving
        config = load_config()
        config["settings"]["save_audio"] = True
        save_config(config)
        # Create audio directory
        audio_dir = temp_config_dir / "audio"
        audio_dir.mkdir(exist_ok=True)
        test_audio_file = audio_dir / "test.mp3"
        test_audio_file.touch()
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Execute
        @mcp_server.tool("speak_text")
        def speak_text(text: str, voice_id: str = None):
            mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
            return {
                "success": True,
                "message": "Text converted to speech successfully",
                "streaming": False,
            }
        result = speak_text("Save this audio")
        # Assert
        assert result["success"] is True
        audio_files = list((temp_config_dir / "audio").glob("*.mp3"))
        assert len(audio_files) == 1
    def test_list_voices(self, mock_elevenlabs, temp_config_dir, monkeypatch):
        """Test voice listing."""
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        mock_voices = [{"id": "voice1", "name": "Voice 1"}, {"id": "voice2", "name": "Voice 2"}]
        mock_elevenlabs["voices"].return_value = mock_voices
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Execute
        @mcp_server.tool("list_voices")
        def list_voices():
            voices = mock_elevenlabs["voices"]()
            return {"success": True, "voices": voices}
        result = list_voices()
        # Assert
        assert result["success"] is True
        assert len(result["voices"]) == 2
        assert result["voices"] == mock_voices
    def test_get_models(self, mock_elevenlabs, temp_config_dir, monkeypatch):
        """Test model listing."""
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        mock_models = [{"id": "model1", "name": "Model 1"}, {"id": "model2", "name": "Model 2"}]
        mock_elevenlabs["models"].return_value = mock_models
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Execute
        @mcp_server.tool("get_models")
        def get_models():
            models = mock_elevenlabs["models"]()
            return {"success": True, "models": models}
        result = get_models()
        # Assert
        assert result["success"] is True
        assert len(result["models"]) == 2
        assert result["models"] == mock_models
    def test_config_management(self, mock_elevenlabs, temp_config_dir, monkeypatch):
        """Test configuration management."""
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        # Set up initial config
        config = load_config()
        config["settings"]["default_voice_id"] = "voice1"
        save_config(config)
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Test get config
        @mcp_server.tool("get_config")
        def get_config():
            config = load_config()
            return {"success": True, "config": config}
        result = get_config()
        assert result["success"] is True
        assert result["config"]["settings"]["default_voice_id"] == "voice1"
        # Test update config
        @mcp_server.tool("update_config")
        def update_config(config_data: dict):
            current_config = load_config()
            current_config.update(config_data)
            save_config(current_config)
            return {"success": True, "message": "Configuration updated successfully"}
        new_config = {"default_voice_id": "voice2", "settings": {"auto_play": False}}
        result = update_config(new_config)
        assert result["success"] is True
        # Verify config was updated
        result = get_config()
        assert result["success"] is True
        assert result["config"]["default_voice_id"] == "voice2"
        assert result["config"]["settings"]["auto_play"] is False
    def test_error_handling(self, mock_elevenlabs, temp_config_dir, monkeypatch):
        """Test error handling in various scenarios."""
        monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
        monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
        mcp_server = FastMCP()
        register_mcp_tools(mcp_server, test_mode=True)
        # Test API error
        mock_elevenlabs["generate"].side_effect = Exception("API Error")
        @mcp_server.tool("speak_text")
        def speak_text(text: str, voice_id: str = None):
            try:
                mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
                return {
                    "success": True,
                    "message": "Text converted to speech successfully",
                    "streaming": False,
                }
            except Exception as e:
                return {"success": False, "error": str(e)}
        result = speak_text("Should fail")
        assert result["success"] is False
        assert "API Error" in result["error"]
        # Test invalid voice ID
        mock_elevenlabs["generate"].side_effect = None
        # Passing an unknown voice ID still succeeds because the call goes straight through to the mock
        result = speak_text("Test", voice_id="nonexistent")
        assert result["success"] is True
```
--------------------------------------------------------------------------------
/src/frontend/src/App.tsx:
--------------------------------------------------------------------------------
```typescript
import { useState, useEffect, useRef } from 'react'
import {
  Container,
  Paper,
  Typography,
  TextField,
  Button,
  FormControl,
  InputLabel,
  Select,
  MenuItem,
  Box,
  Alert,
  IconButton,
  CircularProgress,
  Divider,
  ThemeProvider,
  createTheme,
  CssBaseline,
  Tabs,
  Tab,
  Switch,
  FormControlLabel,
  Snackbar,
  keyframes,
} from '@mui/material'
import {
  PlayArrow as PlayIcon,
  Stop as StopIcon,
  RecordVoiceOver as MicIcon,
  VolumeUp as VolumeIcon,
  Save as SaveIcon,
  GraphicEq as WaveIcon,
} from '@mui/icons-material'
import apiService, { Voice, Model, Config, connectWebSocket } from './services/api'
import { TabContext, TabList, TabPanel } from '@mui/lab'
// Create wave animation keyframes
const waveAnimation = keyframes`
  0% { transform: scaleY(0.5); }
  50% { transform: scaleY(1); }
  100% { transform: scaleY(0.5); }
`
// Create a custom theme
const theme = createTheme({
  palette: {
    primary: {
      main: '#6366f1', // Indigo color
    },
    secondary: {
      main: '#10b981', // Emerald color
    },
    background: {
      default: '#f3f4f6',
      paper: '#ffffff',
    },
  },
  typography: {
    fontFamily: '"Inter", "Roboto", "Helvetica", "Arial", sans-serif',
    h4: {
      fontWeight: 600,
    },
    h5: {
      fontWeight: 600,
    },
    h6: {
      fontWeight: 600,
    },
  },
  shape: {
    borderRadius: 8,
  },
  components: {
    MuiButton: {
      styleOverrides: {
        root: {
          textTransform: 'none',
          fontWeight: 500,
        },
      },
    },
    MuiPaper: {
      styleOverrides: {
        root: {
          boxShadow: '0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)',
        },
      },
    },
  },
})
function App() {
  const [selectedTab, setSelectedTab] = useState<string>('0');
  const [text, setText] = useState('');
  const [voices, setVoices] = useState<Voice[]>([])
  const [models, setModels] = useState<Model[]>([])
  const [selectedVoice, setSelectedVoice] = useState<string>('')
  const [selectedModel, setSelectedModel] = useState<string>('')
  const [isLoading, setIsLoading] = useState<boolean>(false)
  const [audioUrl, setAudioUrl] = useState<string>('')
  const [isPlaying, setIsPlaying] = useState<boolean>(false)
  const [error, setError] = useState<string>('')
  const [config, setConfig] = useState<Config | null>(null)
  const [autoPlay, setAutoPlay] = useState(true)
  const [snackbarOpen, setSnackbarOpen] = useState<boolean>(false)
  const [snackbarMessage, setSnackbarMessage] = useState<string>('')
  const wsRef = useRef<WebSocket | null>(null)
  const audioContextRef = useRef<AudioContext | null>(null)
  const [isAudioInitialized, setIsAudioInitialized] = useState(false)
  // Lazily create the shared AudioContext, resume it if suspended, and mark audio as initialized
  const ensureAudioContext = async () => {
    if (!audioContextRef.current) {
      audioContextRef.current = new AudioContext()
    }
    
    if (audioContextRef.current.state === 'suspended') {
      await audioContextRef.current.resume()
    }
    setIsAudioInitialized(true)
    return audioContextRef.current
  }
  // On mount: load config, voices, and models, then open the streaming WebSocket
  useEffect(() => {
    const fetchData = async () => {
      try {
        const configData = await apiService.getConfig()
        setConfig(configData)
        
        const [voicesData, modelsData] = await Promise.all([
          apiService.getVoices(),
          apiService.getModels()
        ])
        
        setVoices(voicesData)
        setModels(modelsData)
        
        if (configData?.default_voice_id) {
          setSelectedVoice(configData.default_voice_id)
          setSelectedModel(configData.default_model_id || '')
          setAutoPlay(configData.settings.auto_play)
        } else if (voicesData.length > 0) {
          setSelectedVoice(voicesData[0].voice_id)
          if (modelsData.length > 0) {
            setSelectedModel(modelsData[0].model_id)
          }
        }
        if (!wsRef.current) {
          wsRef.current = connectWebSocket(
            async (event: MessageEvent) => {
              try {
                const message = JSON.parse(event.data)
                console.log('WebSocket message received:', message.type);
                
                switch (message.type) {
                  case 'audio_data':
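                    // Base64-decode the streamed audio chunk into an ArrayBuffer,
                    // then decode and play it through the Web Audio API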
                    try {
                      const audioContext = await ensureAudioContext()
                      const audioData = atob(message.data)
                      const arrayBuffer = new ArrayBuffer(audioData.length)
                      const view = new Uint8Array(arrayBuffer)
                      for (let i = 0; i < audioData.length; i++) {
                        view[i] = audioData.charCodeAt(i)
                      }
                      
                      audioContext.decodeAudioData(arrayBuffer, (buffer) => {
                        const source = audioContext.createBufferSource()
                        source.buffer = buffer
                        source.connect(audioContext.destination)
                        source.start(0)
                        setIsPlaying(true)
                        source.onended = () => {
                          setIsPlaying(false)
                        }
                      }, (err) => {
                        console.error('Error decoding audio data:', err)
                        setError('Error playing audio stream')
                      })
                    } catch (err) {
                      console.error('Error processing audio data:', err)
                      setError('Error initializing audio playback')
                    }
                    break
                    
                  case 'error':
                    console.error('WebSocket error message:', message.message)
                    setError(`Streaming error: ${message.message}`)
                    break
                  default:
                    console.log('Unknown message type:', message.type)
                }
              } catch (err) {
                console.error('Error processing WebSocket message:', err)
                setError('Error processing audio stream')
              }
            },
            () => {
              console.log('WebSocket opened')
              setError('')
            },
            () => {
              console.log('WebSocket closed')
              wsRef.current = null
            },
            () => {
              console.error('WebSocket connection error')
              setError('WebSocket connection error')
              wsRef.current = null
            }
          )
        }
      } catch (err) {
        console.error('Error in fetchData:', err)
        setError('Failed to load data. Please try again later.')
      }
    }
    fetchData()
    return () => {
      if (wsRef.current) {
        wsRef.current.close()
        wsRef.current = null
      }
    }
  }, [])
  const handleTabChange = (_event: React.SyntheticEvent, newValue: string) => {
    setSelectedTab(newValue)
  }
  // Convert the entered text to speech via the backend API
  const handleTextToSpeech = async () => {
    try {
      setIsLoading(true)
      await ensureAudioContext()
      await apiService.textToSpeech(text)
    } catch (error) {
      console.error('Error in text to speech:', error)
      setError('Failed to convert text to speech. Please try again.')
    } finally {
      setIsLoading(false)
    }
  }
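  // Play a downloaded audio URL through an HTMLAudioElement and track playback state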
  const playAudio = (url: string) => {
    const audio = new Audio(url)
    audio.onplay = () => setIsPlaying(true)
    audio.onended = () => setIsPlaying(false)
    audio.play()
  }
  const stopAudio = () => {
    const audioElements = document.querySelectorAll('audio')
    audioElements.forEach(audio => {
      audio.pause()
      audio.currentTime = 0
    })
    setIsPlaying(false)
  }
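  // Persist the selected voice, model, and auto-play preference via the backend config API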
  const saveConfiguration = async () => {
    if (!config) return
    
    try {
      const updatedConfig = await apiService.updateConfig({
        default_voice_id: selectedVoice,
        default_model_id: selectedModel,
        settings: {
          auto_play: autoPlay
        }
      })
      
      setConfig(updatedConfig)
      setSnackbarMessage('Configuration saved successfully')
      setSnackbarOpen(true)
    } catch (err) {
      console.error('Error saving configuration:', err)
      setError('Failed to save configuration. Please try again.')
    }
  }
  const handleSnackbarClose = () => {
    setSnackbarOpen(false)
  }
  return (
    <ThemeProvider theme={theme}>
      <CssBaseline />
      <Container maxWidth="md" sx={{ mt: 4 }}>
        <Typography variant="h4" align="center" sx={{ mb: 4, color: 'primary.main', fontWeight: 'bold' }}>
          Elevenlabs TTS Streamer
        </Typography>
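
        {/* Surface API and streaming errors to the user */}
        {error && (
          <Alert severity="error" sx={{ mb: 3 }} onClose={() => setError('')}>
            {error}
          </Alert>
        )}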
        
        {!isAudioInitialized ? (
          <Box 
            sx={{ 
              width: '100%',
              mb: 3,
              p: 2,
              display: 'flex',
              alignItems: 'center',
              justifyContent: 'center',
              gap: 1,
              borderRadius: 1,
              bgcolor: 'background.paper',
              border: '1px solid',
              borderColor: 'primary.main',
              cursor: 'pointer',
              '&:hover': {
                bgcolor: 'primary.main',
                color: 'white',
              },
              transition: 'all 0.3s ease',
            }}
            onClick={ensureAudioContext}
          >
            <Typography variant="body1" sx={{ fontWeight: 'medium' }}>
              Click to Initialize Audio
            </Typography>
          </Box>
        ) : (
          <Box 
            sx={{ 
              width: '100%',
              mb: 3,
              p: 2,
              display: 'flex',
              alignItems: 'center',
              justifyContent: 'center',
              gap: 1,
              borderRadius: 1,
              bgcolor: 'primary.main',
              color: 'white',
            }}
          >
            <WaveIcon 
              sx={{
                animation: `${waveAnimation} 1.5s ease-in-out infinite`,
              }}
            />
            <Typography variant="h6" sx={{ fontWeight: 'medium' }}>
              Audio Ready
            </Typography>
          </Box>
        )}
        <Box sx={{ width: '100%', typography: 'body1' }}>
          <TabContext value={selectedTab}>
            <Box sx={{ borderBottom: 1, borderColor: 'divider' }}>
              <TabList onChange={handleTabChange} aria-label="text-to-speech and configuration tabs">
                <Tab label="Text to Speech" value="0" />
                <Tab label="Voice Configuration" value="1" />
              </TabList>
            </Box>
            
            <TabPanel value="0">
              <TextField
                id="text-input"
                label="Enter text to convert"
                multiline
                rows={4}
                value={text}
                onChange={(e) => setText(e.target.value)}
                fullWidth
                sx={{ mb: 2 }}
              />
              <Button
                variant="contained"
                onClick={handleTextToSpeech}
                disabled={!text.trim() || isLoading}
                fullWidth
              >
                {isLoading ? 'Converting...' : 'Convert to Speech'}
              </Button>
            </TabPanel>
            
            <TabPanel value="1">
              <Typography variant="h6" gutterBottom>
                Default Settings
              </Typography>
              
              <FormControl fullWidth margin="normal">
                <InputLabel id="default-voice-label">Default Voice</InputLabel>
                <Select
                  labelId="default-voice-label"
                  id="default-voice"
                  value={selectedVoice}
                  label="Default Voice"
                  onChange={(e) => setSelectedVoice(e.target.value)}
                >
                  {voices.map((voice) => (
                    <MenuItem key={voice.voice_id} value={voice.voice_id}>
                      {voice.name}
                    </MenuItem>
                  ))}
                </Select>
              </FormControl>
              
              <FormControl fullWidth margin="normal">
                <InputLabel id="default-model-label">Default Model</InputLabel>
                <Select
                  labelId="default-model-label"
                  id="default-model"
                  value={selectedModel}
                  label="Default Model"
                  onChange={(e) => setSelectedModel(e.target.value)}
                >
                  {models.map((model) => (
                    <MenuItem key={model.model_id} value={model.model_id}>
                      {model.name}
                    </MenuItem>
                  ))}
                </Select>
              </FormControl>
              
              <Typography variant="h6" gutterBottom sx={{ mt: 4 }}>
                General Settings
              </Typography>
              
              <FormControlLabel
                control={
                  <Switch
                    checked={autoPlay}
                    onChange={(e) => setAutoPlay(e.target.checked)}
                    color="primary"
                  />
                }
                label="Auto-play audio after conversion"
              />
              
              <Box display="flex" justifyContent="flex-end" mt={4}>
                <Button
                  variant="contained"
                  color="primary"
                  startIcon={<SaveIcon />}
                  onClick={saveConfiguration}
                >
                  Save Configuration
                </Button>
              </Box>
            </TabPanel>
          </TabContext>
        </Box>
      </Container>
      
      <Snackbar
        open={snackbarOpen}
        autoHideDuration={6000}
        onClose={handleSnackbarClose}
        message={snackbarMessage}
      />
    </ThemeProvider>
  )
}
export default App
```