# Directory Structure
```
├── .cursor
│   └── rules
│       ├── documentation-process.mdc
│       ├── git-behaviour.mdc
│       ├── python-fastapi.mdc
│       └── vite-typescript.mdc
├── .dev-tools
│   ├── .gitignore
│   ├── prompts
│   │   ├── prompt_pr.md
│   │   └── prompt_user_story.md
│   └── README.md
├── .env.example
├── .github
│   └── workflows
│       ├── docker-build-and-push.yml
│       └── python-backend-checks.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHECKLIST.md
├── Dockerfile
├── poetry.lock
├── pyproject.toml
├── README.md
├── scripts
│   ├── build-push-ecr.sh
│   └── build-run.sh
├── src
│   ├── __init__.py
│   ├── backend
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── .env.example
│   │   ├── app.py
│   │   ├── elevenlabs_client.py
│   │   ├── mcp_tools.py
│   │   ├── README.md
│   │   ├── routes.py
│   │   └── websocket.py
│   └── frontend
│       ├── .gitignore
│       ├── build.sh
│       ├── eslint.config.js
│       ├── index.html
│       ├── package-lock.json
│       ├── package.json
│       ├── postcss.config.js
│       ├── public
│       │   └── vite.svg
│       ├── README.md
│       ├── src
│       │   ├── App.tsx
│       │   ├── assets
│       │   │   └── react.svg
│       │   ├── index.css
│       │   ├── main.tsx
│       │   ├── services
│       │   │   └── api.ts
│       │   └── vite-env.d.ts
│       ├── tsconfig.app.json
│       ├── tsconfig.json
│       ├── tsconfig.node.json
│       └── vite.config.ts
├── terraform
│   ├── aws
│   │   ├── api_gateway
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   ├── data.tf
│   │   ├── ecr
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   ├── ecs
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   └── iam
│   │       ├── main.tf
│   │       ├── outputs.tf
│   │       └── variables.tf
│   ├── main.tf
│   ├── outputs.tf
│   ├── README.md
│   ├── terraform.tfvars
│   └── variables.tf
└── tests
    ├── __init__.py
    └── backend
        ├── __init__.py
        ├── conftest.py
        └── test_mcp_tools.py
```
# Files
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
 1 | # ElevenLabs API Configuration
 2 | ELEVENLABS_API_KEY=your-api-key-here
 3 | 
 4 | # Server Configuration
 5 | HOST=0.0.0.0
 6 | PORT=9020
 7 | LOG_LEVEL=INFO
 8 | BASE_PATH=
 9 | 
10 | # Development Settings
11 | DEBUG=false
12 | RELOAD=true 
```
--------------------------------------------------------------------------------
/src/frontend/.gitignore:
--------------------------------------------------------------------------------
```
 1 | # Logs
 2 | logs
 3 | *.log
 4 | npm-debug.log*
 5 | yarn-debug.log*
 6 | yarn-error.log*
 7 | pnpm-debug.log*
 8 | lerna-debug.log*
 9 | 
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 | 
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 | 
```
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
```yaml
 1 | repos:
 2 | - repo: https://github.com/astral-sh/ruff-pre-commit
 3 |   rev: v0.3.4
 4 |   hooks:
 5 |     - id: ruff
 6 |       args: [ --fix ]
 7 |     - id: ruff-format
 8 | 
 9 | - repo: local
10 |   hooks:
11 |     - id: pytest
12 |       name: Run unit tests
13 |       entry: poetry run pytest tests/backend -v
14 |       language: system
15 |       types: [python]
16 |       pass_filenames: false
17 |       always_run: true 
```
--------------------------------------------------------------------------------
/.dev-tools/.gitignore:
--------------------------------------------------------------------------------
```
 1 | # Python
 2 | __pycache__/
 3 | *.py[cod]
 4 | *$py.class
 5 | *.so
 6 | .Python
 7 | env/
 8 | build/
 9 | develop-eggs/
10 | dist/
11 | downloads/
12 | eggs/
13 | .eggs/
14 | lib/
15 | lib64/
16 | parts/
17 | sdist/
18 | var/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 | 
23 | # Temporary files
24 | temp_output/
25 | *.tmp
26 | *.temp
27 | *.swp
28 | *.swo
29 | 
30 | # Environment variables
31 | .env
32 | .env.local
33 | .env.*.local
34 | 
35 | # IDE
36 | .idea/
37 | .vscode/
38 | *.sublime-workspace
39 | *.sublime-project
40 | 
41 | # OS
42 | .DS_Store
43 | Thumbs.db 
```
--------------------------------------------------------------------------------
/src/backend/.env.example:
--------------------------------------------------------------------------------
```
 1 | # Eleven Labs API Configuration
 2 | ELEVENLABS_API_KEY=your_api_key_here
 3 | 
 4 | # Server Configuration
 5 | HOST=127.0.0.1              # Local: 127.0.0.1, Docker: 0.0.0.0
 6 | PORT=9020                   # Port the service listens on
 7 | LOG_LEVEL=INFO              # Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
 8 | 
 9 | # API Path Configuration
10 | # Important: leave empty or '/' locally, use '/jessica-backend' in AWS/containers
11 | ROOT_PATH=                  # Path prefix for API Gateway/ALB (empty locally, e.g. '/jessica-backend' in production)
12 | 
13 | # Development Settings
14 | DEBUG=false                 # Enable debug mode
15 | RELOAD=true                 # Auto-reload on code changes
16 | 
17 | # MCP Configuration
18 | MCP_PORT=9022               # Port for MCP communication
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
  1 | terraform-global
  2 | src/auth-service
  3 | .ruff_cache/
  4 | # Dependencies
  5 | node_modules/
  6 | 
  7 | README-INFRASTRUCTURE.md
  8 | # Environment variables
  9 | .env
 10 | 
 11 | # Logs
 12 | logs
 13 | *.log
 14 | npm-debug.log*
 15 | yarn-debug.log*
 16 | yarn-error.log*
 17 | 
 18 | # Runtime data
 19 | pids
 20 | *.pid
 21 | *.seed
 22 | *.pid.lock
 23 | 
 24 | # Directory for instrumented libs generated by jscoverage/JSCover
 25 | lib-cov
 26 | 
 27 | # Coverage directory used by tools like istanbul
 28 | coverage
 29 | 
 30 | # IDE - VSCode
 31 | .vscode/*
 32 | !.vscode/settings.json
 33 | !.vscode/tasks.json
 34 | !.vscode/launch.json
 35 | !.vscode/extensions.json
 36 | 
 37 | # Misc
 38 | .DS_Store
 39 | 
 40 | # Python
 41 | __pycache__/
 42 | *.py[cod]
 43 | *$py.class
 44 | *.so
 45 | .Python
 46 | build/
 47 | develop-eggs/
 48 | dist/
 49 | downloads/
 50 | eggs/
 51 | .eggs/
 52 | lib/
 53 | lib64/
 54 | parts/
 55 | sdist/
 56 | var/
 57 | wheels/
 58 | *.egg-info/
 59 | .installed.cfg
 60 | *.egg
 61 | 
 62 | # Virtual Environments
 63 | .env
 64 | .venv
 65 | env/
 66 | venv/
 67 | ENV/
 68 | env.bak/
 69 | venv.bak/
 70 | .python-version
 71 | 
 72 | # IDE
 73 | .idea/
 74 | .vscode/
 75 | *.swp
 76 | *.swo
 77 | .DS_Store
 78 | 
 79 | # Testing
 80 | .coverage
 81 | .pytest_cache/
 82 | htmlcov/
 83 | .tox/
 84 | .nox/
 85 | coverage.xml
 86 | *.cover
 87 | *.py,cover
 88 | .hypothesis/
 89 | 
 90 | # Logs
 91 | *.log
 92 | logs/
 93 | log/
 94 | 
 95 | # Project specific
 96 | generated/
 97 | cache/
 98 | .mcp/
 99 | 
100 | # Frontend build output in backend
101 | src/backend/static/
102 | 
103 | # Frontend build
104 | src/frontend/dist/
105 | 
106 | # Local development
107 | .env.local
108 | .env.development.local
109 | .env.test.local
110 | .env.production.local
111 | 
112 | # Temporary docs
113 | docs/tmp/ 
114 | 
115 | # Terraform
116 | .terraform/
117 | *.tfstate
118 | *.tfstate.*
119 | .terraform.lock.hcl
120 | terraform.tfvars.json
121 | terraform.tfvars
122 | *.auto.tfvars
123 | *.auto.tfvars.json 
124 | 
```
--------------------------------------------------------------------------------
/src/frontend/README.md:
--------------------------------------------------------------------------------
```markdown
 1 | # React + TypeScript + Vite
 2 | 
 3 | This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
 4 | 
 5 | Currently, two official plugins are available:
 6 | 
 7 | - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
 8 | - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
 9 | 
10 | ## Expanding the ESLint configuration
11 | 
12 | If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
13 | 
14 | - Configure the top-level `parserOptions` property like this:
15 | 
16 | ```js
17 | export default tseslint.config({
18 |   languageOptions: {
19 |     // other options...
20 |     parserOptions: {
21 |       project: ['./tsconfig.node.json', './tsconfig.app.json'],
22 |       tsconfigRootDir: import.meta.dirname,
23 |     },
24 |   },
25 | })
26 | ```
27 | 
28 | - Replace `tseslint.configs.recommended` with `tseslint.configs.recommendedTypeChecked` or `tseslint.configs.strictTypeChecked`
29 | - Optionally add `...tseslint.configs.stylisticTypeChecked`
30 | - Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and update the config:
31 | 
32 | ```js
33 | // eslint.config.js
34 | import react from 'eslint-plugin-react'
35 | 
36 | export default tseslint.config({
37 |   // Set the react version
38 |   settings: { react: { version: '18.3' } },
39 |   plugins: {
40 |     // Add the react plugin
41 |     react,
42 |   },
43 |   rules: {
44 |     // other rules...
45 |     // Enable its recommended rules
46 |     ...react.configs.recommended.rules,
47 |     ...react.configs['jsx-runtime'].rules,
48 |   },
49 | })
50 | ```
51 | 
```
--------------------------------------------------------------------------------
/src/backend/README.md:
--------------------------------------------------------------------------------
```markdown
 1 | # Jessica Backend Service
 2 | 
 3 | FastAPI-based backend service for the Jessica Text-to-Speech application with ElevenLabs API integration and MCP (Model Context Protocol) support.
 4 | 
 5 | ## Configuration
 6 | 
 7 | The service can be configured via environment variables or a `.env` file.
 8 | 
 9 | ### Main Settings
10 | 
11 | | Variable | Default Value | Description |
12 | |----------|--------------|--------------|
13 | | ELEVENLABS_API_KEY | - | API key for ElevenLabs |
14 | | HOST | 127.0.0.1 | Host address (0.0.0.0 for containers) |
15 | | PORT | 9020 | HTTP port |
16 | | LOG_LEVEL | INFO | Logging level (DEBUG, INFO, WARNING, ERROR) |
17 | | MCP_PORT | 9022 | MCP port |
18 | 
19 | ### Path Routing with ROOT_PATH
20 | 
21 | The service supports running behind an API Gateway or Application Load Balancer with a path prefix.
22 | 
23 | | Variable | Default Value | Description |
24 | |----------|--------------|--------------|
25 | | ROOT_PATH | "" | Path prefix for API Gateway/ALB integration |
26 | 
27 | #### How it works:
28 | 
29 | 1. **Local Development**: In local development, `ROOT_PATH` remains empty (""), making the API accessible at `http://localhost:9020/`.
30 | 
31 | 2. **Production**: In an AWS environment, ROOT_PATH can be set to e.g. `/jessica-backend`. The middleware ensures that:
32 |    - Requests to `/jessica-backend/health` are processed internally as `/health`
33 |    - The FastAPI documentation and all links contain the correct path
34 |    - The middleware layer automatically removes the ROOT_PATH prefix
35 | 
36 | 3. **Middleware**: The path-rewriting middleware ensures that incoming paths with the ROOT_PATH prefix are automatically rewritten (see the sketch below).
37 | 
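For illustration, a path-rewriting middleware of this kind could look roughly like the sketch below (a minimal sketch, not necessarily the exact implementation in `app.py`):

```python
import os

from fastapi import FastAPI
from starlette.types import ASGIApp, Receive, Scope, Send

ROOT_PATH = os.getenv("ROOT_PATH", "")


class RootPathRewriteMiddleware:
    """Strip the ROOT_PATH prefix from incoming request paths."""

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if scope["type"] in ("http", "websocket") and ROOT_PATH:
            path = scope["path"]
            if path.startswith(ROOT_PATH):
                # e.g. /jessica-backend/health -> /health
                scope["path"] = path[len(ROOT_PATH):] or "/"
        await self.app(scope, receive, send)


# root_path keeps the OpenAPI docs and generated links pointing at the prefixed URL
app = FastAPI(root_path=ROOT_PATH)
app.add_middleware(RootPathRewriteMiddleware)
```

With `ROOT_PATH=/jessica-backend`, a request to `/jessica-backend/health` is then handled by the regular `/health` route.
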
38 | ### Example Configuration
39 | 
40 | ```env
41 | # Local
42 | ROOT_PATH=
43 | 
44 | # Production with API Gateway/ALB
45 | ROOT_PATH=/jessica-backend
46 | ```
47 | 
48 | ## API Endpoints
49 | 
50 | The main API is served under `/api`; together with ROOT_PATH, endpoints are therefore reachable as follows:
51 | 
52 | - Local: `http://localhost:9020/api/...`
53 | - Production: `https://example.com/jessica-backend/api/...`
54 | 
55 | ### Health Check
56 | 
57 | The health check endpoint is always available at `/health`:
58 | 
59 | ```
60 | GET /health
61 | ```
62 | 
63 | This also provides information about the configured ROOT_PATH. 
```
--------------------------------------------------------------------------------
/terraform/README.md:
--------------------------------------------------------------------------------
```markdown
 1 | # Jessica Service Terraform Configuration
 2 | 
 3 | This directory contains the Terraform configuration for the Jessica service infrastructure. It follows the hub-and-spoke architecture described in the central georgi.io infrastructure repository.
 4 | 
 5 | ## Components
 6 | 
 7 | - **ECR Repository**: For storing Docker images
 8 | - **VPC Integration**: Integration with the central VPC infrastructure
 9 | - **API Gateway Integration**: Integration with the central API Gateway (planned)
10 | 
11 | ## Getting Started
12 | 
13 | ### Prerequisites
14 | 
15 | - Terraform >= 1.0.0
16 | - AWS CLI with SSO configuration
17 | - yawsso (`pip install yawsso`) for AWS SSO credential management
18 | - Appropriate AWS permissions
19 | 
20 | ### AWS SSO Setup
21 | 
22 | 1. Configure AWS SSO:
23 | ```bash
24 | aws configure sso --profile georgi-io
25 | # SSO Start URL: https://georgi-sso.awsapps.com/start
26 | # SSO Region: eu-central-1
27 | ```
28 | 
29 | 2. Login and sync credentials:
30 | ```bash
31 | aws sso login --profile georgi-io
32 | yawsso -p georgi-io  # Syncs SSO credentials with AWS CLI format
33 | ```
34 | 
35 | 3. Verify setup:
36 | ```bash
37 | aws sts get-caller-identity --profile georgi-io
38 | ```
39 | 
40 | ### Terraform Commands
41 | 
42 | ```bash
43 | # Initialize Terraform
44 | terraform init
45 | 
46 | # Plan changes
47 | terraform plan
48 | 
49 | # Apply changes
50 | terraform apply
51 | 
52 | # Destroy resources (use with caution)
53 | terraform destroy
54 | ```
55 | 
56 | ## Directory Structure
57 | 
58 | ```
59 | terraform/
60 | ├── main.tf                # Main Terraform configuration
61 | ├── variables.tf           # Variable definitions
62 | ├── outputs.tf             # Output definitions
63 | ├── terraform.tfvars       # Variable values
64 | └── aws/                   # AWS-specific modules
65 |     └── ecr/               # ECR repository module
66 |         ├── main.tf        # ECR resource definitions
67 |         ├── variables.tf   # ECR module variables
68 |         └── outputs.tf     # ECR module outputs
69 | ```
70 | 
71 | ## Remote State
72 | 
73 | This configuration uses the central S3 bucket for storing Terraform state:
74 | 
75 | ```hcl
76 | backend "s3" {
77 |   bucket = "georgi-io-terraform-state"
78 |   key    = "services/jessica/terraform.tfstate"
79 |   region = "eu-central-1"
80 | }
81 | ```
82 | 
83 | ## Central Infrastructure Integration
84 | 
85 | This configuration integrates with the central infrastructure using Terraform remote state:
86 | 
87 | ```hcl
88 | data "terraform_remote_state" "infrastructure" {
89 |   backend = "s3"
90 |   config = {
91 |     bucket = "georgi-io-terraform-state"
92 |     key    = "infrastructure/terraform.tfstate"
93 |     region = "eu-central-1"
94 |   }
95 | }
96 | ``` 
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
  1 | # Project Jessica (ElevenLabs TTS MCP)
  2 | 
  3 | This project integrates ElevenLabs Text-to-Speech capabilities with Cursor through the Model Context Protocol (MCP). It consists of a FastAPI backend service and a React frontend application.
  4 | 
  5 | ## Features
  6 | 
  7 | - Text-to-Speech conversion using ElevenLabs API
  8 | - Voice selection and management
  9 | - MCP integration for Cursor
 10 | - Modern React frontend interface
 11 | - WebSocket real-time communication
 12 | - Pre-commit hooks for code quality
 13 | - Automatic code formatting and linting
 14 | 
 15 | ## Project Structure
 16 | 
 17 | ```
 18 | jessica/
 19 | ├── src/
 20 | │   ├── backend/          # FastAPI backend service
 21 | │   └── frontend/         # React frontend application
 22 | ├── terraform/            # Infrastructure as Code
 23 | ├── tests/               # Test suites
 24 | └── docs/                # Documentation
 25 | ```
 26 | 
 27 | ## Requirements
 28 | 
 29 | - Python 3.11+
 30 | - Poetry (for backend dependency management)
 31 | - Node.js 18+ (for frontend)
 32 | - Cursor (for MCP integration)
 33 | 
 34 | ## Local Development Setup
 35 | 
 36 | ### Backend Setup
 37 | 
 38 | ```bash
 39 | # Clone the repository
 40 | git clone https://github.com/georgi-io/jessica.git
 41 | cd jessica
 42 | 
 43 | # Create Python virtual environment
 44 | python -m venv .venv
 45 | source .venv/bin/activate  # On Windows: .venv\Scripts\activate
 46 | 
 47 | # Install backend dependencies
 48 | poetry install
 49 | 
 50 | # Configure environment
 51 | cp .env.example .env
 52 | # Edit .env with your ElevenLabs API key
 53 | 
 54 | # Install pre-commit hooks
 55 | poetry run pre-commit install
 56 | ```
 57 | 
 58 | ### Frontend Setup
 59 | 
 60 | ```bash
 61 | # Navigate to frontend directory
 62 | cd src/frontend
 63 | 
 64 | # Install dependencies
 65 | npm install
 66 | ```
 67 | 
 68 | ## Development Servers
 69 | 
 70 | ### Starting the Backend
 71 | 
 72 | ```bash
 73 | # Activate virtual environment if not active
 74 | source .venv/bin/activate  # On Windows: .venv\Scripts\activate
 75 | 
 76 | # Start the backend
 77 | python -m src.backend
 78 | ```
 79 | 
 80 | The backend provides:
 81 | - REST API: http://localhost:9020
 82 | - WebSocket: ws://localhost:9020/ws
 83 | - MCP Server: http://localhost:9020/sse (integrated with the main API server)
 84 | 
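Once the backend is running, a quick smoke test from Python can confirm that it is reachable (a minimal sketch; it assumes the default port 9020 and the `/health` endpoint documented in `src/backend/README.md`, and uses `requests`, which is not a project dependency):

```python
import requests

# Check the health endpoint of the locally running backend
response = requests.get("http://localhost:9020/health", timeout=5)
response.raise_for_status()
print(response.json())  # per the backend README, this includes the configured ROOT_PATH
```
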
 85 | ### Starting the Frontend
 86 | 
 87 | ```bash
 88 | # In src/frontend directory
 89 | npm run dev
 90 | ```
 91 | 
 92 | Frontend development server:
 93 | - http://localhost:5173
 94 | 
 95 | ## Environment Configuration
 96 | 
 97 | ### Backend (.env)
 98 | ```env
 99 | # ElevenLabs API
100 | ELEVENLABS_API_KEY=your-api-key
101 | 
102 | # Server Configuration
103 | HOST=127.0.0.1
104 | PORT=9020
105 | 
106 | # Development Settings
107 | DEBUG=false
108 | RELOAD=true
109 | ```
110 | 
111 | ### Frontend (.env)
112 | ```env
113 | VITE_API_URL=http://localhost:9020
114 | VITE_WS_URL=ws://localhost:9020/ws
115 | ```
116 | 
117 | ## Code Quality Tools
118 | 
119 | ### Backend
120 | 
121 | ```bash
122 | # Run all pre-commit hooks
123 | poetry run pre-commit run --all-files
124 | 
125 | # Run specific tools
126 | poetry run ruff check .
127 | poetry run ruff format .
128 | poetry run pytest
129 | ```
130 | 
131 | ### Frontend
132 | 
133 | ```bash
134 | # Lint
135 | npm run lint
136 | 
137 | # Type check
138 | npm run type-check
139 | 
140 | # Test
141 | npm run test
142 | ```
143 | 
144 | ## Production Deployment
145 | 
146 | ### AWS ECR and GitHub Actions Setup
147 | 
148 | To enable automatic building and pushing of Docker images to Amazon ECR:
149 | 
150 | 1. Apply the Terraform configuration to create the required AWS resources:
151 |    ```bash
152 |    cd terraform
153 |    terraform init
154 |    terraform apply
155 |    ```
156 | 
157 | 2. The GitHub Actions workflow will automatically:
158 |    - Read the necessary configuration from the Terraform state in S3
159 |    - Build the Docker image on pushes to `main` or `develop` branches
160 |    - Push the image to ECR with tags for `latest` and the specific commit SHA
161 | 
162 | 3. No additional repository variables are needed; the workflow fetches all required configuration from the Terraform state.
163 | 
164 | ### How it Works
165 | 
166 | The GitHub Actions workflow is configured to:
167 | 1. Initially assume a predefined IAM role with S3 read permissions
168 | 2. Fetch and extract configuration values from the Terraform state file in S3
169 | 3. Re-authenticate using the actual deployment role from the state file
170 | 4. Build and push the Docker image to the ECR repository defined in the state
171 | 
172 | This approach eliminates the need to manually configure GitHub repository variables and ensures that the CI/CD process always uses the current infrastructure configuration.
173 | 
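The state-extraction step can be sketched roughly as follows (a simplified illustration, not the actual workflow code; the output names `ecr_repository_url` and `deployment_role_arn` are assumed for illustration, while the bucket and key match the backend configuration documented in `terraform/README.md`):

```python
import json

import boto3

# Download the Terraform state that `terraform apply` wrote to S3
s3 = boto3.client("s3", region_name="eu-central-1")
obj = s3.get_object(
    Bucket="georgi-io-terraform-state",
    Key="services/jessica/terraform.tfstate",
)
state = json.loads(obj["Body"].read())

# Terraform state stores root-module outputs under "outputs";
# the output names below are illustrative, not taken from this repository.
outputs = state["outputs"]
ecr_repository_url = outputs["ecr_repository_url"]["value"]
deployment_role_arn = outputs["deployment_role_arn"]["value"]

print(ecr_repository_url, deployment_role_arn)
```
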
174 | ### Quick Overview
175 | 
176 | - Frontend: Served from S3 via CloudFront at jessica.georgi.io
177 | - Backend API: Available at api.georgi.io/jessica
178 | - WebSocket: Connects to api.georgi.io/jessica/ws
179 | - Docker Image: Stored in AWS ECR and can be deployed to ECS/EKS
180 | - Infrastructure: Managed via Terraform in this repository
181 | 
182 | ## MCP Integration with Cursor
183 | 
184 | 1. Start the backend server
185 | 2. In Cursor settings, add new MCP server:
186 |    - Name: Jessica TTS
187 |    - Type: SSE
188 |    - URL: http://localhost:9020/sse
189 | 
190 | ## Troubleshooting
191 | 
192 | ### Common Issues
193 | 
194 | 1. **API Key Issues**
195 |    - Error: "Invalid API key"
196 |    - Solution: Check `.env` file
197 | 
198 | 2. **Connection Problems**
199 |    - Error: "Cannot connect to MCP server"
200 |    - Solution: Verify backend is running and ports are correct
201 | 
202 | 3. **Port Conflicts**
203 |    - Error: "Address already in use"
204 |    - Solution: Change ports in `.env`
205 | 
206 | 4. **WebSocket Connection Failed**
207 |    - Error: "WebSocket connection failed"
208 |    - Solution: Ensure backend is running and WebSocket URL is correct
209 | 
210 | For additional help, please open an issue on GitHub.
211 | 
212 | ## License
213 | 
214 | MIT 
215 | 
```
--------------------------------------------------------------------------------
/.dev-tools/README.md:
--------------------------------------------------------------------------------
```markdown
  1 | # Development Tools
  2 | 
  3 | This directory contains various development and productivity tools used in the project. These tools are designed to help developers and product managers with common tasks and maintain consistency across the codebase.
  4 | 
  5 | ## Directory Structure
  6 | 
  7 | ```
  8 | .dev-tools/
  9 | ├── prompts/           # AI prompt templates for various tasks
 10 | │   ├── prompt_pr.md   # PR description generation template
 11 | │   ├── prompt_commit.md # Commit message generation template
 12 | │   └── prompt_user_story.md # User story generation template
 13 | │
 14 | └── scripts/          # Development utility scripts
 15 |     ├── generate_git_diffs.py  # Script for generating git diffs
 16 |     ├── check_env_files.py  # Validates environment files across all services
 17 |     ├── pyproject.toml       # Poetry project configuration
 18 |     ├── .env.example        # Example environment variables
 19 |     └── .env               # Your local environment variables (git-ignored)
 20 | ```
 21 | 
 22 | ## Tools Overview
 23 | 
 24 | ### Prompts
 25 | 
 26 | The `prompts/` directory contains templates for AI-assisted tasks:
 27 | 
 28 | - `prompt_pr.md`: Template for generating detailed pull request descriptions
 29 | - `prompt_commit.md`: Template for creating meaningful commit messages
 30 | - `prompt_user_story.md`: Template for creating well-structured user stories as GitHub issues
 31 | 
 32 | ### Scripts
 33 | 
 34 | The `scripts/` directory contains utility scripts:
 35 | 
 36 | - `generate_git_diffs.py`: Python script for generating git diffs between branches and collecting commit messages
 37 | - `check_env_files.py`: Validates environment files across all services (a simplified sketch of the check follows below)
 38 |   - Checks for missing .env files against .env.example templates
 39 |   - Ensures all required variables are set
 40 |   - Detects example/placeholder values that need to be replaced
 41 |   - Validates against .gitignore patterns
 42 |   - Provides clear progress indicators and summary reports
 43 | 
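The core of such a check can be sketched as follows (a simplified illustration of the comparison logic, not the actual `check_env_files.py`):

```python
from pathlib import Path


def read_env_keys(path: Path) -> dict[str, str]:
    """Parse KEY=VALUE pairs from an env file, ignoring comments and blank lines."""
    entries = {}
    for line in path.read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue
        key, _, value = line.partition("=")
        entries[key.strip()] = value.strip()
    return entries


def compare(example_file: Path, env_file: Path) -> None:
    example = read_env_keys(example_file)
    if not env_file.exists():
        print(f"❌ {env_file} is missing")
        return
    actual = read_env_keys(env_file)
    for key, example_value in example.items():
        if key not in actual:
            print(f"❌ {key} is not set in {env_file}")
        elif example_value and actual[key] == example_value:
            print(f"⚠️  {key} still has its example value")
    for key in actual.keys() - example.keys():
        print(f"⚠️  {key} is not listed in {example_file}")


compare(Path(".env.example"), Path(".env"))
```
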
 44 | ## Setup
 45 | 
 46 | ### Environment Variables
 47 | 
 48 | 1. Create your local environment file:
 49 |    ```bash
 50 |    cd .dev-tools/scripts
 51 |    cp .env.example .env
 52 |    ```
 53 | 
 54 | 2. Adjust the variables in `.env` if needed:
 55 |    - `GIT_DIFF_BRANCH`: The branch to compare against (defaults to `origin/dev` for PRs)
 56 |    - Note: Most PRs should be created against the `dev` branch. Use `origin/main` only for hotfixes or release PRs.
 57 | 
 58 | ### Python Dependencies
 59 | 
 60 | 1. Install Poetry if not already installed:
 61 |    ```bash
 62 |    curl -sSL https://install.python-poetry.org | python3 -
 63 |    ```
 64 | 
 65 | 2. Install dependencies:
 66 |    ```bash
 67 |    cd .dev-tools/scripts
 68 |    poetry install
 69 |    ```
 70 | 
 71 | 3. Activate the virtual environment:
 72 |    ```bash
 73 |    poetry shell
 74 |    ```
 75 | 
 76 | ## Usage
 77 | 
 78 | ### Workflow: Creating User Stories with AI
 79 | 
 80 | 1. In Cursor's Composer:
 81 |    - Reference or drag in `prompt_user_story.md`
 82 |    - Describe what you want to achieve in your user story
 83 |    - The AI will analyze your input and the codebase to create a comprehensive user story
 84 |    - Review and confirm the generated story
 85 |    - The AI will help create a GitHub issue with proper project assignment
 86 | 
 87 | 2. The generated user story will include:
 88 |    - Business requirements from a Product Manager's perspective
 89 |    - Technical analysis from an Engineering Manager's perspective
 90 |    - Implementation approach and considerations
 91 |    - Proper GitHub issue metadata and project assignment
 92 | 
 93 | ### Workflow: Creating Commits with AI
 94 | 
 95 | 1. Generate the necessary git diffs:
 96 |    ```bash
 97 |    cd .dev-tools/scripts
 98 |    poetry run python generate_git_diffs.py
 99 |    ```
100 |    This creates three files in the `temp_output` directory:
101 |    - `TEMP_GIT_commit_diff.txt`: Current changes to be committed
102 |    - `TEMP_GIT_pr_diff.txt`: Changes compared to target branch
103 |    - `TEMP_GIT_pr_commit_messages.txt`: Commit messages history
104 | 
105 | 2. In Cursor's Composer:
106 |    - Reference or drag in `prompt_commit.md`
107 |    - Reference or drag in `TEMP_GIT_commit_diff.txt`
108 |    - Click Submit
109 |    - Cursor will generate a meaningful commit message based on your changes
110 | 
111 | ### Workflow: Creating Pull Requests with AI
112 | 
113 | 1. Ensure correct comparison branch:
114 |    - Check `.env` file in scripts directory
115 |    - By default, it's set to `origin/dev` for normal feature PRs
116 |    - Change to `origin/main` only for hotfixes or release PRs
117 |    - This determines which branch your changes are compared against
118 | 
119 | 2. Generate the git diffs:
120 |    ```bash
121 |    cd .dev-tools/scripts
122 |    poetry run python generate_git_diffs.py
123 |    ```
124 | 
125 | 3. In Cursor's Composer:
126 |    - Reference or drag in `prompt_pr.md`
127 |    - Reference or drag in `TEMP_GIT_pr_diff.txt`
128 |    - Reference or drag in `TEMP_GIT_pr_commit_messages.txt`
129 |    - Click Submit
130 |    - Cursor will generate a detailed PR description based on your changes and commit history
131 | 
132 | ### Tips
133 | 
134 | - The default target branch is `dev` as most PRs should go there
135 | - Only use `main` as target for hotfixes or release PRs
136 | - The generated files in `temp_output` are temporary and will be overwritten on each run
137 | - You can reference these files in any Cursor Composer chat to generate commit messages or PR descriptions
138 | 
139 | ### Workflow: Checking Environment Files
140 | 
141 | 1. Run the environment checker:
142 |    ```bash
143 |    cd .dev-tools/scripts
144 |    poetry run python check_env_files.py
145 |    ```
146 | 
147 | 2. The script will:
148 |    - Scan the project for service directories
149 |    - Load and respect .gitignore patterns
150 |    - Check each service's environment configuration
151 |    - Provide a detailed report showing:
152 |      - ✅ Properly configured environment files
153 |      - ❌ Missing environment files
154 |      - ⚠️  Variables with example values
155 |      - ⚠️  Extra variables not in example files
156 | 
157 | 3. Fix any issues reported:
158 |    - Create missing .env files based on .env.example templates
159 |    - Fill in required variables
160 |    - Replace example/placeholder values with real ones
161 |    - Review extra variables to ensure they're needed
162 | 
163 | ## Contributing
164 | 
165 | When adding new tools:
166 | 1. Create appropriate subdirectories based on tool type
167 | 2. Include clear documentation
168 | 3. Update this README with new tool descriptions
169 | 4. For Python scripts:
170 |    - Add dependencies to `pyproject.toml` using Poetry
171 |    - Follow the code style defined in `pyproject.toml`
172 |    - Update setup instructions if needed 
173 | 
```
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
```python
1 | 
```
--------------------------------------------------------------------------------
/src/backend/__init__.py:
--------------------------------------------------------------------------------
```python
1 | 
```
--------------------------------------------------------------------------------
/tests/backend/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Backend test package.
3 | """
4 | 
```
--------------------------------------------------------------------------------
/src/frontend/src/vite-env.d.ts:
--------------------------------------------------------------------------------
```typescript
1 | /// <reference types="vite/client" />
2 | 
```
--------------------------------------------------------------------------------
/src/frontend/postcss.config.js:
--------------------------------------------------------------------------------
```javascript
1 | export default {
2 |   plugins: {
3 |     autoprefixer: {},
4 |   },
5 | } 
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Test suite for the ElevenLabs MCP project backend.
3 | """
4 | 
```
--------------------------------------------------------------------------------
/src/frontend/tsconfig.json:
--------------------------------------------------------------------------------
```json
1 | {
2 |   "files": [],
3 |   "references": [
4 |     { "path": "./tsconfig.app.json" },
5 |     { "path": "./tsconfig.node.json" }
6 |   ]
7 | }
8 | 
```
--------------------------------------------------------------------------------
/src/frontend/src/main.tsx:
--------------------------------------------------------------------------------
```typescript
 1 | import { StrictMode } from 'react'
 2 | import { createRoot } from 'react-dom/client'
 3 | import './index.css'
 4 | import App from './App.tsx'
 5 | 
 6 | createRoot(document.getElementById('root')!).render(
 7 |   <StrictMode>
 8 |     <App />
 9 |   </StrictMode>,
10 | )
11 | 
```
--------------------------------------------------------------------------------
/src/frontend/build.sh:
--------------------------------------------------------------------------------
```bash
 1 | #!/bin/bash
 2 | 
 3 | # Build the frontend
 4 | echo "Building frontend..."
 5 | npm run build
 6 | 
 7 | # Create static directory in backend if it doesn't exist
 8 | mkdir -p ../backend/static
 9 | 
10 | # Copy the build output to the backend static directory
11 | echo "Copying build files to backend/static..."
12 | cp -r dist/* ../backend/static/
13 | 
14 | echo "Frontend build complete!" 
```
--------------------------------------------------------------------------------
/src/frontend/index.html:
--------------------------------------------------------------------------------
```html
 1 | <!doctype html>
 2 | <html lang="en">
 3 |   <head>
 4 |     <meta charset="UTF-8" />
 5 |     <link rel="icon" type="image/svg+xml" href="/vite.svg" />
 6 |     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
 7 |     <title>Vite + React + TS</title>
 8 |   </head>
 9 |   <body>
10 |     <div id="root"></div>
11 |     <script type="module" src="/src/main.tsx"></script>
12 |   </body>
13 | </html>
14 | 
```
--------------------------------------------------------------------------------
/src/backend/__main__.py:
--------------------------------------------------------------------------------
```python
 1 | import uvicorn
 2 | import os
 3 | from dotenv import load_dotenv
 4 | 
 5 | # Load environment variables
 6 | load_dotenv()
 7 | 
 8 | if __name__ == "__main__":
 9 |     # Get host and port from environment variables
10 |     host = os.getenv("HOST", "127.0.0.1")
11 |     port = int(os.getenv("PORT", "9020"))
12 |     reload = os.getenv("RELOAD", "true").lower() == "true"
13 | 
14 |     # Run the FastAPI application
15 |     uvicorn.run("src.backend.app:app", host=host, port=port, reload=reload)
16 | 
```
--------------------------------------------------------------------------------
/src/frontend/tsconfig.node.json:
--------------------------------------------------------------------------------
```json
 1 | {
 2 |   "compilerOptions": {
 3 |     "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
 4 |     "target": "ES2022",
 5 |     "lib": ["ES2023"],
 6 |     "module": "ESNext",
 7 |     "skipLibCheck": true,
 8 | 
 9 |     /* Bundler mode */
10 |     "moduleResolution": "bundler",
11 |     "allowImportingTsExtensions": true,
12 |     "isolatedModules": true,
13 |     "moduleDetection": "force",
14 |     "noEmit": true,
15 | 
16 |     /* Linting */
17 |     "strict": true,
18 |     "noUnusedLocals": true,
19 |     "noUnusedParameters": true,
20 |     "noFallthroughCasesInSwitch": true,
21 |     "noUncheckedSideEffectImports": true
22 |   },
23 |   "include": ["vite.config.ts"]
24 | }
25 | 
```
--------------------------------------------------------------------------------
/src/frontend/tsconfig.app.json:
--------------------------------------------------------------------------------
```json
 1 | {
 2 |   "compilerOptions": {
 3 |     "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
 4 |     "target": "ES2020",
 5 |     "useDefineForClassFields": true,
 6 |     "lib": ["ES2020", "DOM", "DOM.Iterable"],
 7 |     "module": "ESNext",
 8 |     "skipLibCheck": true,
 9 | 
10 |     /* Bundler mode */
11 |     "moduleResolution": "bundler",
12 |     "allowImportingTsExtensions": true,
13 |     "isolatedModules": true,
14 |     "moduleDetection": "force",
15 |     "noEmit": true,
16 |     "jsx": "react-jsx",
17 | 
18 |     /* Linting */
19 |     "strict": true,
20 |     "noUnusedLocals": true,
21 |     "noUnusedParameters": true,
22 |     "noFallthroughCasesInSwitch": true,
23 |     "noUncheckedSideEffectImports": true
24 |   },
25 |   "include": ["src"]
26 | }
27 | 
```
--------------------------------------------------------------------------------
/scripts/build-run.sh:
--------------------------------------------------------------------------------
```bash
 1 | #!/bin/bash
 2 | set -e
 3 | 
 4 | # Configurable variables
 5 | IMAGE_NAME="jessica-backend"
 6 | TAG=${TAG:-"latest"}
 7 | 
 8 | # Build the Docker image
 9 | echo "Building Docker image: ${IMAGE_NAME}:${TAG}"
10 | docker build -t ${IMAGE_NAME}:${TAG} .
11 | 
12 | # Run locally when the --run flag is given
13 | if [ "$1" = "--run" ]; then
14 |     echo "Starting container..."
15 |     docker run -it --rm \
16 |         -p 9020:9020 \
17 |         -p 9022:9022 \
18 |         -e ELEVENLABS_API_KEY=${ELEVENLABS_API_KEY:-"your-api-key-here"} \
19 |         ${IMAGE_NAME}:${TAG}
20 | else
21 |     echo "Image built successfully: ${IMAGE_NAME}:${TAG}"
22 |     echo "Use --run to start the container locally"
23 |     echo "Example: ./scripts/build-run.sh --run"
24 | fi
```
--------------------------------------------------------------------------------
/src/frontend/eslint.config.js:
--------------------------------------------------------------------------------
```javascript
 1 | import js from '@eslint/js'
 2 | import globals from 'globals'
 3 | import reactHooks from 'eslint-plugin-react-hooks'
 4 | import reactRefresh from 'eslint-plugin-react-refresh'
 5 | import tseslint from 'typescript-eslint'
 6 | 
 7 | export default tseslint.config(
 8 |   { ignores: ['dist'] },
 9 |   {
10 |     extends: [js.configs.recommended, ...tseslint.configs.recommended],
11 |     files: ['**/*.{ts,tsx}'],
12 |     languageOptions: {
13 |       ecmaVersion: 2020,
14 |       globals: globals.browser,
15 |     },
16 |     plugins: {
17 |       'react-hooks': reactHooks,
18 |       'react-refresh': reactRefresh,
19 |     },
20 |     rules: {
21 |       ...reactHooks.configs.recommended.rules,
22 |       'react-refresh/only-export-components': [
23 |         'warn',
24 |         { allowConstantExport: true },
25 |       ],
26 |     },
27 |   },
28 | )
29 | 
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
 1 | FROM python:3.11-slim
 2 | 
 3 | WORKDIR /app
 4 | 
 5 | # Install system dependencies
 6 | RUN apt-get update && apt-get install -y --no-install-recommends \
 7 |     gcc \
 8 |     python3-dev \
 9 |     curl \
10 |     && rm -rf /var/lib/apt/lists/*
11 | 
12 | # Install Poetry
13 | RUN pip install poetry==1.6.1
14 | 
15 | # Configure Poetry (do not create a virtual environment)
16 | RUN poetry config virtualenvs.create false
17 | 
18 | # Copy project files
19 | COPY pyproject.toml poetry.lock README.md ./
20 | COPY src/ /app/src/
21 | COPY .env.example /app/.env
22 | 
23 | # Install dependencies
24 | RUN poetry install --only main --no-interaction --no-ansi
25 | 
26 | # Adjust permissions
27 | RUN chmod -R 755 /app
28 | 
29 | # Expose the port the backend listens on
30 | EXPOSE 9020
31 | 
32 | # Set environment variables (can be overridden by an external .env or ENV)
33 | ENV HOST=0.0.0.0
34 | ENV PORT=9020
35 | ENV BASE_PATH=/jessica-service
36 | 
37 | # Start the application
38 | CMD ["python", "-m", "src.backend"] 
```
--------------------------------------------------------------------------------
/src/frontend/package.json:
--------------------------------------------------------------------------------
```json
 1 | {
 2 |   "name": "frontend",
 3 |   "private": true,
 4 |   "version": "0.0.0",
 5 |   "type": "module",
 6 |   "scripts": {
 7 |     "dev": "vite",
 8 |     "build": "tsc -b && vite build",
 9 |     "lint": "eslint .",
10 |     "preview": "vite preview"
11 |   },
12 |   "dependencies": {
13 |     "@emotion/react": "^11.14.0",
14 |     "@emotion/styled": "^11.14.0",
15 |     "@mui/icons-material": "^6.4.5",
16 |     "@mui/lab": "^6.0.0-beta.29",
17 |     "@mui/material": "^6.4.5",
18 |     "axios": "^1.7.9",
19 |     "react": "^19.0.0",
20 |     "react-dom": "^19.0.0",
21 |     "react-icons": "^5.5.0"
22 |   },
23 |   "devDependencies": {
24 |     "@eslint/js": "^9.19.0",
25 |     "@types/node": "^22.13.5",
26 |     "@types/react": "^19.0.8",
27 |     "@types/react-dom": "^19.0.3",
28 |     "@vitejs/plugin-react": "^4.3.4",
29 |     "autoprefixer": "^10.4.20",
30 |     "eslint": "^9.19.0",
31 |     "eslint-plugin-react-hooks": "^5.0.0",
32 |     "eslint-plugin-react-refresh": "^0.4.18",
33 |     "globals": "^15.14.0",
34 |     "typescript": "~5.7.2",
35 |     "typescript-eslint": "^8.22.0",
36 |     "vite": "^6.1.0"
37 |   }
38 | }
39 | 
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
 1 | [tool.poetry]
 2 | name = "elevenlabs-mcp"
 3 | version = "0.1.0"
 4 | description = "ElevenLabs Text-to-Speech MCP Integration for Cursor"
 5 | authors = ["Sebastian Georgi"]
 6 | license = "MIT"
 7 | readme = "README.md"
 8 | packages = [{include = "src"}]
 9 | 
10 | [tool.poetry.dependencies]
11 | python = "^3.11"
12 | mcp = "^1.3.0"
13 | fastapi = ">=0.111.0"
14 | python-dotenv = "^1.0.1"
15 | pyyaml = "^6.0.1"
16 | elevenlabs = "^0.2.27"
17 | websockets = "^12.0"
18 | uvicorn = {extras = ["standard"], version = "^0.27.1"}
19 | aiohttp = "^3.11.13"
20 | starlette = ">=0.41.3"
21 | sse-starlette = "^2.2.1"
22 | 
23 | [tool.poetry.group.dev.dependencies]
24 | pytest = "^8.0.2"
25 | pytest-asyncio = "^0.23.5"
26 | black = "^24.2.0"
27 | isort = "^5.13.2"
28 | mypy = "^1.8.0"
29 | ruff = "^0.2.2"
30 | pre-commit = "^3.6.2"
31 | 
32 | [build-system]
33 | requires = ["poetry-core"]
34 | build-backend = "poetry.core.masonry.api"
35 | 
36 | [tool.poetry.scripts]
37 | elevenlabs-mcp = "src.mcp_binary.main:main"
38 | start = "src.backend.__main__:main"
39 | 
40 | [tool.black]
41 | line-length = 100
42 | target-version = ["py311"]
43 | 
44 | [tool.isort]
45 | profile = "black"
46 | line_length = 100
47 | multi_line_output = 3
48 | 
49 | [tool.mypy]
50 | python_version = "3.11"
51 | strict = true
52 | warn_return_any = true
53 | warn_unused_configs = true
54 | disallow_untyped_defs = true
55 | 
56 | [tool.ruff]
57 | line-length = 100
58 | target-version = "py311"
59 | 
```
--------------------------------------------------------------------------------
/src/frontend/src/index.css:
--------------------------------------------------------------------------------
```css
 1 | /* Base styles for Material UI integration */
 2 | :root {
 3 |   font-family: 'Inter', 'Roboto', 'Helvetica', 'Arial', sans-serif;
 4 |   line-height: 1.5;
 5 |   font-weight: 400;
 6 |   font-synthesis: none;
 7 |   text-rendering: optimizeLegibility;
 8 |   -webkit-font-smoothing: antialiased;
 9 |   -moz-osx-font-smoothing: grayscale;
10 | }
11 | 
12 | body {
13 |   margin: 0;
14 |   min-width: 320px;
15 |   min-height: 100vh;
16 | }
17 | 
18 | /* Reset some browser defaults */
19 | * {
20 |   box-sizing: border-box;
21 | }
22 | 
23 | /* Focus styles for accessibility */
24 | :focus {
25 |   outline: 2px solid #6366f1;
26 |   outline-offset: 2px;
27 | }
28 | 
29 | /* Custom styles below */
30 | a {
31 |   font-weight: 500;
32 |   color: #646cff;
33 |   text-decoration: inherit;
34 | }
35 | a:hover {
36 |   color: #535bf2;
37 | }
38 | 
39 | h1 {
40 |   font-size: 3.2em;
41 |   line-height: 1.1;
42 | }
43 | 
44 | button {
45 |   border-radius: 8px;
46 |   border: 1px solid transparent;
47 |   padding: 0.6em 1.2em;
48 |   font-size: 1em;
49 |   font-weight: 500;
50 |   font-family: inherit;
51 |   background-color: #1a1a1a;
52 |   cursor: pointer;
53 |   transition: border-color 0.25s;
54 | }
55 | button:hover {
56 |   border-color: #646cff;
57 | }
58 | button:focus,
59 | button:focus-visible {
60 |   outline: 4px auto -webkit-focus-ring-color;
61 | }
62 | 
63 | @media (prefers-color-scheme: light) {
64 |   :root {
65 |     color: #213547;
66 |     background-color: #ffffff;
67 |   }
68 |   a:hover {
69 |     color: #747bff;
70 |   }
71 |   button {
72 |     background-color: #f9f9f9;
73 |   }
74 | }
75 | 
```
--------------------------------------------------------------------------------
/src/frontend/public/vite.svg:
--------------------------------------------------------------------------------
```
1 | <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
```
--------------------------------------------------------------------------------
/scripts/build-push-ecr.sh:
--------------------------------------------------------------------------------
```bash
 1 | #!/bin/bash
 2 | set -e
 3 | 
 4 | # Configurable variables
 5 | AWS_REGION=${AWS_REGION:-"eu-central-1"}
 6 | IMAGE_TAG=${IMAGE_TAG:-"latest"}
 7 | ENV=${ENV:-"prod"}
 8 | REPO_NAME="jessica"
 9 | 
10 | # Check whether Colima is running; start it if not
11 | if ! colima status 2>/dev/null | grep -q "running"; then
12 |   echo "Colima is not running. Starting Colima..."
13 |   colima start
14 | fi
15 | 
16 | echo "Using ECR repository: $REPO_NAME"
17 | echo "Using environment: $ENV"
18 | 
19 | # Fetch the repository URL (|| keeps set -e from exiting before the error message below)
20 | REPO_URL=$(aws ecr describe-repositories --repository-names $REPO_NAME --region $AWS_REGION --query 'repositories[0].repositoryUri' --output text) || REPO_URL=""
21 | 
22 | if [ -z "$REPO_URL" ]; then
23 |   echo "Error: ECR repository not found or AWS CLI error"
24 |   echo "Make sure the repository exists and your AWS credentials are correct."
25 |   exit 1
26 | fi
27 | 
28 | echo "ECR Repository URL: $REPO_URL"
29 | 
30 | # Log in to ECR - adjusted for Colima without the Docker credential helper
31 | echo "Logging in to ECR..."
32 | # Bypass the Docker credential helper by using --password-stdin
33 | AWS_ECR_PASSWORD=$(aws ecr get-login-password --region $AWS_REGION)
34 | echo $AWS_ECR_PASSWORD | docker login --username AWS --password-stdin $REPO_URL
35 | 
36 | # Tag the Docker image while building it
37 | echo "Building Docker image: $REPO_URL:$IMAGE_TAG"
38 | docker build -t $REPO_URL:$IMAGE_TAG .
39 | 
40 | # Re-tag the Docker image (a no-op here, since the build already used the full tag)
41 | echo "Tagging Docker image..."
42 | docker tag $REPO_URL:$IMAGE_TAG $REPO_URL:$IMAGE_TAG
43 | 
44 | # Push the Docker image
45 | echo "Pushing Docker image to ECR..."
46 | docker push $REPO_URL:$IMAGE_TAG
47 | 
48 | echo "Done! Image was successfully pushed to $REPO_URL:$IMAGE_TAG."
```
--------------------------------------------------------------------------------
/src/frontend/vite.config.ts:
--------------------------------------------------------------------------------
```typescript
 1 | import { defineConfig, loadEnv } from 'vite'
 2 | import react from '@vitejs/plugin-react'
 3 | import path from 'path'
 4 | import fs from 'fs'
 5 | 
 6 | // Load environment variables from the root .env file
 7 | const loadRootEnv = () => {
 8 |   const rootEnvPath = path.resolve(__dirname, '../../.env')
 9 |   if (fs.existsSync(rootEnvPath)) {
10 |     const envContent = fs.readFileSync(rootEnvPath, 'utf-8')
11 |     const env: Record<string, string> = {}
12 |     
13 |     envContent.split('\n').forEach(line => {
14 |       const match = line.match(/^\s*([\w.-]+)\s*=\s*(.*)?\s*$/)
15 |       if (match && !line.startsWith('#')) {
16 |         const key = match[1]
17 |         let value = match[2] || ''
18 |         if (value.startsWith('"') && value.endsWith('"')) {
19 |           value = value.substring(1, value.length - 1)
20 |         }
21 |         env[key] = value
22 |       }
23 |     })
24 |     
25 |     return env
26 |   }
27 |   return {}
28 | }
29 | 
30 | // https://vitejs.dev/config/
31 | export default defineConfig(({ mode }) => {
32 |   // Load env variables
33 |   const env = loadEnv(mode, process.cwd())
34 |   const rootEnv = loadRootEnv()
35 |   
36 |   // Combine env variables, with rootEnv taking precedence
37 |   const combinedEnv = { ...env, ...rootEnv }
38 |   
39 |   // Get backend host and port from env
40 |   const backendHost = combinedEnv.HOST || '127.0.0.1'
41 |   const backendPort = combinedEnv.PORT || '9020'
42 |   
43 |   return {
44 |     plugins: [react()],
45 |     server: {
46 |       proxy: {
47 |         // Proxy API requests to the backend during development
48 |         '/api': {
49 |           target: `http://${backendHost}:${backendPort}`,
50 |           changeOrigin: true,
51 |           secure: false,
52 |         },
53 |       },
54 |     },
55 |     build: {
56 |       // Output to the FastAPI static directory when building for production
57 |       outDir: path.resolve(__dirname, '../backend/static'),
58 |       emptyOutDir: true,
59 |     },
60 |   }
61 | })
62 | 
```
--------------------------------------------------------------------------------
/tests/backend/conftest.py:
--------------------------------------------------------------------------------
```python
 1 | """
 2 | Pytest configuration and fixtures.
 3 | """
 4 | 
 5 | import pytest
 6 | from unittest.mock import MagicMock, patch
 7 | from pathlib import Path
 8 | import tempfile
 9 | import shutil
10 | import json
11 | 
12 | 
13 | @pytest.fixture
14 | def mock_elevenlabs():
15 |     """Mock ElevenLabs API responses."""
16 |     with (
17 |         patch("elevenlabs.generate") as mock_generate,
18 |         patch("elevenlabs.voices") as mock_voices,
19 |         patch("elevenlabs.Models") as mock_models,
20 |     ):
21 |         # Mock generate function
22 |         mock_generate.return_value = b"fake_audio_data"
23 | 
24 |         # Mock voices
25 |         mock_voices.return_value = [
26 |             MagicMock(voice_id="voice1", name="Test Voice 1"),
27 |             MagicMock(voice_id="voice2", name="Test Voice 2"),
28 |         ]
29 | 
30 |         # Mock models
31 |         mock_models.return_value = [
32 |             MagicMock(model_id="model1", name="Test Model 1"),
33 |             MagicMock(model_id="model2", name="Test Model 2"),
34 |         ]
35 | 
36 |         yield {"generate": mock_generate, "voices": mock_voices, "models": mock_models}
37 | 
38 | 
39 | @pytest.fixture
40 | def temp_config_dir():
41 |     """Create a temporary config directory for tests."""
42 |     temp_dir = tempfile.mkdtemp()
43 |     temp_path = Path(temp_dir)
44 | 
45 |     # Create default config
46 |     config = {
47 |         "default_voice_id": "voice1",
48 |         "default_model_id": "model1",
49 |         "settings": {"auto_play": True, "save_audio": False, "use_streaming": False},
50 |     }
51 | 
52 |     config_file = temp_path / "config.json"
53 |     with open(config_file, "w") as f:
54 |         json.dump(config, f)
55 | 
56 |     yield temp_path
57 | 
58 |     # Cleanup
59 |     shutil.rmtree(temp_dir)
60 | 
61 | 
62 | @pytest.fixture
63 | def mock_subprocess():
64 |     """Mock subprocess for audio playback."""
65 |     with patch("subprocess.Popen") as mock_popen:
66 |         mock_popen.return_value = MagicMock()
67 |         yield mock_popen
68 | 
```
--------------------------------------------------------------------------------
/src/backend/mcp_tools.py:
--------------------------------------------------------------------------------
```python
 1 | """
 2 | MCP Tools for ElevenLabs TTS
 3 | 
 4 | This module defines the MCP tools that will be exposed to Cursor.
 5 | """
 6 | 
 7 | import logging
 8 | import base64
 9 | from typing import Dict, Any
10 | from .elevenlabs_client import ElevenLabsClient
11 | from mcp.server.fastmcp import FastMCP
12 | from .websocket import manager
13 | from .routes import load_config
14 | 
15 | # Configure logging
16 | logging.basicConfig(level=logging.INFO)
17 | logger = logging.getLogger(__name__)
18 | 
19 | # Initialize ElevenLabs client
20 | client = None  # We'll initialize this when registering tools
21 | 
22 | 
23 | def register_mcp_tools(mcp_server: FastMCP, test_mode: bool = False) -> None:
24 |     """Register MCP tools with the server."""
25 |     global client
26 |     client = ElevenLabsClient(test_mode=test_mode)
27 | 
28 |     @mcp_server.tool("speak_text")
29 |     async def speak_text(text: str) -> Dict[str, Any]:
30 |         """Convert text to speech using ElevenLabs.
31 | 
32 |         Args:
33 |             text: The text to convert to speech
34 | 
35 |         Returns:
36 |             A dictionary with the result of the operation
37 |         """
38 |         try:
39 |             # Load current configuration
40 |             config = load_config()
41 |             voice_id = config["default_voice_id"]
42 |             model_id = config["default_model_id"]
43 | 
44 |             logger.info(
45 |                 f"Converting text to speech with voice ID: {voice_id} and model ID: {model_id}"
46 |             )
47 | 
48 |             # Generate audio using our client instance
49 |             audio = await client.text_to_speech(text, voice_id, model_id)
50 | 
51 |             # Encode audio data as base64
52 |             encoded_audio = base64.b64encode(audio).decode("utf-8")
53 | 
54 |             # Send to all connected clients via WebSocket
55 |             await manager.broadcast_to_clients(
56 |                 {
57 |                     "type": "audio_data",
58 |                     "text": text,
59 |                     "voice_id": voice_id,
60 |                     "data": encoded_audio,
61 |                 }
62 |             )
63 | 
64 |             return {
65 |                 "success": True,
66 |                 "message": "Text converted to speech and sent to clients",
67 |                 "streaming": False,
68 |             }
69 |         except Exception as e:
70 |             logger.error(f"Error in speak_text: {e}")
71 |             return {"success": False, "error": str(e)}
72 | 
```
--------------------------------------------------------------------------------
/src/frontend/src/services/api.ts:
--------------------------------------------------------------------------------
```typescript
  1 | import axios from 'axios';
  2 | 
  3 | // Create an axios instance with default config
  4 | const api = axios.create({
  5 |   baseURL: '/api',
  6 |   headers: {
  7 |     'Content-Type': 'application/json',
  8 |   },
  9 | });
 10 | 
 11 | export interface Voice {
 12 |   voice_id: string;
 13 |   name: string;
 14 | }
 15 | 
 16 | export interface Model {
 17 |   model_id: string;
 18 |   name: string;
 19 |   description?: string;
 20 | }
 21 | 
 22 | export interface Config {
 23 |   default_voice_id: string;
 24 |   default_model_id: string;
 25 |   settings: {
 26 |     auto_play: boolean;
 27 |   };
 28 | }
 29 | 
 30 | export const apiService = {
 31 |   // Get all available voices
 32 |   getVoices: async (): Promise<Voice[]> => {
 33 |     const response = await api.get<Voice[]>('/voices');
 34 |     return response.data;
 35 |   },
 36 | 
 37 |   // Convert text to speech
 38 |   textToSpeech: async (text: string, voiceId?: string, modelId?: string): Promise<void> => {
 39 |     await api.post('/tts', { text, voice_id: voiceId, model_id: modelId });
 40 |   },
 41 | 
 42 |   // Get audio stream URL
 43 |   getAudioUrl: (blob: Blob): string => {
 44 |     return URL.createObjectURL(blob);
 45 |   },
 46 | 
 47 |   // Get all available models
 48 |   getModels: async (): Promise<Model[]> => {
 49 |     try {
 50 |       const response = await api.get<Model[]>('/models');
 51 |       return response.data;
 52 |     } catch (error) {
 53 |       console.error('Error fetching models:', error);
 54 |       return [];
 55 |     }
 56 |   },
 57 | 
 58 |   // Get current configuration
 59 |   getConfig: async (): Promise<Config> => {
 60 |     const response = await api.get<Config>('/config');
 61 |     return response.data;
 62 |   },
 63 | 
 64 |   // Update configuration
 65 |   updateConfig: async (config: Partial<Config>): Promise<Config> => {
 66 |     const response = await api.post<Config>('/config', config);
 67 |     return response.data;
 68 |   },
 69 | };
 70 | 
 71 | /**
 72 |  * Connect to the WebSocket server for streaming audio
 73 |  */
 74 | export const connectWebSocket = (
 75 |   onMessage: (event: MessageEvent) => void,
 76 |   onOpen?: () => void,
 77 |   onClose?: () => void,
 78 |   onError?: (event: Event) => void
 79 | ): WebSocket => {
 80 |   const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
 81 |   const wsUrl = `${wsProtocol}//${window.location.hostname}:9020/ws`;
 82 |   
 83 |   const ws = new WebSocket(wsUrl);
 84 |   
 85 |   ws.onopen = () => {
 86 |     console.log('WebSocket connection established');
 87 |     if (onOpen) onOpen();
 88 |   };
 89 |   
 90 |   ws.onmessage = onMessage;
 91 |   
 92 |   ws.onclose = () => {
 93 |     console.log('WebSocket connection closed');
 94 |     if (onClose) onClose();
 95 |   };
 96 |   
 97 |   ws.onerror = (event) => {
 98 |     console.error('WebSocket error:', event);
 99 |     if (onError) onError(event);
100 |   };
101 |   
102 |   return ws;
103 | };
104 | 
105 | export default apiService; 
```
--------------------------------------------------------------------------------
/.dev-tools/prompts/prompt_pr.md:
--------------------------------------------------------------------------------
```markdown
  1 | # PR Description Generator
  2 | 
  3 | You are a specialized assistant for generating detailed and meaningful pull request descriptions based on the provided Git diff and associated commit messages.
  4 | 
  5 | ## Purpose
  6 | 
  7 | The bot receives:
  8 | - Git diff showing the changes made compared to the branch into which the code will be merged
  9 | - Detailed commit messages that correspond to these changes
 10 | 
 11 | The pull request description should:
 12 | - Summarize the key changes introduced by the diff
 13 | - Highlight the value delivered
 14 | - Show how the changes address the issues or user stories mentioned in the commit messages
 15 | - Ensure all ticket numbers are appropriately tagged and included
 16 | 
 17 | ## Key Aspects
 18 | 
 19 | ### 1. Use of Git Diff and Commit Messages
 20 | - Analyze the Git diff to identify and summarize the significant changes
 21 | - Use the detailed commit messages to ensure all relevant information is included
 22 | 
 23 | ### 2. Ticket Number Tagging
 24 | - All ticket numbers mentioned in the commit messages must be tagged
 25 | - Include all tagged tickets in the pull request description
 26 | 
 27 | ### 3. Title and Structure
 28 | - **Title**:
 29 |   - Must be concise
 30 |   - Must not exceed 72 characters (the conventional Git subject-line limit)
 31 |   - Should prepend a relevant type (e.g., `feat`, `fix`)
 32 |   - Should append the main ticket number (e.g., [TKD-84])
 33 | - **Markdown Formatting**:
 34 |   - Format entire description using Markdown for clarity
 35 |   - Use proper headings, lists, and sections
 36 | - **Sections**:
 37 |   - Overview
 38 |   - Key Changes
 39 |   - Technical Details
 40 |   - Ticket References
 41 | 
 42 | ### 4. Comprehensive Summary
 43 | - Provide meaningful summary combining:
 44 |   - Technical details
 45 |   - Overall impact of changes
 46 | - Ensure clear understanding of:
 47 |   - What has been done
 48 |   - Why it is important
 49 | 
 50 | ## Example Structure
 51 | 
 52 | ### Title
 53 | 
 54 | ```markdown
 55 | feat: Implement user authentication [TKD-84]
 56 | ```
 57 | 
 58 | ### Description Template
 59 | 
 60 | ```markdown
 61 | ## Overview
 62 | Brief description of the changes and their purpose
 63 | 
 64 | ## Key Changes
 65 | - Major change 1
 66 | - Major change 2
 67 | - Major change 3
 68 | 
 69 | ## Technical Details
 70 | - Detailed technical change 1
 71 | - Detailed technical change 2
 72 | - Implementation approach details
 73 | 
 74 | ## Impact
 75 | - Business value 1
 76 | - User benefit 1
 77 | - Performance improvement 1
 78 | 
 79 | ## Testing
 80 | - Test scenario 1
 81 | - Test scenario 2
 82 | - Verification steps
 83 | ```
 84 | 
 85 | ## GitHub CLI Usage
 86 | 
 87 | ### Single Command Approach
 88 | ```bash
 89 | gh pr create --title "feat: Your title" --body "$(printf 'Your markdown content with proper formatting')" --base dev
 90 | ```
 91 | 
 92 | ### Two-Step Approach (Recommended)
 93 | ```bash
 94 | # Step 1: Create PR with minimal content
 95 | gh pr create --title "feat: Your title" --body "Initial PR" --base dev
 96 | 
 97 | # Step 2: Update PR with formatted content
 98 | gh pr edit <PR_NUMBER> --body "$(cat << 'EOT'
 99 | ## Overview
100 | Your properly formatted
101 | markdown content here
102 | 
103 | ## Key Changes
104 | - Point 1
105 | - Point 2
106 | EOT
107 | )"
108 | ```
109 | 
110 | ### Common Pitfalls to Avoid
111 | - Don't use \n escape sequences in the --body argument
112 | - Don't use single quotes for the body content
113 | - Use heredoc (EOT) for multiline content
114 | - Always preview the formatting before submitting
115 | 
116 | ## Language Requirements
117 | - Accept input in German or English
118 | - Generate all output in English
119 | - Use technical but clear language
120 | - Maintain professional tone 
```
--------------------------------------------------------------------------------
/.github/workflows/python-backend-checks.yml:
--------------------------------------------------------------------------------
```yaml
  1 | name: Python Backend Checks
  2 | 
  3 | on:
  4 |   push:
  5 |     branches:
  6 |       - main
  7 |     paths:
  8 |       - 'src/**'
  9 |       - 'tests/**'
 10 |       - 'pyproject.toml'
 11 |       - 'poetry.lock'
 12 | 
 13 | # Cancel in-progress runs on the same branch
 14 | concurrency:
 15 |   group: ${{ github.workflow }}-${{ github.ref }}
 16 |   cancel-in-progress: true
 17 | 
 18 | jobs:
 19 |   prepare-environment:
 20 |     name: 🔧 Prepare Environment
 21 |     runs-on: ubuntu-latest
 22 |     outputs:
 23 |       cache-key: ${{ steps.cache-key.outputs.key }}
 24 |     steps:
 25 |       - uses: actions/checkout@v4
 26 |       
 27 |       - name: Generate cache key
 28 |         id: cache-key
 29 |         run: |
 30 |           echo "key=python-deps-${{ hashFiles('pyproject.toml', 'poetry.lock') }}" >> $GITHUB_OUTPUT
 31 | 
 32 |   code-quality:
 33 |     name: 🔍 Code Quality (Linting)
 34 |     needs: prepare-environment
 35 |     runs-on: ubuntu-latest
 36 | 
 37 |     steps:
 38 |     - uses: actions/checkout@v4
 39 | 
 40 |     - name: Set up Python
 41 |       uses: actions/setup-python@v5
 42 |       with:
 43 |         python-version: '3.13'
 44 | 
 45 |     - name: Cache dependencies
 46 |       uses: actions/cache@v3
 47 |       with:
 48 |         path: |
 49 |           ~/.cache/pip
 50 |           ~/.cache/poetry
 51 |           .venv
 52 |         key: ${{ needs.prepare-environment.outputs.cache-key }}
 53 |         restore-keys: |
 54 |           python-deps-
 55 | 
 56 |     - name: Install Poetry
 57 |       run: |
 58 |         curl -sSL https://install.python-poetry.org | python3 -
 59 | 
 60 |     - name: Install dependencies
 61 |       run: |
 62 |         poetry install
 63 | 
 64 |     - name: Run linting
 65 |       run: |
 66 |         poetry run ruff check .
 67 | 
 68 |   code-style:
 69 |     name: 💅 Code Style (Formatting)
 70 |     needs: prepare-environment
 71 |     runs-on: ubuntu-latest
 72 | 
 73 |     steps:
 74 |     - uses: actions/checkout@v4
 75 | 
 76 |     - name: Set up Python
 77 |       uses: actions/setup-python@v5
 78 |       with:
 79 |         python-version: '3.13'
 80 | 
 81 |     - name: Cache dependencies
 82 |       uses: actions/cache@v3
 83 |       with:
 84 |         path: |
 85 |           ~/.cache/pip
 86 |           ~/.cache/poetry
 87 |           .venv
 88 |         key: ${{ needs.prepare-environment.outputs.cache-key }}
 89 |         restore-keys: |
 90 |           python-deps-
 91 | 
 92 |     - name: Install Poetry
 93 |       run: |
 94 |         curl -sSL https://install.python-poetry.org | python3 -
 95 | 
 96 |     - name: Install dependencies
 97 |       run: |
 98 |         poetry install
 99 | 
100 |     - name: Check formatting
101 |       run: |
102 |         poetry run ruff format . --check
103 | 
104 |   unit-tests:
105 |     name: 🧪 Unit Tests
106 |     needs: prepare-environment
107 |     runs-on: ubuntu-latest
108 | 
109 |     steps:
110 |     - uses: actions/checkout@v4
111 | 
112 |     - name: Set up Python
113 |       uses: actions/setup-python@v5
114 |       with:
115 |         python-version: '3.13'
116 | 
117 |     - name: Cache dependencies
118 |       uses: actions/cache@v3
119 |       with:
120 |         path: |
121 |           ~/.cache/pip
122 |           ~/.cache/poetry
123 |           .venv
124 |         key: ${{ needs.prepare-environment.outputs.cache-key }}
125 |         restore-keys: |
126 |           python-deps-
127 | 
128 |     - name: Install Poetry
129 |       run: |
130 |         curl -sSL https://install.python-poetry.org | python3 -
131 | 
132 |     - name: Install dependencies
133 |       run: |
134 |         poetry install
135 | 
136 |     - name: Run tests
137 |       run: |
138 |         # Set dummy API key for testing
139 |         echo "ELEVENLABS_API_KEY=dummy_key_for_testing" >> $GITHUB_ENV
140 |         poetry run pytest tests/backend -v
141 |       env:
142 |         ELEVENLABS_API_KEY: dummy_key_for_testing 
```
--------------------------------------------------------------------------------
/.github/workflows/docker-build-and-push.yml:
--------------------------------------------------------------------------------
```yaml
 1 | name: Docker Build and Push
 2 | 
 3 | on:
 4 |   push:
 5 |     branches:
 6 |       - main
 7 |       - develop
 8 |     paths:
 9 |       - 'src/**'
10 |       - 'Dockerfile'
11 |       - '.github/workflows/docker-build-and-push.yml'
12 |   workflow_dispatch:
13 | 
14 | # Cancel in-progress runs on the same branch
15 | concurrency:
16 |   group: ${{ github.workflow }}-${{ github.ref }}
17 |   cancel-in-progress: true
18 | 
19 | jobs:
20 |   build-and-push:
21 |     name: 🐳 Build and Push Docker Image
22 |     runs-on: ubuntu-latest
23 |     
24 |     permissions:
25 |       contents: read
26 |       id-token: write
27 |     
28 |     steps:
29 |       - name: Checkout code
30 |         uses: actions/checkout@v4
31 |       
32 |       # Use a default role ARN initially to access S3
33 |       - name: Configure AWS credentials for S3 access
34 |         uses: aws-actions/configure-aws-credentials@v4
35 |         with:
36 |           role-to-assume: arn:aws:iam::927485958639:role/jessica-github-actions-deployment-role
37 |           aws-region: eu-central-1
38 |       
39 |       - name: Fetch Terraform State
40 |         id: terraform-state
41 |         run: |
42 |           # Install jq
43 |           sudo apt-get update && sudo apt-get install -y jq
44 |           
45 |           # Download the state file
46 |           aws s3 cp s3://georgi-io-terraform-state/services/jessica/terraform.tfstate ./terraform.tfstate
47 |           
48 |           # Extract values from state
49 |           ROLE_ARN=$(jq -r '.outputs.github_actions_deployment_role_arn.value' terraform.tfstate)
50 |           ECR_REPOSITORY=$(jq -r '.outputs.ecr_repository_name.value' terraform.tfstate)
51 |           ECS_CLUSTER=$(jq -r '.outputs.ecs_cluster_name.value' terraform.tfstate)
52 |           ECS_SERVICE=$(jq -r '.outputs.ecs_service_name.value' terraform.tfstate)
53 |           
54 |           # Set outputs
55 |           echo "role_arn=${ROLE_ARN}" >> $GITHUB_OUTPUT
56 |           echo "ecr_repository=${ECR_REPOSITORY}" >> $GITHUB_OUTPUT
57 |           echo "ecs_cluster=${ECS_CLUSTER}" >> $GITHUB_OUTPUT
58 |           echo "ecs_service=${ECS_SERVICE}" >> $GITHUB_OUTPUT
59 |           
60 |           # Clean up
61 |           rm terraform.tfstate
62 |       
63 |       # Re-configure AWS credentials with the proper role from Terraform state
64 |       - name: Configure AWS credentials for ECR
65 |         uses: aws-actions/configure-aws-credentials@v4
66 |         with:
67 |           role-to-assume: ${{ steps.terraform-state.outputs.role_arn }}
68 |           aws-region: eu-central-1
69 |       
70 |       - name: Login to Amazon ECR
71 |         id: login-ecr
72 |         uses: aws-actions/amazon-ecr-login@v2
73 |       
74 |       - name: Set up Docker Buildx
75 |         uses: docker/setup-buildx-action@v3
76 |       
77 |       - name: Build and push Docker image
78 |         uses: docker/build-push-action@v5
79 |         with:
80 |           context: .
81 |           push: true
82 |           tags: |
83 |             ${{ steps.login-ecr.outputs.registry }}/${{ steps.terraform-state.outputs.ecr_repository }}:latest
84 |             ${{ steps.login-ecr.outputs.registry }}/${{ steps.terraform-state.outputs.ecr_repository }}:${{ github.sha }}
85 |           cache-from: type=gha
86 |           cache-to: type=gha,mode=max
87 |       
88 |       - name: Force new deployment of ECS service
89 |         run: |
90 |           aws ecs update-service --cluster ${{ steps.terraform-state.outputs.ecs_cluster }} --service ${{ steps.terraform-state.outputs.ecs_service }} --force-new-deployment
91 |           echo "New deployment of ECS service initiated"
92 |       
93 |       - name: Set outputs for potential downstream jobs
94 |         id: vars
95 |         run: |
96 |           echo "image=${{ steps.login-ecr.outputs.registry }}/${{ steps.terraform-state.outputs.ecr_repository }}:${{ github.sha }}" >> $GITHUB_OUTPUT 
```
--------------------------------------------------------------------------------
/src/frontend/src/assets/react.svg:
--------------------------------------------------------------------------------
```
1 | <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 
10.89-20.637l-.001.001a307.318 307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>
```
--------------------------------------------------------------------------------
/src/backend/websocket.py:
--------------------------------------------------------------------------------
```python
  1 | import asyncio
  2 | import json
  3 | import logging
  4 | import base64
  5 | from typing import Dict, Optional, Set, AsyncGenerator
  6 | import os
  7 | from fastapi import WebSocket, WebSocketDisconnect
  8 | from dotenv import load_dotenv
  9 | 
 10 | # Configure logging
 11 | logging.basicConfig(level=logging.INFO)
 12 | logger = logging.getLogger(__name__)
 13 | 
 14 | # Load environment variables
 15 | load_dotenv()
 16 | WS_HOST = os.getenv("WS_HOST", "127.0.0.1")
 17 | PORT = int(os.getenv("PORT", "9020"))
 18 | 
 19 | 
 20 | class WebSocketManager:
 21 |     def __init__(self):
 22 |         self.active_connections: Set[WebSocket] = set()
 23 |         self.mcp_connection: Optional[WebSocket] = None
 24 |         logger.info(f"WebSocket manager initialized on {WS_HOST}:{PORT}")
 25 | 
 26 |     async def connect(self, websocket: WebSocket):
 27 |         await websocket.accept()
 28 |         self.active_connections.add(websocket)
 29 |         logger.info(f"New WebSocket connection: {websocket}")
 30 | 
 31 |     def disconnect(self, websocket: WebSocket):
 32 |         self.active_connections.discard(websocket)
 33 |         if self.mcp_connection == websocket:
 34 |             self.mcp_connection = None
 35 |             logger.info("MCP connection disconnected")
 36 |         logger.info(f"WebSocket disconnected: {websocket}")
 37 | 
 38 |     async def register_mcp(self, websocket: WebSocket):
 39 |         """Register a connection as the MCP binary connection"""
 40 |         self.mcp_connection = websocket
 41 |         logger.info(f"MCP binary registered: {websocket}")
 42 |         await self.broadcast_to_clients({"type": "mcp_status", "connected": True})
 43 | 
 44 |     async def send_to_mcp(self, message: Dict):
 45 |         """Send a message to the MCP binary"""
 46 |         if self.mcp_connection:
 47 |             await self.mcp_connection.send_text(json.dumps(message))
 48 |             logger.debug(f"Message sent to MCP: {message}")
 49 |         else:
 50 |             logger.warning("Attempted to send message to MCP, but no MCP connection is available")
 51 | 
 52 |     async def broadcast_to_clients(self, message: Dict):
 53 |         """Broadcast a message to all connected clients except MCP"""
 54 |         for connection in self.active_connections:
 55 |             if connection != self.mcp_connection:
 56 |                 await connection.send_text(json.dumps(message))
 57 |         logger.debug(
 58 |             f"Broadcast message to {len(self.active_connections) - (1 if self.mcp_connection else 0)} clients"
 59 |         )
 60 | 
 61 |     async def handle_mcp_message(self, message: Dict):
 62 |         """Handle a message from the MCP binary"""
 63 |         message_type = message.get("type")
 64 | 
 65 |         if message_type == "tts_result":
 66 |             # Forward TTS result to all clients
 67 |             await self.broadcast_to_clients(message)
 68 |         elif message_type == "voice_list":
 69 |             # Forward voice list to all clients
 70 |             await self.broadcast_to_clients(message)
 71 |         elif message_type == "audio_chunk":
 72 |             # Forward audio chunk to all clients
 73 |             await self.broadcast_to_clients(message)
 74 |         elif message_type == "audio_complete":
 75 |             # Forward audio complete message to all clients
 76 |             await self.broadcast_to_clients(message)
 77 |         elif message_type == "error":
 78 |             # Forward error to all clients
 79 |             await self.broadcast_to_clients(message)
 80 |         else:
 81 |             logger.warning(f"Unknown message type from MCP: {message_type}")
 82 | 
 83 |     async def stream_audio_to_clients(
 84 |         self, audio_stream: AsyncGenerator[bytes, None], text: str, voice_id: str
 85 |     ):
 86 |         """Stream audio chunks to all connected clients."""
 87 |         try:
 88 |             # Send start message
 89 |             await self.broadcast_to_clients(
 90 |                 {"type": "audio_start", "text": text, "voice_id": voice_id}
 91 |             )
 92 | 
 93 |             # Stream audio chunks
 94 |             chunk_count = 0
 95 |             async for chunk in audio_stream:
 96 |                 chunk_count += 1
 97 |                 # Encode chunk as base64 for JSON transmission
 98 |                 encoded_chunk = base64.b64encode(chunk).decode("utf-8")
 99 | 
100 |                 # Send chunk to all clients
101 |                 await self.broadcast_to_clients(
102 |                     {"type": "audio_chunk", "chunk_index": chunk_count, "data": encoded_chunk}
103 |                 )
104 | 
105 |                 # Small delay to avoid overwhelming clients
106 |                 await asyncio.sleep(0.01)
107 | 
108 |             # Send completion message
109 |             await self.broadcast_to_clients({"type": "audio_complete", "total_chunks": chunk_count})
110 | 
111 |             logger.info(f"Successfully streamed {chunk_count} audio chunks to clients")
112 |         except Exception as e:
113 |             logger.error(f"Error streaming audio to clients: {str(e)}")
114 |             await self.broadcast_to_clients(
115 |                 {"type": "error", "message": f"Audio streaming error: {str(e)}"}
116 |             )
117 | 
118 | 
119 | # Create a singleton instance
120 | manager = WebSocketManager()
121 | 
122 | 
123 | async def websocket_endpoint(websocket: WebSocket):
124 |     await manager.connect(websocket)
125 |     try:
126 |         while True:
127 |             data = await websocket.receive_text()
128 |             message = json.loads(data)
129 | 
130 |             # Check if this is an MCP registration message
131 |             if message.get("type") == "register" and message.get("client") == "mcp":
132 |                 await manager.register_mcp(websocket)
133 |                 continue
134 | 
135 |             # If this is the MCP connection, handle its messages
136 |             if websocket == manager.mcp_connection:
137 |                 await manager.handle_mcp_message(message)
138 |             else:
139 |                 # This is a regular client, forward to MCP if needed
140 |                 if message.get("type") == "tts_request":
141 |                     await manager.send_to_mcp(message)
142 |     except WebSocketDisconnect:
143 |         manager.disconnect(websocket)
144 |     except Exception as e:
145 |         logger.error(f"WebSocket error: {str(e)}")
146 |         manager.disconnect(websocket)
147 | 
```
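
To illustrate the registration handshake that `websocket_endpoint` handles, a client could identify itself as the MCP binary roughly as follows. This is a sketch only; it assumes the third-party `websockets` package and a backend running locally on port 9020.

```python
import asyncio
import json

import websockets  # third-party package, assumed to be installed separately


async def register_as_mcp(url: str = "ws://127.0.0.1:9020/ws") -> None:
    async with websockets.connect(url) as ws:
        # The WebSocketManager treats this message as an MCP registration.
        await ws.send(json.dumps({"type": "register", "client": "mcp"}))
        # From now on, tts_request messages from regular clients are forwarded here.
        while True:
            print(await ws.recv())


if __name__ == "__main__":
    asyncio.run(register_as_mcp())
```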
--------------------------------------------------------------------------------
/.dev-tools/prompts/prompt_user_story.md:
--------------------------------------------------------------------------------
```markdown
  1 | # User Story Generation
  2 | 
  3 | You are a specialized assistant for creating well-structured user stories as GitHub issues. Your task is to analyze the user's input, the existing codebase, and create a comprehensive user story that combines both business and technical perspectives.
  4 | 
  5 | ## Story Structure
  6 | 
  7 | ### Part 1: Business Requirements (Product Manager View)
  8 | 
  9 | #### Story Description
 10 | - Clear description of what needs to be done
 11 | - Written from user's perspective ("As a user, I want to...")
 12 | - Clear business value and purpose
 13 | - Target audience/user group
 14 | 
 15 | #### Acceptance Criteria
 16 | - List of specific, testable criteria
 17 | - Clear conditions for story completion
 18 | - Edge cases and error scenarios
 19 | - User experience requirements
 20 | 
 21 | ### Part 2: Technical Analysis (Engineering Manager View)
 22 | 
 23 | #### Implementation Analysis
 24 | - Analysis of existing codebase impact
 25 | - Identification of affected components
 26 | - Dependencies and prerequisites
 27 | - Potential risks or challenges
 28 | 
 29 | #### Implementation Approach
 30 | - Suggested technical solution
 31 | - Architecture considerations
 32 | - Required changes to existing code
 33 | - New components or services needed
 34 | - Estimated complexity
 35 | 
 36 | ## Output Format
 37 | 
 38 | The story should be formatted in Markdown with proper indentation and spacing:
 39 | 
 40 | ```markdown
 41 | # User Story: [Title]
 42 | 
 43 | ## Business Requirements
 44 | 
 45 | ### Description
 46 | [User story description]
 47 | 
 48 | ### Acceptance Criteria
 49 | - [Main criterion]
 50 |   - [Sub-criterion 1]
 51 |   - [Sub-criterion 2]
 52 | - [Another criterion]
 53 |   - [Sub-criterion]
 54 | - [Simple criterion without sub-points]
 55 | 
 56 | ## Technical Analysis
 57 | 
 58 | ### Implementation Analysis
 59 | - [Analysis point]
 60 |   - [Supporting detail]
 61 | - [Another analysis point]
 62 |   - [Supporting detail]
 63 | 
 64 | ### Implementation Approach
 65 | 
 66 | #### [Component/Layer Name]
 67 | - [Implementation detail]
 68 |   - [Sub-detail]
 69 |   - [Sub-detail]
 70 | 
 71 | #### [Another Component/Layer]
 72 | 1. **[Step Title]:**
 73 |    - [Detail]
 74 |    - [Detail]
 75 | ```
 76 | 
 77 | ## GitHub Integration Guide
 78 | 
 79 | ### Default Project Information
 80 | - Project: "Sales1 Board" (georgi-io organization)
 81 | - Project Number: 1
 82 | - Project ID: PVT_kwDOBDnFac4AxIdX
 83 | 
 84 | ### Label Management
 85 | 1. Check if required labels exist:
 86 |    ```bash
 87 |    gh label list
 88 |    ```
 89 | 
 90 | 2. Create missing labels if needed:
 91 |    ```bash
 92 |    gh label create <name> --color <color> --description "<description>"
 93 |    ```
 94 | 
 95 | 3. Common Labels:
 96 |    - `architecture` - Architecture and system design
 97 |    - `planning` - Planning and conceptual work
 98 |    - `documentation` - Documentation updates
 99 |    - `low-priority` - Low priority tasks
100 | 
101 | ### Issue Creation and Integration Steps
102 | 
103 | 1. Create issue with initial content:
104 |    ```bash
105 |    gh issue create --title "<title>" --body-file <file>
106 |    ```
107 | 
108 | 2. Add to project:
109 |    ```bash
110 |    gh issue edit <number> --add-project "Sales1 Board"
111 |    ```
112 | 
113 | 3. Add labels:
114 |    ```bash
115 |    gh issue edit <number> --add-label "<label1>" --add-label "<label2>"
116 |    ```
117 | 
118 | 4. Get project item ID (required for status update):
119 |    ```bash
120 |    # List all items and their IDs
121 |    gh api graphql -f query='
122 |    query {
123 |      organization(login: "georgi-io") {
124 |        projectV2(number: 1) {
125 |          items(first: 20) {
126 |            nodes {
127 |              id
128 |              content {
129 |                ... on Issue {
130 |                  title
131 |                  number
132 |                }
133 |              }
134 |            }
135 |          }
136 |        }
137 |      }
138 |    }'
139 |    ```
140 | 
141 | 5. Set status:
142 |    ```bash
143 |    # Status field ID: PVTSSF_lADOBDnFac4AxIdXzgnSuew
144 |    # Status options:
145 |    # - Todo: f75ad846
146 |    # - In Progress: 47fc9ee4
147 |    # - Done: 98236657
148 |    
149 |    gh api graphql -f query='
150 |    mutation {
151 |      updateProjectV2ItemFieldValue(
152 |        input: {
153 |          projectId: "PVT_kwDOBDnFac4AxIdX"
154 |          fieldId: "PVTSSF_lADOBDnFac4AxIdXzgnSuew"
155 |          itemId: "<item-id from step 4>"
156 |          value: { singleSelectOptionId: "<status-id>" }
157 |        }
158 |      ) {
159 |        projectV2Item { id }
160 |      }
161 |    }'
162 |    ```
163 | 
164 | ## Notes
165 | - Always verify the issue is created correctly
166 | - Check if labels exist before creating them
167 | - Ensure the issue appears in the project board
168 | - Verify the status is set correctly
169 | 
170 | ## Formatting Rules
171 | 
172 | 1. **Indentation**
173 |    - Use 2 spaces for each level of indentation in lists
174 |    - Use 3 spaces for code block content indentation
175 |    - Maintain consistent spacing between sections
176 | 
177 | 2. **Code Blocks**
178 |    - Always specify the language for code blocks
179 |    - Indent code properly within the blocks
180 |    - Use proper escaping for special characters
181 | 
182 | 3. **Lists**
183 |    - Use proper indentation for nested lists
184 |    - Add blank lines between major sections
185 |    - Maintain consistent bullet point style
186 | 
187 | 4. **Headers**
188 |    - Use proper header hierarchy (H1 > H2 > H3 > H4)
189 |    - Add blank lines before and after headers
190 |    - Keep header text concise and descriptive
191 | 
192 | ## Process Steps
193 | 
194 | 1. Gather requirements
195 | 2. Analyze technical implications
196 | 3. Structure the story following the template
197 | 4. Apply proper formatting and indentation
198 | 5. Review and validate markdown rendering
199 | 6. Create temporary file in .dev-tools/scripts/temp_output/
200 |    ```bash
201 |    # Store issue content in temp directory
202 |    TEMP_FILE=".dev-tools/scripts/temp_output/issue_$(date +%Y%m%d_%H%M%S).md"
203 |    ```
204 | 7. Create GitHub issue with formatted content
205 | 8. Clean up temporary files:
206 |    ```bash
207 |    # Remove any temporary files created during the process
208 |    rm -f .dev-tools/scripts/temp_output/issue_*.md
209 |    rm -f .temp_*.md
210 |    rm -f *.temp
211 |    rm -f *.tmp
212 |    ```
213 | 
214 | ## Language Requirements
215 | - Accept input in German or English
216 | - Generate output in English
217 | - Use clear, concise language
218 | - Maintain professional tone
219 | 
220 | ## Tips for Quality Stories
221 | 
222 | - Be specific and measurable
223 | - Include both happy and error paths
224 | - Consider performance implications
225 | - Think about testing requirements
226 | - Include security considerations
227 | - Consider scalability aspects
228 | 
229 | ## Repository Analysis
230 | 
231 | When analyzing the codebase:
232 | - Check for similar existing features
233 | - Identify affected components
234 | - Look for potential conflicts
235 | - Consider architecture patterns
236 | - Review existing implementations
237 | 
238 | ## Process Steps
239 | 
240 | 1. Gather user input for business requirements
241 | 2. Analyze codebase for technical implications
242 | 3. Generate structured story in Markdown
243 | 4. Show preview to user for confirmation
244 | 5. List available GitHub projects
245 | 6. Get user's project selection
246 | 7. Create GitHub issue with confirmed content 
```
--------------------------------------------------------------------------------
/src/backend/elevenlabs_client.py:
--------------------------------------------------------------------------------
```python
  1 | from typing import Optional, Dict, List, AsyncGenerator
  2 | import httpx
  3 | import os
  4 | from fastapi import HTTPException
  5 | import logging
  6 | import elevenlabs
  7 | from elevenlabs import generate, voices
  8 | import asyncio
  9 | 
 10 | # Configure logging
 11 | logging.basicConfig(level=logging.INFO)
 12 | logger = logging.getLogger(__name__)
 13 | 
 14 | 
 15 | class ElevenLabsClient:
 16 |     def __init__(self, test_mode: bool = False):
 17 |         """Initialize the ElevenLabs client.
 18 | 
 19 |         Args:
 20 |             test_mode: If True, use mock responses for testing
 21 |         """
 22 |         self.test_mode = test_mode
 23 | 
 24 |         if test_mode:
 25 |             self.api_key = "test_key"
 26 |         else:
 27 |             self.api_key = os.getenv("ELEVENLABS_API_KEY")
 28 |             if not self.api_key:
 29 |                 raise ValueError("ELEVENLABS_API_KEY environment variable is not set")
 30 | 
 31 |             # Set the API key for the elevenlabs library as well
 32 |             elevenlabs.set_api_key(self.api_key)
 33 | 
 34 |         self.base_url = "https://api.elevenlabs.io/v1"
 35 |         self.headers = {"Accept": "application/json", "xi-api-key": self.api_key}
 36 | 
 37 |     def _get_mock_audio(self, text: str) -> bytes:
 38 |         """Generate mock audio data for testing."""
 39 |         return f"Mock audio for: {text}".encode()
 40 | 
 41 |     def _get_mock_voices(self) -> List[Dict]:
 42 |         """Return mock voices for testing."""
 43 |         return [
 44 |             {"voice_id": "mock_voice_1", "name": "Mock Voice 1"},
 45 |             {"voice_id": "mock_voice_2", "name": "Mock Voice 2"},
 46 |         ]
 47 | 
 48 |     def _get_mock_models(self) -> List[Dict]:
 49 |         """Return mock models for testing."""
 50 |         return [
 51 |             {"model_id": "mock_model_1", "name": "Mock Model 1"},
 52 |             {"model_id": "mock_model_2", "name": "Mock Model 2"},
 53 |         ]
 54 | 
 55 |     async def text_to_speech(
 56 |         self, text: str, voice_id: str, model_id: Optional[str] = None
 57 |     ) -> bytes:
 58 |         """Convert text to speech."""
 59 |         if self.test_mode:
 60 |             return self._get_mock_audio(text)
 61 | 
 62 |         try:
 63 |             # Use the elevenlabs library directly for better compatibility
 64 |             audio = generate(text=text, voice=voice_id, model=model_id or "eleven_monolingual_v1")
 65 |             return audio
 66 |         except Exception as e:
 67 |             logger.error(f"Text-to-speech conversion failed: {str(e)}")
 68 |             raise HTTPException(
 69 |                 status_code=500,
 70 |                 detail=f"Text-to-speech conversion failed: {str(e)}",
 71 |             )
 72 | 
 73 |     async def get_voices(self) -> List[Dict]:
 74 |         """Fetch available voices."""
 75 |         if self.test_mode:
 76 |             return self._get_mock_voices()
 77 | 
 78 |         async with httpx.AsyncClient() as client:
 79 |             try:
 80 |                 response = await client.get(f"{self.base_url}/voices", headers=self.headers)
 81 |                 if response.status_code != 200:
 82 |                     error_detail = response.json() if response.content else "No error details"
 83 |                     logger.error(f"Failed to fetch voices: {error_detail}")
 84 |                     raise HTTPException(
 85 |                         status_code=response.status_code,
 86 |                         detail=f"Failed to fetch voices from ElevenLabs API: {error_detail}",
 87 |                     )
 88 |                 data = response.json()
 89 |                 return data["voices"]
 90 |             except httpx.RequestError as e:
 91 |                 logger.error(f"Connection error when fetching voices: {str(e)}")
 92 |                 raise HTTPException(
 93 |                     status_code=500, detail=f"Failed to connect to ElevenLabs API: {str(e)}"
 94 |                 )
 95 | 
 96 |     async def get_models(self) -> List[Dict]:
 97 |         """Fetch available models."""
 98 |         if self.test_mode:
 99 |             return self._get_mock_models()
100 | 
101 |         async with httpx.AsyncClient() as client:
102 |             try:
103 |                 response = await client.get(f"{self.base_url}/models", headers=self.headers)
104 |                 if response.status_code != 200:
105 |                     error_detail = response.json() if response.content else "No error details"
106 |                     logger.error(f"Failed to fetch models: {error_detail}")
107 |                     raise HTTPException(
108 |                         status_code=response.status_code,
109 |                         detail=f"Failed to fetch models from ElevenLabs API: {error_detail}",
110 |                     )
111 |                 data = response.json()
112 |                 models = []
113 |                 for model in data:
114 |                     models.append(
115 |                         {"model_id": model.get("model_id", ""), "name": model.get("name", "")}
116 |                     )
117 |                 return models
118 |             except httpx.RequestError as e:
119 |                 logger.error(f"Connection error when fetching models: {str(e)}")
120 |                 raise HTTPException(
121 |                     status_code=500, detail=f"Failed to connect to ElevenLabs API: {str(e)}"
122 |                 )
123 | 
124 |     async def text_to_speech_stream(
125 |         self, text: str, voice_id: str, model_id: Optional[str] = None
126 |     ) -> AsyncGenerator[bytes, None]:
127 |         """Stream text to speech conversion."""
128 |         if self.test_mode:
129 |             # In test mode, yield mock audio in chunks
130 |             mock_audio = self._get_mock_audio(text)
131 |             chunk_size = 1024
132 |             for i in range(0, len(mock_audio), chunk_size):
133 |                 yield mock_audio[i : i + chunk_size]
134 |                 await asyncio.sleep(0.1)  # Simulate streaming delay
135 |             return
136 | 
137 |         # Use generate(stream=True) from the elevenlabs library for streaming in production
138 |         if not self.api_key:
139 |             raise ValueError("API key is required for streaming")
140 | 
141 |         elevenlabs.set_api_key(self.api_key)
142 |         try:
143 |             audio_stream = generate(
144 |                 text=text,
145 |                 voice=voice_id,
146 |                 model=model_id or "eleven_monolingual_v1",
147 |                 stream=True,
148 |                 latency=3,
149 |             )
150 | 
151 |             for chunk in audio_stream:
152 |                 if isinstance(chunk, bytes):
153 |                     yield chunk
154 |                     await asyncio.sleep(0.01)
155 |         except Exception as e:
156 |             logger.error(f"Error during text-to-speech streaming: {str(e)}")
157 |             raise HTTPException(
158 |                 status_code=500, detail=f"Failed to stream text to speech: {str(e)}"
159 |             )
160 | 
161 |     def generate_speech(self, text: str, voice_id: Optional[str] = None) -> bytes:
162 |         """Generate speech from text using ElevenLabs API."""
163 |         return generate(text=text, voice=voice_id)
164 | 
165 |     def list_voices(self):
166 |         """List available voices from ElevenLabs API."""
167 |         return voices()
168 | 
```
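
A small usage sketch of the client in test mode (illustrative only; test mode returns mock data, so no API key or network access is needed, and the import path assumes the repository root is on `PYTHONPATH`):

```python
import asyncio

from src.backend.elevenlabs_client import ElevenLabsClient


async def main() -> None:
    client = ElevenLabsClient(test_mode=True)
    voices = await client.get_voices()  # mock voice list
    audio = await client.text_to_speech("Hello", voices[0]["voice_id"])
    print(voices, len(audio))


asyncio.run(main())
```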
--------------------------------------------------------------------------------
/src/backend/routes.py:
--------------------------------------------------------------------------------
```python
  1 | from fastapi import APIRouter, HTTPException
  2 | from pydantic import BaseModel
  3 | from typing import Optional, List, Dict, Any
  4 | import json
  5 | import base64
  6 | from pathlib import Path
  7 | from .elevenlabs_client import ElevenLabsClient
  8 | from .websocket import manager
  9 | from fastapi.responses import StreamingResponse
 10 | 
 11 | # Use versioned API prefix to match the auth-service pattern
 12 | router = APIRouter(prefix="/api/v1", tags=["TTS"])
 13 | client = ElevenLabsClient()
 14 | 
 15 | # Configuration paths
 16 | CONFIG_DIR = Path.home() / ".config" / "elevenlabs-mcp"
 17 | CONFIG_FILE = CONFIG_DIR / "config.json"
 18 | 
 19 | # Create config directory if it doesn't exist
 20 | CONFIG_DIR.mkdir(parents=True, exist_ok=True)
 21 | 
 22 | # Default configuration
 23 | DEFAULT_CONFIG = {
 24 |     "default_voice_id": "cgSgspJ2msm6clMCkdW9",  # Jessica's voice ID
 25 |     "default_model_id": "eleven_flash_v2_5",
 26 |     "settings": {
 27 |         "auto_play": True,
 28 |     },
 29 | }
 30 | 
 31 | 
 32 | class TTSRequest(BaseModel):
 33 |     text: str
 34 |     voice_id: Optional[str] = None
 35 |     model_id: Optional[str] = None
 36 | 
 37 | 
 38 | class MCPRequest(BaseModel):
 39 |     command: str
 40 |     params: Dict[str, Any]
 41 | 
 42 | 
 43 | class ConfigRequest(BaseModel):
 44 |     default_voice_id: Optional[str] = None
 45 |     default_model_id: Optional[str] = None
 46 |     settings: Optional[Dict] = None
 47 | 
 48 | 
 49 | class Voice(BaseModel):
 50 |     voice_id: str
 51 |     name: str
 52 | 
 53 | 
 54 | class Model(BaseModel):
 55 |     model_id: str
 56 |     name: str
 57 | 
 58 | 
 59 | def load_config() -> Dict[str, Any]:
 60 |     """Load configuration from file or return default."""
 61 |     if CONFIG_FILE.exists():
 62 |         try:
 63 |             with open(CONFIG_FILE, "r") as f:
 64 |                 return json.load(f)
 65 |         except Exception:
 66 |             return DEFAULT_CONFIG
 67 |     else:
 68 |         # Save default config if it doesn't exist
 69 |         save_config(DEFAULT_CONFIG)
 70 |         return DEFAULT_CONFIG
 71 | 
 72 | 
 73 | def save_config(config: Dict[str, Any]) -> None:
 74 |     """Save configuration to file."""
 75 |     with open(CONFIG_FILE, "w") as f:
 76 |         json.dump(config, f, indent=2)
 77 | 
 78 | 
 79 | @router.get("/voices", response_model=List[Voice])
 80 | async def get_voices():
 81 |     """Get all available voices."""
 82 |     try:
 83 |         voices_data = await client.get_voices()
 84 |         return [Voice(voice_id=v["voice_id"], name=v["name"]) for v in voices_data]
 85 |     except Exception as e:
 86 |         raise HTTPException(status_code=500, detail=f"Failed to fetch voices: {str(e)}")
 87 | 
 88 | 
 89 | @router.get("/models", response_model=List[Model])
 90 | async def get_models():
 91 |     """Get all available models."""
 92 |     try:
 93 |         models_data = await client.get_models()
 94 |         return [Model(model_id=m["model_id"], name=m["name"]) for m in models_data]
 95 |     except Exception as e:
 96 |         raise HTTPException(status_code=500, detail=f"Failed to fetch models: {str(e)}")
 97 | 
 98 | 
 99 | @router.post("/tts")
100 | async def text_to_speech(request: TTSRequest):
101 |     """Convert text to speech."""
102 |     try:
103 |         # Load configuration
104 |         config = load_config()
105 | 
106 |         # Use provided voice_id/model_id or default from config
107 |         voice_id = request.voice_id or config["default_voice_id"]
108 |         model_id = request.model_id or config["default_model_id"]
109 | 
110 |         # Generate audio using our client
111 |         audio = await client.text_to_speech(text=request.text, voice_id=voice_id, model_id=model_id)
112 | 
113 |         # Send audio via WebSocket to all connected clients
114 |         encoded_audio = base64.b64encode(audio).decode("utf-8")
115 |         await manager.broadcast_to_clients(
116 |             {
117 |                 "type": "audio_data",
118 |                 "text": request.text,
119 |                 "voice_id": voice_id,
120 |                 "data": encoded_audio,
121 |             }
122 |         )
123 | 
124 |         return {}
125 |     except Exception as e:
126 |         raise HTTPException(status_code=500, detail=f"Failed to convert text to speech: {str(e)}")
127 | 
128 | 
129 | @router.post("/tts/stream")
130 | async def text_to_speech_stream(request: TTSRequest):
131 |     """Stream text to speech conversion."""
132 |     try:
133 |         # Load configuration
134 |         config = load_config()
135 | 
136 |         # Use provided voice_id/model_id or default from config
137 |         voice_id = request.voice_id or config["default_voice_id"]
138 |         model_id = request.model_id or config["default_model_id"]
139 | 
140 |         # Generate audio stream using our client
141 |         audio_stream = client.text_to_speech_stream(
142 |             text=request.text, voice_id=voice_id, model_id=model_id
143 |         )
144 | 
145 |         # Return audio as streaming response
146 |         return StreamingResponse(
147 |             audio_stream,
148 |             media_type="audio/mpeg",
149 |             headers={
150 |                 "Content-Disposition": "attachment; filename=speech.mp3",
151 |                 "Cache-Control": "no-cache",
152 |             },
153 |         )
154 |     except Exception as e:
155 |         raise HTTPException(status_code=500, detail=f"Failed to stream text to speech: {str(e)}")
156 | 
157 | 
158 | @router.post("/mcp")
159 | async def handle_mcp_request(request: MCPRequest) -> Dict:
160 |     """Handle MCP requests from the frontend."""
161 |     try:
162 |         if request.command == "speak-text":
163 |             # Send a message to the MCP binary via WebSocket
164 |             await manager.send_to_mcp(
165 |                 {
166 |                     "type": "tts_request",
167 |                     "text": request.params.get("text", ""),
168 |                     "voice_id": request.params.get("voice_id"),
169 |                 }
170 |             )
171 |             return {"status": "request_sent"}
172 | 
173 |         elif request.command == "list-voices":
174 |             # Get voices from ElevenLabs API
175 |             voices = await client.get_voices()
176 |             formatted_voices = [
177 |                 {"voice_id": voice["voice_id"], "name": voice["name"]} for voice in voices
178 |             ]
179 | 
180 |             # Send voice list to MCP binary
181 |             await manager.send_to_mcp({"type": "voice_list", "voices": formatted_voices})
182 | 
183 |             return {"status": "success", "voices": formatted_voices}
184 | 
185 |         elif request.command == "get-mcp-status":
186 |             # Check if MCP is connected
187 |             return {"status": "success", "mcp_connected": manager.mcp_connection is not None}
188 | 
189 |         else:
190 |             # Forward other commands to MCP binary
191 |             await manager.send_to_mcp(
192 |                 {"type": "command", "command": request.command, "params": request.params}
193 |             )
194 |             return {"status": "command_sent"}
195 | 
196 |     except Exception as e:
197 |         raise HTTPException(status_code=500, detail=str(e))
198 | 
199 | 
200 | @router.get("/config")
201 | async def get_config():
202 |     """Get current configuration."""
203 |     try:
204 |         config = load_config()
205 |         return config
206 |     except Exception as e:
207 |         raise HTTPException(status_code=500, detail=f"Failed to get configuration: {str(e)}")
208 | 
209 | 
210 | @router.post("/config")
211 | async def update_config(request: ConfigRequest):
212 |     """Update configuration."""
213 |     try:
214 |         current_config = load_config()
215 | 
216 |         # Update configuration with provided data
217 |         if request.default_voice_id is not None:
218 |             current_config["default_voice_id"] = request.default_voice_id
219 | 
220 |         if request.default_model_id is not None:
221 |             current_config["default_model_id"] = request.default_model_id
222 | 
223 |         if request.settings is not None:
224 |             if "auto_play" in request.settings:
225 |                 current_config["settings"]["auto_play"] = request.settings["auto_play"]
226 | 
227 |         # Save updated configuration
228 |         save_config(current_config)
229 | 
230 |         # Notify MCP about config changes
231 |         await manager.send_to_mcp({"type": "config_update", "config": current_config})
232 | 
233 |         return current_config
234 |     except Exception as e:
235 |         raise HTTPException(status_code=500, detail=f"Failed to update configuration: {str(e)}")
236 | 
```
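
For a quick smoke test of these routes against a locally running backend, something like the following could be used (a sketch assuming the default port 9020 and an empty `ROOT_PATH`; `httpx` is already a backend dependency):

```python
import httpx

BASE_URL = "http://localhost:9020/api/v1"

with httpx.Client(base_url=BASE_URL, timeout=30.0) as client:
    # List voices and the current configuration.
    print(client.get("/voices").json())
    print(client.get("/config").json())

    # Trigger TTS; the audio itself is pushed to WebSocket clients,
    # so the HTTP response body is empty on success.
    client.post("/tts", json={"text": "Hello from the smoke test"}).raise_for_status()
```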
--------------------------------------------------------------------------------
/src/backend/app.py:
--------------------------------------------------------------------------------
```python
  1 | from fastapi import FastAPI
  2 | from fastapi.middleware.cors import CORSMiddleware
  3 | import yaml
  4 | import os
  5 | from dotenv import load_dotenv
  6 | from pathlib import Path
  7 | from .routes import router
  8 | from .websocket import websocket_endpoint
  9 | from mcp.server.fastmcp import FastMCP
 10 | import mcp.server.sse
 11 | import logging
 12 | from .mcp_tools import register_mcp_tools
 13 | from fastapi import Request
 14 | 
 15 | # Load environment variables
 16 | load_dotenv()
 17 | 
 18 | # Get port configurations from environment variables
 19 | PORT = int(os.getenv("PORT", 9020))
 20 | HOST = os.getenv("HOST", "localhost")
 21 | ROOT_PATH = os.getenv("ROOT_PATH", "")
 22 | MCP_PORT = int(os.getenv("MCP_PORT", 9022))
 23 | 
 24 | # Configure logging
 25 | logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
 26 | logger = logging.getLogger(__name__)
 27 | 
 28 | # Normalize ROOT_PATH
 29 | if ROOT_PATH:
 30 |     # Ensure it starts with /
 31 |     if not ROOT_PATH.startswith("/"):
 32 |         ROOT_PATH = f"/{ROOT_PATH}"
 33 | 
 34 |     # Ensure it does not end with /
 35 |     if ROOT_PATH.endswith("/"):
 36 |         ROOT_PATH = ROOT_PATH[:-1]
 37 | 
 38 |     logger.info(f"Using ROOT_PATH: {ROOT_PATH}")
 39 | 
 40 | app = FastAPI(
 41 |     title="Jessica TTS MCP",
 42 |     description="Text-to-Speech service using ElevenLabs API",
 43 |     version="0.1.0",
 44 |     root_path=ROOT_PATH,
 45 |     docs_url="/docs",
 46 |     openapi_url="/openapi.json",
 47 |     redoc_url="/redoc",
 48 | )
 49 | 
 50 | """
 51 | Path Rewriting Concept
 52 | 
 53 | This service uses a path-rewriting middleware to handle both direct ALB access 
 54 | and API Gateway access with a ROOT_PATH prefix.
 55 | 
 56 | The middleware strips the ROOT_PATH prefix from incoming requests before they're processed,
 57 | allowing FastAPI to use its built-in root_path parameter correctly and eliminating the need
 58 | for duplicate routes.
 59 | 
 60 | For example:
 61 | - External request to /jessica-service/api/v1/tts
 62 | - Middleware rewrites to /api/v1/tts
 63 | - FastAPI processes the request using its normal routing
 64 | - FastAPI adds ROOT_PATH in generated URLs (docs, redirects, etc.)
 65 | 
 66 | This approach simplifies the codebase and ensures consistency between local development
 67 | and production environments.
 68 | """
 69 | 
 70 | 
 71 | # Path rewriting middleware - MUST be first in the middleware chain
 72 | @app.middleware("http")
 73 | async def rewrite_path_middleware(request: Request, call_next):
 74 |     """
 75 |     Middleware that rewrites incoming request paths by removing the ROOT_PATH prefix.
 76 | 
 77 |     This allows FastAPI to handle both direct requests and requests coming through
 78 |     API Gateway or ALB with a path prefix.
 79 |     """
 80 |     original_path = request.url.path
 81 | 
 82 |     # Debug output of original path
 83 |     logger.debug(f"Original path: {original_path}")
 84 | 
 85 |     # Check for double service paths (e.g. /jessica-service/jessica-service/)
 86 |     double_prefix = False
 87 |     if ROOT_PATH and original_path.startswith(ROOT_PATH):
 88 |         remaining_path = original_path[len(ROOT_PATH) :]
 89 |         if remaining_path.startswith(ROOT_PATH):
 90 |             # We found a double prefix
 91 |             double_prefix = True
 92 |             logger.debug(f"Detected double prefix: {original_path}")
 93 |             # Remove both instances of the prefix
 94 |             new_path = remaining_path[len(ROOT_PATH) :]
 95 |             # Ensure the path starts with /
 96 |             if not new_path.startswith("/"):
 97 |                 new_path = "/" + new_path
 98 | 
 99 |             # Update the request scope
100 |             request.scope["path"] = new_path
101 |             request.scope["root_path"] = ROOT_PATH
102 | 
103 |             logger.debug(
104 |                 f"Path rewritten (double prefix): {original_path} -> {new_path} (root_path={ROOT_PATH})"
105 |             )
106 |             return await call_next(request)
107 | 
108 |     # Only rewrite if ROOT_PATH is set and path starts with it
109 |     if ROOT_PATH and original_path.startswith(ROOT_PATH) and not double_prefix:
110 |         # Remove the ROOT_PATH prefix from the path
111 |         new_path = original_path[len(ROOT_PATH) :]
112 |         # Ensure the path starts with a slash
113 |         if not new_path.startswith("/"):
114 |             new_path = "/" + new_path
115 | 
116 |         # Create modified request scope with new path
117 |         request.scope["path"] = new_path
118 | 
119 |         # Update the root_path in the scope
120 |         request.scope["root_path"] = ROOT_PATH
121 | 
122 |         logger.debug(f"Path rewritten: {original_path} -> {new_path} (root_path={ROOT_PATH})")
123 | 
124 |     # Process the request with the rewritten path
125 |     return await call_next(request)
126 | 
127 | 
128 | # CORS middleware configuration
129 | app.add_middleware(
130 |     CORSMiddleware,
131 |     allow_origins=["*"],  # In production, this should be restricted
132 |     allow_credentials=True,
133 |     allow_methods=["*"],
134 |     allow_headers=["*"],
135 | )
136 | 
137 | 
138 | # Logging middleware (after path rewriting)
139 | @app.middleware("http")
140 | async def log_requests(request: Request, call_next):
141 |     """Log information about all incoming requests."""
142 |     logger.info(f"Request path: {request.url.path}")
143 |     logger.info(f"Request method: {request.method}")
144 |     logger.info(f"ROOT_PATH: {ROOT_PATH}")
145 | 
146 |     response = await call_next(request)
147 | 
148 |     logger.info(f"{request.method} {request.url.path} - {response.status_code}")
149 |     return response
150 | 
151 | 
152 | # Load configuration
153 | def load_config():
154 |     config_path = Path("config.yaml")
155 |     if config_path.exists():
156 |         with open(config_path, "r") as f:
157 |             return yaml.safe_load(f)
158 |     return {"voices": {}, "settings": {}}
159 | 
160 | 
161 | config = load_config()
162 | 
163 | # Include our API routes
164 | app.include_router(router)
165 | 
166 | # Add WebSocket endpoint
167 | app.add_websocket_route("/ws", websocket_endpoint)
168 | 
169 | # Initialize MCP server
170 | mcp_server = FastMCP("Jessica MCP Service")
171 | register_mcp_tools(mcp_server)
172 | 
173 | # We no longer start a separate FastMCP server in its own thread;
174 | # instead, the SSE endpoints are integrated directly into this FastAPI app
175 | 
176 | # Create the SSE transport layer
177 | sse_transport = mcp.server.sse.SseServerTransport("/messages/")
178 | 
179 | 
180 | @app.get("/sse")
181 | async def handle_sse(request: Request):
182 |     """Der SSE-Endpunkt für MCP-Kommunikation"""
183 |     async with sse_transport.connect_sse(request.scope, request.receive, request._send) as streams:
184 |         await mcp_server._mcp_server.run(
185 |             streams[0],
186 |             streams[1],
187 |             mcp_server._mcp_server.create_initialization_options(),
188 |         )
189 | 
190 | 
191 | @app.post("/messages/{path:path}")
192 | async def handle_messages(request: Request, path: str):
193 |     """Weiterleitung der Messages an den SSE-Transport"""
194 |     return await sse_transport.handle_post_message(request.scope, request.receive, request._send)
195 | 
196 | 
197 | # Log the server URLs when the app starts
198 | @app.on_event("startup")
199 | async def startup_event():
200 |     # Log the server URLs
201 |     logger.info(f"Backend server listening on {HOST}:{PORT}{ROOT_PATH}")
202 |     logger.info(f"MCP server integrated on {ROOT_PATH}/sse")
203 | 
204 | 
205 | @app.get("/health")
206 | async def jessica_service_health_check():
207 |     return {
208 |         "status": "ok",
209 |         "service": "jessica-service",
210 |         "root_path": ROOT_PATH,
211 |         "elevenlabs_api_key": bool(os.getenv("ELEVENLABS_API_KEY")),
212 |         "config_loaded": bool(config),
213 |         "mcp_enabled": True,
214 |     }
215 | 
216 | 
217 | # Define the catch-all route only after the specific routes above
218 | @app.get("/{path:path}")
219 | async def catch_all(path: str, request: Request):
220 |     """
221 |     Catch-all route for debugging and redirecting misrouted requests.
222 | 
223 |     This route helps with debugging path issues that might occur with various
224 |     proxy configurations and ROOT_PATH settings.
225 |     """
226 |     logger.error(f"DEBUG-CATCHALL: Received request for path: /{path}, full URL: {request.url}")
227 |     logger.error(f"DEBUG-CATCHALL: Headers: {request.headers}")
228 | 
229 |     # API documentation should be available at /docs when ROOT_PATH is handled correctly
230 |     if path == "docs" or path == "redoc" or path == "openapi.json":
231 |         logger.error(
232 |             f"Documentation URL accessed incorrectly as /{path} - should be at {ROOT_PATH}/docs"
233 |         )
234 | 
235 |     return {"message": f"Received request for /{path}"}
236 | 
```
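
A note on the path-rewriting middleware above: it strips the configured `ROOT_PATH` prefix before routing, so the same route answers both the bare path and the proxied path. A minimal sketch of how this can be exercised locally with FastAPI's `TestClient` (assuming the module imports cleanly and that `ROOT_PATH` resolves to `/jessica-service`; the prefix value is an assumption, not taken from the repository):

```python
# Minimal sketch, not a test shipped with the repository.
# Assumptions: the FastAPI instance is importable as `src.backend.app.app`
# and ROOT_PATH resolves to "/jessica-service" at import time.
from fastapi.testclient import TestClient

from src.backend.app import app

client = TestClient(app)

# An unprefixed request hits /health directly (no rewrite is applied).
print(client.get("/health").json())

# A prefixed request is rewritten by the middleware from
# "/jessica-service/health" to "/health" before routing,
# so it should return the same payload.
print(client.get("/jessica-service/health").json())
```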
--------------------------------------------------------------------------------
/CHECKLIST.md:
--------------------------------------------------------------------------------
```markdown
  1 | # Migration to fastapi-mcp and SSE Integration Checklist
  2 | 
  3 | ## Starting Point and Problem Analysis
  4 | 
  5 | - [ ] **Understand the current architecture**
  6 |   - [ ] API Gateway → VPC Link → ALB → ECS/Fargate services
  7 |   - [ ] Review the current routing configuration for the Jessica service
  8 |   - [ ] Analyze the existing WebSocket implementation
  9 | 
 10 | - [ ] **SSE problem analysis**
 11 |   - [ ] Document why API Gateway is problematic for SSE (the REST API Gateway supports neither native streaming nor chunked transfer)
 12 |   - [ ] Identify the current traffic flow for streaming connections
 13 |   - [ ] Capture potential timeouts and connection issues
 14 | 
 15 | ## Changes in the Central Infrastructure Repository
 16 | 
 17 | - [ ] **DNS configuration**
 18 |   - [ ] Create a new DNS record `mcp.run.georgi.io` in Route53
 19 |   - [ ] Point a CNAME or A record at the existing ALB
 20 |   - [ ] Verify DNS propagation after the change
 21 | 
 22 | - [ ] **ACM certificate**
 23 |   - [ ] Check the existing ACM certificate for `*.georgi.io`
 24 |   - [ ] Ensure it covers `mcp.run.georgi.io`, or extend it
 25 |   - [ ] Attach the certificate to the ALB
 26 | 
 27 | - [ ] **Security groups for the ALB**
 28 |   - [ ] Adjust the ALB security group for public access
 29 |   - [ ] Restrict access specifically to port 443 (HTTPS)
 30 |   - [ ] Optional: add IP-based restrictions
 31 |   ```hcl
 32 |   resource "aws_security_group_rule" "alb_ingress_sse" {
 33 |     security_group_id = var.alb_security_group_id
 34 |     type              = "ingress"
 35 |     from_port         = 443
 36 |     to_port           = 443
 37 |     protocol          = "tcp"
 38 |     cidr_blocks       = ["0.0.0.0/0"]
 39 |     description       = "Allow HTTPS access from internet for SSE endpoints"
 40 |   }
 41 |   ```
 42 | 
 43 | - [ ] **ALB listener**
 44 |   - [ ] Configure an HTTPS listener for the host `mcp.run.georgi.io`
 45 |   - [ ] Assign the certificate to the listener
 46 |   - [ ] Define the default action for this host
 47 | 
 48 | ## Changes in the Jessica Repository (Local Changes)
 49 | 
 50 | - [ ] **Adjust the target groups**
 51 |   - [ ] Review the existing target groups (`aws_lb_target_group.api` and `aws_lb_target_group.ws`)
 52 |   - [ ] Create a new target group for the SSE endpoints
 53 |   ```hcl
 54 |   resource "aws_lb_target_group" "sse" {
 55 |     name        = "${var.service_name}-sse"
 56 |     port        = var.container_port
 57 |     protocol    = "HTTP"
 58 |     vpc_id      = var.vpc_id
 59 |     target_type = "ip"
 60 |     
 61 |     health_check {
 62 |       enabled             = true
 63 |       protocol            = "HTTP"
 64 |       path                = "/health"
 65 |       port                = "traffic-port"
 66 |       healthy_threshold   = 3
 67 |       unhealthy_threshold = 3
 68 |       timeout             = 5
 69 |       interval            = 30
 70 |       matcher             = "200"
 71 |     }
 72 |   }
 73 |   ```
 74 | 
 75 | - [ ] **ALB listener rules**
 76 |   - [ ] Create a new listener rule for the SSE endpoints
 77 |   ```hcl
 78 |   resource "aws_lb_listener_rule" "sse_https" {
 79 |     listener_arn = var.central_alb_https_listener_arn
 80 |     priority     = 90  # Higher priority than the existing rules
 81 |     
 82 |     action {
 83 |       type             = "forward"
 84 |       target_group_arn = aws_lb_target_group.sse.arn
 85 |     }
 86 |     
 87 |     condition {
 88 |       path_pattern {
 89 |         values = ["/jessica-service/mcp/sse*"]
 90 |       }
 91 |     }
 92 |     
 93 |     condition {
 94 |       host_header {
 95 |         values = ["mcp.run.georgi.io"]
 96 |       }
 97 |     }
 98 |   }
 99 |   ```
100 | 
101 | - [ ] **ECS service adjustment**
102 |   - [ ] Add the load balancer configuration for the new target group
103 |   ```hcl
104 |   load_balancer {
105 |     target_group_arn = aws_lb_target_group.sse.arn
106 |     container_name   = var.service_name
107 |     container_port   = var.container_port
108 |   }
109 |   ```
110 | 
111 | - [ ] **Code migration to fastapi-mcp**
112 |   - [ ] Migrate the codebase from FastMCP to fastapi-mcp
113 |   - [ ] Read BASE_PATH from the .env environment variable
114 |   - [ ] Implement API endpoints under `${BASE_PATH}/api`
115 |   - [ ] Implement SSE endpoints under `${BASE_PATH}/mcp/sse`
116 | 
117 | - [ ] **FastAPI app configuration**
118 |   - [ ] Set up dynamic BASE_PATH configuration in the FastAPI application
119 |   ```python
120 |   # .env file:
121 |   # BASE_PATH=/jessica-service
122 |   import os
123 |   from dotenv import load_dotenv
124 |   from fastapi import FastAPI, APIRouter
125 |   from fastapi.responses import StreamingResponse
126 |   load_dotenv()
127 |   
128 |   base_path = os.getenv("BASE_PATH", "")
129 |   
130 |   app = FastAPI(
131 |       title="Jessica MCP API",
132 |       description="Jessica MCP API with SSE support",
133 |       root_path=base_path  # from the .env environment variable
134 |   )
135 |   
136 |   # API endpoints under /api
137 |   api_router = APIRouter(prefix="/api")
138 |   
139 |   # MCP endpoints under /mcp
140 |   mcp_router = APIRouter(prefix="/mcp")
141 |   
142 |   # SSE endpoint under /mcp/sse
143 |   @mcp_router.get("/sse/stream")
144 |   async def stream_events():
145 |       async def event_generator():
146 |           # SSE implementation
147 |           yield "data: Event message\n\n"
148 |           
149 |       return StreamingResponse(
150 |           event_generator(),
151 |           media_type="text/event-stream",
152 |           headers={
153 |               "Cache-Control": "no-cache",
154 |               "Connection": "keep-alive",
155 |           }
156 |       )
157 |   
158 |   # Register the routers
159 |   app.include_router(api_router)
160 |   app.include_router(mcp_router)
161 |   ```
162 | 
163 | - [ ] **SSE implementation**
164 |   - [ ] Implement Server-Sent Events endpoints under `/mcp/sse`
165 |   - [ ] Ensure correct Content-Type headers and formatting
166 |   ```python
167 |   # Already included in the mcp_router above
168 |   @mcp_router.get("/sse/stream")
169 |   async def stream_events():
170 |       async def event_generator():
171 |           # SSE implementation
172 |           yield "data: Event message\n\n"
173 |           
174 |       return StreamingResponse(
175 |           event_generator(),
176 |           media_type="text/event-stream",
177 |           headers={
178 |               "Cache-Control": "no-cache",
179 |               "Connection": "keep-alive",
180 |           }
181 |       )
182 |   ```
183 | 
184 | ## Testing and Validation
185 | 
186 | - [ ] **Local testing of the FastAPI app**
187 |   - [ ] Test different BASE_PATH values in .env
188 |   - [ ] Test the SSE endpoints with curl/Postman
189 |   ```bash
190 |   # With BASE_PATH=/jessica-service in .env
191 |   curl -N http://localhost:8000/jessica-service/mcp/sse/stream
192 |   
193 |   # With BASE_PATH=/test in .env
194 |   curl -N http://localhost:8000/test/mcp/sse/stream
195 |   ```
196 | 
197 | - [ ] **Infrastructure testing**
198 |   - [ ] Test API Gateway access for the regular endpoints
199 |   - [ ] Test direct ALB access for the SSE endpoints
200 |   ```bash
201 |   # Regular API Gateway access (API endpoints)
202 |   curl https://api.georgi.io/jessica-service/api/v1/endpoint
203 |   
204 |   # Direct SSE access via the new DNS name
205 |   curl -N https://mcp.run.georgi.io/jessica-service/mcp/sse/stream
206 |   ```
207 | 
208 | - [ ] **End-to-end validation**
209 |   - [ ] Frontend integration with the SSE endpoints
210 |   - [ ] Verify connection stability and timeout behavior
211 |   - [ ] Run load and performance tests
212 | 
213 | ## Documentation and Monitoring
214 | 
215 | - [ ] **Update the infrastructure documentation**
216 |   - [ ] Extend README-INFRASTRUCTURE.md
217 |   - [ ] Update the architecture diagrams
218 |   - [ ] Document the Terraform modules
219 | 
220 | - [ ] **Monitoring and alerting**
221 |   - [ ] Set up CloudWatch alarms for the new endpoints
222 |   - [ ] Monitor SSE connection drops
223 |   - [ ] Create a dashboard for SSE performance
224 | 
225 | ## Rollback Plan
226 | 
227 | - [ ] **Document the rollback strategy**
228 |   - [ ] Define the conditions for a rollback
229 |   - [ ] Define the steps to revert the infrastructure
230 |   - [ ] Document client-side fallback mechanisms
231 | 
232 | ## Resources and References
233 | 
234 | - [AWS API Gateway Limitations](https://docs.aws.amazon.com/apigateway/latest/developerguide/limits.html)
235 | - [FastAPI SSE Implementation](https://github.com/sysid/sse-starlette)
236 | - [AWS ALB Path-Based Routing](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#path-conditions)
237 | - [FastAPI root_path Configuration](https://fastapi.tiangolo.com/advanced/behind-a-proxy/)
238 | - [AWS ALB Host-Based Routing](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#host-conditions)
239 | - [Server-Sent Events MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) 
```
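
The checklist's testing steps verify the SSE endpoints with `curl -N`. The same smoke test can be scripted; below is a hedged sketch using `httpx`, where the URL and path mirror the checklist's local-testing example (BASE_PATH=/jessica-service, stream under /mcp/sse/stream) and are assumptions rather than confirmed endpoints:

```python
# Illustrative SSE smoke test, assuming the endpoint from the checklist
# (BASE_PATH=/jessica-service, stream under /mcp/sse/stream) is running
# locally on port 8000.
import httpx

url = "http://localhost:8000/jessica-service/mcp/sse/stream"

# timeout=None keeps the long-lived streaming connection open.
with httpx.stream("GET", url, timeout=None) as response:
    assert response.headers.get("content-type", "").startswith("text/event-stream")
    for line in response.iter_lines():
        if line.startswith("data:"):
            print("received:", line[len("data:"):].strip())
            break  # one event is enough for a smoke test
```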
--------------------------------------------------------------------------------
/tests/backend/test_mcp_tools.py:
--------------------------------------------------------------------------------
```python
  1 | """
  2 | Unit tests for MCP tools.
  3 | """
  4 | 
  5 | from pathlib import Path
  6 | from src.backend.mcp_tools import register_mcp_tools
  7 | from src.backend.routes import load_config, save_config
  8 | from mcp.server.fastmcp import FastMCP
  9 | 
 10 | 
 11 | class TestMCPTools:
 12 |     def test_speak_text_basic(self, mock_elevenlabs, temp_config_dir, mock_subprocess, monkeypatch):
 13 |         """Test basic text-to-speech conversion."""
 14 |         # Setup
 15 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
 16 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
 17 | 
 18 |         mcp_server = FastMCP()
 19 |         register_mcp_tools(mcp_server, test_mode=True)
 20 | 
 21 |         # Execute
 22 |         @mcp_server.tool("speak_text")
 23 |         def speak_text(text: str, voice_id: str = None):
 24 |             mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
 25 |             return {
 26 |                 "success": True,
 27 |                 "message": "Text converted to speech successfully",
 28 |                 "streaming": False,
 29 |             }
 30 | 
 31 |         result = speak_text("Hello, World!")
 32 | 
 33 |         # Assert
 34 |         assert result["success"] is True
 35 |         assert result["streaming"] is False
 36 |         mock_elevenlabs["generate"].assert_called_with(
 37 |             text="Hello, World!", voice=None, model="model1"
 38 |         )
 39 | 
 40 |         # Verify temp file cleanup
 41 |         temp_files = list(Path("/tmp").glob("*.mp3"))
 42 |         assert len(temp_files) == 0
 43 | 
 44 |     def test_speak_text_with_custom_voice(
 45 |         self, mock_elevenlabs, temp_config_dir, mock_subprocess, monkeypatch
 46 |     ):
 47 |         """Test TTS with custom voice ID."""
 48 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
 49 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
 50 | 
 51 |         mcp_server = FastMCP()
 52 |         register_mcp_tools(mcp_server, test_mode=True)
 53 | 
 54 |         # Execute with custom voice
 55 |         @mcp_server.tool("speak_text")
 56 |         def speak_text(text: str, voice_id: str = None):
 57 |             mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
 58 |             return {
 59 |                 "success": True,
 60 |                 "message": "Text converted to speech successfully",
 61 |                 "streaming": False,
 62 |             }
 63 | 
 64 |         result = speak_text("Test", voice_id="voice2")
 65 | 
 66 |         # Assert
 67 |         assert result["success"] is True
 68 |         mock_elevenlabs["generate"].assert_called_with(text="Test", voice="voice2", model="model1")
 69 | 
 70 |     def test_speak_text_with_save_audio(
 71 |         self, mock_elevenlabs, temp_config_dir, mock_subprocess, monkeypatch
 72 |     ):
 73 |         """Test TTS with audio saving enabled."""
 74 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
 75 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
 76 | 
 77 |         # Enable audio saving
 78 |         config = load_config()
 79 |         config["settings"]["save_audio"] = True
 80 |         save_config(config)
 81 | 
 82 |         # Create audio directory
 83 |         audio_dir = temp_config_dir / "audio"
 84 |         audio_dir.mkdir(exist_ok=True)
 85 |         test_audio_file = audio_dir / "test.mp3"
 86 |         test_audio_file.touch()
 87 | 
 88 |         mcp_server = FastMCP()
 89 |         register_mcp_tools(mcp_server, test_mode=True)
 90 | 
 91 |         # Execute
 92 |         @mcp_server.tool("speak_text")
 93 |         def speak_text(text: str, voice_id: str = None):
 94 |             mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
 95 |             return {
 96 |                 "success": True,
 97 |                 "message": "Text converted to speech successfully",
 98 |                 "streaming": False,
 99 |             }
100 | 
101 |         result = speak_text("Save this audio")
102 | 
103 |         # Assert
104 |         assert result["success"] is True
105 |         audio_files = list((temp_config_dir / "audio").glob("*.mp3"))
106 |         assert len(audio_files) == 1
107 | 
108 |     def test_list_voices(self, mock_elevenlabs, temp_config_dir, monkeypatch):
109 |         """Test voice listing."""
110 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
111 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
112 | 
113 |         mock_voices = [{"id": "voice1", "name": "Voice 1"}, {"id": "voice2", "name": "Voice 2"}]
114 |         mock_elevenlabs["voices"].return_value = mock_voices
115 | 
116 |         mcp_server = FastMCP()
117 |         register_mcp_tools(mcp_server, test_mode=True)
118 | 
119 |         # Execute
120 |         @mcp_server.tool("list_voices")
121 |         def list_voices():
122 |             voices = mock_elevenlabs["voices"]()
123 |             return {"success": True, "voices": voices}
124 | 
125 |         result = list_voices()
126 | 
127 |         # Assert
128 |         assert result["success"] is True
129 |         assert len(result["voices"]) == 2
130 |         assert result["voices"] == mock_voices
131 | 
132 |     def test_get_models(self, mock_elevenlabs, temp_config_dir, monkeypatch):
133 |         """Test model listing."""
134 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
135 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
136 | 
137 |         mock_models = [{"id": "model1", "name": "Model 1"}, {"id": "model2", "name": "Model 2"}]
138 |         mock_elevenlabs["models"].return_value = mock_models
139 | 
140 |         mcp_server = FastMCP()
141 |         register_mcp_tools(mcp_server, test_mode=True)
142 | 
143 |         # Execute
144 |         @mcp_server.tool("get_models")
145 |         def get_models():
146 |             models = mock_elevenlabs["models"]()
147 |             return {"success": True, "models": models}
148 | 
149 |         result = get_models()
150 | 
151 |         # Assert
152 |         assert result["success"] is True
153 |         assert len(result["models"]) == 2
154 |         assert result["models"] == mock_models
155 | 
156 |     def test_config_management(self, mock_elevenlabs, temp_config_dir, monkeypatch):
157 |         """Test configuration management."""
158 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
159 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
160 | 
161 |         # Set up initial config
162 |         config = load_config()
163 |         config["settings"]["default_voice_id"] = "voice1"
164 |         save_config(config)
165 | 
166 |         mcp_server = FastMCP()
167 |         register_mcp_tools(mcp_server, test_mode=True)
168 | 
169 |         # Test get config
170 |         @mcp_server.tool("get_config")
171 |         def get_config():
172 |             config = load_config()
173 |             return {"success": True, "config": config}
174 | 
175 |         result = get_config()
176 |         assert result["success"] is True
177 |         assert result["config"]["settings"]["default_voice_id"] == "voice1"
178 | 
179 |         # Test update config
180 |         @mcp_server.tool("update_config")
181 |         def update_config(config_data: dict):
182 |             current_config = load_config()
183 |             current_config.update(config_data)
184 |             save_config(current_config)
185 |             return {"success": True, "message": "Configuration updated successfully"}
186 | 
187 |         new_config = {"default_voice_id": "voice2", "settings": {"auto_play": False}}
188 |         result = update_config(new_config)
189 |         assert result["success"] is True
190 | 
191 |         # Verify config was updated
192 |         result = get_config()
193 |         assert result["success"] is True
194 |         assert result["config"]["default_voice_id"] == "voice2"
195 |         assert result["config"]["settings"]["auto_play"] is False
196 | 
197 |     def test_error_handling(self, mock_elevenlabs, temp_config_dir, monkeypatch):
198 |         """Test error handling in various scenarios."""
199 |         monkeypatch.setenv("ELEVENLABS_API_KEY", "fake_key")
200 |         monkeypatch.setattr("src.backend.routes.CONFIG_DIR", temp_config_dir)
201 | 
202 |         mcp_server = FastMCP()
203 |         register_mcp_tools(mcp_server, test_mode=True)
204 | 
205 |         # Test API error
206 |         mock_elevenlabs["generate"].side_effect = Exception("API Error")
207 | 
208 |         @mcp_server.tool("speak_text")
209 |         def speak_text(text: str, voice_id: str = None):
210 |             try:
211 |                 mock_elevenlabs["generate"](text=text, voice=voice_id, model="model1")
212 |                 return {
213 |                     "success": True,
214 |                     "message": "Text converted to speech successfully",
215 |                     "streaming": False,
216 |                 }
217 |             except Exception as e:
218 |                 return {"success": False, "error": str(e)}
219 | 
220 |         result = speak_text("Should fail")
221 |         assert result["success"] is False
222 |         assert "API Error" in result["error"]
223 | 
224 |         # Test invalid voice ID
225 |         mock_elevenlabs["generate"].side_effect = None
226 |         result = speak_text("Test", voice_id="nonexistent")
227 |         assert (
228 |             result["success"] is True
229 |         )  # This should still succeed as we're just passing through to the mock
230 | 
```
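
The tests above depend on the fixtures `mock_elevenlabs`, `temp_config_dir`, and `mock_subprocess`, which live in `tests/backend/conftest.py` and are not reproduced in this dump. Purely as an illustration of the shape such fixtures typically take (a hypothetical sketch, not the repository's actual conftest):

```python
# Hypothetical conftest sketch -- the real fixtures live in
# tests/backend/conftest.py and may differ.
from unittest.mock import MagicMock, patch

import pytest


@pytest.fixture
def temp_config_dir(tmp_path):
    """Provide an isolated directory for config.yaml and saved audio."""
    return tmp_path


@pytest.fixture
def mock_elevenlabs():
    """Expose mock callables standing in for the ElevenLabs client."""
    return {
        "generate": MagicMock(return_value=b"fake-audio-bytes"),
        "voices": MagicMock(return_value=[]),
        "models": MagicMock(return_value=[]),
    }


@pytest.fixture
def mock_subprocess():
    """Prevent the tests from spawning a real audio player."""
    with patch("subprocess.Popen") as popen:
        yield popen
```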
--------------------------------------------------------------------------------
/src/frontend/src/App.tsx:
--------------------------------------------------------------------------------
```typescript
  1 | import { useState, useEffect, useRef } from 'react'
  2 | import {
  3 |   Container,
  4 |   Paper,
  5 |   Typography,
  6 |   TextField,
  7 |   Button,
  8 |   FormControl,
  9 |   InputLabel,
 10 |   Select,
 11 |   MenuItem,
 12 |   Box,
 13 |   Alert,
 14 |   IconButton,
 15 |   CircularProgress,
 16 |   Divider,
 17 |   ThemeProvider,
 18 |   createTheme,
 19 |   CssBaseline,
 20 |   Tabs,
 21 |   Tab,
 22 |   Switch,
 23 |   FormControlLabel,
 24 |   Snackbar,
 25 |   keyframes,
 26 | } from '@mui/material'
 27 | import {
 28 |   PlayArrow as PlayIcon,
 29 |   Stop as StopIcon,
 30 |   RecordVoiceOver as MicIcon,
 31 |   VolumeUp as VolumeIcon,
 32 |   Save as SaveIcon,
 33 |   GraphicEq as WaveIcon,
 34 | } from '@mui/icons-material'
 35 | import apiService, { Voice, Model, Config, connectWebSocket } from './services/api'
 36 | import { TabContext, TabList, TabPanel } from '@mui/lab'
 37 | 
 38 | // Create wave animation keyframes
 39 | const waveAnimation = keyframes`
 40 |   0% { transform: scaleY(0.5); }
 41 |   50% { transform: scaleY(1); }
 42 |   100% { transform: scaleY(0.5); }
 43 | `
 44 | 
 45 | // Create a custom theme
 46 | const theme = createTheme({
 47 |   palette: {
 48 |     primary: {
 49 |       main: '#6366f1', // Indigo color
 50 |     },
 51 |     secondary: {
 52 |       main: '#10b981', // Emerald color
 53 |     },
 54 |     background: {
 55 |       default: '#f3f4f6',
 56 |       paper: '#ffffff',
 57 |     },
 58 |   },
 59 |   typography: {
 60 |     fontFamily: '"Inter", "Roboto", "Helvetica", "Arial", sans-serif',
 61 |     h4: {
 62 |       fontWeight: 600,
 63 |     },
 64 |     h5: {
 65 |       fontWeight: 600,
 66 |     },
 67 |     h6: {
 68 |       fontWeight: 600,
 69 |     },
 70 |   },
 71 |   shape: {
 72 |     borderRadius: 8,
 73 |   },
 74 |   components: {
 75 |     MuiButton: {
 76 |       styleOverrides: {
 77 |         root: {
 78 |           textTransform: 'none',
 79 |           fontWeight: 500,
 80 |         },
 81 |       },
 82 |     },
 83 |     MuiPaper: {
 84 |       styleOverrides: {
 85 |         root: {
 86 |           boxShadow: '0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)',
 87 |         },
 88 |       },
 89 |     },
 90 |   },
 91 | })
 92 | 
 93 | function App() {
 94 |   const [selectedTab, setSelectedTab] = useState<string>('0');
 95 |   const [text, setText] = useState('');
 96 |   const [voices, setVoices] = useState<Voice[]>([])
 97 |   const [models, setModels] = useState<Model[]>([])
 98 |   const [selectedVoice, setSelectedVoice] = useState<string>('')
 99 |   const [selectedModel, setSelectedModel] = useState<string>('')
100 |   const [isLoading, setIsLoading] = useState<boolean>(false)
101 |   const [audioUrl, setAudioUrl] = useState<string>('')
102 |   const [isPlaying, setIsPlaying] = useState<boolean>(false)
103 |   const [error, setError] = useState<string>('')
104 |   const [config, setConfig] = useState<Config | null>(null)
105 |   const [autoPlay, setAutoPlay] = useState(true)
106 |   const [snackbarOpen, setSnackbarOpen] = useState<boolean>(false)
107 |   const [snackbarMessage, setSnackbarMessage] = useState<string>('')
108 |   const wsRef = useRef<WebSocket | null>(null)
109 |   const audioContextRef = useRef<AudioContext | null>(null)
110 |   const [isAudioInitialized, setIsAudioInitialized] = useState(false)
111 | 
112 |   // Lazily create (and resume) a shared AudioContext and mark audio as initialized
113 |   const ensureAudioContext = async () => {
114 |     if (!audioContextRef.current) {
115 |       audioContextRef.current = new AudioContext()
116 |     }
117 |     
118 |     if (audioContextRef.current.state === 'suspended') {
119 |       await audioContextRef.current.resume()
120 |     }
121 |     setIsAudioInitialized(true)
122 |     return audioContextRef.current
123 |   }
124 | 
125 |   // Load config, voices and models on mount and open the WebSocket connection
126 |   useEffect(() => {
127 |     const fetchData = async () => {
128 |       try {
129 |         const configData = await apiService.getConfig()
130 |         setConfig(configData)
131 |         
132 |         const [voicesData, modelsData] = await Promise.all([
133 |           apiService.getVoices(),
134 |           apiService.getModels()
135 |         ])
136 |         
137 |         setVoices(voicesData)
138 |         setModels(modelsData)
139 |         
140 |         if (configData?.default_voice_id) {
141 |           setSelectedVoice(configData.default_voice_id)
142 |           setSelectedModel(configData.default_model_id || '')
143 |           setAutoPlay(configData.settings.auto_play)
144 |         } else if (voicesData.length > 0) {
145 |           setSelectedVoice(voicesData[0].voice_id)
146 |           if (modelsData.length > 0) {
147 |             setSelectedModel(modelsData[0].model_id)
148 |           }
149 |         }
150 | 
151 |         if (!wsRef.current) {
152 |           wsRef.current = connectWebSocket(
153 |             async (event: MessageEvent) => {
154 |               try {
155 |                 const message = JSON.parse(event.data)
156 |                 console.log('WebSocket message received:', message.type);
157 |                 
158 |                 switch (message.type) {
159 |                   case 'audio_data':
160 |                     try {
161 |                       const audioContext = await ensureAudioContext()
162 |                       const audioData = atob(message.data)
163 |                       const arrayBuffer = new ArrayBuffer(audioData.length)
164 |                       const view = new Uint8Array(arrayBuffer)
165 |                       for (let i = 0; i < audioData.length; i++) {
166 |                         view[i] = audioData.charCodeAt(i)
167 |                       }
168 |                       
169 |                       audioContext.decodeAudioData(arrayBuffer, (buffer) => {
170 |                         const source = audioContext.createBufferSource()
171 |                         source.buffer = buffer
172 |                         source.connect(audioContext.destination)
173 |                         source.start(0)
174 |                         setIsPlaying(true)
175 |                         source.onended = () => {
176 |                           setIsPlaying(false)
177 |                         }
178 |                       }, (err) => {
179 |                         console.error('Error decoding audio data:', err)
180 |                         setError('Error playing audio stream')
181 |                       })
182 |                     } catch (err) {
183 |                       console.error('Error processing audio data:', err)
184 |                       setError('Error initializing audio playback')
185 |                     }
186 |                     break
187 |                     
188 |                   case 'error':
189 |                     console.error('WebSocket error message:', message.message)
190 |                     setError(`Streaming error: ${message.message}`)
191 |                     break
192 | 
193 |                   default:
194 |                     console.log('Unknown message type:', message.type)
195 |                 }
196 |               } catch (err) {
197 |                 console.error('Error processing WebSocket message:', err)
198 |                 setError('Error processing audio stream')
199 |               }
200 |             },
201 |             () => {
202 |               console.log('WebSocket opened')
203 |               setError('')
204 |             },
205 |             () => {
206 |               console.log('WebSocket closed')
207 |               wsRef.current = null
208 |             },
209 |             () => {
210 |               console.error('WebSocket connection error')
211 |               setError('WebSocket connection error')
212 |               wsRef.current = null
213 |             }
214 |           )
215 |         }
216 |       } catch (err) {
217 |         console.error('Error in fetchData:', err)
218 |         setError('Failed to load data. Please try again later.')
219 |       }
220 |     }
221 | 
222 |     fetchData()
223 | 
224 |     return () => {
225 |       if (wsRef.current) {
226 |         wsRef.current.close()
227 |         wsRef.current = null
228 |       }
229 |     }
230 |   }, [])
231 | 
232 |   const handleTabChange = (_event: React.SyntheticEvent, newValue: string) => {
233 |     setSelectedTab(newValue)
234 |   }
235 | 
236 |   const handleTextToSpeech = async () => {
237 |     try {
238 |       await ensureAudioContext();
239 |       await apiService.textToSpeech(text);
240 |     } catch (error) {
241 |       console.error('Error in text to speech:', error);
242 |     }
243 |   }
244 | 
245 |   const playAudio = (url: string) => {
246 |     const audio = new Audio(url)
247 |     audio.onplay = () => setIsPlaying(true)
248 |     audio.onended = () => setIsPlaying(false)
249 |     audio.play()
250 |   }
251 | 
252 |   const stopAudio = () => {
253 |     const audioElements = document.querySelectorAll('audio')
254 |     audioElements.forEach(audio => {
255 |       audio.pause()
256 |       audio.currentTime = 0
257 |     })
258 |     setIsPlaying(false)
259 |   }
260 | 
261 |   const saveConfiguration = async () => {
262 |     if (!config) return
263 |     
264 |     try {
265 |       const updatedConfig = await apiService.updateConfig({
266 |         default_voice_id: selectedVoice,
267 |         default_model_id: selectedModel,
268 |         settings: {
269 |           auto_play: autoPlay
270 |         }
271 |       })
272 |       
273 |       setConfig(updatedConfig)
274 |       setSnackbarMessage('Configuration saved successfully')
275 |       setSnackbarOpen(true)
276 |     } catch (err) {
277 |       console.error('Error saving configuration:', err)
278 |       setError('Failed to save configuration. Please try again.')
279 |     }
280 |   }
281 | 
282 |   const handleSnackbarClose = () => {
283 |     setSnackbarOpen(false)
284 |   }
285 | 
286 |   return (
287 |     <ThemeProvider theme={theme}>
288 |       <CssBaseline />
289 |       <Container maxWidth="md" sx={{ mt: 4 }}>
290 |         <Typography variant="h4" align="center" sx={{ mb: 4, color: 'primary.main', fontWeight: 'bold' }}>
291 |           ElevenLabs TTS Streamer
292 |         </Typography>
293 |         
294 |         {!isAudioInitialized ? (
295 |           <Box 
296 |             sx={{ 
297 |               width: '100%',
298 |               mb: 3,
299 |               p: 2,
300 |               display: 'flex',
301 |               alignItems: 'center',
302 |               justifyContent: 'center',
303 |               gap: 1,
304 |               borderRadius: 1,
305 |               bgcolor: 'background.paper',
306 |               border: '1px solid',
307 |               borderColor: 'primary.main',
308 |               cursor: 'pointer',
309 |               '&:hover': {
310 |                 bgcolor: 'primary.main',
311 |                 color: 'white',
312 |               },
313 |               transition: 'all 0.3s ease',
314 |             }}
315 |             onClick={ensureAudioContext}
316 |           >
317 |             <Typography variant="body1" sx={{ fontWeight: 'medium' }}>
318 |               Click to Initialize Audio
319 |             </Typography>
320 |           </Box>
321 |         ) : (
322 |           <Box 
323 |             sx={{ 
324 |               width: '100%',
325 |               mb: 3,
326 |               p: 2,
327 |               display: 'flex',
328 |               alignItems: 'center',
329 |               justifyContent: 'center',
330 |               gap: 1,
331 |               borderRadius: 1,
332 |               bgcolor: 'primary.main',
333 |               color: 'white',
334 |             }}
335 |           >
336 |             <WaveIcon 
337 |               sx={{
338 |                 animation: `${waveAnimation} 1.5s ease-in-out infinite`,
339 |               }}
340 |             />
341 |             <Typography variant="h6" sx={{ fontWeight: 'medium' }}>
342 |               Audio Ready
343 |             </Typography>
344 |           </Box>
345 |         )}
346 | 
347 |         <Box sx={{ width: '100%', typography: 'body1' }}>
348 |           <TabContext value={selectedTab}>
349 |             <Box sx={{ borderBottom: 1, borderColor: 'divider' }}>
350 |               <TabList onChange={handleTabChange} aria-label="text to speech tabs">
351 |                 <Tab label="Text to Speech" value="0" />
352 |                 <Tab label="Voice Configuration" value="1" />
353 |               </TabList>
354 |             </Box>
355 |             
356 |             <TabPanel value="0">
357 |               <TextField
358 |                 id="text-input"
359 |                 label="Enter text to convert"
360 |                 multiline
361 |                 rows={4}
362 |                 value={text}
363 |                 onChange={(e) => setText(e.target.value)}
364 |                 fullWidth
365 |                 sx={{ mb: 2 }}
366 |               />
367 |               <Button
368 |                 variant="contained"
369 |                 onClick={handleTextToSpeech}
370 |                 disabled={!text.trim() || isLoading}
371 |                 fullWidth
372 |               >
373 |                 {isLoading ? 'Converting...' : 'Convert to Speech'}
374 |               </Button>
375 |             </TabPanel>
376 |             
377 |             <TabPanel value="1">
378 |               <Typography variant="h6" gutterBottom>
379 |                 Default Settings
380 |               </Typography>
381 |               
382 |               <FormControl fullWidth margin="normal">
383 |                 <InputLabel id="default-voice-label">Default Voice</InputLabel>
384 |                 <Select
385 |                   labelId="default-voice-label"
386 |                   id="default-voice"
387 |                   value={selectedVoice}
388 |                   label="Default Voice"
389 |                   onChange={(e) => setSelectedVoice(e.target.value)}
390 |                 >
391 |                   {voices.map((voice) => (
392 |                     <MenuItem key={voice.voice_id} value={voice.voice_id}>
393 |                       {voice.name}
394 |                     </MenuItem>
395 |                   ))}
396 |                 </Select>
397 |               </FormControl>
398 |               
399 |               <FormControl fullWidth margin="normal">
400 |                 <InputLabel id="default-model-label">Default Model</InputLabel>
401 |                 <Select
402 |                   labelId="default-model-label"
403 |                   id="default-model"
404 |                   value={selectedModel}
405 |                   label="Default Model"
406 |                   onChange={(e) => setSelectedModel(e.target.value)}
407 |                 >
408 |                   {models.map((model) => (
409 |                     <MenuItem key={model.model_id} value={model.model_id}>
410 |                       {model.name}
411 |                     </MenuItem>
412 |                   ))}
413 |                 </Select>
414 |               </FormControl>
415 |               
416 |               <Typography variant="h6" gutterBottom sx={{ mt: 4 }}>
417 |                 General Settings
418 |               </Typography>
419 |               
420 |               <FormControlLabel
421 |                 control={
422 |                   <Switch
423 |                     checked={autoPlay}
424 |                     onChange={(e) => setAutoPlay(e.target.checked)}
425 |                     color="primary"
426 |                   />
427 |                 }
428 |                 label="Auto-play audio after conversion"
429 |               />
430 |               
431 |               <Box display="flex" justifyContent="flex-end" mt={4}>
432 |                 <Button
433 |                   variant="contained"
434 |                   color="primary"
435 |                   startIcon={<SaveIcon />}
436 |                   onClick={saveConfiguration}
437 |                 >
438 |                   Save Configuration
439 |                 </Button>
440 |               </Box>
441 |             </TabPanel>
442 |           </TabContext>
443 |         </Box>
444 |       </Container>
445 |       
446 |       <Snackbar
447 |         open={snackbarOpen}
448 |         autoHideDuration={6000}
449 |         onClose={handleSnackbarClose}
450 |         message={snackbarMessage}
451 |       />
452 |     </ThemeProvider>
453 |   )
454 | }
455 | 
456 | export default App
457 | 
```
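
The WebSocket handler above expects JSON messages of the form `{ type: 'audio_data', data: <base64 MP3> }` (plus an `{ type: 'error', message: ... }` variant) and decodes them into an `AudioBuffer`. The server-side producer lives in `src/backend/websocket.py`, which is not shown here; the sketch below only mirrors the message shape the frontend consumes and is an assumption, not the repository's implementation:

```python
# Hypothetical producer sketch mirroring the message shape App.tsx decodes;
# the real implementation is in src/backend/websocket.py (not shown here).
import base64
import json

from fastapi import WebSocket


async def send_audio_chunk(websocket: WebSocket, mp3_bytes: bytes) -> None:
    """Send one MP3 chunk as the {'type': 'audio_data', 'data': <base64>} envelope."""
    await websocket.send_text(
        json.dumps(
            {
                "type": "audio_data",
                "data": base64.b64encode(mp3_bytes).decode("ascii"),
            }
        )
    )


async def send_error(websocket: WebSocket, message: str) -> None:
    """Mirror the {'type': 'error', 'message': ...} branch handled by the frontend."""
    await websocket.send_text(json.dumps({"type": "error", "message": message}))
```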