#
tokens: 43396/50000 6/67 files (page 3/3)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 3 of 3. Use http://codebase.md/jhacksman/openscad-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── implementation_plan.md
├── old
│   ├── download_sam2_checkpoint.py
│   ├── src
│   │   ├── ai
│   │   │   └── sam_segmentation.py
│   │   ├── models
│   │   │   └── threestudio_generator.py
│   │   └── workflow
│   │       └── image_to_model_pipeline.py
│   └── test_sam2_segmentation.py
├── README.md
├── requirements.txt
├── rtfmd
│   ├── decisions
│   │   ├── ai-driven-code-generation.md
│   │   └── export-formats.md
│   ├── files
│   │   └── src
│   │       ├── ai
│   │       │   └── ai_service.py.md
│   │       ├── main.py.md
│   │       ├── models
│   │       │   └── code_generator.py.md
│   │       └── nlp
│   │           └── parameter_extractor.py.md
│   ├── knowledge
│   │   ├── ai
│   │   │   └── natural-language-processing.md
│   │   ├── nlp
│   │   │   └── parameter-extraction.md
│   │   └── openscad
│   │       ├── export-formats.md
│   │       ├── openscad-basics.md
│   │       └── primitive-testing.md
│   └── README.md
├── scad
│   └── simple_cube.scad
├── src
│   ├── __init__.py
│   ├── __pycache__
│   │   └── __init__.cpython-312.pyc
│   ├── ai
│   │   ├── ai_service.py
│   │   ├── gemini_api.py
│   │   └── venice_api.py
│   ├── config.py
│   ├── main_remote.py
│   ├── main.py
│   ├── main.py.new
│   ├── models
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   └── code_generator.cpython-312.pyc
│   │   ├── code_generator.py
│   │   ├── cuda_mvs.py
│   │   └── scad_templates
│   │       └── basic_shapes.scad
│   ├── nlp
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   └── parameter_extractor.cpython-312.pyc
│   │   └── parameter_extractor.py
│   ├── openscad_wrapper
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   └── wrapper.cpython-312.pyc
│   │   └── wrapper.py
│   ├── printer_discovery
│   │   ├── __init__.py
│   │   └── printer_discovery.py
│   ├── remote
│   │   ├── connection_manager.py
│   │   ├── cuda_mvs_client.py
│   │   ├── cuda_mvs_server.py
│   │   └── error_handling.py
│   ├── testing
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   ├── primitive_tester.cpython-312.pyc
│   │   │   └── test_primitives.cpython-312.pyc
│   │   ├── primitive_tester.py
│   │   └── test_primitives.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   ├── stl_exporter.cpython-312.pyc
│   │   │   └── stl_validator.cpython-312.pyc
│   │   ├── cad_exporter.py
│   │   ├── format_validator.py
│   │   ├── stl_exporter.py
│   │   ├── stl_repair.py
│   │   └── stl_validator.py
│   ├── visualization
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   └── renderer.cpython-312.pyc
│   │   ├── headless_renderer.py
│   │   ├── renderer.py
│   │   └── web_interface.py
│   └── workflow
│       ├── image_approval.py
│       └── multi_view_to_model_pipeline.py
├── test_complete_workflow.py
├── test_cuda_mvs.py
├── test_gemini_api.py
├── test_image_approval_workflow.py
├── test_image_approval.py
├── test_image_to_model_pipeline.py
├── test_model_selection.py
├── test_multi_view_pipeline.py
├── test_primitives.sh
├── test_rabbit_direct.py
├── test_remote_cuda_mvs.py
└── test_venice_example.py
```

# Files

--------------------------------------------------------------------------------
/src/printer_discovery/printer_discovery.py:
--------------------------------------------------------------------------------

```python
  1 | import os
  2 | import logging
  3 | import socket
  4 | import json
  5 | import time
  6 | import threading
  7 | from typing import Dict, List, Any, Optional, Callable
  8 | 
  9 | logger = logging.getLogger(__name__)
 10 | 
 11 | class PrinterDiscovery:
 12 |     """
 13 |     Discovers 3D printers on the network and provides interfaces for direct printing.
 14 |     """
 15 |     
 16 |     def __init__(self):
 17 |         """Initialize the printer discovery service."""
 18 |         self.printers = {}  # Dictionary of discovered printers
 19 |         self.discovery_thread = None
 20 |         self.discovery_stop_event = threading.Event()
 21 |         self.discovery_callback = None
 22 |     
 23 |     def start_discovery(self, callback: Optional[Callable[[Dict[str, Any]], None]] = None) -> None:
 24 |         """
 25 |         Start discovering 3D printers on the network.
 26 |         
 27 |         Args:
 28 |             callback: Optional callback function to call when a printer is discovered
 29 |         """
 30 |         if self.discovery_thread and self.discovery_thread.is_alive():
 31 |             logger.warning("Printer discovery already running")
 32 |             return
 33 |         
 34 |         self.discovery_callback = callback
 35 |         self.discovery_stop_event.clear()
 36 |         self.discovery_thread = threading.Thread(target=self._discover_printers)
 37 |         self.discovery_thread.daemon = True
 38 |         self.discovery_thread.start()
 39 |         
 40 |         logger.info("Started printer discovery")
 41 |     
 42 |     def stop_discovery(self) -> None:
 43 |         """Stop discovering 3D printers."""
 44 |         if self.discovery_thread and self.discovery_thread.is_alive():
 45 |             self.discovery_stop_event.set()
 46 |             self.discovery_thread.join(timeout=2.0)
 47 |             logger.info("Stopped printer discovery")
 48 |         else:
 49 |             logger.warning("Printer discovery not running")
 50 |     
 51 |     def get_printers(self) -> Dict[str, Any]:
 52 |         """
 53 |         Get the list of discovered printers.
 54 |         
 55 |         Returns:
 56 |             Dictionary of printer information
 57 |         """
 58 |         return self.printers
 59 |     
 60 |     def _discover_printers(self) -> None:
 61 |         """Discover 3D printers on the network using various protocols."""
 62 |         # This is a simplified implementation that simulates printer discovery
 63 |         # In a real implementation, you would use protocols like mDNS, SNMP, or OctoPrint API
 64 |         
 65 |         # Simulate discovering printers
 66 |         while not self.discovery_stop_event.is_set():
 67 |             try:
 68 |                 # Simulate network discovery
 69 |                 self._discover_octoprint_printers()
 70 |                 self._discover_prusa_printers()
 71 |                 self._discover_ultimaker_printers()
 72 |                 
 73 |                 # Wait before next discovery cycle
 74 |                 time.sleep(10)
 75 |             except Exception as e:
 76 |                 logger.error(f"Error in printer discovery: {str(e)}")
 77 |                 time.sleep(5)
 78 |     
 79 |     def _discover_octoprint_printers(self) -> None:
 80 |         """Discover OctoPrint servers on the network."""
 81 |         # Simulate discovering OctoPrint servers
 82 |         # In a real implementation, you would use mDNS to discover OctoPrint instances
 83 |         
 84 |         # Simulate finding a printer
 85 |         printer_id = "octoprint_1"
 86 |         if printer_id not in self.printers:
 87 |             printer_info = {
 88 |                 "id": printer_id,
 89 |                 "name": "OctoPrint Printer",
 90 |                 "type": "octoprint",
 91 |                 "address": "192.168.1.100",
 92 |                 "port": 80,
 93 |                 "api_key": None,  # Would need to be provided by user
 94 |                 "status": "online",
 95 |                 "capabilities": ["print", "status", "cancel"]
 96 |             }
 97 |             
 98 |             self.printers[printer_id] = printer_info
 99 |             
100 |             if self.discovery_callback:
101 |                 self.discovery_callback(printer_info)
102 |             
103 |             logger.info(f"Discovered OctoPrint printer: {printer_info['name']}")
104 |     
105 |     def _discover_prusa_printers(self) -> None:
106 |         """Discover Prusa printers on the network."""
107 |         # Simulate discovering Prusa printers
108 |         
109 |         # Simulate finding a printer
110 |         printer_id = "prusa_1"
111 |         if printer_id not in self.printers:
112 |             printer_info = {
113 |                 "id": printer_id,
114 |                 "name": "Prusa MK3S",
115 |                 "type": "prusa",
116 |                 "address": "192.168.1.101",
117 |                 "port": 80,
118 |                 "status": "online",
119 |                 "capabilities": ["print", "status"]
120 |             }
121 |             
122 |             self.printers[printer_id] = printer_info
123 |             
124 |             if self.discovery_callback:
125 |                 self.discovery_callback(printer_info)
126 |             
127 |             logger.info(f"Discovered Prusa printer: {printer_info['name']}")
128 |     
129 |     def _discover_ultimaker_printers(self) -> None:
130 |         """Discover Ultimaker printers on the network."""
131 |         # Simulate discovering Ultimaker printers
132 |         
133 |         # Simulate finding a printer
134 |         printer_id = "ultimaker_1"
135 |         if printer_id not in self.printers:
136 |             printer_info = {
137 |                 "id": printer_id,
138 |                 "name": "Ultimaker S5",
139 |                 "type": "ultimaker",
140 |                 "address": "192.168.1.102",
141 |                 "port": 80,
142 |                 "status": "online",
143 |                 "capabilities": ["print", "status", "cancel"]
144 |             }
145 |             
146 |             self.printers[printer_id] = printer_info
147 |             
148 |             if self.discovery_callback:
149 |                 self.discovery_callback(printer_info)
150 |             
151 |             logger.info(f"Discovered Ultimaker printer: {printer_info['name']}")
152 | 
153 | 
class PrinterInterface:
    """
    Interface for communicating with 3D printers.

    Holds one connected client per printer id and delegates print/status/
    cancel operations to the protocol-specific client.
    """

    def __init__(self, printer_discovery: PrinterDiscovery):
        """
        Initialize the printer interface.

        Args:
            printer_discovery: Instance of PrinterDiscovery for finding printers
        """
        self.printer_discovery = printer_discovery
        self.connected_printers = {}  # printer_id -> connected client

    def connect_to_printer(self, printer_id: str, credentials: Optional[Dict[str, Any]] = None) -> bool:
        """
        Connect to a specific printer.

        Args:
            printer_id: ID of the printer to connect to
            credentials: Optional credentials for authentication

        Returns:
            True if connection successful, False otherwise
        """
        printers = self.printer_discovery.get_printers()
        if printer_id not in printers:
            logger.error(f"Printer not found: {printer_id}")
            return False

        printer_info = printers[printer_id]

        # Map each supported printer type to its client implementation.
        client_classes = {
            "octoprint": OctoPrintClient,
            "prusa": PrusaClient,
            "ultimaker": UltimakerClient,
        }
        client_class = client_classes.get(printer_info["type"])
        if client_class is None:
            logger.error(f"Unsupported printer type: {printer_info['type']}")
            return False

        client = client_class(printer_info, credentials)
        if not client.connect():
            return False

        self.connected_printers[printer_id] = client
        return True

    def disconnect_from_printer(self, printer_id: str) -> bool:
        """
        Disconnect from a specific printer.

        Args:
            printer_id: ID of the printer to disconnect from

        Returns:
            True if disconnection successful, False otherwise
        """
        client = self.connected_printers.get(printer_id)
        if client is None:
            logger.error(f"Not connected to printer: {printer_id}")
            return False

        if not client.disconnect():
            return False

        del self.connected_printers[printer_id]
        return True

    def print_file(self, printer_id: str, file_path: str, print_settings: Optional[Dict[str, Any]] = None) -> bool:
        """
        Send a file to a printer for printing.

        Args:
            printer_id: ID of the printer to print on
            file_path: Path to the STL file to print
            print_settings: Optional print settings

        Returns:
            True if print job started successfully, False otherwise
        """
        client = self.connected_printers.get(printer_id)
        if client is None:
            logger.error(f"Not connected to printer: {printer_id}")
            return False
        return client.print_file(file_path, print_settings)

    def get_printer_status(self, printer_id: str) -> Dict[str, Any]:
        """
        Get the status of a specific printer.

        Args:
            printer_id: ID of the printer to get status for

        Returns:
            Dictionary with printer status information
        """
        client = self.connected_printers.get(printer_id)
        if client is None:
            logger.error(f"Not connected to printer: {printer_id}")
            return {"error": "Not connected to printer"}
        return client.get_status()

    def cancel_print(self, printer_id: str) -> bool:
        """
        Cancel a print job on a specific printer.

        Args:
            printer_id: ID of the printer to cancel print on

        Returns:
            True if cancellation successful, False otherwise
        """
        client = self.connected_printers.get(printer_id)
        if client is None:
            logger.error(f"Not connected to printer: {printer_id}")
            return False
        return client.cancel_print()
278 | 
279 | 
class PrinterClient:
    """
    Base class for printer clients.

    Provides a simulated default implementation and tracks the connection
    flag; protocol-specific subclasses override these methods.
    """

    def __init__(self, printer_info: Dict[str, Any], credentials: Optional[Dict[str, Any]] = None):
        """
        Initialize the printer client.

        Args:
            printer_info: Information about the printer
            credentials: Optional credentials for authentication
        """
        self.printer_info = printer_info
        self.credentials = credentials or {}
        self.connected = False

    def connect(self) -> bool:
        """
        Connect to the printer.

        Returns:
            True if connection successful, False otherwise
        """
        # Trivial base behavior; subclasses perform the real handshake.
        self.connected = True
        return True

    def disconnect(self) -> bool:
        """
        Disconnect from the printer.

        Returns:
            True if disconnection successful, False otherwise
        """
        # Trivial base behavior; subclasses perform the real teardown.
        self.connected = False
        return True

    def print_file(self, file_path: str, print_settings: Optional[Dict[str, Any]] = None) -> bool:
        """
        Send a file to the printer for printing.

        Args:
            file_path: Path to the STL file to print
            print_settings: Optional print settings

        Returns:
            True if print job started successfully, False otherwise
        """
        # Base implementation only simulates a successful submission.
        if not self.connected:
            logger.error("Not connected to printer")
            return False

        logger.info(f"Printing file: {file_path}")
        return True

    def get_status(self) -> Dict[str, Any]:
        """
        Get the status of the printer.

        Returns:
            Dictionary with printer status information
        """
        # Base implementation only reports the connection flag.
        return {"status": "connected" if self.connected else "disconnected"}

    def cancel_print(self) -> bool:
        """
        Cancel the current print job.

        Returns:
            True if cancellation successful, False otherwise
        """
        # Base implementation only simulates a successful cancellation.
        if not self.connected:
            logger.error("Not connected to printer")
            return False

        logger.info("Cancelling print job")
        return True
363 | 
364 | 
class OctoPrintClient(PrinterClient):
    """
    Client for OctoPrint printers (simulated).

    Requires a non-empty ``api_key`` in the credentials dict; OctoPrint's
    REST API authenticates every request with that key.
    """

    def connect(self) -> bool:
        """
        Connect to an OctoPrint server.

        Returns:
            True if connection successful, False otherwise
        """
        try:
            # In a real implementation, you would use the OctoPrint API
            # to connect to the printer.

            # Reject a missing, None, or empty API key.  The previous
            # membership-only check ("api_key" not in self.credentials)
            # accepted {"api_key": None}, which can never authenticate.
            if not self.credentials.get("api_key"):
                logger.error("API key required for OctoPrint")
                return False

            # Simulate connection
            logger.info(f"Connected to OctoPrint server: {self.printer_info['address']}")
            self.connected = True
            return True
        except Exception as e:
            logger.error(f"Error connecting to OctoPrint server: {str(e)}")
            return False

    def print_file(self, file_path: str, print_settings: Optional[Dict[str, Any]] = None) -> bool:
        """
        Send a file to an OctoPrint server for printing.

        Args:
            file_path: Path to the STL file to print
            print_settings: Optional print settings

        Returns:
            True if the (simulated) print job started, False otherwise
        """
        if not self.connected:
            logger.error("Not connected to OctoPrint server")
            return False

        try:
            # In a real implementation, you would use the OctoPrint API
            # to upload the file and start printing.

            # Check the file exists before attempting the (simulated) upload.
            if not os.path.exists(file_path):
                logger.error(f"File not found: {file_path}")
                return False

            # Simulate printing
            logger.info(f"Printing file on OctoPrint server: {file_path}")
            return True
        except Exception as e:
            logger.error(f"Error printing file on OctoPrint server: {str(e)}")
            return False

    def get_status(self) -> Dict[str, Any]:
        """
        Get the status of an OctoPrint server.

        Returns:
            Printer/job status dictionary mirroring the OctoPrint API shape,
            {"status": "disconnected"} when not connected, or
            {"status": "error", "message": ...} on failure.
        """
        if not self.connected:
            return {"status": "disconnected"}

        try:
            # Simulated status payload; a real implementation would query
            # the OctoPrint API.
            return {
                "status": "connected",
                "printer": {
                    "state": "operational",
                    "temperature": {
                        "bed": {"actual": 60.0, "target": 60.0},
                        "tool0": {"actual": 210.0, "target": 210.0}
                    }
                },
                "job": {
                    "file": {"name": "example.gcode"},
                    "progress": {"completion": 0.0, "printTime": 0, "printTimeLeft": 0}
                }
            }
        except Exception as e:
            logger.error(f"Error getting status from OctoPrint server: {str(e)}")
            return {"status": "error", "message": str(e)}
436 | 
437 | 
class PrusaClient(PrinterClient):
    """Client for Prusa printers (simulated connection only)."""

    def connect(self) -> bool:
        """
        Connect to a Prusa printer.

        Returns:
            True if connection successful, False otherwise
        """
        try:
            # A real implementation would talk to the Prusa printer API;
            # this simulation just logs and records the connection.
            logger.info(f"Connected to Prusa printer: {self.printer_info['address']}")
        except Exception as e:
            logger.error(f"Error connecting to Prusa printer: {str(e)}")
            return False
        self.connected = True
        return True
454 | 
455 | 
class UltimakerClient(PrinterClient):
    """Client for Ultimaker printers (simulated connection only)."""

    def connect(self) -> bool:
        """
        Connect to an Ultimaker printer.

        Returns:
            True if connection successful, False otherwise
        """
        try:
            # A real implementation would talk to the Ultimaker API;
            # this simulation just logs and records the connection.
            logger.info(f"Connected to Ultimaker printer: {self.printer_info['address']}")
        except Exception as e:
            logger.error(f"Error connecting to Ultimaker printer: {str(e)}")
            return False
        self.connected = True
        return True
472 | 
```

--------------------------------------------------------------------------------
/src/nlp/parameter_extractor.py:
--------------------------------------------------------------------------------

```python
  1 | import re
  2 | import logging
  3 | from typing import Dict, Any, Tuple, List, Optional
  4 | import json
  5 | 
  6 | logger = logging.getLogger(__name__)
  7 | 
  8 | class ParameterExtractor:
  9 |     """
 10 |     Extract parameters from natural language descriptions.
 11 |     Implements dialog flow for collecting specifications and translating them to OpenSCAD parameters.
 12 |     """
 13 |     
 14 |     def __init__(self):
 15 |         """Initialize the parameter extractor."""
 16 |         # Using only millimeters as per project requirements
 17 |         self.unit_conversions = {
 18 |             'mm': 1.0
 19 |         }
 20 |         
 21 |         # Shape recognition patterns with expanded vocabulary
 22 |         self.shape_patterns = {
 23 |             'cube': r'\b(cube|box|square|rectangular|block|cuboid|brick)\b',
 24 |             'sphere': r'\b(sphere|ball|round|circular|globe|orb)\b',
 25 |             'cylinder': r'\b(cylinder|tube|pipe|rod|circular column|pillar|column)\b',
 26 |             'box': r'\b(hollow box|container|case|enclosure|bin|chest|tray)\b',
 27 |             'rounded_box': r'\b(rounded box|rounded container|rounded case|rounded enclosure|smooth box|rounded corners|chamfered box)\b',
 28 |             'cone': r'\b(cone|pyramid|tapered cylinder|funnel)\b',
 29 |             'torus': r'\b(torus|donut|ring|loop|circular ring)\b',
 30 |             'prism': r'\b(prism|triangular prism|wedge|triangular shape)\b',
 31 |             'custom': r'\b(custom|complex|special|unique|combined|composite)\b'
 32 |         }
 33 |         
 34 |         # Parameter recognition patterns with enhanced unit detection
 35 |         self.parameter_patterns = {
 36 |             'width': r'(\d+(?:\.\d+)?)\s*(?:mm|cm|m|in|inch|inches|ft|foot|feet)?\s*(?:wide|width|across|w)',
 37 |             'height': r'(\d+(?:\.\d+)?)\s*(?:mm|cm|m|in|inch|inches|ft|foot|feet)?\s*(?:high|height|tall|h)',
 38 |             'depth': r'(\d+(?:\.\d+)?)\s*(?:mm|cm|m|in|inch|inches|ft|foot|feet)?\s*(?:deep|depth|long|d|length)',
 39 |             'radius': r'(\d+(?:\.\d+)?)\s*(?:mm|cm|m|in|inch|inches|ft|foot|feet)?\s*(?:radius|r)',
 40 |             'diameter': r'(\d+(?:\.\d+)?)\s*(?:mm|cm|m|in|inch|inches|ft|foot|feet)?\s*(?:diameter|dia)',
 41 |             'thickness': r'(\d+(?:\.\d+)?)\s*(?:mm|cm|m|in|inch|inches|ft|foot|feet)?\s*(?:thick|thickness|t)',
 42 |             'segments': r'(\d+)\s*(?:segments|sides|faces|facets|smoothness)',
 43 |             'center': r'\b(centered|center|middle|origin)\b',
 44 |             'angle': r'(\d+(?:\.\d+)?)\s*(?:deg|degree|degrees|°)?\s*(?:angle|rotation|rotate|tilt)',
 45 |             'scale': r'(\d+(?:\.\d+)?)\s*(?:x|times|scale|scaling|factor)',
 46 |             'resolution': r'(\d+(?:\.\d+)?)\s*(?:resolution|quality|detail)'
 47 |         }
 48 |         
 49 |         # Dialog state for multi-turn conversations
 50 |         self.dialog_state = {}
 51 |     
 52 |     def extract_parameters(self, description: str, model_type: Optional[str] = None, 
 53 |                               existing_parameters: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]:
 54 |         """
 55 |         Extract model type and parameters from a natural language description.
 56 |         
 57 |         Args:
 58 |             description: Natural language description of the 3D object
 59 |             model_type: Optional model type for context (if already known)
 60 |             existing_parameters: Optional existing parameters for context (for modifications)
 61 |             
 62 |         Returns:
 63 |             Tuple of (model_type, parameters)
 64 |         """
 65 |         # Use provided model_type or determine from description
 66 |         if model_type is None:
 67 |             model_type = self._determine_shape_type(description)
 68 |         
 69 |         # Start with existing parameters if provided
 70 |         parameters = existing_parameters.copy() if existing_parameters else {}
 71 |         
 72 |         # Extract parameters based on the shape type
 73 |         new_parameters = self._extract_shape_parameters(description, model_type)
 74 |         
 75 |         # Update parameters with newly extracted ones
 76 |         parameters.update(new_parameters)
 77 |         
 78 |         # Apply default parameters if needed
 79 |         parameters = self._apply_default_parameters(model_type, parameters)
 80 |         
 81 |         logger.info(f"Extracted model type: {model_type}, parameters: {parameters}")
 82 |         return model_type, parameters
 83 |     
 84 |     def extract_parameters_from_modifications(self, modifications: str, model_type: Optional[str] = None, 
 85 |                                                existing_parameters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
 86 |         """
 87 |         Extract parameters from modification description with contextual understanding.
 88 |         
 89 |         Args:
 90 |             modifications: Description of modifications to make
 91 |             model_type: Optional model type for context
 92 |             existing_parameters: Optional existing parameters for context
 93 |             
 94 |         Returns:
 95 |             Dictionary of parameters to update
 96 |         """
 97 |         # Start with existing parameters if provided
 98 |         parameters = existing_parameters.copy() if existing_parameters else {}
 99 |         
100 |         # Extract all possible parameters from the modifications
101 |         new_parameters = {}
102 |         for param_name, pattern in self.parameter_patterns.items():
103 |             matches = re.findall(pattern, modifications, re.IGNORECASE)
104 |             if matches:
105 |                 # Take the last match if multiple are found
106 |                 value = matches[-1]
107 |                 if isinstance(value, tuple):
108 |                     value = value[0]  # Extract from capture group
109 |                 new_parameters[param_name] = self._convert_to_mm(value, modifications)
110 |         
111 |         # Update parameters with newly extracted ones
112 |         parameters.update(new_parameters)
113 |         
114 |         # Apply contextual understanding based on model type
115 |         if model_type and not new_parameters:
116 |             # If no explicit parameters were found, try to infer from context
117 |             # For now, we'll just log this case since inference is complex
118 |             logger.info(f"No explicit parameters found in '{modifications}', using existing parameters")
119 |         
120 |         logger.info(f"Extracted modification parameters: {parameters}")
121 |         return parameters
122 |     
123 |     def get_missing_parameters(self, model_type: str, parameters: Dict[str, Any]) -> List[str]:
124 |         """
125 |         Determine which required parameters are missing for a given model type.
126 |         
127 |         Args:
128 |             model_type: Type of model
129 |             parameters: Currently extracted parameters
130 |             
131 |         Returns:
132 |             List of missing parameter names
133 |         """
134 |         required_params = self._get_required_parameters(model_type)
135 |         return [param for param in required_params if param not in parameters]
136 |     
137 |     def update_dialog_state(self, user_id: str, model_type: Optional[str] = None, 
138 |                            parameters: Optional[Dict[str, Any]] = None) -> None:
139 |         """
140 |         Update the dialog state for a user.
141 |         
142 |         Args:
143 |             user_id: Unique identifier for the user
144 |             model_type: Optional model type to update
145 |             parameters: Optional parameters to update
146 |         """
147 |         if user_id not in self.dialog_state:
148 |             self.dialog_state[user_id] = {
149 |                 'model_type': None,
150 |                 'parameters': {},
151 |                 'missing_parameters': [],
152 |                 'current_question': None
153 |             }
154 |         
155 |         if model_type:
156 |             self.dialog_state[user_id]['model_type'] = model_type
157 |         
158 |         if parameters:
159 |             self.dialog_state[user_id]['parameters'].update(parameters)
160 |             
161 |         # Update missing parameters
162 |         if self.dialog_state[user_id]['model_type']:
163 |             missing = self.get_missing_parameters(
164 |                 self.dialog_state[user_id]['model_type'],
165 |                 self.dialog_state[user_id]['parameters']
166 |             )
167 |             self.dialog_state[user_id]['missing_parameters'] = missing
168 |     
169 |     def get_next_question(self, user_id: str) -> Optional[str]:
170 |         """
171 |         Get the next question to ask the user based on missing parameters.
172 |         
173 |         Args:
174 |             user_id: Unique identifier for the user
175 |             
176 |         Returns:
177 |             Question string or None if all parameters are collected
178 |         """
179 |         if user_id not in self.dialog_state:
180 |             return "What kind of 3D object would you like to create?"
181 |         
182 |         state = self.dialog_state[user_id]
183 |         
184 |         # If we don't have a model type yet, ask for it
185 |         if not state['model_type']:
186 |             state['current_question'] = "What kind of 3D object would you like to create?"
187 |             return state['current_question']
188 |         
189 |         # If we have missing parameters, ask for the first one
190 |         if state['missing_parameters']:
191 |             param = state['missing_parameters'][0]
192 |             question = self._get_parameter_question(param, state['model_type'])
193 |             state['current_question'] = question
194 |             return question
195 |         
196 |         # All parameters collected
197 |         state['current_question'] = None
198 |         return None
199 |     
200 |     def process_answer(self, user_id: str, answer: str) -> Dict[str, Any]:
201 |         """
202 |         Process a user's answer to a question.
203 |         
204 |         Args:
205 |             user_id: Unique identifier for the user
206 |             answer: User's answer to the current question
207 |             
208 |         Returns:
209 |             Updated dialog state
210 |         """
211 |         if user_id not in self.dialog_state:
212 |             # Initialize with default state
213 |             self.update_dialog_state(user_id)
214 |         
215 |         state = self.dialog_state[user_id]
216 |         current_question = state['current_question']
217 |         
218 |         # Process based on current question
219 |         if not state['model_type']:
220 |             # Trying to determine the model type
221 |             model_type = self._determine_shape_type(answer)
222 |             self.update_dialog_state(user_id, model_type=model_type)
223 |         elif state['missing_parameters']:
224 |             # Trying to collect a specific parameter
225 |             param = state['missing_parameters'][0]
226 |             value = self._extract_parameter_value(param, answer)
227 |             if value is not None:
228 |                 self.update_dialog_state(user_id, parameters={param: value})
229 |         
230 |         # Return the updated state
231 |         return self.dialog_state[user_id]
232 |     
233 |     def _determine_shape_type(self, description: str) -> str:
234 |         """
235 |         Determine the shape type from the description.
236 |         Enhanced to support more shape types and better pattern matching.
237 |         """
238 |         # Check for explicit shape mentions
239 |         for shape, pattern in self.shape_patterns.items():
240 |             if re.search(pattern, description, re.IGNORECASE):
241 |                 logger.info(f"Detected shape type: {shape} from pattern: {pattern}")
242 |                 return shape
243 |         
244 |         # Try to infer shape from context if no explicit mention
245 |         if re.search(r'\b(round|circular|sphere|ball)\b', description, re.IGNORECASE):
246 |             return "sphere"
247 |         elif re.search(r'\b(tall|column|pillar|rod)\b', description, re.IGNORECASE):
248 |             return "cylinder"
249 |         elif re.search(r'\b(box|container|case|enclosure)\b', description, re.IGNORECASE):
250 |             # Determine if it should be a rounded box
251 |             if re.search(r'\b(rounded|smooth|chamfered)\b', description, re.IGNORECASE):
252 |                 return "rounded_box"
253 |             return "box"
254 |         
255 |         # Default to cube if no shape is detected
256 |         logger.info("No specific shape detected, defaulting to cube")
257 |         return "cube"
258 |     
259 |     def _extract_shape_parameters(self, description: str, model_type: str) -> Dict[str, Any]:
260 |         """Extract parameters for a specific shape type."""
261 |         parameters = {}
262 |         
263 |         # Extract all possible parameters
264 |         for param_name, pattern in self.parameter_patterns.items():
265 |             matches = re.findall(pattern, description, re.IGNORECASE)
266 |             if matches:
267 |                 # Take the last match if multiple are found
268 |                 value = matches[-1]
269 |                 if isinstance(value, tuple):
270 |                     value = value[0]  # Extract from capture group
271 |                 parameters[param_name] = self._convert_to_mm(value, description)
272 |         
273 |         # Special case for diameter -> radius conversion
274 |         if 'diameter' in parameters and 'radius' not in parameters:
275 |             parameters['radius'] = parameters['diameter'] / 2
276 |             del parameters['diameter']
277 |         
278 |         # Special case for center parameter
279 |         if 'center' in parameters:
280 |             center_value = parameters['center']
281 |             if isinstance(center_value, (int, float)):
282 |                 # Convert numeric value to boolean string
283 |                 parameters['center'] = 'true' if center_value > 0 else 'false'
284 |             else:
285 |                 # Convert string value to boolean string
286 |                 center_str = str(center_value).lower()
287 |                 parameters['center'] = 'true' if center_str in ['true', 'yes', 'y', '1'] else 'false'
288 |         
289 |         return parameters
290 |     
291 |     def _convert_to_mm(self, value_str: str, context: str) -> float:
292 |         """
293 |         Convert a value to millimeters.
294 |         As per project requirements, we only use millimeters for design.
295 |         """
296 |         try:
297 |             value = float(value_str)
298 |             
299 |             # Since we're only using millimeters, we just return the value directly
300 |             # This simplifies the conversion logic while maintaining the function interface
301 |             logger.info(f"Using value {value} in millimeters")
302 |             return value
303 |         except ValueError:
304 |             logger.warning(f"Could not convert value to float: {value_str}")
305 |             return 0.0
306 |     
307 |     def _apply_default_parameters(self, model_type: str, parameters: Dict[str, Any]) -> Dict[str, Any]:
308 |         """Apply default parameters based on the model type."""
309 |         defaults = {
310 |             'cube': {'width': 10, 'depth': 10, 'height': 10, 'center': 'false'},
311 |             'sphere': {'radius': 10, 'segments': 32},
312 |             'cylinder': {'radius': 10, 'height': 20, 'center': 'false', 'segments': 32},
313 |             'box': {'width': 30, 'depth': 20, 'height': 15, 'thickness': 2},
314 |             'rounded_box': {'width': 30, 'depth': 20, 'height': 15, 'radius': 3, 'segments': 32},
315 |             'cone': {'base_radius': 10, 'height': 20, 'center': 'false', 'segments': 32},
316 |             'torus': {'major_radius': 20, 'minor_radius': 5, 'segments': 32},
317 |             'prism': {'width': 20, 'height': 15, 'depth': 20, 'center': 'false'},
318 |             'custom': {'width': 20, 'height': 20, 'depth': 20, 'center': 'false'}
319 |         }
320 |         
321 |         # Get defaults for the model type
322 |         model_defaults = defaults.get(model_type, {})
323 |         
324 |         # Apply defaults for missing parameters
325 |         for param, default_value in model_defaults.items():
326 |             if param not in parameters:
327 |                 parameters[param] = default_value
328 |         
329 |         return parameters
330 |     
331 |     def _get_required_parameters(self, model_type: str) -> List[str]:
332 |         """Get the list of required parameters for a model type."""
333 |         required_params = {
334 |             'cube': ['width', 'depth', 'height'],
335 |             'sphere': ['radius'],
336 |             'cylinder': ['radius', 'height'],
337 |             'box': ['width', 'depth', 'height', 'thickness'],
338 |             'rounded_box': ['width', 'depth', 'height', 'radius'],
339 |             'cone': ['base_radius', 'height'],
340 |             'torus': ['major_radius', 'minor_radius'],
341 |             'prism': ['width', 'height', 'depth'],
342 |             'custom': ['width', 'height', 'depth']
343 |         }
344 |         
345 |         return required_params.get(model_type, [])
346 |     
347 |     def _get_parameter_question(self, param: str, model_type: str) -> str:
348 |         """Get a question to ask for a specific parameter."""
349 |         questions = {
350 |             'width': f"What should be the width of the {model_type} in mm?",
351 |             'depth': f"What should be the depth of the {model_type} in mm?",
352 |             'height': f"What should be the height of the {model_type} in mm?",
353 |             'radius': f"What should be the radius of the {model_type} in mm?",
354 |             'thickness': f"What should be the wall thickness of the {model_type} in mm?",
355 |             'segments': f"How many segments should the {model_type} have for smoothness?",
356 |             'base_radius': f"What should be the base radius of the {model_type} in mm?",
357 |             'major_radius': f"What should be the major radius of the {model_type} in mm?",
358 |             'minor_radius': f"What should be the minor radius of the {model_type} in mm?",
359 |             'diameter': f"What should be the diameter of the {model_type} in mm?",
360 |             'angle': f"What should be the angle of the {model_type} in degrees?",
361 |             'scale': f"What should be the scale factor for the {model_type}?",
362 |             'resolution': f"What resolution should the {model_type} have (higher means more detailed)?",
363 |             'center': f"Should the {model_type} be centered? (yes/no)"
364 |         }
365 |         
366 |         return questions.get(param, f"What should be the {param} of the {model_type}?")
367 |     
368 |     def _extract_parameter_value(self, param: str, answer: str) -> Optional[float]:
369 |         """Extract a parameter value from an answer."""
370 |         pattern = self.parameter_patterns.get(param)
371 |         if not pattern:
372 |             # For parameters without specific patterns, try to extract any number
373 |             pattern = r'(\d+(?:\.\d+)?)'
374 |         
375 |         matches = re.findall(pattern, answer, re.IGNORECASE)
376 |         if matches:
377 |             value = matches[-1]
378 |             if isinstance(value, tuple):
379 |                 value = value[0]  # Extract from capture group
380 |             return self._convert_to_mm(value, answer)
381 |         
382 |         # Try to extract just a number
383 |         matches = re.findall(r'(\d+(?:\.\d+)?)', answer)
384 |         if matches:
385 |             value = matches[-1]
386 |             return self._convert_to_mm(value, answer)
387 |         
388 |         return None
389 | 
```

--------------------------------------------------------------------------------
/src/remote/connection_manager.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Connection manager for remote CUDA Multi-View Stereo processing.
  3 | 
  4 | This module provides functionality to discover, connect to, and manage
  5 | connections with remote CUDA MVS servers within the LAN.
  6 | """
  7 | 
  8 | import os
  9 | import json
 10 | import logging
 11 | import time
 12 | import threading
 13 | from typing import Dict, List, Optional, Any, Union, Callable
 14 | import socket
 15 | from zeroconf import ServiceBrowser, ServiceListener, Zeroconf
 16 | 
 17 | from src.remote.cuda_mvs_client import CUDAMVSClient
 18 | 
 19 | # Configure logging
 20 | logging.basicConfig(level=logging.INFO)
 21 | logger = logging.getLogger(__name__)
 22 | 
 23 | class CUDAMVSConnectionManager:
 24 |     """
 25 |     Connection manager for remote CUDA MVS servers.
 26 |     
 27 |     This class handles:
 28 |     1. Discovering available CUDA MVS servers on the LAN
 29 |     2. Managing connections to multiple servers
 30 |     3. Load balancing across available servers
 31 |     4. Monitoring server health and status
 32 |     5. Automatic failover if a server becomes unavailable
 33 |     """
 34 |     
 35 |     def __init__(
 36 |         self,
 37 |         api_key: Optional[str] = None,
 38 |         discovery_port: int = 8765,
 39 |         connection_timeout: int = 10,
 40 |         health_check_interval: int = 60,
 41 |         auto_discover: bool = True
 42 |     ):
 43 |         """
 44 |         Initialize the connection manager.
 45 |         
 46 |         Args:
 47 |             api_key: API key for authentication (if required)
 48 |             discovery_port: Port used for server discovery
 49 |             connection_timeout: Timeout for server connections in seconds
 50 |             health_check_interval: Interval for health checks in seconds
 51 |             auto_discover: Whether to automatically discover servers on startup
 52 |         """
 53 |         self.api_key = api_key
 54 |         self.discovery_port = discovery_port
 55 |         self.connection_timeout = connection_timeout
 56 |         self.health_check_interval = health_check_interval
 57 |         
 58 |         # Server tracking
 59 |         self.servers: Dict[str, Dict[str, Any]] = {}
 60 |         self.clients: Dict[str, CUDAMVSClient] = {}
 61 |         self.server_lock = threading.RLock()
 62 |         
 63 |         # Health check thread
 64 |         self.health_check_thread = None
 65 |         self.health_check_stop_event = threading.Event()
 66 |         
 67 |         # Discovery
 68 |         self.zeroconf = None
 69 |         self.browser = None
 70 |         
 71 |         # Start discovery if enabled
 72 |         if auto_discover:
 73 |             self.start_discovery()
 74 |             
 75 |         # Start health check thread
 76 |         self.start_health_check()
 77 |     
 78 |     def start_discovery(self):
 79 |         """
 80 |         Start discovering CUDA MVS servers on the LAN.
 81 |         """
 82 |         if self.zeroconf is not None:
 83 |             return
 84 |         
 85 |         try:
 86 |             self.zeroconf = Zeroconf()
 87 |             listener = CUDAMVSServiceListener(self)
 88 |             self.browser = ServiceBrowser(self.zeroconf, "_cudamvs._tcp.local.", listener)
 89 |             logger.info("Started CUDA MVS server discovery")
 90 |         except Exception as e:
 91 |             logger.error(f"Error starting discovery: {e}")
 92 |     
 93 |     def stop_discovery(self):
 94 |         """
 95 |         Stop discovering CUDA MVS servers.
 96 |         """
 97 |         if self.zeroconf is not None:
 98 |             try:
 99 |                 self.zeroconf.close()
100 |                 self.zeroconf = None
101 |                 self.browser = None
102 |                 logger.info("Stopped CUDA MVS server discovery")
103 |             except Exception as e:
104 |                 logger.error(f"Error stopping discovery: {e}")
105 |     
106 |     def start_health_check(self):
107 |         """
108 |         Start the health check thread.
109 |         """
110 |         if self.health_check_thread is not None and self.health_check_thread.is_alive():
111 |             return
112 |         
113 |         self.health_check_stop_event.clear()
114 |         self.health_check_thread = threading.Thread(
115 |             target=self._health_check_loop,
116 |             daemon=True
117 |         )
118 |         self.health_check_thread.start()
119 |         logger.info("Started health check thread")
120 |     
121 |     def stop_health_check(self):
122 |         """
123 |         Stop the health check thread.
124 |         """
125 |         if self.health_check_thread is not None:
126 |             self.health_check_stop_event.set()
127 |             self.health_check_thread.join(timeout=5)
128 |             self.health_check_thread = None
129 |             logger.info("Stopped health check thread")
130 |     
131 |     def _health_check_loop(self):
132 |         """
133 |         Health check loop that runs in a separate thread.
134 |         """
135 |         while not self.health_check_stop_event.is_set():
136 |             try:
137 |                 self.check_all_servers()
138 |             except Exception as e:
139 |                 logger.error(f"Error in health check: {e}")
140 |             
141 |             # Wait for the next check interval or until stopped
142 |             self.health_check_stop_event.wait(self.health_check_interval)
143 |     
144 |     def add_server(self, server_info: Dict[str, Any]):
145 |         """
146 |         Add a server to the manager.
147 |         
148 |         Args:
149 |             server_info: Dictionary with server information
150 |         """
151 |         server_id = server_info.get("server_id")
152 |         if not server_id:
153 |             logger.error("Cannot add server without server_id")
154 |             return
155 |         
156 |         with self.server_lock:
157 |             # Check if server already exists
158 |             if server_id in self.servers:
159 |                 # Update existing server info
160 |                 self.servers[server_id].update(server_info)
161 |                 logger.info(f"Updated server: {server_info.get('name')} at {server_info.get('url')}")
162 |             else:
163 |                 # Add new server
164 |                 self.servers[server_id] = server_info
165 |                 
166 |                 # Create client for the server
167 |                 self.clients[server_id] = CUDAMVSClient(
168 |                     server_url=server_info.get("url"),
169 |                     api_key=self.api_key,
170 |                     connection_timeout=self.connection_timeout
171 |                 )
172 |                 
173 |                 logger.info(f"Added server: {server_info.get('name')} at {server_info.get('url')}")
174 |     
175 |     def remove_server(self, server_id: str):
176 |         """
177 |         Remove a server from the manager.
178 |         
179 |         Args:
180 |             server_id: ID of the server to remove
181 |         """
182 |         with self.server_lock:
183 |             if server_id in self.servers:
184 |                 server_info = self.servers.pop(server_id)
185 |                 if server_id in self.clients:
186 |                     del self.clients[server_id]
187 |                 logger.info(f"Removed server: {server_info.get('name')} at {server_info.get('url')}")
188 |     
189 |     def get_servers(self) -> List[Dict[str, Any]]:
190 |         """
191 |         Get a list of all servers.
192 |         
193 |         Returns:
194 |             List of dictionaries with server information
195 |         """
196 |         with self.server_lock:
197 |             return list(self.servers.values())
198 |     
199 |     def get_server(self, server_id: str) -> Optional[Dict[str, Any]]:
200 |         """
201 |         Get information about a specific server.
202 |         
203 |         Args:
204 |             server_id: ID of the server
205 |             
206 |         Returns:
207 |             Dictionary with server information or None if not found
208 |         """
209 |         with self.server_lock:
210 |             return self.servers.get(server_id)
211 |     
212 |     def get_client(self, server_id: str) -> Optional[CUDAMVSClient]:
213 |         """
214 |         Get the client for a specific server.
215 |         
216 |         Args:
217 |             server_id: ID of the server
218 |             
219 |         Returns:
220 |             CUDAMVSClient instance or None if not found
221 |         """
222 |         with self.server_lock:
223 |             return self.clients.get(server_id)
224 |     
225 |     def get_best_server(self) -> Optional[str]:
226 |         """
227 |         Get the ID of the best server to use based on availability and load.
228 |         
229 |         Returns:
230 |             Server ID or None if no servers are available
231 |         """
232 |         with self.server_lock:
233 |             available_servers = [
234 |                 server_id for server_id, server in self.servers.items()
235 |                 if server.get("status") == "available"
236 |             ]
237 |             
238 |             if not available_servers:
239 |                 return None
240 |             
241 |             # For now, just return the first available server
242 |             # In a more advanced implementation, this would consider
243 |             # server load, capabilities, latency, etc.
244 |             return available_servers[0]
245 |     
246 |     def check_server(self, server_id: str) -> Dict[str, Any]:
247 |         """
248 |         Check the status of a specific server.
249 |         
250 |         Args:
251 |             server_id: ID of the server to check
252 |             
253 |         Returns:
254 |             Dictionary with server status information
255 |         """
256 |         client = self.get_client(server_id)
257 |         if not client:
258 |             return {"status": "error", "message": f"Server {server_id} not found"}
259 |         
260 |         # Test connection
261 |         result = client.test_connection()
262 |         
263 |         with self.server_lock:
264 |             if server_id in self.servers:
265 |                 # Update server status
266 |                 if result["status"] == "success":
267 |                     self.servers[server_id]["status"] = "available"
268 |                     self.servers[server_id]["last_check"] = time.time()
269 |                     self.servers[server_id]["latency_ms"] = result.get("latency_ms")
270 |                     
271 |                     # Update capabilities if available
272 |                     if "server_info" in result and "capabilities" in result["server_info"]:
273 |                         self.servers[server_id]["capabilities"] = result["server_info"]["capabilities"]
274 |                 else:
275 |                     self.servers[server_id]["status"] = "unavailable"
276 |                     self.servers[server_id]["last_check"] = time.time()
277 |                     self.servers[server_id]["error"] = result.get("message")
278 |         
279 |         return result
280 |     
281 |     def check_all_servers(self) -> Dict[str, Dict[str, Any]]:
282 |         """
283 |         Check the status of all servers.
284 |         
285 |         Returns:
286 |             Dictionary mapping server IDs to status information
287 |         """
288 |         results = {}
289 |         
290 |         with self.server_lock:
291 |             server_ids = list(self.servers.keys())
292 |         
293 |         for server_id in server_ids:
294 |             results[server_id] = self.check_server(server_id)
295 |         
296 |         return results
297 |     
298 |     def discover_servers(self) -> List[Dict[str, Any]]:
299 |         """
300 |         Manually discover CUDA MVS servers on the LAN.
301 |         
302 |         Returns:
303 |             List of dictionaries containing server information
304 |         """
305 |         # Create a temporary client to discover servers
306 |         client = CUDAMVSClient(
307 |             api_key=self.api_key,
308 |             discovery_port=self.discovery_port,
309 |             connection_timeout=self.connection_timeout
310 |         )
311 |         
312 |         discovered_servers = client.discover_servers()
313 |         
314 |         # Add discovered servers
315 |         for server_info in discovered_servers:
316 |             self.add_server(server_info)
317 |         
318 |         return discovered_servers
319 |     
320 |     def upload_images_to_best_server(self, image_paths: List[str]) -> Dict[str, Any]:
321 |         """
322 |         Upload images to the best available server.
323 |         
324 |         Args:
325 |             image_paths: List of paths to images to upload
326 |             
327 |         Returns:
328 |             Dictionary with upload status and job information
329 |         """
330 |         server_id = self.get_best_server()
331 |         if not server_id:
332 |             return {"status": "error", "message": "No available servers"}
333 |         
334 |         client = self.get_client(server_id)
335 |         if not client:
336 |             return {"status": "error", "message": f"Client for server {server_id} not found"}
337 |         
338 |         # Upload images
339 |         result = client.upload_images(image_paths)
340 |         
341 |         # Add server information to result
342 |         result["server_id"] = server_id
343 |         result["server_name"] = self.servers[server_id].get("name")
344 |         
345 |         return result
346 |     
347 |     def generate_model_from_images(
348 |         self,
349 |         image_paths: List[str],
350 |         output_format: str = "obj",
351 |         wait_for_completion: bool = True,
352 |         poll_interval: int = 5,
353 |         server_id: Optional[str] = None
354 |     ) -> Dict[str, Any]:
355 |         """
356 |         Generate a 3D model from images using the best available server.
357 |         
358 |         Args:
359 |             image_paths: List of paths to images
360 |             output_format: Format of the output model
361 |             wait_for_completion: Whether to wait for job completion
362 |             poll_interval: Interval in seconds to poll for job status
363 |             server_id: ID of the server to use (uses best server if None)
364 |             
365 |         Returns:
366 |             Dictionary with job status and model information if completed
367 |         """
368 |         # Get server to use
369 |         if server_id is None:
370 |             server_id = self.get_best_server()
371 |             if not server_id:
372 |                 return {"status": "error", "message": "No available servers"}
373 |         
374 |         client = self.get_client(server_id)
375 |         if not client:
376 |             return {"status": "error", "message": f"Client for server {server_id} not found"}
377 |         
378 |         # Generate model
379 |         result = client.generate_model_from_images(
380 |             image_paths=image_paths,
381 |             output_format=output_format,
382 |             wait_for_completion=wait_for_completion,
383 |             poll_interval=poll_interval
384 |         )
385 |         
386 |         # Add server information to result
387 |         result["server_id"] = server_id
388 |         result["server_name"] = self.servers[server_id].get("name")
389 |         
390 |         return result
391 |     
392 |     def get_job_status(self, job_id: str, server_id: str) -> Dict[str, Any]:
393 |         """
394 |         Get the status of a job on a specific server.
395 |         
396 |         Args:
397 |             job_id: ID of the job to check
398 |             server_id: ID of the server
399 |             
400 |         Returns:
401 |             Dictionary with job status information
402 |         """
403 |         client = self.get_client(server_id)
404 |         if not client:
405 |             return {"status": "error", "message": f"Client for server {server_id} not found"}
406 |         
407 |         return client.get_job_status(job_id)
408 |     
409 |     def download_model(self, job_id: str, server_id: str, output_format: str = "obj") -> Dict[str, Any]:
410 |         """
411 |         Download a processed 3D model from a specific server.
412 |         
413 |         Args:
414 |             job_id: ID of the job to download
415 |             server_id: ID of the server
416 |             output_format: Format of the model to download
417 |             
418 |         Returns:
419 |             Dictionary with download status and local file path
420 |         """
421 |         client = self.get_client(server_id)
422 |         if not client:
423 |             return {"status": "error", "message": f"Client for server {server_id} not found"}
424 |         
425 |         return client.download_model(job_id, output_format)
426 |     
427 |     def cancel_job(self, job_id: str, server_id: str) -> Dict[str, Any]:
428 |         """
429 |         Cancel a running job on a specific server.
430 |         
431 |         Args:
432 |             job_id: ID of the job to cancel
433 |             server_id: ID of the server
434 |             
435 |         Returns:
436 |             Dictionary with cancellation status
437 |         """
438 |         client = self.get_client(server_id)
439 |         if not client:
440 |             return {"status": "error", "message": f"Client for server {server_id} not found"}
441 |         
442 |         return client.cancel_job(job_id)
443 |     
444 |     def cleanup(self):
445 |         """
446 |         Clean up resources.
447 |         """
448 |         self.stop_health_check()
449 |         self.stop_discovery()
450 | 
451 | 
class CUDAMVSServiceListener(ServiceListener):
    """
    Zeroconf service listener that feeds discovered CUDA MVS servers into a
    connection manager.
    """

    def __init__(self, connection_manager: CUDAMVSConnectionManager):
        """
        Initialize the service listener.

        Args:
            connection_manager: Connection manager to update with discovered servers
        """
        self.connection_manager = connection_manager

    def add_service(self, zc: Zeroconf, type_: str, name: str):
        """
        Called when a service is discovered.

        Args:
            zc: Zeroconf instance
            type_: Service type
            name: Service name
        """
        info = zc.get_service_info(type_, name)
        if not info:
            return

        try:
            # A server without a resolvable address is unusable; skip it.
            addresses = info.parsed_addresses()
            if not addresses:
                return

            props = info.properties
            display_name = props.get(b'name', b'Unknown').decode('utf-8')

            # Capabilities are optional JSON; malformed data is ignored.
            capabilities = {}
            raw_capabilities = props.get(b'capabilities')
            if raw_capabilities is not None:
                try:
                    capabilities = json.loads(raw_capabilities.decode('utf-8'))
                except json.JSONDecodeError:
                    pass

            # Hand the assembled record to the connection manager.
            self.connection_manager.add_server({
                "server_id": name.split('.')[0],
                "name": display_name,
                "url": f"http://{addresses[0]}:{info.port}",
                "capabilities": capabilities,
                "status": "unknown",
                "discovered_at": time.time()
            })

        except Exception as e:
            logger.error(f"Error processing discovered service: {e}")

    def remove_service(self, zc: Zeroconf, type_: str, name: str):
        """
        Called when a service is removed.

        Args:
            zc: Zeroconf instance
            type_: Service type
            name: Service name
        """
        try:
            self.connection_manager.remove_server(name.split('.')[0])
        except Exception as e:
            logger.error(f"Error removing service: {e}")

    def update_service(self, zc: Zeroconf, type_: str, name: str):
        """
        Called when a service is updated.

        Args:
            zc: Zeroconf instance
            type_: Service type
            name: Service name
        """
        # An update carries the same payload as a fresh announcement, so
        # re-running the add path refreshes the stored record.
        self.add_service(zc, type_, name)
538 | 
```

--------------------------------------------------------------------------------
/src/visualization/web_interface.py:
--------------------------------------------------------------------------------

```python
import os
import base64
import logging
from typing import Dict, Any, List, Optional

from fastapi import APIRouter, HTTPException, Request
from fastapi.responses import FileResponse, HTMLResponse
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
  9 | 
 10 | logger = logging.getLogger(__name__)
 11 | 
class WebInterface:
    """
    Web interface for displaying model previews and managing 3D models.

    Serves HTML pages under /ui (via an APIRouter) and file endpoints under
    /api, and writes its own Jinja2 templates and static assets to disk on
    construction.
    """
    
    def __init__(self, app, static_dir: str, templates_dir: str, output_dir: str):
        """
        Initialize the web interface.
        
        Args:
            app: FastAPI application
            static_dir: Directory for static files
            templates_dir: Directory for templates
            output_dir: Directory containing output files (STL, PNG)
        """
        self.app = app
        self.static_dir = static_dir
        self.templates_dir = templates_dir
        self.output_dir = output_dir
        # Sub-directories the preview/STL routes read from
        self.preview_dir = os.path.join(output_dir, "preview")
        self.stl_dir = os.path.join(output_dir, "stl")
        
        # Create directories if they don't exist (the output sub-directories
        # above are expected to be created by whatever produces the files)
        os.makedirs(self.static_dir, exist_ok=True)
        os.makedirs(self.templates_dir, exist_ok=True)
        
        # Router for the HTML pages, mounted under /ui
        self.router = APIRouter(prefix="/ui", tags=["UI"])
        
        # Serve static assets (CSS/JS/placeholder) from /static
        self.app.mount("/static", StaticFiles(directory=static_dir), name="static")
        
        # Jinja2 template engine backed by templates_dir
        self.templates = Jinja2Templates(directory=templates_dir)
        
        # Register routes before writing the files they depend on; route
        # handlers only touch the filesystem at request time
        self._register_routes()
        
        # Create template files (overwrites existing ones of the same name)
        self._create_template_files()
        
        # Create static files (overwrites existing ones of the same name)
        self._create_static_files()
 55 |     
 56 |     def _register_routes(self):
 57 |         """Register routes for the web interface."""
 58 |         # Home page
 59 |         @self.router.get("/", response_class=HTMLResponse)
 60 |         async def home(request: Request):
 61 |             return self.templates.TemplateResponse("index.html", {"request": request})
 62 |         
 63 |         # Model preview page
 64 |         @self.router.get("/preview/{model_id}", response_class=HTMLResponse)
 65 |         async def preview(request: Request, model_id: str):
 66 |             # Check if preview exists
 67 |             preview_file = os.path.join(self.preview_dir, f"{model_id}.png")
 68 |             if not os.path.exists(preview_file):
 69 |                 raise HTTPException(status_code=404, detail="Preview not found")
 70 |             
 71 |             # Get multi-angle previews if they exist
 72 |             angles = ["front", "top", "right", "perspective"]
 73 |             previews = {}
 74 |             
 75 |             for angle in angles:
 76 |                 angle_file = os.path.join(self.preview_dir, f"{model_id}_{angle}.png")
 77 |                 if os.path.exists(angle_file):
 78 |                     previews[angle] = f"/api/preview/{model_id}_{angle}"
 79 |             
 80 |             # If no multi-angle previews, use the main preview
 81 |             if not previews:
 82 |                 previews["main"] = f"/api/preview/{model_id}"
 83 |             
 84 |             # Check if STL exists
 85 |             stl_file = os.path.join(self.stl_dir, f"{model_id}.stl")
 86 |             stl_url = f"/api/stl/{model_id}" if os.path.exists(stl_file) else None
 87 |             
 88 |             return self.templates.TemplateResponse(
 89 |                 "preview.html", 
 90 |                 {
 91 |                     "request": request, 
 92 |                     "model_id": model_id,
 93 |                     "previews": previews,
 94 |                     "stl_url": stl_url
 95 |                 }
 96 |             )
 97 |         
 98 |         # List all models
 99 |         @self.router.get("/models", response_class=HTMLResponse)
100 |         async def list_models(request: Request):
101 |             # Get all STL files
102 |             stl_files = []
103 |             if os.path.exists(self.stl_dir):
104 |                 stl_files = [f for f in os.listdir(self.stl_dir) if f.endswith(".stl")]
105 |             
106 |             # Extract model IDs
107 |             model_ids = [os.path.splitext(f)[0] for f in stl_files]
108 |             
109 |             # Get preview URLs
110 |             models = []
111 |             for model_id in model_ids:
112 |                 preview_file = os.path.join(self.preview_dir, f"{model_id}.png")
113 |                 preview_url = f"/api/preview/{model_id}" if os.path.exists(preview_file) else None
114 |                 stl_url = f"/api/stl/{model_id}"
115 |                 
116 |                 models.append({
117 |                     "id": model_id,
118 |                     "preview_url": preview_url,
119 |                     "stl_url": stl_url
120 |                 })
121 |             
122 |             return self.templates.TemplateResponse(
123 |                 "models.html", 
124 |                 {"request": request, "models": models}
125 |             )
126 |         
127 |         # API endpoints for serving files
128 |         
129 |         # Serve preview image
130 |         @self.app.get("/api/preview/{preview_id}")
131 |         async def get_preview(preview_id: str):
132 |             preview_file = os.path.join(self.preview_dir, f"{preview_id}.png")
133 |             if not os.path.exists(preview_file):
134 |                 raise HTTPException(status_code=404, detail="Preview not found")
135 |             
136 |             # Return the file
137 |             with open(preview_file, "rb") as f:
138 |                 content = f.read()
139 |             
140 |             return {
141 |                 "content": base64.b64encode(content).decode("utf-8"),
142 |                 "content_type": "image/png"
143 |             }
144 |         
145 |         # Serve STL file
146 |         @self.app.get("/api/stl/{model_id}")
147 |         async def get_stl(model_id: str):
148 |             stl_file = os.path.join(self.stl_dir, f"{model_id}.stl")
149 |             if not os.path.exists(stl_file):
150 |                 raise HTTPException(status_code=404, detail="STL file not found")
151 |             
152 |             # Return the file
153 |             with open(stl_file, "rb") as f:
154 |                 content = f.read()
155 |             
156 |             return {
157 |                 "content": base64.b64encode(content).decode("utf-8"),
158 |                 "content_type": "application/octet-stream",
159 |                 "filename": f"{model_id}.stl"
160 |             }
161 |         
162 |         # Register the router with the app
163 |         self.app.include_router(self.router)
164 |     
    def _create_template_files(self):
        """
        Write the Jinja2 template files used by the UI routes.

        Called from __init__, so the templates are (re)written on every
        construction, overwriting any same-named files in templates_dir.
        """
        # Base layout: header/nav/footer shared by all pages via {% extends %}
        base_template = """<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>{% block title %}OpenSCAD MCP Server{% endblock %}</title>
    <link rel="stylesheet" href="/static/styles.css">
</head>
<body>
    <header>
        <h1>OpenSCAD MCP Server</h1>
        <nav>
            <ul>
                <li><a href="/ui/">Home</a></li>
                <li><a href="/ui/models">Models</a></li>
            </ul>
        </nav>
    </header>
    
    <main>
        {% block content %}{% endblock %}
    </main>
    
    <footer>
        <p>OpenSCAD MCP Server - Model Context Protocol Implementation</p>
    </footer>
    
    <script src="/static/script.js"></script>
</body>
</html>
"""
        
        # Home page content, rendered by the "/" UI route
        index_template = """{% extends "base.html" %}

{% block title %}OpenSCAD MCP Server - Home{% endblock %}

{% block content %}
<section class="hero">
    <h2>Welcome to OpenSCAD MCP Server</h2>
    <p>A Model Context Protocol server for generating 3D models with OpenSCAD</p>
</section>

<section class="features">
    <div class="feature">
        <h3>Natural Language Processing</h3>
        <p>Describe 3D objects in natural language and get parametric models</p>
    </div>
    
    <div class="feature">
        <h3>Preview Generation</h3>
        <p>See your models from multiple angles before exporting</p>
    </div>
    
    <div class="feature">
        <h3>STL Export</h3>
        <p>Generate STL files ready for 3D printing</p>
    </div>
</section>
{% endblock %}
"""
        
        # Per-model preview page, rendered by "/preview/{model_id}";
        # expects `previews` (angle -> URL) and optional `stl_url` in context
        preview_template = """{% extends "base.html" %}

{% block title %}Model Preview - {{ model_id }}{% endblock %}

{% block content %}
<section class="model-preview">
    <h2>Model Preview: {{ model_id }}</h2>
    
    <div class="preview-container">
        {% for angle, url in previews.items() %}
        <div class="preview-angle">
            <h3>{{ angle|title }} View</h3>
            <img src="{{ url }}" alt="{{ angle }} view of {{ model_id }}" class="preview-image" data-angle="{{ angle }}">
        </div>
        {% endfor %}
    </div>
    
    {% if stl_url %}
    <div class="download-container">
        <a href="{{ stl_url }}" class="download-button" download="{{ model_id }}.stl">Download STL</a>
    </div>
    {% endif %}
</section>
{% endblock %}
"""
        
        # Model listing page, rendered by "/models"; expects a `models` list
        # of dicts with id / preview_url / stl_url
        models_template = """{% extends "base.html" %}

{% block title %}All Models{% endblock %}

{% block content %}
<section class="models-list">
    <h2>All Models</h2>
    
    {% if models %}
    <div class="models-grid">
        {% for model in models %}
        <div class="model-card">
            <h3>{{ model.id }}</h3>
            {% if model.preview_url %}
            <img src="{{ model.preview_url }}" alt="Preview of {{ model.id }}" class="model-thumbnail">
            {% else %}
            <div class="no-preview">No preview available</div>
            {% endif %}
            <div class="model-actions">
                <a href="/ui/preview/{{ model.id }}" class="view-button">View</a>
                <a href="{{ model.stl_url }}" class="download-button" download="{{ model.id }}.stl">Download</a>
            </div>
        </div>
        {% endfor %}
    </div>
    {% else %}
    <p>No models found.</p>
    {% endif %}
</section>
{% endblock %}
"""
        
        # Write templates to files (directory may already exist from __init__)
        os.makedirs(self.templates_dir, exist_ok=True)
        
        with open(os.path.join(self.templates_dir, "base.html"), "w") as f:
            f.write(base_template)
        
        with open(os.path.join(self.templates_dir, "index.html"), "w") as f:
            f.write(index_template)
        
        with open(os.path.join(self.templates_dir, "preview.html"), "w") as f:
            f.write(preview_template)
        
        with open(os.path.join(self.templates_dir, "models.html"), "w") as f:
            f.write(models_template)
304 |     
305 |     def _create_static_files(self):
306 |         """Create static files for the web interface."""
307 |         # Create CSS file
308 |         css = """/* Base styles */
309 | * {
310 |     box-sizing: border-box;
311 |     margin: 0;
312 |     padding: 0;
313 | }
314 | 
315 | body {
316 |     font-family: Arial, sans-serif;
317 |     line-height: 1.6;
318 |     color: #333;
319 |     background-color: #f4f4f4;
320 | }
321 | 
322 | header {
323 |     background-color: #333;
324 |     color: #fff;
325 |     padding: 1rem;
326 | }
327 | 
328 | header h1 {
329 |     margin-bottom: 0.5rem;
330 | }
331 | 
332 | nav ul {
333 |     display: flex;
334 |     list-style: none;
335 | }
336 | 
337 | nav ul li {
338 |     margin-right: 1rem;
339 | }
340 | 
341 | nav ul li a {
342 |     color: #fff;
343 |     text-decoration: none;
344 | }
345 | 
346 | nav ul li a:hover {
347 |     text-decoration: underline;
348 | }
349 | 
350 | main {
351 |     max-width: 1200px;
352 |     margin: 0 auto;
353 |     padding: 2rem;
354 | }
355 | 
356 | footer {
357 |     background-color: #333;
358 |     color: #fff;
359 |     text-align: center;
360 |     padding: 1rem;
361 |     margin-top: 2rem;
362 | }
363 | 
364 | /* Home page */
365 | .hero {
366 |     text-align: center;
367 |     margin-bottom: 2rem;
368 | }
369 | 
370 | .hero h2 {
371 |     font-size: 2rem;
372 |     margin-bottom: 1rem;
373 | }
374 | 
375 | .features {
376 |     display: flex;
377 |     justify-content: space-between;
378 |     flex-wrap: wrap;
379 | }
380 | 
381 | .feature {
382 |     flex: 1;
383 |     background-color: #fff;
384 |     padding: 1.5rem;
385 |     margin: 0.5rem;
386 |     border-radius: 5px;
387 |     box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
388 | }
389 | 
390 | .feature h3 {
391 |     margin-bottom: 1rem;
392 | }
393 | 
394 | /* Preview page */
395 | .model-preview h2 {
396 |     margin-bottom: 1.5rem;
397 | }
398 | 
399 | .preview-container {
400 |     display: flex;
401 |     flex-wrap: wrap;
402 |     gap: 1rem;
403 |     margin-bottom: 1.5rem;
404 | }
405 | 
406 | .preview-angle {
407 |     flex: 1;
408 |     min-width: 300px;
409 |     background-color: #fff;
410 |     padding: 1rem;
411 |     border-radius: 5px;
412 |     box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
413 | }
414 | 
415 | .preview-angle h3 {
416 |     margin-bottom: 0.5rem;
417 | }
418 | 
419 | .preview-image {
420 |     width: 100%;
421 |     height: auto;
422 |     border: 1px solid #ddd;
423 | }
424 | 
425 | .download-container {
426 |     text-align: center;
427 |     margin-top: 1rem;
428 | }
429 | 
430 | .download-button {
431 |     display: inline-block;
432 |     background-color: #4CAF50;
433 |     color: white;
434 |     padding: 0.5rem 1rem;
435 |     text-decoration: none;
436 |     border-radius: 4px;
437 | }
438 | 
439 | .download-button:hover {
440 |     background-color: #45a049;
441 | }
442 | 
443 | /* Models page */
444 | .models-grid {
445 |     display: grid;
446 |     grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
447 |     gap: 1.5rem;
448 | }
449 | 
450 | .model-card {
451 |     background-color: #fff;
452 |     border-radius: 5px;
453 |     box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
454 |     overflow: hidden;
455 | }
456 | 
457 | .model-card h3 {
458 |     padding: 1rem;
459 |     background-color: #f8f8f8;
460 |     border-bottom: 1px solid #eee;
461 | }
462 | 
463 | .model-thumbnail {
464 |     width: 100%;
465 |     height: 200px;
466 |     object-fit: contain;
467 |     background-color: #f4f4f4;
468 | }
469 | 
470 | .no-preview {
471 |     width: 100%;
472 |     height: 200px;
473 |     display: flex;
474 |     align-items: center;
475 |     justify-content: center;
476 |     background-color: #f4f4f4;
477 |     color: #999;
478 | }
479 | 
480 | .model-actions {
481 |     display: flex;
482 |     padding: 1rem;
483 | }
484 | 
485 | .view-button, .download-button {
486 |     flex: 1;
487 |     text-align: center;
488 |     padding: 0.5rem;
489 |     text-decoration: none;
490 |     border-radius: 4px;
491 |     margin: 0 0.25rem;
492 | }
493 | 
494 | .view-button {
495 |     background-color: #2196F3;
496 |     color: white;
497 | }
498 | 
499 | .view-button:hover {
500 |     background-color: #0b7dda;
501 | }
502 | """
503 |         
504 |         # Create JavaScript file
505 |         js = """// JavaScript for OpenSCAD MCP Server web interface
506 | 
507 | document.addEventListener('DOMContentLoaded', function() {
508 |     // Handle image loading errors
509 |     const images = document.querySelectorAll('img');
510 |     images.forEach(img => {
511 |         img.onerror = function() {
512 |             this.src = '/static/placeholder.png';
513 |         };
514 |     });
515 |     
516 |     // Handle STL download
517 |     const downloadButtons = document.querySelectorAll('.download-button');
518 |     downloadButtons.forEach(button => {
519 |         button.addEventListener('click', async function(e) {
520 |             e.preventDefault();
521 |             
522 |             const url = this.getAttribute('href');
523 |             const filename = this.hasAttribute('download') ? this.getAttribute('download') : 'model.stl';
524 |             
525 |             try {
526 |                 const response = await fetch(url);
527 |                 const data = await response.json();
528 |                 
529 |                 // Decode base64 content
530 |                 const content = atob(data.content);
531 |                 
532 |                 // Convert to Blob
533 |                 const bytes = new Uint8Array(content.length);
534 |                 for (let i = 0; i < content.length; i++) {
535 |                     bytes[i] = content.charCodeAt(i);
536 |                 }
537 |                 const blob = new Blob([bytes], { type: data.content_type });
538 |                 
539 |                 // Create download link
540 |                 const downloadLink = document.createElement('a');
541 |                 downloadLink.href = URL.createObjectURL(blob);
542 |                 downloadLink.download = data.filename || filename;
543 |                 
544 |                 // Trigger download
545 |                 document.body.appendChild(downloadLink);
546 |                 downloadLink.click();
547 |                 document.body.removeChild(downloadLink);
548 |             } catch (error) {
549 |                 console.error('Error downloading file:', error);
550 |                 alert('Error downloading file. Please try again.');
551 |             }
552 |         });
553 |     });
554 |     
555 |     // Handle preview images
556 |     const previewImages = document.querySelectorAll('.preview-image');
557 |     previewImages.forEach(img => {
558 |         img.addEventListener('click', function() {
559 |             const url = this.getAttribute('src');
560 |             const angle = this.getAttribute('data-angle');
561 |             
562 |             // Create modal for larger view
563 |             const modal = document.createElement('div');
564 |             modal.className = 'preview-modal';
565 |             modal.innerHTML = `
566 |                 <div class="modal-content">
567 |                     <span class="close-button">&times;</span>
568 |                     <h3>${angle ? angle.charAt(0).toUpperCase() + angle.slice(1) + ' View' : 'Preview'}</h3>
569 |                     <img src="${url}" alt="Preview">
570 |                 </div>
571 |             `;
572 |             
573 |             // Add modal styles
574 |             modal.style.position = 'fixed';
575 |             modal.style.top = '0';
576 |             modal.style.left = '0';
577 |             modal.style.width = '100%';
578 |             modal.style.height = '100%';
579 |             modal.style.backgroundColor = 'rgba(0,0,0,0.7)';
580 |             modal.style.display = 'flex';
581 |             modal.style.alignItems = 'center';
582 |             modal.style.justifyContent = 'center';
583 |             modal.style.zIndex = '1000';
584 |             
585 |             const modalContent = modal.querySelector('.modal-content');
586 |             modalContent.style.backgroundColor = '#fff';
587 |             modalContent.style.padding = '20px';
588 |             modalContent.style.borderRadius = '5px';
589 |             modalContent.style.maxWidth = '90%';
590 |             modalContent.style.maxHeight = '90%';
591 |             modalContent.style.overflow = 'auto';
592 |             
593 |             const closeButton = modal.querySelector('.close-button');
594 |             closeButton.style.float = 'right';
595 |             closeButton.style.fontSize = '1.5rem';
596 |             closeButton.style.fontWeight = 'bold';
597 |             closeButton.style.cursor = 'pointer';
598 |             
599 |             const modalImg = modal.querySelector('img');
600 |             modalImg.style.maxWidth = '100%';
601 |             modalImg.style.maxHeight = '70vh';
602 |             modalImg.style.display = 'block';
603 |             modalImg.style.margin = '0 auto';
604 |             
605 |             // Add modal to body
606 |             document.body.appendChild(modal);
607 |             
608 |             // Close modal when clicking close button or outside the modal
609 |             closeButton.addEventListener('click', function() {
610 |                 document.body.removeChild(modal);
611 |             });
612 |             
613 |             modal.addEventListener('click', function(e) {
614 |                 if (e.target === modal) {
615 |                     document.body.removeChild(modal);
616 |                 }
617 |             });
618 |         });
619 |     });
620 | });
621 | """
622 |         
623 |         # Create placeholder image
624 |         placeholder_svg = """<svg xmlns="http://www.w3.org/2000/svg" width="800" height="600" viewBox="0 0 800 600">
625 |     <rect width="800" height="600" fill="#f0f0f0"/>
626 |     <text x="400" y="300" font-family="Arial" font-size="24" text-anchor="middle" fill="#999">Preview not available</text>
627 | </svg>"""
628 |         
629 |         # Write static files
630 |         os.makedirs(self.static_dir, exist_ok=True)
631 |         
632 |         with open(os.path.join(self.static_dir, "styles.css"), "w") as f:
633 |             f.write(css)
634 |         
635 |         with open(os.path.join(self.static_dir, "script.js"), "w") as f:
636 |             f.write(js)
637 |         
638 |         with open(os.path.join(self.static_dir, "placeholder.svg"), "w") as f:
639 |             f.write(placeholder_svg)
640 | 
```

--------------------------------------------------------------------------------
/src/remote/cuda_mvs_server.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Server for remote CUDA Multi-View Stereo processing.
  3 | 
  4 | This module provides a server implementation that can be deployed on a machine
  5 | with CUDA capabilities to process multi-view images into 3D models remotely.
  6 | """
  7 | 
  8 | import os
  9 | import sys
 10 | import json
 11 | import uuid
 12 | import logging
 13 | import argparse
 14 | import tempfile
 15 | import subprocess
 16 | from typing import Dict, List, Optional, Any, Union
 17 | from pathlib import Path
 18 | import shutil
 19 | import time
 20 | 
 21 | from fastapi import FastAPI, File, UploadFile, Form, HTTPException, BackgroundTasks, Depends
 22 | from fastapi.responses import FileResponse, JSONResponse
 23 | from fastapi.middleware.cors import CORSMiddleware
 24 | from fastapi.security import APIKeyHeader
 25 | import uvicorn
 26 | from pydantic import BaseModel, Field
 27 | from zeroconf import ServiceInfo, Zeroconf
 28 | 
# Configure logging for the whole module; handlers/format use logging defaults
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger used throughout
 32 | 
# Constants — defaults consumed by ServerConfig and the CLI argument parser
DEFAULT_PORT = 8765  # TCP port the HTTP server listens on
DEFAULT_HOST = "0.0.0.0"  # bind to all interfaces by default
DEFAULT_CUDA_MVS_PATH = "/opt/cuda-multi-view-stereo"  # expected CUDA MVS install location
DEFAULT_OUTPUT_DIR = "output"  # directory where per-job outputs are written
DEFAULT_MAX_JOBS = 5  # default job limit (enforcement not shown in this chunk)
DEFAULT_MAX_IMAGES_PER_JOB = 50  # default per-job image cap
DEFAULT_JOB_TIMEOUT = 3600  # 1 hour
 41 | 
 42 | # Models
class JobStatus(BaseModel):
    """Status record for one reconstruction job, keyed by job_id."""
    job_id: str  # unique identifier for the job
    status: str = "created"  # created, uploading, processing, completed, failed
    created_at: float = Field(default_factory=time.time)  # epoch seconds at creation
    updated_at: float = Field(default_factory=time.time)  # epoch seconds of last status change
    num_images: int = 0  # number of images the client declared for this job
    images_uploaded: int = 0  # images received so far
    progress: float = 0.0  # processing progress (presumably 0.0-1.0 — confirm with processor)
    error_message: Optional[str] = None  # populated when something goes wrong
    model_id: Optional[str] = None  # identifier of the produced model, once available
    output_dir: Optional[str] = None  # server-side directory holding this job's outputs
    point_cloud_file: Optional[str] = None  # path to the generated point cloud, if any
    obj_file: Optional[str] = None  # path to the generated OBJ mesh, if any
    processing_time: Optional[float] = None  # seconds spent processing, once finished
 58 | 
class ServerConfig(BaseModel):
    """Runtime configuration for a CUDAMVSServer instance."""
    cuda_mvs_path: str = DEFAULT_CUDA_MVS_PATH  # filesystem path to the CUDA MVS installation
    output_dir: str = DEFAULT_OUTPUT_DIR  # root directory for job outputs
    max_jobs: int = DEFAULT_MAX_JOBS  # job limit
    max_images_per_job: int = DEFAULT_MAX_IMAGES_PER_JOB  # per-job image cap
    job_timeout: int = DEFAULT_JOB_TIMEOUT  # seconds before a job is considered timed out
    api_key: Optional[str] = None  # when set, requests must carry this key (see verify_api_key)
    server_name: str = "CUDA MVS Server"  # human-readable name advertised to clients
    advertise_service: bool = True  # whether to announce the server via zeroconf
    gpu_info: Optional[str] = None  # GPU description; auto-detected when left None
 70 | 
class JobRequest(BaseModel):
    """Request body for creating a job: declares how many images will be uploaded."""
    num_images: int  # expected image count for the new job
 74 | 
class ProcessRequest(BaseModel):
    """Request body for starting reconstruction on an uploaded job."""
    reconstruction_quality: str = "normal"  # low, normal, high
    output_formats: List[str] = ["obj", "ply"]  # model formats to produce
 79 | 
class ServerInfo(BaseModel):
    """Payload returned by the /api/status endpoint."""
    server_id: str  # unique id generated per server instance
    name: str  # human-readable server name
    version: str = "1.0.0"  # server software version
    status: str = "running"  # overall server state
    capabilities: Dict[str, Any]  # feature/capacity description (GPU, limits, ...)
    jobs: Dict[str, Any]  # current job records
    uptime: float  # seconds since the server started
 89 | 
 90 | # Server implementation
 91 | class CUDAMVSServer:
 92 |     """
 93 |     Server for remote CUDA Multi-View Stereo processing.
 94 |     
 95 |     This server:
 96 |     1. Accepts image uploads
 97 |     2. Processes images using CUDA MVS
 98 |     3. Provides 3D model downloads
 99 |     4. Advertises itself on the local network
100 |     """
101 |     
    def __init__(self, config: ServerConfig):
        """
        Initialize the CUDA MVS server.
        
        Args:
            config: Server configuration
        """
        self.config = config
        self.app = FastAPI(title="CUDA MVS Server", description="Remote CUDA Multi-View Stereo processing server")
        self.jobs: Dict[str, JobStatus] = {}  # job_id -> JobStatus record
        self.server_id = str(uuid.uuid4())  # unique id for this server instance
        self.start_time = time.time()  # used to report uptime in /api/status
        self.zeroconf = None  # set by advertise_service (defined outside this chunk)
        
        # Create output directory
        os.makedirs(config.output_dir, exist_ok=True)
        
        # Configure CORS; middleware must be added before the app serves requests
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],  # NOTE(review): wide-open CORS — consider restricting in production
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )
        
        # Detect GPU info (fills config.gpu_info when not provided)
        self.detect_gpu_info()
        
        # Register routes
        self.register_routes()
        
        # Advertise service on the local network if enabled
        if config.advertise_service:
            self.advertise_service()
137 |     
138 |     def detect_gpu_info(self):
139 |         """Detect GPU information."""
140 |         if not self.config.gpu_info:
141 |             try:
142 |                 # Try to get GPU info using nvidia-smi
143 |                 result = subprocess.run(
144 |                     ["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
145 |                     capture_output=True,
146 |                     text=True,
147 |                     check=True
148 |                 )
149 |                 self.config.gpu_info = result.stdout.strip()
150 |             except (subprocess.SubprocessError, FileNotFoundError):
151 |                 # If nvidia-smi fails, try lspci
152 |                 try:
153 |                     result = subprocess.run(
154 |                         ["lspci", "-v", "|", "grep", "-i", "vga"],
155 |                         capture_output=True,
156 |                         text=True,
157 |                         shell=True
158 |                     )
159 |                     self.config.gpu_info = result.stdout.strip()
160 |                 except subprocess.SubprocessError:
161 |                     self.config.gpu_info = "Unknown GPU"
162 |     
    def register_routes(self):
        """Register all HTTP API routes on the FastAPI app.

        Routes cover server status, job lifecycle (create/list/get/cancel),
        image upload, background processing, and model download.  Every route
        except /api/status is protected by verify_api_key when an API key is
        configured.
        """
        
        # Authentication dependency
        api_key_header = APIKeyHeader(name="Authorization", auto_error=False)
        
        async def verify_api_key(api_key: str = Depends(api_key_header)):
            # Authentication is enforced only when the server was started
            # with an API key; otherwise every request is accepted.
            if self.config.api_key:
                if not api_key:
                    raise HTTPException(status_code=401, detail="API key required")
                
                # Check if the API key is in the format "Bearer <key>"
                if api_key.startswith("Bearer "):
                    api_key = api_key[7:]
                
                if api_key != self.config.api_key:
                    raise HTTPException(status_code=401, detail="Invalid API key")
            return True
        
        # Status endpoint
        @self.app.get("/api/status")
        async def get_status():
            """Return server identity, capabilities and job counts (no auth)."""
            return self.get_server_info()
        
        # Job management endpoints
        @self.app.post("/api/jobs", status_code=201, dependencies=[Depends(verify_api_key)])
        async def create_job(job_request: JobRequest):
            """Create a new job; delegates to the server method of the same name."""
            return self.create_job(job_request.num_images)
        
        @self.app.get("/api/jobs", dependencies=[Depends(verify_api_key)])
        async def list_jobs():
            """List all known jobs, including completed and failed ones."""
            return {"jobs": self.jobs}
        
        @self.app.get("/api/jobs/{job_id}", dependencies=[Depends(verify_api_key)])
        async def get_job(job_id: str):
            """Return the status record for a single job."""
            if job_id not in self.jobs:
                raise HTTPException(status_code=404, detail="Job not found")
            return self.jobs[job_id]
        
        @self.app.delete("/api/jobs/{job_id}", dependencies=[Depends(verify_api_key)])
        async def cancel_job(job_id: str):
            """Cancel an active job and delete its working directory."""
            if job_id not in self.jobs:
                raise HTTPException(status_code=404, detail="Job not found")
            
            # Cancel the job
            job = self.jobs[job_id]
            if job.status in ["created", "uploading", "processing"]:
                job.status = "cancelled"
                job.updated_at = time.time()
                
                # Clean up job directory
                job_dir = os.path.join(self.config.output_dir, job_id)
                if os.path.exists(job_dir):
                    shutil.rmtree(job_dir)
                
                return {"status": "success", "message": "Job cancelled"}
            else:
                # Terminal states (completed/failed/cancelled) cannot be cancelled.
                return {"status": "error", "message": f"Cannot cancel job in {job.status} state"}
        
        # Image upload endpoint
        @self.app.post("/api/jobs/{job_id}/images", dependencies=[Depends(verify_api_key)])
        async def upload_image(
            job_id: str,
            file: UploadFile = File(...),
            metadata: str = Form(None)
        ):
            """Accept one multipart image upload for a job, with optional JSON metadata."""
            if job_id not in self.jobs:
                raise HTTPException(status_code=404, detail="Job not found")
            
            job = self.jobs[job_id]
            
            # Check if job is in a valid state for uploads
            if job.status not in ["created", "uploading"]:
                raise HTTPException(
                    status_code=400,
                    detail=f"Cannot upload images to job in {job.status} state"
                )
            
            # Check if we've reached the maximum number of images
            if job.images_uploaded >= job.num_images:
                raise HTTPException(
                    status_code=400,
                    detail=f"Maximum number of images ({job.num_images}) already uploaded"
                )
            
            # Update job status
            job.status = "uploading"
            job.updated_at = time.time()
            
            # Create job directory if it doesn't exist
            job_dir = os.path.join(self.config.output_dir, job_id, "images")
            os.makedirs(job_dir, exist_ok=True)
            
            # Parse metadata
            image_metadata = {}
            if metadata:
                try:
                    image_metadata = json.loads(metadata)
                except json.JSONDecodeError:
                    # Invalid metadata is logged and ignored rather than rejected.
                    logger.warning(f"Invalid metadata format: {metadata}")
            
            # Save the file
            # NOTE(review): file_extension comes from the client-supplied
            # filename — consider validating it against an allow-list.
            image_index = job.images_uploaded
            file_extension = os.path.splitext(file.filename)[1]
            image_path = os.path.join(job_dir, f"image_{image_index:04d}{file_extension}")
            
            with open(image_path, "wb") as f:
                shutil.copyfileobj(file.file, f)
            
            # Update job status
            job.images_uploaded += 1
            job.updated_at = time.time()
            
            return {
                "status": "success",
                "job_id": job_id,
                "image_index": image_index,
                "image_path": image_path,
                "images_uploaded": job.images_uploaded,
                "total_images": job.num_images
            }
        
        # Process job endpoint
        @self.app.post("/api/jobs/{job_id}/process", status_code=202, dependencies=[Depends(verify_api_key)])
        async def process_job(
            job_id: str,
            process_request: ProcessRequest,
            background_tasks: BackgroundTasks
        ):
            """Start background reconstruction once all images are uploaded."""
            if job_id not in self.jobs:
                raise HTTPException(status_code=404, detail="Job not found")
            
            job = self.jobs[job_id]
            
            # Check if job is in a valid state for processing
            if job.status != "uploading":
                raise HTTPException(
                    status_code=400,
                    detail=f"Cannot process job in {job.status} state"
                )
            
            # Check if all images have been uploaded
            if job.images_uploaded < job.num_images:
                raise HTTPException(
                    status_code=400,
                    detail=f"Not all images uploaded ({job.images_uploaded}/{job.num_images})"
                )
            
            # Update job status
            job.status = "processing"
            job.updated_at = time.time()
            job.progress = 0.0
            
            # Start processing in the background
            background_tasks.add_task(
                self.process_job_task,
                job_id,
                process_request.reconstruction_quality,
                process_request.output_formats
            )
            
            return {
                "status": "success",
                "job_id": job_id,
                "message": "Processing started"
            }
        
        # Model download endpoint
        @self.app.get("/api/jobs/{job_id}/model", dependencies=[Depends(verify_api_key)])
        async def download_model(job_id: str, format: str = "obj"):
            """Download the reconstructed model in OBJ or PLY format."""
            if job_id not in self.jobs:
                raise HTTPException(status_code=404, detail="Job not found")
            
            job = self.jobs[job_id]
            
            # Check if job is completed
            if job.status != "completed":
                raise HTTPException(
                    status_code=400,
                    detail=f"Job is not completed. Current status: {job.status}"
                )
            
            # Check if the requested format is available
            if format == "obj" and job.obj_file:
                return FileResponse(job.obj_file, filename=f"{job.model_id}.obj")
            elif format == "ply" and job.point_cloud_file:
                return FileResponse(job.point_cloud_file, filename=f"{job.model_id}.ply")
            else:
                raise HTTPException(
                    status_code=404,
                    detail=f"Model in {format} format not available"
                )
355 |     
356 |     def create_job(self, num_images: int) -> Dict[str, Any]:
357 |         """
358 |         Create a new job.
359 |         
360 |         Args:
361 |             num_images: Number of images to be uploaded
362 |             
363 |         Returns:
364 |             Dictionary with job information
365 |         """
366 |         # Check if we've reached the maximum number of jobs
367 |         active_jobs = sum(1 for job in self.jobs.values() if job.status in ["created", "uploading", "processing"])
368 |         if active_jobs >= self.config.max_jobs:
369 |             raise HTTPException(
370 |                 status_code=429,
371 |                 detail=f"Maximum number of active jobs ({self.config.max_jobs}) reached"
372 |             )
373 |         
374 |         # Check if the number of images is valid
375 |         if num_images <= 0 or num_images > self.config.max_images_per_job:
376 |             raise HTTPException(
377 |                 status_code=400,
378 |                 detail=f"Number of images must be between 1 and {self.config.max_images_per_job}"
379 |             )
380 |         
381 |         # Create a new job
382 |         job_id = str(uuid.uuid4())
383 |         job = JobStatus(
384 |             job_id=job_id,
385 |             num_images=num_images
386 |         )
387 |         
388 |         # Add job to the list
389 |         self.jobs[job_id] = job
390 |         
391 |         # Create job directory
392 |         job_dir = os.path.join(self.config.output_dir, job_id)
393 |         os.makedirs(job_dir, exist_ok=True)
394 |         
395 |         return job.dict()
396 |     
397 |     async def process_job_task(
398 |         self,
399 |         job_id: str,
400 |         reconstruction_quality: str,
401 |         output_formats: List[str]
402 |     ):
403 |         """
404 |         Process a job in the background.
405 |         
406 |         Args:
407 |             job_id: ID of the job to process
408 |             reconstruction_quality: Quality of the reconstruction (low, normal, high)
409 |             output_formats: List of output formats to generate
410 |         """
411 |         if job_id not in self.jobs:
412 |             logger.error(f"Job {job_id} not found")
413 |             return
414 |         
415 |         job = self.jobs[job_id]
416 |         job_dir = os.path.join(self.config.output_dir, job_id)
417 |         images_dir = os.path.join(job_dir, "images")
418 |         output_dir = os.path.join(job_dir, "output")
419 |         
420 |         # Create output directory
421 |         os.makedirs(output_dir, exist_ok=True)
422 |         
423 |         # Generate a model ID
424 |         model_id = f"model_{job_id[:8]}"
425 |         job.model_id = model_id
426 |         
427 |         # Update job status
428 |         job.status = "processing"
429 |         job.progress = 0.0
430 |         job.updated_at = time.time()
431 |         
432 |         try:
433 |             # Start timing
434 |             start_time = time.time()
435 |             
436 |             # Run CUDA MVS
437 |             await self.run_cuda_mvs(
438 |                 job,
439 |                 images_dir,
440 |                 output_dir,
441 |                 model_id,
442 |                 reconstruction_quality
443 |             )
444 |             
445 |             # Convert output formats if needed
446 |             if "obj" in output_formats and not job.obj_file:
447 |                 # Convert PLY to OBJ if needed
448 |                 ply_file = job.point_cloud_file
449 |                 if ply_file and os.path.exists(ply_file):
450 |                     obj_file = os.path.join(output_dir, f"{model_id}.obj")
451 |                     await self.convert_ply_to_obj(ply_file, obj_file)
452 |                     job.obj_file = obj_file
453 |             
454 |             # Calculate processing time
455 |             job.processing_time = time.time() - start_time
456 |             
457 |             # Update job status
458 |             job.status = "completed"
459 |             job.progress = 100.0
460 |             job.updated_at = time.time()
461 |             job.output_dir = output_dir
462 |             
463 |             logger.info(f"Job {job_id} completed successfully in {job.processing_time:.2f} seconds")
464 |             
465 |         except Exception as e:
466 |             # Update job status
467 |             job.status = "failed"
468 |             job.error_message = str(e)
469 |             job.updated_at = time.time()
470 |             
471 |             logger.error(f"Job {job_id} failed: {e}")
472 |     
473 |     async def run_cuda_mvs(
474 |         self,
475 |         job: JobStatus,
476 |         images_dir: str,
477 |         output_dir: str,
478 |         model_id: str,
479 |         reconstruction_quality: str
480 |     ):
481 |         """
482 |         Run CUDA MVS on the uploaded images.
483 |         
484 |         Args:
485 |             job: Job status object
486 |             images_dir: Directory containing the images
487 |             output_dir: Directory to save the output
488 |             model_id: ID of the model
489 |             reconstruction_quality: Quality of the reconstruction
490 |         """
491 |         # Check if CUDA MVS is installed
492 |         cuda_mvs_executable = os.path.join(self.config.cuda_mvs_path, "build", "app_patch_match_mvs")
493 |         if not os.path.exists(cuda_mvs_executable):
494 |             raise FileNotFoundError(f"CUDA MVS executable not found at {cuda_mvs_executable}")
495 |         
496 |         # Create a list of image paths
497 |         image_files = []
498 |         for file in os.listdir(images_dir):
499 |             if file.lower().endswith((".jpg", ".jpeg", ".png")):
500 |                 image_files.append(os.path.join(images_dir, file))
501 |         
502 |         if not image_files:
503 |             raise ValueError("No valid image files found")
504 |         
505 |         # Sort image files to ensure consistent order
506 |         image_files.sort()
507 |         
508 |         # Create a camera parameter file
509 |         camera_params_file = os.path.join(output_dir, "cameras.txt")
510 |         await self.generate_camera_params(image_files, camera_params_file)
511 |         
512 |         # Set quality parameters
513 |         if reconstruction_quality == "low":
514 |             num_iterations = 3
515 |             max_resolution = 1024
516 |         elif reconstruction_quality == "normal":
517 |             num_iterations = 5
518 |             max_resolution = 2048
519 |         elif reconstruction_quality == "high":
520 |             num_iterations = 7
521 |             max_resolution = 4096
522 |         else:
523 |             num_iterations = 5
524 |             max_resolution = 2048
525 |         
526 |         # Prepare output files
527 |         point_cloud_file = os.path.join(output_dir, f"{model_id}.ply")
528 |         
529 |         # Build the command
530 |         cmd = [
531 |             cuda_mvs_executable,
532 |             "--input_folder", images_dir,
533 |             "--camera_file", camera_params_file,
534 |             "--output_folder", output_dir,
535 |             "--output_file", point_cloud_file,
536 |             "--num_iterations", str(num_iterations),
537 |             "--max_resolution", str(max_resolution)
538 |         ]
539 |         
540 |         # Run the command
541 |         logger.info(f"Running CUDA MVS: {' '.join(cmd)}")
542 |         
543 |         process = subprocess.Popen(
544 |             cmd,
545 |             stdout=subprocess.PIPE,
546 |             stderr=subprocess.PIPE,
547 |             text=True
548 |         )
549 |         
550 |         # Monitor progress
551 |         while True:
552 |             if process.poll() is not None:
553 |                 break
554 |             
555 |             # Read output line by line
556 |             output = process.stdout.readline()
557 |             if output:
558 |                 # Try to parse progress information
559 |                 if "Progress:" in output:
560 |                     try:
561 |                         progress_str = output.split("Progress:")[1].strip().rstrip("%")
562 |                         progress = float(progress_str)
563 |                         job.progress = progress
564 |                         job.updated_at = time.time()
565 |                     except (ValueError, IndexError):
566 |                         pass
567 |             
568 |             # Sleep briefly to avoid CPU spinning
569 |             await asyncio.sleep(0.1)
570 |         
571 |         # Get the final output
572 |         stdout, stderr = process.communicate()
573 |         
574 |         # Check if the process was successful
575 |         if process.returncode != 0:
576 |             raise RuntimeError(f"CUDA MVS failed with error: {stderr}")
577 |         
578 |         # Check if the output file was created
579 |         if not os.path.exists(point_cloud_file):
580 |             raise FileNotFoundError(f"Output file not created: {point_cloud_file}")
581 |         
582 |         # Update job with the output file
583 |         job.point_cloud_file = point_cloud_file
584 |     
585 |     async def generate_camera_params(self, image_files: List[str], output_file: str):
586 |         """
587 |         Generate camera parameters for CUDA MVS.
588 |         
589 |         Args:
590 |             image_files: List of image files
591 |             output_file: Output file for camera parameters
592 |         """
593 |         # For now, use a simple camera model with default parameters
594 |         # In a real implementation, this would use structure from motion
595 |         # to estimate camera parameters from the images
596 |         
597 |         with open(output_file, "w") as f:
598 |             f.write(f"# Camera parameters for {len(image_files)} images\n")
599 |             f.write("# Format: image_name width height fx fy cx cy\n")
600 |             
601 |             for i, image_file in enumerate(image_files):
602 |                 # Get image dimensions
603 |                 from PIL import Image
604 |                 with Image.open(image_file) as img:
605 |                     width, height = img.size
606 |                 
607 |                 # Use default camera parameters
608 |                 fx = width * 1.2  # Focal length x
609 |                 fy = height * 1.2  # Focal length y
610 |                 cx = width / 2  # Principal point x
611 |                 cy = height / 2  # Principal point y
612 |                 
613 |                 # Write camera parameters
614 |                 f.write(f"{os.path.basename(image_file)} {width} {height} {fx} {fy} {cx} {cy}\n")
615 |     
616 |     async def convert_ply_to_obj(self, ply_file: str, obj_file: str):
617 |         """
618 |         Convert PLY file to OBJ format.
619 |         
620 |         Args:
621 |             ply_file: Input PLY file
622 |             obj_file: Output OBJ file
623 |         """
624 |         try:
625 |             import open3d as o3d
626 |             
627 |             # Load the PLY file
628 |             mesh = o3d.io.read_triangle_mesh(ply_file)
629 |             
630 |             # Save as OBJ
631 |             o3d.io.write_triangle_mesh(obj_file, mesh)
632 |             
633 |             logger.info(f"Converted {ply_file} to {obj_file}")
634 |             
635 |         except ImportError:
636 |             # If open3d is not available, use a subprocess
637 |             try:
638 |                 # Try using meshlab
639 |                 subprocess.run(
640 |                     ["meshlabserver", "-i", ply_file, "-o", obj_file],
641 |                     check=True,
642 |                     capture_output=True
643 |                 )
644 |             except (subprocess.SubprocessError, FileNotFoundError):
645 |                 # If meshlab is not available, try using assimp
646 |                 try:
647 |                     subprocess.run(
648 |                         ["assimp", "export", ply_file, obj_file],
649 |                         check=True,
650 |                         capture_output=True
651 |                     )
652 |                 except (subprocess.SubprocessError, FileNotFoundError):
653 |                     raise RuntimeError("No suitable tool found to convert PLY to OBJ")
654 |     
655 |     def get_server_info(self) -> Dict[str, Any]:
656 |         """
657 |         Get server information.
658 |         
659 |         Returns:
660 |             Dictionary with server information
661 |         """
662 |         # Count active jobs
663 |         active_jobs = sum(1 for job in self.jobs.values() if job.status in ["created", "uploading", "processing"])
664 |         
665 |         # Get server capabilities
666 |         capabilities = {
667 |             "max_jobs": self.config.max_jobs,
668 |             "max_images_per_job": self.config.max_images_per_job,
669 |             "job_timeout": self.config.job_timeout,
670 |             "supported_formats": ["obj", "ply"],
671 |             "gpu_info": self.config.gpu_info
672 |         }
673 |         
674 |         # Get job summary
675 |         job_summary = {
676 |             "total": len(self.jobs),
677 |             "active": active_jobs,
678 |             "completed": sum(1 for job in self.jobs.values() if job.status == "completed"),
679 |             "failed": sum(1 for job in self.jobs.values() if job.status == "failed")
680 |         }
681 |         
682 |         return {
683 |             "server_id": self.server_id,
684 |             "name": self.config.server_name,
685 |             "version": "1.0.0",
686 |             "status": "running",
687 |             "capabilities": capabilities,
688 |             "jobs": job_summary,
689 |             "uptime": time.time() - self.start_time
690 |         }
691 |     
692 |     def advertise_service(self):
693 |         """Advertise the server on the local network using Zeroconf."""
694 |         try:
695 |             import socket
696 |             from zeroconf import ServiceInfo, Zeroconf
697 |             
698 |             # Get local IP address
699 |             s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
700 |             s.connect(("8.8.8.8", 80))
701 |             local_ip = s.getsockname()[0]
702 |             s.close()
703 |             
704 |             # Create service info
705 |             capabilities = {
706 |                 "max_jobs": self.config.max_jobs,
707 |                 "max_images_per_job": self.config.max_images_per_job,
708 |                 "supported_formats": ["obj", "ply"],
709 |                 "gpu_info": self.config.gpu_info
710 |             }
711 |             
712 |             service_info = ServiceInfo(
713 |                 "_cudamvs._tcp.local.",
714 |                 f"{self.server_id}._cudamvs._tcp.local.",
715 |                 addresses=[socket.inet_aton(local_ip)],
716 |                 port=DEFAULT_PORT,
717 |                 properties={
718 |                     b"name": self.config.server_name.encode("utf-8"),
719 |                     b"capabilities": json.dumps(capabilities).encode("utf-8")
720 |                 }
721 |             )
722 |             
723 |             # Register service
724 |             self.zeroconf = Zeroconf()
725 |             self.zeroconf.register_service(service_info)
726 |             
727 |             logger.info(f"Advertising CUDA MVS service on {local_ip}:{DEFAULT_PORT}")
728 |             
729 |         except Exception as e:
730 |             logger.error(f"Failed to advertise service: {e}")
731 |     
    def run(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT):
        """
        Run the server.

        Starts uvicorn with the configured FastAPI app and blocks until
        the server shuts down.

        Args:
            host: Host to bind to
            port: Port to bind to
        """
        uvicorn.run(self.app, host=host, port=port)
741 |     
    def cleanup(self):
        """Clean up resources."""
        # Unregister the Zeroconf advertisement, if one was started.
        if self.zeroconf:
            self.zeroconf.close()
746 | 
# Main entry point
def main():
    """Main entry point.

    Parses command-line options, builds the server configuration, then runs
    the server until interrupted, always releasing resources on exit.
    """
    parser = argparse.ArgumentParser(description="CUDA MVS Server")
    parser.add_argument("--host", default=DEFAULT_HOST, help="Host to bind to")
    parser.add_argument("--port", type=int, default=DEFAULT_PORT, help="Port to bind to")
    parser.add_argument("--cuda-mvs-path", default=DEFAULT_CUDA_MVS_PATH, help="Path to CUDA MVS installation")
    parser.add_argument("--output-dir", default=DEFAULT_OUTPUT_DIR, help="Output directory")
    parser.add_argument("--max-jobs", type=int, default=DEFAULT_MAX_JOBS, help="Maximum number of concurrent jobs")
    parser.add_argument("--max-images", type=int, default=DEFAULT_MAX_IMAGES_PER_JOB, help="Maximum images per job")
    parser.add_argument("--job-timeout", type=int, default=DEFAULT_JOB_TIMEOUT, help="Job timeout in seconds")
    parser.add_argument("--api-key", help="API key for authentication")
    parser.add_argument("--server-name", default="CUDA MVS Server", help="Server name")
    parser.add_argument("--no-advertise", action="store_true", help="Don't advertise service on the network")
    
    args = parser.parse_args()
    
    # Create server configuration
    config = ServerConfig(
        cuda_mvs_path=args.cuda_mvs_path,
        output_dir=args.output_dir,
        max_jobs=args.max_jobs,
        max_images_per_job=args.max_images,
        job_timeout=args.job_timeout,
        api_key=args.api_key,
        server_name=args.server_name,
        advertise_service=not args.no_advertise
    )
    
    # Create and run server
    server = CUDAMVSServer(config)
    
    try:
        server.run(host=args.host, port=args.port)
    finally:
        # Always release network resources (Zeroconf advertisement) on exit.
        server.cleanup()
783 | 
784 | if __name__ == "__main__":
785 |     # Add asyncio import for async/await support
786 |     import asyncio
787 |     main()
788 | 
```

--------------------------------------------------------------------------------
/src/main.py:
--------------------------------------------------------------------------------

```python
   1 | import os
   2 | import logging
   3 | import uuid
   4 | import json
   5 | from typing import Dict, Any, List, Optional, Tuple
   6 | from fastapi import FastAPI, Request, Response, HTTPException
   7 | from fastapi.responses import JSONResponse, FileResponse
   8 | from fastapi.middleware.cors import CORSMiddleware
   9 | from fastapi.staticfiles import StaticFiles
  10 | from fastapi.templating import Jinja2Templates
  11 | import uvicorn
  12 | from mcp import MCPServer, MCPTool, MCPToolCall, MCPToolCallResult
  13 | 
  14 | # Import configuration
  15 | from src.config import *
  16 | 
  17 | # Import components
  18 | from src.nlp.parameter_extractor import ParameterExtractor
  19 | from src.models.code_generator import CodeGenerator
  20 | from src.openscad_wrapper.wrapper import OpenSCADWrapper
  21 | from src.utils.cad_exporter import CADExporter
  22 | from src.visualization.headless_renderer import HeadlessRenderer
  23 | from src.printer_discovery.printer_discovery import PrinterDiscovery, PrinterInterface
  24 | from src.ai.venice_api import VeniceImageGenerator
  25 | from src.ai.sam_segmentation import SAMSegmenter
  26 | 
# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Create FastAPI app
app = FastAPI(title="OpenSCAD MCP Server")

# Add CORS middleware
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# very permissive — confirm this is acceptable for the deployment target.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create directories
os.makedirs("scad", exist_ok=True)
os.makedirs("output", exist_ok=True)
os.makedirs("output/models", exist_ok=True)
os.makedirs("output/preview", exist_ok=True)
os.makedirs("templates", exist_ok=True)
os.makedirs("static", exist_ok=True)

# Initialize components
parameter_extractor = ParameterExtractor()
code_generator = CodeGenerator("scad", "output")
openscad_wrapper = OpenSCADWrapper("scad", "output")
cad_exporter = CADExporter()
headless_renderer = HeadlessRenderer()
printer_discovery = PrinterDiscovery()

# Initialize AI components
# NOTE(review): GeminiImageGenerator, CUDAMultiViewStereo, ImageApprovalTool,
# CUDAMVSConnectionManager and MultiViewToModelPipeline are not among this
# file's explicit imports; unless `from src.config import *` re-exports them,
# these lines raise NameError at import time — verify the import list.
venice_generator = VeniceImageGenerator(VENICE_API_KEY, IMAGES_DIR)
gemini_generator = GeminiImageGenerator(GEMINI_API_KEY, IMAGES_DIR)
cuda_mvs = CUDAMultiViewStereo(CUDA_MVS_PATH, MODELS_DIR, use_gpu=CUDA_MVS_USE_GPU)
image_approval = ImageApprovalTool(APPROVED_IMAGES_DIR)

# Initialize remote processing components if enabled
remote_connection_manager = None
if REMOTE_CUDA_MVS["ENABLED"]:
    logger.info("Initializing remote CUDA MVS connection manager")
    remote_connection_manager = CUDAMVSConnectionManager(
        api_key=REMOTE_CUDA_MVS["API_KEY"],
        discovery_port=REMOTE_CUDA_MVS["DISCOVERY_PORT"],
        use_lan_discovery=REMOTE_CUDA_MVS["USE_LAN_DISCOVERY"],
        server_url=REMOTE_CUDA_MVS["SERVER_URL"] if REMOTE_CUDA_MVS["SERVER_URL"] else None
    )

# Initialize workflow pipeline
multi_view_pipeline = MultiViewToModelPipeline(
    gemini_generator=gemini_generator,
    cuda_mvs=cuda_mvs,
    approval_tool=image_approval,
    output_dir=OUTPUT_DIR
)

# SAM2 segmenter will be initialized on first use to avoid loading the model unnecessarily
sam_segmenter = None
  89 | 
  90 | def get_sam_segmenter():
  91 |     """
  92 |     Get or initialize the SAM2 segmenter.
  93 |     
  94 |     Returns:
  95 |         SAMSegmenter instance
  96 |     """
  97 |     global sam_segmenter
  98 |     if sam_segmenter is None:
  99 |         logger.info("Initializing SAM2 segmenter")
 100 |         sam_segmenter = SAMSegmenter(
 101 |             model_type=SAM2_MODEL_TYPE,
 102 |             checkpoint_path=SAM2_CHECKPOINT_PATH,
 103 |             use_gpu=SAM2_USE_GPU,
 104 |             output_dir=MASKS_DIR
 105 |         )
 106 |     return sam_segmenter
 107 | 
# Store models in memory
# NOTE: in-memory registries only — all state is lost on process restart.
models = {}
printers = {}
approved_images = {}
remote_jobs = {}

# Create MCP server
mcp_server = MCPServer()

# Mount static files
app.mount("/static", StaticFiles(directory="static"), name="static")

# Create Jinja2 templates
templates = Jinja2Templates(directory="templates")
 122 | 
 123 | # Create model preview template
 124 | with open("templates/preview.html", "w") as f:
 125 |     f.write("""
 126 | <!DOCTYPE html>
 127 | <html lang="en">
 128 | <head>
 129 |     <meta charset="UTF-8">
 130 |     <meta name="viewport" content="width=device-width, initial-scale=1.0">
 131 |     <title>OpenSCAD Model Preview</title>
 132 |     <style>
 133 |         body {
 134 |             font-family: Arial, sans-serif;
 135 |             margin: 0;
 136 |             padding: 0;
 137 |             background-color: #f5f5f5;
 138 |         }
 139 |         .container {
 140 |             max-width: 1200px;
 141 |             margin: 0 auto;
 142 |             padding: 20px;
 143 |         }
 144 |         h1 {
 145 |             color: #333;
 146 |             margin-bottom: 20px;
 147 |         }
 148 |         .preview-container {
 149 |             display: flex;
 150 |             flex-wrap: wrap;
 151 |             gap: 20px;
 152 |             margin-top: 20px;
 153 |         }
 154 |         .preview-image {
 155 |             border: 1px solid #ddd;
 156 |             border-radius: 5px;
 157 |             padding: 10px;
 158 |             background-color: white;
 159 |         }
 160 |         .preview-image img {
 161 |             max-width: 100%;
 162 |             height: auto;
 163 |         }
 164 |         .preview-image h3 {
 165 |             margin-top: 10px;
 166 |             margin-bottom: 5px;
 167 |             color: #555;
 168 |         }
 169 |         .parameters {
 170 |             margin-top: 20px;
 171 |             background-color: #f9f9f9;
 172 |             padding: 15px;
 173 |             border-radius: 5px;
 174 |             border: 1px solid #eee;
 175 |         }
 176 |         .parameters h2 {
 177 |             margin-top: 0;
 178 |             color: #333;
 179 |         }
 180 |         .parameters table {
 181 |             width: 100%;
 182 |             border-collapse: collapse;
 183 |         }
 184 |         .parameters table th, .parameters table td {
 185 |             padding: 8px;
 186 |             text-align: left;
 187 |             border-bottom: 1px solid #ddd;
 188 |         }
 189 |         .parameters table th {
 190 |             background-color: #f2f2f2;
 191 |         }
 192 |         .actions {
 193 |             margin-top: 20px;
 194 |             display: flex;
 195 |             gap: 10px;
 196 |         }
 197 |         .actions button {
 198 |             padding: 10px 15px;
 199 |             background-color: #4CAF50;
 200 |             color: white;
 201 |             border: none;
 202 |             border-radius: 4px;
 203 |             cursor: pointer;
 204 |         }
 205 |         .actions button:hover {
 206 |             background-color: #45a049;
 207 |         }
 208 |     </style>
 209 | </head>
 210 | <body>
 211 |     <div class="container">
 212 |         <h1>OpenSCAD Model Preview: {{ model_id }}</h1>
 213 |         
 214 |         <div class="parameters">
 215 |             <h2>Parameters</h2>
 216 |             <table>
 217 |                 <tr>
 218 |                     <th>Parameter</th>
 219 |                     <th>Value</th>
 220 |                 </tr>
 221 |                 {% for key, value in parameters.items() %}
 222 |                 <tr>
 223 |                     <td>{{ key }}</td>
 224 |                     <td>{{ value }}</td>
 225 |                 </tr>
 226 |                 {% endfor %}
 227 |             </table>
 228 |         </div>
 229 |         
 230 |         <div class="preview-container">
 231 |             {% for view, image_path in previews.items() %}
 232 |             <div class="preview-image">
 233 |                 <h3>{{ view|title }} View</h3>
 234 |                 <img src="{{ image_path }}" alt="{{ view }} view">
 235 |             </div>
 236 |             {% endfor %}
 237 |         </div>
 238 |         
 239 |         <div class="actions">
 240 |             <button onclick="window.location.href='/download/{{ model_id }}'">Download Model</button>
 241 |         </div>
 242 |     </div>
 243 | </body>
 244 | </html>
 245 |     """)
 246 | 
 247 | # Define MCP tools
 248 | @mcp_server.tool
 249 | def create_3d_model(description: str) -> Dict[str, Any]:
 250 |     """
 251 |     Create a 3D model from a natural language description.
 252 |     
 253 |     Args:
 254 |         description: Natural language description of the 3D model
 255 |         
 256 |     Returns:
 257 |         Dictionary with model information
 258 |     """
 259 |     # Extract parameters from description
 260 |     model_type, parameters = parameter_extractor.extract_parameters(description)
 261 |     
 262 |     # Generate a unique model ID
 263 |     model_id = str(uuid.uuid4())
 264 |     
 265 |     # Generate OpenSCAD code
 266 |     scad_code = code_generator.generate_code(model_type, parameters)
 267 |     
 268 |     # Save the SCAD file
 269 |     scad_file = openscad_wrapper.generate_scad(scad_code, model_id)
 270 |     
 271 |     # Generate preview images
 272 |     previews = openscad_wrapper.generate_multi_angle_previews(scad_file, parameters)
 273 |     
 274 |     # Export to parametric format (CSG by default)
 275 |     success, model_file, error = cad_exporter.export_model(
 276 |         scad_file, 
 277 |         "csg",
 278 |         parameters,
 279 |         metadata={
 280 |             "description": description,
 281 |             "model_type": model_type,
 282 |         }
 283 |     )
 284 |     
 285 |     # Store model information
 286 |     models[model_id] = {
 287 |         "id": model_id,
 288 |         "type": model_type,
 289 |         "parameters": parameters,
 290 |         "description": description,
 291 |         "scad_file": scad_file,
 292 |         "model_file": model_file if success else None,
 293 |         "previews": previews,
 294 |         "format": "csg"
 295 |     }
 296 |     
 297 |     # Create response
 298 |     response = {
 299 |         "model_id": model_id,
 300 |         "model_type": model_type,
 301 |         "parameters": parameters,
 302 |         "preview_url": f"/ui/preview/{model_id}",
 303 |         "supported_formats": cad_exporter.get_supported_formats()
 304 |     }
 305 |     
 306 |     return response
 307 | 
 308 | @mcp_server.tool
 309 | def modify_3d_model(model_id: str, modifications: str) -> Dict[str, Any]:
 310 |     """
 311 |     Modify an existing 3D model.
 312 |     
 313 |     Args:
 314 |         model_id: ID of the model to modify
 315 |         modifications: Natural language description of the modifications
 316 |         
 317 |     Returns:
 318 |         Dictionary with updated model information
 319 |     """
 320 |     # Check if model exists
 321 |     if model_id not in models:
 322 |         raise ValueError(f"Model with ID {model_id} not found")
 323 |     
 324 |     # Get existing model information
 325 |     model_info = models[model_id]
 326 |     
 327 |     # Extract parameters from modifications
 328 |     _, new_parameters = parameter_extractor.extract_parameters(
 329 |         modifications, 
 330 |         model_type=model_info["type"],
 331 |         existing_parameters=model_info["parameters"]
 332 |     )
 333 |     
 334 |     # Generate OpenSCAD code with updated parameters
 335 |     scad_code = code_generator.generate_code(model_info["type"], new_parameters)
 336 |     
 337 |     # Save the SCAD file
 338 |     scad_file = openscad_wrapper.generate_scad(scad_code, model_id)
 339 |     
 340 |     # Generate preview images
 341 |     previews = openscad_wrapper.generate_multi_angle_previews(scad_file, new_parameters)
 342 |     
 343 |     # Export to parametric format (same as original)
 344 |     success, model_file, error = cad_exporter.export_model(
 345 |         scad_file, 
 346 |         model_info["format"],
 347 |         new_parameters,
 348 |         metadata={
 349 |             "description": model_info["description"] + " | " + modifications,
 350 |             "model_type": model_info["type"],
 351 |         }
 352 |     )
 353 |     
 354 |     # Update model information
 355 |     models[model_id] = {
 356 |         "id": model_id,
 357 |         "type": model_info["type"],
 358 |         "parameters": new_parameters,
 359 |         "description": model_info["description"] + " | " + modifications,
 360 |         "scad_file": scad_file,
 361 |         "model_file": model_file if success else None,
 362 |         "previews": previews,
 363 |         "format": model_info["format"]
 364 |     }
 365 |     
 366 |     # Create response
 367 |     response = {
 368 |         "model_id": model_id,
 369 |         "model_type": model_info["type"],
 370 |         "parameters": new_parameters,
 371 |         "preview_url": f"/ui/preview/{model_id}",
 372 |         "supported_formats": cad_exporter.get_supported_formats()
 373 |     }
 374 |     
 375 |     return response
 376 | 
 377 | @mcp_server.tool
 378 | def export_model(model_id: str, format: str = "csg") -> Dict[str, Any]:
 379 |     """
 380 |     Export a 3D model to a specific format.
 381 |     
 382 |     Args:
 383 |         model_id: ID of the model to export
 384 |         format: Format to export to (csg, stl, obj, etc.)
 385 |         
 386 |     Returns:
 387 |         Dictionary with export information
 388 |     """
 389 |     # Check if model exists
 390 |     if model_id not in models:
 391 |         raise ValueError(f"Model with ID {model_id} not found")
 392 |     
 393 |     # Get model information
 394 |     model_info = models[model_id]
 395 |     
 396 |     # Check if format is supported
 397 |     supported_formats = cad_exporter.get_supported_formats()
 398 |     if format not in supported_formats:
 399 |         raise ValueError(f"Format {format} not supported. Supported formats: {', '.join(supported_formats)}")
 400 |     
 401 |     # Export model
 402 |     success, model_file, error = cad_exporter.export_model(
 403 |         model_info["scad_file"],
 404 |         format,
 405 |         model_info["parameters"],
 406 |         metadata={
 407 |             "description": model_info["description"],
 408 |             "model_type": model_info["type"],
 409 |         }
 410 |     )
 411 |     
 412 |     if not success:
 413 |         raise ValueError(f"Failed to export model: {error}")
 414 |     
 415 |     # Update model information
 416 |     models[model_id]["model_file"] = model_file
 417 |     models[model_id]["format"] = format
 418 |     
 419 |     # Create response
 420 |     response = {
 421 |         "model_id": model_id,
 422 |         "format": format,
 423 |         "model_file": model_file,
 424 |         "download_url": f"/download/{model_id}"
 425 |     }
 426 |     
 427 |     return response
 428 | 
 429 | @mcp_server.tool
 430 | def discover_printers() -> Dict[str, Any]:
 431 |     """
 432 |     Discover 3D printers on the network.
 433 |     
 434 |     Returns:
 435 |         Dictionary with discovered printers
 436 |     """
 437 |     # Discover printers
 438 |     discovered_printers = printer_discovery.discover_printers()
 439 |     
 440 |     # Store printers
 441 |     for printer in discovered_printers:
 442 |         printers[printer["id"]] = printer
 443 |     
 444 |     # Create response
 445 |     response = {
 446 |         "printers": discovered_printers
 447 |     }
 448 |     
 449 |     return response
 450 | 
 451 | @mcp_server.tool
 452 | def connect_to_printer(printer_id: str) -> Dict[str, Any]:
 453 |     """
 454 |     Connect to a 3D printer.
 455 |     
 456 |     Args:
 457 |         printer_id: ID of the printer to connect to
 458 |         
 459 |     Returns:
 460 |         Dictionary with connection information
 461 |     """
 462 |     # Check if printer exists
 463 |     if printer_id not in printers:
 464 |         raise ValueError(f"Printer with ID {printer_id} not found")
 465 |     
 466 |     # Get printer information
 467 |     printer_info = printers[printer_id]
 468 |     
 469 |     # Connect to printer
 470 |     printer_interface = PrinterInterface(printer_info)
 471 |     success, error = printer_interface.connect()
 472 |     
 473 |     if not success:
 474 |         raise ValueError(f"Failed to connect to printer: {error}")
 475 |     
 476 |     # Update printer information
 477 |     printers[printer_id]["connected"] = True
 478 |     printers[printer_id]["interface"] = printer_interface
 479 |     
 480 |     # Create response
 481 |     response = {
 482 |         "printer_id": printer_id,
 483 |         "connected": True,
 484 |         "printer_info": printer_info
 485 |     }
 486 |     
 487 |     return response
 488 | 
 489 | @mcp_server.tool
 490 | def print_model(model_id: str, printer_id: str) -> Dict[str, Any]:
 491 |     """
 492 |     Print a 3D model on a connected printer.
 493 |     
 494 |     Args:
 495 |         model_id: ID of the model to print
 496 |         printer_id: ID of the printer to print on
 497 |         
 498 |     Returns:
 499 |         Dictionary with print job information
 500 |     """
 501 |     # Check if model exists
 502 |     if model_id not in models:
 503 |         raise ValueError(f"Model with ID {model_id} not found")
 504 |     
 505 |     # Check if printer exists
 506 |     if printer_id not in printers:
 507 |         raise ValueError(f"Printer with ID {printer_id} not found")
 508 |     
 509 |     # Check if printer is connected
 510 |     if not printers[printer_id].get("connected", False):
 511 |         raise ValueError(f"Printer with ID {printer_id} is not connected")
 512 |     
 513 |     # Get model and printer information
 514 |     model_info = models[model_id]
 515 |     printer_info = printers[printer_id]
 516 |     
 517 |     # Check if model has been exported to a printable format
 518 |     if not model_info.get("model_file"):
 519 |         raise ValueError(f"Model with ID {model_id} has not been exported")
 520 |     
 521 |     # Print model
 522 |     printer_interface = printer_info["interface"]
 523 |     job_id, error = printer_interface.print_model(model_info["model_file"])
 524 |     
 525 |     if not job_id:
 526 |         raise ValueError(f"Failed to print model: {error}")
 527 |     
 528 |     # Create response
 529 |     response = {
 530 |         "model_id": model_id,
 531 |         "printer_id": printer_id,
 532 |         "job_id": job_id,
 533 |         "status": "printing"
 534 |     }
 535 |     
 536 |     return response
 537 | 
 538 | @mcp_server.tool
 539 | def get_printer_status(printer_id: str) -> Dict[str, Any]:
 540 |     """
 541 |     Get the status of a printer.
 542 |     
 543 |     Args:
 544 |         printer_id: ID of the printer to get status for
 545 |         
 546 |     Returns:
 547 |         Dictionary with printer status
 548 |     """
 549 |     # Check if printer exists
 550 |     if printer_id not in printers:
 551 |         raise ValueError(f"Printer with ID {printer_id} not found")
 552 |     
 553 |     # Check if printer is connected
 554 |     if not printers[printer_id].get("connected", False):
 555 |         raise ValueError(f"Printer with ID {printer_id} is not connected")
 556 |     
 557 |     # Get printer information
 558 |     printer_info = printers[printer_id]
 559 |     
 560 |     # Get printer status
 561 |     printer_interface = printer_info["interface"]
 562 |     status = printer_interface.get_status()
 563 |     
 564 |     # Create response
 565 |     response = {
 566 |         "printer_id": printer_id,
 567 |         "status": status
 568 |     }
 569 |     
 570 |     return response
 571 | 
 572 | @mcp_server.tool
 573 | def cancel_print_job(printer_id: str, job_id: str) -> Dict[str, Any]:
 574 |     """
 575 |     Cancel a print job.
 576 |     
 577 |     Args:
 578 |         printer_id: ID of the printer
 579 |         job_id: ID of the print job to cancel
 580 |         
 581 |     Returns:
 582 |         Dictionary with cancellation information
 583 |     """
 584 |     # Check if printer exists
 585 |     if printer_id not in printers:
 586 |         raise ValueError(f"Printer with ID {printer_id} not found")
 587 |     
 588 |     # Check if printer is connected
 589 |     if not printers[printer_id].get("connected", False):
 590 |         raise ValueError(f"Printer with ID {printer_id} is not connected")
 591 |     
 592 |     # Get printer information
 593 |     printer_info = printers[printer_id]
 594 |     
 595 |     # Cancel print job
 596 |     printer_interface = printer_info["interface"]
 597 |     success, error = printer_interface.cancel_job(job_id)
 598 |     
 599 |     if not success:
 600 |         raise ValueError(f"Failed to cancel print job: {error}")
 601 |     
 602 |     # Create response
 603 |     response = {
 604 |         "printer_id": printer_id,
 605 |         "job_id": job_id,
 606 |         "status": "cancelled"
 607 |     }
 608 |     
 609 |     return response
 610 | 
 611 | # Add Venice.ai image generation tool
 612 | @mcp_server.tool
 613 | def generate_image(prompt: str, model: str = "fluently-xl") -> Dict[str, Any]:
 614 |     """
 615 |     Generate an image using Venice.ai's image generation models.
 616 |     
 617 |     Args:
 618 |         prompt: Text description for image generation
 619 |         model: Model to use (default: fluently-xl). Options include:
 620 |             - "fluently-xl" (fastest, 2.30s): Quick generation with good quality
 621 |             - "flux-dev" (high quality): Detailed, premium image quality
 622 |             - "flux-dev-uncensored": Uncensored version of flux-dev model
 623 |             - "stable-diffusion-3.5": Standard stable diffusion model
 624 |             - "pony-realism": Specialized for realistic outputs
 625 |             - "lustify-sdxl": Artistic stylization model
 626 |             
 627 |             You can also use natural language like:
 628 |             - "fastest model", "quick generation", "efficient"
 629 |             - "high quality", "detailed", "premium quality"
 630 |             - "realistic", "photorealistic"
 631 |             - "artistic", "stylized", "creative"
 632 |         
 633 |     Returns:
 634 |         Dictionary with image information
 635 |     """
 636 |     # Generate a unique image ID
 637 |     image_id = str(uuid.uuid4())
 638 |     
 639 |     # Generate image
 640 |     result = venice_generator.generate_image(prompt, model)
 641 |     
 642 |     # Create response
 643 |     response = {
 644 |         "image_id": image_id,
 645 |         "prompt": prompt,
 646 |         "model": model,
 647 |         "image_path": result.get("local_path"),
 648 |         "image_url": result.get("image_url")
 649 |     }
 650 |     
 651 |     return response
 652 | 
 653 | # Add SAM2 segmentation tool
 654 | @mcp_server.tool
 655 | def segment_image(image_path: str, points: Optional[List[Tuple[int, int]]] = None) -> Dict[str, Any]:
 656 |     """
 657 |     Segment objects in an image using SAM2 (Segment Anything Model 2).
 658 |     
 659 |     Args:
 660 |         image_path: Path to the input image
 661 |         points: Optional list of (x, y) points to guide segmentation
 662 |                If not provided, automatic segmentation will be used
 663 |         
 664 |     Returns:
 665 |         Dictionary with segmentation masks and metadata
 666 |     """
 667 |     # Get or initialize SAM2 segmenter
 668 |     sam_segmenter = get_sam_segmenter()
 669 |     
 670 |     # Generate a unique segmentation ID
 671 |     segmentation_id = str(uuid.uuid4())
 672 |     
 673 |     try:
 674 |         # Perform segmentation
 675 |         if points:
 676 |             result = sam_segmenter.segment_image(image_path, points)
 677 |         else:
 678 |             result = sam_segmenter.segment_with_auto_points(image_path)
 679 |         
 680 |         # Create response
 681 |         response = {
 682 |             "segmentation_id": segmentation_id,
 683 |             "image_path": image_path,
 684 |             "mask_paths": result.get("mask_paths", []),
 685 |             "num_masks": result.get("num_masks", 0),
 686 |             "points_used": points if points else result.get("points", [])
 687 |         }
 688 |         
 689 |         return response
 690 |     except Exception as e:
 691 |         logger.error(f"Error segmenting image: {str(e)}")
 692 |         raise HTTPException(status_code=500, detail=f"Error segmenting image: {str(e)}")
 693 | 
 694 | 
 695 | # Add Google Gemini image generation tool
 696 | @mcp_server.tool
 697 | def generate_image_gemini(prompt: str, model: str = GEMINI_MODEL) -> Dict[str, Any]:
 698 |     """
 699 |     Generate an image using Google Gemini's image generation models.
 700 |     
 701 |     Args:
 702 |         prompt: Text description for image generation
 703 |         model: Model to use (default: gemini-2.0-flash-exp-image-generation)
 704 |         
 705 |     Returns:
 706 |         Dictionary with image information
 707 |     """
 708 |     # Generate a unique image ID
 709 |     image_id = str(uuid.uuid4())
 710 |     
 711 |     # Generate image
 712 |     result = gemini_generator.generate_image(prompt, model)
 713 |     
 714 |     # Create response
 715 |     response = {
 716 |         "image_id": image_id,
 717 |         "prompt": prompt,
 718 |         "model": model,
 719 |         "image_path": result.get("local_path"),
 720 |         "image_url": f"/images/{os.path.basename(result.get('local_path', ''))}"
 721 |     }
 722 |     
 723 |     return response
 724 | 
 725 | 
 726 | # Add multi-view image generation tool
 727 | @mcp_server.tool
 728 | def generate_multi_view_images(prompt: str, num_views: int = 4) -> Dict[str, Any]:
 729 |     """
 730 |     Generate multiple views of the same 3D object using Google Gemini.
 731 |     
 732 |     Args:
 733 |         prompt: Text description of the 3D object
 734 |         num_views: Number of views to generate (default: 4)
 735 |         
 736 |     Returns:
 737 |         Dictionary with multi-view image information
 738 |     """
 739 |     # Validate number of views
 740 |     if num_views < MULTI_VIEW_PIPELINE["MIN_NUM_VIEWS"]:
 741 |         raise ValueError(f"Number of views must be at least {MULTI_VIEW_PIPELINE['MIN_NUM_VIEWS']}")
 742 |     
 743 |     if num_views > MULTI_VIEW_PIPELINE["MAX_NUM_VIEWS"]:
 744 |         raise ValueError(f"Number of views cannot exceed {MULTI_VIEW_PIPELINE['MAX_NUM_VIEWS']}")
 745 |     
 746 |     # Generate a unique multi-view ID
 747 |     multi_view_id = str(uuid.uuid4())
 748 |     
 749 |     # Generate multi-view images
 750 |     results = gemini_generator.generate_multiple_views(prompt, num_views)
 751 |     
 752 |     # Create response
 753 |     response = {
 754 |         "multi_view_id": multi_view_id,
 755 |         "prompt": prompt,
 756 |         "num_views": num_views,
 757 |         "views": [
 758 |             {
 759 |                 "view_id": result.get("view_id", f"view_{i+1}"),
 760 |                 "view_index": result.get("view_index", i+1),
 761 |                 "view_direction": result.get("view_direction", ""),
 762 |                 "image_path": result.get("local_path"),
 763 |                 "image_url": f"/images/{os.path.basename(result.get('local_path', ''))}"
 764 |             }
 765 |             for i, result in enumerate(results)
 766 |         ],
 767 |         "approval_required": IMAGE_APPROVAL["ENABLED"] and not IMAGE_APPROVAL["AUTO_APPROVE"]
 768 |     }
 769 |     
 770 |     # Store multi-view information for approval
 771 |     if IMAGE_APPROVAL["ENABLED"]:
 772 |         approved_images[multi_view_id] = {
 773 |             "multi_view_id": multi_view_id,
 774 |             "prompt": prompt,
 775 |             "num_views": num_views,
 776 |             "views": response["views"],
 777 |             "approved_views": [] if not IMAGE_APPROVAL["AUTO_APPROVE"] else [view["view_id"] for view in response["views"]],
 778 |             "rejected_views": [],
 779 |             "approval_complete": IMAGE_APPROVAL["AUTO_APPROVE"]
 780 |         }
 781 |     
 782 |     return response
 783 | 
 784 | 
 785 | # Add image approval tool
 786 | @mcp_server.tool
 787 | def approve_image(multi_view_id: str, view_id: str) -> Dict[str, Any]:
 788 |     """
 789 |     Approve an image for 3D model generation.
 790 |     
 791 |     Args:
 792 |         multi_view_id: ID of the multi-view set
 793 |         view_id: ID of the view to approve
 794 |         
 795 |     Returns:
 796 |         Dictionary with approval information
 797 |     """
 798 |     # Check if multi-view ID exists
 799 |     if multi_view_id not in approved_images:
 800 |         raise ValueError(f"Multi-view set with ID {multi_view_id} not found")
 801 |     
 802 |     # Get multi-view information
 803 |     multi_view_info = approved_images[multi_view_id]
 804 |     
 805 |     # Check if view ID exists
 806 |     view_exists = False
 807 |     for view in multi_view_info["views"]:
 808 |         if view["view_id"] == view_id:
 809 |             view_exists = True
 810 |             break
 811 |     
 812 |     if not view_exists:
 813 |         raise ValueError(f"View with ID {view_id} not found in multi-view set {multi_view_id}")
 814 |     
 815 |     # Check if view is already approved
 816 |     if view_id in multi_view_info["approved_views"]:
 817 |         return {
 818 |             "multi_view_id": multi_view_id,
 819 |             "view_id": view_id,
 820 |             "status": "already_approved",
 821 |             "approved_views": multi_view_info["approved_views"],
 822 |             "rejected_views": multi_view_info["rejected_views"],
 823 |             "approval_complete": multi_view_info["approval_complete"]
 824 |         }
 825 |     
 826 |     # Remove from rejected views if present
 827 |     if view_id in multi_view_info["rejected_views"]:
 828 |         multi_view_info["rejected_views"].remove(view_id)
 829 |     
 830 |     # Add to approved views
 831 |     multi_view_info["approved_views"].append(view_id)
 832 |     
 833 |     # Check if approval is complete
 834 |     if len(multi_view_info["approved_views"]) >= IMAGE_APPROVAL["MIN_APPROVED_IMAGES"]:
 835 |         multi_view_info["approval_complete"] = True
 836 |     
 837 |     # Create response
 838 |     response = {
 839 |         "multi_view_id": multi_view_id,
 840 |         "view_id": view_id,
 841 |         "status": "approved",
 842 |         "approved_views": multi_view_info["approved_views"],
 843 |         "rejected_views": multi_view_info["rejected_views"],
 844 |         "approval_complete": multi_view_info["approval_complete"]
 845 |     }
 846 |     
 847 |     return response
 848 | 
 849 | 
 850 | # Add image rejection tool
 851 | @mcp_server.tool
 852 | def reject_image(multi_view_id: str, view_id: str) -> Dict[str, Any]:
 853 |     """
 854 |     Reject an image for 3D model generation.
 855 |     
 856 |     Args:
 857 |         multi_view_id: ID of the multi-view set
 858 |         view_id: ID of the view to reject
 859 |         
 860 |     Returns:
 861 |         Dictionary with rejection information
 862 |     """
 863 |     # Check if multi-view ID exists
 864 |     if multi_view_id not in approved_images:
 865 |         raise ValueError(f"Multi-view set with ID {multi_view_id} not found")
 866 |     
 867 |     # Get multi-view information
 868 |     multi_view_info = approved_images[multi_view_id]
 869 |     
 870 |     # Check if view ID exists
 871 |     view_exists = False
 872 |     for view in multi_view_info["views"]:
 873 |         if view["view_id"] == view_id:
 874 |             view_exists = True
 875 |             break
 876 |     
 877 |     if not view_exists:
 878 |         raise ValueError(f"View with ID {view_id} not found in multi-view set {multi_view_id}")
 879 |     
 880 |     # Check if view is already rejected
 881 |     if view_id in multi_view_info["rejected_views"]:
 882 |         return {
 883 |             "multi_view_id": multi_view_id,
 884 |             "view_id": view_id,
 885 |             "status": "already_rejected",
 886 |             "approved_views": multi_view_info["approved_views"],
 887 |             "rejected_views": multi_view_info["rejected_views"],
 888 |             "approval_complete": multi_view_info["approval_complete"]
 889 |         }
 890 |     
 891 |     # Remove from approved views if present
 892 |     if view_id in multi_view_info["approved_views"]:
 893 |         multi_view_info["approved_views"].remove(view_id)
 894 |     
 895 |     # Add to rejected views
 896 |     multi_view_info["rejected_views"].append(view_id)
 897 |     
 898 |     # Check if approval is complete
 899 |     if len(multi_view_info["approved_views"]) >= IMAGE_APPROVAL["MIN_APPROVED_IMAGES"]:
 900 |         multi_view_info["approval_complete"] = True
 901 |     else:
 902 |         multi_view_info["approval_complete"] = False
 903 |     
 904 |     # Create response
 905 |     response = {
 906 |         "multi_view_id": multi_view_id,
 907 |         "view_id": view_id,
 908 |         "status": "rejected",
 909 |         "approved_views": multi_view_info["approved_views"],
 910 |         "rejected_views": multi_view_info["rejected_views"],
 911 |         "approval_complete": multi_view_info["approval_complete"]
 912 |     }
 913 |     
 914 |     return response
 915 | 
 916 | 
 917 | # Add 3D model generation from approved images tool
 918 | @mcp_server.tool
 919 | def create_3d_model_from_images(multi_view_id: str, output_name: Optional[str] = None) -> Dict[str, Any]:
 920 |     """
 921 |     Create a 3D model from approved multi-view images.
 922 |     
 923 |     Args:
 924 |         multi_view_id: ID of the multi-view set
 925 |         output_name: Optional name for the output model
 926 |         
 927 |     Returns:
 928 |         Dictionary with model information
 929 |     """
 930 |     # Check if multi-view ID exists
 931 |     if multi_view_id not in approved_images:
 932 |         raise ValueError(f"Multi-view set with ID {multi_view_id} not found")
 933 |     
 934 |     # Get multi-view information
 935 |     multi_view_info = approved_images[multi_view_id]
 936 |     
 937 |     # Check if approval is complete
 938 |     if not multi_view_info["approval_complete"]:
 939 |         raise ValueError(f"Approval for multi-view set {multi_view_id} is not complete")
 940 |     
 941 |     # Check if there are enough approved images
 942 |     if len(multi_view_info["approved_views"]) < IMAGE_APPROVAL["MIN_APPROVED_IMAGES"]:
 943 |         raise ValueError(f"Not enough approved images. Need at least {IMAGE_APPROVAL['MIN_APPROVED_IMAGES']}, but only have {len(multi_view_info['approved_views'])}")
 944 |     
 945 |     # Get approved image paths
 946 |     approved_image_paths = []
 947 |     for view in multi_view_info["views"]:
 948 |         if view["view_id"] in multi_view_info["approved_views"]:
 949 |             approved_image_paths.append(view["image_path"])
 950 |     
 951 |     # Generate a unique model ID
 952 |     model_id = str(uuid.uuid4())
 953 |     
 954 |     # Set output name if not provided
 955 |     if not output_name:
 956 |         output_name = f"model_{model_id[:8]}"
 957 |     
 958 |     # Create 3D model
 959 |     if REMOTE_CUDA_MVS["ENABLED"] and remote_connection_manager:
 960 |         # Use remote CUDA MVS processing
 961 |         servers = discover_remote_servers()
 962 |         
 963 |         if not servers:
 964 |             raise ValueError("No remote CUDA MVS servers found")
 965 |         
 966 |         # Use the first available server
 967 |         server_id = servers[0]["id"]
 968 |         
 969 |         # Upload images
 970 |         upload_result = upload_images_to_server(server_id, approved_image_paths)
 971 |         
 972 |         if not upload_result or "job_id" not in upload_result:
 973 |             raise ValueError("Failed to upload images to remote server")
 974 |         
 975 |         job_id = upload_result["job_id"]
 976 |         
 977 |         # Process images
 978 |         process_result = process_images_remotely(
 979 |             server_id,
 980 |             job_id,
 981 |             {
 982 |                 "quality": REMOTE_CUDA_MVS["DEFAULT_RECONSTRUCTION_QUALITY"],
 983 |                 "output_format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
 984 |             }
 985 |         )
 986 |         
 987 |         if not process_result:
 988 |             raise ValueError(f"Failed to process images for job {job_id}")
 989 |         
 990 |         # Wait for completion if requested
 991 |         if REMOTE_CUDA_MVS["WAIT_FOR_COMPLETION"]:
 992 |             import time
 993 |             
 994 |             while True:
 995 |                 status = get_job_status(job_id)
 996 |                 
 997 |                 if not status:
 998 |                     raise ValueError(f"Failed to get status for job {job_id}")
 999 |                 
1000 |                 if status["status"] in ["completed", "failed", "cancelled"]:
1001 |                     break
1002 |                 
1003 |                 time.sleep(REMOTE_CUDA_MVS["POLL_INTERVAL"])
1004 |             
1005 |             if status["status"] == "completed":
1006 |                 # Download model
1007 |                 download_result = download_remote_model(job_id)
1008 |                 
1009 |                 if not download_result:
1010 |                     raise ValueError(f"Failed to download model for job {job_id}")
1011 |                 
1012 |                 # Store model information
1013 |                 models[model_id] = {
1014 |                     "id": model_id,
1015 |                     "type": "cuda_mvs_remote",
1016 |                     "parameters": {
1017 |                         "multi_view_id": multi_view_id,
1018 |                         "prompt": multi_view_info["prompt"],
1019 |                         "num_views": len(approved_image_paths),
1020 |                         "quality": REMOTE_CUDA_MVS["DEFAULT_RECONSTRUCTION_QUALITY"],
1021 |                         "output_format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
1022 |                     },
1023 |                     "description": f"3D model generated from {len(approved_image_paths)} views of '{multi_view_info['prompt']}'",
1024 |                     "model_file": download_result.get("model_path"),
1025 |                     "point_cloud_file": download_result.get("point_cloud_path"),
1026 |                     "previews": {},  # Will be generated later
1027 |                     "format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"],
1028 |                     "remote_job_id": job_id
1029 |                 }
1030 |                 
1031 |                 # Create response
1032 |                 response = {
1033 |                     "model_id": model_id,
1034 |                     "multi_view_id": multi_view_id,
1035 |                     "status": "completed",
1036 |                     "model_path": download_result.get("model_path"),
1037 |                     "point_cloud_path": download_result.get("point_cloud_path"),
1038 |                     "format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
1039 |                 }
1040 |             else:
1041 |                 # Store job information
1042 |                 remote_jobs[job_id] = {
1043 |                     "model_id": model_id,
1044 |                     "multi_view_id": multi_view_id,
1045 |                     "server_id": server_id,
1046 |                     "job_id": job_id,
1047 |                     "status": status["status"],
1048 |                     "message": status.get("message", "")
1049 |                 }
1050 |                 
1051 |                 # Create response
1052 |                 response = {
1053 |                     "model_id": model_id,
1054 |                     "multi_view_id": multi_view_id,
1055 |                     "status": status["status"],
1056 |                     "message": status.get("message", ""),
1057 |                     "job_id": job_id
1058 |                 }
1059 |         else:
1060 |             # Store job information
1061 |             remote_jobs[job_id] = {
1062 |                 "model_id": model_id,
1063 |                 "multi_view_id": multi_view_id,
1064 |                 "server_id": server_id,
1065 |                 "job_id": job_id,
1066 |                 "status": "processing"
1067 |             }
1068 |             
1069 |             # Create response
1070 |             response = {
1071 |                 "model_id": model_id,
1072 |                 "multi_view_id": multi_view_id,
1073 |                 "status": "processing",
1074 |                 "job_id": job_id,
1075 |                 "server_id": server_id
1076 |             }
1077 |     else:
1078 |         # Use local CUDA MVS processing
1079 |         result = cuda_mvs.process_images(
1080 |             approved_image_paths,
1081 |             output_name=output_name,
1082 |             quality=REMOTE_CUDA_MVS["DEFAULT_RECONSTRUCTION_QUALITY"],
1083 |             output_format=REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
1084 |         )
1085 |         
1086 |         # Store model information
1087 |         models[model_id] = {
1088 |             "id": model_id,
1089 |             "type": "cuda_mvs_local",
1090 |             "parameters": {
1091 |                 "multi_view_id": multi_view_id,
1092 |                 "prompt": multi_view_info["prompt"],
1093 |                 "num_views": len(approved_image_paths),
1094 |                 "quality": REMOTE_CUDA_MVS["DEFAULT_RECONSTRUCTION_QUALITY"],
1095 |                 "output_format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
1096 |             },
1097 |             "description": f"3D model generated from {len(approved_image_paths)} views of '{multi_view_info['prompt']}'",
1098 |             "model_file": result.get("model_path"),
1099 |             "point_cloud_file": result.get("point_cloud_path"),
1100 |             "previews": {},  # Will be generated later
1101 |             "format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
1102 |         }
1103 |         
1104 |         # Create response
1105 |         response = {
1106 |             "model_id": model_id,
1107 |             "multi_view_id": multi_view_id,
1108 |             "status": "completed",
1109 |             "model_path": result.get("model_path"),
1110 |             "point_cloud_path": result.get("point_cloud_path"),
1111 |             "format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
1112 |         }
1113 |     
1114 |     return response
1115 | 
1116 | 
1117 | # Add complete pipeline tool (text to 3D model)
@mcp_server.tool
def create_3d_model_from_text(prompt: str, num_views: int = 4, wait_for_completion: bool = True) -> Dict[str, Any]:
    """
    Create a 3D model from a text description using the complete pipeline.

    Generates multi-view images for the prompt, auto-approves them when
    IMAGE_APPROVAL["AUTO_APPROVE"] is enabled, then reconstructs a 3D model
    from the approved views.

    Args:
        prompt: Text description of the 3D object
        num_views: Number of views to generate (default: 4)
        wait_for_completion: Whether to wait for remote processing to complete.
            NOTE(review): this flag currently has no effect — waiting is
            decided by REMOTE_CUDA_MVS["WAIT_FOR_COMPLETION"] inside
            create_3d_model_from_images, and the result is returned
            unchanged either way. Kept for interface compatibility.

    Returns:
        Dictionary with model information, or an "awaiting_approval" payload
        when manual image approval is required before reconstruction.
    """
    # Generate multi-view images for the prompt
    multi_view_result = generate_multi_view_images(prompt, num_views)
    multi_view_id = multi_view_result["multi_view_id"]

    # When auto-approval is disabled, hand control back to the caller so
    # each view can be approved or rejected manually before reconstruction.
    if not IMAGE_APPROVAL["AUTO_APPROVE"]:
        return {
            "status": "awaiting_approval",
            "message": "Please approve or reject each image before proceeding",
            "multi_view_id": multi_view_id,
            "views": multi_view_result["views"]
        }

    # Auto-approve every generated view
    for view in multi_view_result["views"]:
        approve_image(multi_view_id, view["view_id"])

    # Create the 3D model from the approved images. If remote processing is
    # still running, the result carries status "processing" and a job_id the
    # caller can poll via get_remote_job_status. (The original code branched
    # on wait_for_completion here, but both branches returned the same value.)
    return create_3d_model_from_images(multi_view_id)
1158 | 
1159 | 
1160 | # Add remote CUDA MVS server discovery tool
@mcp_server.tool
def discover_remote_cuda_mvs_servers() -> Dict[str, Any]:
    """
    Discover remote CUDA MVS servers on the network.

    Raises:
        ValueError: If remote processing is disabled or the connection
            manager has not been initialized.

    Returns:
        Dictionary with the discovered servers and their count.
    """
    # Guard clauses: remote processing must be enabled and initialized
    if not REMOTE_CUDA_MVS["ENABLED"]:
        raise ValueError("Remote CUDA MVS processing is not enabled")
    if not remote_connection_manager:
        raise ValueError("Remote CUDA MVS connection manager is not initialized")

    discovered = discover_remote_servers()
    return {"servers": discovered, "count": len(discovered)}
1181 | 
1182 | 
1183 | # Add remote job status tool
@mcp_server.tool
def get_remote_job_status(job_id: str) -> Dict[str, Any]:
    """
    Get the status of a remote CUDA MVS processing job.

    Refreshes the locally cached job record with the latest status,
    progress, and message reported by the remote server.

    Args:
        job_id: ID of the job to get status for

    Raises:
        ValueError: If remote processing is disabled, the connection manager
            is not initialized, the job is unknown, or the server query fails.

    Returns:
        Dictionary with the updated job information.
    """
    if not REMOTE_CUDA_MVS["ENABLED"]:
        raise ValueError("Remote CUDA MVS processing is not enabled")

    if not remote_connection_manager:
        raise ValueError("Remote CUDA MVS connection manager is not initialized")

    # Check if job exists
    if job_id not in remote_jobs:
        raise ValueError(f"Job with ID {job_id} not found")

    # Get job information
    job_info = remote_jobs[job_id]

    # Get status from server
    status = get_job_status(job_id)

    if not status:
        raise ValueError(f"Failed to get status for job with ID {job_id}")

    # Update the cached record, preserving previous values when the server
    # response omits a field. (Previously "progress" was reset to 0 and
    # "message" to "" whenever the server left them out, which was
    # inconsistent with how "status" was preserved.)
    job_info["status"] = status.get("status", job_info["status"])
    job_info["progress"] = status.get("progress", job_info.get("progress", 0))
    job_info["message"] = status.get("message", job_info.get("message", ""))

    return job_info
1220 | 
1221 | 
1222 | # Add remote model download tool
@mcp_server.tool
def download_remote_model_result(job_id: str) -> Dict[str, Any]:
    """
    Download a processed model from a remote CUDA MVS server.

    Args:
        job_id: ID of the job to download model for

    Raises:
        ValueError: If remote processing is disabled, the connection manager
            is missing, the job is unknown or not completed, or the download
            fails.

    Returns:
        Dictionary with the downloaded model's paths and format.
    """
    # Guard clauses: remote processing must be enabled, initialized,
    # and the job must be known locally
    if not REMOTE_CUDA_MVS["ENABLED"]:
        raise ValueError("Remote CUDA MVS processing is not enabled")
    if not remote_connection_manager:
        raise ValueError("Remote CUDA MVS connection manager is not initialized")
    if job_id not in remote_jobs:
        raise ValueError(f"Job with ID {job_id} not found")

    job_info = remote_jobs[job_id]

    # Only completed jobs have a model available for download
    if job_info["status"] != "completed":
        raise ValueError(f"Job with ID {job_id} is not completed (status: {job_info['status']})")

    result = download_remote_model(job_id)
    if not result:
        raise ValueError(f"Failed to download model for job with ID {job_id}")

    model_path = result.get("model_path")
    point_cloud_path = result.get("point_cloud_path")

    # Record the downloaded paths on the job entry
    job_info["model_path"] = model_path
    job_info["point_cloud_path"] = point_cloud_path
    job_info["downloaded"] = True

    # Mirror the paths onto the associated model record, if one exists
    if "model_id" in job_info and job_info["model_id"] in models:
        linked_model = models[job_info["model_id"]]
        linked_model["model_file"] = model_path
        linked_model["point_cloud_file"] = point_cloud_path

    return {
        "job_id": job_id,
        "model_path": model_path,
        "point_cloud_path": point_cloud_path,
        "format": REMOTE_CUDA_MVS["DEFAULT_OUTPUT_FORMAT"]
    }
1274 | 
1275 | 
1276 | # Add remote job cancellation tool
@mcp_server.tool
def cancel_remote_job(job_id: str) -> Dict[str, Any]:
    """
    Cancel a remote CUDA MVS processing job.

    Args:
        job_id: ID of the job to cancel

    Raises:
        ValueError: If remote processing is disabled, the connection manager
            is missing, the job is unknown, or the cancel request fails.

    Returns:
        Dictionary describing the cancellation outcome.
    """
    # Guard clauses: remote processing must be enabled, initialized,
    # and the job must be known locally
    if not REMOTE_CUDA_MVS["ENABLED"]:
        raise ValueError("Remote CUDA MVS processing is not enabled")
    if not remote_connection_manager:
        raise ValueError("Remote CUDA MVS connection manager is not initialized")
    if job_id not in remote_jobs:
        raise ValueError(f"Job with ID {job_id} not found")

    job_info = remote_jobs[job_id]

    result = cancel_job(job_id)
    if not result:
        raise ValueError(f"Failed to cancel job with ID {job_id}")

    cancelled = result.get("cancelled", False)

    # Only mark the local record cancelled when the server confirms it
    if cancelled:
        job_info["status"] = "cancelled"
        job_info["message"] = "Job cancelled by user"

    return {
        "job_id": job_id,
        "cancelled": cancelled,
        "status": job_info["status"],
        "message": job_info.get("message", "")
    }
1318 | 
1319 | 
1320 | # FastAPI routes
@app.post("/tool_call")
async def handle_tool_call(request: Request) -> JSONResponse:
    """
    Handle a tool call from a client.

    Expects a JSON body of the form
    {"tool_name": "<name>", "tool_params": {...}}.

    Args:
        request: FastAPI request object

    Raises:
        HTTPException: 400 for a malformed body or missing tool name,
            404 for an unknown tool, 500 when the tool itself fails.

    Returns:
        JSON response with tool call result
    """
    # Parse request body; reject malformed JSON with a 400 instead of
    # letting the decode error surface as an unhandled 500.
    try:
        data = await request.json()
    except ValueError:
        raise HTTPException(status_code=400, detail="Request body must be valid JSON")

    # Check if tool name is provided
    if "tool_name" not in data:
        raise HTTPException(status_code=400, detail="Tool name is required")

    # Check if tool exists
    tool_name = data["tool_name"]
    if tool_name not in mcp_server.tools:
        raise HTTPException(status_code=404, detail=f"Tool {tool_name} not found")

    # Get tool parameters
    tool_params = data.get("tool_params", {})

    # Call tool; surface any failure as a 500 carrying the error message
    try:
        result = mcp_server.tools[tool_name](**tool_params)
        return JSONResponse(content=result)
    except Exception as e:
        logger.error(f"Error calling tool {tool_name}: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
1354 | 
@app.get("/ui/preview/{model_id}")
async def preview_model(request: Request, model_id: str) -> Response:
    """
    Render a preview page for a model.

    Args:
        request: FastAPI request object
        model_id: ID of the model to preview

    Raises:
        HTTPException: 404 when the model is unknown.

    Returns:
        HTML response with model preview
    """
    # Unknown model -> 404
    if model_id not in models:
        raise HTTPException(status_code=404, detail=f"Model with ID {model_id} not found")

    model_info = models[model_id]

    # Render the preview template with the model's parameters and preview images
    context = {
        "request": request,
        "model_id": model_id,
        "parameters": model_info["parameters"],
        "previews": model_info["previews"]
    }
    return templates.TemplateResponse("preview.html", context)
1384 | 
@app.get("/preview/{view}/{model_id}")
async def get_preview(view: str, model_id: str) -> FileResponse:
    """
    Get a preview image for a model.

    Args:
        view: View to get preview for
        model_id: ID of the model

    Raises:
        HTTPException: 404 when the model or the requested view is unknown.

    Returns:
        Image file response
    """
    # Unknown model -> 404
    if model_id not in models:
        raise HTTPException(status_code=404, detail=f"Model with ID {model_id} not found")

    previews = models[model_id]["previews"]

    # Unknown view -> 404
    if view not in previews:
        raise HTTPException(status_code=404, detail=f"Preview for view {view} not found")

    # Serve the preview image file from disk
    return FileResponse(previews[view])
1410 | 
@app.get("/download/{model_id}")
async def download_model(model_id: str) -> FileResponse:
    """
    Download a model file.

    Args:
        model_id: ID of the model to download

    Raises:
        HTTPException: 404 when the model is unknown or has no file on disk.

    Returns:
        Model file response
    """
    # Unknown model -> 404
    if model_id not in models:
        raise HTTPException(status_code=404, detail=f"Model with ID {model_id} not found")

    model_info = models[model_id]

    # A model created by a still-running remote job has no file yet
    if not model_info.get("model_file"):
        raise HTTPException(status_code=404, detail=f"Model file for model with ID {model_id} not found")

    # Serve the file with a download name of <model_id>.<format>
    download_name = f"{model_id}.{model_info['format']}"
    return FileResponse(model_info["model_file"], filename=download_name)
1438 | 
@app.get("/")
async def root() -> Dict[str, Any]:
    """
    Root endpoint.

    Returns:
        Dictionary with server information
    """
    # Basic server metadata plus the names of all registered MCP tools
    info = {
        "name": "OpenSCAD MCP Server",
        "version": "1.0.0",
        "description": "MCP server for OpenSCAD",
        "tools": list(mcp_server.tools.keys())
    }
    return info
1453 | 
# Run server when executed directly (not on import)
if __name__ == "__main__":
    # Launch the FastAPI app via uvicorn on all interfaces, port 8000.
    # reload=True restarts the server on code changes (development setting).
    # NOTE(review): the import string targets "src.main:app" — confirm this
    # matches this module's actual path when running the file directly.
    uvicorn.run("src.main:app", host="0.0.0.0", port=8000, reload=True)
1457 | 
```
Page 3/3FirstPrevNextLast