This is page 13 of 16. Use http://codebase.md/fujitsu-ai/mcp-server-for-mas-developments?page={x} to view the full context.

# Directory Structure

```
├── .gitattributes
├── .gitignore
├── agents
│   ├── __init__.py
│   ├── AgentInterface
│   │   ├── __init__.py
│   │   ├── Python
│   │   │   ├── __init__.py
│   │   │   ├── agent.py
│   │   │   ├── color.py
│   │   │   ├── config.py
│   │   │   ├── language.py
│   │   │   ├── local_file_handler.py
│   │   │   └── network.py
│   │   └── requirements.txt
│   ├── AgentMonitoring
│   │   ├── ChatBot-Agent Dashboard Example - Grafana.json
│   │   ├── images
│   │   │   ├── Grafana.png
│   │   │   └── Prometheus.png
│   │   ├── IoT-Agent Dashboard Example - Grafana.json
│   │   ├── OpenAI compatible API - Agent Dashboard Example - Grafana.json
│   │   ├── prometheus Example.yml
│   │   └── README.md
│   ├── ChatBotAgent
│   │   ├── __init__.py
│   │   ├── config.json.example
│   │   ├── html
│   │   │   ├── favicon.ico
│   │   │   ├── index_de.html
│   │   │   ├── index.html
│   │   │   ├── Logo_light.svg
│   │   │   ├── start_http_server.ps1
│   │   │   └── start_http_server.sh
│   │   ├── Python
│   │   │   ├── __init__.py
│   │   │   └── chatbot_agent.py
│   │   ├── README.md
│   │   └── requirements.txt
│   ├── IoTAgent
│   │   ├── config_example.json
│   │   ├── Python
│   │   │   ├── iot_mqtt_agent.py
│   │   │   └── language.py
│   │   ├── README.md
│   │   └── requirements.txt
│   ├── ISMAgent
│   │   ├── config_example.json
│   │   ├── PGPT Scenario Prompts
│   │   │   ├── ISM System Prompt - Detecting Error State.txt
│   │   │   ├── ISM User Post-Prompt - Detecting Error State.txt
│   │   │   ├── ISM User Pre-Prompt - Detecting Error State.txt
│   │   │   └── README.md
│   │   ├── Python
│   │   │   ├── ism_agent.py
│   │   │   └── language.py
│   │   ├── README.md
│   │   ├── requirements.txt
│   │   └── start_ism_agent.ps1
│   ├── MCP-Client
│   │   ├── __init__.py
│   │   ├── .env.example
│   │   ├── Python
│   │   │   ├── __init__.py
│   │   │   ├── chat_handler.py
│   │   │   ├── config.py
│   │   │   ├── environment.py
│   │   │   ├── llm_client.py
│   │   │   ├── mcp_client_sse.py
│   │   │   ├── mcp_client.py
│   │   │   ├── messages
│   │   │   │   ├── __init__.py
│   │   │   │   ├── message_types
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── incrementing_id_message.py
│   │   │   │   │   ├── initialize_message.py
│   │   │   │   │   ├── json_rpc_message.py
│   │   │   │   │   ├── ping_message.py
│   │   │   │   │   ├── prompts_messages.py
│   │   │   │   │   ├── prompts_models.py
│   │   │   │   │   ├── resources_messages.py
│   │   │   │   │   └── tools_messages.py
│   │   │   │   ├── send_call_tool.py
│   │   │   │   ├── send_initialize_message.py
│   │   │   │   ├── send_message.py
│   │   │   │   ├── send_ping.py
│   │   │   │   ├── send_prompts.py
│   │   │   │   ├── send_resources.py
│   │   │   │   └── send_tools_list.py
│   │   │   ├── system_prompt_generator.py
│   │   │   ├── tools_handler.py
│   │   │   └── transport
│   │   │       ├── __init__.py
│   │   │       └── stdio
│   │   │           ├── __init__.py
│   │   │           ├── stdio_client.py
│   │   │           ├── stdio_server_parameters.py
│   │   │           └── stdio_server_shutdown.py
│   │   ├── README.md
│   │   ├── requirements.txt
│   │   └── server_config.json
│   ├── OpenAI_Compatible_API_Agent
│   │   ├── __init__.py
│   │   ├── docker-compose.yml
│   │   ├── Dockerfile
│   │   ├── pgpt_openai_api_mcp.json.example
│   │   ├── pgpt_openai_api_proxy.json.example
│   │   ├── Python
│   │   │   ├── __init__.py
│   │   │   ├── client_tests
│   │   │   │   ├── __init__.py
│   │   │   │   ├── openai_test_client_structured.py
│   │   │   │   ├── openai_test_client_tools.py
│   │   │   │   ├── openai_test_client.py
│   │   │   │   ├── vllm_client_multimodal.py
│   │   │   │   ├── vllm_client.py
│   │   │   │   ├── vllm_structured.py
│   │   │   │   └── vllm_structured2.py
│   │   │   ├── generate_api_key.py
│   │   │   ├── open_ai_helper.py
│   │   │   ├── openai_compatible_api.py
│   │   │   ├── openai_mcp_api.py
│   │   │   ├── pgpt_api.py
│   │   │   ├── privategpt_api.py
│   │   │   └── vllmproxy.py
│   │   ├── README.md
│   │   └── requirements.txt
│   └── SourceManagerAgent
│       ├── __init__.py
│       ├── config.json.example
│       └── Python
│           ├── __init__.py
│           ├── file_tools
│           │   └── loader_factory.py
│           ├── file_upload_agent.py
│           └── local_db.py
├── clients
│   ├── __init__.py
│   ├── C# .Net
│   │   ├── 1.0 mcp_login
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_login.deps.json
│   │   │   │           ├── mcp_login.dll
│   │   │   │           ├── mcp_login.exe
│   │   │   │           ├── mcp_login.pdb
│   │   │   │           ├── mcp_login.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_login.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_login.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_login.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_login.assets.cache
│   │   │   │   │       ├── mcp_login.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_login.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_login.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_login.csproj.Up2Date
│   │   │   │   │       ├── mcp_login.dll
│   │   │   │   │       ├── mcp_login.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_login.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_login.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_login.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_login.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_login.dll
│   │   │   │   ├── mcp_login.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_login.csproj.nuget.g.props
│   │   │   │   ├── mcp_login.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 1.1 mcp_logout
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_logout.deps.json
│   │   │   │           ├── mcp_logout.dll
│   │   │   │           ├── mcp_logout.exe
│   │   │   │           ├── mcp_logout.pdb
│   │   │   │           ├── mcp_logout.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_logout.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_logout.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_logout.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_logout.assets.cache
│   │   │   │   │       ├── mcp_logout.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_logout.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_logout.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_logout.csproj.Up2Date
│   │   │   │   │       ├── mcp_logout.dll
│   │   │   │   │       ├── mcp_logout.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_logout.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_logout.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_logout.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_logout.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_logout.dll
│   │   │   │   ├── mcp_logout.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_logout.csproj.nuget.g.props
│   │   │   │   ├── mcp_logout.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 2.0 mcp_chat
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_chat.deps.json
│   │   │   │           ├── mcp_chat.dll
│   │   │   │           ├── mcp_chat.exe
│   │   │   │           ├── mcp_chat.pdb
│   │   │   │           ├── mcp_chat.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_chat.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_chat.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_chat.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_chat.assets.cache
│   │   │   │   │       ├── mcp_chat.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_chat.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_chat.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_chat.csproj.Up2Date
│   │   │   │   │       ├── mcp_chat.dll
│   │   │   │   │       ├── mcp_chat.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_chat.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_chat.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_chat.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_chat.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_chat.dll
│   │   │   │   ├── mcp_chat.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_chat.csproj.nuget.g.props
│   │   │   │   ├── mcp_chat.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 2.1 mcp_continue_chat
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_continue_chat.deps.json
│   │   │   │           ├── mcp_continue_chat.dll
│   │   │   │           ├── mcp_continue_chat.exe
│   │   │   │           ├── mcp_continue_chat.pdb
│   │   │   │           ├── mcp_continue_chat.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_continue_chat.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_cont.EF178231.Up2Date
│   │   │   │   │       ├── mcp_continue_chat.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_continue_chat.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_continue_chat.assets.cache
│   │   │   │   │       ├── mcp_continue_chat.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_continue_chat.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_continue_chat.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_continue_chat.dll
│   │   │   │   │       ├── mcp_continue_chat.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_continue_chat.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_continue_chat.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_continue_chat.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_continue_chat.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_continue_chat.dll
│   │   │   │   ├── mcp_continue_chat.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_continue_chat.csproj.nuget.g.props
│   │   │   │   ├── mcp_continue_chat.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 2.2 mcp_get_chat_info
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_get_chat_info.deps.json
│   │   │   │           ├── mcp_get_chat_info.dll
│   │   │   │           ├── mcp_get_chat_info.exe
│   │   │   │           ├── mcp_get_chat_info.pdb
│   │   │   │           ├── mcp_get_chat_info.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── Dokumente - Verknüpfung.lnk
│   │   │   ├── mcp_get_chat_info.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_get_.DFF47B4E.Up2Date
│   │   │   │   │       ├── mcp_get_chat_info.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_get_chat_info.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_get_chat_info.assets.cache
│   │   │   │   │       ├── mcp_get_chat_info.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_get_chat_info.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_get_chat_info.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_get_chat_info.dll
│   │   │   │   │       ├── mcp_get_chat_info.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_get_chat_info.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_get_chat_info.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_get_chat_info.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_get_chat_info.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_get_chat_info.dll
│   │   │   │   ├── mcp_get_chat_info.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_get_chat_info.csproj.nuget.g.props
│   │   │   │   ├── mcp_get_chat_info.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 3.0 mcp_create_source
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_create_source.deps.json
│   │   │   │           ├── mcp_create_source.dll
│   │   │   │           ├── mcp_create_source.exe
│   │   │   │           ├── mcp_create_source.pdb
│   │   │   │           ├── mcp_create_source.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_create_source.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_crea.CB4ED912.Up2Date
│   │   │   │   │       ├── mcp_create_source.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_create_source.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_create_source.assets.cache
│   │   │   │   │       ├── mcp_create_source.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_create_source.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_create_source.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_create_source.dll
│   │   │   │   │       ├── mcp_create_source.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_create_source.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_create_source.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_create_source.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_create_source.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_create_source.dll
│   │   │   │   ├── mcp_create_source.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_create_source.csproj.nuget.g.props
│   │   │   │   ├── mcp_create_source.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 3.1 mcp_get_source
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_get_source.deps.json
│   │   │   │           ├── mcp_get_source.dll
│   │   │   │           ├── mcp_get_source.exe
│   │   │   │           ├── mcp_get_source.pdb
│   │   │   │           ├── mcp_get_source.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_get_source.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_get_.4E61956F.Up2Date
│   │   │   │   │       ├── mcp_get_source.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_get_source.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_get_source.assets.cache
│   │   │   │   │       ├── mcp_get_source.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_get_source.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_get_source.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_get_source.dll
│   │   │   │   │       ├── mcp_get_source.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_get_source.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_get_source.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_get_source.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_get_source.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_get_source.dll
│   │   │   │   ├── mcp_get_source.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_get_source.csproj.nuget.g.props
│   │   │   │   ├── mcp_get_source.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 3.2 mcp_list_sources
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_list_sources.deps.json
│   │   │   │           ├── mcp_list_sources.dll
│   │   │   │           ├── mcp_list_sources.exe
│   │   │   │           ├── mcp_list_sources.pdb
│   │   │   │           ├── mcp_list_sources.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_list_sources.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_list_sources.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_list_sources.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_list_sources.assets.cache
│   │   │   │   │       ├── mcp_list_sources.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_list_sources.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_list_sources.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_list_sources.dll
│   │   │   │   │       ├── mcp_list_sources.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_list_sources.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_list_sources.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_list_sources.pdb
│   │   │   │   │       ├── mcp_list.A720E197.Up2Date
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_list_sources.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_list_sources.dll
│   │   │   │   ├── mcp_list_sources.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_list_sources.csproj.nuget.g.props
│   │   │   │   ├── mcp_list_sources.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 3.3 mcp_edit_source
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_edit_source.deps.json
│   │   │   │           ├── mcp_edit_source.dll
│   │   │   │           ├── mcp_edit_source.exe
│   │   │   │           ├── mcp_edit_source.pdb
│   │   │   │           ├── mcp_edit_source.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_edit_source.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_edit_source.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_edit_source.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_edit_source.assets.cache
│   │   │   │   │       ├── mcp_edit_source.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_edit_source.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_edit_source.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_edit_source.dll
│   │   │   │   │       ├── mcp_edit_source.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_edit_source.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_edit_source.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_edit_source.pdb
│   │   │   │   │       ├── mcp_edit.7303BE3B.Up2Date
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_edit_source.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_edit_source.dll
│   │   │   │   ├── mcp_edit_source.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_edit_source.csproj.nuget.g.props
│   │   │   │   ├── mcp_edit_source.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 3.4 mcp_delete_source
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_delete_source.deps.json
│   │   │   │           ├── mcp_delete_source.dll
│   │   │   │           ├── mcp_delete_source.exe
│   │   │   │           ├── mcp_delete_source.pdb
│   │   │   │           ├── mcp_delete_source.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_delete_source.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_dele.67DD13F9.Up2Date
│   │   │   │   │       ├── mcp_delete_source.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_delete_source.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_delete_source.assets.cache
│   │   │   │   │       ├── mcp_delete_source.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_delete_source.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_delete_source.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_delete_source.dll
│   │   │   │   │       ├── mcp_delete_source.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_delete_source.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_delete_source.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_delete_source.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_delete_source.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_delete_source.dll
│   │   │   │   ├── mcp_delete_source.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_delete_source.csproj.nuget.g.props
│   │   │   │   ├── mcp_delete_source.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 4.0 mcp_list_groups
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_list_groups.deps.json
│   │   │   │           ├── mcp_list_groups.dll
│   │   │   │           ├── mcp_list_groups.exe
│   │   │   │           ├── mcp_list_groups.pdb
│   │   │   │           ├── mcp_list_groups.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_list_groups.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_list_groups.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_list_groups.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_list_groups.assets.cache
│   │   │   │   │       ├── mcp_list_groups.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_list_groups.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_list_groups.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_list_groups.dll
│   │   │   │   │       ├── mcp_list_groups.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_list_groups.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_list_groups.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_list_groups.pdb
│   │   │   │   │       ├── mcp_list.EBD5E0D2.Up2Date
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_list_groups.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_list_groups.dll
│   │   │   │   ├── mcp_list_groups.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_list_groups.csproj.nuget.g.props
│   │   │   │   ├── mcp_list_groups.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 4.1 mcp_store_group
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_store_group.deps.json
│   │   │   │           ├── mcp_store_group.dll
│   │   │   │           ├── mcp_store_group.exe
│   │   │   │           ├── mcp_store_group.pdb
│   │   │   │           ├── mcp_store_group.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_store_group.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_stor.AFB4AA35.Up2Date
│   │   │   │   │       ├── mcp_store_group.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_store_group.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_store_group.assets.cache
│   │   │   │   │       ├── mcp_store_group.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_store_group.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_store_group.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_store_group.dll
│   │   │   │   │       ├── mcp_store_group.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_store_group.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_store_group.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_store_group.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_store_group.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_store_group.dll
│   │   │   │   ├── mcp_store_group.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_store_group.csproj.nuget.g.props
│   │   │   │   ├── mcp_store_group.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 4.2 mcp_delete_group
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_delete_group.deps.json
│   │   │   │           ├── mcp_delete_group.dll
│   │   │   │           ├── mcp_delete_group.exe
│   │   │   │           ├── mcp_delete_group.pdb
│   │   │   │           ├── mcp_delete_group.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_delete_group.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_dele.FE1C6298.Up2Date
│   │   │   │   │       ├── mcp_delete_group.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_delete_group.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_delete_group.assets.cache
│   │   │   │   │       ├── mcp_delete_group.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_delete_group.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_delete_group.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_delete_group.dll
│   │   │   │   │       ├── mcp_delete_group.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_delete_group.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_delete_group.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_delete_group.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_delete_group.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_delete_group.dll
│   │   │   │   ├── mcp_delete_group.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_delete_group.csproj.nuget.g.props
│   │   │   │   ├── mcp_delete_group.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 5.0 mcp_store_user
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_store_user.deps.json
│   │   │   │           ├── mcp_store_user.dll
│   │   │   │           ├── mcp_store_user.exe
│   │   │   │           ├── mcp_store_user.pdb
│   │   │   │           ├── mcp_store_user.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_store_user.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_stor.6C0F0C8A.Up2Date
│   │   │   │   │       ├── mcp_store_user.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_store_user.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_store_user.assets.cache
│   │   │   │   │       ├── mcp_store_user.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_store_user.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_store_user.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_store_user.dll
│   │   │   │   │       ├── mcp_store_user.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_store_user.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_store_user.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_store_user.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_store_user.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_store_user.dll
│   │   │   │   ├── mcp_store_user.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_store_user.csproj.nuget.g.props
│   │   │   │   ├── mcp_store_user.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 5.1 mcp_edit_user
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_edit_user.deps.json
│   │   │   │           ├── mcp_edit_user.dll
│   │   │   │           ├── mcp_edit_user.exe
│   │   │   │           ├── mcp_edit_user.pdb
│   │   │   │           ├── mcp_edit_user.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_edit_user.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_edit_user.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_edit_user.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_edit_user.assets.cache
│   │   │   │   │       ├── mcp_edit_user.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_edit_user.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_edit_user.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_edit_user.dll
│   │   │   │   │       ├── mcp_edit_user.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_edit_user.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_edit_user.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_edit_user.pdb
│   │   │   │   │       ├── mcp_edit.94A30270.Up2Date
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_edit_user.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_edit_user.dll
│   │   │   │   ├── mcp_edit_user.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_edit_user.csproj.nuget.g.props
│   │   │   │   ├── mcp_edit_user.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── 5.2 mcp_delete_user
│   │   │   ├── bin
│   │   │   │   └── Debug
│   │   │   │       └── net9.0
│   │   │   │           ├── mcp_delete_user.deps.json
│   │   │   │           ├── mcp_delete_user.dll
│   │   │   │           ├── mcp_delete_user.exe
│   │   │   │           ├── mcp_delete_user.pdb
│   │   │   │           ├── mcp_delete_user.runtimeconfig.json
│   │   │   │           └── Newtonsoft.Json.dll
│   │   │   ├── mcp_delete_user.csproj
│   │   │   ├── obj
│   │   │   │   ├── Debug
│   │   │   │   │   └── net9.0
│   │   │   │   │       ├── .NETCoreApp,Version=v9.0.AssemblyAttributes.cs
│   │   │   │   │       ├── apphost.exe
│   │   │   │   │       ├── mcp_dele.CEB7E33D.Up2Date
│   │   │   │   │       ├── mcp_delete_user.AssemblyInfo.cs
│   │   │   │   │       ├── mcp_delete_user.AssemblyInfoInputs.cache
│   │   │   │   │       ├── mcp_delete_user.assets.cache
│   │   │   │   │       ├── mcp_delete_user.csproj.AssemblyReference.cache
│   │   │   │   │       ├── mcp_delete_user.csproj.CoreCompileInputs.cache
│   │   │   │   │       ├── mcp_delete_user.csproj.FileListAbsolute.txt
│   │   │   │   │       ├── mcp_delete_user.dll
│   │   │   │   │       ├── mcp_delete_user.GeneratedMSBuildEditorConfig.editorconfig
│   │   │   │   │       ├── mcp_delete_user.genruntimeconfig.cache
│   │   │   │   │       ├── mcp_delete_user.GlobalUsings.g.cs
│   │   │   │   │       ├── mcp_delete_user.pdb
│   │   │   │   │       ├── ref
│   │   │   │   │       │   └── mcp_delete_user.dll
│   │   │   │   │       └── refint
│   │   │   │   │           └── mcp_delete_user.dll
│   │   │   │   ├── mcp_delete_user.csproj.nuget.dgspec.json
│   │   │   │   ├── mcp_delete_user.csproj.nuget.g.props
│   │   │   │   ├── mcp_delete_user.csproj.nuget.g.targets
│   │   │   │   ├── project.assets.json
│   │   │   │   └── project.nuget.cache
│   │   │   └── Program.cs
│   │   ├── Code Archiv
│   │   │   ├── mcp_chat.cs
│   │   │   ├── mcp_continue_chat.cs
│   │   │   ├── mcp_create_source.cs
│   │   │   ├── mcp_delete_group.cs
│   │   │   ├── mcp_delete_source.cs
│   │   │   ├── mcp_delete_user.cs
│   │   │   ├── mcp_edit_source.cs
│   │   │   ├── mcp_edit_user.cs
│   │   │   ├── mcp_get_chat_info.cs
│   │   │   ├── mcp_get_source.cs
│   │   │   ├── mcp_list_groups.cs
│   │   │   ├── mcp_list_sources.cs
│   │   │   ├── mcp_login.cs
│   │   │   ├── mcp_logout.cs
│   │   │   ├── mcp_store_group.cs
│   │   │   └── mcp_store_user.cs
│   │   └── README.md
│   ├── C++
│   │   ├── .vscode
│   │   │   └── launch.json
│   │   ├── 1.0 mcp_login
│   │   │   ├── MCPLoginClient.cpp
│   │   │   └── Non-TLS version
│   │   │       ├── MCPLoginClient.cpp
│   │   │       └── MCPLoginClient.exe
│   │   ├── 1.1 mcp_logout
│   │   │   ├── MCPLogoutClient.cpp
│   │   │   └── MCPLogoutClient.exe
│   │   ├── 2.0 mcp_chat
│   │   │   ├── MCPChatClient.cpp
│   │   │   └── MCPChatClient.exe
│   │   ├── 2.1 mcp_continue_chat
│   │   │   ├── MCPChatContinuationClient.cpp
│   │   │   └── MCPChatContinuationClient.exe
│   │   ├── 2.2 mcp_get_chat_info
│   │   │   ├── MCPGetChatInfoClient.cpp
│   │   │   └── MCPGetChatInfoClient.exe
│   │   ├── 3.0 mcp_create_source
│   │   │   ├── MCPCreateSourceClient.cpp
│   │   │   └── MCPCreateSourceClient.exe
│   │   ├── 3.1 mcp_get_source
│   │   │   ├── MCPGetSourceClient.cpp
│   │   │   └── MCPGetSourceClient.exe
│   │   ├── 3.2 mcp_list_sources
│   │   │   ├── MCPListSourcesClient.cpp
│   │   │   └── MCPListSourcesClient.exe
│   │   ├── 3.3 mcp_edit_source
│   │   │   ├── MCPEditSourceClient.cpp
│   │   │   └── MCPEditSourceClient.exe
│   │   ├── 3.4 mcp_delete_source
│   │   │   ├── MCPDeleteSourceClient.cpp
│   │   │   └── MCPDeleteSourceClient.exe
│   │   ├── 4.0 mcp_list_groups
│   │   │   ├── MCPListGroupsClient.cpp
│   │   │   └── MCPListGroupsClient.exe
│   │   ├── 4.1 mcp_store_group
│   │   │   ├── MCPStoreGroupClient.cpp
│   │   │   └── MCPStoreGroupClient.exe
│   │   ├── 4.2 mcp_delete_group
│   │   │   ├── MPCDeleteGroupClient.cpp
│   │   │   └── MPCDeleteGroupClient.exe
│   │   ├── 5.0 mcp_store_user
│   │   │   ├── MCPStoreUserClient.cpp
│   │   │   └── MCPStoreUserClient.exe
│   │   ├── 5.1 mcp_edit_user
│   │   │   ├── MCPEditUserClient.cpp
│   │   │   └── MCPEditUserClient.exe
│   │   ├── 5.2 mcp_delete_user
│   │   │   ├── MCPDeleteUserClient.cpp
│   │   │   └── MCPDeleteUserClient.exe
│   │   ├── 9.0 mcp_keygen
│   │   │   ├── MCPKeygenClient.cpp
│   │   │   └── MCPKeygenClient.exe
│   │   └── README.md
│   ├── Go
│   │   ├── 1.0 mcp_login
│   │   │   ├── go.mod
│   │   │   ├── MCPLoginClient.exe
│   │   │   └── MCPLoginClient.go
│   │   ├── 1.1 mcp_logout
│   │   │   ├── MCPLogoutClient.exe
│   │   │   └── MCPLogoutClient.go
│   │   ├── 2.0 mcp_chat
│   │   │   ├── MCPChatClient.exe
│   │   │   └── MCPChatClient.go
│   │   ├── 2.1 mcp_continue_chat
│   │   │   ├── MCPChatContinuationClient.exe
│   │   │   └── MCPChatContinuationClient.go
│   │   ├── 2.2 mcp_get_chat_info
│   │   │   ├── MCPGetChatInfoClient.exe
│   │   │   └── MCPGetChatInfoClient.go
│   │   ├── 3.0 mcp_create_source
│   │   │   ├── MCPCreateSourceClient.exe
│   │   │   └── MCPCreateSourceClient.go
│   │   ├── 3.1 mcp_get_source
│   │   │   ├── MCPGetSourceClient.exe
│   │   │   └── MCPGetSourceClient.go
│   │   ├── 3.2 mcp_list_sources
│   │   │   ├── MCPListSourcesClient.exe
│   │   │   └── MCPListSourcesClient.go
│   │   ├── 3.3 mcp_edit_source
│   │   │   ├── MCPEditSourceClient.exe
│   │   │   └── MCPEditSourceClient.go
│   │   ├── 3.4 mcp_delete_source
│   │   │   ├── MCPDeleteSourceClient.exe
│   │   │   └── MCPDeleteSourceClient.go
│   │   ├── 4.0 mcp_list_groups
│   │   │   ├── MCPListGroupsClient.exe
│   │   │   └── MCPListGroupsClient.go
│   │   ├── 4.1 mcp_store_group
│   │   │   ├── MCPStoreGroupClient.exe
│   │   │   └── MCPStoreGroupClient.go
│   │   ├── 4.2 mcp_delete_group
│   │   │   ├── MCPDeleteGroupClient.exe
│   │   │   └── MCPDeleteGroupClient.go
│   │   ├── 5.0 mcp_store_user
│   │   │   ├── MCPStoreUserClient.exe
│   │   │   └── MCPStoreUserClient.go
│   │   ├── 5.1 mcp_edit_user
│   │   │   ├── MCPEditUserClient.exe
│   │   │   └── MCPEditUserClient.go
│   │   ├── 5.2 mcp_delete_user
│   │   │   ├── MCPDeleteUserClient.exe
│   │   │   └── MCPDeleteUserClient.go
│   │   ├── 9.0 mcp_keygen
│   │   │   ├── MCPKeygenClient.exe
│   │   │   └── MCPKeygenClient.go
│   │   └── README.md
│   ├── Gradio
│   │   ├── Api.py
│   │   ├── config.json.example
│   │   ├── config.py
│   │   ├── favicon.ico
│   │   ├── file_tools
│   │   │   └── loader_factory.py
│   │   ├── language.py
│   │   ├── logos
│   │   │   ├── fsas.png
│   │   │   └── Logo_dark.svg
│   │   ├── main.py
│   │   ├── mcp_client.py
│   │   ├── mcp_servers
│   │   │   ├── arxiv
│   │   │   │   ├── arxiv-stdio.js
│   │   │   │   ├── package.json
│   │   │   │   ├── README.md
│   │   │   │   ├── requirements.txt
│   │   │   │   └── server_config.example.json
│   │   │   ├── demo-mcp-server
│   │   │   │   ├── demo-tools-sse.js
│   │   │   │   ├── demo-tools-stdio.js
│   │   │   │   └── tools
│   │   │   │       ├── assets.js
│   │   │   │       ├── calculator.js
│   │   │   │       └── weather.js
│   │   │   ├── filesystem
│   │   │   │   ├── Dockerfile
│   │   │   │   ├── index.ts
│   │   │   │   ├── package.json
│   │   │   │   ├── README.md
│   │   │   │   ├── test
│   │   │   │   │   └── new.txt
│   │   │   │   └── tsconfig.json
│   │   │   ├── moondream
│   │   │   │   └── server.py
│   │   │   ├── pgpt
│   │   │   │   ├── __init__.py
│   │   │   │   ├── Api.py
│   │   │   │   ├── config.json.example
│   │   │   │   ├── config.py
│   │   │   │   ├── language.py
│   │   │   │   ├── pyproject.toml
│   │   │   │   ├── README.md
│   │   │   │   └── server.py
│   │   │   ├── replicate_flux
│   │   │   │   └── server.py
│   │   │   └── sqlite
│   │   │       ├── .python-version
│   │   │       ├── Dockerfile
│   │   │       ├── pyproject.toml
│   │   │       ├── README.md
│   │   │       └── src
│   │   │           └── mcp_server_sqlite
│   │   │               ├── __init__.py
│   │   │               └── server.py
│   │   ├── messages
│   │   │   ├── __init__.py
│   │   │   ├── message_types
│   │   │   │   ├── __init__.py
│   │   │   │   ├── incrementing_id_message.py
│   │   │   │   ├── initialize_message.py
│   │   │   │   ├── json_rpc_message.py
│   │   │   │   ├── ping_message.py
│   │   │   │   ├── prompts_messages.py
│   │   │   │   ├── prompts_models.py
│   │   │   │   ├── resources_messages.py
│   │   │   │   └── tools_messages.py
│   │   │   ├── send_call_tool.py
│   │   │   ├── send_initialize_message.py
│   │   │   ├── send_message.py
│   │   │   ├── send_ping.py
│   │   │   ├── send_prompts.py
│   │   │   ├── send_resources.py
│   │   │   └── send_tools_list.py
│   │   ├── README.md
│   │   ├── requirements.txt
│   │   ├── server_config.json
│   │   ├── SourceManagement.py
│   │   ├── transport
│   │   │   ├── __init__.py
│   │   │   └── stdio
│   │   │       ├── __init__.py
│   │   │       ├── stdio_client.py
│   │   │       ├── stdio_server_parameters.py
│   │   │       └── stdio_server_shutdown.py
│   │   ├── tsconfig.json
│   │   └── UserManagement.py
│   ├── Java
│   │   ├── 1.0 mcp_login
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPLoginClient.class
│   │   │   └── MCPLoginClient.java
│   │   ├── 1.1 mcp_logout
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPLogoutClient.class
│   │   │   └── MCPLogoutClient.java
│   │   ├── 2.0 mcp_chat
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPChatClient.class
│   │   │   └── MCPChatClient.java
│   │   ├── 2.1 mcp_continue_chat
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPContinueChatClient.class
│   │   │   └── MCPContinueChatClient.java
│   │   ├── 2.2 mcp_get_chat_info
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPGetChatInfoClient.class
│   │   │   └── MCPGetChatInfoClient.java
│   │   ├── 3.0 mcp_create_source
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPCreateSourceClient.class
│   │   │   └── MCPCreateSourceClient.java
│   │   ├── 3.1 mcp_get_source
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPGetSourceClient.class
│   │   │   └── MCPGetSourceClient.java
│   │   ├── 3.2 mcp_list_sources
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPListSourcesClient.class
│   │   │   └── MCPListSourcesClient.java
│   │   ├── 3.3 mcp_edit_source
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPEditSourceClient.class
│   │   │   └── MCPEditSourceClient.java
│   │   ├── 3.4 mcp_delete_source
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPDeleteSourceClient.class
│   │   │   └── MCPDeleteSourceClient.java
│   │   ├── 4.0 mcp_list_groups
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPListGroupsClient.class
│   │   │   └── MCPListGroupsClient.java
│   │   ├── 4.1 mcp_store_group
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPStoreGroupClient.class
│   │   │   └── MCPStoreGroupClient.java
│   │   ├── 4.2 mcp_delete_group
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPDeleteGroupClient.class
│   │   │   └── MCPDeleteGroupClient.java
│   │   ├── 5.0 mcp_store_user
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPStoreUserClient.class
│   │   │   └── MCPStoreUserClient.java
│   │   ├── 5.1 mcp_edit_user
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPEditUserClient.class
│   │   │   └── MCPEditUserClient.java
│   │   ├── 5.2 mcp_delete_user
│   │   │   ├── json-20241224.jar
│   │   │   ├── MCPDeleteUserClient.class
│   │   │   └── MCPDeleteUserClient.java
│   │   └── README.md
│   ├── JavaScript
│   │   ├── 1.0 mcp_login
│   │   │   └── MCPLoginClient.js
│   │   ├── 1.1 mcp_logout
│   │   │   └── MCPLogoutClient.js
│   │   ├── 2.0 mcp_chat
│   │   │   └── MCPChatClient.js
│   │   ├── 2.1 mcp_continue_chat
│   │   │   └── MCPContinueChatClient.js
│   │   ├── 2.2 mcp_get_chat_info
│   │   │   └── MCPGetChatInfoClient.js
│   │   ├── 3.0 mcp_create_source
│   │   │   └── MCPCreateSourceClient.js
│   │   ├── 3.1 mcp_get_source
│   │   │   └── MCPGetSourceClient.js
│   │   ├── 3.2 mcp_list_sources
│   │   │   └── MCPListSourcesClient.js
│   │   ├── 3.3 mcp_edit_source
│   │   │   └── MCPEditSourceClient.js
│   │   ├── 3.4 mcp_delete_source
│   │   │   └── MCPDeleteSourceClient.js
│   │   ├── 4.0 mcp_list_groups
│   │   │   └── MCPListGroupsClient.js
│   │   ├── 4.1 mcp_store_group
│   │   │   └── MCPStoreGroupClient.js
│   │   ├── 4.2 mcp_delete_group
│   │   │   └── MCPDeleteGroupClient.js
│   │   ├── 5.0 mcp_store_user
│   │   │   └── MCPStoreUserClient.js
│   │   ├── 5.1 mcp_edit_user
│   │   │   └── MCPEditUserClient.js
│   │   ├── 5.2 mcp_delete_user
│   │   │   └── MCPDeleteUserClient.js
│   │   ├── 9.0 mcp_keygen
│   │   │   └── MCPKeygenClient.js
│   │   └── README.md
│   ├── PHP
│   │   ├── 1.0 mcp_login
│   │   │   └── MCPLoginClient.php
│   │   ├── 1.1 mcp_logout
│   │   │   └── MCPLogoutClient.php
│   │   ├── 2.0 mcp_chat
│   │   │   └── MCPChatClient.php
│   │   ├── 2.1 mcp_continue_chat
│   │   │   └── MCPContinueChatClient.php
│   │   ├── 2.2 mcp_get_chat_info
│   │   │   └── MCPGetChatInfoClient.php
│   │   ├── 3.0 mcp_create_source
│   │   │   └── MCPCreateSourceClient.php
│   │   ├── 3.1 mcp_get_source
│   │   │   └── MCPGetSourceClient.php
│   │   ├── 3.2 mcp_list_sources
│   │   │   └── MCPListSourcesClient.php
│   │   ├── 3.3 mcp_edit_source
│   │   │   └── MCPEditSourceClient.php
│   │   ├── 3.4 mcp_delete_source
│   │   │   └── MCPDeleteSourceClient.php
│   │   ├── 4.0 mcp_list_groups
│   │   │   └── MCPListGroupsClient.php
│   │   ├── 4.1 mcp_store_group
│   │   │   └── MCPStoreGroupClient.php
│   │   ├── 4.2 mcp_delete_group
│   │   │   └── MCPDeleteGroupClient.php
│   │   ├── 5.0 mcp_store_user
│   │   │   └── MCPStoreUserClient.php
│   │   ├── 5.1 mcp_edit_user
│   │   │   └── MCPEditUserClient.php
│   │   ├── 5.2 mcp_delete_user
│   │   │   └── MCPDeleteUserClient.php
│   │   ├── 9.0 mcp_keygen
│   │   │   └── MCPKeygenClient.php
│   │   └── README.md
│   └── Python
│       ├── __init__.py
│       ├── 1.0 mcp_login
│       │   └── MCPLoginClient.py
│       ├── 1.1 mcp_logout
│       │   └── MCPLogoutClient.py
│       ├── 2.0 mcp_chat
│       │   └── MCPChatClient.py
│       ├── 2.1 mcp_continue_chat
│       │   └── MCPContinueChatClient.py
│       ├── 2.2 mcp_get_chat_info
│       │   └── MCPGetChatInfoClient.py
│       ├── 2.3 mcp_delete_all_chats
│       │   └── MCPDeleteAllChatsClient.py
│       ├── 2.4 mcp_delete_chat
│       │   └── MCPDeleteChatClient.py
│       ├── 3.0 mcp_create_source
│       │   └── MCPCreateSourceClient.py
│       ├── 3.1 mcp_get_source
│       │   └── MCPGetSourceClient.py
│       ├── 3.2 mcp_list_sources
│       │   └── MCPListSourcesClient.py
│       ├── 3.3 mcp_edit_source
│       │   └── MCPEditSourceClient.py
│       ├── 3.4 mcp_delete_source
│       │   └── MCPDeleteSourceClient.py
│       ├── 4.0 mcp_list_groups
│       │   └── MCPListGroupsClient.py
│       ├── 4.1 mcp_store_group
│       │   └── MCPStoreGroupClient.py
│       ├── 4.2 mcp_delete_group
│       │   └── MCPDeleteGroupClient.py
│       ├── 5.0 mcp_store_user
│       │   └── MCPStoreUserClient.py
│       ├── 5.1 mcp_edit_user
│       │   └── MCPEditUserClient.py
│       ├── 5.2 mcp_delete_user
│       │   └── MCPDeleteUserClient.py
│       ├── 9.0 mcp_keygen
│       │   └── MCPKeygenClient.py
│       ├── Gradio
│       │   ├── __init__.py
│       │   └── server_config.json
│       └── README.md
├── examples
│   ├── create_users_from_csv
│   │   ├── config.json.example
│   │   ├── config.py
│   │   ├── create_users_from_csv.py
│   │   └── language.py
│   ├── dynamic_sources
│   │   └── rss_reader
│   │       ├── Api.py
│   │       ├── config.json.example
│   │       ├── config.py
│   │       ├── demo_dynamic_sources.py
│   │       └── rss_parser.py
│   ├── example_users_to_add_no_tz.csv
│   └── sftp_upload_with_id
│       ├── Api.py
│       ├── config_ftp.json.example
│       ├── config.py
│       ├── demo_upload.py
│       ├── language.py
│       └── requirements.txt
├── images
│   ├── alternative mcp client.png
│   ├── favicon
│   │   ├── android-chrome-192x192.png
│   │   ├── android-chrome-512x512.png
│   │   ├── apple-touch-icon.png
│   │   ├── favicon-16x16.png
│   │   ├── favicon-32x32.png
│   │   ├── favicon.ico
│   │   └── site.webmanifest
│   ├── mcp-general-architecture.png
│   ├── privateGPT-MCP.png
│   └── privateGPT.png
├── InstallMPCServer.sh
├── jest.config.js
├── LICENSE
├── package.json
├── pgpt.env.json.example
├── README.md
├── security
│   ├── generate_decrypted_password.js
│   └── generate_encrypted_password.js
├── src
│   ├── helper.js
│   ├── index.js
│   ├── logger.js
│   ├── pgpt-messages.js
│   ├── public
│   │   ├── index.html
│   │   └── pgpt-mcp-logo.png
│   ├── services
│   │   └── pgpt-service.ts
│   └── types
│       └── api.ts
├── start_chatbot_agent.ps1
├── start_chatbot_agent.sh
├── start_iot_agent.ps1
├── start_iot_agent.sh
├── start_openai_compatible_api_agent.ps1
├── start_openai_compatible_api_agent.sh
├── tsconfig.json
├── ver
│   ├── index_np.js
│   └── index_proxy_np.js
└── WORKLOG.md
```

# Files

--------------------------------------------------------------------------------
/ver/index_proxy_np.js:
--------------------------------------------------------------------------------

```javascript
![PrivateGPT MCP Server](docs/images/pGPT-MCP.png)

## Table of Contents
- [PrivateGPT MCP Server](#privategpt-mcp-server)
- [What is MCP?](#what-is-mcp)
  - [Why MCP?](#why-mcp)
  - [Why Agents](#why-agents)
  - [How it Works](#how-it-works) 
- [Interaction Between Agents, LLMs, and MCP Servers](#interaction-between-agents-llms-and-mcp-servers)
  - [Scenario](#scenario)
    1. [User Input](#user-input)
    2. [Agent Processing](#agent-processing)
    3. [LLM Interaction](#llm-interaction)
    4. [Response Processing](#response-processing)
    5. [Security and Logging](#security-and-logging)
- [Advantages of Using Agents in This Context](#advantages-of-using-agents-in-this-context)
  - [Modularity](#modularity)
  - [Security](#security)
  - [Efficiency](#efficiency)
  - [Flexibility](#flexibility)
- [Overview](#overview)
- [Security Features Overview](#security)
  - [1. Password Encryption](#1-password-encryption)
  - [2. Key Management](#2-key-management)
  - [3. Decryption on the Server](#3-decryption-on-the-server)
  - [4. Transport Layer Security (TLS)](#4-transport-layer-security-tls)
  - [5. Authorization Tokens](#5-authorization-tokens)
  - [6. Restriction of Key Generation (Keygen)](#6-restriction-of-key-generation-keygen)
  - [7. Certificate-Based Access Control (CBAC)](#7-certificate-based-access-control-cbac)
  - [8. Secure Configuration](#8-secure-configuration)
  - [9. Logging and Monitoring](#9-logging-and-monitoring)
  - [Summary](#summary)
  - [Encrypted Password Generation Tool](#encrypted-password-generation-tool)
    1. [Generate a password for the client and/or the server's Proxy_Config](#generate-encrypted-password)
    2. [Check the generated encrypted password](#check-the-generated-encrypted-password)
  - [Encrypted Password Decryption Tool](#encrypted-password-decryption-tool)
    1. [Check the generated encrypted password](#check-the-generated-encrypted-password)
- [Feature Overview for PGPT Server](#feature-overview-for-pgpt-server)
  - [1. **Authentication and Authorization**](#1-authentication-and-authorization)
  - [2. **Chat Management**](#2-chat-management)
  - [3. **Group Management**](#3-group-management)
  - [4. **Source Management**](#4-source-management)
  - [5. **User Management**](#5-user-management)
  - [6. **Configuration Flexibility**](#6-configuration-flexibility)
  - [7. **Error Handling and Logging**](#7-error-handling-and-logging)
  - [8. **Security Features**](#8-security-features)
  - [Example Use Cases](#example-use-cases)
  - [How to Use](#how-to-use)
- [Installation](#installation)
  - [Prerequisites](#prerequisites)
  - [Install Dependencies](#install-dependencies)
  - [Build the Project](#build-the-project-1)
- [Configuration Description](#configuration-description)
  - [Server Configuration](#server-configuration-1)
  - [PGPT URL](#pgpt-url)
  - [Server Port](#server-port)
  - [Language](#language-1)
  - [SSL Validation](#ssl-validation-1)
  - [Encryption](#encryption)
  - [Group Restrictions](#group-restrictions)
  - [Feature Activation/Deactivation](#feature-activationdeactivation)
- [Usage](#usage-1)
  - [Available Tools](#available-tools)
- [Development](#development-1)
  - [Building](#building-1)
  - [Type Checking](#type-checking)
  - [Linting](#linting)
  - [Testing](#testing)
- [Project Structure](#project-structure-1)
- [Error Handling](#error-handling-1)
- [License](#license)


# PrivateGPT MCP Server
A Model Context Protocol (MCP) server implementation that allows you to use PrivateGPT as an agent for your preferred MCP client. 
This enables seamless integration between PrivateGPT's powerful capabilities and any MCP-compatible application.

## What is MCP?
MCP is an open protocol that standardizes how applications provide context to LLMs. Think of MCP like a USB-C port for AI applications. Just as USB-C provides a standardized way to connect your devices to various peripherals and accessories, MCP provides a standardized way to connect AI models to different data sources and tools.

### Why MCP?
MCP helps you build agents and complex workflows on top of LLMs. LLMs frequently need to integrate with data and tools, and MCP provides:
- A growing list of pre-built integrations that your LLM can directly plug into
- The flexibility to switch between LLM providers and vendors
- Best practices for securing your data within your infrastructure

### How it Works
At its core, MCP follows a client-server architecture where a host application can connect to multiple servers:

![MCP General Architecture](docs/images/mcp-general-architecture.png)

- **MCP Hosts**: Programs such as desktop applications, Claude Desktop, IDEs, or AI tools that want to access data through MCP
- **MCP Clients**: Protocol clients that maintain 1:1 connections with servers
- **MCP Servers**: Lightweight programs that each expose specific capabilities through the standardized Model Context Protocol
- **Local Data Sources**: Your computer's files, databases, and services that MCP servers can securely access
- **Remote Services**: External systems available over the internet (e.g., through APIs) that MCP servers can connect to
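
// To make the client-server flow concrete, here is a minimal sketch of the JSON-RPC 2.0
// "initialize" request an MCP client sends when it first connects to a server. It is not taken
// from this repository; the clientInfo values are placeholders and the protocolVersion is only
// an example revision string.
const initializeRequest = {
  jsonrpc: "2.0",
  id: 1,
  method: "initialize",
  params: {
    protocolVersion: "2024-11-05",                            // example MCP protocol revision
    capabilities: {},                                         // a bare client advertises no extras
    clientInfo: { name: "example-client", version: "0.1.0" }  // hypothetical client identity
  }
};
// The server replies with its own capabilities and the tools it exposes; the client can then
// invoke those tools with "tools/call" requests.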

## Overview
This server provides a bridge between MCP clients and the PrivateGPT API, allowing you to:
- Chat with PrivateGPT using both public and private knowledge bases
- Create and manage knowledge sources
- Organize sources into groups
- Control access through group-based permissions
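
// As a sketch of what "chat with PrivateGPT" looks like on the wire, an MCP client invokes one of
// the server's tools via a "tools/call" request. The tool name "chat" and its arguments below are
// assumptions for illustration only; see the Available Tools section of the full README for the
// actual names and schemas.
const chatToolCall = {
  jsonrpc: "2.0",
  id: 2,
  method: "tools/call",
  params: {
    name: "chat",                                       // assumed tool name
    arguments: {
      question: "Summarize our latest security policy.",
      usePublic: false,                                 // assumed flag: query the private knowledge base
      groups: ["engineering"]                           // assumed group-based restriction
    }
  }
};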


---

# Why Agents
An **agent** in relation to **LLMs** (Large Language Models) and **MCP servers** is a specialized software component that acts as an intermediary between language models and applications. It handles tasks such as processing requests, interacting with the LLM via MCP, managing workflows, ensuring security and efficiency within the overall system, and much more. By utilizing agents, complex AI-based applications can be designed to be effective, secure, and scalable.
**The agent code in this repository can be reused to integrate agents into your own solutions and applications.**

## Interaction Between Agents, LLMs, and MCP Servers
The interaction of these components enables the development of powerful, scalable, and secure AI applications. Below is a simplified scenario that illustrates this interaction:

1. **User Input:** A user sends a request through a user interface hosted on the MCP server.
2. **Agent Processing:** An agent on the MCP server receives the request, validates it, and prepares it for the LLM.
3. **LLM Interaction:** The agent forwards the request to the LLM, which generates a response.
4. **Response Processing:** The agent receives the response from the LLM, processes it further if necessary (e.g., formatting, integration with other data sources), and sends it back to the user.
5. **Security and Logging:** During this process, the agent monitors interactions, ensures that security policies are adhered to, and logs relevant information for later analysis.
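
// Seen from inside an agent, the five numbered steps above collapse into a single request handler.
// This is a purely illustrative sketch, not code from this repository; "llm" stands for whatever
// client the agent uses to reach the LLM through the MCP server, and the validation and logging
// shown are deliberately minimal.
async function handleUserRequest(llm, request) {
  if (typeof request.text !== "string" || request.text.length === 0) {
    throw new Error("invalid request");                              // 2. Agent Processing: validate input
  }
  const llmResponse = await llm.chat(request.text);                  // 3. LLM Interaction (via MCP)
  const answer = llmResponse.trim();                                 // 4. Response Processing (formatting)
  console.log(`[audit] user=${request.user} chars=${answer.length}`); // 5. Security and Logging
  return answer;                                                     // returned to the UI from step 1
}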

## Advantages of Using Agents in This Context
- **Modularity:** Agents allow for a clear separation of responsibilities, making the system easier to maintain and scale.
- **Security:** Centralized management of access and monitoring of activities help minimize security risks.
- **Efficiency:** Automated agents can perform tasks faster and more consistently than manual processes.
- **Flexibility:** Agents can be easily adapted or extended to support new functions or changes in business requirements.

### Importance of Encrypting Passwords
In any application handling sensitive data, security is paramount. This server manages two critical sets of credentials:
1. **Proxy Passwords:** Used by HAProxy to authenticate and manage traffic.
2. **LLM Access Passwords:** Used to secure access to the large language models.

Storing these passwords in plaintext poses significant security risks, including unauthorized access and potential data breaches. 
To mitigate these risks, it is essential to encrypt these passwords and handle only their ciphertext within the system.

### Benefits of Using Ciphertext Only
- **Enhanced Security:** Even if an attacker gains access to the configuration files or environment variables, encrypted passwords remain unintelligible without the corresponding decryption keys.
- **Compliance:** Encryption helps in adhering to security standards and regulations that mandate the protection of sensitive information.
- **Integrity:** Ensures that passwords cannot be tampered with, maintaining the integrity of the authentication mechanisms.

---

# Security
The following security features are implemented to ensure data protection and secure communication between the client application and server. These features cover encryption, decryption, key management, and transport security.

## 1. Password Encryption
Passwords can be encrypted using RSA (Rivest–Shamir–Adleman) public-key cryptography. This ensures that sensitive data, such as user passwords, are never transmitted in plaintext.

### Method
- **Public key encryption** with a **2048-bit key length**.
- **Padding**: `RSA_PKCS1_PADDING` to enhance security and prevent known padding attacks.

### Process
1. The client encrypts the password using the server's public key (`id_rsa_public.pem`).
2. The encrypted password is sent to the server, where it is decrypted using the server's private key.

### Advantages
- **Asymmetric encryption** ensures that only the server can decrypt the password.
- Even if the communication channel is compromised, encrypted data remains secure.
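
For illustration, the client-side step can be reproduced in a few lines of Python. This is a minimal sketch, assuming the third-party `cryptography` package and the public key exported as shown in the Installation section; the repository's own tooling performs the same operation in Node.js (see "Available Tools").

```python
import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding

# Load the server's public key (path is an example).
with open("id_rsa_public.pem", "rb") as f:
    public_key = serialization.load_pem_public_key(f.read())

# Encrypt with PKCS#1 v1.5 padding, matching RSA_PKCS1_PADDING on the server side.
ciphertext = public_key.encrypt(b"my-secret-password", padding.PKCS1v15())

# Only the Base64 ciphertext is transmitted; the plaintext never leaves the client.
print(base64.b64encode(ciphertext).decode())
```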

## 2. Key Management
To secure data communication and encryption processes, the following key management principles are followed:

### Public Key
- Stored securely on the server (`id_rsa_public.pem`).
- Used only for encryption and does not pose a security risk if exposed.

### Private Key
- Stored securely on the server (`id_rsa`).
- Restricted access with appropriate file permissions (`chmod 600`).
- Used exclusively for decryption operations.

### Key Rotation
- Keys are rotated periodically or upon detection of a security incident.
- Old keys are securely invalidated.

## 3. Decryption on the Server
Decryption is exclusively performed on the server using the private key:

### Process
1. The server receives the encrypted password from the client.
2. The private key decrypts the password to retrieve the original plaintext.
3. The decrypted password is used internally (e.g., authentication) and never stored in plaintext.

### Secure Handling
- Decrypted passwords exist in memory only for the duration of processing.
- Secure memory management practices ensure sensitive data is cleared immediately after use.
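
A corresponding server-side sketch in Python (again assuming the `cryptography` package and a private key without passphrase; the actual server does this in Node.js) could look like this:

```python
import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding

def decrypt_password(ciphertext_b64: str, key_path: str = "id_rsa") -> str:
    """Decrypt a Base64 ciphertext produced with the matching public key."""
    with open(key_path, "rb") as f:   # key path is an example
        private_key = serialization.load_pem_private_key(f.read(), password=None)
    plaintext = private_key.decrypt(
        base64.b64decode(ciphertext_b64),
        padding.PKCS1v15(),           # must match the padding used for encryption
    )
    return plaintext.decode("utf-8")  # use immediately; never write to disk or logs
```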

## 4. Transport Layer Security (TLS)
To secure communication between the client and server:

### TLS Encryption
- All data transmitted between the client and server is encrypted using TLS (minimum version 1.2).
- Prevents man-in-the-middle (MITM) attacks and eavesdropping.

### Certificate Validation
- Certificates are validated on both sides to ensure the authenticity of the server and client.
- Optionally, mutual TLS can be enabled for enhanced security.
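
A hedged client-side example, using the `requests` package with placeholder URL and certificate file names, shows how certificate validation and optional mutual TLS look in practice:

```python
import requests

# Placeholder URL and certificate files; adjust to your deployment.
response = requests.post(
    "https://<YOUR DOMAIN OR IP>/api/v1",
    json={"example": "payload"},
    verify="ca_bundle.pem",              # validate the server certificate
    cert=("client.crt", "client.key"),   # only needed when mutual TLS is enabled
    timeout=10,
)
response.raise_for_status()
```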

## 5. Authorization Tokens
Tokens are used to authenticate requests and ensure only authorized users can access the system:

### Token Management
- Tokens are generated upon successful login.
- They are short-lived and automatically expire after a predefined time.
- Tokens are signed using HMAC or RSA, making them tamper-proof.

### Secure Storage
- Tokens are stored securely on the client side (e.g., in memory or encrypted storage).
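
The sketch below shows one way such a short-lived, HMAC-signed token could be issued and verified, using only the Python standard library. It is illustrative; the server's actual token format is not specified here and may differ.

```python
import base64, hashlib, hmac, json, time

SECRET = b"server-side-secret"   # placeholder; never hard-code secrets in production

def issue_token(user: str, ttl_seconds: int = 900) -> str:
    payload = base64.urlsafe_b64encode(
        json.dumps({"sub": user, "exp": int(time.time()) + ttl_seconds}).encode()
    ).decode()
    sig = hmac.new(SECRET, payload.encode(), hashlib.sha256).hexdigest()
    return f"{payload}.{sig}"            # signed, self-describing token

def verify_token(token: str) -> dict:
    payload, sig = token.rsplit(".", 1)
    expected = hmac.new(SECRET, payload.encode(), hashlib.sha256).hexdigest()
    if not hmac.compare_digest(sig, expected):
        raise ValueError("tampered token")
    claims = json.loads(base64.urlsafe_b64decode(payload))
    if claims["exp"] < time.time():
        raise ValueError("expired token")
    return claims

print(verify_token(issue_token("alice")))
```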

## 6. Restriction of Key Generation (Keygen)
To prevent misuse of the system, key generation (`keygen`) is restricted:

### Configuration
- The server has a configuration option (`ALLOW_KEYGEN`) to enable or disable key generation.
- Attempts to call the keygen function when disabled result in an error message.

### Audit Logging
- All keygen operations are logged for auditing and monitoring purposes.

## 7. Certificate-Based Access Control (CBAC)
- Because the agent does not require a password when certificate authentication is enabled and logs on to the server using a key, it is automatically bound to that server.
If it attempts to log in to another MCP PGPT server, the login attempt is rejected because the key is checked against the server's private certificate.

### Features
- Functions such as `keygen`, `store_user`, and `edit_source` are only accessible to authorized roles.
- Unauthorized access attempts are denied with detailed error messages.

### Configuration
- Enabled or disabled functions can be specified in the server's configuration (`Functions` object).
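
Conceptually, the gating works like the following Python sketch, which reads the `Functions` object from an example configuration file and refuses disabled calls. The real server implements this in Node.js, and the messages shown are illustrative.

```python
import json

# Example configuration path; see the sample configuration later in this README.
with open("pgpt.env.json", "r", encoding="utf-8") as f:
    functions_cfg = json.load(f).get("Functions", {})

def call_function(name: str, flag: str) -> dict:
    if not functions_cfg.get(flag, False):
        return {"error": f"The function '{name}' is disabled on this server."}
    return {"ok": f"'{name}' would be executed here."}

print(call_function("store_user", "ENABLE_STORE_USER"))
```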

## 8. Secure Configuration
The server configuration contains several security-related options:

### SSL_VALIDATE
- Validates SSL/TLS certificates to ensure secure communication.
- Can be enabled or disabled based on environment requirements (e.g., testing vs. production).

### PW_ENCRYPTION
- Enables or disables password encryption.
- Ensures compatibility in environments where encryption is not feasible.

## 9. Logging and Monitoring
All security-related events are logged for monitoring and troubleshooting:

### Logged Events
- Failed login attempts.
- Key generation requests.
- Unauthorized access attempts.
- Encryption and decryption errors.


## Summary
The implemented security features ensure:

- **Confidentiality** of sensitive data through encryption and secure transport.
- **Integrity** of operations using robust key management and token validation.
- **Role-based and configurable access control** for system functions.
- **Comprehensive monitoring** for proactive detection and response to security threats.

These measures collectively provide a secure environment for client-server communication and data handling.


# Feature Overview for PGPT Server
The PGPT Server offers a robust set of features designed to provide efficient, flexible, and secure communication with the Model Context Protocol (MCP). Below is an overview of the key features and functionalities available in the server.

---

## Key Features

### 1. **Authentication and Authorization**
- **Login Functionality**: Authenticate users with email and password to generate access tokens.
- **Logout Functionality**: Securely invalidate user tokens to end sessions.

---

### 2. **Chat Management**
- **Start a Chat**: Initiate a conversation with the server, optionally using public knowledge bases or specific group contexts.
- **Continue a Chat**: Resume an ongoing conversation by providing the chat ID and a follow-up message.
- **Retrieve Chat Information**: Fetch metadata and messages for a specific chat by its ID.

---

### 3. **Group Management**
- **List Groups**: View personal and assignable groups available to the user.
- **Create Groups**: Add new groups with names and descriptions for organizational purposes.
- **Delete Groups**: Remove existing groups (if enabled in configuration).

---

### 4. **Source Management**
- **Create Sources**: Add new sources with content and assign them to specific groups.
- **Edit Sources**: Update existing sources with new content or metadata.
- **Delete Sources**: Remove sources that are no longer needed.
- **Retrieve Sources**: Fetch information about a specific source by its ID.
- **List Sources**: View all sources assigned to a specific group.

---

### 5. **User Management**
- **Create Users**: Register new users with customizable roles, groups, and settings.
- **Edit Users**: Update user information, including name, email, password, and roles.
- **Delete Users**: Remove users from the system (if enabled in configuration).

---

### 6. **Configuration Flexibility**
- **Function Activation/Deactivation**: Enable or disable individual server functionalities through the `.env` configuration file.
- **Language Support**: Customize the server's system messages to your preferred language (e.g., English or German).
- **SSL Validation**: Toggle SSL validation for secure connections to the server.

---

### 7. **Error Handling and Logging**
- Detailed error messages and logging for:
  - Invalid requests
  - Authentication failures
  - API communication issues
- Configurable responses when a disabled function is accessed.

---

### 8. **Security Features**
- **Token-Based Authentication**: Ensures secure and controlled access to server features.
- **Restricted Group Access**: Option to limit access to `assignableGroups` for enhanced privacy.

---

### Example Use Cases
1. **Customer Support**: Use chat features to build intelligent conversational agents.
2. **Knowledge Management**: Manage and retrieve structured data with sources and groups.
3. **Multi-User Collaboration**: Create, edit, and assign users to groups for collaborative workflows.
4. **Customizable Functionality**: Activate only the features you need for your application.

---

### How to Use
1. Configure the server by editing the `.env` file.
2. Start the server using the provided script.
3. Interact with the server via API calls to utilize its features.

Refer to the **API Documentation** for detailed usage instructions and examples for each endpoint.

---

The PGPT Server is a powerful tool for managing structured communication and data in a customizable environment. Tailor its features to your needs for maximum efficiency and control.


## Installation
1. Clone the repository:
```bash
git clone https://github.com/pgpt-dev/MCP-Server-for-MAS-Developments.git
cd MCP-Server-for-MAS-Developments
```

2. Install npm:
```bash
sudo apt install npm
```
3. Install Dependencies
```bash
npm install
```

   and 

```bash
npm install dotenv
npm install moment
```

4. Build the project:
```bash
npm run build
```

---

## Configuration Description

### Server Configuration

For secure certificate authentication, create a `.env` file with your PrivateGPT credentials, for example `pgpt.env.json`.
Settings can be adjusted in the `.env` file to customize the server and its functionalities.

Generate the certificates; `.pem` files are needed:
```bash
ssh-keygen -f ~/.ssh/id_rsa.pub -e -m PEM > ~/.ssh/id_rsa_public.pem
ssh-keygen -p -m PEM -f ~/.ssh/id_rsa
```

After this process, you can create ciphertext from passwords by using the Encrypted Password Generation Tool and test the cipher with the Encrypted Password Decryption Tool.
You will find the description of how they work in the `Security` section of this document.
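
As an optional sanity check, the following Python sketch (assuming the `cryptography` package and a private key without passphrase) verifies that the two exported files actually form a matching key pair before you reference them in `PUBLIC_KEY` and `PRIVATE_KEY`:

```python
import os
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding

home = os.path.expanduser("~")
with open(f"{home}/.ssh/id_rsa_public.pem", "rb") as f:
    public_key = serialization.load_pem_public_key(f.read())
with open(f"{home}/.ssh/id_rsa", "rb") as f:
    private_key = serialization.load_pem_private_key(f.read(), password=None)

# An encrypt/decrypt round trip proves the keys belong together.
probe = public_key.encrypt(b"probe", padding.PKCS1v15())
assert private_key.decrypt(probe, padding.PKCS1v15()) == b"probe"
print("Key pair OK")
```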

---

Below is a sample `.env` configuration file for the PGPT server, including descriptions for each setting. 
Customize these values to fit your environment and requirements.

---

## PGPT URL

| Key                       | Description                                              | Example Value                                |
|---------------------------|----------------------------------------------------------|----------------------------------------------|
| **PRIVATE_GPT_API_URL**   | The base URL of the Private GPT API.                     | `https://<YOUR DOMAIN OR IP>/api/v1`         |
| **API_URL**               | Alias for the base API URL.                              | `https://<YOUR DOMAIN OR IP>/api/v1`         |

---

## Server Port
| Key      | Description                                    | Example Value |
|----------|------------------------------------------------|---------------|
| **PORT** | The port on which the MCP server runs.         | `5000`        |

---

## Language
| Key        | Description                                                         | Example Value |
|------------|---------------------------------------------------------------------|---------------|
| **LANGUAGE** | The language for the server's system messages (`en` or `de`).     | `"de"`        |

---

## SSL Validation
| Key            | Description                                                                             | Example Value |
|----------------|-----------------------------------------------------------------------------------------|---------------|
| **SSL_VALIDATE** | Use `"false"` only if the server's certificate cannot be validated by the client.     | `"false"`     |

---

## Encryption
| Key               | Description                                                                          | Example Value                    |
|-------------------|--------------------------------------------------------------------------------------|----------------------------------|
| **PW_ENCRYPTION** | If set to `"true"`, the server only accepts passwords as ciphertext.                         | `"false"`                        |
| **PUBLIC_KEY**    | Specifies the file system path to the server's public PEM file used for password encryption. | `"~/.ssh/id_rsa_public.pem"`     |
| **PRIVATE_KEY**   | Specifies the file system path to the server's private key file used for decryption.         | `"~/.ssh/id_rsa"`                |

---

## Group Restrictions
| Key                 | Description                                                                                     | Example Value |
|---------------------|-------------------------------------------------------------------------------------------------|---------------|
| **RESTRICTED_GROUPS** | Setting `true` prevents client access to `assignableGroups`.                                  | `false`       |

---

## Feature Activation/Deactivation
Control the availability of individual server functions. Set the corresponding value to `true` to enable the function, or `false` to disable it. Disabled functions will return a message indicating they are not available.

| Key                       | Description                                        | Example Value |
|---------------------------|----------------------------------------------------|---------------|
| **ENABLE_LOGIN**          | Enables or disables the login function.            | `true`        |
| **ENABLE_LOGOUT**         | Enables or disables the logout function.           | `true`        |
| **ENABLE_CHAT**           | Enables or disables the chat functionality.        | `true`        |
| **ENABLE_CONTINUE_CHAT**  | Enables or disables continuing a chat.             | `true`        |
| **ENABLE_GET_CHAT_INFO**  | Enables or disables retrieving chat information.   | `true`        |
| **ENABLE_LIST_GROUPS**    | Enables or disables listing groups.                | `true`        |
| **ENABLE_STORE_GROUP**    | Enables or disables creating a group.              | `true`        |
| **ENABLE_DELETE_GROUP**   | Enables or disables deleting a group.              | `false`       |
| **ENABLE_CREATE_SOURCE**  | Enables or disables creating a source.             | `true`        |
| **ENABLE_EDIT_SOURCE**    | Enables or disables editing a source.              | `true`        |
| **ENABLE_DELETE_SOURCE**  | Enables or disables deleting a source.             | `true`        |
| **ENABLE_GET_SOURCE**     | Enables or disables retrieving a source.           | `true`        |
| **ENABLE_LIST_SOURCES**   | Enables or disables listing sources.               | `true`        |
| **ENABLE_STORE_USER**     | Enables or disables creating a user.               | `true`        |
| **ENABLE_EDIT_USER**      | Enables or disables editing a user.                | `false`       |
| **ENABLE_DELETE_USER**    | Enables or disables deleting a user.               | `false`       |

---

## Usage
- **Enable a Function**: Set the corresponding value in the `.env` file to `true`.
- **Disable a Function**: Set the corresponding value in the `.env` file to `false`.
  - The server will respond with a message indicating that the function is disabled.

Example `.env` configuration (e.g., `pgpt.env.json`):
```json
{
    "PGPT_Url": {
        "PRIVATE_GPT_API_URL": "https://<YOUR_PGPT_URL>/api/v1",
        "API_URL": "https://<YOUR_PGPT_URL>/api/v1"
    },
    "Proxy_Config": {
    "USE_PROXY": "true",
    "AUTH_REQUIRED": "true",
    "UNSECURE_PW": "false",
    "USER": "username@<MY_PGPT_DOMAIN>",
    "PASSWORD": "Example: H3i2ufJEV8v5eQTeArvKIvl..."
    },        
    "Server_Config": {
        "PORT": 5000,
        "LANGUAGE": "en",
        "SSL_VALIDATE": "false",
        "PW_ENCRYPTION": "true",
        "ALLOW_KEYGEN": "false",
        "PUBLIC_KEY": "/<SERVER_PATH>/.ssh/id_rsa_public.pem",
        "PRIVATE_KEY": "/<SERVER_PATH>/.ssh/id_rsa"
    },
    "Restrictions": {
        "RESTRICTED_GROUPS": false
    },
    "Functions": {
        "ENABLE_LOGIN": true,
        "ENABLE_LOGOUT": true,
        "ENABLE_CHAT": true,
        "ENABLE_CONTINUE_CHAT": true,
        "ENABLE_GET_CHAT_INFO": true,
        "ENABLE_LIST_GROUPS": true,
        "ENABLE_STORE_GROUP": true,
        "ENABLE_DELETE_GROUP": false,
        "ENABLE_CREATE_SOURCE": true,
        "ENABLE_EDIT_SOURCE": true,
        "ENABLE_DELETE_SOURCE": false,
        "ENABLE_GET_SOURCE": true,
        "ENABLE_LIST_SOURCES": true,
        "ENABLE_STORE_USER": true,
        "ENABLE_EDIT_USER": false,
        "ENABLE_DELETE_USER": false
    }
}
```

## Usage
Start the server:
```bash
node dist/index.js
```

The server will start and listen on stdio for MCP commands.
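
As a quick smoke test, the following Python sketch launches the built server and sends a single JSON-RPC message over stdio. The repository ships full client examples under `clients/` (see the project structure below), so treat this only as an illustration; the protocol revision shown is an assumption.

```python
import json
import subprocess

# Assumes `npm run build` has been executed so dist/index.js exists.
proc = subprocess.Popen(
    ["node", "dist/index.js"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
)

request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "initialize",
    "params": {
        "protocolVersion": "2024-11-05",   # assumed protocol revision
        "clientInfo": {"name": "stdio-demo", "version": "0.0.1"},
        "capabilities": {},
    },
}
proc.stdin.write(json.dumps(request) + "\n")
proc.stdin.flush()
print(proc.stdout.readline())              # the server's response line
```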

## Project Structure
```
src/
  ├── index.js                        # Main server implementation
  ├── types/                          # TypeScript type definitions
  │   └── api.ts                      # API interface types
  └── security/                       # Security Tools
  │   └── generate_encrypted_password # Encrypts the password; use the ciphertext for login. It replaces the plain-text password in the --password parameter and has to be executed on the server.
  │   └── generate_decrypted_password # Decrypts the ciphertext to recover the password. Execute it on the server to ensure the ciphertext contains the correct password.
  └── services/                       # Service implementations
  │   └── pgpt-service.ts             # PrivateGPT API service
  └── clients/                        # Client examples
      └── Python                      # **Python**
      │   └── mcp...                  # Client examples written in Python
      └── C# .Net                     # **C#**
      │   └── Code                    # Original files
      │       └── mcp...              # Client examples written in C#
      └── C++                         # **C++**
      │   └── mcp...                  # Client examples written in C++
      └── Java                        # **Java**
      │   └── mcp...                  # Client examples written in Java
      └── JavaScript                  # **JavaScript**
      │   └── mcp...                  # Client examples written in JavaScript
      └── PHP                         # **PHP**
      │   └── mcp...                  # Client examples written in PHP
      └── Go                          # **Go**
          └── mcp...                  # Client examples written in Go
```

## Error Handling
The server handles various error scenarios:
- Authentication failures
- Network errors
- Invalid requests
- API errors
- Rate limiting
- Timeout errors

Errors are mapped to appropriate MCP error codes and include detailed messages for debugging.
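
As a rough illustration of such a mapping (the authoritative logic lives in `src/index.js`; apart from the standard JSON-RPC values, the specific codes below are assumptions):

```python
# Illustrative error mapping only; codes other than the standard JSON-RPC
# values (-32600, -32603) are assumed, not taken from the server source.
ERROR_MAP = {
    "invalid_request":        {"code": -32600, "message": "Invalid request"},
    "authentication_failure": {"code": -32001, "message": "Authentication failed"},
    "api_error":              {"code": -32603, "message": "Upstream API error"},
    "timeout":                {"code": -32603, "message": "Request timed out"},
}

def to_mcp_error(kind: str, detail: str) -> dict:
    entry = ERROR_MAP.get(kind, {"code": -32603, "message": "Internal error"})
    return {"jsonrpc": "2.0", "id": None, "error": {**entry, "data": {"detail": detail}}}

print(to_mcp_error("timeout", "PrivateGPT API did not respond in time"))
```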

## Available Tools

### Generate Encrypted Password
Generate an encrypted password for the client and/or the server's `Proxy_Config->PASSWORD` entry:
```bash
 node security/generate_encrypted_password.js ~/.ssh/id_rsa_public.pem
```

### Check the generated encrypted password
To check the encryption, use:
```bash
 node security/generate_decrypted_password.js ~/.ssh/id_rsa
```

See the sections `Encrypted Password Generation Tool` and `Encrypted Password Decryption Tool` below for further information.

---

# Encrypted Password Generation Tool

## Overview
The **Encrypted Password Generation Tool** is a Node.js script designed to securely encrypt user passwords using RSA public-key cryptography. This tool ensures that sensitive password data remains protected during transmission and storage by leveraging robust encryption mechanisms. It is an essential component for systems requiring secure password handling and transmission between clients and servers.

## Features
- **Secure Encryption:** Utilizes RSA (Rivest–Shamir–Adleman) public-key cryptography to encrypt sensitive passwords.
- **User-Friendly Interface:** Prompts users to input their passwords securely via the command line.
- **Error Handling:** Provides comprehensive error messages for missing keys or encryption issues.
- **Flexible Integration:** Can be integrated into larger systems requiring secure password handling and encryption.

## How It Works
1. **Public Key Loading:** The script loads the RSA public key from a specified file path provided as a command-line argument.
2. **Password Input:** It prompts the user to enter their password securely via the command line.
3. **Encryption Process:** Using the loaded public key and `RSA_PKCS1_PADDING`, the script encrypts the entered password.
4. **Output:** The encrypted password is displayed in Base64 format, ready for secure transmission or storage.

## Prerequisites
- **Node.js:** Ensure that Node.js is installed on your system. You can download it from the [Node.js Official Website](https://nodejs.org/).
- **RSA Public Key:** You must have access to the RSA public key (`id_rsa_public.pem`) used for encrypting the password.

## Installation
-  **Install Dependencies:**
    The script uses built-in Node.js modules, so no additional dependencies are required. However, ensure that your Node.js version supports ES6 modules.
    ```bash
    npm install
    ```

## Usage
1. **Prepare Your RSA Public Key:**
    Ensure you have your RSA public key (`id_rsa_public.pem`) stored securely on your MCP server.

2. **Run the script (you will find it in the `security` directory of the MCP server):**
    Execute the script using Node.js, providing the path to your public key as a command-line argument.
    ```bash
    node encrypt_password.js /path/to/your/id_rsa_public.pem
    ```

    **Example:**
    ```bash
    node security/encrypt_password.js ~/.ssh/id_rsa_public.pem
    ```

3. **Enter Your Password:**
    When prompted, input your password securely.
    ```bash
    Please enter your password: ********
    ```

4. **View the Encrypted Password:**
    The script will output the encrypted password in Base64 format.
    ```bash
    Encrypted Password: <Your_Encrypted_Password>
    ```

---

# Encrypted Password Decryption Tool

## Overview
The **Encrypted Password Decryption Tool** is a Node.js script designed to securely decrypt encrypted passwords using RSA private-key cryptography. 
This tool ensures that sensitive password data remains protected during transmission and storage by leveraging robust encryption and decryption mechanisms.
To verify or decrypt an encrypted password, use the private key. This is helpful to ensure that the encryption was performed correctly.

## Features
- **Secure Decryption:** Utilizes RSA (Rivest–Shamir–Adleman) private-key cryptography to decrypt sensitive password data.
- **Error Handling:** Provides comprehensive error messages for missing keys or decryption issues.
- **User-Friendly Interface:** Prompts users to input encrypted passwords securely via the command line.
- **Flexible Integration:** Can be integrated into larger systems requiring secure password handling.

## How It Works
1. **Private Key Loading:** The script loads the RSA private key from a specified file path provided as a command-line argument.
2. **Encrypted Password Input:** It prompts the user to enter an encrypted password in Base64 format.
3. **Decryption Process:** Using the loaded private key and RSA_PKCS1_PADDING, the script decrypts the encrypted password.
4. **Output:** The decrypted plaintext password is displayed in the console.

## Prerequisites
- **Node.js:** Ensure that Node.js is installed on your system. You can download it from [Node.js Official Website](https://nodejs.org/).
- **RSA Private Key:** You must have access to the RSA private key (`id_rsa`) on your MCP server used for decrypting the password.

## Installation
- **Install Dependencies:**
    The script uses built-in Node.js modules, so no additional dependencies are required. However, ensure that your Node.js version supports ES6 modules.

## Usage
1. **Prepare Your RSA Private Key:**
    Ensure you have your RSA private key (`id_rsa`) stored securely on your machine.

2. **Run the script (you will find it in the `security` directory of the MCP server):**
    Execute the script using Node.js, providing the path to your private key as a command-line argument.
    ```bash
    node decrypt_password.js /path/to/your/id_rsa
    ```

    **Example:**
    ```bash
    node decrypt_password.js ~/.ssh/id_rsa
    ```

3. **Enter the Encrypted Password:**
    When prompted, input the encrypted password in Base64 format.
    ```bash
    Please enter the encrypted password: <Your_Encrypted_Password>
    ```

4. **View the Decrypted Password:**
    The script will output the decrypted plaintext password.
    ```bash
    Decrypted Password: your_plaintext_password
    ```
This will decrypt the encrypted password and display the original value.


### Notes
- Ensure that the `~/.ssh/id_rsa_public.pem` (public key) and `~/.ssh/id_rsa` (private key) files exist and have the correct permissions.
- The encryption tool relies on the public key, while the decryption tool requires the private key.

## License
This project is licensed under the MIT License - see the LICENSE file for details.

```

--------------------------------------------------------------------------------
/agents/ISMAgent/Python/ism_agent.py:
--------------------------------------------------------------------------------

```python
# ============================================================
# ISM Agent – generate descriptive text for ISM nodes
# - Structured white console logs (emoji-safe, aligned)
# - Optional AgentInterface imports (PrivateGPTAgent, etc.)
# - HTTP (FIPA-ACL) request to chatbot agent
# - Robust error handling, retries with backoff, NDJSON events
# - Paths (input/output/logs) are read from config["paths"]
# - After successful input read: archive input with sequential extension
# - NEW: Optional SFTP upload of the output file after completion
# - NEW: Delete local output file upon successful SFTP upload
# - NEW: log the duration between chatbot request and response
# ============================================================

import json
import re
import time
import sys
import argparse
import logging
import os
import shutil
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

import requests
from colorama import init as colorama_init, Fore, Style
from wcwidth import wcswidth  # emoji/wide-char aware width calc

# NEW: SFTP deps
import paramiko
import posixpath

# ------------------------------------------------------------
# OPTIONAL: AgentInterface (PrivateGPT) – best-effort imports
# ------------------------------------------------------------
try:
    from .AgentInterface.Python.agent import PrivateGPTAgent, GroupValidationError
    from .AgentInterface.Python.config import Config as PGPTConfig, ConfigError as PGPTConfigError
    from .AgentInterface.Python.language import languages as pgpt_languages
    from .AgentInterface.Python.color import Color
    _HAS_AGENT_IFACE = True
except Exception:
    _HAS_AGENT_IFACE = False

colorama_init()  # enable ANSI handling on Windows

# ============================================================
# Structured console / file logging (white text only)
# ============================================================
class StructuredLog:
    """IoT-style structured console/file logger with white text only and emoji-safe alignment."""
    COL_TIME = 19
    COL_ICON = 4
    COL_COMP = 12
    COL_ACT = 14
    COL_DIR = 9

    LEVEL_ICONS = {
        "DEBUG": "🐛",
        "INFO": "ℹ️",
        "WARNING": "⚠️",
        "ERROR": "❌",
        "CRITICAL": "‼️",
    }

    def __init__(self, ndjson_path: Optional[str] = None, use_color: bool = True):
        self.ndjson_path = ndjson_path
        self.use_color = use_color
        self._ensure_dir()

    # ---------------- internal helpers ----------------
    def _ensure_dir(self) -> None:
        if self.ndjson_path:
            os.makedirs(os.path.dirname(self.ndjson_path), exist_ok=True)
            if not os.path.exists(self.ndjson_path):
                with open(self.ndjson_path, "w", encoding="utf-8"):
                    pass

    @staticmethod
    def _ts() -> str:
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    @staticmethod
    def _pad_raw(s: str, width: int) -> str:
        """Pad string to visual width 'width' (handles emojis & wide chars)."""
        s = (s or "")
        vis = wcswidth(s)
        if vis < 0:
            vis = len(s)
        return s + " " * max(0, width - vis)

    @staticmethod
    def _pad_raw_right_aligned(s: str, width: int) -> str:
        """Pad string to visual width 'width', right-aligned."""
        s = (s or "")
        vis = wcswidth(s)
        if vis < 0:
            vis = len(s)
        return " " * max(0, width - vis) + s

    def _white(self, text: str) -> str:
        """Force bright white output for all columns."""
        if not self.use_color:
            return text
        return f"{Style.BRIGHT}{Fore.WHITE}{text}{Style.RESET_ALL}"

    def _icon(self, kind: str, level: Optional[str] = None) -> str:
        """Return an icon (emoji + trailing pipe)."""
        if level:
            lvl = level.upper()
            if lvl in self.LEVEL_ICONS:
                return self.LEVEL_ICONS[lvl] #+ " |"
        return "ℹ️ "

    # >>> FIXED HERE <<< (the 'level' parameter was removed)
    def _line(self, icon: str, component: str, action: str, direction: str, message: str) -> str:
        # the icon is already fully formatted (e.g. "ℹ️ |" or "‼️ |")
        t_col = self._pad_raw(self._ts(), self.COL_TIME)
        i_col = self._pad_raw(icon, self.COL_ICON)
        c_col = self._pad_raw(component, self.COL_COMP)
        a_col = self._pad_raw(action, self.COL_ACT)

        # right-align certain fields
        if (
            component.lower() == "filesystem" and (action == ":write" or action == ":append")
        ) or (component.lower() == "ism" and action == ":process"):
            d_col = self._pad_raw_right_aligned(direction, self.COL_DIR)
        else:
            d_col = self._pad_raw(direction, self.COL_DIR)

        c_col = self._white(c_col)
        a_col = self._white(a_col)
        d_col = self._white(d_col)
        msg_col = self._white(message or "")

        return "".join(
            [
                t_col,
                " | ",
                i_col,
                " ",
                c_col,
                " ",
                a_col,
                " ",
                d_col,
                " | ",
                msg_col,
            ]
        )

    # ---------------- public API ----------------
    def console(
        self,
        icon_kind: str,
        component: str,
        action: str,
        direction: str,
        message: str,
        level: str = "info",
    ) -> None:
        line = self._line(self._icon(icon_kind, level), component, action, direction, message)
        lvl = (level or "info").lower()
        if lvl == "error":
            logging.error(line)
        elif lvl == "warning":
            logging.warning(line)
        elif lvl == "debug":
            logging.debug(line)
        elif lvl == "critical":
            logging.critical(line)
        else:
            logging.info(line)

    def file_event(self, **record) -> None:
        """Append a JSON record to NDJSON log (if configured)."""
        if not self.ndjson_path:
            return
        record.setdefault("ts", self._ts())
        with open(self.ndjson_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(record, ensure_ascii=False) + "\n")


slog: Optional[StructuredLog] = None


# ============================================================
# Logging setup
# ============================================================
def setup_logging(verbose: bool):
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(level=level, format="%(message)s")


# ============================================================
# Config / Input Utilities
# ============================================================
def load_config(path: Path) -> Dict[str, Any]:
    if not path.exists():
        raise FileNotFoundError(f"Configuration file not found: {path}")
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid JSON in configuration file: {path} ({e})")

    chatbot = data.get("chatbot_agent", {})
    if not chatbot.get("api_url"):
        raise ValueError("Missing 'chatbot_agent.api_url' in config.")
    if not chatbot.get("api_key"):
        raise ValueError("Missing 'chatbot_agent.api_key' in config.")

    chatbot.setdefault("use_public", True)
    chatbot.setdefault("groups", [])
    chatbot.setdefault("timeout_seconds", 20)
    chatbot.setdefault("prompt_template", "prompt_template parameter not set. Repeat this sentence.")
    data["chatbot_agent"] = chatbot

    data.setdefault("language", "en")

    paths = data.get("paths", {})
    paths.setdefault("input", "agents/ISMAgent/data/ism_nodes.json")
    paths.setdefault("inventory", "agents/ISMAgent/data/ism_inventory.json")
    paths.setdefault("output", "agents/ISMAgent/output/ism_nodes_report.txt")
    paths.setdefault("ndjson", "agents/ISMAgent/logs/ism_agent.ndjson")
    paths.setdefault("dump_json_dir", "agents/ISMAgent/logs/node_json")
    paths.setdefault("archive_dir", "agents/ISMAgent/archive")
    data["paths"] = paths

    # optional SFTP configuration
    sftp = data.get("sftp", {})
    if sftp:
        sftp.setdefault("enabled", True)
        sftp.setdefault("port", 22)
        sftp.setdefault("remote_path", "/")
        sftp.setdefault("remote_filename", None)
        data["sftp"] = sftp

    return data


def _parse_pdf_json(pdf_path: Path) -> Dict[str, Any]:
    try:
        import PyPDF2
    except Exception as e:
        raise RuntimeError("PyPDF2 is required to read PDFs. Install via 'pip install PyPDF2'.") from e

    text_chunks = []
    with open(pdf_path, "rb") as f:
        reader = PyPDF2.PdfReader(f)
        for page in reader.pages:
            text_chunks.append(page.extract_text() or "")
    full = "\n".join(text_chunks)
    start, end = full.find("{"), full.rfind("}")
    if start == -1 or end == -1 or end <= start:
        raise ValueError(f"No JSON found inside PDF: {pdf_path}")
    json_text = full[start : end + 1]
    json_text = re.sub(r"-\n", "", json_text)
    json_text = re.sub(r"[ \t\r\f\v]+", " ", json_text)
    try:
        return json.loads(json_text)
    except json.JSONDecodeError:
        json_text_2 = re.sub(r",\s*([}\]])", r"\1", json_text)
        return json.loads(json_text_2)


def load_nodes(path: Path) -> List[Dict[str, Any]]:
    if slog:
        slog.console("info", "ism", ":filesystem", "read", f"Reading input file: {path}")

    if not path.exists():
        if slog:
            slog.console("error", "ism", ":filesystem", "Error", f"Input file not found: {path}", level="error")
        raise FileNotFoundError(f"Input file not found: {path}")

    suffix = path.suffix.lower()
    if suffix == ".json":
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except json.JSONDecodeError as e:
            if slog:
                slog.console("error", "ism", ":json", "Error", f"Invalid JSON: {e}", level="error")
            raise
    elif suffix == ".pdf":
        data = _parse_pdf_json(path)
    else:
        raise ValueError(f"Unsupported input file type: {suffix} (expected .json or .pdf)")

    if isinstance(data, dict):
        nodes = (data.get("IsmBody") or {}).get("Nodes") or []
    elif isinstance(data, list):
        nodes = data
    else:
        raise ValueError("Unexpected JSON type (expected dict or list).")

    if not isinstance(nodes, list):
        raise ValueError("'Nodes' field is not a list.")

    if not nodes:
        if slog:
            slog.console("warning", "ism", ":json", "-", "No nodes found in input.", level="warning")
    else:
        if slog:
            slog.console("info", "ism", ":json", "-", f"{len(nodes)} nodes loaded.")
    if slog:
        slog.file_event(event="nodes_loaded", count=len(nodes), source=str(path))
    return nodes


def load_inventory_map(path: Path) -> Dict[int, Dict[str, Any]]:
    if slog:
        slog.console("info", "ism", ":filesystem", "read", f"Reading inventory file: {path}")

    if not path.exists():
        if slog:
            slog.console(
                "warning",
                "ism",
                ":filesystem",
                "Warn",
                f"Inventory file not found: {path}. Continuing without detailed inventory.",
                level="warning",
            )
        return {}

    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except json.JSONDecodeError as e:
        if slog:
            slog.console(
                "error",
                "ism",
                ":json",
                "Error",
                f"Invalid JSON in inventory: {e}. Continuing with empty inventory.",
                level="error",
            )
        return {}

    nodes = (data.get("IsmBody") or {}).get("Nodes") or []
    inventory_map = {}

    for node in nodes:
        node_id = node.get("NodeId")
        if node_id is not None:
            inventory_map[int(node_id)] = node.get("VariableData", {})

    if slog:
        slog.console("info", "ism", ":json", "-", f"{len(inventory_map)} inventory details mapped.")
        slog.file_event(event="inventory_mapped", count=len(inventory_map), source=str(path))

    return inventory_map


# ============================================================
# Health check (optional)
# ============================================================
def check_server_health(cfg: Dict[str, Any]) -> None:
    url = (cfg.get("chatbot_agent") or {}).get("health_url")
    if not url:
        return
    try:
        r = requests.get(url, timeout=5)
        if r.status_code >= 400:
            if slog:
                slog.console("warning", "chatbot", ":health", "Warn", f"{r.status_code}: {r.text[:200]}", level="warning")
    except Exception as e:
        if slog:
            slog.console("warning", "chatbot", ":health", "Error", f"{e}", level="warning")


# ============================================================
# Node parameter builder
# ============================================================
def _v(x: Any) -> str:
    if x is None:
        return ""
    s = str(x).strip()
    return s if s and s not in ("-", "None", "null", "NULL") and not s.endswith(" -") else ""


def node_params(node: Dict[str, Any], inventory_map: Dict[int, Dict[str, Any]]) -> Dict[str, Any]:
    node_id = int(node.get("NodeId", 0))
    inv_data = inventory_map.get(node_id, {})

    params = {
        "Node Name": _v(node.get("Name")),
        "NodeId": _v(node.get("NodeId")),
        "Type": _v(node.get("Type")),
        "Model": _v(node.get("Model")),
        "Status": _v(node.get("Status")),
        "AlarmStatus": _v(node.get("AlarmStatus")),
        "PowerStatus": _v(node.get("PowerStatus")),
        "IP-Version": _v(node.get("IpVersion")),
        "IP": _v(node.get("IpAddress")),
        "WEB-URL": _v(node.get("WebUrl")),
        "Rack Position": _v((node.get("RackInfo") or {}).get("Name")),
        "Node Group": _v(node.get("NodeGroupName")),
    }

    if inv_data:
        cpus = inv_data.get("Cpus", [])
        if cpus:
            cpu_model = _v(cpus[0].get("Model"))
            cpu_core_speed = _v(cpus[0].get("CoreSpeed"))
            cpu_count = len(cpus)
            if cpu_model and cpu_core_speed:
                params["CPU Summary"] = f"{cpu_count}x {cpu_model} @ {_v(cpu_core_speed)}MHz"
            elif cpu_count > 0:
                params["CPU Summary"] = f"{cpu_count}x CPU (Details missing)"

        memory_modules = [m for m in inv_data.get("MemoryModules", []) if _v(m.get("MemorySize"))]
        total_mem_gb = 0
        if memory_modules:
            for m in memory_modules:
                size_str = _v(m.get("MemorySize"))
                if size_str and "GB" in size_str:
                    try:
                        total_mem_gb += int(size_str.replace("GB", "").strip())
                    except ValueError:
                        pass
            if total_mem_gb > 0:
                mem_freq = _v(memory_modules[0].get("Frequency"))
                params["Memory Summary"] = f"{len(memory_modules)} physical modules, {total_mem_gb}GB total RAM @ {mem_freq}"

        disks = inv_data.get("Disks", [])
        if disks:
            disk_count = len(disks)
            disk_types = ", ".join(
                sorted(list(set([_v(d.get("MediaType")) for d in disks if _v(d.get("MediaType"))])))
            )
            disk_models = ", ".join(
                sorted(list(set([_v(d.get("Model")) for d in disks if _v(d.get("Model"))])))
            )
            total_raid_capacity_bytes = sum(
                [
                    int(_v(r.get("TotalCapacity", 0)))
                    for r in inv_data.get("Raid", [])
                    if _v(r.get("TotalCapacityUnit")) == "B"
                ]
            )
            total_raid_capacity_tb = round(total_raid_capacity_bytes / (1000**4), 2)
            if disk_types or disk_models or total_raid_capacity_bytes > 0:
                params["Storage Summary"] = (
                    f"{disk_count} disks ({disk_types}), "
                    f"{total_raid_capacity_tb}TB RAID capacity, models: {disk_models}"
                )

        os_list = inv_data.get("ElcmStatus", {}).get("SupportedOsList", [])
        if os_list:
            supported_os = ", ".join(
                sorted(list(set([_v(os.get("OsType")) for os in os_list if _v(os.get("OsType"))])))
            )
            if supported_os:
                params["Supported OS List"] = supported_os

        firmware_details = []
        for fw in inv_data.get("Firmware", []):
            fw_type = _v(fw.get("Type"))
            fw_version = _v(fw.get("FirmwareVersion"))
            fw_model = _v(fw.get("Model"))
            if fw_type and fw_version:
                detail = f"{fw_model or 'Unknown'} {fw_type}: {fw_version}"
                firmware_details.append(detail)
        if firmware_details:
            params["Firmware Details"] = "; ".join(firmware_details)

        disk_health_issues = [d for d in disks if _v(d.get("Health")) and int(_v(d.get("Health"))) < 100]
        if disk_health_issues:
            params["Hardware Issues"] = "Disk health warning or failure detected."
            params["Disk Health Issues"] = (
                f"{len(disk_health_issues)} disks report issues (e.g., predicted life left < 100%)."
            )

    description = _v(node.get("Description"))
    if description:
        params["Node Description"] = description

    hardware_issues_from_nodes = _v(node.get("HardwareIssues")) or "No specific hardware problems mentioned."
    params.setdefault("Hardware Issues", hardware_issues_from_nodes)

    final_params = {k: v for k, v in params.items() if v and v not in ("-", "None", "null", "NULL", "Not specified")}
    return final_params


# ============================================================
# Chatbot Request (FIPA-ACL) – One Node per Request with retry
# ============================================================
def generate_logical_sentence(
    parameters: Dict[str, Any],
    language_code: str,
    config: Dict[str, Any],
    use_public: Optional[bool] = None,
    groups: Optional[List[str]] = None,
    wait_seconds: float = 5.0,
    max_retries: int = 5,
) -> str:
    attempt = 0
    last_error = None

    prompt_template = config.get("chatbot_agent", {}).get("prompt_template")
    if not prompt_template:
        prompt_template = (
            "Generate a fluent, well-written paragraph in {language_code} describing the following node. "
            "It should read like a technical report (no tables or bullet points). "
            "Here are the data:\n"
            "{json_data}"
        )

    prompt = prompt_template.format(
        language_code=language_code.upper(),
        json_data=json.dumps(parameters, ensure_ascii=False, indent=4),
    )

    if use_public is None:
        use_public = bool(config.get("chatbot_agent", {}).get("use_public", True))
    if groups is None:
        groups = config.get("chatbot_agent", {}).get("groups", [])
    if not isinstance(groups, list):
        groups = []

    timeout_sec = int(config.get("chatbot_agent", {}).get("timeout_seconds", 20))
    api_url = config["chatbot_agent"]["api_url"]

    node_name = parameters.get("Node Name", "<unknown>")

    while attempt < max_retries:
        attempt += 1
        try:
            payload = {
                "performative": "request",
                "sender": "ISM_Agent",
                "receiver": "Chatbot_Agent",
                "language": "fipa-sl",
                "ontology": "fujitsu-iot-ontology",
                "content": {
                    "question": prompt,
                    "usePublic": use_public,
                    "groups": groups,
                    "language": language_code or config.get("language", "en"),
                    "json_data": parameters,
                    "node": parameters,  # falls der Server 'node' statt 'json_data' erwartet
                },
            }

            headers = {
                "Content-Type": "application/json",
                "X-API-KEY": config["chatbot_agent"]["api_key"],
            }

            if slog:
                slog.console("cb", "chatbot", ":request", "Outgoing", f"Request for node: {node_name}")
                slog.file_event(event="request", component="chatbot_agent", node=node_name)

            # >>> NEW: request/response timing
            t_start = time.perf_counter()
            response = requests.post(api_url, json=payload, headers=headers, timeout=timeout_sec)
            t_end = time.perf_counter()
            elapsed = t_end - t_start  # seconds as float

            if slog:
                slog.console(
                    "cb",
                    "chatbot",
                    ":response",
                    "Incoming",
                    f"{response.status_code} ({elapsed:.3f}s)",
                )
                slog.file_event(
                    event="response",
                    status=response.status_code,
                    node=node_name,
                    elapsed_seconds=round(elapsed, 3),
                )
            # <<< END NEW

            if response.status_code != 200:
                try:
                    body = response.json()
                except Exception:
                    body = response.text
                raise RuntimeError(f"HTTP {response.status_code}: {body}")

            data = response.json()
            generated_sentence = (
                (data.get("content") or {}).get("answer")
                or data.get("answer")
                or data.get("response")
                or ""
            )
            if not generated_sentence:
                raise RuntimeError(f"Empty response from chatbot agent: {data}")

            return generated_sentence.strip()

        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
            last_error = f"Network error: {e}"
            backoff = min(wait_seconds * (2 ** (attempt - 1)), 30)
            if slog:
                slog.console("warning", "chatbot", ":network", "Error", last_error, level="warning")
            time.sleep(backoff)
        except Exception as e:
            last_error = f"Chatbot request failed: {e}"
            backoff = min(wait_seconds * (2 ** (attempt - 1)), 30)
            if slog:
                slog.console("warning", "chatbot", ":error", "-", last_error, level="warning")
            time.sleep(backoff)

    raise RuntimeError(last_error or "Unknown chatbot request error.")


# ============================================================
# Per-node JSON dump helpers
# ============================================================
def safe_filename(s: str) -> str:
    s = re.sub(r"[^\w\-.]+", "_", s.strip())
    return s or "noname"


def dump_node_json(
    dump_dir: Optional[Path],
    idx: int,
    node_name: str,
    params: Dict[str, Any],
    answer: Optional[str] = None,
):
    if dump_dir is None:
        return
    dump_dir.mkdir(parents=True, exist_ok=True)
    ts = datetime.now().strftime("%Y%m%d%H%M%S")
    fname = f"{ts}_{idx:05d}_{safe_filename(node_name)}.json"
    payload = {
        "node": node_name,
        "index": idx,
        "parameters": params,
        "answer": answer,
        "timestamp": ts,
    }

    json_text = json.dumps(payload, ensure_ascii=False, indent=2)
    dump_length_bytes = len(json_text.encode("utf-8"))

    (dump_dir / fname).write_text(json_text, encoding="utf-8")

    if slog:
        slog.console(
            "file",
            "filesystem",
            ":write",
            str(dump_length_bytes) + " B",
            f"Record added to {dump_dir.name}/{fname}",
        )
        slog.file_event(
            event="node_dump_written",
            file=str(dump_dir / fname),
            node=node_name,
            size=dump_length_bytes,
        )


# ============================================================
# Input Archiving helpers
# ============================================================
def _next_archive_path(src: Path, archive_dir: Path) -> Path:
    archive_dir.mkdir(parents=True, exist_ok=True)
    stem_with_suffix = src.name
    max_idx = 0
    for p in archive_dir.glob(stem_with_suffix + ".*"):
        suf = p.suffix
        if len(suf) >= 2 and suf[1:].isdigit():
            try:
                idx = int(suf[1:])
                if idx > max_idx:
                    max_idx = idx
            except ValueError:
                pass
    next_idx = max_idx + 1
    return archive_dir / f"{stem_with_suffix}.{next_idx:03d}"


def archive_input_file(src: Path, archive_dir: Path) -> Optional[Path]:
    try:
        try:
            src.resolve().relative_to(archive_dir.resolve())
            return None
        except Exception:
            pass

        if not src.exists():
            return None
        target = _next_archive_path(src, archive_dir)
        shutil.move(str(src), str(target))
        if slog:
            slog.console("file", "filesystem", ":archive", "-", f"Archived input to: {target}")
            slog.file_event(event="input_archived", source=str(src), target=str(target))
        return target
    except Exception as e:
        if slog:
            slog.console("warning", "filesystem", ":archive", "Error", f"{e}", level="warning")
            slog.file_event(event="archive_failed", source=str(src), error=str(e))
        return None


# ============================================================
# NEW: SFTP helpers
# ============================================================
def _sftp_mkdirs(sftp: paramiko.SFTPClient, remote_dir: str) -> None:
    remote_dir = posixpath.normpath(remote_dir)
    parts = [p for p in remote_dir.split("/") if p]
    path = "/"
    for p in parts:
        path = posixpath.join(path, p)
        try:
            sftp.chdir(path)
        except IOError:
            sftp.mkdir(path)
            sftp.chdir(path)


def sftp_upload_file(local_path: Path, sftp_cfg: Dict[str, Any]) -> bool:
    enabled = sftp_cfg.get("enabled", False)
    host = sftp_cfg.get("host")
    user = sftp_cfg.get("user") or sftp_cfg.get("username")
    pwd = sftp_cfg.get("password")
    port = int(sftp_cfg.get("port", 22))
    remote_base = sftp_cfg.get("remote_path", "/")
    remote_name = sftp_cfg.get("remote_filename") or local_path.name

    if not enabled or not host or not user or not pwd:
        if slog:
            slog.console("warning", "sftp", ":config", "Skip", "SFTP enabled but config incomplete.", level="warning")
            slog.file_event(event="sftp_skipped", reason="incomplete_config")
        return False

    transport = None
    try:
        if slog:
            slog.console("info", "sftp", ":connect", "Outgoing", f"{user}@{host}:{port}")
        transport = paramiko.Transport((host, port))
        transport.connect(username=user, password=pwd)
        sftp = paramiko.SFTPClient.from_transport(transport)

        _sftp_mkdirs(sftp, remote_base)

        remote_path = posixpath.join(remote_base, remote_name)
        sftp.put(str(local_path), remote_path)

        if slog:
            slog.console("info", "sftp", ":put", "Outgoing", f"{local_path} → {remote_path}")
            slog.file_event(event="sftp_upload_ok", local=str(local_path), remote=remote_path)

        sftp.close()
        transport.close()
        return True
    except Exception as e:
        if slog:
            slog.console("error", "sftp", ":put", "Error", f"{e}", level="error")
            slog.file_event(event="sftp_upload_failed", local=str(local_path), error=str(e))
        if transport:
            try:
                transport.close()
            except Exception:
                pass
        return False


# ============================================================
# Main
# ============================================================
def main():
    parser = argparse.ArgumentParser(description="ISM Agent – robust generator for ISM nodes.")
    parser.add_argument("--config", default="agents/ISMAgent/config.json", help="Path to config.json")
    parser.add_argument("--language", help="Override language from config (optional)")
    parser.add_argument("--delay", type=float, default=0.5, help="Seconds to wait between requests")
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    args = parser.parse_args()

    setup_logging(args.verbose)
    global slog

    cfg = load_config(Path(args.config))
    paths = cfg.get("paths", {})
    input_path = Path(paths.get("input", "agents/ISMAgent/data/ism_nodes.json"))
    inventory_path = Path(paths.get("inventory", "agents/ISMAgent/data/ism_inventory.json"))
    output_path = Path(paths.get("output", "agents/ISMAgent/output/ism_nodes_report.txt"))
    ndjson_path = paths.get("ndjson", "agents/ISMAgent/logs/ism_agent.ndjson")
    dump_dir = Path(paths.get("dump_json_dir", "agents/ISMAgent/logs/node_json"))
    archive_dir = Path(paths.get("archive_dir", "agents/ISMAgent/archive"))

    slog = StructuredLog(ndjson_path, use_color=True)

    check_server_health(cfg)

    lang = (args.language or cfg.get("language") or "en").strip().lower()

    try:
        nodes = load_nodes(input_path)
    except FileNotFoundError as e:
        if slog:
            slog.console("critical", "main", ":fatal", "Error", f"Fatal: Missing primary input file. {e}", level="critical")
        sys.exit(1)
    except Exception as e:
        if slog:
            slog.console("critical", "main", ":fatal", "Error", f"Fatal: Error loading nodes. {e}", level="critical")
        sys.exit(1)

    try:
        inventory_map = load_inventory_map(inventory_path)
    except Exception:
        inventory_map = {}

    archive_input_file(input_path, archive_dir)

    results: List[str] = []

    for idx, node in enumerate(nodes, 1):
        node_name = node.get("Name", f"Node{idx}")
        try:
            params = node_params(node, inventory_map)
            counter_str = f"{idx}/{len(nodes)}"

            if slog:
                slog.console("proc", "ism", ":process", counter_str, f"Processing node: {node_name}")
                slog.file_event(event="node_processing", node=node_name, index=idx)

            text = generate_logical_sentence(params, lang, cfg, max_retries=5)
            results.append(text.strip())

            dump_node_json(dump_dir, idx, node_name, params, text)

            time.sleep(max(0.0, float(args.delay)))
        except Exception as e:
            if slog:
                slog.console("error", "ism", ":process", "Error", f"{node_name}: {e}", level="error")
                slog.file_event(event="node_failed", node=node_name, error=str(e))
            # continue with the next node

    if not results:
        if slog:
            slog.console("error", "main", ":report", "Error", "No report could be generated.", level="error")
        sys.exit(2)

    wrote_ok = False
    try:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        out_text = "\n\n".join(results).rstrip() + "\n"
        output_length_bytes = len(out_text.encode("utf-8"))
        byte_output_str = str(output_length_bytes) + "B"

        if output_path.exists():
            with open(output_path, "a", encoding="utf-8") as f:
                f.write(out_text)
            if slog:
                slog.console("file", "filesystem", ":append", byte_output_str, f"Appended {output_length_bytes} bytes to: {output_path}")
                slog.file_event(event="report_appended", path=str(output_path), size=len(out_text))
        else:
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(out_text)
            if slog:
                slog.console("file", "filesystem", ":write", byte_output_str, f"Report created: {output_path}")
                slog.file_event(event="report_written", path=str(output_path), size=len(out_text))
        wrote_ok = True
    except Exception as e:
        if slog:
            slog.console("error", "filesystem", ":write", "Error", f"{e}", level="error")
        sys.exit(3)

    if wrote_ok:
        sftp_cfg = cfg.get("sftp") or {}
        if sftp_cfg.get("enabled", False):
            upload_ok = sftp_upload_file(output_path, sftp_cfg)

            if upload_ok:
                try:
                    os.remove(output_path)
                    if slog:
                        slog.console("file", "filesystem", ":delete", "Done", f"Local report deleted after successful SFTP: {output_path}")
                        slog.file_event(event="report_deleted", path=str(output_path))
                except Exception as e:
                    if slog:
                        slog.console("error", "filesystem", ":delete", "Error", f"Failed to delete local report: {e}", level="error")
                        slog.file_event(event="delete_failed", path=str(output_path), error=str(e))
            else:
                if slog:
                    slog.console("warning", "sftp", ":post", "Warn", "Upload failed; report remains local.", level="warning")
        else:
            if slog:
                slog.console("info", "sftp", ":post", "Skip", "SFTP disabled in config.")


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        if slog:
            slog.console("warning", "main", ":shutdown", "-", "User aborted (CTRL+C).", level="warning")
        sys.exit(130)
    except Exception as e:
        if slog:
            slog.console("critical", "main", ":fatal", "Error", f"{e}", level="critical")
        sys.exit(99)

```

--------------------------------------------------------------------------------
/clients/Gradio/main.py:
--------------------------------------------------------------------------------

```python
import asyncio
import base64
import io
import json
import os
import shutil
import time
import uuid
from datetime import datetime
from pathlib import Path

import gradio as gr
import httpx
from PIL import Image
from gradio_modal import Modal
from openai import OpenAI

from agents.AgentInterface.Python.config import Config, ConfigError
from clients.Gradio.Api import PrivateGPTAPI
from clients.Gradio.file_tools.loader_factory import LoadersFactory
from clients.Gradio.mcp_client import MCPClient, generate_system_prompt, load_config, clean_response
from clients.Gradio.messages.send_call_tool import send_call_tool
from clients.Gradio.messages.send_initialize_message import send_initialize
from clients.Gradio.messages.send_tools_list import send_tools_list
from clients.Gradio.transport.stdio.stdio_client import stdio_client


# config
mcp_config = "./clients/Gradio/server_config.json"

# Selection of MCP servers from the config
server_names = ["dp"]  # e.g. "demo-tools", "filesystem", "sqlite", "nostr", "agent_web_search", "hf_flux", "google-calendar"
# If all_mcp_servers is True, the list above is overwritten and all servers in the config are considered
all_mcp_servers = True

temperature = 0.8
top_p = 0.8
#model = "/models/mistral-nemo-12b" #vllm
#model = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
#model = "mistralai/Mistral-Small-3.2-24B-Instruct-2506"
md_model = None


image_url = None



# Load configuration file
try:
    # Get the absolute path to the config.json file
    config_file = Path.absolute(Path(__file__).parent / "config.json")
    # Initialize configuration with required fields
    config = Config(config_file=config_file, required_fields=["base_url"])
    # Retrieve default groups and VLLM configuration from config file
    default_groups = config.get("groups", [])
    vllm_url =  config.get("vllm_url", "")
    vllm_api_key = config.get("vllm_api_key", "")
    model = config.get("model", "/models/mistral-nemo-12b")
    access_header = config.get("access_header", None)
    proxy_user = config.get("proxy_user", None)
    if proxy_user == "":
        proxy_user = None
    proxy_password = config.get("proxy_password", None)
    if proxy_password == "":
        proxy_password = None
except ConfigError as e:
    # Display an error message and exit if configuration cannot be loaded
    print(f"Configuration Error: {e}")
    exit(1)

# If all_mcp_servers is set to True, load all mcp servers from the config file
if all_mcp_servers:
    with open(mcp_config, 'r') as f:
        # Read the config file and extract mcp server names
        server_names = list(json.load(f)['mcpServers'].keys())
        print(server_names)
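# The block above expects server_config.json to provide an "mcpServers" mapping,
# roughly like the sketch below (the per-server fields such as "command"/"args"
# are an assumption based on the usual MCP stdio config, not taken from this file):
# {
#   "mcpServers": {
#     "dp": {"command": "python", "args": ["path/to/server.py"]}
#   }
# }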



mcp_servers = []

#user_data_source = ["User1", "User2", "User3", "User4", "User5"]
selected_groups = []
pgpt = None

# Function to handle login logic
async def login(username, password, selected_options, selected_options2):
    global pgpt
    config.set_value("email", username)
    config.set_value("password", password)
    pgpt = PrivateGPTAPI(config)
    if pgpt.logged_in:
        # Successful login
        groups = pgpt.list_personal_groups()
        return gr.update(visible=False), gr.update(visible=True), "", gr.update(choices=groups, value=None), gr.update(choices=groups, value=None)
    else:
        gr.Warning("Error connecting.")
        return gr.update(), gr.update(visible=False), "Invalid credentials. Please try again.", gr.update(choices=[], value=None), gr.update(choices=[], value=None)

MAX_ITEMS = 200  # Max number of sources

def delete_source(sources, index):
    if 0 <= index < len(sources):
        source_to_delete = sources[index]
        print(source_to_delete)
        status = pgpt.delete_source(source_to_delete["id"])
        if status == "success":
            gr.Success("Entry deleted")
        else:
            gr.Error("Failed")
        sources.pop(index)

    return sources

def render_ui(sources):
    updates = []
    if sources is None:
        sources = []
    for i in range(MAX_ITEMS):
        if i < len(sources):
            src = sources[i]
            updates.extend([
                gr.update(visible=True),                # Row visible
                gr.update(value=src["name"]),           # Name
                gr.update(value="\n".join(src["groups"])),         # Groups
                gr.update(value=src["creator"]),        # Creator
                gr.update(value=src["date"]),           # Date
                gr.update(value=src["status"]),         # Status
                gr.update(visible=True),                # Delete button visible
            ])
        else:
            updates.extend([
                gr.update(visible=False),
                gr.update(value=""),
                gr.update(value=""),
                gr.update(value=""),
                gr.update(value=""),
                gr.update(value="Draft"),
                gr.update(visible=False),
            ])
    return updates


async def init_mcp_stdio(mcp_config, server_names):
    try:
        for server_name in server_names:
            mcp_client = MCPClient(vllm_url, vllm_api_key)
            server_params = await load_config(mcp_config, server_name)
            try:
                await mcp_client.connect_to_stdio_server(server_params, server_name)
                response = await mcp_client.session.list_tools()
                tools = []
                for tool in response.tools:
                    try:
                        print(tool)
                        tools.append(
                            {
                            "type": "function",
                            "function": {
                                "name": tool.name,
                                "description": tool.description,
                                "parameters": tool.inputSchema
                                }
                             }
                        )
                    except Exception as e:
                        print(e)

                mcp_servers.append((mcp_client, tools, server_name))
            except Exception as e:
                print(e)

    except Exception as e:
        print(e)
        print("error connecting to MCP Stdio server")
    #finally:
    #    await client.cleanup()


def show_image(img):
    return img

def transcribe_whisper(file_path):
    from faster_whisper import WhisperModel

    model_size = "base"

    # Run on GPU with FP16
    # model = WhisperModel(model_size, device="cuda", compute_type="float16")

    # or run on GPU with INT8
    # model = WhisperModel(model_size, device="cuda", compute_type="int8_float16")
    # or run on CPU with INT8
    whisper_model = WhisperModel(model_size, device="cpu", compute_type="int8")

    segments, info = whisper_model.transcribe(file_path, beam_size=5)

    print("Detected language '%s' with probability %f" % (
        info.language, info.language_probability))
    message = ""
    for segment in segments:
        print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
        message += segment.text + "\n"

    return message.rstrip("\n")

async def create_interface():
    theme = gr.themes.Default(primary_hue="blue").set(
        loader_color="#FF0000",
        slider_color="#FF0000",
    )
    with (gr.Blocks(theme="ocean",
                   title="PrivateGPT MCP Multi-API Demo",
                   fill_height=True,
                   #css="footer{display:none !important}"
                   css="footer {visibility: hidden}"
                    )
          as demo):
        # Login UI Elements
        login_message = gr.Markdown("")

        await init_mcp_stdio(mcp_config=mcp_config, server_names=server_names)



        with gr.Group() as login_interface:
            # Store/Save credentials in browser
            get_local_storage = """
                function() {
                  globalThis.setStorage = (key, value)=>{
                    localStorage.setItem(key, JSON.stringify(value))
                  }
                   globalThis.getStorage = (key, value)=>{
                    return JSON.parse(localStorage.getItem(key))
                  }
                   const username_input =  getStorage('login')
                   const password_input =  getStorage('password')
                   return [username_input, password_input];
                  }
                """

            with gr.Row():
                gr.Image(value="./clients/Gradio/logos/Logo_dark.svg", show_label=False,
                         show_download_button=False,
                         show_fullscreen_button=False, height=300, width=200, scale=1)
                gr.Image(value="./clients/Gradio/logos/fsas.png", show_label=False,
                         show_download_button=False,
                         show_fullscreen_button=False, height=300, scale=3)
            username_input = gr.Textbox(label="Username")
            username_input.change(None, username_input, None, js="(v)=>{ setStorage('login',v) }")
            password_input = gr.Textbox(label="Password", type="password")
            password_input.change(None, password_input, None, js="(v)=>{ setStorage('password',v) }")
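            # The get_local_storage JS above defines setStorage/getStorage in the
            # browser; the two js= change handlers directly above persist the
            # credentials, and vl.load() below restores them on page load.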

            login_button = gr.Button("Login")

            with gr.Blocks() as vl:
                vl.load(
                    None,
                    inputs=None,
                    outputs=[username_input, password_input],
                    js=get_local_storage,
                )


            saved_message = gr.Markdown("✅ Saved to local storage", visible=False)

        # Dashboard UI Elements
        with gr.Group(visible=False) as dashboard_interface:

            with gr.Blocks() as main:
                with gr.Tab("Chat"):
                    async def predict(message, history):
                        global selected_groups
                        global mcp_servers
                        global temperature
                        global top_p
                        global model
                        global md_model
                        global image_url
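                        # Rough flow of this handler (summary of the code below):
                        #  1. Unpack the multimodal textbox: .wav is transcribed with Whisper,
                        #     images become a vision message, other files are converted to
                        #     markdown and appended to the prompt.
                        #  2. No group selected: talk to the OpenAI-compatible vLLM endpoint
                        #     directly, exposing the MCP tools and executing any tool calls.
                        #  3. At least one group selected: route the conversation through
                        #     PrivateGPTAPI so the RAG backend answers with citations.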

                        files = []
                        # deal with multimodal textfield
                        try:
                            files = message["files"]
                            message = str(message["text"])
                        except (TypeError, KeyError):
                            print("using regular message")

                        image_url = None
                        if len(files) > 0:
                            for file_path in files:
                                print(file_path)
                                # Get the file extension
                                file_extension = os.path.splitext(file_path)[1]
                                print(f"File Extension: {file_extension}")

                                if file_extension == ".wav":
                                    message = transcribe_whisper(file_path)

                                elif file_extension in (".jpg", ".jpeg", ".png", ".bmp"):
                                    image_url = file_path

                                else:

                                    content = ""
                                    if file_extension == ".pdf":
                                        content = LoadersFactory().pdf(file_path)
                                    elif file_extension == ".csv":
                                        content = LoadersFactory().csv(file_path)
                                    elif file_extension == ".xlsx":
                                        content = LoadersFactory().xlsx(file_path)
                                    elif file_extension == ".md":
                                        content = LoadersFactory().markdown(file_path)
                                    # todo add more sources

                                    markdown = LoadersFactory().convert_documents_to_markdown(content)
                                    print(markdown)
                                    message += "\n\n" + markdown


                        history_openai_format = []
                        tools = []
                        file_extension = ""
                        if len(files) > 0:
                            file_extension = os.path.splitext(files[0])[1]
                        # only add mcp servers when we don't have a file attached for now.
                        if len(files) == 0 or (len(files) == 1 and file_extension in (".wav", ".jpg", ".jpeg", ".png", ".bmp")):
                            for mcp_server, mcptools, mcpname in mcp_servers:
                                for tool in mcptools:
                                    tools.append(tool)




                        if len(selected_groups) == 0:
                            # If we don't use a group, we use vllm directly.

                            # only make the mcp prompt when we don't have a file attached
                            if len(files) == 0 or (len(files) == 1 and os.path.splitext(files[0])[1] == ".wav"):
                                system_prompt = generate_system_prompt(tools)

                            else:
                                system_prompt = "You have access to a document. The user will instruct you what to do with it."

                            history_openai_format.append({"role": "system", "content": system_prompt})


                            last_role = "system"
                            for entry in history:
                                if (last_role != entry["role"] and not hasattr(entry, "tool_calls")) or (hasattr(entry, "tool_calls") and (entry["tool_calls"] is None or entry["tool_calls"] == [])):
                                    history_openai_format.append({"role": entry["role"], "content": str(entry["content"])})
                                    last_role = entry["role"]

                            if image_url is None:
                                history_openai_format.append({"role": "user", "content": message})
                            else:
                                image = Image.open(image_url)
                                # Convert the image to a byte stream
                                buffered = io.BytesIO()
                                image.save(buffered, format="JPEG")  # Specify the format (e.g., JPEG, PNG)
                                image_bytes = buffered.getvalue()
                                image_base64 = base64.b64encode(image_bytes).decode("utf-8")

                                history_openai_format.append(

                                    {
                                        "role": "user",
                                        "content": [
                                            {"type": "text", "text": message},
                                            {
                                                "type": "image_url",
                                                "image_url": {
                                                    "url": f"data:image/jpeg;base64,{image_base64}"
                                                },
                                            },
                                        ],
                                    }

                                )

                            print(history_openai_format)
                            headers = {
                                'Accept': 'application/json',
                                'Content-Type': 'application/json',
                            }

                            if access_header is not None:
                                headers['X-Custom-Header'] = access_header
                            elif proxy_user is not None and proxy_password is not None:
                                auth = base64.b64encode(f"{proxy_user}:{proxy_password}".encode()).decode()
                                headers['Authorization'] = f'Basic {auth}'

                            client = OpenAI(
                                base_url=vllm_url,
                                api_key=vllm_api_key,
                                http_client=httpx.Client(verify=False, headers=headers)
                            )

                            completion = client.chat.completions.create(
                                model=model,
                                messages=history_openai_format,
                                temperature=temperature,
                                top_p=top_p,
                                tools = tools or None,
                                stream=False,
                            )

                            # Process response and handle tool calls
                            tool_results = []

                            result = completion.choices[0].message
                            print(result)
                            tool_calls = []

                            # work around the mistral llm weirdness
                            other_weird_stuff = str(result.content).lstrip().replace("[\n```", "```").replace("{\n```", "```")
                            if result.content is not None or other_weird_stuff.startswith('```'):
                                if result.content.startswith("[TOOL_CALLS]") or other_weird_stuff.startswith("```") :
                                    print("entering TOOL_CALLS")
                                    current_timestamp = time.time()
                                    formatted_timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(current_timestamp))
                                    system_prompt = "The timezone is Europe/Berlin. This is the CURRENT DATE: " + formatted_timestamp

                                    history_openai_format = [{"role": "system", "content": system_prompt},
                                                             {"role": "user", "content": message}]

                                    completion = client.chat.completions.create(
                                        model=model,
                                        messages=history_openai_format,
                                        temperature=temperature,
                                        top_p=top_p,
                                        tools=tools or None,
                                        stream=False
                                    )
                                    tool_results = []

                                    result = completion.choices[0].message
                                    print(message)
                                    tool_calls = []

                            # Convert tool calls to OpenAI format
                            if hasattr(result, "tool_calls") and result.tool_calls:
                                for tool in result.tool_calls:
                                    print(tool.function.arguments)
                                    tool_calls.append(
                                        {
                                            "id": str(uuid.uuid4()),  # Generate unique ID
                                            "type": "function",
                                            "function": {
                                                "name": tool.function.name,
                                                "arguments": tool.function.arguments,
                                            },
                                        }
                                    )

                            if tool_calls:
                                for tool_call in tool_calls:
                                    tool_call_id = str(uuid.uuid4())
                                    if hasattr(tool_call, "id"):
                                        tool_call_id = str(tool_call.id)

                                    if hasattr(tool_call, "function"):
                                        print(tool_call.function)
                                        tool_name = getattr(tool_call.function, "name", "unknown tool")
                                        raw_arguments = getattr(tool_call.function, "arguments", {})

                                    elif isinstance(tool_call, dict) and "function" in tool_call:
                                        fn_info = tool_call["function"]
                                        tool_name = fn_info.get("name", "unknown tool")
                                        raw_arguments = fn_info.get("arguments", {})
                                    else:
                                        tool_name = "unknown tool"
                                        raw_arguments = {}

                                    # If raw_arguments is a string, try to parse it as JSON
                                    if isinstance(raw_arguments, str):
                                        try:
                                            raw_arguments = json.loads(raw_arguments)
                                        except json.JSONDecodeError:
                                            print("error json not valid")
                                            # If it's not valid JSON, just display as is
                                            pass

                                    # Now raw_arguments should be a dict or something we can pretty-print as JSON
                                    tool_args_str = json.dumps(raw_arguments, indent=2)

                                    tool_message =  f"```json\n{tool_args_str}\n```"
                                    print(tool_message)


                                    yield [
                                        {
                                            "role": "assistant",
                                            "content": "\n" + tool_message + "\n",
                                            "metadata": {"title": f"🛠️ Using tool {tool_name}",
                                                         "status": "pending"}
                                        },

                                    ]

                                    for mcp_server, tools, name in mcp_servers:
                                        if tool_name in str(tools): #todo: better check
                                            print(tool_name + " in tools")

                                            meta = await call_tool(name, tool_name, raw_arguments)
                                            if meta is None:
                                                return

                                            content = meta.get('content', [])
                                            print("Tool " + tool_name + " reply: " + str(content))

                                            tool_results.append({"call": str(tool_name), "result": content})

                                            history_openai_format.append(
                                                {
                                                    "role": "assistant",
                                                    "content": None,
                                                    "tool_calls": [
                                                        {
                                                            "id": tool_call_id,
                                                            "type": "function",
                                                            "function": {
                                                                "name": tool_name,
                                                                "arguments": json.dumps(raw_arguments)
                                                                if isinstance(raw_arguments, dict)
                                                                else raw_arguments,
                                                            },
                                                        }
                                                    ],
                                                }
                                            )

                                            # Continue conversation with tool results
                                            if  len(content)> 0 and content[0].get("type") == "text" and content[0].get("text") is not None:

                                                #temporary workaround, move to image instead of text

                                                content = content[0].get("text")
                                                isimagejson = False
                                                isvideojson = False
                                                j = None
                                                try:
                                                    j = json.loads(content)
                                                    if j.get("type") == "image":
                                                        isimagejson = True
                                                    elif  j.get("type") == "video":
                                                        isvideojson = True
                                                except Exception:
                                                    isimagejson = False

                                                if isimagejson:
                                                    yield  [
                                                        {
                                                            "role": "assistant",
                                                            "content": "\n" + tool_message + "\n" + f"Reply:\n {content}" + "\n",
                                                            "tool_calls": [tool_name],
                                                            "metadata": {"title": f"🛠️ Used tool {tool_name}",
                                                                         "status": "done"}
                                                        },
                                                        {
                                                            "role": "assistant",
                                                            "content": f"{j.get('message')}:\n![Image Description]({j.get('url')})"
                                                        }


                                                    ]
                                                elif isvideojson:
                                                    html_content = f"""
                                                    <!DOCTYPE html>
                                                    <html>
                                                    <head>
                                                        <title>Video Preview</title>
                                                    </head>
                                                    <body>
                                                        <video width="640" controls>
                                                            <source src="{j.get("url")}" type="video/mp4">
                                                            Your browser does not support the video tag.
                                                        </video>
                                                    </body>
                                                    </html>
                                                    """



                                                    yield  [
                                                        {
                                                            "role": "assistant",
                                                            "content": "\n" + tool_message + "\n" + f"Reply:\n {content}" + "\n",
                                                            "tool_calls": [tool_name],
                                                            "metadata": {"title": f"🛠️ Used tool {tool_name}",
                                                                         "status": "done"}
                                                        },
                                                        {
                                                            "role": "assistant",
                                                            "content":  html_content
                                                        }


                                                    ]

                                                else:

                                                    history_openai_format.append(
                                                        {
                                                            "role": "tool",
                                                            "name": tool_name,
                                                            "content": content,
                                                            "tool_call_id": tool_call_id,
                                                        }
                                                    )

                                                    response = client.chat.completions.create(
                                                        model=model,
                                                        messages=history_openai_format,
                                                        temperature=temperature,
                                                        top_p=top_p,
                                                        stream=False
                                                    )

                                                    partial_mes = ""
                                                    #history.append({"role": "assistant", "content": "I called a tool",
                                                    #                     "metadata": {"title": f"🛠️ Used tool {"Test"}"}})
                                                    tokens = clean_response(response.choices[0].message.content).split(" ")
                                                    for i, token in enumerate(tokens):
                                                        partial_mes = partial_mes + token + " "
                                                        await asyncio.sleep(0.05)
                                                        yield partial_mes
                                                    #history.append({"role": "assistant", "content": clean_response(response.choices[0].message.content)})

                                                    yield [
                                                            {
                                                                "role": "assistant",
                                                                "content": "\n" + tool_message + "\n" + f"Reply:\n {content}" + "\n",
                                                                "tool_calls": [tool_name],
                                                                "metadata": {"title": f"🛠️ Used tool {tool_name}",
                                                                             "status": "done"}
                                                            },
                                                            {
                                                                "role": "assistant",
                                                                "content": clean_response(response.choices[0].message.content)
                                                            },


                                                        ]

                                                    break
                                            elif len(content)> 0 and content[0].get("type") == "image":
                                                try:
                                                    base64_string = content[0].get("data")
                                                    image_bytes = base64.b64decode(base64_string)

                                                    from PIL import Image as PilImage
                                                    pil_image = PilImage.open(io.BytesIO(image_bytes))

                                                    pil_image.save("test.jpg")
                                                    fullpath = Path("test.jpg").absolute()
                                                    print(fullpath)


                                                    yield [ {
                                                            "role": "assistant",
                                                            "content": "\n" + tool_message + "\n",
                                                            "tool_calls": [tool_name],
                                                            "metadata": {"title": f"🛠️ Used tool {tool_name}",
                                                                         "status": "done"}
                                                        },
                                                        {
                                                        "text": content[0].get("message"),
                                                        "files" : [fullpath]
                                                        }

                                                    ]

                                                except Exception as e:
                                                    print(e)
                                                    yield [
                                                        {
                                                            "role": "assistant",
                                                            "content": "\n" + tool_message + "\n" + f"Reply:\n {content}" + "\n",
                                                            "tool_calls": [tool_name],
                                                            "metadata": {"title": f"🛠️ Used tool {tool_name}",
                                                                         "status": "done"}
                                                        },
                                                        {
                                                            "role": "assistant",
                                                            "content": "Error receiving an image"
                                                        }
                                                    ]

                            else:
                                partial_mes = ""
                                tokens = clean_response(result.content).split(" ")
                                for i, token in enumerate(tokens):
                                    partial_mes = partial_mes + token + " "
                                    await asyncio.sleep(0.05)
                                    yield partial_mes


                        else:
                            # if at least one group is selected we use the API code to use the RAG.

                            last_role = "system"
                            for entry in history:
                                if last_role != entry["role"]:
                                    history_openai_format.append({"role": entry["role"], "content": entry["content"]})
                                    last_role = entry["role"]

                            history_openai_format.append({"role": "user", "content": message})


                            config.set_value("groups", selected_groups)
                            pgpt = PrivateGPTAPI(config)
                            response = pgpt.respond_with_context(history_openai_format)
                            print(response)
                            user_input = ""
                            for message in history_openai_format:
                                user_input += json.dumps(message)

                            tokens = response["answer"].split(" ")
                            partial_message = ""

                            for i, token in enumerate(tokens):
                                partial_message = partial_message + token + " "
                                await asyncio.sleep(0.05)
                                yield partial_message

                            citations = []
                            for source in response["sources"]:
                                document_info = pgpt.get_document_info(source["documentId"])

                                citations.append(document_info["title"] +
                                                 #" Page: " + str(source["page"] + 1) +
                                                 "\n" + str(source["context"]).replace("#", "") + "\n\n")
                            result = [{"role": "assistant",
                                    "content": response["answer"]
                                    }
                                 ]
                            if len(citations) > 0:
                                result.append({
                                    "role": "user",
                                    "content": " "
                                })
                                result.append( {
                                       "role": "assistant",
                                       "content": "\n".join([f"• {cite}" for cite in citations]),
                                       "metadata": {"title": "📚 Citations",
                                                    "status": "done"}
                                   })

                            yield result

                    async def call_tool(mcp_server, tool_name, tool_args):
                        print("starting to call the tool")

                        tool_response = None
                        try:
                            server_params = await load_config(mcp_config, mcp_server)
                            try:
                                async with stdio_client(server_params) as (read_stream, write_stream):
                                    # Check if our current config has a tool.

                                    init_result = await send_initialize(read_stream, write_stream)
                                    # check we got a result
                                    if not init_result:
                                        print("Server initialization failed")
                                        return

                                    tools = await send_tools_list(read_stream, write_stream)
                                    stuff = json.dumps(tools)
                                    toolsobject = json.loads(stuff)["tools"]
                                    print(toolsobject)

                                    server_has_tool = False
                                    for tool in toolsobject:
                                        if tool["name"] == tool_name:
                                            print(f"Found tool {tool_name}.")
                                            server_has_tool = True
                                    if server_has_tool is False:
                                        print("no tool in server")
                                    else:
                                        print(tool_args)
                                        tool_response = await send_call_tool(
                                            tool_name, tool_args, read_stream, write_stream)
                                        raise BaseException()  # Until we find a better way to leave the async with

                            except:
                                raise BaseException()

                            raise BaseException()
                        except BaseException as e:
                            pass

                        return tool_response

                    def change_group(selected_item):
                        global selected_groups
                        selected_groups = selected_item

                    groupslist = gr.CheckboxGroup(choices=[], label="Groups")
                    groupslist.change(change_group, groupslist, None)

                    chatbot = gr.Chatbot(
                                        height="60vh",
                                        show_label=False,
                                        type="messages",
                                        avatar_images=(
                                              None,
                                              "./clients/Gradio/logos/Logo_dark.svg"
                                          ),
                                         )

                    gr.ChatInterface(predict,
                                     chatbot=chatbot,
                                     type="messages",
                                     textbox=gr.MultimodalTextbox(placeholder="Ask me a question", autofocus=True, container=True, scale=7, sources=["upload", "microphone"]),
                                     examples=["Hello", "Write a Python function that counts all numbers from 1 to 10",
                                               "How is the weather today in Munich?"],
                                     cache_examples=False


                    )
                    with gr.Row():
                        #prompt_dd = gr.Dropdown(choices=prompt_dict)
                        show_btn = gr.Button("Chat Settings")
                        show_btn2 = gr.Button("MCP Tools")
                with gr.Tab("Sources"):


                    def upload_file(file, sources):
                        global pgpt
                        global selected_groups
                        global default_groups
                        UPLOAD_FOLDER = "./data"

                        if len(selected_groups) == 0:
                            gr.Warning("Select at least one group, source was not added")
                            return sources

                        if not os.path.exists(UPLOAD_FOLDER):
                            os.mkdir(UPLOAD_FOLDER)
                        shutil.copy(file, UPLOAD_FOLDER)
                        file_path = os.path.join(UPLOAD_FOLDER, os.path.basename(file))


                        file_extension = os.path.splitext(file_path)[1]
                        print(f"File Extension: {file_extension}")


                        if file_extension == ".wav":
                            markdown = transcribe_whisper(file_path)

                        else:
                            content = ""
                            if file_extension == ".pdf":
                                content = LoadersFactory().pdf(file_path)
                            elif file_extension == ".csv":
                                content = LoadersFactory().csv(file_path)
                            elif file_extension == ".xlsx":
                                content = LoadersFactory().xlsx(file_path)
                            elif file_extension == ".md":
                                content = LoadersFactory().markdown(file_path)
                            # todo add more sources

                            markdown = LoadersFactory().convert_documents_to_markdown(content)
                            print(markdown)

                            
                        if os.path.exists(file_path):
                            os.remove(file_path)
                            print("File deleted successfully.")
                        else:
                            print("File does not exist.")


                        gr.Info("Processing, please wait...")


                        if pgpt is not None:
                            print(pgpt.base_url)
                            filepath = Path(file_path)
                            file_name = filepath.name
                            answer =  pgpt.add_source(markdown, selected_groups, file_name)
                            print(str(answer["documentId"]))
                            document_info = pgpt.get_document_info(answer["documentId"])
                            #gr.Info("Added:" + str(document_info))
                            dt = datetime.fromisoformat(document_info["createdAt"])
                            # Format to human-readable string
                            human_readable = dt.strftime("%A, %B %d, %Y  %I:%M %p %Z")

                            if len(sources) < MAX_ITEMS:
                                sources.append({"name": document_info["title"], "creator": document_info["creator"]["name"], "date": human_readable, "status": document_info["state"], "groups": document_info["groups"], "id": document_info["sourceId"]})
                            return sources




                    gr.Markdown("## 📚 PrivateGPT Sources")

                    groupslist2 = gr.CheckboxGroup(choices=[], label="Groups")
                    groupslist2.change(change_group, groupslist2, None)

                    upload_button = gr.UploadButton("➕ Add Source")



                    MAX_ITEMS = 200  # Max number of sources


                    sources_state = gr.State([])  # Start with an empty state

                    rows = []

                    # Create rows for each source
                    for i in range(MAX_ITEMS):
                        with gr.Row(visible=False) as row:  # Initially invisible
                            name = gr.Text(value="", label="Name", interactive=False, show_label=False)  # Read-only field
                            groups = gr.Text(value="", label="Groups", interactive=False,
                                              show_label=False)  # Read-only field
                            creator = gr.Text(value="", label="Creator", interactive=False,  show_label=False)  # Read-only field
                            date = gr.Text(value="", label="Date", interactive=False,  show_label=False)  # Read-only field
                            status = gr.Text(value="", label="Status",  show_label=False)  # Label for status
                            delete_btn = gr.Button("🗑️", scale=0)
                            rows.append((row, name, groups, creator, date, status, delete_btn))

                            # Delete handler
                            delete_btn.click(delete_source, inputs=[sources_state, gr.State(i)],
                                             outputs=[sources_state])

                    # Add source button
                    #upload_button.click(add_source, inputs=[sources_state], outputs=[sources_state])
                    upload_button.upload(upload_file, inputs=[upload_button, sources_state], outputs=sources_state)

                    # Auto re-render UI when sources change
                    sources_state.change(
                        render_ui,
                        inputs=[sources_state],
                        outputs=[comp for row in rows for comp in row]
                    )

                    # Fetch sources from "API" and initialize the UI with them
                    def load_sources():
                        #todo that's ugly.
                        while pgpt is None:
                            time.sleep(2)

                        if not pgpt.logged_in:
                            return


                        groups = pgpt.list_personal_groups()
                        print(groups)

                        sources = []
                        for group in groups:
                            group_sources = pgpt.get_sources_from_group(group)
                            for entry in group_sources:
                                sources.append(entry)

                        final = []
                        for source in sources:
                            print(source)
                            dt = datetime.fromisoformat(source["createdAt"])
                            # Format to human-readable string
                            human_readable = dt.strftime("%A, %B %d, %Y  %I:%M %p %Z")
                            final.append({"name": source["title"], "creator": source["creator"]["name"], "date": human_readable, "status": source["state"], "groups": source["groups"], "id": source["sourceId"]})

                        return final

                    #load_data()

                    # Trigger data fetching and rendering on app load

                    main.load(load_sources, outputs=[sources_state])

                with Modal(visible=False) as modalsettings:
                    global temperature
                    global top_p

                    def change_temperature(value):
                        global temperature
                        try:
                            val = float(value)
                            if 0.0 <= val <= 1.0:
                                temperature = val
                                gr.Success("New Temperature saved")
                        except (TypeError, ValueError):
                            gr.Warning("Not a valid entry")

                    def change_top_p(value):
                        global top_p
                        try:
                            val = float(value)
                            if 0.0 <= val <= 1.0:
                                top_p = val
                                gr.Success("New top_p value saved")
                        except (TypeError, ValueError):
                            gr.Warning("Not a valid entry")



                    temperature_input = gr.Textbox(label="Temperature", placeholder=str(temperature))
                    temperature_input.change(change_temperature, temperature_input)

                    top_p_input = gr.Textbox(label="Top_p", placeholder=str(top_p))
                    top_p_input.change(change_top_p, top_p_input)

                with Modal(visible=False) as modalmcp:
                    global mcp_servers

                    gr.Markdown("## 🛠️ Available MCP Tools")

                    for mcp_server in mcp_servers:
                        descr = ""
                        lines = 1
                        for tool in mcp_server[1]:
                            descr += "Tool: " + tool["function"]["name"] + "\n" + "Description: " + tool["function"]["description"] + "\n\n"
                            lines+=3


                        gr.Textbox(descr, show_label=True, label=mcp_server[2], lines=lines)


                show_btn.click(lambda: Modal(visible=True), None, modalsettings)
                show_btn2.click(lambda: Modal(visible=True), None, modalmcp)
                # todo add management of sources, users etc later.


            #with gr.Tab("Users"):
                # Initial data source
            #    gr.Markdown("Test function, not working.")
                # TODO Api.


        # Connect button to function and update components accordingly
        login_button.click(
            fn=login,
            inputs=[username_input, password_input, groupslist, groupslist2],
            outputs=[login_interface, dashboard_interface, login_message, groupslist, groupslist2]
        )

    demo.launch(favicon_path="./clients/Gradio/favicon.ico")


asyncio.run(create_interface())

```
Page 13/16