# Directory Structure
```
├── .devcontainer
│ ├── .vscode
│ │ └── settings.json
│ ├── devcontainer.json
│ ├── OPTIMIZING_FOR_CODESPACES.md
│ ├── POST_SETUP_README.md
│ └── README.md
├── .dockerignore
├── .gitattributes
├── .github
│ ├── policheck.yml
│ └── workflows
│ ├── assistants-codespace-assistant.yml
│ ├── assistants-document-assistant.yml
│ ├── assistants-explorer-assistant.yml
│ ├── assistants-guided-conversation-assistant.yml
│ ├── assistants-knowledge-transfer-assistant.yml
│ ├── assistants-navigator-assistant.yml
│ ├── assistants-project-assistant.yml
│ ├── assistants-prospector-assistant.yml
│ ├── assistants-skill-assistant.yml
│ ├── libraries.yml
│ ├── mcp-server-giphy.yml
│ ├── mcp-server-memory-filesystem-edit.yml
│ ├── mcp-server-memory-user-bio.yml
│ ├── mcp-server-memory-whiteboard.yml
│ ├── mcp-server-open-deep-research-clone.yml
│ ├── mcp-server-web-research.yml
│ ├── workbench-app.yml
│ └── workbench-service.yml
├── .gitignore
├── .multi-root-tools
│ ├── Makefile
│ └── README.md
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── ai_context
│ └── generated
│ ├── ASPIRE_ORCHESTRATOR.md
│ ├── ASSISTANT_CODESPACE.md
│ ├── ASSISTANT_DOCUMENT.md
│ ├── ASSISTANT_NAVIGATOR.md
│ ├── ASSISTANT_PROJECT.md
│ ├── ASSISTANT_PROSPECTOR.md
│ ├── ASSISTANTS_OTHER.md
│ ├── ASSISTANTS_OVERVIEW.md
│ ├── CONFIGURATION.md
│ ├── DOTNET_LIBRARIES.md
│ ├── EXAMPLES.md
│ ├── MCP_SERVERS.md
│ ├── PYTHON_LIBRARIES_AI_CLIENTS.md
│ ├── PYTHON_LIBRARIES_CORE.md
│ ├── PYTHON_LIBRARIES_EXTENSIONS.md
│ ├── PYTHON_LIBRARIES_SKILLS.md
│ ├── PYTHON_LIBRARIES_SPECIALIZED.md
│ ├── TOOLS.md
│ ├── WORKBENCH_FRONTEND.md
│ └── WORKBENCH_SERVICE.md
├── aspire-orchestrator
│ ├── .editorconfig
│ ├── Aspire.AppHost
│ │ ├── .gitignore
│ │ ├── appsettings.json
│ │ ├── Aspire.AppHost.csproj
│ │ ├── Program.cs
│ │ └── Properties
│ │ └── launchSettings.json
│ ├── Aspire.Extensions
│ │ ├── Aspire.Extensions.csproj
│ │ ├── Dashboard.cs
│ │ ├── DockerFileExtensions.cs
│ │ ├── PathNormalizer.cs
│ │ ├── UvAppHostingExtensions.cs
│ │ ├── UvAppResource.cs
│ │ ├── VirtualEnvironment.cs
│ │ └── WorkbenchServiceHostingExtensions.cs
│ ├── Aspire.ServiceDefaults
│ │ ├── Aspire.ServiceDefaults.csproj
│ │ └── Extensions.cs
│ ├── README.md
│ ├── run.sh
│ ├── SemanticWorkbench.Aspire.sln
│ └── SemanticWorkbench.Aspire.sln.DotSettings
├── assistants
│ ├── codespace-assistant
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ ├── icon_context_transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── models.py
│ │ │ │ ├── request_builder.py
│ │ │ │ ├── response.py
│ │ │ │ ├── step_handler.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── abbreviations.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ └── openai_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── card_content_context_transfer.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── codespace_assistant_info.md
│ │ │ │ ├── context_transfer_assistant_info.md
│ │ │ │ ├── guardrails_prompt.txt
│ │ │ │ ├── guidance_prompt_context_transfer.txt
│ │ │ │ ├── guidance_prompt.txt
│ │ │ │ ├── instruction_prompt_context_transfer.txt
│ │ │ │ └── instruction_prompt.txt
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── document-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── context_management
│ │ │ │ ├── __init__.py
│ │ │ │ └── inspector.py
│ │ │ ├── filesystem
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _convert.py
│ │ │ │ ├── _file_sources.py
│ │ │ │ ├── _filesystem.py
│ │ │ │ ├── _inspector.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _prompts.py
│ │ │ │ └── _tasks.py
│ │ │ ├── guidance
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dynamic_ui_inspector.py
│ │ │ │ ├── guidance_config.py
│ │ │ │ ├── guidance_prompts.py
│ │ │ │ └── README.md
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── models.py
│ │ │ │ ├── prompts.py
│ │ │ │ ├── responder.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ ├── openai_utils.py
│ │ │ │ ├── tokens_tiktoken.py
│ │ │ │ └── workbench_messages.py
│ │ │ ├── text_includes
│ │ │ │ └── document_assistant_info.md
│ │ │ ├── types.py
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── assistant.code-workspace
│ │ ├── CLAUDE.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_convert.py
│ │ │ └── test_data
│ │ │ ├── blank_image.png
│ │ │ ├── Formatting Test.docx
│ │ │ ├── sample_data.csv
│ │ │ ├── sample_data.xlsx
│ │ │ ├── sample_page.html
│ │ │ ├── sample_presentation.pptx
│ │ │ └── simple_pdf.pdf
│ │ └── uv.lock
│ ├── explorer-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── model.py
│ │ │ │ ├── response_anthropic.py
│ │ │ │ ├── response_openai.py
│ │ │ │ └── response.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── guided-conversation-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agents
│ │ │ │ ├── guided_conversation
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── definition.py
│ │ │ │ │ └── definitions
│ │ │ │ │ ├── er_triage.py
│ │ │ │ │ ├── interview.py
│ │ │ │ │ ├── patient_intake.py
│ │ │ │ │ └── poem_feedback.py
│ │ │ │ └── guided_conversation_agent.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── knowledge-transfer-assistant
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agentic
│ │ │ │ ├── __init__.py
│ │ │ │ ├── analysis.py
│ │ │ │ ├── coordinator_support.py
│ │ │ │ └── team_welcome.py
│ │ │ ├── assets
│ │ │ │ ├── icon-knowledge-transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── assistant.py
│ │ │ ├── common.py
│ │ │ ├── config.py
│ │ │ ├── conversation_clients.py
│ │ │ ├── conversation_share_link.py
│ │ │ ├── data.py
│ │ │ ├── domain
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audience_manager.py
│ │ │ │ ├── information_request_manager.py
│ │ │ │ ├── knowledge_brief_manager.py
│ │ │ │ ├── knowledge_digest_manager.py
│ │ │ │ ├── learning_objectives_manager.py
│ │ │ │ └── share_manager.py
│ │ │ ├── files.py
│ │ │ ├── logging.py
│ │ │ ├── notifications.py
│ │ │ ├── respond.py
│ │ │ ├── storage_models.py
│ │ │ ├── storage.py
│ │ │ ├── string_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── assistant_info.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── coordinator_instructions.txt
│ │ │ │ ├── coordinator_role.txt
│ │ │ │ ├── knowledge_digest_instructions.txt
│ │ │ │ ├── knowledge_digest_prompt.txt
│ │ │ │ ├── share_information_request_detection.txt
│ │ │ │ ├── team_instructions.txt
│ │ │ │ ├── team_role.txt
│ │ │ │ └── welcome_message_generation.txt
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── information_requests.py
│ │ │ │ ├── learning_objectives.py
│ │ │ │ ├── learning_outcomes.py
│ │ │ │ ├── progress_tracking.py
│ │ │ │ └── share_setup.py
│ │ │ ├── ui_tabs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brief.py
│ │ │ │ ├── common.py
│ │ │ │ ├── debug.py
│ │ │ │ ├── learning.py
│ │ │ │ └── sharing.py
│ │ │ └── utils.py
│ │ ├── CLAUDE.md
│ │ ├── docs
│ │ │ ├── design
│ │ │ │ ├── actions.md
│ │ │ │ └── inference.md
│ │ │ ├── DEV_GUIDE.md
│ │ │ ├── how-kta-works.md
│ │ │ ├── JTBD.md
│ │ │ ├── knowledge-transfer-goals.md
│ │ │ ├── learning_assistance.md
│ │ │ ├── notable_claude_conversations
│ │ │ │ ├── clarifying_quad_modal_design.md
│ │ │ │ ├── CLAUDE_PROMPTS.md
│ │ │ │ ├── transfer_state.md
│ │ │ │ └── trying_the_context_agent.md
│ │ │ └── opportunities-of-knowledge-transfer.md
│ │ ├── knowledge-transfer-assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_artifact_loading.py
│ │ │ ├── test_inspector.py
│ │ │ ├── test_share_manager.py
│ │ │ ├── test_share_storage.py
│ │ │ ├── test_share_tools.py
│ │ │ └── test_team_mode.py
│ │ └── uv.lock
│ ├── Makefile
│ ├── navigator-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ ├── card_content.md
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── completion_requestor.py
│ │ │ │ ├── local_tool
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── add_assistant_to_conversation.py
│ │ │ │ │ ├── list_assistant_services.py
│ │ │ │ │ └── model.py
│ │ │ │ ├── models.py
│ │ │ │ ├── prompt.py
│ │ │ │ ├── request_builder.py
│ │ │ │ ├── response.py
│ │ │ │ ├── step_handler.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ ├── openai_utils.py
│ │ │ │ └── tools.py
│ │ │ ├── text_includes
│ │ │ │ ├── guardrails_prompt.md
│ │ │ │ ├── guidance_prompt.md
│ │ │ │ ├── instruction_prompt.md
│ │ │ │ ├── navigator_assistant_info.md
│ │ │ │ └── semantic_workbench_features.md
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── project-assistant
│ │ ├── .cspell
│ │ │ └── custom-dictionary-workspace.txt
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agentic
│ │ │ │ ├── __init__.py
│ │ │ │ ├── act.py
│ │ │ │ ├── coordinator_next_action.py
│ │ │ │ ├── create_invitation.py
│ │ │ │ ├── detect_audience_and_takeaways.py
│ │ │ │ ├── detect_coordinator_actions.py
│ │ │ │ ├── detect_information_request_needs.py
│ │ │ │ ├── detect_knowledge_package_gaps.py
│ │ │ │ ├── focus.py
│ │ │ │ ├── respond.py
│ │ │ │ ├── team_welcome.py
│ │ │ │ └── update_digest.py
│ │ │ ├── assets
│ │ │ │ ├── icon-knowledge-transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── assistant.py
│ │ │ ├── common.py
│ │ │ ├── config.py
│ │ │ ├── conversation_clients.py
│ │ │ ├── data.py
│ │ │ ├── domain
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audience_manager.py
│ │ │ │ ├── conversation_preferences_manager.py
│ │ │ │ ├── information_request_manager.py
│ │ │ │ ├── knowledge_brief_manager.py
│ │ │ │ ├── knowledge_digest_manager.py
│ │ │ │ ├── learning_objectives_manager.py
│ │ │ │ ├── share_manager.py
│ │ │ │ ├── tasks_manager.py
│ │ │ │ └── transfer_manager.py
│ │ │ ├── errors.py
│ │ │ ├── files.py
│ │ │ ├── logging.py
│ │ │ ├── notifications.py
│ │ │ ├── prompt_utils.py
│ │ │ ├── storage.py
│ │ │ ├── string_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── actor_instructions.md
│ │ │ │ ├── assistant_info.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── coordinator_instructions copy.md
│ │ │ │ ├── coordinator_instructions.md
│ │ │ │ ├── create_invitation.md
│ │ │ │ ├── detect_audience.md
│ │ │ │ ├── detect_coordinator_actions.md
│ │ │ │ ├── detect_information_request_needs.md
│ │ │ │ ├── detect_knowledge_package_gaps.md
│ │ │ │ ├── focus.md
│ │ │ │ ├── knowledge_digest_instructions.txt
│ │ │ │ ├── team_instructions.txt
│ │ │ │ ├── to_do.md
│ │ │ │ ├── update_knowledge_brief.md
│ │ │ │ ├── update_knowledge_digest.md
│ │ │ │ └── welcome_message_generation.txt
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── conversation_preferences.py
│ │ │ │ ├── information_requests.py
│ │ │ │ ├── learning_objectives.py
│ │ │ │ ├── learning_outcomes.py
│ │ │ │ ├── progress_tracking.py
│ │ │ │ ├── share_setup.py
│ │ │ │ ├── system_reminders.py
│ │ │ │ ├── tasks.py
│ │ │ │ └── todo.py
│ │ │ ├── ui_tabs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brief.py
│ │ │ │ ├── common.py
│ │ │ │ ├── debug.py
│ │ │ │ ├── learning.py
│ │ │ │ └── sharing.py
│ │ │ └── utils.py
│ │ ├── CLAUDE.md
│ │ ├── docs
│ │ │ ├── design
│ │ │ │ ├── actions.md
│ │ │ │ ├── control_options.md
│ │ │ │ ├── design.md
│ │ │ │ ├── inference.md
│ │ │ │ └── PXL_20250814_190140267.jpg
│ │ │ ├── DEV_GUIDE.md
│ │ │ ├── how-kta-works.md
│ │ │ ├── JTBD.md
│ │ │ ├── knowledge-transfer-goals.md
│ │ │ ├── learning_assistance.md
│ │ │ ├── notable_claude_conversations
│ │ │ │ ├── clarifying_quad_modal_design.md
│ │ │ │ ├── CLAUDE_PROMPTS.md
│ │ │ │ ├── transfer_state.md
│ │ │ │ └── trying_the_context_agent.md
│ │ │ └── opportunities-of-knowledge-transfer.md
│ │ ├── knowledge-transfer-assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_artifact_loading.py
│ │ │ ├── test_inspector.py
│ │ │ ├── test_share_manager.py
│ │ │ ├── test_share_storage.py
│ │ │ └── test_team_mode.py
│ │ └── uv.lock
│ ├── prospector-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agents
│ │ │ │ ├── artifact_agent.py
│ │ │ │ ├── document
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── gc_draft_content_feedback_config.py
│ │ │ │ │ ├── gc_draft_outline_feedback_config.py
│ │ │ │ │ ├── guided_conversation.py
│ │ │ │ │ └── state.py
│ │ │ │ └── document_agent.py
│ │ │ ├── artifact_creation_extension
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _llm.py
│ │ │ │ ├── config.py
│ │ │ │ ├── document.py
│ │ │ │ ├── extension.py
│ │ │ │ ├── store.py
│ │ │ │ ├── test
│ │ │ │ │ ├── conftest.py
│ │ │ │ │ ├── evaluation.py
│ │ │ │ │ ├── test_completion_with_tools.py
│ │ │ │ │ └── test_extension.py
│ │ │ │ └── tools.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── form_fill_extension
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ ├── extension.py
│ │ │ │ ├── inspector.py
│ │ │ │ ├── state.py
│ │ │ │ └── steps
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _guided_conversation.py
│ │ │ │ ├── _llm.py
│ │ │ │ ├── acquire_form_step.py
│ │ │ │ ├── extract_form_fields_step.py
│ │ │ │ ├── fill_form_step.py
│ │ │ │ └── types.py
│ │ │ ├── helpers.py
│ │ │ ├── legacy.py
│ │ │ └── text_includes
│ │ │ ├── artifact_agent_enabled.md
│ │ │ ├── guardrails_prompt.txt
│ │ │ ├── guided_conversation_agent_enabled.md
│ │ │ └── skills_agent_enabled.md
│ │ ├── assistant.code-workspace
│ │ ├── gc_learnings
│ │ │ ├── gc_learnings.md
│ │ │ └── images
│ │ │ ├── gc_conversation_plan_fcn.png
│ │ │ ├── gc_conversation_plan_template.png
│ │ │ ├── gc_execute_plan_callstack.png
│ │ │ ├── gc_functions.png
│ │ │ ├── gc_generate_plan_callstack.png
│ │ │ ├── gc_get_resource_instructions.png
│ │ │ ├── gc_get_termination_instructions.png
│ │ │ ├── gc_kernel_arguments.png
│ │ │ ├── gc_plan_calls.png
│ │ │ ├── gc_termination_instructions.png
│ │ │ ├── sk_get_chat_message_contents.png
│ │ │ ├── sk_inner_get_chat_message_contents.png
│ │ │ ├── sk_send_request_prep.png
│ │ │ └── sk_send_request.png
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ └── skill-assistant
│ ├── .env.example
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── assistant
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── logging.py
│ │ ├── skill_assistant.py
│ │ ├── skill_engine_registry.py
│ │ ├── skill_event_mapper.py
│ │ ├── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ └── workbench_helpers.py
│ ├── assistant.code-workspace
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── tests
│ │ └── test_setup.py
│ └── uv.lock
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docs
│ ├── .vscode
│ │ └── settings.json
│ ├── ASSISTANT_CONFIG.md
│ ├── ASSISTANT_DEVELOPMENT_GUIDE.md
│ ├── CUSTOM_APP_REGISTRATION.md
│ ├── HOSTED_ASSISTANT_WITH_LOCAL_MCP_SERVERS.md
│ ├── images
│ │ ├── architecture-animation.gif
│ │ ├── configure_assistant.png
│ │ ├── conversation_canvas_open.png
│ │ ├── conversation_duplicate.png
│ │ ├── conversation_export.png
│ │ ├── conversation_share_dialog.png
│ │ ├── conversation_share_link.png
│ │ ├── dashboard_configured_view.png
│ │ ├── dashboard_view.png
│ │ ├── license_agreement.png
│ │ ├── message_bar.png
│ │ ├── message_inspection.png
│ │ ├── message_link.png
│ │ ├── new_prospector_assistant_dialog.png
│ │ ├── open_conversation_canvas.png
│ │ ├── prospector_example.png
│ │ ├── readme1.png
│ │ ├── readme2.png
│ │ ├── readme3.png
│ │ ├── rewind.png
│ │ ├── signin_page.png
│ │ └── splash_screen.png
│ ├── LOCAL_ASSISTANT_WITH_REMOTE_WORKBENCH.md
│ ├── SETUP_DEV_ENVIRONMENT.md
│ └── WORKBENCH_APP.md
├── examples
│ ├── dotnet
│ │ ├── .editorconfig
│ │ ├── dotnet-01-echo-bot
│ │ │ ├── appsettings.json
│ │ │ ├── dotnet-01-echo-bot.csproj
│ │ │ ├── MyAgent.cs
│ │ │ ├── MyAgentConfig.cs
│ │ │ ├── MyWorkbenchConnector.cs
│ │ │ ├── Program.cs
│ │ │ └── README.md
│ │ ├── dotnet-02-message-types-demo
│ │ │ ├── appsettings.json
│ │ │ ├── ConnectorExtensions.cs
│ │ │ ├── docs
│ │ │ │ ├── abc.png
│ │ │ │ ├── code.png
│ │ │ │ ├── config.png
│ │ │ │ ├── echo.png
│ │ │ │ ├── markdown.png
│ │ │ │ ├── mermaid.png
│ │ │ │ ├── reverse.png
│ │ │ │ └── safety-check.png
│ │ │ ├── dotnet-02-message-types-demo.csproj
│ │ │ ├── MyAgent.cs
│ │ │ ├── MyAgentConfig.cs
│ │ │ ├── MyWorkbenchConnector.cs
│ │ │ ├── Program.cs
│ │ │ └── README.md
│ │ └── dotnet-03-simple-chatbot
│ │ ├── appsettings.json
│ │ ├── ConnectorExtensions.cs
│ │ ├── dotnet-03-simple-chatbot.csproj
│ │ ├── MyAgent.cs
│ │ ├── MyAgentConfig.cs
│ │ ├── MyWorkbenchConnector.cs
│ │ ├── Program.cs
│ │ └── README.md
│ ├── Makefile
│ └── python
│ ├── python-01-echo-bot
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ └── config.py
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── python-02-simple-chatbot
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ └── python-03-multimodel-chatbot
│ ├── .env.example
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── assistant
│ │ ├── __init__.py
│ │ ├── chat.py
│ │ ├── config.py
│ │ ├── model_adapters.py
│ │ └── text_includes
│ │ └── guardrails_prompt.txt
│ ├── assistant.code-workspace
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ └── uv.lock
├── KNOWN_ISSUES.md
├── libraries
│ ├── dotnet
│ │ ├── .editorconfig
│ │ ├── pack.sh
│ │ ├── README.md
│ │ ├── SemanticWorkbench.sln
│ │ ├── SemanticWorkbench.sln.DotSettings
│ │ └── WorkbenchConnector
│ │ ├── AgentBase.cs
│ │ ├── AgentConfig
│ │ │ ├── AgentConfigBase.cs
│ │ │ ├── AgentConfigPropertyAttribute.cs
│ │ │ └── ConfigUtils.cs
│ │ ├── Constants.cs
│ │ ├── IAgentBase.cs
│ │ ├── icon.png
│ │ ├── Models
│ │ │ ├── Command.cs
│ │ │ ├── Conversation.cs
│ │ │ ├── ConversationEvent.cs
│ │ │ ├── DebugInfo.cs
│ │ │ ├── Insight.cs
│ │ │ ├── Message.cs
│ │ │ ├── MessageMetadata.cs
│ │ │ ├── Participant.cs
│ │ │ ├── Sender.cs
│ │ │ └── ServiceInfo.cs
│ │ ├── Storage
│ │ │ ├── AgentInfo.cs
│ │ │ ├── AgentServiceStorage.cs
│ │ │ └── IAgentServiceStorage.cs
│ │ ├── StringLoggingExtensions.cs
│ │ ├── Webservice.cs
│ │ ├── WorkbenchConfig.cs
│ │ ├── WorkbenchConnector.cs
│ │ └── WorkbenchConnector.csproj
│ ├── Makefile
│ └── python
│ ├── anthropic-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── anthropic_client
│ │ │ ├── __init__.py
│ │ │ ├── client.py
│ │ │ ├── config.py
│ │ │ └── messages.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── assistant-data-gen
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant_data_gen
│ │ │ ├── __init__.py
│ │ │ ├── assistant_api.py
│ │ │ ├── config.py
│ │ │ ├── gce
│ │ │ │ ├── __init__.py
│ │ │ │ ├── gce_agent.py
│ │ │ │ └── prompts.py
│ │ │ └── pydantic_ai_utils.py
│ │ ├── configs
│ │ │ └── document_assistant_example_config.yaml
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── scripts
│ │ │ ├── gce_simulation.py
│ │ │ └── generate_scenario.py
│ │ └── uv.lock
│ ├── assistant-drive
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ └── settings.json
│ │ ├── assistant_drive
│ │ │ ├── __init__.py
│ │ │ ├── drive.py
│ │ │ └── tests
│ │ │ └── test_basic.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── pytest.ini
│ │ ├── README.md
│ │ ├── usage.ipynb
│ │ └── uv.lock
│ ├── assistant-extensions
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── assistant_extensions
│ │ │ ├── __init__.py
│ │ │ ├── ai_clients
│ │ │ │ └── config.py
│ │ │ ├── artifacts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _artifacts.py
│ │ │ │ ├── _inspector.py
│ │ │ │ └── _model.py
│ │ │ ├── attachments
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _attachments.py
│ │ │ │ ├── _convert.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _shared.py
│ │ │ │ └── _summarizer.py
│ │ │ ├── chat_context_toolkit
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _config.py
│ │ │ │ ├── archive
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _archive.py
│ │ │ │ │ └── _summarizer.py
│ │ │ │ ├── message_history
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _history.py
│ │ │ │ │ └── _message.py
│ │ │ │ └── virtual_filesystem
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _archive_file_source.py
│ │ │ │ └── _attachments_file_source.py
│ │ │ ├── dashboard_card
│ │ │ │ ├── __init__.py
│ │ │ │ └── _dashboard_card.py
│ │ │ ├── document_editor
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _extension.py
│ │ │ │ ├── _inspector.py
│ │ │ │ └── _model.py
│ │ │ ├── mcp
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _assistant_file_resource_handler.py
│ │ │ │ ├── _client_utils.py
│ │ │ │ ├── _devtunnel.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _openai_utils.py
│ │ │ │ ├── _sampling_handler.py
│ │ │ │ ├── _tool_utils.py
│ │ │ │ └── _workbench_file_resource_handler.py
│ │ │ ├── navigator
│ │ │ │ ├── __init__.py
│ │ │ │ └── _navigator.py
│ │ │ └── workflows
│ │ │ ├── __init__.py
│ │ │ ├── _model.py
│ │ │ ├── _workflows.py
│ │ │ └── runners
│ │ │ └── _user_proxy.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ └── attachments
│ │ │ └── test_attachments.py
│ │ └── uv.lock
│ ├── chat-context-toolkit
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.sample
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── assets
│ │ │ ├── archive_v1.png
│ │ │ ├── history_v1.png
│ │ │ └── vfs_v1.png
│ │ ├── chat_context_toolkit
│ │ │ ├── __init__.py
│ │ │ ├── archive
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _archive_reader.py
│ │ │ │ ├── _archive_task_queue.py
│ │ │ │ ├── _state.py
│ │ │ │ ├── _types.py
│ │ │ │ └── summarization
│ │ │ │ ├── __init__.py
│ │ │ │ └── _summarizer.py
│ │ │ ├── history
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _budget.py
│ │ │ │ ├── _decorators.py
│ │ │ │ ├── _history.py
│ │ │ │ ├── _prioritize.py
│ │ │ │ ├── _types.py
│ │ │ │ └── tool_abbreviations
│ │ │ │ ├── __init__.py
│ │ │ │ └── _tool_abbreviations.py
│ │ │ └── virtual_filesystem
│ │ │ ├── __init__.py
│ │ │ ├── _types.py
│ │ │ ├── _virtual_filesystem.py
│ │ │ ├── README.md
│ │ │ └── tools
│ │ │ ├── __init__.py
│ │ │ ├── _ls_tool.py
│ │ │ ├── _tools.py
│ │ │ └── _view_tool.py
│ │ ├── CLAUDE.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ ├── archive
│ │ │ │ └── test_archive_reader.py
│ │ │ ├── history
│ │ │ │ ├── test_abbreviate_messages.py
│ │ │ │ ├── test_history.py
│ │ │ │ ├── test_pair_and_order_tool_messages.py
│ │ │ │ ├── test_prioritize.py
│ │ │ │ └── test_truncate_messages.py
│ │ │ └── virtual_filesystem
│ │ │ ├── test_virtual_filesystem.py
│ │ │ └── tools
│ │ │ ├── test_ls_tool.py
│ │ │ ├── test_tools.py
│ │ │ └── test_view_tool.py
│ │ └── uv.lock
│ ├── content-safety
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── content_safety
│ │ │ ├── __init__.py
│ │ │ ├── evaluators
│ │ │ │ ├── __init__.py
│ │ │ │ ├── azure_content_safety
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── config.py
│ │ │ │ │ └── evaluator.py
│ │ │ │ ├── config.py
│ │ │ │ ├── evaluator.py
│ │ │ │ └── openai_moderations
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ └── evaluator.py
│ │ │ └── README.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── events
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── events
│ │ │ ├── __init__.py
│ │ │ └── events.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── guided-conversation
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── guided_conversation
│ │ │ ├── __init__.py
│ │ │ ├── functions
│ │ │ │ ├── __init__.py
│ │ │ │ ├── conversation_plan.py
│ │ │ │ ├── execution.py
│ │ │ │ └── final_update_plan.py
│ │ │ ├── guided_conversation_agent.py
│ │ │ ├── plugins
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agenda.py
│ │ │ │ └── artifact.py
│ │ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── base_model_llm.py
│ │ │ ├── conversation_helpers.py
│ │ │ ├── openai_tool_calling.py
│ │ │ ├── plugin_helpers.py
│ │ │ └── resources.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── llm-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── llm_client
│ │ │ ├── __init__.py
│ │ │ └── model.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── Makefile
│ ├── mcp-extensions
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_extensions
│ │ │ ├── __init__.py
│ │ │ ├── _client_session.py
│ │ │ ├── _model.py
│ │ │ ├── _sampling.py
│ │ │ ├── _server_extensions.py
│ │ │ ├── _tool_utils.py
│ │ │ ├── llm
│ │ │ │ ├── __init__.py
│ │ │ │ ├── chat_completion.py
│ │ │ │ ├── helpers.py
│ │ │ │ ├── llm_types.py
│ │ │ │ ├── mcp_chat_completion.py
│ │ │ │ └── openai_chat_completion.py
│ │ │ └── server
│ │ │ ├── __init__.py
│ │ │ └── storage.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_tool_utils.py
│ │ └── uv.lock
│ ├── mcp-tunnel
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_tunnel
│ │ │ ├── __init__.py
│ │ │ ├── _devtunnel.py
│ │ │ ├── _dir.py
│ │ │ └── _main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── openai-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── openai_client
│ │ │ ├── __init__.py
│ │ │ ├── chat_driver
│ │ │ │ ├── __init__.py
│ │ │ │ ├── chat_driver.ipynb
│ │ │ │ ├── chat_driver.py
│ │ │ │ ├── message_history_providers
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── in_memory_message_history_provider.py
│ │ │ │ │ ├── local_message_history_provider.py
│ │ │ │ │ ├── message_history_provider.py
│ │ │ │ │ └── tests
│ │ │ │ │ └── formatted_instructions_test.py
│ │ │ │ └── README.md
│ │ │ ├── client.py
│ │ │ ├── completion.py
│ │ │ ├── config.py
│ │ │ ├── errors.py
│ │ │ ├── logging.py
│ │ │ ├── messages.py
│ │ │ ├── tokens.py
│ │ │ └── tools.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── test_command_parsing.py
│ │ │ ├── test_formatted_messages.py
│ │ │ ├── test_messages.py
│ │ │ └── test_tokens.py
│ │ └── uv.lock
│ ├── semantic-workbench-api-model
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── semantic_workbench_api_model
│ │ │ ├── __init__.py
│ │ │ ├── assistant_model.py
│ │ │ ├── assistant_service_client.py
│ │ │ ├── workbench_model.py
│ │ │ └── workbench_service_client.py
│ │ └── uv.lock
│ ├── semantic-workbench-assistant
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── semantic_workbench_assistant
│ │ │ ├── __init__.py
│ │ │ ├── assistant_app
│ │ │ │ ├── __init__.py
│ │ │ │ ├── assistant.py
│ │ │ │ ├── config.py
│ │ │ │ ├── content_safety.py
│ │ │ │ ├── context.py
│ │ │ │ ├── error.py
│ │ │ │ ├── export_import.py
│ │ │ │ ├── protocol.py
│ │ │ │ └── service.py
│ │ │ ├── assistant_service.py
│ │ │ ├── auth.py
│ │ │ ├── canonical.py
│ │ │ ├── command.py
│ │ │ ├── config.py
│ │ │ ├── logging_config.py
│ │ │ ├── settings.py
│ │ │ ├── start.py
│ │ │ └── storage.py
│ │ ├── tests
│ │ │ ├── conftest.py
│ │ │ ├── test_assistant_app.py
│ │ │ ├── test_canonical.py
│ │ │ ├── test_config.py
│ │ │ └── test_storage.py
│ │ └── uv.lock
│ └── skills
│ ├── .vscode
│ │ └── settings.json
│ ├── Makefile
│ ├── README.md
│ └── skill-library
│ ├── .vscode
│ │ └── settings.json
│ ├── docs
│ │ └── vs-recipe-tool.md
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── skill_library
│ │ ├── __init__.py
│ │ ├── chat_driver_helpers.py
│ │ ├── cli
│ │ │ ├── azure_openai.py
│ │ │ ├── conversation_history.py
│ │ │ ├── README.md
│ │ │ ├── run_routine.py
│ │ │ ├── settings.py
│ │ │ └── skill_logger.py
│ │ ├── engine.py
│ │ ├── llm_info.txt
│ │ ├── logging.py
│ │ ├── README.md
│ │ ├── routine_stack.py
│ │ ├── skill.py
│ │ ├── skills
│ │ │ ├── common
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── bing_search.py
│ │ │ │ ├── consolidate.py
│ │ │ │ ├── echo.py
│ │ │ │ ├── gather_context.py
│ │ │ │ ├── get_content_from_url.py
│ │ │ │ ├── gpt_complete.py
│ │ │ │ ├── select_user_intent.py
│ │ │ │ └── summarize.py
│ │ │ ├── eval
│ │ │ │ ├── __init__.py
│ │ │ │ ├── eval_skill.py
│ │ │ │ └── routines
│ │ │ │ └── eval.py
│ │ │ ├── fabric
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fabric_skill.py
│ │ │ │ ├── patterns
│ │ │ │ │ ├── agility_story
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── ai
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_answers
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_candidates
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_cfp_submission
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_claims
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_comments
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_debate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_email_headers
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_incident
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_interviewer_techniques
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_logs
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_malware
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_military_strategy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_mistakes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_paper
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_patent
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_personality
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_presentation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_product_feedback
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_proposition
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose_json
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose_pinker
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_risk
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_sales_call
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_spiritual_text
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_tech_impact
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_threat_report
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_threat_report_cmds
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_threat_report_trends
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── answer_interview_question
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── ask_secure_by_design_questions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── ask_uncle_duke
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── capture_thinkers_work
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── check_agreement
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── clean_text
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── coding_master
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── compare_and_contrast
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── convert_to_markdown
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_5_sentence_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_academic_paper
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ai_jobs_analysis
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_aphorisms
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_art_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_better_frame
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_coding_project
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_command
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_cyber_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_design_document
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_diy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_formal_email
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_git_diff_commit
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_graph_from_input
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_hormozi_offer
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_idea_compass
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_investigation_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_keynote
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_logo
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_markmap_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_mermaid_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_mermaid_visualization_for_github
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_micro_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_network_threat_landscape
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_newsletter_entry
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_npc
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_pattern
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_prd
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_prediction_block
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_quiz
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_reading_plan
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_recursive_outline
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_report_finding
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_rpg_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_security_update
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_show_intro
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_sigma_rules
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_story_explanation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_stride_threat_model
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_tags
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_threat_scenarios
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ttrc_graph
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ttrc_narrative
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_upgrade_pack
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_user_story
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_video_chapters
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── dialog_with_socrates
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── enrich_blog_post
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_code
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── explain_docs
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── explain_math
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_project
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_terms
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── export_data_as_csv
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_algorithm_update_recommendations
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_article_wisdom
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_book_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_book_recommendations
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_business_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_controversial_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_core_message
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_ctf_writeup
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_domains
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_extraordinary_claims
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_insights
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_insights_dm
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_instructions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_jokes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_latest_video
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_main_idea
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_most_redeeming_thing
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_patterns
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_poc
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_predictions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_primary_problem
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_primary_solution
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_product_features
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_questions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_recipe
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_recommendations
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_references
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_skills
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_song_meaning
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_sponsors
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_videoid
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_wisdom
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_agents
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_dm
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_nometa
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── find_hidden_message
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── find_logical_fallacies
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── get_wow_per_minute
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── get_youtube_rss
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── humanize
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_distinctions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_perspectives
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_relationships
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_systems
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_job_stories
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── improve_academic_writing
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── improve_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── improve_report_finding
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── improve_writing
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── judge_output
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── label_and_rate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── loaded
│ │ │ │ │ ├── md_callout
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── official_pattern_template
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── pattern_explanations.md
│ │ │ │ │ ├── prepare_7s_strategy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── provide_guidance
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_ai_response
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_ai_result
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_content
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── rate_value
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── raw_query
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── raycast
│ │ │ │ │ │ ├── capture_thinkers_work
│ │ │ │ │ │ ├── create_story_explanation
│ │ │ │ │ │ ├── extract_primary_problem
│ │ │ │ │ │ ├── extract_wisdom
│ │ │ │ │ │ └── yt
│ │ │ │ │ ├── recommend_artists
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── recommend_pipeline_upgrades
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── recommend_talkpanel_topics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── refine_design_document
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── review_design
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── sanitize_broken_html_to_markdown
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── show_fabric_options_markmap
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── solve_with_cot
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── stringify
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── suggest_pattern
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── summarize
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_debate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_git_changes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_git_diff
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_lecture
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_legislation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_meeting
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_micro
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_newsletter
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_paper
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_pull-requests
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_rpg_session
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_analyze_challenge_handling
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_check_metrics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_create_h3_career
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_create_opening_sentences
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_describe_life_outlook
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_extract_intro_sentences
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_extract_panel_topics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_blindspots
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_negative_thinking
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_neglected_goals
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_give_encouragement
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_red_team_thinking
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_threat_model_plans
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_visualize_mission_goals_projects
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_year_in_review
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── to_flashcards
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── transcribe_minutes
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── translate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── tweet
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_essay
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_hackerone_report
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_latex
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_micro_essay
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_nuclei_template_rule
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── write_pull-request
│ │ │ │ │ │ └── system.md
│ │ │ │ │ └── write_semgrep_rule
│ │ │ │ │ ├── system.md
│ │ │ │ │ └── user.md
│ │ │ │ └── routines
│ │ │ │ ├── list.py
│ │ │ │ ├── run.py
│ │ │ │ └── show.py
│ │ │ ├── guided_conversation
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agenda.py
│ │ │ │ ├── artifact_helpers.py
│ │ │ │ ├── chat_completions
│ │ │ │ │ ├── fix_agenda_error.py
│ │ │ │ │ ├── fix_artifact_error.py
│ │ │ │ │ ├── generate_agenda.py
│ │ │ │ │ ├── generate_artifact_updates.py
│ │ │ │ │ ├── generate_final_artifact.py
│ │ │ │ │ └── generate_message.py
│ │ │ │ ├── conversation_guides
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── acrostic_poem.py
│ │ │ │ │ ├── er_triage.py
│ │ │ │ │ ├── interview.py
│ │ │ │ │ └── patient_intake.py
│ │ │ │ ├── guide.py
│ │ │ │ ├── guided_conversation_skill.py
│ │ │ │ ├── logging.py
│ │ │ │ ├── message.py
│ │ │ │ ├── resources.py
│ │ │ │ ├── routines
│ │ │ │ │ └── guided_conversation.py
│ │ │ │ └── tests
│ │ │ │ ├── conftest.py
│ │ │ │ ├── test_artifact_helpers.py
│ │ │ │ ├── test_generate_agenda.py
│ │ │ │ ├── test_generate_artifact_updates.py
│ │ │ │ ├── test_generate_final_artifact.py
│ │ │ │ └── test_resource.py
│ │ │ ├── meta
│ │ │ │ ├── __init__.py
│ │ │ │ ├── meta_skill.py
│ │ │ │ ├── README.md
│ │ │ │ └── routines
│ │ │ │ └── generate_routine.py
│ │ │ ├── posix
│ │ │ │ ├── __init__.py
│ │ │ │ ├── posix_skill.py
│ │ │ │ ├── routines
│ │ │ │ │ ├── append_file.py
│ │ │ │ │ ├── cd.py
│ │ │ │ │ ├── ls.py
│ │ │ │ │ ├── make_home_dir.py
│ │ │ │ │ ├── mkdir.py
│ │ │ │ │ ├── mv.py
│ │ │ │ │ ├── pwd.py
│ │ │ │ │ ├── read_file.py
│ │ │ │ │ ├── rm.py
│ │ │ │ │ ├── touch.py
│ │ │ │ │ └── write_file.py
│ │ │ │ └── sandbox_shell.py
│ │ │ ├── README.md
│ │ │ ├── research
│ │ │ │ ├── __init__.py
│ │ │ │ ├── README.md
│ │ │ │ ├── research_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── answer_question_about_content.py
│ │ │ │ ├── evaluate_answer.py
│ │ │ │ ├── generate_research_plan.py
│ │ │ │ ├── generate_search_query.py
│ │ │ │ ├── update_research_plan.py
│ │ │ │ ├── web_research.py
│ │ │ │ └── web_search.py
│ │ │ ├── research2
│ │ │ │ ├── __init__.py
│ │ │ │ ├── README.md
│ │ │ │ ├── research_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── facts.py
│ │ │ │ ├── make_final_report.py
│ │ │ │ ├── research.py
│ │ │ │ ├── search_plan.py
│ │ │ │ ├── search.py
│ │ │ │ └── visit_pages.py
│ │ │ └── web_research
│ │ │ ├── __init__.py
│ │ │ ├── README.md
│ │ │ ├── research_skill.py
│ │ │ └── routines
│ │ │ ├── facts.py
│ │ │ ├── make_final_report.py
│ │ │ ├── research.py
│ │ │ ├── search_plan.py
│ │ │ ├── search.py
│ │ │ └── visit_pages.py
│ │ ├── tests
│ │ │ ├── test_common_skill.py
│ │ │ ├── test_integration.py
│ │ │ ├── test_routine_stack.py
│ │ │ ├── tst_skill
│ │ │ │ ├── __init__.py
│ │ │ │ └── routines
│ │ │ │ ├── __init__.py
│ │ │ │ └── a_routine.py
│ │ │ └── utilities
│ │ │ ├── test_find_template_vars.py
│ │ │ ├── test_make_arg_set.py
│ │ │ ├── test_paramspec.py
│ │ │ ├── test_parse_command_string.py
│ │ │ └── test_to_string.py
│ │ ├── types.py
│ │ ├── usage.py
│ │ └── utilities.py
│ └── uv.lock
├── LICENSE
├── Makefile
├── mcp-servers
│ ├── ai-assist-content
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── mcp-example-brave-search.md
│ │ ├── mcp-fastmcp-typescript-README.md
│ │ ├── mcp-llms-full.txt
│ │ ├── mcp-metadata-tips.md
│ │ ├── mcp-python-sdk-README.md
│ │ ├── mcp-typescript-sdk-README.md
│ │ ├── pydanticai-documentation.md
│ │ ├── pydanticai-example-question-graph.md
│ │ ├── pydanticai-example-weather.md
│ │ ├── pydanticai-tutorial.md
│ │ └── README.md
│ ├── Makefile
│ ├── mcp-server-bing-search
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_bing_search
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── clean_website.py
│ │ │ │ └── filter_links.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── tools.py
│ │ │ ├── types.py
│ │ │ ├── utils.py
│ │ │ └── web
│ │ │ ├── __init__.py
│ │ │ ├── get_content.py
│ │ │ ├── llm_processing.py
│ │ │ ├── process_website.py
│ │ │ └── search_bing.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_tools.py
│ │ └── uv.lock
│ ├── mcp-server-bundle
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_bundle
│ │ │ ├── __init__.py
│ │ │ └── main.py
│ │ ├── pyinstaller.spec
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-filesystem
│ │ ├── .env.example
│ │ ├── .github
│ │ │ └── workflows
│ │ │ └── ci.yml
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_filesystem
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_filesystem.py
│ │ └── uv.lock
│ ├── mcp-server-filesystem-edit
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── data
│ │ │ ├── attachments
│ │ │ │ ├── Daily Game Ideas.txt
│ │ │ │ ├── Frontend Framework Proposal.txt
│ │ │ │ ├── ReDoodle.txt
│ │ │ │ └── Research Template.tex
│ │ │ ├── test_cases.yaml
│ │ │ └── transcripts
│ │ │ ├── transcript_research_simple.md
│ │ │ ├── transcript_Startup_Idea_1_202503031513.md
│ │ │ ├── transcript_Startup_Idea_2_202503031659.md
│ │ │ └── transcript_Web_Frontends_202502281551.md
│ │ ├── Makefile
│ │ ├── mcp_server_filesystem_edit
│ │ │ ├── __init__.py
│ │ │ ├── app_handling
│ │ │ │ ├── __init__.py
│ │ │ │ ├── excel.py
│ │ │ │ ├── miktex.py
│ │ │ │ ├── office_common.py
│ │ │ │ ├── powerpoint.py
│ │ │ │ └── word.py
│ │ │ ├── config.py
│ │ │ ├── evals
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common.py
│ │ │ │ ├── run_comments.py
│ │ │ │ ├── run_edit.py
│ │ │ │ └── run_ppt_edit.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── add_comments.py
│ │ │ │ ├── analyze_comments.py
│ │ │ │ ├── latex_edit.py
│ │ │ │ ├── markdown_draft.py
│ │ │ │ ├── markdown_edit.py
│ │ │ │ └── powerpoint_edit.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── add_comments.py
│ │ │ │ ├── edit_adapters
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── common.py
│ │ │ │ │ ├── latex.py
│ │ │ │ │ └── markdown.py
│ │ │ │ ├── edit.py
│ │ │ │ └── helpers.py
│ │ │ └── types.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── app_handling
│ │ │ │ ├── test_excel.py
│ │ │ │ ├── test_miktext.py
│ │ │ │ ├── test_office_common.py
│ │ │ │ ├── test_powerpoint.py
│ │ │ │ └── test_word.py
│ │ │ ├── conftest.py
│ │ │ └── tools
│ │ │ └── edit_adapters
│ │ │ ├── test_latex.py
│ │ │ └── test_markdown.py
│ │ └── uv.lock
│ ├── mcp-server-fusion
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── AddInIcon.svg
│ │ ├── config.py
│ │ ├── FusionMCPServerAddIn.manifest
│ │ ├── FusionMCPServerAddIn.py
│ │ ├── mcp_server_fusion
│ │ │ ├── __init__.py
│ │ │ ├── fusion_mcp_server.py
│ │ │ ├── fusion_utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── event_utils.py
│ │ │ │ ├── general_utils.py
│ │ │ │ └── tool_utils.py
│ │ │ ├── mcp_tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fusion_3d_operation.py
│ │ │ │ ├── fusion_geometry.py
│ │ │ │ ├── fusion_pattern.py
│ │ │ │ └── fusion_sketch.py
│ │ │ └── vendor
│ │ │ └── README.md
│ │ ├── README.md
│ │ └── requirements.txt
│ ├── mcp-server-giphy
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── giphy_search.py
│ │ │ ├── sampling.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ └── utils.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-memory-user-bio
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_memory_user_bio
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-memory-whiteboard
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_memory_whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-office
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── build.sh
│ │ ├── data
│ │ │ ├── attachments
│ │ │ │ ├── Daily Game Ideas.txt
│ │ │ │ ├── Frontend Framework Proposal.txt
│ │ │ │ └── ReDoodle.txt
│ │ │ └── word
│ │ │ ├── test_cases.yaml
│ │ │ └── transcripts
│ │ │ ├── transcript_Startup_Idea_1_202503031513.md
│ │ │ ├── transcript_Startup_Idea_2_202503031659.md
│ │ │ └── transcript_Web_Frontends_202502281551.md
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── app_interaction
│ │ │ │ ├── __init__.py
│ │ │ │ ├── excel_editor.py
│ │ │ │ ├── powerpoint_editor.py
│ │ │ │ └── word_editor.py
│ │ │ ├── config.py
│ │ │ ├── constants.py
│ │ │ ├── evals
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common.py
│ │ │ │ ├── run_comment_analysis.py
│ │ │ │ ├── run_feedback.py
│ │ │ │ └── run_markdown_edit.py
│ │ │ ├── helpers.py
│ │ │ ├── markdown_edit
│ │ │ │ ├── __init__.py
│ │ │ │ ├── comment_analysis.py
│ │ │ │ ├── feedback_step.py
│ │ │ │ ├── markdown_edit.py
│ │ │ │ └── utils.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── comment_analysis.py
│ │ │ │ ├── feedback.py
│ │ │ │ ├── markdown_draft.py
│ │ │ │ └── markdown_edit.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ └── types.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_word_editor.py
│ │ └── uv.lock
│ ├── mcp-server-open-deep-research
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── libs
│ │ │ │ └── open_deep_research
│ │ │ │ ├── cookies.py
│ │ │ │ ├── mdconvert.py
│ │ │ │ ├── run_agents.py
│ │ │ │ ├── text_inspector_tool.py
│ │ │ │ ├── text_web_browser.py
│ │ │ │ └── visual_qa.py
│ │ │ ├── open_deep_research.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-open-deep-research-clone
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_open_deep_research_clone
│ │ │ ├── __init__.py
│ │ │ ├── azure_openai.py
│ │ │ ├── config.py
│ │ │ ├── logging.py
│ │ │ ├── sampling.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── utils.py
│ │ │ └── web_research.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ └── test_open_deep_research_clone.py
│ │ └── uv.lock
│ ├── mcp-server-template
│ │ ├── .taplo.toml
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── copier.yml
│ │ ├── README.md
│ │ └── template
│ │ └── {{ project_slug }}
│ │ ├── .env.example.jinja
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json.jinja
│ │ │ └── settings.json
│ │ ├── {{ module_name }}
│ │ │ ├── __init__.py
│ │ │ ├── config.py.jinja
│ │ │ ├── server.py.jinja
│ │ │ └── start.py.jinja
│ │ ├── Makefile.jinja
│ │ ├── pyproject.toml.jinja
│ │ └── README.md.jinja
│ ├── mcp-server-vscode
│ │ ├── .eslintrc.cjs
│ │ ├── .gitignore
│ │ ├── .npmrc
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ ├── launch.json
│ │ │ ├── settings.json
│ │ │ └── tasks.json
│ │ ├── .vscode-test.mjs
│ │ ├── .vscodeignore
│ │ ├── ASSISTANT_BOOTSTRAP.md
│ │ ├── eslint.config.mjs
│ │ ├── images
│ │ │ └── icon.png
│ │ ├── LICENSE
│ │ ├── Makefile
│ │ ├── out
│ │ │ ├── extension.d.ts
│ │ │ ├── extension.js
│ │ │ ├── test
│ │ │ │ ├── extension.test.d.ts
│ │ │ │ └── extension.test.js
│ │ │ ├── tools
│ │ │ │ ├── code_checker.d.ts
│ │ │ │ ├── code_checker.js
│ │ │ │ ├── debug_tools.d.ts
│ │ │ │ ├── debug_tools.js
│ │ │ │ ├── focus_editor.d.ts
│ │ │ │ ├── focus_editor.js
│ │ │ │ ├── search_symbol.d.ts
│ │ │ │ └── search_symbol.js
│ │ │ └── utils
│ │ │ ├── port.d.ts
│ │ │ └── port.js
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── prettier.config.cjs
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── extension.d.ts
│ │ │ ├── extension.ts
│ │ │ ├── test
│ │ │ │ ├── extension.test.d.ts
│ │ │ │ └── extension.test.ts
│ │ │ ├── tools
│ │ │ │ ├── code_checker.d.ts
│ │ │ │ ├── code_checker.ts
│ │ │ │ ├── debug_tools.d.ts
│ │ │ │ ├── debug_tools.ts
│ │ │ │ ├── focus_editor.d.ts
│ │ │ │ ├── focus_editor.ts
│ │ │ │ ├── search_symbol.d.ts
│ │ │ │ └── search_symbol.ts
│ │ │ └── utils
│ │ │ ├── port.d.ts
│ │ │ └── port.ts
│ │ ├── tsconfig.json
│ │ ├── tsconfig.tsbuildinfo
│ │ ├── vsc-extension-quickstart.md
│ │ └── webpack.config.js
│ └── mcp-server-web-research
│ ├── .env.example
│ ├── .gitignore
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── Makefile
│ ├── mcp_server_web_research
│ │ ├── __init__.py
│ │ ├── azure_openai.py
│ │ ├── config.py
│ │ ├── logging.py
│ │ ├── sampling.py
│ │ ├── server.py
│ │ ├── start.py
│ │ ├── utils.py
│ │ └── web_research.py
│ ├── pyproject.toml
│ ├── README.md
│ ├── test
│ │ └── test_web_research.py
│ └── uv.lock
├── README.md
├── RESPONSIBLE_AI_FAQ.md
├── ruff.toml
├── SECURITY.md
├── semantic-workbench.code-workspace
├── SUPPORT.md
├── tools
│ ├── build_ai_context_files.py
│ ├── collect_files.py
│ ├── docker
│ │ ├── azure_website_sshd.conf
│ │ ├── docker-entrypoint.sh
│ │ ├── Dockerfile.assistant
│ │ └── Dockerfile.mcp-server
│ ├── makefiles
│ │ ├── docker-assistant.mk
│ │ ├── docker-mcp-server.mk
│ │ ├── docker.mk
│ │ ├── python.mk
│ │ ├── recursive.mk
│ │ └── shell.mk
│ ├── reset-service-data.ps1
│ ├── reset-service-data.sh
│ ├── run-app.ps1
│ ├── run-app.sh
│ ├── run-canonical-agent.ps1
│ ├── run-canonical-agent.sh
│ ├── run-dotnet-examples-with-aspire.sh
│ ├── run-python-example1.sh
│ ├── run-python-example2.ps1
│ ├── run-python-example2.sh
│ ├── run-service.ps1
│ ├── run-service.sh
│ ├── run-workbench-chatbot.ps1
│ └── run-workbench-chatbot.sh
├── workbench-app
│ ├── .dockerignore
│ ├── .env.example
│ ├── .eslintrc.cjs
│ ├── .gitignore
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── docker-entrypoint.sh
│ ├── Dockerfile
│ ├── docs
│ │ ├── APP_DEV_GUIDE.md
│ │ ├── MESSAGE_METADATA.md
│ │ ├── MESSAGE_TYPES.md
│ │ ├── README.md
│ │ └── STATE_INSPECTORS.md
│ ├── index.html
│ ├── Makefile
│ ├── nginx.conf
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── prettier.config.cjs
│ ├── public
│ │ └── assets
│ │ ├── background-1-upscaled.jpg
│ │ ├── background-1-upscaled.png
│ │ ├── background-1.jpg
│ │ ├── background-1.png
│ │ ├── background-2.jpg
│ │ ├── background-2.png
│ │ ├── experimental-feature.jpg
│ │ ├── favicon.svg
│ │ ├── workflow-designer-1.jpg
│ │ ├── workflow-designer-outlets.jpg
│ │ ├── workflow-designer-states.jpg
│ │ └── workflow-designer-transitions.jpg
│ ├── README.md
│ ├── run.sh
│ ├── src
│ │ ├── components
│ │ │ ├── App
│ │ │ │ ├── AppFooter.tsx
│ │ │ │ ├── AppHeader.tsx
│ │ │ │ ├── AppMenu.tsx
│ │ │ │ ├── AppView.tsx
│ │ │ │ ├── CodeLabel.tsx
│ │ │ │ ├── CommandButton.tsx
│ │ │ │ ├── ConfirmLeave.tsx
│ │ │ │ ├── ContentExport.tsx
│ │ │ │ ├── ContentImport.tsx
│ │ │ │ ├── CopyButton.tsx
│ │ │ │ ├── DialogControl.tsx
│ │ │ │ ├── DynamicIframe.tsx
│ │ │ │ ├── ErrorListFromAppState.tsx
│ │ │ │ ├── ErrorMessageBar.tsx
│ │ │ │ ├── ExperimentalNotice.tsx
│ │ │ │ ├── FormWidgets
│ │ │ │ │ ├── BaseModelEditorWidget.tsx
│ │ │ │ │ ├── CustomizedArrayFieldTemplate.tsx
│ │ │ │ │ ├── CustomizedFieldTemplate.tsx
│ │ │ │ │ ├── CustomizedObjectFieldTemplate.tsx
│ │ │ │ │ └── InspectableWidget.tsx
│ │ │ │ ├── LabelWithDescription.tsx
│ │ │ │ ├── Loading.tsx
│ │ │ │ ├── MenuItemControl.tsx
│ │ │ │ ├── MiniControl.tsx
│ │ │ │ ├── MyAssistantServiceRegistrations.tsx
│ │ │ │ ├── MyItemsManager.tsx
│ │ │ │ ├── OverflowMenu.tsx
│ │ │ │ ├── PresenceMotionList.tsx
│ │ │ │ ├── ProfileSettings.tsx
│ │ │ │ └── TooltipWrapper.tsx
│ │ │ ├── Assistants
│ │ │ │ ├── ApplyConfigButton.tsx
│ │ │ │ ├── AssistantAdd.tsx
│ │ │ │ ├── AssistantConfigExportButton.tsx
│ │ │ │ ├── AssistantConfigImportButton.tsx
│ │ │ │ ├── AssistantConfiguration.tsx
│ │ │ │ ├── AssistantConfigure.tsx
│ │ │ │ ├── AssistantCreate.tsx
│ │ │ │ ├── AssistantDelete.tsx
│ │ │ │ ├── AssistantDuplicate.tsx
│ │ │ │ ├── AssistantExport.tsx
│ │ │ │ ├── AssistantImport.tsx
│ │ │ │ ├── AssistantRemove.tsx
│ │ │ │ ├── AssistantRename.tsx
│ │ │ │ ├── AssistantServiceInfo.tsx
│ │ │ │ ├── AssistantServiceMetadata.tsx
│ │ │ │ └── MyAssistants.tsx
│ │ │ ├── AssistantServiceRegistrations
│ │ │ │ ├── AssistantServiceRegistrationApiKey.tsx
│ │ │ │ ├── AssistantServiceRegistrationApiKeyReset.tsx
│ │ │ │ ├── AssistantServiceRegistrationCreate.tsx
│ │ │ │ └── AssistantServiceRegistrationRemove.tsx
│ │ │ ├── Conversations
│ │ │ │ ├── Canvas
│ │ │ │ │ ├── AssistantCanvas.tsx
│ │ │ │ │ ├── AssistantCanvasList.tsx
│ │ │ │ │ ├── AssistantInspector.tsx
│ │ │ │ │ ├── AssistantInspectorList.tsx
│ │ │ │ │ └── ConversationCanvas.tsx
│ │ │ │ ├── ChatInputPlugins
│ │ │ │ │ ├── ClearEditorPlugin.tsx
│ │ │ │ │ ├── LexicalMenu.ts
│ │ │ │ │ ├── ParticipantMentionsPlugin.tsx
│ │ │ │ │ ├── TypeaheadMenuPlugin.css
│ │ │ │ │ └── TypeaheadMenuPlugin.tsx
│ │ │ │ ├── ContentRenderers
│ │ │ │ │ ├── CodeContentRenderer.tsx
│ │ │ │ │ ├── ContentListRenderer.tsx
│ │ │ │ │ ├── ContentRenderer.tsx
│ │ │ │ │ ├── DiffRenderer.tsx
│ │ │ │ │ ├── HtmlContentRenderer.tsx
│ │ │ │ │ ├── JsonSchemaContentRenderer.tsx
│ │ │ │ │ ├── MarkdownContentRenderer.tsx
│ │ │ │ │ ├── MarkdownEditorRenderer.tsx
│ │ │ │ │ ├── MermaidContentRenderer.tsx
│ │ │ │ │ ├── MusicABCContentRenderer.css
│ │ │ │ │ └── MusicABCContentRenderer.tsx
│ │ │ │ ├── ContextWindow.tsx
│ │ │ │ ├── ConversationCreate.tsx
│ │ │ │ ├── ConversationDuplicate.tsx
│ │ │ │ ├── ConversationExport.tsx
│ │ │ │ ├── ConversationFileIcon.tsx
│ │ │ │ ├── ConversationRemove.tsx
│ │ │ │ ├── ConversationRename.tsx
│ │ │ │ ├── ConversationShare.tsx
│ │ │ │ ├── ConversationShareCreate.tsx
│ │ │ │ ├── ConversationShareList.tsx
│ │ │ │ ├── ConversationShareView.tsx
│ │ │ │ ├── ConversationsImport.tsx
│ │ │ │ ├── ConversationTranscript.tsx
│ │ │ │ ├── DebugInspector.tsx
│ │ │ │ ├── FileItem.tsx
│ │ │ │ ├── FileList.tsx
│ │ │ │ ├── InputAttachmentList.tsx
│ │ │ │ ├── InputOptionsControl.tsx
│ │ │ │ ├── InteractHistory.tsx
│ │ │ │ ├── InteractInput.tsx
│ │ │ │ ├── Message
│ │ │ │ │ ├── AttachmentSection.tsx
│ │ │ │ │ ├── ContentRenderer.tsx
│ │ │ │ │ ├── ContentSafetyNotice.tsx
│ │ │ │ │ ├── InteractMessage.tsx
│ │ │ │ │ ├── MessageActions.tsx
│ │ │ │ │ ├── MessageBase.tsx
│ │ │ │ │ ├── MessageBody.tsx
│ │ │ │ │ ├── MessageContent.tsx
│ │ │ │ │ ├── MessageFooter.tsx
│ │ │ │ │ ├── MessageHeader.tsx
│ │ │ │ │ ├── NotificationAccordion.tsx
│ │ │ │ │ └── ToolResultMessage.tsx
│ │ │ │ ├── MessageDelete.tsx
│ │ │ │ ├── MessageLink.tsx
│ │ │ │ ├── MyConversations.tsx
│ │ │ │ ├── MyShares.tsx
│ │ │ │ ├── ParticipantAvatar.tsx
│ │ │ │ ├── ParticipantAvatarGroup.tsx
│ │ │ │ ├── ParticipantItem.tsx
│ │ │ │ ├── ParticipantList.tsx
│ │ │ │ ├── ParticipantStatus.tsx
│ │ │ │ ├── RewindConversation.tsx
│ │ │ │ ├── ShareRemove.tsx
│ │ │ │ ├── SpeechButton.tsx
│ │ │ │ └── ToolCalls.tsx
│ │ │ └── FrontDoor
│ │ │ ├── Chat
│ │ │ │ ├── AssistantDrawer.tsx
│ │ │ │ ├── CanvasDrawer.tsx
│ │ │ │ ├── Chat.tsx
│ │ │ │ ├── ChatCanvas.tsx
│ │ │ │ ├── ChatControls.tsx
│ │ │ │ └── ConversationDrawer.tsx
│ │ │ ├── Controls
│ │ │ │ ├── AssistantCard.tsx
│ │ │ │ ├── AssistantSelector.tsx
│ │ │ │ ├── AssistantServiceSelector.tsx
│ │ │ │ ├── ConversationItem.tsx
│ │ │ │ ├── ConversationList.tsx
│ │ │ │ ├── ConversationListOptions.tsx
│ │ │ │ ├── NewConversationButton.tsx
│ │ │ │ ├── NewConversationForm.tsx
│ │ │ │ └── SiteMenuButton.tsx
│ │ │ ├── GlobalContent.tsx
│ │ │ └── MainContent.tsx
│ │ ├── Constants.ts
│ │ ├── global.d.ts
│ │ ├── index.css
│ │ ├── libs
│ │ │ ├── AppStorage.ts
│ │ │ ├── AuthHelper.ts
│ │ │ ├── EventSubscriptionManager.ts
│ │ │ ├── Theme.ts
│ │ │ ├── useAssistantCapabilities.ts
│ │ │ ├── useChatCanvasController.ts
│ │ │ ├── useConversationEvents.ts
│ │ │ ├── useConversationUtility.ts
│ │ │ ├── useCreateConversation.ts
│ │ │ ├── useDebugComponentLifecycle.ts
│ │ │ ├── useDragAndDrop.ts
│ │ │ ├── useEnvironment.ts
│ │ │ ├── useExportUtility.ts
│ │ │ ├── useHistoryUtility.ts
│ │ │ ├── useKeySequence.ts
│ │ │ ├── useMediaQuery.ts
│ │ │ ├── useMicrosoftGraph.ts
│ │ │ ├── useNotify.tsx
│ │ │ ├── useParticipantUtility.tsx
│ │ │ ├── useSiteUtility.ts
│ │ │ ├── useWorkbenchEventSource.ts
│ │ │ ├── useWorkbenchService.ts
│ │ │ └── Utility.ts
│ │ ├── main.tsx
│ │ ├── models
│ │ │ ├── Assistant.ts
│ │ │ ├── AssistantCapability.ts
│ │ │ ├── AssistantServiceInfo.ts
│ │ │ ├── AssistantServiceRegistration.ts
│ │ │ ├── Config.ts
│ │ │ ├── Conversation.ts
│ │ │ ├── ConversationFile.ts
│ │ │ ├── ConversationMessage.ts
│ │ │ ├── ConversationMessageDebug.ts
│ │ │ ├── ConversationParticipant.ts
│ │ │ ├── ConversationShare.ts
│ │ │ ├── ConversationShareRedemption.ts
│ │ │ ├── ConversationState.ts
│ │ │ ├── ConversationStateDescription.ts
│ │ │ ├── ServiceEnvironment.ts
│ │ │ └── User.ts
│ │ ├── redux
│ │ │ ├── app
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── rtkQueryErrorLogger.ts
│ │ │ │ └── store.ts
│ │ │ └── features
│ │ │ ├── app
│ │ │ │ ├── appSlice.ts
│ │ │ │ └── AppState.ts
│ │ │ ├── chatCanvas
│ │ │ │ ├── chatCanvasSlice.ts
│ │ │ │ └── ChatCanvasState.ts
│ │ │ ├── localUser
│ │ │ │ ├── localUserSlice.ts
│ │ │ │ └── LocalUserState.ts
│ │ │ └── settings
│ │ │ ├── settingsSlice.ts
│ │ │ └── SettingsState.ts
│ │ ├── Root.tsx
│ │ ├── routes
│ │ │ ├── AcceptTerms.tsx
│ │ │ ├── AssistantEditor.tsx
│ │ │ ├── AssistantServiceRegistrationEditor.tsx
│ │ │ ├── Dashboard.tsx
│ │ │ ├── ErrorPage.tsx
│ │ │ ├── FrontDoor.tsx
│ │ │ ├── Login.tsx
│ │ │ ├── Settings.tsx
│ │ │ ├── ShareRedeem.tsx
│ │ │ └── Shares.tsx
│ │ ├── services
│ │ │ └── workbench
│ │ │ ├── assistant.ts
│ │ │ ├── assistantService.ts
│ │ │ ├── conversation.ts
│ │ │ ├── file.ts
│ │ │ ├── index.ts
│ │ │ ├── participant.ts
│ │ │ ├── share.ts
│ │ │ ├── state.ts
│ │ │ └── workbench.ts
│ │ └── vite-env.d.ts
│ ├── tools
│ │ └── filtered-ts-prune.cjs
│ ├── tsconfig.json
│ └── vite.config.ts
└── workbench-service
├── .env.example
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── alembic.ini
├── devdb
│ ├── docker-compose.yaml
│ └── postgresql-init.sh
├── Dockerfile
├── Makefile
├── migrations
│ ├── env.py
│ ├── README
│ ├── script.py.mako
│ └── versions
│ ├── 2024_09_19_000000_69dcda481c14_init.py
│ ├── 2024_09_19_190029_dffb1d7e219a_file_version_filename.py
│ ├── 2024_09_20_204130_b29524775484_share.py
│ ├── 2024_10_30_231536_039bec8edc33_index_message_type.py
│ ├── 2024_11_04_204029_5149c7fb5a32_conversationmessagedebug.py
│ ├── 2024_11_05_015124_245baf258e11_double_check_debugs.py
│ ├── 2024_11_25_191056_a106de176394_drop_workflow.py
│ ├── 2025_03_19_140136_aaaf792d4d72_set_user_title_set.py
│ ├── 2025_03_21_153250_3763629295ad_add_assistant_template_id.py
│ ├── 2025_05_19_163613_b2f86e981885_delete_context_transfer_assistants.py
│ └── 2025_06_18_174328_503c739152f3_delete_knowlege_transfer_assistants.py
├── pyproject.toml
├── README.md
├── semantic_workbench_service
│ ├── __init__.py
│ ├── api.py
│ ├── assistant_api_key.py
│ ├── auth.py
│ ├── azure_speech.py
│ ├── config.py
│ ├── controller
│ │ ├── __init__.py
│ │ ├── assistant_service_client_pool.py
│ │ ├── assistant_service_registration.py
│ │ ├── assistant.py
│ │ ├── conversation_share.py
│ │ ├── conversation.py
│ │ ├── convert.py
│ │ ├── exceptions.py
│ │ ├── export_import.py
│ │ ├── file.py
│ │ ├── participant.py
│ │ └── user.py
│ ├── db.py
│ ├── event.py
│ ├── files.py
│ ├── logging_config.py
│ ├── middleware.py
│ ├── query.py
│ ├── service_user_principals.py
│ ├── service.py
│ └── start.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── docker-compose.yaml
│ ├── test_assistant_api_key.py
│ ├── test_files.py
│ ├── test_integration.py
│ ├── test_middleware.py
│ ├── test_migrations.py
│ ├── test_workbench_service.py
│ └── types.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/.github/workflows/workbench-app.yml:
--------------------------------------------------------------------------------
```yaml
# Docs for the Azure Web Apps Deploy action: https://github.com/Azure/webapps-deploy
# More GitHub Actions for Azure: https://github.com/Azure/actions
name: workbench-app continuous integration
permissions:
contents: read
pull-requests: write
on:
push:
branches:
- main
paths:
- "workbench-app/**"
- ".github/workflows/workbench-app.yml"
pull_request:
branches:
- main
paths:
- "workbench-app/**"
- ".github/workflows/workbench-app.yml"
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
if: ${{ github.ref != 'refs/heads/main' }}
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v4
name: Install pnpm
with:
version: 9
run_install: false
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: "pnpm"
cache-dependency-path: workbench-app/pnpm-lock.yaml
- name: pnpm install
working-directory: ./workbench-app
run: pnpm install --frozen-lockfile
- name: pnpm build
env:
NODE_OPTIONS: "--max_old_space_size=8192"
VITE_SEMANTIC_WORKBENCH_AUTHORITY: ${{ secrets.VITE_SEMANTIC_WORKBENCH_AUTHORITY }}
VITE_SEMANTIC_WORKBENCH_CLIENT_ID: ${{ secrets.VITE_SEMANTIC_WORKBENCH_CLIENT_ID }}
VITE_SEMANTIC_WORKBENCH_SERVICE_URL: ${{ secrets.VITE_SEMANTIC_WORKBENCH_SERVICE_URL }}
working-directory: ./workbench-app
run: pnpm run build
build-main:
runs-on: ubuntu-latest
if: ${{ github.ref == 'refs/heads/main' }}
environment:
name: production
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v4
name: Install pnpm
with:
version: 9
run_install: false
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: "pnpm"
cache-dependency-path: workbench-app/pnpm-lock.yaml
- name: pnpm install
working-directory: ./workbench-app
run: pnpm install --frozen-lockfile
- name: pnpm build
env:
NODE_OPTIONS: "--max_old_space_size=8192"
VITE_SEMANTIC_WORKBENCH_AUTHORITY: ${{ secrets.VITE_SEMANTIC_WORKBENCH_AUTHORITY }}
VITE_SEMANTIC_WORKBENCH_CLIENT_ID: ${{ secrets.VITE_SEMANTIC_WORKBENCH_CLIENT_ID }}
VITE_SEMANTIC_WORKBENCH_SERVICE_URL: ${{ secrets.VITE_SEMANTIC_WORKBENCH_SERVICE_URL }}
working-directory: ./workbench-app
run: pnpm run build
- name: Zip artifact for deployment
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
run: zip semantic-workbench-release.zip . -r
working-directory: ./workbench-app/build
- name: Upload artifact for deployment job
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
uses: actions/upload-artifact@v4
with:
name: semantic-workbench-node-app
path: ./workbench-app/build/semantic-workbench-release.zip
deploy:
runs-on: ubuntu-latest
if: ${{ (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/main' && vars.DEPLOYMENT_ENABLED == 'true' }}
needs: build-main
environment:
name: production
permissions:
id-token: write #This is required for requesting the JWT
steps:
- name: Download artifact from build job
uses: actions/download-artifact@v4
with:
name: semantic-workbench-node-app
- name: Unzip artifact for deployment
run: unzip semantic-workbench-release.zip
- name: Login to Azure
uses: azure/login@v2
with:
client-id: ${{ secrets.AZURE_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: "Deploy to Azure Web App"
id: deploy-to-webapp
uses: azure/webapps-deploy@v2
with:
app-name: ${{ secrets.AZURE_WORKBENCH_APP_SERVICE_NAME }}
slot-name: "Production"
package: .
```
--------------------------------------------------------------------------------
/workbench-app/src/services/workbench/state.ts:
--------------------------------------------------------------------------------
```typescript
import { Config } from '../../models/Config';
import { ConversationState } from '../../models/ConversationState';
import { ConversationStateDescription } from '../../models/ConversationStateDescription';
import { workbenchApi } from './workbench';
const stateApi = workbenchApi.injectEndpoints({
endpoints: (builder) => ({
getConfig: builder.query<Config, { assistantId: string }>({
query: ({ assistantId }) => `/assistants/${assistantId}/config`,
providesTags: ['Config'],
transformResponse: (response: any) => transformResponseToConfig(response),
}),
updateConfig: builder.mutation<Config, { assistantId: string; config: Config }>({
query: (body) => ({
url: `/assistants/${body.assistantId}/config`,
method: 'PUT',
body: transformConfigForRequest(body.config),
}),
invalidatesTags: ['Config', 'State'],
transformResponse: (response: any) => transformResponseToConfig(response),
}),
getConversationStateDescriptions: builder.query<
ConversationStateDescription[],
{ assistantId: string; conversationId: string }
>({
query: ({ assistantId, conversationId }) =>
`/assistants/${assistantId}/conversations/${conversationId}/states`,
providesTags: ['State'],
transformResponse: (response: any) =>
response.states.map((stateDescription: any) => ({
id: stateDescription.id,
displayName: stateDescription.display_name,
description: stateDescription.description,
enabled: stateDescription.enabled,
})),
}),
getConversationState: builder.query<
ConversationState,
{ assistantId: string; conversationId: string; stateId: string }
>({
query: ({ assistantId, conversationId, stateId }) =>
`/assistants/${assistantId}/conversations/${conversationId}/states/${stateId}`,
providesTags: ['State'],
transformResponse: (response: any, _meta, { stateId }) =>
transformResponseToConversationState(response, stateId),
}),
updateConversationState: builder.mutation<
ConversationState,
{ assistantId: string; conversationId: string; state: ConversationState }
>({
query: (body) => ({
url: `/assistants/${body.assistantId}/conversations/${body.conversationId}/states/${body.state.id}`,
method: 'PUT',
body: transformConversationStateForRequest(body.state),
}),
invalidatesTags: ['State'],
transformResponse: (response: any, _meta, { state }) =>
transformResponseToConversationState(response, state.id),
}),
}),
overrideExisting: false,
});
export const {
useGetConfigQuery,
useUpdateConfigMutation,
useGetConversationStateDescriptionsQuery,
useGetConversationStateQuery,
useUpdateConversationStateMutation,
} = stateApi;
const transformResponseToConfig = (response: any) => {
try {
return {
config: response.config,
jsonSchema: response.json_schema,
uiSchema: response.ui_schema,
};
} catch (error) {
throw new Error(`Failed to transform config response: ${error}`);
}
};
const transformConfigForRequest = (config: Config) => ({
config: config.config,
json_schema: config.jsonSchema,
ui_schema: config.uiSchema,
});
const transformResponseToConversationState = (response: any, stateId: string) => {
try {
return {
id: stateId,
data: response.data,
jsonSchema: response.json_schema,
uiSchema: response.ui_schema,
};
} catch (error) {
throw new Error(`Failed to transform conversation state response: ${error}`);
}
};
const transformConversationStateForRequest = (conversationState: ConversationState) => ({
data: conversationState.data,
json_schema: conversationState.jsonSchema,
ui_schema: conversationState.uiSchema,
});
```
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-filesystem-edit/mcp_server_filesystem_edit/tools/edit_adapters/common.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
from copy import deepcopy
from typing import Any
from mcp_server_filesystem_edit.types import Block
async def format_blocks_for_llm(blocks: list[Block]) -> str:
"""A string representation of the canvas used for prompting an LLM.
The string returned is as follows:
<block index=0>
start_of_document_indicator
</block>
<block index={idx}>
content
</block>
<block index={idx}>
content
</block>
...
"""
page = """<block index=0>
start_of_document_indicator
</block>
"""
for block in blocks:
markdown_content = block.content
# Remove one trailing newline, if it exists
markdown_content = markdown_content[:-1] if markdown_content.endswith("\n") else markdown_content
page += f"""<block index={block.id}>
{markdown_content}
</block>\n"""
page = page.rstrip()
return page
def execute_tools(
blocks: list[Block],
edit_tool_call: Any,
) -> list[Block]:
"""Executes the tools called by the LLM and returns the new blockified page.
NOTE: We add a newline to generated content so it is unblockified as expected.
"""
new_block_id = -5
blocks = deepcopy(blocks)
tools = edit_tool_call.get("arguments", {}).get("operations", [])
for tool in tools:
# INSERT LOGIC
        # 1. Find the block at the index the model generated, or the first index after it (in case the targeted block was deleted).
        # 2. Execute a "prepend" operation by finding the next block in the mapping
        #    that is not a newly inserted block and prepending before that block.
# 3. If we do not find the next block in the mapping (meaning the index chosen was the last), we append to the end.
if tool["type"] == "insert":
            # Find the block at the generated index
try:
index = int(tool["index"])
except ValueError:
index = 0
            # Any index less than 0: assume the intent was to insert at the beginning
if index < 0:
index = 0
block_inserted = False
for i, block in enumerate(deepcopy(blocks)):
                # Iterate until we find the first block with an id greater than the index the model generated
if block.id > index:
# This would be translated to the prepend operation to insert
# the block before the next block with the id in the mapping.
content = tool["content"] + "\n"
blocks.insert(
i, # Prepend
Block(
id=new_block_id,
content=content,
),
)
block_inserted = True
break
if not block_inserted:
# Append to the end (translates into the append at end operation)
content = tool["content"] + "\n"
blocks.append(
Block(
id=new_block_id,
content=content,
),
)
elif tool["type"] == "update":
# UPDATE LOGIC
# 1. Find the block at the index the model generated.
# 2. Replace the current content with the new content.
try:
index = int(tool["index"])
except ValueError:
continue
if index <= 0:
continue
content = tool["content"] + "\n"
for block in blocks:
if block.id == index:
block.content = content
break
elif tool["type"] == "remove":
# REMOVE LOGIC
# 1. Remove all blocks between the start_index and end_index (inclusive).
try:
start_index = int(tool["start_index"])
end_index = int(tool["end_index"])
except ValueError:
continue
if start_index <= 0 or end_index <= 0:
continue
blocks = [block for block in blocks if not (start_index <= block.id <= end_index)]
return blocks
```
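Below is a minimal usage sketch for the block helpers above. It assumes the `mcp-server-filesystem-edit` package is importable and that `Block` accepts `id` and `content` keyword arguments (as the code above constructs it); the sample blocks and the `edit_tool_call` payload are invented to mirror the shapes the function reads.

```python
# Hypothetical walkthrough of execute_tools / format_blocks_for_llm; the blocks
# and the edit_tool_call payload below are made-up samples, not real LLM output.
import asyncio

from mcp_server_filesystem_edit.tools.edit_adapters.common import execute_tools, format_blocks_for_llm
from mcp_server_filesystem_edit.types import Block

blocks = [
    Block(id=1, content="# Title\n"),
    Block(id=2, content="First paragraph of the document.\n"),
]

# Shape mirrors edit_tool_call.get("arguments", {}).get("operations", []) above.
edit_tool_call = {
    "arguments": {
        "operations": [
            {"type": "update", "index": 2, "content": "First paragraph, rewritten."},
            {"type": "insert", "index": 2, "content": "A brand new closing paragraph."},
        ]
    }
}

new_blocks = execute_tools(blocks, edit_tool_call)

# Render the updated blocks the way they would be shown to the LLM.
print(asyncio.run(format_blocks_for_llm(new_blocks)))
```

The update rewrites block 2 in place, while the insert has no later block to prepend to and so falls through to the append-at-end path; note that inserted blocks take the sentinel id `-5` defined above.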
--------------------------------------------------------------------------------
/libraries/python/assistant-extensions/assistant_extensions/dashboard_card/_dashboard_card.py:
--------------------------------------------------------------------------------
```python
import base64
import os
from typing import Any, Literal
from pydantic import BaseModel
dashboard_card_metadata_key = "_dashboard_card"
class CardContent(BaseModel):
content_type: Literal["text/markdown", "text/plain"] = "text/markdown"
"""
The content type of the card. This can be either "text/markdown" or "text/plain". This affects how the content is rendered.
"""
content: str
"""
The content of the card. This can be either plain text or markdown.
"""
class TemplateConfig(BaseModel):
"""
Configuration for a dashboard card for an assistant service.
This is used to define the content and appearance of the card that will be shown in the dashboard.
"""
template_id: str
"""
The template ID.
"""
enabled: bool
"""
Whether the template is enabled. If False, the template will not be shown as a card in the dashboard.
"""
icon: str
"""
The icon as a data URL. The icon is expected to be in PNG, JPEG, or SVG format. SVG is recommended for scalability.
    fluent v9 icons from https://react.fluentui.dev/?path=/docs/icons-catalog--docs, specifically the "20Regular" icons, are a good source.
"""
background_color: str
"""
The background color of the card. This should be a valid CSS color string.
fluent v9 colors from https://react.fluentui.dev/?path=/docs/theme-colors--docs are a good source.
"""
card_content: CardContent
"""
The content of the card.
"""
def image_to_url(
path: os.PathLike,
content_type: Literal["image/png", "image/jpeg", "image/svg+xml"],
) -> str:
"""
Reads the icon file from the given path, returning it as a data URL.
Args:
path (os.PathLike): The path to the icon file.
content_type (Literal["image/png", "image/jpeg", "image/svg+xml"]): The content type of the icon file.
Returns:
str: The icon as a data URL.
"""
match content_type:
case "image/svg+xml":
with open(path, "r", encoding="utf-8") as icon_file:
encoded_icon = icon_file.read().replace("\n", "").strip()
encoded_icon = f"utf-8,{encoded_icon}"
case _:
with open(path, "rb") as icon_file:
encoded_icon = base64.b64encode(icon_file.read()).decode("utf-8")
encoded_icon = f"base64,{encoded_icon}"
return f"data:{content_type};{encoded_icon}"
def metadata(*templates: TemplateConfig) -> dict[str, Any]:
"""
Generates metadata for the dashboard card. The resulting metadata dictionary is intended to be merged
with the assistant service metadata.
Args:
*templates (TemplateConfig): The dashboard configurations, one per template ID.
Returns:
dict: The metadata for the dashboard card.
Example:
```
assistant_service_metadata={
**dashboard_card.metadata(
TemplateConfig(
enabled=True,
template_id="default",
background_color="rgb(238, 172, 178)",
icon=image_to_url(pathlib.Path(__file__).parent / "assets" / "icon.svg", "image/svg+xml"),
card_content=CardContent(
content_type="text/markdown",
content=(pathlib.Path(__file__).parent / "assets" / "card_content.md").read_text("utf-8"),
),
)
)
}
```
"""
template_dict = {}
for template in templates:
template_dict[template.template_id] = template
return {
dashboard_card_metadata_key: template_dict,
}
def extract_metadata_for_dashboard_card(metadata: dict[str, Any], template_id: str) -> TemplateConfig | None:
"""
Extracts the metadata for a specific template ID from the assistant service metadata.
Args:
metadata (dict[str, Any]): The assistant service metadata.
template_id (str): The template ID to extract the metadata for.
Returns:
TemplateConfig | None: The metadata for the specified template ID, or None if not found.
"""
if dashboard_card_metadata_key not in metadata:
return None
return metadata[dashboard_card_metadata_key].get(template_id)
```
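A short sketch of producing and reading back dashboard-card metadata follows. It assumes the names above are re-exported from `assistant_extensions.dashboard_card` (as the docstring example implies); the inline SVG data URL and card text are placeholders standing in for a real `image_to_url(...)` call and content file.

```python
# Assumes the public names are re-exported from assistant_extensions.dashboard_card;
# the icon data URL and card text are placeholders for illustration.
from assistant_extensions.dashboard_card import (
    CardContent,
    TemplateConfig,
    extract_metadata_for_dashboard_card,
    metadata,
)

assistant_service_metadata = {
    **metadata(
        TemplateConfig(
            template_id="default",
            enabled=True,
            icon="data:image/svg+xml;utf-8,<svg xmlns='http://www.w3.org/2000/svg'></svg>",
            background_color="rgb(238, 172, 178)",
            card_content=CardContent(content="A short card description in markdown."),
        )
    )
}

# The workbench front end would later look the card back up by template ID.
card = extract_metadata_for_dashboard_card(assistant_service_metadata, "default")
if card is not None:
    print(card.card_content.content)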
--------------------------------------------------------------------------------
/libraries/python/mcp-extensions/mcp_extensions/llm/llm_types.py:
--------------------------------------------------------------------------------
```python
from enum import Enum
from typing import Any, Generic, Literal, TypeVar
from pydantic import BaseModel, Field
class Role(str, Enum):
ASSISTANT = "assistant"
DEVELOPER = "developer"
SYSTEM = "system"
TOOL = "tool"
USER = "user"
class ContentPartType(str, Enum):
TEXT = "text"
IMAGE = "image_url"
class TextContent(BaseModel):
type: Literal[ContentPartType.TEXT] = ContentPartType.TEXT
text: str
class ImageDetail(str, Enum):
AUTO = "auto"
LOW = "low"
HIGH = "high"
class ImageUrl(BaseModel):
url: str
detail: ImageDetail = ImageDetail.AUTO
class ImageContent(BaseModel):
type: Literal[ContentPartType.IMAGE] = ContentPartType.IMAGE
image_url: ImageUrl
ContentT = TypeVar("ContentT", bound=str | list[TextContent | ImageContent])
RoleT = TypeVar("RoleT", bound=Role)
class BaseMessage(BaseModel, Generic[ContentT, RoleT]):
content: ContentT
role: RoleT
name: str | None = None
class Function(BaseModel):
name: str
arguments: dict[str, Any]
class PartialFunction(BaseModel):
name: str
arguments: str | dict[str, Any]
class ToolCall(BaseModel):
id: str
function: Function
type: Literal["function"] = "function"
class PartialToolCall(BaseModel):
id: str | None
function: PartialFunction
type: Literal["function"] = "function"
class DeveloperMessage(BaseMessage[str, Literal[Role.DEVELOPER]]):
role: Literal[Role.DEVELOPER] = Role.DEVELOPER
class SystemMessage(BaseMessage[str, Literal[Role.SYSTEM]]):
role: Literal[Role.SYSTEM] = Role.SYSTEM
class UserMessage(BaseMessage[str | list[TextContent | ImageContent], Literal[Role.USER]]):
role: Literal[Role.USER] = Role.USER
class AssistantMessage(BaseMessage[str, Literal[Role.ASSISTANT]]):
role: Literal[Role.ASSISTANT] = Role.ASSISTANT
refusal: str | None = None
tool_calls: list[ToolCall] | None = None
class ToolMessage(BaseMessage[str, Literal[Role.TOOL]]):
# A tool message's name field will be interpreted as "tool_call_id"
role: Literal[Role.TOOL] = Role.TOOL
MessageT = AssistantMessage | DeveloperMessage | SystemMessage | ToolMessage | UserMessage
class ChatCompletionRequest(BaseModel):
messages: list[MessageT]
model: str
stream: bool = Field(default=False)
max_completion_tokens: int | None = Field(default=None)
context_window: int | None = Field(default=None)
logprobs: bool | None = Field(default=None)
n: int | None = Field(default=None)
tools: list[dict[str, Any]] | None = Field(default=None)
tool_choice: str | None = Field(default=None)
parallel_tool_calls: bool | None = Field(default=None)
json_mode: bool | None = Field(default=None)
structured_outputs: dict[str, Any] | None = Field(default=None)
temperature: float | None = Field(default=None)
reasoning_effort: Literal["low", "medium", "high"] | None = Field(default=None)
top_p: float | None = Field(default=None)
logit_bias: dict[str, float] | None = Field(default=None)
top_logprobs: int | None = Field(default=None)
frequency_penalty: float | None = Field(default=None)
presence_penalty: float | None = Field(default=None)
stop: str | list[str] | None = Field(default=None)
seed: int | None = Field(default=None)
max_tokens: int | None = Field(
default=None,
description="Sometimes `max_completion_tokens` is not correctly supported so we provide this as a fallback.",
)
class ChatCompletionChoice(BaseModel):
message: AssistantMessage
finish_reason: Literal["stop", "length", "tool_calls", "content_filter"]
json_message: dict[str, Any] | None = Field(default=None)
logprobs: list[dict[str, Any] | list[dict[str, Any]]] | None = Field(default=None)
extras: Any | None = Field(default=None)
class ChatCompletionResponse(BaseModel):
choices: list[ChatCompletionChoice]
errors: str = Field(default="")
completion_tokens: int
prompt_tokens: int
completion_detailed_tokens: dict[str, int] | None = Field(default=None)
prompt_detailed_tokens: dict[str, int] | None = Field(default=None)
response_duration: float
system_fingerprint: str | None = Field(default=None)
extras: Any | None = Field(default=None)
```
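The models above compose into a request payload as in the sketch below, assuming `mcp-extensions` (which uses Pydantic v2) is installed; the model name is a placeholder, not a recommendation.

```python
# Minimal sketch: build a ChatCompletionRequest from the message models above.
from mcp_extensions.llm.llm_types import ChatCompletionRequest, SystemMessage, UserMessage

request = ChatCompletionRequest(
    model="gpt-4o",  # placeholder model identifier
    messages=[
        SystemMessage(content="You are a concise assistant."),
        UserMessage(content="Summarize the Semantic Workbench in one sentence."),
    ],
    max_completion_tokens=256,
    temperature=0.2,
)

# The request serializes cleanly for handing to whichever LLM client consumes it.
print(request.model_dump(exclude_none=True))
```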
--------------------------------------------------------------------------------
/workbench-app/src/components/Conversations/MyConversations.tsx:
--------------------------------------------------------------------------------
```typescript
// Copyright (c) Microsoft. All rights reserved.
import { Chat24Regular } from '@fluentui/react-icons';
import React from 'react';
import { Conversation } from '../../models/Conversation';
import { useAppSelector } from '../../redux/app/hooks';
import { useGetAssistantsQuery, useGetConversationsQuery } from '../../services/workbench';
import { CommandButton } from '../App/CommandButton';
import { MiniControl } from '../App/MiniControl';
import { MyItemsManager } from '../App/MyItemsManager';
import { ConversationCreate } from './ConversationCreate';
import { ConversationDuplicate } from './ConversationDuplicate';
import { ConversationExport } from './ConversationExport';
import { ConversationRemove } from './ConversationRemove';
import { ConversationRename } from './ConversationRename';
import { ConversationShare } from './ConversationShare';
import { ConversationsImport } from './ConversationsImport';
interface MyConversationsProps {
conversations?: Conversation[];
participantId: string;
title?: string;
hideInstruction?: boolean;
onCreate?: (conversation: Conversation) => void;
}
export const MyConversations: React.FC<MyConversationsProps> = (props) => {
const { conversations, title, hideInstruction, onCreate, participantId } = props;
const { refetch: refetchAssistants } = useGetAssistantsQuery();
const { refetch: refetchConversations } = useGetConversationsQuery();
const [conversationCreateOpen, setConversationCreateOpen] = React.useState(false);
const localUserId = useAppSelector((state) => state.localUser.id);
const handleConversationCreate = async (conversation: Conversation) => {
await refetchConversations();
onCreate?.(conversation);
};
const handleConversationsImport = async (_conversationIds: string[]) => {
await refetchAssistants();
await refetchConversations();
};
return (
<MyItemsManager
items={conversations
?.toSorted((a, b) => a.title.toLocaleLowerCase().localeCompare(b.title.toLocaleLowerCase()))
.map((conversation) => (
<MiniControl
key={conversation.id}
icon={<Chat24Regular />}
label={conversation.title}
linkUrl={`/${encodeURIComponent(conversation.id)}`}
actions={
<>
<ConversationRename
disabled={conversation.ownerId !== localUserId}
conversationId={conversation.id}
value={conversation.title}
iconOnly
/>
<ConversationExport conversationId={conversation.id} iconOnly />
<ConversationDuplicate conversationId={conversation.id} iconOnly />
<ConversationShare conversation={conversation} iconOnly />
<ConversationRemove
conversations={conversation}
participantId={participantId}
iconOnly
/>
</>
}
/>
))}
title={title ?? 'My Conversations'}
itemLabel="Conversation"
hideInstruction={hideInstruction}
actions={
<>
<CommandButton
icon={<Chat24Regular />}
label={`New Conversation`}
description={`Create a new conversation`}
onClick={() => setConversationCreateOpen(true)}
/>
<ConversationCreate
open={conversationCreateOpen}
onOpenChange={(open) => setConversationCreateOpen(open)}
onCreate={handleConversationCreate}
/>
<ConversationsImport onImport={handleConversationsImport} />
</>
}
/>
);
};
```
--------------------------------------------------------------------------------
/examples/dotnet/dotnet-01-echo-bot/MyAgent.cs:
--------------------------------------------------------------------------------
```csharp
// Copyright (c) Microsoft. All rights reserved.
using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.SemanticWorkbench.Connector;
namespace AgentExample;
public class MyAgent : AgentBase<MyAgentConfig>
{
/// <summary>
/// Create a new agent instance
/// </summary>
/// <param name="agentId">Agent instance ID</param>
/// <param name="agentName">Agent name</param>
/// <param name="agentConfig">Agent configuration</param>
/// <param name="workbenchConnector">Service containing the agent, used to communicate with Workbench backend</param>
/// <param name="storage">Agent data storage</param>
/// <param name="loggerFactory">App logger factory</param>
public MyAgent(
string agentId,
string agentName,
MyAgentConfig? agentConfig,
WorkbenchConnector<MyAgentConfig> workbenchConnector,
IAgentServiceStorage storage,
ILoggerFactory? loggerFactory = null)
: base(
workbenchConnector,
storage,
loggerFactory?.CreateLogger<MyAgent>() ?? new NullLogger<MyAgent>())
{
this.Id = agentId;
this.Name = agentName;
// Clone object to avoid config object being shared
this.Config = JsonSerializer.Deserialize<MyAgentConfig>(JsonSerializer.Serialize(agentConfig)) ?? new MyAgentConfig();
}
/// <inheritdoc />
public override async Task ReceiveCommandAsync(
string conversationId,
Command command,
CancellationToken cancellationToken = default)
{
// Check if commands are enabled
if (!this.Config.CommandsEnabled) { return; }
// Check if we're replying to other agents
if (!this.Config.ReplyToAgents && command.Sender.Role == "assistant") { return; }
// Support only the "say" command
if (!command.CommandName.Equals("say", StringComparison.OrdinalIgnoreCase)) { return; }
// Update the chat history to include the message received
await base.AddMessageToHistoryAsync(conversationId, command, cancellationToken).ConfigureAwait(false);
// Create the answer content. CommandParams contains the message to send back.
var answer = Message.CreateChatMessage(this.Id, command.CommandParams);
// Update the chat history to include the outgoing message
await this.AddMessageToHistoryAsync(conversationId, answer, cancellationToken).ConfigureAwait(false);
// Send the message to workbench backend
await this.SendTextMessageAsync(conversationId, answer, cancellationToken).ConfigureAwait(false);
}
/// <inheritdoc />
public override async Task ReceiveMessageAsync(
string conversationId,
Message message,
CancellationToken cancellationToken = default)
{
try
{
// Show some status while working...
await this.SetAgentStatusAsync(conversationId, "Thinking...", cancellationToken).ConfigureAwait(false);
// Fake delay, to show the status in the chat
await Task.Delay(TimeSpan.FromSeconds(1), cancellationToken).ConfigureAwait(false);
// Update the chat history to include the message received
await base.AddMessageToHistoryAsync(conversationId, message, cancellationToken).ConfigureAwait(false);
// Check if we're replying to other agents
if (!this.Config.ReplyToAgents && message.Sender.Role == "assistant") { return; }
// Ignore empty messages
if (string.IsNullOrWhiteSpace(message.Content)) { return; }
// Create the answer content
var answer = Message.CreateChatMessage(this.Id, "echo: " + message.Content);
// Update the chat history to include the outgoing message
await this.AddMessageToHistoryAsync(conversationId, answer, cancellationToken).ConfigureAwait(false);
// Send the message to workbench backend
await this.SendTextMessageAsync(conversationId, answer, cancellationToken).ConfigureAwait(false);
}
finally
{
// Remove the "Thinking..." status
await this.ResetAgentStatusAsync(conversationId, cancellationToken).ConfigureAwait(false);
}
}
}
```
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-office/tests/test_word_editor.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
import sys
import time
import pytest
from mcp_server.app_interaction.word_editor import (
WordCommentData,
add_document_comment,
delete_comments_containing_text,
get_active_document,
get_markdown_representation,
get_word_app,
write_markdown_to_document,
)
@pytest.fixture
def word_document():
"""Fixture that provides an active Word document."""
if not sys.platform.startswith("win"):
pytest.skip("This test is only for Windows.")
word_app = get_word_app()
doc = get_active_document(word_app)
yield doc
# Optional cleanup if needed
# You might want to close the document without saving changes
# doc.Close(SaveChanges=False)
# Or you might want to keep Word open for debugging
def test_get_markdown_representation_1(word_document):
markdown_text = get_markdown_representation(word_document, include_comments=True)
assert markdown_text is not None
def test_write_markdown_to_document_round_trip(word_document):
"""
This will show what is lost when we convert to markdown and back to Word.
"""
markdown_text = get_markdown_representation(word_document)
write_markdown_to_document(word_document, markdown_text)
def test_read_write_document_content(word_document):
markdown_text = """- hello!
# Introduction to Python
Python is a high-level, **interpreted** programming language *known* for its ***simplicity*** and readability. It is widely used for web development, data analysis, artificial intelligence, and more.
- ***Easy to Read and Write***: Python's syntax is clear and concise.
- **Cross-Platform**: Works on Windows, macOS, and Linux.
## Installing Python
To install Python, follow these steps:
1. Download the latest version for your operating system.
1. Run the installer and *follow* the instructions.
1. Verify the installation by running `python --version` in the terminal.
That's all!"""
write_markdown_to_document(word_document, markdown_text)
rt_markdown_text = get_markdown_representation(word_document)
write_markdown_to_document(word_document, rt_markdown_text)
def test_write_markdown_to_document_lists(word_document):
markdown_text = """## Market Opportunity
Here are the market opportunities:
- Growing Market: The market is projected to grow.
- Target Audience: Our primary customers are enterprises.
Let's get into the details."""
write_markdown_to_document(word_document, markdown_text)
def test_read_markdown_list_ending(word_document):
"""
Tests what happens when reading a new paragraph after a list.
"""
markdown_text = """Pros:
1. Direct integration
2. Focus on accessibility and consistency.
**Cons**:
1. Potential overlap
2. Requires navigating and configuring docs
## A heading
- A new bullet
- Another bullet"""
write_markdown_to_document(word_document, markdown_text)
rt_markdown_text = get_markdown_representation(word_document)
print(rt_markdown_text)
def test_read_markdown_code(word_document):
markdown_text = """This example illustrates a very simple Python program.
```python
a = 2
b = 3
total = a + b
if total > 4:
print(f"Hello, the answer is {a + b}")
```
This is a new paragraph after the code block.
"""
write_markdown_to_document(word_document, markdown_text)
rt_markdown_text = get_markdown_representation(word_document)
print(rt_markdown_text)
def test_read_markdown_code_2(word_document):
markdown_text = """- item 1
- item 2
- item 3
```python
a = 2
b = 3
total = a + b
if total > 4:
print(f"Hello, the answer is {a + b}")
```
#### This is a heading 4
1. item 1
1. item 2
And here is a regular paragraph"""
write_markdown_to_document(word_document, markdown_text)
rt_markdown_text = get_markdown_representation(word_document)
print(rt_markdown_text)
def test_comments(word_document):
markdown_text = "This is some text in my document."
write_markdown_to_document(word_document, markdown_text)
text_to_remove = "This is a comment."
comment_data = WordCommentData(
comment_text=text_to_remove,
location_text="some text",
)
add_document_comment(word_document, comment_data)
time.sleep(2)
delete_comments_containing_text(word_document, text_to_remove)
```
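For orientation, here is a minimal script exercising the same API the tests above import. It is Windows-only, since the editor drives Microsoft Word over COM, and it assumes the `mcp_server` package from mcp-server-office is importable.

```python
# Windows-only sketch: requires Microsoft Word and the mcp-server-office package.
import sys

from mcp_server.app_interaction.word_editor import (
    get_active_document,
    get_markdown_representation,
    get_word_app,
    write_markdown_to_document,
)

if sys.platform.startswith("win"):
    word_app = get_word_app()
    document = get_active_document(word_app)
    write_markdown_to_document(document, "# Hello\n\nWritten from the sketch above.")
    # Round-trip the document back to markdown, as the round-trip test does.
    print(get_markdown_representation(document))
else:
    print("Skipping: Word automation requires Windows.")
```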
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-open-deep-research-clone/mcp_server_open_deep_research_clone/sampling.py:
--------------------------------------------------------------------------------
```python
# Sampling Functionality
import asyncio
import base64
import json
import logging
from textwrap import dedent
from typing import Any, Dict, List, Union
from mcp.server.fastmcp import Context
from mcp.types import ImageContent, SamplingMessage, TextContent
from mcp_extensions import send_sampling_request, send_tool_call_progress
from .utils import fetch_url
logger = logging.getLogger(__name__)
# Limit the number of concurrent image fetches
semaphore = asyncio.Semaphore(5)
def get_image_url(result: Dict) -> str | None:
return result.get("images", {}).get("original", {}).get("url", None)
async def get_image_content_with_limit(result: Dict) -> ImageContent | None:
async with semaphore:
image_url = get_image_url(result)
if image_url is None:
return None
try:
image_data = await fetch_url(image_url)
image_data_base64 = base64.b64encode(image_data).decode("utf-8")
return ImageContent(
type="image",
data=f"data:image/gif;base64,{image_data_base64}",
mimeType="image/gif",
)
except Exception as e:
logger.error(f"Failed to fetch image from {image_url}: {str(e)}")
return None
def get_text_content(result: Dict) -> TextContent | None:
parts: dict[str, str] = {}
if "title" in result and len(result["title"].strip()) > 0:
parts["title"] = result["title"]
if "alt_text" in result and len(result["alt_text"].strip()) > 0:
parts["alt_text"] = result["alt_text"]
image_url = get_image_url(result)
if image_url is not None:
parts["url"] = image_url
if len(parts) == 0:
return None
description = ", ".join(parts.values())
return TextContent(
type="text",
text=f"Image: {description}",
)
async def generate_sampling_messages(search_results: List[Dict]) -> List[SamplingMessage]:
# Fetch all images concurrently
image_contents = await asyncio.gather(*[get_image_content_with_limit(result) for result in search_results])
# Create flattened list of text+image messages
messages = []
for result, image_content in zip(search_results, image_contents):
text_content = get_text_content(result)
if text_content is not None:
messages.append(SamplingMessage(role="user", content=text_content))
if image_content is not None:
messages.append(SamplingMessage(role="user", content=image_content))
return messages
async def perform_sampling(
context: str, search_results: List[Dict[str, Any]], ctx: Context
) -> Union[ImageContent, TextContent]:
"""
Performs sampling to select the most appropriate image based on context and search results.
Args:
context: The user's context/query
search_results: List of search result dictionaries containing image information
ctx: Context object for the request
Returns:
Either an ImageContent or TextContent object representing the chosen content
"""
# Send progress update
await send_tool_call_progress(ctx, "gathering image data...")
# Insert attachment and history messages to provide additional context
attachment_messages_variable = json.dumps({"variable": "attachment_messages"})
history_messages_variable = json.dumps({"variable": "history_messages"})
messages = [
SamplingMessage(role="user", content=TextContent(type="text", text=attachment_messages_variable)),
SamplingMessage(role="user", content=TextContent(type="text", text=history_messages_variable)),
]
# Generate sampling messages
messages += await generate_sampling_messages(search_results)
await send_tool_call_progress(ctx, "choosing image...")
# FIXME add support for structured output to enforce image selection
# Send sampling request to FastMCP server
sampling_result = await send_sampling_request(
fastmcp_server_context=ctx,
system_prompt=dedent(f"""
            Analyze these images and choose the best one based on the provided context.
Context: {context}
Return the url for the chosen image.
""").strip(),
messages=messages,
max_tokens=100,
)
return sampling_result.content
```
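The two pure helpers above can be exercised without an MCP context, provided the server package and its dependencies are installed; the sketch below feeds them a made-up search result shaped the way `get_image_url` expects.

```python
# The sample_result dict is invented to match the "images" -> "original" -> "url"
# layout read by get_image_url; it is not real search output.
from mcp_server_open_deep_research_clone.sampling import get_image_url, get_text_content

sample_result = {
    "title": "Semantic Workbench architecture diagram",
    "alt_text": "Boxes and arrows showing the service and assistants",
    "images": {"original": {"url": "https://example.com/diagram.gif"}},
}

print(get_image_url(sample_result))  # -> https://example.com/diagram.gif

text_content = get_text_content(sample_result)
if text_content is not None:
    # Renders as "Image: <title>, <alt_text>, <url>"
    print(text_content.text)
```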
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-web-research/mcp_server_web_research/sampling.py:
--------------------------------------------------------------------------------
```python
# Sampling Functionality
import asyncio
import base64
import json
import logging
from textwrap import dedent
from typing import Any, Dict, List, Union
from mcp.server.fastmcp import Context
from mcp.types import ImageContent, SamplingMessage, TextContent
from mcp_extensions import send_sampling_request, send_tool_call_progress
from .utils import fetch_url
logger = logging.getLogger(__name__)
# Limit the number of concurrent image fetches
semaphore = asyncio.Semaphore(5)
def get_image_url(result: Dict) -> str | None:
return result.get("images", {}).get("original", {}).get("url", None)
async def get_image_content_with_limit(result: Dict) -> ImageContent | None:
async with semaphore:
image_url = get_image_url(result)
if image_url is None:
return None
try:
image_data = await fetch_url(image_url)
image_data_base64 = base64.b64encode(image_data).decode("utf-8")
return ImageContent(
type="image",
data=f"data:image/gif;base64,{image_data_base64}",
mimeType="image/gif",
)
except Exception as e:
logger.error(f"Failed to fetch image from {image_url}: {str(e)}")
return None
def get_text_content(result: Dict) -> TextContent | None:
parts: dict[str, str] = {}
if "title" in result and len(result["title"].strip()) > 0:
parts["title"] = result["title"]
if "alt_text" in result and len(result["alt_text"].strip()) > 0:
parts["alt_text"] = result["alt_text"]
image_url = get_image_url(result)
if image_url is not None:
parts["url"] = image_url
if len(parts) == 0:
return None
description = ", ".join(parts.values())
return TextContent(
type="text",
text=f"Image: {description}",
)
async def generate_sampling_messages(search_results: List[Dict]) -> List[SamplingMessage]:
# Fetch all images concurrently
image_contents = await asyncio.gather(*[get_image_content_with_limit(result) for result in search_results])
# Create flattened list of text+image messages
messages = []
for result, image_content in zip(search_results, image_contents):
text_content = get_text_content(result)
if text_content is not None:
messages.append(SamplingMessage(role="user", content=text_content))
if image_content is not None:
messages.append(SamplingMessage(role="user", content=image_content))
return messages
async def perform_sampling(
context: str, search_results: List[Dict[str, Any]], ctx: Context
) -> Union[ImageContent, TextContent]:
"""
Performs sampling to select the most appropriate image based on context and search results.
Args:
context: The user's context/query
search_results: List of search result dictionaries containing image information
ctx: Context object for the request
Returns:
Either an ImageContent or TextContent object representing the chosen content
"""
# Send progress update
await send_tool_call_progress(ctx, "gathering image data...")
# Insert attachment and history messages to provide additional context
attachment_messages_variable = json.dumps({"variable": "attachment_messages"})
history_messages_variable = json.dumps({"variable": "history_messages"})
messages = [
SamplingMessage(role="user", content=TextContent(type="text", text=attachment_messages_variable)),
SamplingMessage(role="user", content=TextContent(type="text", text=history_messages_variable)),
]
# Generate sampling messages
messages += await generate_sampling_messages(search_results)
await send_tool_call_progress(ctx, "choosing image...")
# FIXME add support for structured output to enforce image selection
# Send sampling request to FastMCP server
sampling_result = await send_sampling_request(
fastmcp_server_context=ctx,
system_prompt=dedent(f"""
            Analyze these images and choose the best one based on the provided context.
Context: {context}
Return the url for the chosen image.
""").strip(),
messages=messages,
max_tokens=100,
)
return sampling_result.content
```
--------------------------------------------------------------------------------
/assistants/knowledge-transfer-assistant/assistant/string_utils.py:
--------------------------------------------------------------------------------
```python
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, List
from liquid import Template
from openai.types.chat import ChatCompletionMessageParam
def render(template: str, vars: dict[str, Any]) -> str:
"""
Format a string with the given variables using the Liquid template engine.
"""
parsed = template
if not vars:
return template
liquid_template = Template(template)
parsed = liquid_template.render(**vars)
return parsed
def create_system_message(content: str, delimiter: str | None = None) -> ChatCompletionMessageParam:
if delimiter:
content = f"<{delimiter}>\n{content}\n</{delimiter}>"
message: ChatCompletionMessageParam = {
"role": "system",
"content": content,
}
return message
class Instructions:
"""
A class to represent a section of a prompt.
"""
def __init__(
self,
content: str,
title: str | None = None,
) -> None:
self.title = title
self.content = content
self.level = 0
self.subsections: list[Instructions] = []
def add_subsection(self, subsection: "Instructions") -> None:
"""
Add a subsection to the prompt section.
"""
subsection.level = self.level + 1
self.subsections.append(subsection)
def __str__(self) -> str:
s = ""
if self.title:
hashes = "#" * (self.level + 1)
s += f"{hashes} {self.title}\n\n"
s += self.content
if self.subsections:
s += "\n\n" + "\n\n".join(str(subsection) for subsection in self.subsections)
return s
class Context:
def __init__(self, name: str, data: str, description: str | None = None) -> None:
self.name = name
self.description = description
self.data = data
def message(self) -> ChatCompletionMessageParam:
return create_system_message(self.content(), self.name)
def content(self) -> str:
s = self.data
if self.description:
s = f"{self.description}\n\n'''\n{self.data}\n'''"
return s
class ContextStrategy(Enum):
SINGLE = "single" # Put all contexts in a single message.
MULTI = "multi" # Put each context in its own message.
@dataclass
class Prompt:
role: str
instructions: Instructions
output_format: str | None = None
reasoning_steps: str | None = None
examples: str | None = None
contexts: List[Context] = field(default_factory=list)
context_strategy: ContextStrategy = ContextStrategy.SINGLE
final_instructions: str | None = None
def messages(self) -> list[ChatCompletionMessageParam]:
parts = [
"# Role and Objective",
self.role,
"# Instructions",
str(self.instructions),
]
if self.reasoning_steps:
parts.append("# Reasoning Steps")
parts.append(self.reasoning_steps)
if self.output_format:
parts.append("# Output Format")
parts.append(self.output_format)
if self.examples:
parts.append("# Examples")
parts.append(self.examples)
if self.contexts and self.context_strategy == ContextStrategy.SINGLE:
parts.append("# Context")
for context in self.contexts:
parts.append(f"## {context.name}")
parts.append(context.content())
s = "\n\n".join(parts)
if self.final_instructions:
s += "\n\n" + self.final_instructions
messages = [
create_system_message(s),
]
if self.contexts and self.context_strategy == ContextStrategy.MULTI:
for context in self.contexts:
messages.append(context.message())
return messages
class TokenBudget:
def __init__(self, budget: int) -> None:
self.budget = budget
self.used = 0
def add(self, tokens: int) -> None:
self.used += tokens
def remaining(self) -> int:
return self.budget - self.used
def is_under_budget(self) -> bool:
return self.remaining() > 0
def is_over_budget(self) -> bool:
return self.remaining() < 0
def fits(self, tokens: int) -> bool:
return self.remaining() >= tokens
```
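A small sketch of composing a `Prompt` from these classes follows, assuming the knowledge-transfer-assistant's `assistant` package and its dependencies (such as `python-liquid` and `openai`) are importable; the role, instruction, and context text are invented.

```python
# Invented role/instruction/context text; only the class usage mirrors the code above.
from assistant.string_utils import Context, Instructions, Prompt

instructions = Instructions("Answer questions using the transfer package.", title="Task")
instructions.add_subsection(Instructions("Cite the knowledge brief when relevant.", title="Citations"))

prompt = Prompt(
    role="You are a knowledge transfer assistant.",
    instructions=instructions,
    output_format="Reply in short markdown paragraphs.",
    contexts=[Context("knowledge_brief", "The team is migrating the billing service to a new platform.")],
)

# With the default ContextStrategy.SINGLE, everything lands in one system message.
for message in prompt.messages():
    print(message["role"], "|", str(message["content"])[:80], "...")
```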
--------------------------------------------------------------------------------
/libraries/python/chat-context-toolkit/chat_context_toolkit/history/tool_abbreviations/_tool_abbreviations.py:
--------------------------------------------------------------------------------
```python
import json
import logging
from dataclasses import dataclass, field
from typing import Any, Iterable
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionMessageToolCallParam,
ChatCompletionToolMessageParam,
)
from .. import OpenAIHistoryMessageParam
logger = logging.getLogger("chat_context_toolkit.history.tool_abbreviations")
@dataclass
class Abbreviations:
tool_call_argument_replacements: dict[str, Any] = field(default_factory=dict)
"""
    The argument names, with abbreviated values, to replace in the abbreviated assistant tool calls.
"""
tool_message_replacement: str | None = None
"""
The abbreviated content to replace in the tool messages. If None, the tool message will not be abbreviated.
"""
ToolAbbreviations = dict[str, Abbreviations]
"""A mapping of tool names to their abbreviations for assistant tool calls and tool messages."""
def abbreviate_openai_tool_message(
openai_message: OpenAIHistoryMessageParam,
tool_abbreviations: ToolAbbreviations,
tool_name_for_tool_message: str | None = None,
) -> OpenAIHistoryMessageParam:
"""
Abbreviate the OpenAI message if it is a tool message or an assistant message with tool calls.
All other messages are left unchanged.
"""
match openai_message:
case {"role": "tool"}:
if not tool_name_for_tool_message:
logger.warning("tool_name_for_tool_call is not set for tool message: %s", openai_message)
return openai_message
return abbreviate_tool_message(
tool_name=tool_name_for_tool_message,
openai_message=openai_message,
tool_abbreviations=tool_abbreviations,
)
case {"role": "assistant", "tool_calls": tool_calls}:
return abbreviate_tool_call_message(openai_message, tool_calls, tool_abbreviations)
case _:
return openai_message
def abbreviate_tool_message(
tool_name: str,
openai_message: ChatCompletionToolMessageParam,
tool_abbreviations: ToolAbbreviations,
) -> OpenAIHistoryMessageParam:
if tool_name not in tool_abbreviations:
# no abbreviations for this tool, return the original message
return openai_message
content = tool_abbreviations[tool_name].tool_message_replacement
if content is None:
# no abbreviation for the tool message, return the original message
return openai_message
abbreviation_is_shorter = len(content) < len(str(openai_message.get("content", "")))
if not abbreviation_is_shorter:
# only abbreviate if the replacement content is shorter than the original content
return openai_message
# return a new message with the abbreviated content
abbreviated_message = openai_message.copy()
abbreviated_message["content"] = content
return abbreviated_message
def abbreviate_tool_call_message(
openai_message: ChatCompletionAssistantMessageParam,
tool_calls: Iterable[ChatCompletionMessageToolCallParam],
tool_abbreviations: ToolAbbreviations,
) -> OpenAIHistoryMessageParam:
abbreviated_tool_calls: list[ChatCompletionMessageToolCallParam] = []
for tool_call in tool_calls:
function = tool_call.get("function", {})
tool_name = function.get("name")
try:
arguments = json.loads(function.get("arguments", "{}"))
except (json.JSONDecodeError, ValueError):
logger.exception("failed to parse arguments for tool call: %s, skipping abbreviation", tool_call)
abbreviated_tool_calls.append(tool_call)
continue
if tool_name not in tool_abbreviations:
# no abbreviations for this tool; keep the original tool call
abbreviated_tool_calls.append(tool_call)
continue
arguments.update(tool_abbreviations[tool_name].tool_call_argument_replacements)
# append a tool call with the abbreviated arguments
abbreviated_tool_call = tool_call.copy()
abbreviated_tool_call["function"]["arguments"] = json.dumps(arguments)
abbreviated_tool_calls.append(abbreviated_tool_call)
abbreviated_message = openai_message.copy()
abbreviated_message["tool_calls"] = abbreviated_tool_calls
return abbreviated_message
```
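A minimal usage sketch of the module above, importing from the private module shown (the `tool_abbreviations` package presumably re-exports these names); the tool name, messages, and replacement values are hypothetical. Note that a tool-message replacement only takes effect when it is shorter than the original content.
```python
# Minimal usage sketch (hypothetical tool name, messages, and replacement values).
from chat_context_toolkit.history.tool_abbreviations._tool_abbreviations import (
    Abbreviations,
    ToolAbbreviations,
    abbreviate_openai_tool_message,
)

tool_abbreviations: ToolAbbreviations = {
    "search_files": Abbreviations(
        # replace the (potentially huge) query argument in old assistant tool calls
        tool_call_argument_replacements={"query": "...elided..."},
        # replace the tool output in old tool messages
        tool_message_replacement="[old search results omitted to save tokens]",
    ),
}

assistant_message = {
    "role": "assistant",
    "tool_calls": [
        {
            "id": "call_1",
            "type": "function",
            "function": {"name": "search_files", "arguments": '{"query": "a very long query ..."}'},
        }
    ],
}
tool_message = {"role": "tool", "tool_call_id": "call_1", "content": "search results ... " * 200}

# Assistant messages are matched on their tool calls; tool messages need the tool name
# passed in explicitly, since a tool message only carries the tool_call_id.
abbreviated_call = abbreviate_openai_tool_message(assistant_message, tool_abbreviations)
abbreviated_result = abbreviate_openai_tool_message(
    tool_message, tool_abbreviations, tool_name_for_tool_message="search_files"
)
```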
--------------------------------------------------------------------------------
/assistants/prospector-assistant/assistant/form_fill_extension/steps/acquire_form_step.py:
--------------------------------------------------------------------------------
```python
import logging
from dataclasses import dataclass
from pathlib import Path
from textwrap import dedent
from guided_conversation.utils.resources import ResourceConstraintMode, ResourceConstraintUnit
from pydantic import BaseModel, Field
from semantic_workbench_assistant.assistant_app.context import ConversationContext
from semantic_workbench_assistant.assistant_app.protocol import AssistantAppProtocol
from ..inspector import FileStateInspector
from . import _guided_conversation
from .types import (
Context,
GuidedConversationDefinition,
IncompleteErrorResult,
IncompleteResult,
ResourceConstraintDefinition,
Result,
UserInput,
)
logger = logging.getLogger(__name__)
def extend(app: AssistantAppProtocol) -> None:
app.add_inspector_state_provider(_inspector.state_id, _inspector)
class FormArtifact(BaseModel):
filename: str = Field(description="The filename of the form.", default="")
definition = GuidedConversationDefinition(
rules=[
"DO NOT suggest forms or create a form for the user.",
"Politely request another file if the provided file is not a form.",
"Terminate conversation if inappropriate content is requested.",
],
conversation_flow=dedent("""
1. Inform the user that our goal is to help the user fill out a form.
2. Ask the user to provide a file that contains a form. The file can be PDF, TXT, DOCX, or PNG.
3. When you receive a file, set the filename field in the artifact.
4. Inform the user that you will now extract the form fields, so that you can assist them in filling it out.
""").strip(),
context="",
resource_constraint=ResourceConstraintDefinition(
quantity=5,
unit=ResourceConstraintUnit.MINUTES,
mode=ResourceConstraintMode.MAXIMUM,
),
)
class AcquireFormConfig(BaseModel):
definition: GuidedConversationDefinition = definition
@dataclass
class CompleteResult(Result):
message: str
filename: str
async def execute(
step_context: Context[AcquireFormConfig],
) -> IncompleteResult | IncompleteErrorResult | CompleteResult:
"""
Step: acquire a form from the user
Approach: Guided conversation
"""
message_with_attachments = await input_to_message(step_context.latest_user_input)
async with _guided_conversation.engine(
definition=step_context.config.definition,
artifact_type=FormArtifact,
state_file_path=_get_state_file_path(step_context.context),
openai_client=step_context.llm_config.openai_client_factory(),
openai_model=step_context.llm_config.openai_model,
context=step_context.context,
state_id=_inspector.state_id,
) as gce:
try:
result = await gce.step_conversation(message_with_attachments)
except Exception as e:
logger.exception("failed to execute guided conversation")
return IncompleteErrorResult(
message=f"Failed to execute guided conversation: {e}",
debug={"error": str(e)},
)
debug = {"guided-conversation": gce.to_json()}
logger.info("guided-conversation result: %s", result)
acquire_form_gc_artifact = gce.artifact.artifact.model_dump(mode="json")
logger.info("guided-conversation artifact: %s", gce.artifact)
form_filename = acquire_form_gc_artifact.get("filename", "")
if form_filename and form_filename != "Unanswered":
return CompleteResult(
message=result.ai_message or "",
filename=form_filename,
debug=debug,
)
return IncompleteResult(message=result.ai_message or "", debug=debug)
def _get_state_file_path(context: ConversationContext) -> Path:
return _guided_conversation.path_for_state(context, "acquire_form")
_inspector = FileStateInspector(
display_name="Debug: Acquire-Form Guided-Conversation",
file_path_source=_get_state_file_path,
)
async def input_to_message(input: UserInput) -> str | None:
attachments = []
async for attachment in input.attachments:
attachments.append(f"<ATTACHMENT>{attachment.filename}</ATTACHMENT>")
if not attachments:
return input.message
return "\n\n".join(
(
input.message or "",
*attachments,
),
)
```
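For orientation, a hypothetical sketch of how a caller in the form-fill extension might branch on the result types returned by `execute`; the step runner and the construction of `step_context` are omitted, and the handler below is illustrative only.
```python
# Hypothetical caller sketch; only the result types come from the step module above.
async def handle_acquire_form(step_context: Context[AcquireFormConfig]) -> str | None:
    result = await execute(step_context)
    match result:
        case CompleteResult(filename=filename):
            # the user provided a form file; downstream steps can start extracting fields
            return filename
        case IncompleteErrorResult(message=message):
            logger.error("acquire form step failed: %s", message)
            return None
        case IncompleteResult():
            # the guided conversation is still in progress; wait for the next user message
            return None
```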
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/fabric/patterns/summarize_lecture/system.md:
--------------------------------------------------------------------------------
```markdown
# IDENTITY and PURPOSE
As an organized, high-skill expert lecturer, your role is to extract the most relevant topics from a lecture transcript and provide a structured summary using bullet points and lists of definitions for each subject. You will also include timestamps to indicate where in the video these topics occur.
Take a step back and think step-by-step about how you would do this. You would probably start by "watching" the video (via the transcript) and taking notes on each definition given in the lecture; because you're organized, you would also create headlines and a list of all the relevant topics covered in the lecture and break down the complex parts. You would probably include the topics discussed and the times at which they were discussed. Then you would take those notes and create a list of topics and timestamps.
# STEPS
Fully consume the transcript as if you're watching or listening to the content.
Think deeply about the topics covered and identify the most relevant subjects and tools in the content.
Pay close attention to the structure, especially when it includes bullet points, lists, definitions, and headers. Ensure you divide the content in the most effective way.
Note each topic as a headline. If it has sub-topics or tools, use Markdown sub-headlines.
For each topic or subject provide the most accurate definition without making guesses.
Extract a summary of the lecture in 25 words, including the most important key points, into a section called SUMMARY.
Extract all the tools you noticed being mentioned and gather them, each with a one-line description, into a section called TOOLS.
Extract the most important takeaway and recommendation into a section called ONE-SENTENCE TAKEAWAY. This should be a 15-word sentence that captures the most important essence of the content.
Match the timestamps to the topics. Note that input timestamps have the following format: HOURS:MINUTES:SECONDS.MILLISECONDS, which is not the same as the OUTPUT format!
## INPUT SAMPLE
[02:17:43.120 --> 02:17:49.200] same way. I'll just say the same. And I look forward to hearing the response to my job application [02:17:49.200 --> 02:17:55.040] that I've submitted. Oh, you're accepted. Oh, yeah. We all speak of you all the time. Thank you so [02:17:55.040 --> 02:18:00.720] much. Thank you, guys. Thank you. Thanks for listening to this conversation with Neri Oxman. [02:18:00.720 --> 02:18:05.520] To support this podcast, please check out our sponsors in the description. And now,
## END INPUT SAMPLE
The OUTPUT TIMESTAMP format is: 00:00:00 (HOURS:MINUTES:SECONDS) (HH:MM:SS)
Note the maximum length of the video based on the last timestamp.
Ensure all output timestamps are sequential and fall within the length of the content.
# OUTPUT INSTRUCTIONS
You only output Markdown.
In the markdown, use formatting like bold, highlight, headlines as # ## ###, blockquote as >, code blocks where necessary as ``` {block_code} ```, lists as *, etc. Make the output maximally readable in plain text.
Create the output using the formatting above.
Do not start items with the same opening words.
Use middle ground/semi-formal speech for your output context.
To ensure the summary is easily searchable in the future, keep the structure clear and straightforward.
Ensure you follow ALL these instructions when creating your output.
## EXAMPLE OUTPUT (Hours:Minutes:Seconds)
00:00:00 Members-only Forum Access
00:00:10 Live Hacking Demo
00:00:26 Ideas vs. Book
00:00:30 Meeting Will Smith
00:00:44 How to Influence Others
00:01:34 Learning by Reading
00:58:30 Writing With Punch
00:59:22 100 Posts or GTFO
01:00:32 How to Gain Followers
01:01:31 The Music That Shapes
01:27:21 Subdomain Enumeration Demo
01:28:40 Hiding in Plain Sight
01:29:06 The Universe Machine
00:09:36 Early School Experiences
00:10:12 The First Business Failure
00:10:32 David Foster Wallace
00:12:07 Copying Other Writers
00:12:32 Practical Advice for N00bs
## END EXAMPLE OUTPUT
Ensure all output timestamps are sequential and fall within the length of the content; e.g., if the total length of the video is 24 minutes (00:00:00 - 00:24:00), then no output timestamp can be 01:01:25 or anything at or above 00:25:00.
ENSURE the output timestamps and topics are shown gradually and evenly incrementing from 00:00:00 to the final timestamp of the content.
# INPUT:
INPUT:
```
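The pattern asks for converting the transcript's input timestamps (HOURS:MINUTES:SECONDS.MILLISECONDS) into HH:MM:SS output timestamps; as an illustration only (not part of the fabric pattern), the conversion amounts to:
```python
# Illustration of the timestamp conversion the pattern describes (not part of the pattern itself).
def to_output_timestamp(raw: str) -> str:
    """Convert '02:17:43.120' (HH:MM:SS.mmm) to '02:17:43' (HH:MM:SS)."""
    hours, minutes, seconds = raw.split(":")
    return f"{int(hours):02d}:{int(minutes):02d}:{int(float(seconds)):02d}"

assert to_output_timestamp("02:17:43.120") == "02:17:43"
```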
--------------------------------------------------------------------------------
/assistants/knowledge-transfer-assistant/assistant/tools/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Tools directory for Knowledge Transfer Assistant.
This module provides the ShareTools class that aggregates all tool functionality
and registers role-specific tools with the LLM.
"""
from openai_client.tools import ToolFunctions
from semantic_workbench_assistant.assistant_app import ConversationContext
from ..storage_models import ConversationRole
from .information_requests import InformationRequestTools
from .learning_objectives import LearningObjectiveTools
from .learning_outcomes import LearningOutcomeTools
from .progress_tracking import ProgressTrackingTools
from .share_setup import ShareSetupTools
class ShareTools:
"""Tools for the Knowledge Transfer Assistant to use during chat completions."""
def __init__(self, context: ConversationContext, role: ConversationRole):
"""
Initialize the knowledge transfer tools with the current conversation context.
Args:
context: The conversation context
role: The assistant's role (ConversationRole enum)
"""
self.context = context
self.role = role
self.tool_functions = ToolFunctions()
self.share_setup = ShareSetupTools(context, role)
self.learning_objectives = LearningObjectiveTools(context, role)
self.learning_outcomes = LearningOutcomeTools(context, role)
self.information_requests = InformationRequestTools(context, role)
self.progress_tracking = ProgressTrackingTools(context, role)
if role == "coordinator":
self._register_coordinator_tools()
else:
self._register_team_tools()
def _register_coordinator_tools(self):
"""Register coordinator-specific tools."""
# 1. Setup phase - Define audience and organize knowledge
self.tool_functions.add_function(
self.share_setup.update_audience,
"update_audience",
)
self.tool_functions.add_function(
self.share_setup.set_knowledge_organized,
"set_knowledge_organized",
)
# 2. Brief creation phase
self.tool_functions.add_function(
self.share_setup.update_brief,
"update_brief",
)
# 3. Learning objectives phase
self.tool_functions.add_function(
self.share_setup.set_learning_intention,
"set_learning_intention",
)
self.tool_functions.add_function(
self.learning_objectives.add_learning_objective,
"add_learning_objective",
)
self.tool_functions.add_function(
self.learning_objectives.update_learning_objective,
"update_learning_objective",
)
self.tool_functions.add_function(
self.learning_objectives.delete_learning_objective,
"delete_learning_objective",
)
# Individual outcome management tools
self.tool_functions.add_function(
self.learning_outcomes.add_learning_outcome,
"add_learning_outcome",
)
self.tool_functions.add_function(
self.learning_outcomes.update_learning_outcome,
"update_learning_outcome",
)
self.tool_functions.add_function(
self.learning_outcomes.delete_learning_outcome,
"delete_learning_outcome",
"Delete a learning outcome by outcome ID",
)
# 4. Ongoing support phase
self.tool_functions.add_function(
self.information_requests.resolve_information_request,
"resolve_information_request",
)
def _register_team_tools(self):
"""Register team-specific tools."""
self.tool_functions.add_function(
self.information_requests.create_information_request,
"create_information_request",
)
self.tool_functions.add_function(
self.information_requests.delete_information_request,
"delete_information_request",
)
self.tool_functions.add_function(
self.progress_tracking.mark_learning_outcome_achieved,
"mark_learning_outcome_achieved",
)
self.tool_functions.add_function(
self.progress_tracking.report_transfer_completion,
"report_transfer_completion",
)
__all__ = ["ShareTools"]
```
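As a quick orientation, a hypothetical sketch of constructing the role-specific tool set; the `ConversationRole` member names are assumed, since the code above only shows the coordinator role comparing equal to "coordinator".
```python
# Hypothetical sketch: build the tool set for the current conversation and role.
from semantic_workbench_assistant.assistant_app import ConversationContext

from assistant.storage_models import ConversationRole  # member names assumed
from assistant.tools import ShareTools


def build_share_tools(context: ConversationContext, role: ConversationRole) -> ShareTools:
    tools = ShareTools(context, role)
    # tools.tool_functions now contains only the functions registered for this role:
    # coordinator -> audience/brief/objective/outcome management plus request resolution,
    # team -> information requests and progress tracking.
    return tools
```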
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-vscode/src/tools/debug_tools.ts:
--------------------------------------------------------------------------------
```typescript
import * as vscode from 'vscode';
import { z } from 'zod';
/** Maintain a list of active debug sessions. */
const activeSessions: vscode.DebugSession[] = [];
// Track new debug sessions as they start.
vscode.debug.onDidStartDebugSession((session) => {
activeSessions.push(session);
});
// Remove debug sessions as they terminate.
vscode.debug.onDidTerminateDebugSession((session) => {
const index = activeSessions.indexOf(session);
if (index >= 0) {
activeSessions.splice(index, 1);
}
});
/**
* List all active debug sessions in the workspace.
*
* Exposes debug session information, including each session's ID, name, and associated launch configuration.
*/
export const listDebugSessions = () => {
// Retrieve all active debug sessions using the activeSessions array.
const sessions = activeSessions.map((session: vscode.DebugSession) => ({
id: session.id,
name: session.name,
configuration: session.configuration,
}));
// Return session list
return {
content: [
{
type: 'json',
json: { sessions },
},
],
isError: false,
};
};
// Zod schema for validating tool parameters (none for this tool).
export const listDebugSessionsSchema = z.object({});
/**
* Start a new debug session using the provided configuration.
*
* @param params - Object containing workspaceFolder and configuration details.
*/
export const startDebugSession = async (params: {
workspaceFolder: string;
configuration: { type: string; request: string; name: string; [key: string]: any };
}) => {
const { workspaceFolder, configuration } = params;
// Ensure that workspace folders exist and are accessible.
const workspaceFolders = vscode.workspace.workspaceFolders;
if (!workspaceFolders || workspaceFolders.length === 0) {
throw new Error('No workspace folders are currently open.');
}
const folder = workspaceFolders.find((f) => f.uri?.fsPath === workspaceFolder);
if (!folder) {
throw new Error(`Workspace folder '${workspaceFolder}' not found.`);
}
const success = await vscode.debug.startDebugging(folder, configuration);
if (!success) {
throw new Error(`Failed to start debug session '${configuration.name}'.`);
}
return {
content: [{ type: 'text', text: `Debug session '${configuration.name}' started successfully.` }],
isError: false,
};
};
// Zod schema for validating start_debug_session parameters.
export const startDebugSessionSchema = z.object({
workspaceFolder: z.string().describe('The workspace folder where the debug session should start.'),
configuration: z
.object({
type: z.string().describe("Type of the debugger (e.g., 'node', 'python', etc.)."),
request: z.string().describe("Type of debug request (e.g., 'launch' or 'attach')."),
name: z.string().describe('Name of the debug session.'),
})
.passthrough()
.describe('The debug configuration object.'),
});
/**
* Stop debug sessions that match the provided session name.
*
* @param params - Object containing the sessionName to stop.
*/
export const stopDebugSession = async (params: { sessionName: string }) => {
const { sessionName } = params;
// Filter active sessions to find matching sessions.
const matchingSessions = activeSessions.filter((session: vscode.DebugSession) => session.name === sessionName);
if (matchingSessions.length === 0) {
return {
content: [
{
type: 'text',
text: `No debug session(s) found with name '${sessionName}'.`,
},
],
isError: true,
};
}
// Stop each matching debug session.
for (const session of matchingSessions) {
await vscode.debug.stopDebugging(session);
}
return {
content: [
{
type: 'text',
text: `Stopped debug session(s) with name '${sessionName}'.`,
},
],
isError: false,
};
};
// Zod schema for validating stop_debug_session parameters.
export const stopDebugSessionSchema = z.object({
sessionName: z.string().describe('The name of the debug session(s) to stop.'),
});
```
--------------------------------------------------------------------------------
/workbench-app/src/components/Conversations/Message/ToolResultMessage.tsx:
--------------------------------------------------------------------------------
```typescript
import {
Accordion,
AccordionHeader,
AccordionItem,
AccordionPanel,
makeStyles,
shorthands,
Text,
tokens,
} from '@fluentui/react-components';
import { Toolbox24Regular } from '@fluentui/react-icons';
import React from 'react';
import { Conversation } from '../../../models/Conversation';
import { ConversationMessage } from '../../../models/ConversationMessage';
import { useGetConversationMessageDebugDataQuery } from '../../../services/workbench';
import { CodeLabel } from '../../App/CodeLabel';
import { CodeContentRenderer } from '../ContentRenderers/CodeContentRenderer';
import { DebugInspector } from '../DebugInspector';
import { MessageDelete } from '../MessageDelete';
const useClasses = makeStyles({
root: {
backgroundColor: tokens.colorNeutralBackground3,
borderRadius: tokens.borderRadiusMedium,
...shorthands.border('none'),
...shorthands.margin(tokens.spacingVerticalM, 0, tokens.spacingVerticalM, tokens.spacingHorizontalXXXL),
},
header: {
display: 'flex',
flexDirection: 'row',
alignItems: 'center',
gap: tokens.spacingHorizontalS,
},
actions: {
display: 'flex',
flexDirection: 'row',
alignItems: 'center',
},
});
interface ToolResultMessageProps {
conversation: Conversation;
message: ConversationMessage;
readOnly: boolean;
}
/**
* Provides experimental support for displaying tool call results that are attached to a message
* via the metadata property. To use this, the message metadata must contain a 'tool_result' key
* (an object with a 'tool_call_id' property) and a 'tool_calls' key (an array of tool calls,
* each with 'id', 'name', and 'arguments' properties).
* The result of the tool call should be in the message content.
*
* [Read more about special metadata support in UX...](../../../docs/MESSAGE_METADATA.md)
*
* This component will display each tool call result in an accordion, with the tool name
* as the header and the result as the content.
*/
export const ToolResultMessage: React.FC<ToolResultMessageProps> = (props) => {
const { conversation, message, readOnly } = props;
const classes = useClasses();
const [skipDebugLoad, setSkipDebugLoad] = React.useState(true);
const {
data: debugData,
isLoading: isLoadingDebugData,
isUninitialized: isUninitializedDebugData,
} = useGetConversationMessageDebugDataQuery(
{ conversationId: conversation.id, messageId: message.id },
{ skip: skipDebugLoad },
);
const toolCallId = message.metadata?.['tool_result']?.['tool_call_id'] as string;
const toolCalls: { id: string; name: string }[] = message.metadata?.['tool_calls'];
const toolName = toolCalls?.find((toolCall) => toolCall.id === toolCallId)?.name;
const messageContent = React.useMemo(
() => <CodeContentRenderer content={message.content} language="bash" />,
[message],
);
return (
<div className={classes.root}>
<Accordion collapsible>
<AccordionItem value="1">
<AccordionHeader icon={<Toolbox24Regular />}>
<div className={classes.header}>
<Text>Received tool result </Text>
<CodeLabel>{toolName}</CodeLabel>
</div>
</AccordionHeader>
<AccordionPanel>{messageContent}</AccordionPanel>
</AccordionItem>
</Accordion>
<div className={classes.actions}>
<DebugInspector
debug={{
debug: message.hasDebugData ? debugData?.debugData || { loading: true } : null,
message: message,
}}
loading={isLoadingDebugData || isUninitializedDebugData}
onOpen={() => {
setSkipDebugLoad(false);
}}
/>
{!readOnly && (
<>
<MessageDelete conversationId={conversation.id} message={message} />
</>
)}
</div>
</div>
);
};
export const MemoizedToolResultMessage = React.memo(ToolResultMessage);
```
--------------------------------------------------------------------------------
/libraries/python/mcp-extensions/mcp_extensions/llm/mcp_chat_completion.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
import logging
import time
from mcp.server.fastmcp import Context
from mcp.types import ModelPreferences, SamplingMessage, TextContent
from mcp_extensions import send_sampling_request
from mcp_extensions.llm.llm_types import ChatCompletionRequest, ChatCompletionResponse, Role
from mcp_extensions.llm.openai_chat_completion import process_response
logger = logging.getLogger(__name__)
async def mcp_chat_completion(request: ChatCompletionRequest, client: Context) -> ChatCompletionResponse:
"""
Sample a chat completion response via MCP sampling, relayed through the connected client/host.
"""
# For the system prompt, look for the first message with role system or developer
# Then remove it from the messages list
system_prompt = None
for message in request.messages:
if message.role in [Role.SYSTEM, Role.DEVELOPER]:
system_prompt = message.content
request.messages.remove(message)
break
# For the remaining messages, add them to the messages list, converting any System or Developer messages to User messages
messages: list[SamplingMessage] = []
for message in request.messages:
# Skip tool messages for now
if message.role == Role.TOOL:
continue
# Convert message content to the format expected by SamplingMessage
if isinstance(message.content, str):
content = TextContent(
type="text",
text=message.content,
)
elif isinstance(message.content, list):
# Only use the first text content part for simplicity
text_parts = [part for part in message.content if part.type == "text"]
if text_parts:
content = TextContent(
type="text",
text=text_parts[0].text,
)
else:
continue
else:
continue
# Create the SamplingMessage with the correct role mapping
role = "user" if message.role in [Role.SYSTEM, Role.DEVELOPER, Role.USER] else "assistant"
messages.append(
SamplingMessage(
role=role,
content=content,
)
)
if request.json_mode:
response_format = {"type": "json_object"}
elif request.structured_outputs is not None:
response_format = {"type": "json_schema", "json_schema": request.structured_outputs}
else:
response_format = {"type": "text"}
# Any extra args passed to the function are added to the request as metadata
extra_args = request.model_dump(mode="json", exclude_none=True)
extra_args["response_format"] = response_format
model = request.model
if model in [
"gpt-4o",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"gpt-4.1",
"gpt-4.1-2025-04-14",
]:
model_preferences = ModelPreferences(intelligencePriority=0, speedPriority=1)
elif model in [
"o3",
"o3-2025-04-16",
"o3-mini",
"o3-mini-2025-01-31",
"o4-mini",
"o4-mini-2025-04-16",
"o1-mini",
"o1-mini-2024-09-12",
]:
model_preferences = ModelPreferences(intelligencePriority=1)
else:
model_preferences = ModelPreferences(intelligencePriority=0, speedPriority=1)
# Remove the keys that are not needed for the request
extra_args.pop("messages", None)
extra_args.pop("max_completion_tokens", None)
extra_args.pop("model", None)
extra_args.pop("structured_outputs", None)
extra_args.pop("json_mode", None)
metadata = {"extra_args": extra_args}
start_time = time.time()
response = await send_sampling_request(
fastmcp_server_context=client,
messages=messages,
max_tokens=request.max_completion_tokens or 8000,
system_prompt=system_prompt, # type: ignore
model_preferences=model_preferences,
metadata=metadata,
)
end_time = time.time()
response_duration = round(end_time - start_time, 4)
logger.info(f"Model called: {response.meta.get('response', {}).get('model', 'unknown')}") # type: ignore
openai_response = response.meta.get("response", {}) # type: ignore
response = process_response(openai_response, response_duration, request)
return response
```
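To show where this helper sits, a hypothetical sketch of calling it from a FastMCP tool; only `ChatCompletionRequest`, `Role`, and `mcp_chat_completion` are taken from the module above, while the message-entry shape and the `ChatCompletionResponse` fields are assumptions, so response handling is left schematic.
```python
# Hypothetical sketch; message construction and response handling are assumptions.
from mcp.server.fastmcp import Context, FastMCP

from mcp_extensions.llm.llm_types import ChatCompletionRequest, Role
from mcp_extensions.llm.mcp_chat_completion import mcp_chat_completion

mcp = FastMCP("example-server")


@mcp.tool()
async def summarize(text: str, ctx: Context) -> str:
    request = ChatCompletionRequest(
        model="gpt-4o",
        max_completion_tokens=1024,
        messages=[
            # assumed message shape: entries with `role` and `content`, as read by mcp_chat_completion
            {"role": Role.SYSTEM, "content": "Summarize the user's text in one short paragraph."},
            {"role": Role.USER, "content": text},
        ],
    )
    response = await mcp_chat_completion(request, ctx)
    # ChatCompletionResponse fields are not shown above; extract the completion text
    # according to however llm_types defines the response model.
    return str(response)
```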