This is page 28 of 114. Use http://codebase.md/microsoft/semanticworkbench?lines=false&page={x} (replacing {x} with the desired page number) to view the rest of the repository context.
# Directory Structure
```
├── .devcontainer
│ ├── .vscode
│ │ └── settings.json
│ ├── devcontainer.json
│ ├── OPTIMIZING_FOR_CODESPACES.md
│ ├── POST_SETUP_README.md
│ └── README.md
├── .dockerignore
├── .gitattributes
├── .github
│ ├── policheck.yml
│ └── workflows
│ ├── assistants-codespace-assistant.yml
│ ├── assistants-document-assistant.yml
│ ├── assistants-explorer-assistant.yml
│ ├── assistants-guided-conversation-assistant.yml
│ ├── assistants-knowledge-transfer-assistant.yml
│ ├── assistants-navigator-assistant.yml
│ ├── assistants-project-assistant.yml
│ ├── assistants-prospector-assistant.yml
│ ├── assistants-skill-assistant.yml
│ ├── libraries.yml
│ ├── mcp-server-giphy.yml
│ ├── mcp-server-memory-filesystem-edit.yml
│ ├── mcp-server-memory-user-bio.yml
│ ├── mcp-server-memory-whiteboard.yml
│ ├── mcp-server-open-deep-research-clone.yml
│ ├── mcp-server-web-research.yml
│ ├── workbench-app.yml
│ └── workbench-service.yml
├── .gitignore
├── .multi-root-tools
│ ├── Makefile
│ └── README.md
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── ai_context
│ └── generated
│ ├── ASPIRE_ORCHESTRATOR.md
│ ├── ASSISTANT_CODESPACE.md
│ ├── ASSISTANT_DOCUMENT.md
│ ├── ASSISTANT_NAVIGATOR.md
│ ├── ASSISTANT_PROJECT.md
│ ├── ASSISTANT_PROSPECTOR.md
│ ├── ASSISTANTS_OTHER.md
│ ├── ASSISTANTS_OVERVIEW.md
│ ├── CONFIGURATION.md
│ ├── DOTNET_LIBRARIES.md
│ ├── EXAMPLES.md
│ ├── MCP_SERVERS.md
│ ├── PYTHON_LIBRARIES_AI_CLIENTS.md
│ ├── PYTHON_LIBRARIES_CORE.md
│ ├── PYTHON_LIBRARIES_EXTENSIONS.md
│ ├── PYTHON_LIBRARIES_SKILLS.md
│ ├── PYTHON_LIBRARIES_SPECIALIZED.md
│ ├── TOOLS.md
│ ├── WORKBENCH_FRONTEND.md
│ └── WORKBENCH_SERVICE.md
├── aspire-orchestrator
│ ├── .editorconfig
│ ├── Aspire.AppHost
│ │ ├── .gitignore
│ │ ├── appsettings.json
│ │ ├── Aspire.AppHost.csproj
│ │ ├── Program.cs
│ │ └── Properties
│ │ └── launchSettings.json
│ ├── Aspire.Extensions
│ │ ├── Aspire.Extensions.csproj
│ │ ├── Dashboard.cs
│ │ ├── DockerFileExtensions.cs
│ │ ├── PathNormalizer.cs
│ │ ├── UvAppHostingExtensions.cs
│ │ ├── UvAppResource.cs
│ │ ├── VirtualEnvironment.cs
│ │ └── WorkbenchServiceHostingExtensions.cs
│ ├── Aspire.ServiceDefaults
│ │ ├── Aspire.ServiceDefaults.csproj
│ │ └── Extensions.cs
│ ├── README.md
│ ├── run.sh
│ ├── SemanticWorkbench.Aspire.sln
│ └── SemanticWorkbench.Aspire.sln.DotSettings
├── assistants
│ ├── codespace-assistant
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ ├── icon_context_transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── models.py
│ │ │ │ ├── request_builder.py
│ │ │ │ ├── response.py
│ │ │ │ ├── step_handler.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── abbreviations.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ └── openai_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── card_content_context_transfer.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── codespace_assistant_info.md
│ │ │ │ ├── context_transfer_assistant_info.md
│ │ │ │ ├── guardrails_prompt.txt
│ │ │ │ ├── guidance_prompt_context_transfer.txt
│ │ │ │ ├── guidance_prompt.txt
│ │ │ │ ├── instruction_prompt_context_transfer.txt
│ │ │ │ └── instruction_prompt.txt
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── document-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── context_management
│ │ │ │ ├── __init__.py
│ │ │ │ └── inspector.py
│ │ │ ├── filesystem
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _convert.py
│ │ │ │ ├── _file_sources.py
│ │ │ │ ├── _filesystem.py
│ │ │ │ ├── _inspector.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _prompts.py
│ │ │ │ └── _tasks.py
│ │ │ ├── guidance
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dynamic_ui_inspector.py
│ │ │ │ ├── guidance_config.py
│ │ │ │ ├── guidance_prompts.py
│ │ │ │ └── README.md
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── models.py
│ │ │ │ ├── prompts.py
│ │ │ │ ├── responder.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ ├── openai_utils.py
│ │ │ │ ├── tokens_tiktoken.py
│ │ │ │ └── workbench_messages.py
│ │ │ ├── text_includes
│ │ │ │ └── document_assistant_info.md
│ │ │ ├── types.py
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── assistant.code-workspace
│ │ ├── CLAUDE.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_convert.py
│ │ │ └── test_data
│ │ │ ├── blank_image.png
│ │ │ ├── Formatting Test.docx
│ │ │ ├── sample_data.csv
│ │ │ ├── sample_data.xlsx
│ │ │ ├── sample_page.html
│ │ │ ├── sample_presentation.pptx
│ │ │ └── simple_pdf.pdf
│ │ └── uv.lock
│ ├── explorer-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── model.py
│ │ │ │ ├── response_anthropic.py
│ │ │ │ ├── response_openai.py
│ │ │ │ └── response.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── guided-conversation-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agents
│ │ │ │ ├── guided_conversation
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── definition.py
│ │ │ │ │ └── definitions
│ │ │ │ │ ├── er_triage.py
│ │ │ │ │ ├── interview.py
│ │ │ │ │ ├── patient_intake.py
│ │ │ │ │ └── poem_feedback.py
│ │ │ │ └── guided_conversation_agent.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── knowledge-transfer-assistant
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agentic
│ │ │ │ ├── __init__.py
│ │ │ │ ├── analysis.py
│ │ │ │ ├── coordinator_support.py
│ │ │ │ └── team_welcome.py
│ │ │ ├── assets
│ │ │ │ ├── icon-knowledge-transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── assistant.py
│ │ │ ├── common.py
│ │ │ ├── config.py
│ │ │ ├── conversation_clients.py
│ │ │ ├── conversation_share_link.py
│ │ │ ├── data.py
│ │ │ ├── domain
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audience_manager.py
│ │ │ │ ├── information_request_manager.py
│ │ │ │ ├── knowledge_brief_manager.py
│ │ │ │ ├── knowledge_digest_manager.py
│ │ │ │ ├── learning_objectives_manager.py
│ │ │ │ └── share_manager.py
│ │ │ ├── files.py
│ │ │ ├── logging.py
│ │ │ ├── notifications.py
│ │ │ ├── respond.py
│ │ │ ├── storage_models.py
│ │ │ ├── storage.py
│ │ │ ├── string_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── assistant_info.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── coordinator_instructions.txt
│ │ │ │ ├── coordinator_role.txt
│ │ │ │ ├── knowledge_digest_instructions.txt
│ │ │ │ ├── knowledge_digest_prompt.txt
│ │ │ │ ├── share_information_request_detection.txt
│ │ │ │ ├── team_instructions.txt
│ │ │ │ ├── team_role.txt
│ │ │ │ └── welcome_message_generation.txt
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── information_requests.py
│ │ │ │ ├── learning_objectives.py
│ │ │ │ ├── learning_outcomes.py
│ │ │ │ ├── progress_tracking.py
│ │ │ │ └── share_setup.py
│ │ │ ├── ui_tabs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brief.py
│ │ │ │ ├── common.py
│ │ │ │ ├── debug.py
│ │ │ │ ├── learning.py
│ │ │ │ └── sharing.py
│ │ │ └── utils.py
│ │ ├── CLAUDE.md
│ │ ├── docs
│ │ │ ├── design
│ │ │ │ ├── actions.md
│ │ │ │ └── inference.md
│ │ │ ├── DEV_GUIDE.md
│ │ │ ├── how-kta-works.md
│ │ │ ├── JTBD.md
│ │ │ ├── knowledge-transfer-goals.md
│ │ │ ├── learning_assistance.md
│ │ │ ├── notable_claude_conversations
│ │ │ │ ├── clarifying_quad_modal_design.md
│ │ │ │ ├── CLAUDE_PROMPTS.md
│ │ │ │ ├── transfer_state.md
│ │ │ │ └── trying_the_context_agent.md
│ │ │ └── opportunities-of-knowledge-transfer.md
│ │ ├── knowledge-transfer-assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_artifact_loading.py
│ │ │ ├── test_inspector.py
│ │ │ ├── test_share_manager.py
│ │ │ ├── test_share_storage.py
│ │ │ ├── test_share_tools.py
│ │ │ └── test_team_mode.py
│ │ └── uv.lock
│ ├── Makefile
│ ├── navigator-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ ├── card_content.md
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── completion_requestor.py
│ │ │ │ ├── local_tool
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── add_assistant_to_conversation.py
│ │ │ │ │ ├── list_assistant_services.py
│ │ │ │ │ └── model.py
│ │ │ │ ├── models.py
│ │ │ │ ├── prompt.py
│ │ │ │ ├── request_builder.py
│ │ │ │ ├── response.py
│ │ │ │ ├── step_handler.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ ├── openai_utils.py
│ │ │ │ └── tools.py
│ │ │ ├── text_includes
│ │ │ │ ├── guardrails_prompt.md
│ │ │ │ ├── guidance_prompt.md
│ │ │ │ ├── instruction_prompt.md
│ │ │ │ ├── navigator_assistant_info.md
│ │ │ │ └── semantic_workbench_features.md
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── project-assistant
│ │ ├── .cspell
│ │ │ └── custom-dictionary-workspace.txt
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agentic
│ │ │ │ ├── __init__.py
│ │ │ │ ├── act.py
│ │ │ │ ├── coordinator_next_action.py
│ │ │ │ ├── create_invitation.py
│ │ │ │ ├── detect_audience_and_takeaways.py
│ │ │ │ ├── detect_coordinator_actions.py
│ │ │ │ ├── detect_information_request_needs.py
│ │ │ │ ├── detect_knowledge_package_gaps.py
│ │ │ │ ├── focus.py
│ │ │ │ ├── respond.py
│ │ │ │ ├── team_welcome.py
│ │ │ │ └── update_digest.py
│ │ │ ├── assets
│ │ │ │ ├── icon-knowledge-transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── assistant.py
│ │ │ ├── common.py
│ │ │ ├── config.py
│ │ │ ├── conversation_clients.py
│ │ │ ├── data.py
│ │ │ ├── domain
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audience_manager.py
│ │ │ │ ├── conversation_preferences_manager.py
│ │ │ │ ├── information_request_manager.py
│ │ │ │ ├── knowledge_brief_manager.py
│ │ │ │ ├── knowledge_digest_manager.py
│ │ │ │ ├── learning_objectives_manager.py
│ │ │ │ ├── share_manager.py
│ │ │ │ ├── tasks_manager.py
│ │ │ │ └── transfer_manager.py
│ │ │ ├── errors.py
│ │ │ ├── files.py
│ │ │ ├── logging.py
│ │ │ ├── notifications.py
│ │ │ ├── prompt_utils.py
│ │ │ ├── storage.py
│ │ │ ├── string_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── actor_instructions.md
│ │ │ │ ├── assistant_info.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── coordinator_instructions copy.md
│ │ │ │ ├── coordinator_instructions.md
│ │ │ │ ├── create_invitation.md
│ │ │ │ ├── detect_audience.md
│ │ │ │ ├── detect_coordinator_actions.md
│ │ │ │ ├── detect_information_request_needs.md
│ │ │ │ ├── detect_knowledge_package_gaps.md
│ │ │ │ ├── focus.md
│ │ │ │ ├── knowledge_digest_instructions.txt
│ │ │ │ ├── team_instructions.txt
│ │ │ │ ├── to_do.md
│ │ │ │ ├── update_knowledge_brief.md
│ │ │ │ ├── update_knowledge_digest.md
│ │ │ │ └── welcome_message_generation.txt
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── conversation_preferences.py
│ │ │ │ ├── information_requests.py
│ │ │ │ ├── learning_objectives.py
│ │ │ │ ├── learning_outcomes.py
│ │ │ │ ├── progress_tracking.py
│ │ │ │ ├── share_setup.py
│ │ │ │ ├── system_reminders.py
│ │ │ │ ├── tasks.py
│ │ │ │ └── todo.py
│ │ │ ├── ui_tabs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brief.py
│ │ │ │ ├── common.py
│ │ │ │ ├── debug.py
│ │ │ │ ├── learning.py
│ │ │ │ └── sharing.py
│ │ │ └── utils.py
│ │ ├── CLAUDE.md
│ │ ├── docs
│ │ │ ├── design
│ │ │ │ ├── actions.md
│ │ │ │ ├── control_options.md
│ │ │ │ ├── design.md
│ │ │ │ ├── inference.md
│ │ │ │ └── PXL_20250814_190140267.jpg
│ │ │ ├── DEV_GUIDE.md
│ │ │ ├── how-kta-works.md
│ │ │ ├── JTBD.md
│ │ │ ├── knowledge-transfer-goals.md
│ │ │ ├── learning_assistance.md
│ │ │ ├── notable_claude_conversations
│ │ │ │ ├── clarifying_quad_modal_design.md
│ │ │ │ ├── CLAUDE_PROMPTS.md
│ │ │ │ ├── transfer_state.md
│ │ │ │ └── trying_the_context_agent.md
│ │ │ └── opportunities-of-knowledge-transfer.md
│ │ ├── knowledge-transfer-assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_artifact_loading.py
│ │ │ ├── test_inspector.py
│ │ │ ├── test_share_manager.py
│ │ │ ├── test_share_storage.py
│ │ │ └── test_team_mode.py
│ │ └── uv.lock
│ ├── prospector-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agents
│ │ │ │ ├── artifact_agent.py
│ │ │ │ ├── document
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── gc_draft_content_feedback_config.py
│ │ │ │ │ ├── gc_draft_outline_feedback_config.py
│ │ │ │ │ ├── guided_conversation.py
│ │ │ │ │ └── state.py
│ │ │ │ └── document_agent.py
│ │ │ ├── artifact_creation_extension
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _llm.py
│ │ │ │ ├── config.py
│ │ │ │ ├── document.py
│ │ │ │ ├── extension.py
│ │ │ │ ├── store.py
│ │ │ │ ├── test
│ │ │ │ │ ├── conftest.py
│ │ │ │ │ ├── evaluation.py
│ │ │ │ │ ├── test_completion_with_tools.py
│ │ │ │ │ └── test_extension.py
│ │ │ │ └── tools.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── form_fill_extension
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ ├── extension.py
│ │ │ │ ├── inspector.py
│ │ │ │ ├── state.py
│ │ │ │ └── steps
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _guided_conversation.py
│ │ │ │ ├── _llm.py
│ │ │ │ ├── acquire_form_step.py
│ │ │ │ ├── extract_form_fields_step.py
│ │ │ │ ├── fill_form_step.py
│ │ │ │ └── types.py
│ │ │ ├── helpers.py
│ │ │ ├── legacy.py
│ │ │ └── text_includes
│ │ │ ├── artifact_agent_enabled.md
│ │ │ ├── guardrails_prompt.txt
│ │ │ ├── guided_conversation_agent_enabled.md
│ │ │ └── skills_agent_enabled.md
│ │ ├── assistant.code-workspace
│ │ ├── gc_learnings
│ │ │ ├── gc_learnings.md
│ │ │ └── images
│ │ │ ├── gc_conversation_plan_fcn.png
│ │ │ ├── gc_conversation_plan_template.png
│ │ │ ├── gc_execute_plan_callstack.png
│ │ │ ├── gc_functions.png
│ │ │ ├── gc_generate_plan_callstack.png
│ │ │ ├── gc_get_resource_instructions.png
│ │ │ ├── gc_get_termination_instructions.png
│ │ │ ├── gc_kernel_arguments.png
│ │ │ ├── gc_plan_calls.png
│ │ │ ├── gc_termination_instructions.png
│ │ │ ├── sk_get_chat_message_contents.png
│ │ │ ├── sk_inner_get_chat_message_contents.png
│ │ │ ├── sk_send_request_prep.png
│ │ │ └── sk_send_request.png
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ └── skill-assistant
│ ├── .env.example
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── assistant
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── logging.py
│ │ ├── skill_assistant.py
│ │ ├── skill_engine_registry.py
│ │ ├── skill_event_mapper.py
│ │ ├── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ └── workbench_helpers.py
│ ├── assistant.code-workspace
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── tests
│ │ └── test_setup.py
│ └── uv.lock
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docs
│ ├── .vscode
│ │ └── settings.json
│ ├── ASSISTANT_CONFIG.md
│ ├── ASSISTANT_DEVELOPMENT_GUIDE.md
│ ├── CUSTOM_APP_REGISTRATION.md
│ ├── HOSTED_ASSISTANT_WITH_LOCAL_MCP_SERVERS.md
│ ├── images
│ │ ├── architecture-animation.gif
│ │ ├── configure_assistant.png
│ │ ├── conversation_canvas_open.png
│ │ ├── conversation_duplicate.png
│ │ ├── conversation_export.png
│ │ ├── conversation_share_dialog.png
│ │ ├── conversation_share_link.png
│ │ ├── dashboard_configured_view.png
│ │ ├── dashboard_view.png
│ │ ├── license_agreement.png
│ │ ├── message_bar.png
│ │ ├── message_inspection.png
│ │ ├── message_link.png
│ │ ├── new_prospector_assistant_dialog.png
│ │ ├── open_conversation_canvas.png
│ │ ├── prospector_example.png
│ │ ├── readme1.png
│ │ ├── readme2.png
│ │ ├── readme3.png
│ │ ├── rewind.png
│ │ ├── signin_page.png
│ │ └── splash_screen.png
│ ├── LOCAL_ASSISTANT_WITH_REMOTE_WORKBENCH.md
│ ├── SETUP_DEV_ENVIRONMENT.md
│ └── WORKBENCH_APP.md
├── examples
│ ├── dotnet
│ │ ├── .editorconfig
│ │ ├── dotnet-01-echo-bot
│ │ │ ├── appsettings.json
│ │ │ ├── dotnet-01-echo-bot.csproj
│ │ │ ├── MyAgent.cs
│ │ │ ├── MyAgentConfig.cs
│ │ │ ├── MyWorkbenchConnector.cs
│ │ │ ├── Program.cs
│ │ │ └── README.md
│ │ ├── dotnet-02-message-types-demo
│ │ │ ├── appsettings.json
│ │ │ ├── ConnectorExtensions.cs
│ │ │ ├── docs
│ │ │ │ ├── abc.png
│ │ │ │ ├── code.png
│ │ │ │ ├── config.png
│ │ │ │ ├── echo.png
│ │ │ │ ├── markdown.png
│ │ │ │ ├── mermaid.png
│ │ │ │ ├── reverse.png
│ │ │ │ └── safety-check.png
│ │ │ ├── dotnet-02-message-types-demo.csproj
│ │ │ ├── MyAgent.cs
│ │ │ ├── MyAgentConfig.cs
│ │ │ ├── MyWorkbenchConnector.cs
│ │ │ ├── Program.cs
│ │ │ └── README.md
│ │ └── dotnet-03-simple-chatbot
│ │ ├── appsettings.json
│ │ ├── ConnectorExtensions.cs
│ │ ├── dotnet-03-simple-chatbot.csproj
│ │ ├── MyAgent.cs
│ │ ├── MyAgentConfig.cs
│ │ ├── MyWorkbenchConnector.cs
│ │ ├── Program.cs
│ │ └── README.md
│ ├── Makefile
│ └── python
│ ├── python-01-echo-bot
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ └── config.py
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── python-02-simple-chatbot
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ └── python-03-multimodel-chatbot
│ ├── .env.example
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── assistant
│ │ ├── __init__.py
│ │ ├── chat.py
│ │ ├── config.py
│ │ ├── model_adapters.py
│ │ └── text_includes
│ │ └── guardrails_prompt.txt
│ ├── assistant.code-workspace
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ └── uv.lock
├── KNOWN_ISSUES.md
├── libraries
│ ├── dotnet
│ │ ├── .editorconfig
│ │ ├── pack.sh
│ │ ├── README.md
│ │ ├── SemanticWorkbench.sln
│ │ ├── SemanticWorkbench.sln.DotSettings
│ │ └── WorkbenchConnector
│ │ ├── AgentBase.cs
│ │ ├── AgentConfig
│ │ │ ├── AgentConfigBase.cs
│ │ │ ├── AgentConfigPropertyAttribute.cs
│ │ │ └── ConfigUtils.cs
│ │ ├── Constants.cs
│ │ ├── IAgentBase.cs
│ │ ├── icon.png
│ │ ├── Models
│ │ │ ├── Command.cs
│ │ │ ├── Conversation.cs
│ │ │ ├── ConversationEvent.cs
│ │ │ ├── DebugInfo.cs
│ │ │ ├── Insight.cs
│ │ │ ├── Message.cs
│ │ │ ├── MessageMetadata.cs
│ │ │ ├── Participant.cs
│ │ │ ├── Sender.cs
│ │ │ └── ServiceInfo.cs
│ │ ├── Storage
│ │ │ ├── AgentInfo.cs
│ │ │ ├── AgentServiceStorage.cs
│ │ │ └── IAgentServiceStorage.cs
│ │ ├── StringLoggingExtensions.cs
│ │ ├── Webservice.cs
│ │ ├── WorkbenchConfig.cs
│ │ ├── WorkbenchConnector.cs
│ │ └── WorkbenchConnector.csproj
│ ├── Makefile
│ └── python
│ ├── anthropic-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── anthropic_client
│ │ │ ├── __init__.py
│ │ │ ├── client.py
│ │ │ ├── config.py
│ │ │ └── messages.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── assistant-data-gen
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant_data_gen
│ │ │ ├── __init__.py
│ │ │ ├── assistant_api.py
│ │ │ ├── config.py
│ │ │ ├── gce
│ │ │ │ ├── __init__.py
│ │ │ │ ├── gce_agent.py
│ │ │ │ └── prompts.py
│ │ │ └── pydantic_ai_utils.py
│ │ ├── configs
│ │ │ └── document_assistant_example_config.yaml
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── scripts
│ │ │ ├── gce_simulation.py
│ │ │ └── generate_scenario.py
│ │ └── uv.lock
│ ├── assistant-drive
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ └── settings.json
│ │ ├── assistant_drive
│ │ │ ├── __init__.py
│ │ │ ├── drive.py
│ │ │ └── tests
│ │ │ └── test_basic.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── pytest.ini
│ │ ├── README.md
│ │ ├── usage.ipynb
│ │ └── uv.lock
│ ├── assistant-extensions
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── assistant_extensions
│ │ │ ├── __init__.py
│ │ │ ├── ai_clients
│ │ │ │ └── config.py
│ │ │ ├── artifacts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _artifacts.py
│ │ │ │ ├── _inspector.py
│ │ │ │ └── _model.py
│ │ │ ├── attachments
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _attachments.py
│ │ │ │ ├── _convert.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _shared.py
│ │ │ │ └── _summarizer.py
│ │ │ ├── chat_context_toolkit
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _config.py
│ │ │ │ ├── archive
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _archive.py
│ │ │ │ │ └── _summarizer.py
│ │ │ │ ├── message_history
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _history.py
│ │ │ │ │ └── _message.py
│ │ │ │ └── virtual_filesystem
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _archive_file_source.py
│ │ │ │ └── _attachments_file_source.py
│ │ │ ├── dashboard_card
│ │ │ │ ├── __init__.py
│ │ │ │ └── _dashboard_card.py
│ │ │ ├── document_editor
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _extension.py
│ │ │ │ ├── _inspector.py
│ │ │ │ └── _model.py
│ │ │ ├── mcp
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _assistant_file_resource_handler.py
│ │ │ │ ├── _client_utils.py
│ │ │ │ ├── _devtunnel.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _openai_utils.py
│ │ │ │ ├── _sampling_handler.py
│ │ │ │ ├── _tool_utils.py
│ │ │ │ └── _workbench_file_resource_handler.py
│ │ │ ├── navigator
│ │ │ │ ├── __init__.py
│ │ │ │ └── _navigator.py
│ │ │ └── workflows
│ │ │ ├── __init__.py
│ │ │ ├── _model.py
│ │ │ ├── _workflows.py
│ │ │ └── runners
│ │ │ └── _user_proxy.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ └── attachments
│ │ │ └── test_attachments.py
│ │ └── uv.lock
│ ├── chat-context-toolkit
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.sample
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── assets
│ │ │ ├── archive_v1.png
│ │ │ ├── history_v1.png
│ │ │ └── vfs_v1.png
│ │ ├── chat_context_toolkit
│ │ │ ├── __init__.py
│ │ │ ├── archive
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _archive_reader.py
│ │ │ │ ├── _archive_task_queue.py
│ │ │ │ ├── _state.py
│ │ │ │ ├── _types.py
│ │ │ │ └── summarization
│ │ │ │ ├── __init__.py
│ │ │ │ └── _summarizer.py
│ │ │ ├── history
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _budget.py
│ │ │ │ ├── _decorators.py
│ │ │ │ ├── _history.py
│ │ │ │ ├── _prioritize.py
│ │ │ │ ├── _types.py
│ │ │ │ └── tool_abbreviations
│ │ │ │ ├── __init__.py
│ │ │ │ └── _tool_abbreviations.py
│ │ │ └── virtual_filesystem
│ │ │ ├── __init__.py
│ │ │ ├── _types.py
│ │ │ ├── _virtual_filesystem.py
│ │ │ ├── README.md
│ │ │ └── tools
│ │ │ ├── __init__.py
│ │ │ ├── _ls_tool.py
│ │ │ ├── _tools.py
│ │ │ └── _view_tool.py
│ │ ├── CLAUDE.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ ├── archive
│ │ │ │ └── test_archive_reader.py
│ │ │ ├── history
│ │ │ │ ├── test_abbreviate_messages.py
│ │ │ │ ├── test_history.py
│ │ │ │ ├── test_pair_and_order_tool_messages.py
│ │ │ │ ├── test_prioritize.py
│ │ │ │ └── test_truncate_messages.py
│ │ │ └── virtual_filesystem
│ │ │ ├── test_virtual_filesystem.py
│ │ │ └── tools
│ │ │ ├── test_ls_tool.py
│ │ │ ├── test_tools.py
│ │ │ └── test_view_tool.py
│ │ └── uv.lock
│ ├── content-safety
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── content_safety
│ │ │ ├── __init__.py
│ │ │ ├── evaluators
│ │ │ │ ├── __init__.py
│ │ │ │ ├── azure_content_safety
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── config.py
│ │ │ │ │ └── evaluator.py
│ │ │ │ ├── config.py
│ │ │ │ ├── evaluator.py
│ │ │ │ └── openai_moderations
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ └── evaluator.py
│ │ │ └── README.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── events
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── events
│ │ │ ├── __init__.py
│ │ │ └── events.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── guided-conversation
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── guided_conversation
│ │ │ ├── __init__.py
│ │ │ ├── functions
│ │ │ │ ├── __init__.py
│ │ │ │ ├── conversation_plan.py
│ │ │ │ ├── execution.py
│ │ │ │ └── final_update_plan.py
│ │ │ ├── guided_conversation_agent.py
│ │ │ ├── plugins
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agenda.py
│ │ │ │ └── artifact.py
│ │ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── base_model_llm.py
│ │ │ ├── conversation_helpers.py
│ │ │ ├── openai_tool_calling.py
│ │ │ ├── plugin_helpers.py
│ │ │ └── resources.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── llm-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── llm_client
│ │ │ ├── __init__.py
│ │ │ └── model.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── Makefile
│ ├── mcp-extensions
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_extensions
│ │ │ ├── __init__.py
│ │ │ ├── _client_session.py
│ │ │ ├── _model.py
│ │ │ ├── _sampling.py
│ │ │ ├── _server_extensions.py
│ │ │ ├── _tool_utils.py
│ │ │ ├── llm
│ │ │ │ ├── __init__.py
│ │ │ │ ├── chat_completion.py
│ │ │ │ ├── helpers.py
│ │ │ │ ├── llm_types.py
│ │ │ │ ├── mcp_chat_completion.py
│ │ │ │ └── openai_chat_completion.py
│ │ │ └── server
│ │ │ ├── __init__.py
│ │ │ └── storage.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_tool_utils.py
│ │ └── uv.lock
│ ├── mcp-tunnel
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_tunnel
│ │ │ ├── __init__.py
│ │ │ ├── _devtunnel.py
│ │ │ ├── _dir.py
│ │ │ └── _main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── openai-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── openai_client
│ │ │ ├── __init__.py
│ │ │ ├── chat_driver
│ │ │ │ ├── __init__.py
│ │ │ │ ├── chat_driver.ipynb
│ │ │ │ ├── chat_driver.py
│ │ │ │ ├── message_history_providers
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── in_memory_message_history_provider.py
│ │ │ │ │ ├── local_message_history_provider.py
│ │ │ │ │ ├── message_history_provider.py
│ │ │ │ │ └── tests
│ │ │ │ │ └── formatted_instructions_test.py
│ │ │ │ └── README.md
│ │ │ ├── client.py
│ │ │ ├── completion.py
│ │ │ ├── config.py
│ │ │ ├── errors.py
│ │ │ ├── logging.py
│ │ │ ├── messages.py
│ │ │ ├── tokens.py
│ │ │ └── tools.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── test_command_parsing.py
│ │ │ ├── test_formatted_messages.py
│ │ │ ├── test_messages.py
│ │ │ └── test_tokens.py
│ │ └── uv.lock
│ ├── semantic-workbench-api-model
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── semantic_workbench_api_model
│ │ │ ├── __init__.py
│ │ │ ├── assistant_model.py
│ │ │ ├── assistant_service_client.py
│ │ │ ├── workbench_model.py
│ │ │ └── workbench_service_client.py
│ │ └── uv.lock
│ ├── semantic-workbench-assistant
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── semantic_workbench_assistant
│ │ │ ├── __init__.py
│ │ │ ├── assistant_app
│ │ │ │ ├── __init__.py
│ │ │ │ ├── assistant.py
│ │ │ │ ├── config.py
│ │ │ │ ├── content_safety.py
│ │ │ │ ├── context.py
│ │ │ │ ├── error.py
│ │ │ │ ├── export_import.py
│ │ │ │ ├── protocol.py
│ │ │ │ └── service.py
│ │ │ ├── assistant_service.py
│ │ │ ├── auth.py
│ │ │ ├── canonical.py
│ │ │ ├── command.py
│ │ │ ├── config.py
│ │ │ ├── logging_config.py
│ │ │ ├── settings.py
│ │ │ ├── start.py
│ │ │ └── storage.py
│ │ ├── tests
│ │ │ ├── conftest.py
│ │ │ ├── test_assistant_app.py
│ │ │ ├── test_canonical.py
│ │ │ ├── test_config.py
│ │ │ └── test_storage.py
│ │ └── uv.lock
│ └── skills
│ ├── .vscode
│ │ └── settings.json
│ ├── Makefile
│ ├── README.md
│ └── skill-library
│ ├── .vscode
│ │ └── settings.json
│ ├── docs
│ │ └── vs-recipe-tool.md
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── skill_library
│ │ ├── __init__.py
│ │ ├── chat_driver_helpers.py
│ │ ├── cli
│ │ │ ├── azure_openai.py
│ │ │ ├── conversation_history.py
│ │ │ ├── README.md
│ │ │ ├── run_routine.py
│ │ │ ├── settings.py
│ │ │ └── skill_logger.py
│ │ ├── engine.py
│ │ ├── llm_info.txt
│ │ ├── logging.py
│ │ ├── README.md
│ │ ├── routine_stack.py
│ │ ├── skill.py
│ │ ├── skills
│ │ │ ├── common
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── bing_search.py
│ │ │ │ ├── consolidate.py
│ │ │ │ ├── echo.py
│ │ │ │ ├── gather_context.py
│ │ │ │ ├── get_content_from_url.py
│ │ │ │ ├── gpt_complete.py
│ │ │ │ ├── select_user_intent.py
│ │ │ │ └── summarize.py
│ │ │ ├── eval
│ │ │ │ ├── __init__.py
│ │ │ │ ├── eval_skill.py
│ │ │ │ └── routines
│ │ │ │ └── eval.py
│ │ │ ├── fabric
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fabric_skill.py
│ │ │ │ ├── patterns
│ │ │ │ │ ├── agility_story
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── ai
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_answers
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_candidates
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_cfp_submission
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_claims
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_comments
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_debate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_email_headers
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_incident
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_interviewer_techniques
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_logs
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_malware
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_military_strategy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_mistakes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_paper
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_patent
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_personality
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_presentation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_product_feedback
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_proposition
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose_json
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose_pinker
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_risk
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_sales_call
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_spiritual_text
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_tech_impact
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_threat_report
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_threat_report_cmds
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_threat_report_trends
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── answer_interview_question
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── ask_secure_by_design_questions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── ask_uncle_duke
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── capture_thinkers_work
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── check_agreement
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── clean_text
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── coding_master
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── compare_and_contrast
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── convert_to_markdown
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_5_sentence_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_academic_paper
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ai_jobs_analysis
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_aphorisms
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_art_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_better_frame
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_coding_project
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_command
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_cyber_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_design_document
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_diy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_formal_email
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_git_diff_commit
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_graph_from_input
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_hormozi_offer
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_idea_compass
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_investigation_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_keynote
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_logo
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_markmap_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_mermaid_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_mermaid_visualization_for_github
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_micro_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_network_threat_landscape
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_newsletter_entry
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_npc
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_pattern
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_prd
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_prediction_block
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_quiz
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_reading_plan
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_recursive_outline
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_report_finding
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_rpg_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_security_update
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_show_intro
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_sigma_rules
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_story_explanation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_stride_threat_model
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_tags
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_threat_scenarios
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ttrc_graph
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ttrc_narrative
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_upgrade_pack
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_user_story
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_video_chapters
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── dialog_with_socrates
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── enrich_blog_post
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_code
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── explain_docs
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── explain_math
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_project
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_terms
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── export_data_as_csv
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_algorithm_update_recommendations
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_article_wisdom
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_book_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_book_recommendations
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_business_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_controversial_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_core_message
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_ctf_writeup
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_domains
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_extraordinary_claims
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_insights
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_insights_dm
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_instructions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_jokes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_latest_video
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_main_idea
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_most_redeeming_thing
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_patterns
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_poc
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_predictions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_primary_problem
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_primary_solution
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_product_features
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_questions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_recipe
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_recommendations
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_references
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_skills
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_song_meaning
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_sponsors
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_videoid
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_wisdom
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_agents
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_dm
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_nometa
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── find_hidden_message
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── find_logical_fallacies
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── get_wow_per_minute
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── get_youtube_rss
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── humanize
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_distinctions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_perspectives
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_relationships
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_systems
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_job_stories
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── improve_academic_writing
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── improve_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── improve_report_finding
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── improve_writing
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── judge_output
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── label_and_rate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── loaded
│ │ │ │ │ ├── md_callout
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── official_pattern_template
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── pattern_explanations.md
│ │ │ │ │ ├── prepare_7s_strategy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── provide_guidance
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_ai_response
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_ai_result
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_content
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── rate_value
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── raw_query
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── raycast
│ │ │ │ │ │ ├── capture_thinkers_work
│ │ │ │ │ │ ├── create_story_explanation
│ │ │ │ │ │ ├── extract_primary_problem
│ │ │ │ │ │ ├── extract_wisdom
│ │ │ │ │ │ └── yt
│ │ │ │ │ ├── recommend_artists
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── recommend_pipeline_upgrades
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── recommend_talkpanel_topics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── refine_design_document
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── review_design
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── sanitize_broken_html_to_markdown
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── show_fabric_options_markmap
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── solve_with_cot
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── stringify
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── suggest_pattern
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── summarize
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_debate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_git_changes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_git_diff
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_lecture
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_legislation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_meeting
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_micro
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_newsletter
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_paper
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_pull-requests
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_rpg_session
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_analyze_challenge_handling
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_check_metrics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_create_h3_career
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_create_opening_sentences
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_describe_life_outlook
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_extract_intro_sentences
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_extract_panel_topics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_blindspots
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_negative_thinking
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_neglected_goals
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_give_encouragement
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_red_team_thinking
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_threat_model_plans
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_visualize_mission_goals_projects
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_year_in_review
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── to_flashcards
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── transcribe_minutes
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── translate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── tweet
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_essay
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_hackerone_report
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_latex
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_micro_essay
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_nuclei_template_rule
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── write_pull-request
│ │ │ │ │ │ └── system.md
│ │ │ │ │ └── write_semgrep_rule
│ │ │ │ │ ├── system.md
│ │ │ │ │ └── user.md
│ │ │ │ └── routines
│ │ │ │ ├── list.py
│ │ │ │ ├── run.py
│ │ │ │ └── show.py
│ │ │ ├── guided_conversation
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agenda.py
│ │ │ │ ├── artifact_helpers.py
│ │ │ │ ├── chat_completions
│ │ │ │ │ ├── fix_agenda_error.py
│ │ │ │ │ ├── fix_artifact_error.py
│ │ │ │ │ ├── generate_agenda.py
│ │ │ │ │ ├── generate_artifact_updates.py
│ │ │ │ │ ├── generate_final_artifact.py
│ │ │ │ │ └── generate_message.py
│ │ │ │ ├── conversation_guides
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── acrostic_poem.py
│ │ │ │ │ ├── er_triage.py
│ │ │ │ │ ├── interview.py
│ │ │ │ │ └── patient_intake.py
│ │ │ │ ├── guide.py
│ │ │ │ ├── guided_conversation_skill.py
│ │ │ │ ├── logging.py
│ │ │ │ ├── message.py
│ │ │ │ ├── resources.py
│ │ │ │ ├── routines
│ │ │ │ │ └── guided_conversation.py
│ │ │ │ └── tests
│ │ │ │ ├── conftest.py
│ │ │ │ ├── test_artifact_helpers.py
│ │ │ │ ├── test_generate_agenda.py
│ │ │ │ ├── test_generate_artifact_updates.py
│ │ │ │ ├── test_generate_final_artifact.py
│ │ │ │ └── test_resource.py
│ │ │ ├── meta
│ │ │ │ ├── __init__.py
│ │ │ │ ├── meta_skill.py
│ │ │ │ ├── README.md
│ │ │ │ └── routines
│ │ │ │ └── generate_routine.py
│ │ │ ├── posix
│ │ │ │ ├── __init__.py
│ │ │ │ ├── posix_skill.py
│ │ │ │ ├── routines
│ │ │ │ │ ├── append_file.py
│ │ │ │ │ ├── cd.py
│ │ │ │ │ ├── ls.py
│ │ │ │ │ ├── make_home_dir.py
│ │ │ │ │ ├── mkdir.py
│ │ │ │ │ ├── mv.py
│ │ │ │ │ ├── pwd.py
│ │ │ │ │ ├── read_file.py
│ │ │ │ │ ├── rm.py
│ │ │ │ │ ├── touch.py
│ │ │ │ │ └── write_file.py
│ │ │ │ └── sandbox_shell.py
│ │ │ ├── README.md
│ │ │ ├── research
│ │ │ │ ├── __init__.py
│ │ │ │ ├── README.md
│ │ │ │ ├── research_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── answer_question_about_content.py
│ │ │ │ ├── evaluate_answer.py
│ │ │ │ ├── generate_research_plan.py
│ │ │ │ ├── generate_search_query.py
│ │ │ │ ├── update_research_plan.py
│ │ │ │ ├── web_research.py
│ │ │ │ └── web_search.py
│ │ │ ├── research2
│ │ │ │ ├── __init__.py
│ │ │ │ ├── README.md
│ │ │ │ ├── research_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── facts.py
│ │ │ │ ├── make_final_report.py
│ │ │ │ ├── research.py
│ │ │ │ ├── search_plan.py
│ │ │ │ ├── search.py
│ │ │ │ └── visit_pages.py
│ │ │ └── web_research
│ │ │ ├── __init__.py
│ │ │ ├── README.md
│ │ │ ├── research_skill.py
│ │ │ └── routines
│ │ │ ├── facts.py
│ │ │ ├── make_final_report.py
│ │ │ ├── research.py
│ │ │ ├── search_plan.py
│ │ │ ├── search.py
│ │ │ └── visit_pages.py
│ │ ├── tests
│ │ │ ├── test_common_skill.py
│ │ │ ├── test_integration.py
│ │ │ ├── test_routine_stack.py
│ │ │ ├── tst_skill
│ │ │ │ ├── __init__.py
│ │ │ │ └── routines
│ │ │ │ ├── __init__.py
│ │ │ │ └── a_routine.py
│ │ │ └── utilities
│ │ │ ├── test_find_template_vars.py
│ │ │ ├── test_make_arg_set.py
│ │ │ ├── test_paramspec.py
│ │ │ ├── test_parse_command_string.py
│ │ │ └── test_to_string.py
│ │ ├── types.py
│ │ ├── usage.py
│ │ └── utilities.py
│ └── uv.lock
├── LICENSE
├── Makefile
├── mcp-servers
│ ├── ai-assist-content
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── mcp-example-brave-search.md
│ │ ├── mcp-fastmcp-typescript-README.md
│ │ ├── mcp-llms-full.txt
│ │ ├── mcp-metadata-tips.md
│ │ ├── mcp-python-sdk-README.md
│ │ ├── mcp-typescript-sdk-README.md
│ │ ├── pydanticai-documentation.md
│ │ ├── pydanticai-example-question-graph.md
│ │ ├── pydanticai-example-weather.md
│ │ ├── pydanticai-tutorial.md
│ │ └── README.md
│ ├── Makefile
│ ├── mcp-server-bing-search
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_bing_search
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── clean_website.py
│ │ │ │ └── filter_links.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── tools.py
│ │ │ ├── types.py
│ │ │ ├── utils.py
│ │ │ └── web
│ │ │ ├── __init__.py
│ │ │ ├── get_content.py
│ │ │ ├── llm_processing.py
│ │ │ ├── process_website.py
│ │ │ └── search_bing.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_tools.py
│ │ └── uv.lock
│ ├── mcp-server-bundle
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_bundle
│ │ │ ├── __init__.py
│ │ │ └── main.py
│ │ ├── pyinstaller.spec
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-filesystem
│ │ ├── .env.example
│ │ ├── .github
│ │ │ └── workflows
│ │ │ └── ci.yml
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_filesystem
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_filesystem.py
│ │ └── uv.lock
│ ├── mcp-server-filesystem-edit
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── data
│ │ │ ├── attachments
│ │ │ │ ├── Daily Game Ideas.txt
│ │ │ │ ├── Frontend Framework Proposal.txt
│ │ │ │ ├── ReDoodle.txt
│ │ │ │ └── Research Template.tex
│ │ │ ├── test_cases.yaml
│ │ │ └── transcripts
│ │ │ ├── transcript_research_simple.md
│ │ │ ├── transcript_Startup_Idea_1_202503031513.md
│ │ │ ├── transcript_Startup_Idea_2_202503031659.md
│ │ │ └── transcript_Web_Frontends_202502281551.md
│ │ ├── Makefile
│ │ ├── mcp_server_filesystem_edit
│ │ │ ├── __init__.py
│ │ │ ├── app_handling
│ │ │ │ ├── __init__.py
│ │ │ │ ├── excel.py
│ │ │ │ ├── miktex.py
│ │ │ │ ├── office_common.py
│ │ │ │ ├── powerpoint.py
│ │ │ │ └── word.py
│ │ │ ├── config.py
│ │ │ ├── evals
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common.py
│ │ │ │ ├── run_comments.py
│ │ │ │ ├── run_edit.py
│ │ │ │ └── run_ppt_edit.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── add_comments.py
│ │ │ │ ├── analyze_comments.py
│ │ │ │ ├── latex_edit.py
│ │ │ │ ├── markdown_draft.py
│ │ │ │ ├── markdown_edit.py
│ │ │ │ └── powerpoint_edit.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── add_comments.py
│ │ │ │ ├── edit_adapters
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── common.py
│ │ │ │ │ ├── latex.py
│ │ │ │ │ └── markdown.py
│ │ │ │ ├── edit.py
│ │ │ │ └── helpers.py
│ │ │ └── types.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── app_handling
│ │ │ │ ├── test_excel.py
│ │ │ │ ├── test_miktext.py
│ │ │ │ ├── test_office_common.py
│ │ │ │ ├── test_powerpoint.py
│ │ │ │ └── test_word.py
│ │ │ ├── conftest.py
│ │ │ └── tools
│ │ │ └── edit_adapters
│ │ │ ├── test_latex.py
│ │ │ └── test_markdown.py
│ │ └── uv.lock
│ ├── mcp-server-fusion
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── AddInIcon.svg
│ │ ├── config.py
│ │ ├── FusionMCPServerAddIn.manifest
│ │ ├── FusionMCPServerAddIn.py
│ │ ├── mcp_server_fusion
│ │ │ ├── __init__.py
│ │ │ ├── fusion_mcp_server.py
│ │ │ ├── fusion_utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── event_utils.py
│ │ │ │ ├── general_utils.py
│ │ │ │ └── tool_utils.py
│ │ │ ├── mcp_tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fusion_3d_operation.py
│ │ │ │ ├── fusion_geometry.py
│ │ │ │ ├── fusion_pattern.py
│ │ │ │ └── fusion_sketch.py
│ │ │ └── vendor
│ │ │ └── README.md
│ │ ├── README.md
│ │ └── requirements.txt
│ ├── mcp-server-giphy
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── giphy_search.py
│ │ │ ├── sampling.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ └── utils.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-memory-user-bio
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_memory_user_bio
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-memory-whiteboard
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_memory_whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-office
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── build.sh
│ │ ├── data
│ │ │ ├── attachments
│ │ │ │ ├── Daily Game Ideas.txt
│ │ │ │ ├── Frontend Framework Proposal.txt
│ │ │ │ └── ReDoodle.txt
│ │ │ └── word
│ │ │ ├── test_cases.yaml
│ │ │ └── transcripts
│ │ │ ├── transcript_Startup_Idea_1_202503031513.md
│ │ │ ├── transcript_Startup_Idea_2_202503031659.md
│ │ │ └── transcript_Web_Frontends_202502281551.md
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── app_interaction
│ │ │ │ ├── __init__.py
│ │ │ │ ├── excel_editor.py
│ │ │ │ ├── powerpoint_editor.py
│ │ │ │ └── word_editor.py
│ │ │ ├── config.py
│ │ │ ├── constants.py
│ │ │ ├── evals
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common.py
│ │ │ │ ├── run_comment_analysis.py
│ │ │ │ ├── run_feedback.py
│ │ │ │ └── run_markdown_edit.py
│ │ │ ├── helpers.py
│ │ │ ├── markdown_edit
│ │ │ │ ├── __init__.py
│ │ │ │ ├── comment_analysis.py
│ │ │ │ ├── feedback_step.py
│ │ │ │ ├── markdown_edit.py
│ │ │ │ └── utils.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── comment_analysis.py
│ │ │ │ ├── feedback.py
│ │ │ │ ├── markdown_draft.py
│ │ │ │ └── markdown_edit.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ └── types.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_word_editor.py
│ │ └── uv.lock
│ ├── mcp-server-open-deep-research
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── libs
│ │ │ │ └── open_deep_research
│ │ │ │ ├── cookies.py
│ │ │ │ ├── mdconvert.py
│ │ │ │ ├── run_agents.py
│ │ │ │ ├── text_inspector_tool.py
│ │ │ │ ├── text_web_browser.py
│ │ │ │ └── visual_qa.py
│ │ │ ├── open_deep_research.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-open-deep-research-clone
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_open_deep_research_clone
│ │ │ ├── __init__.py
│ │ │ ├── azure_openai.py
│ │ │ ├── config.py
│ │ │ ├── logging.py
│ │ │ ├── sampling.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── utils.py
│ │ │ └── web_research.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ └── test_open_deep_research_clone.py
│ │ └── uv.lock
│ ├── mcp-server-template
│ │ ├── .taplo.toml
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── copier.yml
│ │ ├── README.md
│ │ └── template
│ │ └── {{ project_slug }}
│ │ ├── .env.example.jinja
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json.jinja
│ │ │ └── settings.json
│ │ ├── {{ module_name }}
│ │ │ ├── __init__.py
│ │ │ ├── config.py.jinja
│ │ │ ├── server.py.jinja
│ │ │ └── start.py.jinja
│ │ ├── Makefile.jinja
│ │ ├── pyproject.toml.jinja
│ │ └── README.md.jinja
│ ├── mcp-server-vscode
│ │ ├── .eslintrc.cjs
│ │ ├── .gitignore
│ │ ├── .npmrc
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ ├── launch.json
│ │ │ ├── settings.json
│ │ │ └── tasks.json
│ │ ├── .vscode-test.mjs
│ │ ├── .vscodeignore
│ │ ├── ASSISTANT_BOOTSTRAP.md
│ │ ├── eslint.config.mjs
│ │ ├── images
│ │ │ └── icon.png
│ │ ├── LICENSE
│ │ ├── Makefile
│ │ ├── out
│ │ │ ├── extension.d.ts
│ │ │ ├── extension.js
│ │ │ ├── test
│ │ │ │ ├── extension.test.d.ts
│ │ │ │ └── extension.test.js
│ │ │ ├── tools
│ │ │ │ ├── code_checker.d.ts
│ │ │ │ ├── code_checker.js
│ │ │ │ ├── debug_tools.d.ts
│ │ │ │ ├── debug_tools.js
│ │ │ │ ├── focus_editor.d.ts
│ │ │ │ ├── focus_editor.js
│ │ │ │ ├── search_symbol.d.ts
│ │ │ │ └── search_symbol.js
│ │ │ └── utils
│ │ │ ├── port.d.ts
│ │ │ └── port.js
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── prettier.config.cjs
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── extension.d.ts
│ │ │ ├── extension.ts
│ │ │ ├── test
│ │ │ │ ├── extension.test.d.ts
│ │ │ │ └── extension.test.ts
│ │ │ ├── tools
│ │ │ │ ├── code_checker.d.ts
│ │ │ │ ├── code_checker.ts
│ │ │ │ ├── debug_tools.d.ts
│ │ │ │ ├── debug_tools.ts
│ │ │ │ ├── focus_editor.d.ts
│ │ │ │ ├── focus_editor.ts
│ │ │ │ ├── search_symbol.d.ts
│ │ │ │ └── search_symbol.ts
│ │ │ └── utils
│ │ │ ├── port.d.ts
│ │ │ └── port.ts
│ │ ├── tsconfig.json
│ │ ├── tsconfig.tsbuildinfo
│ │ ├── vsc-extension-quickstart.md
│ │ └── webpack.config.js
│ └── mcp-server-web-research
│ ├── .env.example
│ ├── .gitignore
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── Makefile
│ ├── mcp_server_web_research
│ │ ├── __init__.py
│ │ ├── azure_openai.py
│ │ ├── config.py
│ │ ├── logging.py
│ │ ├── sampling.py
│ │ ├── server.py
│ │ ├── start.py
│ │ ├── utils.py
│ │ └── web_research.py
│ ├── pyproject.toml
│ ├── README.md
│ ├── test
│ │ └── test_web_research.py
│ └── uv.lock
├── README.md
├── RESPONSIBLE_AI_FAQ.md
├── ruff.toml
├── SECURITY.md
├── semantic-workbench.code-workspace
├── SUPPORT.md
├── tools
│ ├── build_ai_context_files.py
│ ├── collect_files.py
│ ├── docker
│ │ ├── azure_website_sshd.conf
│ │ ├── docker-entrypoint.sh
│ │ ├── Dockerfile.assistant
│ │ └── Dockerfile.mcp-server
│ ├── makefiles
│ │ ├── docker-assistant.mk
│ │ ├── docker-mcp-server.mk
│ │ ├── docker.mk
│ │ ├── python.mk
│ │ ├── recursive.mk
│ │ └── shell.mk
│ ├── reset-service-data.ps1
│ ├── reset-service-data.sh
│ ├── run-app.ps1
│ ├── run-app.sh
│ ├── run-canonical-agent.ps1
│ ├── run-canonical-agent.sh
│ ├── run-dotnet-examples-with-aspire.sh
│ ├── run-python-example1.sh
│ ├── run-python-example2.ps1
│ ├── run-python-example2.sh
│ ├── run-service.ps1
│ ├── run-service.sh
│ ├── run-workbench-chatbot.ps1
│ └── run-workbench-chatbot.sh
├── workbench-app
│ ├── .dockerignore
│ ├── .env.example
│ ├── .eslintrc.cjs
│ ├── .gitignore
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── docker-entrypoint.sh
│ ├── Dockerfile
│ ├── docs
│ │ ├── APP_DEV_GUIDE.md
│ │ ├── MESSAGE_METADATA.md
│ │ ├── MESSAGE_TYPES.md
│ │ ├── README.md
│ │ └── STATE_INSPECTORS.md
│ ├── index.html
│ ├── Makefile
│ ├── nginx.conf
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── prettier.config.cjs
│ ├── public
│ │ └── assets
│ │ ├── background-1-upscaled.jpg
│ │ ├── background-1-upscaled.png
│ │ ├── background-1.jpg
│ │ ├── background-1.png
│ │ ├── background-2.jpg
│ │ ├── background-2.png
│ │ ├── experimental-feature.jpg
│ │ ├── favicon.svg
│ │ ├── workflow-designer-1.jpg
│ │ ├── workflow-designer-outlets.jpg
│ │ ├── workflow-designer-states.jpg
│ │ └── workflow-designer-transitions.jpg
│ ├── README.md
│ ├── run.sh
│ ├── src
│ │ ├── components
│ │ │ ├── App
│ │ │ │ ├── AppFooter.tsx
│ │ │ │ ├── AppHeader.tsx
│ │ │ │ ├── AppMenu.tsx
│ │ │ │ ├── AppView.tsx
│ │ │ │ ├── CodeLabel.tsx
│ │ │ │ ├── CommandButton.tsx
│ │ │ │ ├── ConfirmLeave.tsx
│ │ │ │ ├── ContentExport.tsx
│ │ │ │ ├── ContentImport.tsx
│ │ │ │ ├── CopyButton.tsx
│ │ │ │ ├── DialogControl.tsx
│ │ │ │ ├── DynamicIframe.tsx
│ │ │ │ ├── ErrorListFromAppState.tsx
│ │ │ │ ├── ErrorMessageBar.tsx
│ │ │ │ ├── ExperimentalNotice.tsx
│ │ │ │ ├── FormWidgets
│ │ │ │ │ ├── BaseModelEditorWidget.tsx
│ │ │ │ │ ├── CustomizedArrayFieldTemplate.tsx
│ │ │ │ │ ├── CustomizedFieldTemplate.tsx
│ │ │ │ │ ├── CustomizedObjectFieldTemplate.tsx
│ │ │ │ │ └── InspectableWidget.tsx
│ │ │ │ ├── LabelWithDescription.tsx
│ │ │ │ ├── Loading.tsx
│ │ │ │ ├── MenuItemControl.tsx
│ │ │ │ ├── MiniControl.tsx
│ │ │ │ ├── MyAssistantServiceRegistrations.tsx
│ │ │ │ ├── MyItemsManager.tsx
│ │ │ │ ├── OverflowMenu.tsx
│ │ │ │ ├── PresenceMotionList.tsx
│ │ │ │ ├── ProfileSettings.tsx
│ │ │ │ └── TooltipWrapper.tsx
│ │ │ ├── Assistants
│ │ │ │ ├── ApplyConfigButton.tsx
│ │ │ │ ├── AssistantAdd.tsx
│ │ │ │ ├── AssistantConfigExportButton.tsx
│ │ │ │ ├── AssistantConfigImportButton.tsx
│ │ │ │ ├── AssistantConfiguration.tsx
│ │ │ │ ├── AssistantConfigure.tsx
│ │ │ │ ├── AssistantCreate.tsx
│ │ │ │ ├── AssistantDelete.tsx
│ │ │ │ ├── AssistantDuplicate.tsx
│ │ │ │ ├── AssistantExport.tsx
│ │ │ │ ├── AssistantImport.tsx
│ │ │ │ ├── AssistantRemove.tsx
│ │ │ │ ├── AssistantRename.tsx
│ │ │ │ ├── AssistantServiceInfo.tsx
│ │ │ │ ├── AssistantServiceMetadata.tsx
│ │ │ │ └── MyAssistants.tsx
│ │ │ ├── AssistantServiceRegistrations
│ │ │ │ ├── AssistantServiceRegistrationApiKey.tsx
│ │ │ │ ├── AssistantServiceRegistrationApiKeyReset.tsx
│ │ │ │ ├── AssistantServiceRegistrationCreate.tsx
│ │ │ │ └── AssistantServiceRegistrationRemove.tsx
│ │ │ ├── Conversations
│ │ │ │ ├── Canvas
│ │ │ │ │ ├── AssistantCanvas.tsx
│ │ │ │ │ ├── AssistantCanvasList.tsx
│ │ │ │ │ ├── AssistantInspector.tsx
│ │ │ │ │ ├── AssistantInspectorList.tsx
│ │ │ │ │ └── ConversationCanvas.tsx
│ │ │ │ ├── ChatInputPlugins
│ │ │ │ │ ├── ClearEditorPlugin.tsx
│ │ │ │ │ ├── LexicalMenu.ts
│ │ │ │ │ ├── ParticipantMentionsPlugin.tsx
│ │ │ │ │ ├── TypeaheadMenuPlugin.css
│ │ │ │ │ └── TypeaheadMenuPlugin.tsx
│ │ │ │ ├── ContentRenderers
│ │ │ │ │ ├── CodeContentRenderer.tsx
│ │ │ │ │ ├── ContentListRenderer.tsx
│ │ │ │ │ ├── ContentRenderer.tsx
│ │ │ │ │ ├── DiffRenderer.tsx
│ │ │ │ │ ├── HtmlContentRenderer.tsx
│ │ │ │ │ ├── JsonSchemaContentRenderer.tsx
│ │ │ │ │ ├── MarkdownContentRenderer.tsx
│ │ │ │ │ ├── MarkdownEditorRenderer.tsx
│ │ │ │ │ ├── MermaidContentRenderer.tsx
│ │ │ │ │ ├── MusicABCContentRenderer.css
│ │ │ │ │ └── MusicABCContentRenderer.tsx
│ │ │ │ ├── ContextWindow.tsx
│ │ │ │ ├── ConversationCreate.tsx
│ │ │ │ ├── ConversationDuplicate.tsx
│ │ │ │ ├── ConversationExport.tsx
│ │ │ │ ├── ConversationFileIcon.tsx
│ │ │ │ ├── ConversationRemove.tsx
│ │ │ │ ├── ConversationRename.tsx
│ │ │ │ ├── ConversationShare.tsx
│ │ │ │ ├── ConversationShareCreate.tsx
│ │ │ │ ├── ConversationShareList.tsx
│ │ │ │ ├── ConversationShareView.tsx
│ │ │ │ ├── ConversationsImport.tsx
│ │ │ │ ├── ConversationTranscript.tsx
│ │ │ │ ├── DebugInspector.tsx
│ │ │ │ ├── FileItem.tsx
│ │ │ │ ├── FileList.tsx
│ │ │ │ ├── InputAttachmentList.tsx
│ │ │ │ ├── InputOptionsControl.tsx
│ │ │ │ ├── InteractHistory.tsx
│ │ │ │ ├── InteractInput.tsx
│ │ │ │ ├── Message
│ │ │ │ │ ├── AttachmentSection.tsx
│ │ │ │ │ ├── ContentRenderer.tsx
│ │ │ │ │ ├── ContentSafetyNotice.tsx
│ │ │ │ │ ├── InteractMessage.tsx
│ │ │ │ │ ├── MessageActions.tsx
│ │ │ │ │ ├── MessageBase.tsx
│ │ │ │ │ ├── MessageBody.tsx
│ │ │ │ │ ├── MessageContent.tsx
│ │ │ │ │ ├── MessageFooter.tsx
│ │ │ │ │ ├── MessageHeader.tsx
│ │ │ │ │ ├── NotificationAccordion.tsx
│ │ │ │ │ └── ToolResultMessage.tsx
│ │ │ │ ├── MessageDelete.tsx
│ │ │ │ ├── MessageLink.tsx
│ │ │ │ ├── MyConversations.tsx
│ │ │ │ ├── MyShares.tsx
│ │ │ │ ├── ParticipantAvatar.tsx
│ │ │ │ ├── ParticipantAvatarGroup.tsx
│ │ │ │ ├── ParticipantItem.tsx
│ │ │ │ ├── ParticipantList.tsx
│ │ │ │ ├── ParticipantStatus.tsx
│ │ │ │ ├── RewindConversation.tsx
│ │ │ │ ├── ShareRemove.tsx
│ │ │ │ ├── SpeechButton.tsx
│ │ │ │ └── ToolCalls.tsx
│ │ │ └── FrontDoor
│ │ │ ├── Chat
│ │ │ │ ├── AssistantDrawer.tsx
│ │ │ │ ├── CanvasDrawer.tsx
│ │ │ │ ├── Chat.tsx
│ │ │ │ ├── ChatCanvas.tsx
│ │ │ │ ├── ChatControls.tsx
│ │ │ │ └── ConversationDrawer.tsx
│ │ │ ├── Controls
│ │ │ │ ├── AssistantCard.tsx
│ │ │ │ ├── AssistantSelector.tsx
│ │ │ │ ├── AssistantServiceSelector.tsx
│ │ │ │ ├── ConversationItem.tsx
│ │ │ │ ├── ConversationList.tsx
│ │ │ │ ├── ConversationListOptions.tsx
│ │ │ │ ├── NewConversationButton.tsx
│ │ │ │ ├── NewConversationForm.tsx
│ │ │ │ └── SiteMenuButton.tsx
│ │ │ ├── GlobalContent.tsx
│ │ │ └── MainContent.tsx
│ │ ├── Constants.ts
│ │ ├── global.d.ts
│ │ ├── index.css
│ │ ├── libs
│ │ │ ├── AppStorage.ts
│ │ │ ├── AuthHelper.ts
│ │ │ ├── EventSubscriptionManager.ts
│ │ │ ├── Theme.ts
│ │ │ ├── useAssistantCapabilities.ts
│ │ │ ├── useChatCanvasController.ts
│ │ │ ├── useConversationEvents.ts
│ │ │ ├── useConversationUtility.ts
│ │ │ ├── useCreateConversation.ts
│ │ │ ├── useDebugComponentLifecycle.ts
│ │ │ ├── useDragAndDrop.ts
│ │ │ ├── useEnvironment.ts
│ │ │ ├── useExportUtility.ts
│ │ │ ├── useHistoryUtility.ts
│ │ │ ├── useKeySequence.ts
│ │ │ ├── useMediaQuery.ts
│ │ │ ├── useMicrosoftGraph.ts
│ │ │ ├── useNotify.tsx
│ │ │ ├── useParticipantUtility.tsx
│ │ │ ├── useSiteUtility.ts
│ │ │ ├── useWorkbenchEventSource.ts
│ │ │ ├── useWorkbenchService.ts
│ │ │ └── Utility.ts
│ │ ├── main.tsx
│ │ ├── models
│ │ │ ├── Assistant.ts
│ │ │ ├── AssistantCapability.ts
│ │ │ ├── AssistantServiceInfo.ts
│ │ │ ├── AssistantServiceRegistration.ts
│ │ │ ├── Config.ts
│ │ │ ├── Conversation.ts
│ │ │ ├── ConversationFile.ts
│ │ │ ├── ConversationMessage.ts
│ │ │ ├── ConversationMessageDebug.ts
│ │ │ ├── ConversationParticipant.ts
│ │ │ ├── ConversationShare.ts
│ │ │ ├── ConversationShareRedemption.ts
│ │ │ ├── ConversationState.ts
│ │ │ ├── ConversationStateDescription.ts
│ │ │ ├── ServiceEnvironment.ts
│ │ │ └── User.ts
│ │ ├── redux
│ │ │ ├── app
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── rtkQueryErrorLogger.ts
│ │ │ │ └── store.ts
│ │ │ └── features
│ │ │ ├── app
│ │ │ │ ├── appSlice.ts
│ │ │ │ └── AppState.ts
│ │ │ ├── chatCanvas
│ │ │ │ ├── chatCanvasSlice.ts
│ │ │ │ └── ChatCanvasState.ts
│ │ │ ├── localUser
│ │ │ │ ├── localUserSlice.ts
│ │ │ │ └── LocalUserState.ts
│ │ │ └── settings
│ │ │ ├── settingsSlice.ts
│ │ │ └── SettingsState.ts
│ │ ├── Root.tsx
│ │ ├── routes
│ │ │ ├── AcceptTerms.tsx
│ │ │ ├── AssistantEditor.tsx
│ │ │ ├── AssistantServiceRegistrationEditor.tsx
│ │ │ ├── Dashboard.tsx
│ │ │ ├── ErrorPage.tsx
│ │ │ ├── FrontDoor.tsx
│ │ │ ├── Login.tsx
│ │ │ ├── Settings.tsx
│ │ │ ├── ShareRedeem.tsx
│ │ │ └── Shares.tsx
│ │ ├── services
│ │ │ └── workbench
│ │ │ ├── assistant.ts
│ │ │ ├── assistantService.ts
│ │ │ ├── conversation.ts
│ │ │ ├── file.ts
│ │ │ ├── index.ts
│ │ │ ├── participant.ts
│ │ │ ├── share.ts
│ │ │ ├── state.ts
│ │ │ └── workbench.ts
│ │ └── vite-env.d.ts
│ ├── tools
│ │ └── filtered-ts-prune.cjs
│ ├── tsconfig.json
│ └── vite.config.ts
└── workbench-service
├── .env.example
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── alembic.ini
├── devdb
│ ├── docker-compose.yaml
│ └── postgresql-init.sh
├── Dockerfile
├── Makefile
├── migrations
│ ├── env.py
│ ├── README
│ ├── script.py.mako
│ └── versions
│ ├── 2024_09_19_000000_69dcda481c14_init.py
│ ├── 2024_09_19_190029_dffb1d7e219a_file_version_filename.py
│ ├── 2024_09_20_204130_b29524775484_share.py
│ ├── 2024_10_30_231536_039bec8edc33_index_message_type.py
│ ├── 2024_11_04_204029_5149c7fb5a32_conversationmessagedebug.py
│ ├── 2024_11_05_015124_245baf258e11_double_check_debugs.py
│ ├── 2024_11_25_191056_a106de176394_drop_workflow.py
│ ├── 2025_03_19_140136_aaaf792d4d72_set_user_title_set.py
│ ├── 2025_03_21_153250_3763629295ad_add_assistant_template_id.py
│ ├── 2025_05_19_163613_b2f86e981885_delete_context_transfer_assistants.py
│ └── 2025_06_18_174328_503c739152f3_delete_knowlege_transfer_assistants.py
├── pyproject.toml
├── README.md
├── semantic_workbench_service
│ ├── __init__.py
│ ├── api.py
│ ├── assistant_api_key.py
│ ├── auth.py
│ ├── azure_speech.py
│ ├── config.py
│ ├── controller
│ │ ├── __init__.py
│ │ ├── assistant_service_client_pool.py
│ │ ├── assistant_service_registration.py
│ │ ├── assistant.py
│ │ ├── conversation_share.py
│ │ ├── conversation.py
│ │ ├── convert.py
│ │ ├── exceptions.py
│ │ ├── export_import.py
│ │ ├── file.py
│ │ ├── participant.py
│ │ └── user.py
│ ├── db.py
│ ├── event.py
│ ├── files.py
│ ├── logging_config.py
│ ├── middleware.py
│ ├── query.py
│ ├── service_user_principals.py
│ ├── service.py
│ └── start.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── docker-compose.yaml
│ ├── test_assistant_api_key.py
│ ├── test_files.py
│ ├── test_integration.py
│ ├── test_middleware.py
│ ├── test_migrations.py
│ ├── test_workbench_service.py
│ └── types.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/assistant.py:
--------------------------------------------------------------------------------
```python
from typing import (
Any,
Iterable,
Mapping,
)
import deepmerge
from fastapi import FastAPI
from pydantic import BaseModel, ConfigDict
from semantic_workbench_assistant.assistant_app.config import BaseModelAssistantConfig
from semantic_workbench_assistant.assistant_service import create_app
from .content_safety import AlwaysWarnContentSafetyEvaluator, ContentSafety
from .export_import import FileStorageAssistantDataExporter, FileStorageConversationDataExporter
from .protocol import (
AssistantCapability,
AssistantConfigProvider,
AssistantConversationInspectorStateProvider,
AssistantDataExporter,
AssistantTemplate,
ContentInterceptor,
ConversationDataExporter,
Events,
)
from .service import AssistantService
class EmptyConfigModel(BaseModel):
model_config = ConfigDict(title="This assistant has no configuration")
class AssistantApp:
def __init__(
self,
assistant_service_id: str,
assistant_service_name: str,
assistant_service_description: str,
assistant_service_metadata: dict[str, Any] = {},
capabilities: set[AssistantCapability] = set(),
config_provider: AssistantConfigProvider = BaseModelAssistantConfig(EmptyConfigModel).provider,
data_exporter: AssistantDataExporter = FileStorageAssistantDataExporter(),
conversation_data_exporter: ConversationDataExporter = FileStorageConversationDataExporter(),
inspector_state_providers: Mapping[str, AssistantConversationInspectorStateProvider] | None = None,
content_interceptor: ContentInterceptor | None = ContentSafety(AlwaysWarnContentSafetyEvaluator.factory),
additional_templates: Iterable[AssistantTemplate] = [],
) -> None:
self.assistant_service_id = assistant_service_id
self.assistant_service_name = assistant_service_name
self.assistant_service_description = assistant_service_description
self._assistant_service_metadata = assistant_service_metadata
self._capabilities = capabilities
self.config_provider = config_provider
self.data_exporter = data_exporter
self.templates = {
"default": AssistantTemplate(
id="default",
name=assistant_service_name,
description=assistant_service_description,
),
}
if additional_templates:
for template in additional_templates:
if template.id in self.templates:
raise ValueError(f"Template {template.id} already exists")
self.templates[template.id] = template
self.conversation_data_exporter = conversation_data_exporter
self.inspector_state_providers = dict(inspector_state_providers or {})
self.content_interceptor = content_interceptor
self.events = Events()
@property
def assistant_service_metadata(self) -> dict[str, Any]:
return deepmerge.always_merger.merge(
self._assistant_service_metadata,
{"capabilities": {capability: True for capability in self._capabilities}},
)
def add_inspector_state_provider(
self,
state_id: str,
provider: AssistantConversationInspectorStateProvider,
) -> None:
if state_id in self.inspector_state_providers:
raise ValueError(f"Inspector state provider with id {state_id} already exists")
self.inspector_state_providers[state_id] = provider
def add_capability(self, capability: AssistantCapability) -> None:
self._capabilities.add(capability)
def fastapi_app(self) -> FastAPI:
return create_app(
lambda lifespan: AssistantService(
assistant_app=self,
register_lifespan_handler=lifespan.register_handler,
)
)
```
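A minimal usage sketch of the `AssistantApp` class above, assuming it is re-exported from the `semantic_workbench_assistant.assistant_app` package (as `ConversationContext` is elsewhere in this digest). The service id, name, and the `uvicorn` hosting call are illustrative placeholders, not the repo's actual startup path.

```python
# Hedged sketch: wire up an AssistantApp with its constructor defaults and expose
# the FastAPI app it builds. Identifiers and the uvicorn call are placeholders.
import uvicorn

from semantic_workbench_assistant.assistant_app import AssistantApp  # assumed re-export

assistant = AssistantApp(
    assistant_service_id="example-assistant.service",  # placeholder id
    assistant_service_name="Example Assistant",
    assistant_service_description="Illustration of the AssistantApp wiring.",
)

# build the FastAPI application that hosts the assistant service
app = assistant.fastapi_app()

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=3001)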
--------------------------------------------------------------------------------
/workbench-app/src/components/Conversations/RewindConversation.tsx:
--------------------------------------------------------------------------------
```typescript
// Copyright (c) Microsoft. All rights reserved.
import { Button, DialogTrigger } from '@fluentui/react-components';
import { RewindRegular } from '@fluentui/react-icons';
import React from 'react';
import { CommandButton } from '../App/CommandButton';
// TODO: consider removing attachments to messages that are deleted
// and send the appropriate events to the assistants
interface RewindConversationProps {
onRewind?: (redo: boolean) => void;
disabled?: boolean;
}
export const RewindConversation: React.FC<RewindConversationProps> = (props) => {
const { onRewind, disabled } = props;
const [submitted, setSubmitted] = React.useState(false);
const handleRewind = React.useCallback(
async (redo: boolean = false) => {
if (submitted) {
return;
}
setSubmitted(true);
try {
onRewind?.(redo);
} finally {
setSubmitted(false);
}
},
[onRewind, submitted],
);
return (
<CommandButton
disabled={disabled}
description="Rewind conversation to before this message, with optional redo."
icon={<RewindRegular />}
iconOnly={true}
dialogContent={{
trigger: <Button appearance="subtle" icon={<RewindRegular />} size="small" />,
title: 'Rewind Conversation',
content: (
<>
<p>
Are you sure you want to rewind the conversation to before this message? This action cannot
be undone.
</p>
<p>
Optionally, you can choose to rewind the conversation and then redo the chosen message. This
will rewind the conversation to before the chosen message and then re-add the message back
to the conversation, effectively replaying the message.
</p>
<p>
<em>NOTE: This is an experimental feature.</em>
</p>
<p>
<em>
This will remove the messages from the conversation history in the Semantic Workbench,
but it is up to the individual assistant implementations to handle message deletion and
making decisions on what to do with other systems that may have received the message
(such as synthetic memories that may have been created or summaries, etc.)
</em>
</p>
<p>
<em>
Files or other data associated with the messages will not be removed from the system.
</em>
</p>
</>
),
closeLabel: 'Cancel',
additionalActions: [
<DialogTrigger key="rewind" disableButtonEnhancement>
<Button appearance="primary" onClick={() => handleRewind()} disabled={submitted}>
{submitted ? 'Rewinding...' : 'Rewind'}
</Button>
</DialogTrigger>,
<DialogTrigger key="rewindWithRedo" disableButtonEnhancement>
<Button onClick={() => handleRewind(true)} disabled={submitted}>
{submitted ? 'Rewinding and redoing...' : 'Rewind with Redo'}
</Button>
</DialogTrigger>,
],
}}
/>
);
};
```
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/guided_conversation/chat_completions/fix_agenda_error.py:
--------------------------------------------------------------------------------
```python
import logging
from typing import cast
from openai_client import (
CompletionError,
add_serializable_data,
create_system_message,
create_user_message,
make_completion_args_serializable,
validate_completion,
)
from skill_library.types import LanguageModel
from ..agenda import Agenda
from ..message import Conversation, ConversationMessageType
logger = logging.getLogger(__name__)
AGENDA_ERROR_CORRECTION_SYSTEM_TEMPLATE = """
You are a helpful, thoughtful, and meticulous assistant.
You are conducting a conversation with a user. You tried to update the agenda, but the update was invalid.
You will be provided the history of your conversation with the user, your previous attempt(s) at updating the agenda, and the error message(s) that resulted from your attempt(s).
Your task is to correct the update so that it is valid.
Your changes should be as minimal as possible - you are focused on fixing the error(s) that caused the update to be invalid.
Note that if the resource allocation is invalid, you must follow these rules:
1. You should not change the description of the first item (since it has already been executed), but you can change its resource allocation.
2. For all other items, you can combine or split them, or assign them fewer or more resources, but the content they cover collectively should not change (i.e. don't eliminate or add new topics).
For example, the invalid attempt was "item 1 = ask for date of birth (1 turn), item 2 = ask for phone number (1 turn), item 3 = ask for phone type (1 turn), item 4 = explore treatment history (6 turns)", and the error says you need to correct the total resource allocation to 7 turns. A bad solution is "item 1 = ask for date of birth (1 turn), item 2 = explore treatment history (6 turns)" because it eliminates the phone number and phone type topics. A good solution is "item 1 = ask for date of birth (2 turns), item 2 = ask for phone number, phone type, and treatment history (2 turns), item 3 = explore treatment history (3 turns)."
""".replace("\n\n\n", "\n\n").strip()
async def fix_agenda_error(
language_model: LanguageModel,
previous_attempts: str,
conversation: Conversation,
) -> Agenda:
completion_args = {
"model": "gpt-3.5-turbo",
"messages": [
create_system_message(AGENDA_ERROR_CORRECTION_SYSTEM_TEMPLATE),
create_user_message(
(
"Conversation history:\n"
"{{ conversation_history }}\n\n"
"Previous attempts to update the agenda:\n"
"{{ previous_attempts }}"
),
{
"conversation_history": str(conversation.exclude([ConversationMessageType.REASONING])),
"previous_attempts": previous_attempts,
},
),
],
"response_format": Agenda,
}
metadata = {}
logger.debug("Completion call.", extra=add_serializable_data(make_completion_args_serializable(completion_args)))
metadata["completion_args"] = make_completion_args_serializable(completion_args)
try:
completion = await language_model.beta.chat.completions.parse(
**completion_args,
)
validate_completion(completion)
logger.debug("Completion response.", extra=add_serializable_data({"completion": completion.model_dump()}))
metadata["completion"] = completion.model_dump()
except CompletionError as e:
completion_error = CompletionError(e)
metadata["completion_error"] = completion_error.message
logger.error(
e.message, extra=add_serializable_data({"completion_error": completion_error.body, "metadata": metadata})
)
raise completion_error from e
else:
agenda = cast(Agenda, completion.choices[0].message.parsed)
return agenda
```
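One way the helper above might be driven from a retry loop is sketched below; the import path mirrors the file's location in the tree, and the conversation object, client, and error strings are placeholders rather than the guided-conversation skill's actual call site.

```python
# Hedged sketch: accumulate a failed agenda update and its error, then ask
# fix_agenda_error to produce a corrected Agenda. All inputs are placeholders.
from openai import AsyncOpenAI

# import path assumed from the file's location in this repo
from skill_library.skills.guided_conversation.chat_completions.fix_agenda_error import fix_agenda_error


async def repair_agenda(conversation, failed_update: str, error_message: str):
    language_model = AsyncOpenAI()  # any LanguageModel (AsyncOpenAI | AsyncAzureOpenAI)
    previous_attempts = f"Attempt: {failed_update}\nError: {error_message}"
    # returns the parsed Agenda pydantic model, or raises CompletionError on failure
    return await fix_agenda_error(
        language_model=language_model,
        previous_attempts=previous_attempts,
        conversation=conversation,
    )
```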
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/guided_conversation/conversation_guides/er_triage.py:
--------------------------------------------------------------------------------
```python
from pydantic import BaseModel, Field
from skill_library.skills.guided_conversation import (
ConversationGuide,
ResourceConstraint,
ResourceConstraintMode,
ResourceConstraintUnit,
)
# Define nested models for emergency room triage
class PersonalInformation(BaseModel):
name: str = Field(description="The full name of the patient in 'First Last' format.")
sex: str = Field(description="Sex of the patient (M for male, F for female).")
date_of_birth: str = Field(description="The patient's date of birth in 'MM-DD-YYYY' format.")
phone: str = Field(description="The patient's primary phone number in 'XXX-XXX-XXXX' format.")
class Artifact(BaseModel):
personal_information: PersonalInformation = Field(
description="The patient's personal information, including name, sex, date of birth, and phone."
)
chief_complaint: str = Field(description="The main reason the patient is seeking medical attention.")
symptoms: list[str] = Field(description="List of symptoms the patient is currently experiencing.")
medications: list[str] = Field(description="List of medications the patient is currently taking.")
medical_history: list[str] = Field(description="Relevant medical history including diagnoses, surgeries, etc.")
esi_level: int = Field(description="The Emergency Severity Index (ESI) level, an integer between 1 and 5.")
resource_needs: list[str] = Field(description="A list of resources or interventions needed.")
# Rules - Guidelines for triage conversations
rules = [
"DO NOT provide medical advice.",
"Terminate the conversation if inappropriate content is requested.",
"Begin by collecting basic information such as name and date of birth to quickly identify the patient.",
"Prioritize collecting the chief complaint and symptoms to assess the immediate urgency.",
"Gather relevant medical history and current medications that might affect the patient's condition.",
"If time permits, inquire about additional resource needs for patient care.",
"Maintain a calm and reassuring demeanor to help put patients at ease during questioning.",
"Focus questions to ensure the critical information needed for ESI assignment is collected first.",
"Move urgently but efficiently through questions to minimize patient wait time during triage.",
"Ensure confidentiality and handle all patient information securely.",
]
# Conversation Flow - Steps for the triage process
conversation_flow = """
1. Greet the patient and explain the purpose of collecting medical information for triage, then quickly begin by collecting basic identifying information such as name and date of birth.
1. Greet the patient and explain the purpose of collecting medical information for triage, then quickly begin by collecting basic identifying information such as name and date of birth.
2. Ask about the chief complaint to understand the primary reason for the visit.
3. Inquire about current symptoms the patient is experiencing.
4. Gather relevant medical history, including past diagnoses, surgeries, and hospitalizations.
5. Ask the patient about any medications they are currently taking.
6. Determine if there are any specific resources or interventions needed immediately.
7. Evaluate the collected information to determine the Emergency Severity Index (ESI) level.
8. Reassure the patient and inform them of the next steps in their care as quickly as possible.
"""
# Context - Additional information for the triage process
conversation_context = """
Assisting patients in providing essential information during emergency room triage in a medical setting.
"""
# Create instance of the GuidedConversationDefinition model with the above configuration.
definition = ConversationGuide(
artifact_schema=Artifact.model_json_schema(),
rules=rules,
conversation_flow=conversation_flow,
conversation_context=conversation_context,
resource_constraint=ResourceConstraint(
quantity=10,
unit=ResourceConstraintUnit.MINUTES,
mode=ResourceConstraintMode.MAXIMUM,
),
)
```
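For illustration, here is what a completed artifact matching the schema above might look like; the values are invented and the import path is assumed from the file's location in the tree.

```python
# Hedged sketch: build and serialize an example Artifact. All values are invented.
from skill_library.skills.guided_conversation.conversation_guides.er_triage import (  # path assumed
    Artifact,
    PersonalInformation,
)

example = Artifact(
    personal_information=PersonalInformation(
        name="Jane Doe",
        sex="F",
        date_of_birth="01-15-1980",
        phone="555-555-0100",
    ),
    chief_complaint="Chest pain for the past hour",
    symptoms=["chest pain", "shortness of breath"],
    medications=["lisinopril"],
    medical_history=["hypertension"],
    esi_level=2,
    resource_needs=["ECG", "cardiac enzymes"],
)

print(example.model_dump_json(indent=2))
```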
--------------------------------------------------------------------------------
/assistants/prospector-assistant/assistant/form_fill_extension/steps/_guided_conversation.py:
--------------------------------------------------------------------------------
```python
"""
Utility functions for working with guided conversations.
"""
import asyncio
import contextlib
import json
from collections import defaultdict
from contextlib import asynccontextmanager
from pathlib import Path
from typing import AsyncIterator
from guided_conversation.guided_conversation_agent import GuidedConversation
from openai import AsyncOpenAI
from pydantic import BaseModel
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_workbench_assistant.assistant_app.context import ConversationContext, storage_directory_for_context
from .types import GuidedConversationDefinition
_state_locks: dict[Path, asyncio.Lock] = defaultdict(asyncio.Lock)
@asynccontextmanager
async def engine(
openai_client: AsyncOpenAI,
openai_model: str,
definition: GuidedConversationDefinition,
artifact_type: type[BaseModel],
state_file_path: Path,
context: ConversationContext,
state_id: str,
) -> AsyncIterator[GuidedConversation]:
"""
Context manager that provides a guided conversation engine with state, reading it from disk, and saving back
to disk after the context manager block is executed.
NOTE: This context manager uses a lock to ensure that only one guided conversation is executed at a time for any
given state file.
"""
async with _state_locks[state_file_path], context.state_updated_event_after(state_id):
kernel, service_id = _build_kernel_with_service(openai_client, openai_model)
state: dict | None = None
with contextlib.suppress(FileNotFoundError):
state = json.loads(state_file_path.read_text(encoding="utf-8"))
if state:
guided_conversation = GuidedConversation.from_json(
json_data=state,
# dependencies
kernel=kernel,
service_id=service_id,
# context
artifact=artifact_type,
rules=definition.rules,
conversation_flow=definition.conversation_flow,
context=definition.context,
resource_constraint=definition.resource_constraint.to_resource_constraint(),
)
guided_conversation.resource.resource_constraint = definition.resource_constraint.to_resource_constraint()
else:
guided_conversation = GuidedConversation(
# dependencies
kernel=kernel,
service_id=service_id,
# context
artifact=artifact_type,
rules=definition.rules,
conversation_flow=definition.conversation_flow,
context=definition.context,
resource_constraint=definition.resource_constraint.to_resource_constraint(),
)
yield guided_conversation
state = guided_conversation.to_json()
# re-order the keys to make the json more readable in the state file
state = {
"artifact": state.pop("artifact"),
"agenda": state.pop("agenda"),
"resource": state.pop("resource"),
"chat_history": state.pop("chat_history"),
**state,
}
state_file_path.write_text(json.dumps(state), encoding="utf-8")
def _build_kernel_with_service(openai_client: AsyncOpenAI, openai_model: str) -> tuple[Kernel, str]:
kernel = Kernel()
service_id = "gc_main"
chat_service = OpenAIChatCompletion(
service_id=service_id,
async_client=openai_client,
ai_model_id=openai_model,
)
kernel.add_service(chat_service)
return kernel, service_id
def path_for_state(context: ConversationContext, dir: str) -> Path:
dir_path = storage_directory_for_context(context) / dir
dir_path.mkdir(parents=True, exist_ok=True)
return dir_path / "guided_conversation_state.json"
```
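A sketch of how the `engine()` context manager above might be used from a form-fill step. The import path, model name, artifact type, and the `step_conversation` call on the yielded `GuidedConversation` are assumptions about the surrounding APIs, not code from the repo.

```python
# Hedged sketch: open the state-backed guided conversation and take one turn.
from openai import AsyncOpenAI
from pydantic import BaseModel

# import path assumed from the file's location in this repo
from assistant.form_fill_extension.steps._guided_conversation import engine, path_for_state


class ExampleArtifact(BaseModel):
    favorite_color: str = ""


async def run_one_turn(context, definition, user_input: str):
    openai_client = AsyncOpenAI()  # placeholder client
    state_path = path_for_state(context, "form_fill")
    async with engine(
        openai_client=openai_client,
        openai_model="gpt-4o",  # placeholder model name
        definition=definition,
        artifact_type=ExampleArtifact,
        state_file_path=state_path,
        context=context,
        state_id="form_fill",
    ) as guided_conversation:
        # step_conversation is assumed to be the GuidedConversation turn API
        return await guided_conversation.step_conversation(user_input)
```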
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/research2/routines/search_plan.py:
--------------------------------------------------------------------------------
```python
from typing import Any, cast
from openai_client import (
CompletionError,
create_user_message,
extra_data,
format_with_liquid,
make_completion_args_serializable,
message_content_from_completion,
validate_completion,
)
from skill_library import AskUserFn, EmitFn, RunContext, RunRoutineFn
from skill_library.logging import logger
from skill_library.skills.research2.research_skill import ResearchSkill
INITIAL_PROMPT = """
You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
Do not skip steps, do not add any superfluous steps. Only write the high-level plan.
Here is your topic:
`{{TOPIC}}`
Here is the up-to-date list of facts that you know:
```
{{FACTS}}
```
Observations from previous research:
```
{{OBSERVATIONS}}
```
If you decide that the research topic has been completed, respond only with <DONE>.
Now begin! Write your plan below.
"""
UPDATE_PROMPT = """
You're still working towards completing this research:
`{{TOPIC}}`
Now for the given topic, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
This plan should involve individual tasks that if executed correctly will yield the correct answer.
Current plan:
```
{{PLAN}}
```
Here is the up-to-date list of facts that you know:
```
{{FACTS}}
```
Observations from previous research:
```
{{OBSERVATIONS}}
```
If you decide that the research topic has been completed, respond only with <DONE>.
Now begin! Write your revised plan below.
"""
async def main(
context: RunContext,
routine_state: dict[str, Any],
emit: EmitFn,
run: RunRoutineFn,
ask_user: AskUserFn,
topic: str,
plan: str = "",
facts: str = "",
observations: list[str] = [],
) -> tuple[str, bool]:
"""Make a search plan for a research project."""
research_skill = cast(ResearchSkill, context.skills["research2"])
language_model = research_skill.config.reasoning_language_model
if not plan:
prompt = format_with_liquid(INITIAL_PROMPT, vars={"TOPIC": topic, "FACTS": facts, "OBSERVATIONS": observations})
else:
prompt = format_with_liquid(
UPDATE_PROMPT, vars={"TOPIC": topic, "FACTS": facts, "PLAN": plan, "OBSERVATIONS": observations}
)
completion_args = {
"model": "o1",
"reasoning_effort": "high",
"messages": [
create_user_message(
prompt,
),
],
}
logger.debug("Completion call.", extra=extra_data(make_completion_args_serializable(completion_args)))
metadata = {}
metadata["completion_args"] = make_completion_args_serializable(completion_args)
try:
completion = await language_model.beta.chat.completions.parse(
**completion_args,
)
validate_completion(completion)
logger.debug("Completion response.", extra=extra_data({"completion": completion.model_dump()}))
metadata["completion"] = completion.model_dump()
except Exception as e:
completion_error = CompletionError(e)
metadata["completion_error"] = completion_error.message
logger.error(
completion_error.message,
extra=extra_data({"completion_error": completion_error.body, "metadata": context.metadata_log}),
)
raise completion_error from e
else:
content = message_content_from_completion(completion).strip().strip('"')
metadata["content"] = content
if "<DONE>" in content:
return content, True
return content, False
finally:
context.log("search_plan", metadata)
```
--------------------------------------------------------------------------------
/libraries/python/assistant-extensions/assistant_extensions/attachments/_summarizer.py:
--------------------------------------------------------------------------------
```python
import datetime
import logging
from typing import Callable
from attr import dataclass
from openai import AsyncOpenAI
from openai.types.chat import (
ChatCompletionContentPartImageParam,
ChatCompletionContentPartTextParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from semantic_workbench_assistant.assistant_app import ConversationContext
from ._model import Attachment, AttachmentSummary, Summarizer
from ._shared import original_to_attachment_filename, summary_drive_for_context
logger = logging.getLogger("assistant_extensions.attachments")
async def get_attachment_summary(context: ConversationContext, filename: str) -> AttachmentSummary:
"""
Get the summary of the attachment from the summary drive.
    If the summary file does not exist, returns an empty AttachmentSummary.
"""
drive = summary_drive_for_context(context)
try:
return drive.read_model(AttachmentSummary, original_to_attachment_filename(filename))
except FileNotFoundError:
        # If the summary file does not exist, return an empty summary
return AttachmentSummary(
summary="",
)
async def summarize_attachment_task(
context: ConversationContext, summarizer: Summarizer, attachment: Attachment
) -> None:
"""
Summarize the attachment and save the summary to the summary drive.
"""
logger.info("summarizing attachment; filename: %s", attachment.filename)
summary = await summarizer.summarize(attachment=attachment)
attachment_summary = AttachmentSummary(summary=summary, updated_datetime=datetime.datetime.now(datetime.UTC))
drive = summary_drive_for_context(context)
# Save the summary
drive.write_model(attachment_summary, original_to_attachment_filename(attachment.filename))
logger.info("summarization of attachment complete; filename: %s", attachment.filename)
@dataclass
class LLMConfig:
client_factory: Callable[[], AsyncOpenAI]
model: str
max_response_tokens: int
file_summary_system_message: str = """You will be provided the content of a file.
It is your goal to factually, accurately, and concisely summarize the content of the file.
You must do so in less than 3 sentences or 100 words."""
class LLMFileSummarizer(Summarizer):
def __init__(self, llm_config: LLMConfig) -> None:
self.llm_config = llm_config
async def summarize(self, attachment: Attachment) -> str:
llm_config = self.llm_config
content_param = ChatCompletionContentPartTextParam(type="text", text=attachment.content)
if attachment.content.startswith("data:image/"):
# If the content is an image, we need to provide a different message format
content_param = ChatCompletionContentPartImageParam(
type="image_url",
image_url={"url": attachment.content},
)
chat_message_params = [
ChatCompletionSystemMessageParam(role="system", content=llm_config.file_summary_system_message),
ChatCompletionUserMessageParam(
role="user",
content=[
ChatCompletionContentPartTextParam(
type="text",
text=f"Filename: {attachment.filename}",
),
content_param,
ChatCompletionContentPartTextParam(
type="text",
text="Please concisely and accurately summarize the file contents.",
),
],
),
]
async with llm_config.client_factory() as client:
summary_response = await client.chat.completions.create(
messages=chat_message_params,
model=llm_config.model,
max_tokens=llm_config.max_response_tokens,
)
return summary_response.choices[0].message.content or ""
```
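A sketch of wiring up the summarizer above. The client factory, model name, and the `Attachment` construction are placeholders; the real `Attachment` model may require more fields than shown here.

```python
# Hedged sketch: configure LLMFileSummarizer and summarize one attachment.
import asyncio

from openai import AsyncOpenAI

from assistant_extensions.attachments._model import Attachment  # field set assumed
from assistant_extensions.attachments._summarizer import LLMConfig, LLMFileSummarizer


async def main() -> None:
    summarizer = LLMFileSummarizer(
        llm_config=LLMConfig(
            client_factory=lambda: AsyncOpenAI(),  # placeholder client factory
            model="gpt-4o-mini",  # placeholder model
            max_response_tokens=200,
        )
    )
    attachment = Attachment(filename="notes.txt", content="Quarterly planning notes ...")
    print(await summarizer.summarize(attachment=attachment))


if __name__ == "__main__":
    asyncio.run(main())
```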
--------------------------------------------------------------------------------
/workbench-service/migrations/versions/2024_09_20_204130_b29524775484_share.py:
--------------------------------------------------------------------------------
```python
"""share
Revision ID: b29524775484
Revises: dffb1d7e219a
Create Date: 2024-09-17 20:41:30.747858
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
from sqlalchemy import inspect
# revision identifiers, used by Alembic.
revision: str = "b29524775484"
down_revision: Union[str, None] = "dffb1d7e219a"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
op.create_table(
"conversationshare",
sa.Column("conversation_share_id", sa.Uuid(), nullable=False),
sa.Column("conversation_id", sa.Uuid(), nullable=False),
sa.Column("created_datetime", sa.DateTime(timezone=True), nullable=False),
sa.Column("owner_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("label", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("metadata", sa.JSON(), nullable=True),
sa.Column("conversation_permission", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("is_redeemable", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(
["conversation_id"],
["conversation.conversation_id"],
name="fk_file_conversation_id_conversation",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["owner_id"],
["user.user_id"],
),
sa.PrimaryKeyConstraint("conversation_share_id"),
)
op.create_table(
"conversationshareredemption",
sa.Column("conversation_share_redemption_id", sa.Uuid(), nullable=False),
sa.Column("conversation_share_id", sa.Uuid(), nullable=False),
sa.Column("conversation_id", sa.Uuid(), nullable=False),
sa.Column("conversation_permission", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("new_participant", sa.Boolean(), nullable=False),
sa.Column("redeemed_by_user_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("created_datetime", sa.DateTime(timezone=True), nullable=False),
sa.ForeignKeyConstraint(
["conversation_share_id"],
["conversationshare.conversation_share_id"],
name="fk_conversationshareredemption_conversation_share_id",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["redeemed_by_user_id"],
["user.user_id"],
name="fk_conversationshareredemption_user_id_user",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("conversation_share_redemption_id"),
)
op.add_column("assistant", sa.Column("imported_from_assistant_id", sa.Uuid(), nullable=True))
op.add_column("conversation", sa.Column("imported_from_conversation_id", sa.Uuid(), nullable=True))
op.add_column(
"userparticipant", sa.Column("conversation_permission", sqlmodel.sql.sqltypes.AutoString(), nullable=True)
)
op.execute("UPDATE userparticipant SET conversation_permission = 'read_write'")
with op.batch_alter_table("userparticipant") as batch_op:
batch_op.alter_column("conversation_permission", nullable=False)
inspector = inspect(op.get_bind())
uq_constraints = inspector.get_unique_constraints("fileversion")
if any("uq_fileversion_file_id_version" == uq_constraint["name"] for uq_constraint in uq_constraints):
with op.batch_alter_table("fileversion") as batch_op:
batch_op.drop_constraint("uq_fileversion_file_id_version", type_="unique")
def downgrade() -> None:
op.drop_column("userparticipant", "conversation_permission")
op.drop_column("conversation", "imported_from_conversation_id")
op.drop_column("assistant", "imported_from_assistant_id")
op.drop_table("conversationshareredemption")
op.drop_table("conversationshare")
```
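For context, a migration like this is applied through Alembic. A minimal programmatic sketch, run from the `workbench-service` directory where `alembic.ini` lives and assuming the standard `alembic.config.main` entry point, might look like this:

```python
# Hedged sketch: apply all pending migrations, equivalent to `alembic upgrade head`.
from alembic.config import main as alembic_main

if __name__ == "__main__":
    alembic_main(argv=["upgrade", "head"])
```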
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/types.py:
--------------------------------------------------------------------------------
```python
# skill_library/types.py
from datetime import datetime
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Protocol
from uuid import uuid4
from assistant_drive import Drive
from events import EventProtocol
from openai import AsyncAzureOpenAI, AsyncOpenAI
from semantic_workbench_api_model.workbench_model import ConversationMessageList
from .usage import routines_usage as usage_routines_usage
if TYPE_CHECKING:
from .skill import Skill
Metadata = dict[str, Any]
class RunContext:
"""
Every skill routine is executed with a "Run context". This is how we give
    routines everything they need to interact with one another and the "outside
world".
"""
def __init__(
self,
session_id: str,
run_drive: Drive,
conversation_history: Callable[[], Awaitable[ConversationMessageList]],
skills: dict[str, "Skill"],
) -> None:
# A session id is useful for maintaining consistent session state across all
# consumers of this context. For example, a session id can be set in an
        # assistant and all functions called by that assistant should receive
# this same context object to know which session is being used.
self.session_id: str = session_id or str(uuid4())
# A "run" is a particular series of calls within a session. The initial call will
# set the run id and all subsequent calls will use the same run id. This is useful
# for logging, metrics, and debugging.
self.run_id: str | None = str(uuid4())
# The assistant drive is a drive object that can be used to read and
# write files to a particular location. The assistant drive should be
# used for assistant-specific data and not for general data storage.
self.run_drive: Drive = run_drive
# The conversation history function is a function that can be called to
# get the conversation history for the current session. This is useful
# for routines that need to know what has been said in the conversation
# so far. Usage: `await run_context.conversation_history()`
self.conversation_history = conversation_history
self.skills = skills
        # This is a list that can be used to store metadata entries about
# the current run.
self.metadata_log: list[Metadata] = []
def log(self, message: str, metadata: Metadata) -> None:
"""
Log a message with metadata. The metadata will be stored in the
`metadata_log` list and can be inspected to see all the things that
happened for a given run.
"""
ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
if "log_message" not in metadata:
metadata["log_message"] = message
if "ts" not in metadata:
metadata["ts"] = ts
if "session_id" not in metadata:
metadata["session_id"] = self.session_id
if "run_id" not in metadata:
metadata["run_id"] = self.run_id
self.metadata_log.append(metadata)
def flattened_metadata(self) -> dict[str, dict[str, Any]]:
return {f"🕒{item['ts']} ➡️{item['log_message']}": item for item in self.metadata_log}
def routine_usage(self) -> str:
return usage_routines_usage(self.skills)
class RunContextProvider(Protocol):
"""
A provider of a run context must have this method. When called, it will
return a run context. This is used by skill routines to have access to all
the things they need for running.
"""
def create_run_context(self) -> RunContext: ...
AskUserFn = Callable[[str], Awaitable[str]]
ActionFn = Callable[[RunContext], Awaitable[Any]]
EmitFn = Callable[[EventProtocol], None]
class RunRoutineFn(Protocol):
async def __call__(self, designation: str, *args: Any, **kwargs: Any) -> Any: ...
LanguageModel = AsyncOpenAI | AsyncAzureOpenAI
```
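A sketch of constructing a `RunContext` directly, which the skill engine would normally do for you; the `Drive` configuration and the empty-history stand-in are assumptions about the `assistant_drive` and workbench model APIs.

```python
# Hedged sketch: build a RunContext by hand and record one metadata entry.
from assistant_drive import Drive, DriveConfig  # DriveConfig(root=...) shape is assumed
from skill_library import RunContext


async def empty_history():
    # stand-in for a real ConversationMessageList
    return []


run_context = RunContext(
    session_id="demo-session",
    run_drive=Drive(DriveConfig(root=".data/demo")),
    conversation_history=empty_history,
    skills={},
)
run_context.log("started", {"step": "init"})
print(run_context.flattened_metadata())
```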
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/web_research/routines/search_plan.py:
--------------------------------------------------------------------------------
```python
from typing import Any, cast
from openai_client import (
CompletionError,
create_user_message,
extra_data,
format_with_liquid,
make_completion_args_serializable,
message_content_from_completion,
validate_completion,
)
from skill_library import AskUserFn, EmitFn, RunContext, RunRoutineFn
from skill_library.logging import logger
from skill_library.skills.web_research.research_skill import WebResearchSkill
INITIAL_PROMPT = """
As a research expert, create a strategic search plan for:
`{{TOPIC}}`
Your plan should:
1. Prioritize finding high-quality, authoritative sources over quantity
2. Include specific steps to bypass SEO-optimized content in favor of substantive information
3. Focus on locating genuine expert reviews and authentic user feedback
4. Identify specific technical resources likely to contain verifiable information
Current facts:
```
{{FACTS}}
```
Observations:
```
{{OBSERVATIONS}}
```
If you determine the research is complete, respond only with <DONE>.
Otherwise, provide a step-by-step plan focusing on filling information gaps with reliable sources.
"""
UPDATE_PROMPT = """
You're researching:
`{{TOPIC}}`
Review what we've learned and what gaps remain. Current plan:
```
{{PLAN}}
```
Current facts:
```
{{FACTS}}
```
Observations:
```
{{OBSERVATIONS}}
```
For the next phase of research:
1. Evaluate which sources have proven most reliable so far
2. Identify specific information gaps with the highest priority
3. Target specialized and authoritative sources for remaining questions
4. Develop strategies to find technical details and verified user experiences
If the research topic has been completed with verified information, respond only with <DONE>.
Otherwise, revise your plan to focus on remaining information gaps.
"""
async def main(
context: RunContext,
routine_state: dict[str, Any],
emit: EmitFn,
run: RunRoutineFn,
ask_user: AskUserFn,
topic: str,
plan: str = "",
facts: str = "",
observations: list[str] = [],
) -> tuple[str, bool]:
"""Make a search plan for a research project."""
research_skill = cast(WebResearchSkill, context.skills["web_research"])
language_model = research_skill.config.reasoning_language_model
if not plan:
prompt = format_with_liquid(INITIAL_PROMPT, vars={"TOPIC": topic, "FACTS": facts, "OBSERVATIONS": observations})
else:
prompt = format_with_liquid(
UPDATE_PROMPT, vars={"TOPIC": topic, "FACTS": facts, "PLAN": plan, "OBSERVATIONS": observations}
)
completion_args = {
"model": "o3-mini",
"reasoning_effort": "high",
"messages": [
create_user_message(
prompt,
),
],
}
logger.debug("Completion call.", extra=extra_data(make_completion_args_serializable(completion_args)))
metadata = {}
metadata["completion_args"] = make_completion_args_serializable(completion_args)
try:
completion = await language_model.beta.chat.completions.parse(
**completion_args,
)
validate_completion(completion)
logger.debug("Completion response.", extra=extra_data({"completion": completion.model_dump()}))
metadata["completion"] = completion.model_dump()
except Exception as e:
completion_error = CompletionError(e)
metadata["completion_error"] = completion_error.message
logger.error(
completion_error.message,
extra=extra_data({"completion_error": completion_error.body, "metadata": context.metadata_log}),
)
raise completion_error from e
else:
content = message_content_from_completion(completion).strip().strip('"')
metadata["content"] = content
if "<DONE>" in content:
return content, True
return content, False
finally:
context.log("search_plan", metadata)
```
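A sketch of the loop a caller might use around a planning routine like this one: keep feeding the revised plan back in until the routine reports it is done. The `run` callable matches the `RunRoutineFn` protocol from `skill_library`; the `"web_research.search_plan"` designation string and the kwarg pass-through are assumptions.

```python
# Hedged sketch: iterate the planning routine until it signals completion.
from skill_library import RunRoutineFn


async def plan_until_done(run: RunRoutineFn, topic: str) -> str:
    plan, facts = "", ""
    observations: list[str] = []
    done = False
    while not done:
        plan, done = await run(
            "web_research.search_plan",  # assumed routine designation
            topic=topic,
            plan=plan,
            facts=facts,
            observations=observations,
        )
    return plan
```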
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/fabric/patterns/create_stride_threat_model/system.md:
--------------------------------------------------------------------------------
```markdown
# IDENTITY and PURPOSE
You are an expert in risk and threat management and cybersecurity. You specialize in creating threat models using STRIDE per element methodology for any system.
# GOAL
Given a design document of a system that someone is concerned about, provide a threat model using STRIDE per element methodology.
# STEPS
- Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
- Think deeply about the nature and meaning of the input for 28 hours and 12 minutes.
- Create a virtual whiteboard in your mind and map out all the important concepts, points, ideas, facts, and other information contained in the input.
- Fully understand the STRIDE per element threat modeling approach.
- Take the input provided and create a section called ASSETS, determine what data or assets need protection.
- Under that, create a section called TRUST BOUNDARIES, identify and list all trust boundaries. Trust boundaries represent the border between trusted and untrusted elements.
- Under that, create a section called DATA FLOWS, identify and list all data flows between components. Data flow is interaction between two components. Mark data flows crossing trust boundaries.
- Under that, create a section called THREAT MODEL. Create threats table with STRIDE per element threats. Prioritize threats by likelihood and potential impact.
- Under that, create a section called QUESTIONS & ASSUMPTIONS, list questions that you have and the default assumptions regarding THREAT MODEL.
- The goal is to highlight what's realistic vs. possible, and what's worth defending against vs. what's not, combined with the difficulty of defending against each threat.
- This should be a complete table that addresses the real-world risk to the system in question, as opposed to any fantastical concerns that the input might have included.
- Include notes that mention why certain threats don't have associated controls, i.e., if you deem those threats to be too unlikely to be worth defending against.
# OUTPUT GUIDANCE
- Table with STRIDE per element threats has the following columns:
THREAT ID - id of threat, example: 0001, 0002
COMPONENT NAME - name of component in system that threat is about, example: Service A, API Gateway, Sales Database, Microservice C
THREAT NAME - name of threat that is based on STRIDE per element methodology and important for component. Be detailed and specific. Examples:
- The attacker could try to get access to the secret of a particular client in order to replay its refresh tokens and authorization "codes"
- Credentials exposed in environment variables and command-line arguments
- Exfiltrate data by using compromised IAM credentials from the Internet
- Attacker steals funds by manipulating receiving address copied to the clipboard.
STRIDE CATEGORY - name of STRIDE category, example: Spoofing, Tampering. Pick only one category per threat.
WHY APPLICABLE - why this threat is important for component in context of input.
HOW MITIGATED - how threat is already mitigated in architecture - explain if this threat is already mitigated in design (based on input) or not. Give reference to input.
MITIGATION - provide mitigation that can be applied for this threat. It should be detailed and related to input.
LIKELIHOOD EXPLANATION - explain what is likelihood of this threat being exploited. Consider input (design document) and real-world risk.
IMPACT EXPLANATION - explain impact of this threat being exploited. Consider input (design document) and real-world risk.
RISK SEVERITY - risk severity of threat being exploited. Based it on LIKELIHOOD and IMPACT. Give value, e.g.: low, medium, high, critical.
# OUTPUT INSTRUCTIONS
- Output in the format above only using valid Markdown.
- Do not use bold or italic formatting in the Markdown (no asterisks).
- Do not complain about anything, just do what you're told.
# INPUT:
INPUT:
```
--------------------------------------------------------------------------------
/workbench-app/src/components/Conversations/MyShares.tsx:
--------------------------------------------------------------------------------
```typescript
// Copyright (c) Microsoft. All rights reserved.
import { Copy24Regular, Info24Regular, Share24Regular } from '@fluentui/react-icons';
import React from 'react';
import { useConversationUtility } from '../../libs/useConversationUtility';
import { Conversation } from '../../models/Conversation';
import { ConversationShare } from '../../models/ConversationShare';
import { CommandButton } from '../App/CommandButton';
import { CopyButton } from '../App/CopyButton';
import { MiniControl } from '../App/MiniControl';
import { MyItemsManager } from '../App/MyItemsManager';
import { ConversationShareCreate } from './ConversationShareCreate';
import { ConversationShareView } from './ConversationShareView';
import { ShareRemove } from './ShareRemove';
interface MySharesProps {
shares: ConversationShare[];
title?: string;
hideInstruction?: boolean;
conversation?: Conversation;
}
export const MyShares: React.FC<MySharesProps> = (props) => {
const { shares, hideInstruction, title, conversation } = props;
const [newOpen, setNewOpen] = React.useState(Boolean(conversation && shares.length === 0));
const [conversationShareForDetails, setConversationShareForDetails] = React.useState<ConversationShare>();
const conversationUtility = useConversationUtility();
const createTitle = 'Create a new share link';
// The create share button is internal to the MyShares component so that we're always
// presenting the list of current shares for the conversation in case the user wants to
// reuse a previously created share link.
const actions = conversation ? (
<CommandButton label="New Share" description={createTitle} onClick={() => setNewOpen(true)} />
) : (
<></>
);
const titleFor = (share: ConversationShare) => {
const { shareType } = conversationUtility.getShareType(share);
return `${share.label} (${shareType.toLowerCase()})`;
};
const linkFor = (share: ConversationShare) => {
return conversationUtility.getShareLink(share);
};
return (
<>
<MyItemsManager
items={shares.map((share) => (
<MiniControl
key={share.id}
icon={<Share24Regular />}
label={titleFor(share)}
linkUrl={linkFor(share)}
tooltip="Open share link"
actions={
<>
<CommandButton
icon={<Info24Regular />}
iconOnly
onClick={() => setConversationShareForDetails(share)}
description="View details"
/>
<CopyButton data={linkFor(share)} icon={<Copy24Regular />} tooltip="Copy share link" />
<ShareRemove share={share} iconOnly />
</>
}
/>
))}
title={title ?? 'My Shared Links'}
itemLabel="Share Link"
hideInstruction={hideInstruction}
actions={actions}
/>
{newOpen && conversation && (
<ConversationShareCreate
conversation={conversation}
onClosed={() => setNewOpen(false)}
onCreated={(createdShare) => setConversationShareForDetails(createdShare)}
/>
)}
{conversationShareForDetails && (
<ConversationShareView
conversationShare={conversationShareForDetails}
showDetails
onClosed={() => setConversationShareForDetails(undefined)}
/>
)}
</>
);
};
```
--------------------------------------------------------------------------------
/assistants/codespace-assistant/assistant/text_includes/context_transfer_assistant_info.md:
--------------------------------------------------------------------------------
```markdown
# Context Transfer Assistant
## Overview
The Context Transfer Assistant specializes in capturing, organizing, and sharing knowledge between users. It helps document and transfer complex information about any subject matter, making it easier to onboard team members, share expertise, or provide detailed guidance across various domains.
## Key Features
- **Knowledge capture and organization**: Records and structures detailed information about any topic or subject area.
- **Contextual understanding**: Analyzes shared files, documents, and conversations to build comprehensive knowledge representation.
- **Guided context sharing**: Helps structure information to make it accessible and actionable for recipients.
- **Domain-aware explanations**: Provides explanations grounded in the specific context being discussed.
- **Interactive guidance**: Adapts explanations based on the recipient's questions and expertise level.
- **Visual representation**: Creates diagrams and visual aids to explain complex concepts when helpful.
- **Knowledge persistence**: Maintains shared knowledge through a centralized whiteboard that all participants can access, ensuring consistent information across team conversations.
## How to Use the Context Transfer Assistant
### For Context Owners (Sharers)
1. **Start by defining the context**: Explain what information you want to share and who will be using it.
2. **Share relevant artifacts**: Upload or reference key documents, files, or resources.
3. **Provide structural overview**: Describe the system, domain, or topic structure and key components.
4. **Define processes**: Explain important workflows, procedures, or methodologies.
5. **Refine knowledge representation**: Answer the assistant's questions to clarify details.
### For Context Recipients
1. **Review shared context**: Explore the information that has been shared with you.
2. **Ask clarifying questions**: Request more details about specific aspects of the topic or materials.
3. **Request practical examples**: Ask for concrete examples of concepts or procedures.
4. **Apply knowledge assistance**: Get help applying the shared knowledge to specific tasks.
5. **Seek procedural guidance**: Get step-by-step instructions for workflows or processes.
## Knowledge Transfer Process
1. **Context Definition**:
- Define the domain or subject matter to be shared
- Establish the recipient's background and expertise level
- Determine the goals of the knowledge transfer
2. **Knowledge Capture**:
- Share relevant documents and resources
- Provide structural and organizational descriptions
- Explain key concepts and relationships
- Document important processes and procedures
3. **Context Organization**:
- The assistant structures the information for clarity
- Complex concepts are broken down into digestible components
- Visual diagrams may be created for conceptual understanding
- Information is organized within the conversation history for reference
4. **Interactive Knowledge Sharing**:
- Recipients explore the shared context
- The assistant answers questions based on the captured knowledge
- Explanations are adapted to the recipient's understanding
- Additional context is gathered when knowledge gaps are identified
## Common Use Cases
- **Team member onboarding**: Transfer comprehensive knowledge to new members.
- **Knowledge handover**: Document complex systems or processes when transitioning between teams.
- **Expert consultation**: Preserve and share the knowledge of subject matter experts.
- **Cross-team collaboration**: Share context between teams working on different aspects of a project.
- **Process documentation**: Guide others through setting up or following specific procedures.
The Context Transfer Assistant bridges the gap between experts and knowledge recipients, making complex information more accessible, organized, and actionable across teams and individuals.
```
--------------------------------------------------------------------------------
/libraries/python/assistant-extensions/assistant_extensions/chat_context_toolkit/virtual_filesystem/_attachments_file_source.py:
--------------------------------------------------------------------------------
```python
import logging
from typing import Iterable
from chat_context_toolkit.virtual_filesystem import (
DirectoryEntry,
FileEntry,
FileSource,
MountPoint,
)
from openai_client import OpenAIRequestConfig, ServiceConfig, create_client
from semantic_workbench_assistant.assistant_app import ConversationContext
from assistant_extensions.attachments._model import Summarizer
from ...attachments import get_attachments
from ...attachments._summarizer import LLMConfig, LLMFileSummarizer, get_attachment_summary
logger = logging.getLogger(__name__)
class AttachmentsVirtualFileSystemFileSource(FileSource):
"""File source for the attachments."""
def __init__(
self,
context: ConversationContext,
summarizer: Summarizer,
) -> None:
"""Initialize the file source with the conversation context."""
self.context = context
self.summarizer = summarizer
async def list_directory(self, path: str) -> Iterable[DirectoryEntry | FileEntry]:
"""
List files and directories at the specified path.
Should support absolute paths only, such as "/dir/file.txt".
If the directory does not exist, should raise FileNotFoundError.
"""
query_prefix = path.lstrip("/") or None
list_files_result = await self.context.list_files(prefix=query_prefix)
directories: set[str] = set()
entries: list[DirectoryEntry | FileEntry] = []
prefix = path.lstrip("/")
for file in list_files_result.files:
if prefix and not file.filename.startswith(prefix):
continue
relative_filepath = file.filename.replace(prefix, "")
if "/" in relative_filepath:
directory = relative_filepath.rsplit("/", 1)[0]
if directory in directories:
continue
directories.add(directory)
entries.append(DirectoryEntry(path=f"/{prefix}{directory}", description="", permission="read"))
continue
entries.append(
FileEntry(
path=f"/{prefix}{relative_filepath}",
size=file.file_size,
timestamp=file.updated_datetime,
permission="read",
description=(await get_attachment_summary(context=self.context, filename=file.filename)).summary,
)
)
return entries
async def read_file(self, path: str) -> str:
"""
Read file content from the specified path.
Should support absolute paths only, such as "/dir/file.txt".
If the file does not exist, should raise FileNotFoundError.
FileSource implementations are responsible for representing the file content as a string.
"""
workbench_path = path.lstrip("/")
attachments = await get_attachments(
context=self.context,
include_filenames=[workbench_path],
exclude_filenames=[],
summarizer=self.summarizer,
)
if not attachments:
raise FileNotFoundError(f"File not found: {path}")
return attachments[0].content
def attachments_file_source_mount(
context: ConversationContext, service_config: ServiceConfig, request_config: OpenAIRequestConfig
) -> MountPoint:
return MountPoint(
entry=DirectoryEntry(
path="/attachments",
description="User and assistant created files and attachments",
permission="read",
),
file_source=AttachmentsVirtualFileSystemFileSource(
context=context,
summarizer=LLMFileSummarizer(
llm_config=LLMConfig(
client_factory=lambda: create_client(service_config),
model=request_config.model,
max_response_tokens=request_config.response_tokens,
)
),
),
)
```
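For orientation, here is a minimal usage sketch of the file source defined above. It is illustrative only: the helper name, the way `context` and `summarizer` are obtained, and the `/notes.txt` path are assumptions rather than part of the repository, and the class itself is assumed to be in scope.
```python
from chat_context_toolkit.virtual_filesystem import DirectoryEntry
from semantic_workbench_assistant.assistant_app import ConversationContext
from assistant_extensions.attachments._model import Summarizer


async def dump_attachment_listing(context: ConversationContext, summarizer: Summarizer) -> None:
    # Hypothetical helper: `context` and `summarizer` are assumed to be supplied
    # by the hosting assistant at runtime.
    source = AttachmentsVirtualFileSystemFileSource(context=context, summarizer=summarizer)

    # List the attachment-backed entries at the root of the mount.
    for entry in await source.list_directory("/"):
        kind = "dir " if isinstance(entry, DirectoryEntry) else "file"
        print(kind, entry.path)

    # Read a single attachment; raises FileNotFoundError if it does not exist.
    # "/notes.txt" is a placeholder filename.
    text = await source.read_file("/notes.txt")
    print(text[:200])
```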
--------------------------------------------------------------------------------
/assistants/project-assistant/assistant/agentic/focus.py:
--------------------------------------------------------------------------------
```python
from typing import Any
import openai_client
from assistant_extensions.attachments import AttachmentsExtension
from pydantic import BaseModel
from semantic_workbench_assistant.assistant_app import ConversationContext
from assistant.config import assistant_config
import uuid
from assistant.data import InspectorTab, TaskInfo, TaskPriority, TaskStatus
from assistant.domain.tasks_manager import TasksManager
from assistant.logging import logger
from assistant.notifications import Notifications
from assistant.prompt_utils import (
ContextStrategy,
DataContext,
Instructions,
Prompt,
)
from assistant.utils import load_text_include
async def focus(context: ConversationContext, attachments_extension: AttachmentsExtension) -> None:
debug: dict[str, Any] = {
"context": context.to_dict(),
}
config = await assistant_config.get(context.assistant)
# Set up prompt instructions.
instruction_text = load_text_include("focus.md")
instructions = Instructions(instruction_text)
prompt = Prompt(
instructions=instructions,
context_strategy=ContextStrategy.MULTI,
)
tasks = await TasksManager.get_tasks(context)
if tasks:
tasks_data = "\n\n".join("- " + task.content for task in tasks)
prompt.contexts.append(
DataContext(
"Consulting Tasks",
tasks_data,
"The consultant's current task list for the knowledge transfer consulting project.",
)
)
else:
prompt.contexts.append(
DataContext(
"Consulting Tasks",
"[]",
"The consultant has no current tasks for the knowledge transfer consulting project.",
)
)
class Output(BaseModel):
"""Output class to hold the generated tasks."""
reasoning: str # Reasoning behind how you are focusing the task list.
focused_tasks: list[str] # Focused task list for the knowledge transfer consultant.
# Chat completion
async with openai_client.create_client(config.service_config) as client:
try:
completion_args = {
"messages": prompt.messages(),
"model": config.request_config.openai_model,
"max_tokens": 500,
"temperature": 0.8,
"response_format": Output,
}
debug["completion_args"] = openai_client.serializable(completion_args)
# LLM call
response = await client.beta.chat.completions.parse(
**completion_args,
)
openai_client.validate_completion(response)
debug["completion_response"] = openai_client.serializable(response.model_dump())
# Response
if response and response.choices and response.choices[0].message.parsed:
output: Output = response.choices[0].message.parsed
if output.focused_tasks:
focused_task_infos = [
TaskInfo(
task_id=str(uuid.uuid4()),
content=task,
status=TaskStatus.PENDING,
priority=TaskPriority.MEDIUM
) for task in output.focused_tasks
]
await TasksManager.set_task_list(context, focused_task_infos)
await Notifications.notify(context, "Focused the task list.", debug_data=debug)
await Notifications.notify_state_update(
context,
[InspectorTab.DEBUG],
)
else:
logger.warning("Empty response from LLM while focusing the task list")
except Exception as e:
logger.exception(f"Failed to make OpenAI call: {e}")
debug["error"] = str(e)
# logger.debug(f"{__name__}: {debug}")
```
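A small, hypothetical calling sketch for the `focus` coroutine above; the wrapper name and the point at which it would be invoked are assumptions, while the call signature matches the function as defined.
```python
from assistant_extensions.attachments import AttachmentsExtension
from semantic_workbench_assistant.assistant_app import ConversationContext


async def refocus_tasks(context: ConversationContext, attachments_extension: AttachmentsExtension) -> None:
    # Hypothetical wrapper: re-run the LLM-backed focusing pass. focus() itself
    # persists the resulting task list via TasksManager and emits notifications,
    # so no return value is expected here.
    await focus(context, attachments_extension)
```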
--------------------------------------------------------------------------------
/libraries/python/semantic-workbench-assistant/tests/test_canonical.py:
--------------------------------------------------------------------------------
```python
import uuid
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from semantic_workbench_api_model import assistant_model
from semantic_workbench_assistant import canonical, settings, storage
@pytest.fixture
def canonical_assistant_service(
monkeypatch: pytest.MonkeyPatch, storage_settings: storage.FileStorageSettings
) -> FastAPI:
monkeypatch.setattr(settings, "storage", storage_settings)
return canonical.canonical_app.fastapi_app()
def test_service_init(canonical_assistant_service: FastAPI):
with TestClient(app=canonical_assistant_service):
pass
def test_create_assistant_put_config(canonical_assistant_service: FastAPI):
with TestClient(app=canonical_assistant_service) as client:
assistant_id = str(uuid.uuid4())
assistant_definition = assistant_model.AssistantPutRequestModel(
assistant_name="test-assistant", template_id="default"
)
response = client.put(f"/{assistant_id}", data={"assistant": assistant_definition.model_dump_json()})
response.raise_for_status()
response = client.get(f"/{assistant_id}/config")
response.raise_for_status()
original_config_state = assistant_model.ConfigResponseModel(**response.json())
original_config = canonical.ConfigStateModel(**original_config_state.config)
# check that the default config state is as expected so we can later assert on the
# partially updated state
assert original_config.model_dump(mode="json") == {
"un_annotated_text": "",
"short_text": "",
"long_text": "",
"setting_int": 0,
"model": {"name": "gpt35turbo"},
"prompt": {"custom_prompt": "", "temperature": 0.7},
}
config = assistant_model.ConfigPutRequestModel(
config=canonical.ConfigStateModel(
short_text="test short text - this should update",
long_text="test long text - this should update",
prompt=canonical.PromptConfigModel(
custom_prompt="test custom prompt - this should update", temperature=0.999999
),
).model_dump()
)
response = client.put(f"/{assistant_id}/config", json=config.model_dump(mode="json"))
response.raise_for_status()
updated_config_state = assistant_model.ConfigResponseModel(**response.json())
updated_config = canonical.ConfigStateModel(**updated_config_state.config)
assert updated_config.model_dump(mode="json") == {
"un_annotated_text": "",
"short_text": "test short text - this should update",
"long_text": "test long text - this should update",
"setting_int": 0,
"model": {"name": "gpt35turbo"},
"prompt": {"custom_prompt": "test custom prompt - this should update", "temperature": 0.999999},
}
def test_create_assistant_put_invalid_config(canonical_assistant_service: FastAPI):
with TestClient(app=canonical_assistant_service) as client:
assistant_id = str(uuid.uuid4())
assistant_definition = assistant_model.AssistantPutRequestModel(
assistant_name="test-assistant", template_id="default"
)
response = client.put(f"/{assistant_id}", data={"assistant": assistant_definition.model_dump_json()})
response.raise_for_status()
response = client.get(f"/{assistant_id}/config")
response.raise_for_status()
original_config_state = assistant_model.ConfigResponseModel(**response.json())
response = client.put(f"/{assistant_id}/config", json={"data": {"invalid_key": "data"}})
assert response.status_code in [422, 400]
response = client.get(f"/{assistant_id}/config")
response.raise_for_status()
after_config_state = assistant_model.ConfigResponseModel(**response.json())
assert after_config_state == original_config_state
```
--------------------------------------------------------------------------------
/assistants/knowledge-transfer-assistant/assistant/tools/learning_outcomes.py:
--------------------------------------------------------------------------------
```python
"""
Learning outcomes management tools for Knowledge Transfer Assistant.
Tools for managing individual learning outcomes within objectives.
"""
from assistant.domain import LearningObjectivesManager
from assistant.storage_models import ConversationRole
from .base import ToolsBase
class LearningOutcomeTools(ToolsBase):
"""Tools for managing learning outcomes."""
async def add_learning_outcome(self, objective_id: str, outcome_description: str) -> str:
"""
Add a new learning outcome to an existing learning objective.
WHEN TO USE:
- When you need to add additional measurable outcomes to an existing objective
- When refining objectives by breaking them down into more specific outcomes
- When expanding the scope of an objective with new learning goals
- When iteratively developing learning objectives based on feedback
Args:
objective_id: The unique ID of the learning objective to add the outcome to
outcome_description: Clear, specific description of what needs to be understood or accomplished
Returns:
A message indicating success or failure
"""
if self.role is not ConversationRole.COORDINATOR:
return "Only Coordinator can add learning outcomes."
success, message = await LearningObjectivesManager.add_learning_outcome(
context=self.context,
objective_id=objective_id,
outcome_description=outcome_description,
)
return (
message
if message
else ("Learning outcome added successfully." if success else "Failed to add learning outcome.")
)
async def update_learning_outcome(self, outcome_id: str, new_description: str) -> str:
"""
Update the description of an existing learning outcome.
WHEN TO USE:
- When clarifying or improving the wording of an existing outcome
- When making outcomes more specific or measurable
- When correcting errors in outcome descriptions
- When refining outcomes based on feedback or better understanding
Args:
outcome_id: The unique ID of the learning outcome to update
new_description: New description for the learning outcome
Returns:
A message indicating success or failure
"""
if self.role is not ConversationRole.COORDINATOR:
return "Only Coordinator can update learning outcomes."
success, message = await LearningObjectivesManager.update_learning_outcome(
context=self.context,
outcome_id=outcome_id,
new_description=new_description,
)
return (
message
if message
else ("Learning outcome updated successfully." if success else "Failed to update learning outcome.")
)
async def delete_learning_outcome(self, outcome_id: str) -> str:
"""
Delete a learning outcome from a learning objective.
WHEN TO USE:
- When an outcome is no longer relevant or necessary
- When consolidating redundant outcomes
- When removing outcomes that were added by mistake
- When simplifying objectives by removing overly specific outcomes
NOTE: This action is irreversible.
Args:
outcome_id: The unique ID of the learning outcome to delete
Returns:
A message indicating success or failure
"""
if self.role is not ConversationRole.COORDINATOR:
return "Only Coordinator can delete learning outcomes."
success, message = await LearningObjectivesManager.delete_learning_outcome(
context=self.context,
outcome_id=outcome_id,
)
return (
message
if message
else ("Learning outcome deleted successfully." if success else "Failed to delete learning outcome.")
)
```
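A hypothetical usage sketch of these tools from coordinator-side code. The constructor arguments shown (context plus role) are an assumption based on how the methods use `self.context` and `self.role`; check `tools/base.py` for the real `ToolsBase` signature, and note that the IDs are placeholders.
```python
from semantic_workbench_assistant.assistant_app import ConversationContext
from assistant.storage_models import ConversationRole


async def example_outcome_edits(context: ConversationContext) -> None:
    # Assumed constructor shape; see tools/base.py for the actual ToolsBase signature.
    tools = LearningOutcomeTools(context, ConversationRole.COORDINATOR)

    # Placeholder IDs purely for illustration.
    print(await tools.add_learning_outcome("objective-123", "Can explain the transfer workflow end to end."))
    print(await tools.update_learning_outcome("outcome-456", "Can demonstrate the transfer workflow end to end."))
    print(await tools.delete_learning_outcome("outcome-789"))
```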
--------------------------------------------------------------------------------
/docs/HOSTED_ASSISTANT_WITH_LOCAL_MCP_SERVERS.md:
--------------------------------------------------------------------------------
```markdown
# Hosted Assistant with Local MCP Servers
The [Codespace Assistant](../assistants/codespace-assistant) has support for connecting to MCP servers. This document describes how to connect a hosted instance of the Codespace Assistant to a local MCP server.
## How to connect a hosted Codespace Assistant instance to local MCP servers
NOTE: If you are running the workbench and the assistant locally, this guide will NOT help you.
To connect assistants running in a hosted workbench to a local MCP server, the MCP server needs to be available as an SSE service connectable from the public internet.
To make it accessible, you need to set up a tunnel to the localhost server.
We recommend using the Azure devtunnel service, as it provides a simple and reliable way to create a tunnel.
For MCP servers that only support stdio transport, you additionally need to run `mcp-proxy` to expose the app as an HTTP/SSE service.
## Standard input/output (stdio) transport
If the MCP server supports SSE transport, you can skip this step.
For MCP servers that only support stdio transport, you can use the mcp-proxy to expose the app as an HTTP/SSE service.
### Step 1: Determine the command to run the MCP server
This will be in the docs for the MCP server you are using.
### Step 2: Run mcp-proxy
Pick an unused port (e.g. 50001) and run the following command:
```bash
# Option 1: With uv (recommended)
uvx mcp-proxy --sse-port={port} {command}
# Option 2: With pipx (alternative)
pipx run mcp-proxy --sse-port={port} {command}
```
Example for port 50001 and command `mcp-server-fetch`:
```bash
uvx mcp-proxy --sse-port=50001 mcp-server-fetch
```
## HTTP Server-Sent Events (SSE) transport
For MCP servers that support HTTP/SSE transport, or servers for which you have set up the proxy, you can use the Azure devtunnel service to create a tunnel to the localhost server.
### Step 1: Install [devtunnel CLI](https://learn.microsoft.com/en-us/azure/developer/dev-tunnels/get-started#install)
```bash
# Windows
winget install Microsoft.DevTunnel
# Mac
brew install --cask devtunnel
# Linux
curl -sL https://aka.ms/DevTunnelCliInstall | bash
```
### Step 2: Log in to devtunnel
```bash
devtunnel user login
```
### Step 3: Determine which port the MCP server is running on
The default port that an MCP server runs on will be in the docs for the MCP server you are using. This port will often be configurable.
For servers that you have set up the proxy for, the port is the one you specified in the `--sse-port` argument when running `mcp-proxy`.
### Step 4: Create a tunnel
```bash
devtunnel host -p {mcp-server-port} --allow-anonymous --protocol http
```
Example for MCP server running on port 50001:
```bash
devtunnel host -p 50001 --allow-anonymous --protocol http
```
### Step 5: Copy the tunnel URL
`devtunnel` will print a URL like this:
```
Connect via browser: https://...
```
NOTE: Make sure you copy the "Connect via browser" URL, not the "Inspect network activity" URL.
## Configure the Codespace Assistant
1. Open the hosted Semantic Workbench app in your browser (e.g. https://contoso-semantic-workbench.azurewebsites.net).
1. Create a conversation with a Codespace Assistant or open an existing one.
1. Click the "Conversation canvas" button in the top-right corner, near your profile picture.
1. Click the "..." button next to the assistant, and click "Configure".
1. Scroll down to the "MCP Servers" section.
1. Delete the default MCP servers using the "Delete" button, if there are any. (They aren't accessible from the workbench and will cause errors.)
1. Click the "+ Add" button.
1. Update the "Key" field to give it a relevant name (e.g. "vscode" or "word").
1. Update the "Command" field to the tunnel URL you copied in the previous step, plus `/sse`. For example, if your tunnel URL is `https://abcdefghi123.usw2.devtunnels.ms`, the command should be `https://abcdefghi123.usw2.devtunnels.ms/sse`.
1. Click "Save".
1. Click "Close".
1. As a test, try asking the assistant "what tools do you have?".
```
--------------------------------------------------------------------------------
/assistants/guided-conversation-assistant/assistant/agents/guided_conversation/definitions/er_triage.py:
--------------------------------------------------------------------------------
```python
import json
from guided_conversation.utils.resources import ResourceConstraintMode, ResourceConstraintUnit
from pydantic import BaseModel, Field
from ..definition import GuidedConversationDefinition
# Define nested models for emergency room triage
class PersonalInformation(BaseModel):
name: str = Field(description="The full name of the patient in 'First Last' format.")
sex: str = Field(description="Sex of the patient (M for male, F for female).")
date_of_birth: str = Field(description="The patient's date of birth in 'MM-DD-YYYY' format.")
phone: str = Field(description="The patient's primary phone number in 'XXX-XXX-XXXX' format.")
class Artifact(BaseModel):
personal_information: PersonalInformation = Field(
description="The patient's personal information, including name, sex, date of birth, and phone."
)
chief_complaint: str = Field(description="The main reason the patient is seeking medical attention.")
symptoms: list[str] = Field(description="List of symptoms the patient is currently experiencing.")
medications: list[str] = Field(description="List of medications the patient is currently taking.")
medical_history: list[str] = Field(description="Relevant medical history including diagnoses, surgeries, etc.")
esi_level: int = Field(description="The Emergency Severity Index (ESI) level, an integer between 1 and 5.")
resource_needs: list[str] = Field(description="A list of resources or interventions needed.")
# Rules - Guidelines for triage conversations
rules = [
"DO NOT provide medical advice.",
"Terminate the conversation if inappropriate content is requested.",
"Begin by collecting basic information such as name and date of birth to quickly identify the patient.",
"Prioritize collecting the chief complaint and symptoms to assess the immediate urgency.",
"Gather relevant medical history and current medications that might affect the patient's condition.",
"If time permits, inquire about additional resource needs for patient care.",
"Maintain a calm and reassuring demeanor to help put patients at ease during questioning.",
"Focus questions to ensure the critical information needed for ESI assignment is collected first.",
"Move urgently but efficiently through questions to minimize patient wait time during triage.",
"Ensure confidentiality and handle all patient information securely.",
]
# Conversation Flow - Steps for the triage process
conversation_flow = """
1. Greet the patient and explain the purpose of collecting medical information for triage, then quickly begin collecting basic identifying information such as name and date of birth.
2. Ask about the chief complaint to understand the primary reason for the visit.
3. Inquire about current symptoms the patient is experiencing.
4. Gather relevant medical history, including past diagnoses, surgeries, and hospitalizations.
5. Ask the patient about any medications they are currently taking.
6. Determine if there are any specific resources or interventions needed immediately.
7. Evaluate the collected information to determine the Emergency Severity Index (ESI) level.
8. Reassure the patient and inform them of the next steps in their care as quickly as possible.
"""
# Context - Additional information for the triage process
context = """
Assisting patients in providing essential information during emergency room triage in a medical setting.
"""
# Resource Constraints - Defines the constraints like time for the conversation
resource_constraint = GuidedConversationDefinition.ResourceConstraint(
quantity=10,
unit=ResourceConstraintUnit.MINUTES,
mode=ResourceConstraintMode.MAXIMUM,
)
# Create instance of the GuidedConversationDefinition model with the above configuration.
er_triage = GuidedConversationDefinition(
artifact=json.dumps(Artifact.model_json_schema(), indent=2),
rules=rules,
conversation_flow=conversation_flow,
context=context,
resource_constraint=resource_constraint,
)
```
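Since the definition is plain data, variants are cheap to derive. Below is a sketch of a hypothetical stricter variant that reuses the artifact, rules, flow, and context defined above; the `er_triage_fast` name and the 5-minute cap are assumptions for illustration.
```python
# Hypothetical variant: identical content, but capped at 5 minutes instead of 10.
er_triage_fast = GuidedConversationDefinition(
    artifact=json.dumps(Artifact.model_json_schema(), indent=2),
    rules=rules,
    conversation_flow=conversation_flow,
    context=context,
    resource_constraint=GuidedConversationDefinition.ResourceConstraint(
        quantity=5,
        unit=ResourceConstraintUnit.MINUTES,
        mode=ResourceConstraintMode.MAXIMUM,
    ),
)
```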
--------------------------------------------------------------------------------
/assistants/prospector-assistant/assistant/form_fill_extension/state.py:
--------------------------------------------------------------------------------
```python
from contextlib import asynccontextmanager
from contextvars import ContextVar
from enum import StrEnum
from pathlib import Path
from typing import AsyncIterator
from pydantic import BaseModel, Field
from semantic_workbench_assistant.assistant_app.context import ConversationContext, storage_directory_for_context
from semantic_workbench_assistant.storage import read_model, write_model
from .inspector import FileStateInspector
class FieldType(StrEnum):
text = "text"
text_list = "text_list"
currency = "currency"
date = "date"
signature = "signature"
multiple_choice = "multiple_choice"
class AllowedOptionSelections(StrEnum):
one = "one"
"""One of the options can be selected."""
many = "many"
"""One or more of the options can be selected."""
class FormField(BaseModel):
id: str = Field(description="The descriptive, unique identifier of the field as a snake_case_english_string.")
name: str = Field(description="The name of the field.")
description: str = Field(description="The description of the field.")
type: FieldType = Field(description="The type of the field.")
options: list[str] = Field(description="The options for multiple choice fields.")
option_selections_allowed: AllowedOptionSelections | None = Field(
description="The number of options that can be selected for multiple choice fields."
)
required: bool = Field(
description="Whether the field is required or not. False indicates the field is optional and can be left blank."
)
class Section(BaseModel):
title: str = Field(description="The title of the section if one is provided on the form.")
description: str = Field(description="The description of the section if one is provided on the form.")
instructions: str = Field(description="The instructions for the section if they are provided on the form.")
fields: list[FormField] = Field(description="The fields of the section.")
sections: list["Section"] = Field(description="The sub-sections of the section, if any.")
class Form(Section):
title: str = Field(description="The title of the form.")
description: str = Field(description="The description of the form if one is provided on the form.")
instructions: str = Field(description="The instructions for the form if they are provided on the form.")
fields: list[FormField] = Field(description="The fields of the form, if there are any at the top level.")
sections: list[Section] = Field(description="The sections of the form, if there are any.")
class FormFillExtensionMode(StrEnum):
acquire_form_step = "acquire_form"
extract_form_fields = "extract_form_fields"
fill_form_step = "fill_form"
conversation_over = "conversation_over"
class FormFillExtensionState(BaseModel):
mode: FormFillExtensionMode = FormFillExtensionMode.acquire_form_step
form_filename: str = ""
extracted_form: Form | None = None
populated_form_markdown: str = ""
fill_form_gc_artifact: dict | None = None
def path_for_state(context: ConversationContext) -> Path:
return storage_directory_for_context(context) / "state.json"
current_state = ContextVar[FormFillExtensionState | None]("current_state", default=None)
@asynccontextmanager
async def extension_state(context: ConversationContext) -> AsyncIterator[FormFillExtensionState]:
"""
Context manager that provides the agent state, reading it from disk, and saving back
to disk after the context manager block is executed.
"""
state = current_state.get()
if state is not None:
yield state
return
async with context.state_updated_event_after(inspector.state_id):
state = read_model(path_for_state(context), FormFillExtensionState) or FormFillExtensionState()
current_state.set(state)
yield state
write_model(path_for_state(context), state)
current_state.set(None)
inspector = FileStateInspector(display_name="Debug: FormFill Agent", file_path_source=path_for_state)
```
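A short usage sketch of the `extension_state` context manager above: mutate the yielded model inside the `async with` block and it is written back to disk on exit. The function name, the specific transition, and the filename are illustrative assumptions.
```python
from semantic_workbench_assistant.assistant_app.context import ConversationContext


async def advance_to_fill_form(context: ConversationContext) -> None:
    # Hypothetical transition: move the extension into the fill-form step once a
    # form has been acquired; extension_state persists the change on exit.
    async with extension_state(context) as state:
        state.mode = FormFillExtensionMode.fill_form_step
        state.form_filename = "example-form.pdf"  # placeholder filename
```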
--------------------------------------------------------------------------------
/workbench-app/src/components/Conversations/Canvas/AssistantCanvasList.tsx:
--------------------------------------------------------------------------------
```typescript
// Copyright (c) Microsoft. All rights reserved.
import { Overflow, OverflowItem, Tab, TabList, makeStyles, shorthands, tokens } from '@fluentui/react-components';
import React from 'react';
import { useChatCanvasController } from '../../../libs/useChatCanvasController';
import { Assistant } from '../../../models/Assistant';
import { Conversation } from '../../../models/Conversation';
import { OverflowMenu, OverflowMenuItemData } from '../../App/OverflowMenu';
import { AssistantCanvas } from './AssistantCanvas';
const useClasses = makeStyles({
root: {
display: 'flex',
flexDirection: 'column',
height: '100%',
},
noAssistants: {
display: 'flex',
flexDirection: 'row',
alignItems: 'center',
justifyContent: 'space-between',
...shorthands.padding(tokens.spacingVerticalS),
},
header: {
overflow: 'hidden',
...shorthands.padding(tokens.spacingVerticalS),
...shorthands.borderBottom(tokens.strokeWidthThin, 'solid', tokens.colorNeutralStroke1),
},
});
interface AssistantCanvasListProps {
selectedAssistant?: Assistant;
conversationAssistants: Assistant[];
conversation: Conversation;
}
export const AssistantCanvasList: React.FC<AssistantCanvasListProps> = (props) => {
const { selectedAssistant, conversationAssistants, conversation } = props;
const classes = useClasses();
const chatCanvasController = useChatCanvasController();
const tabItems = React.useMemo(
() =>
conversationAssistants.slice().map(
(assistant): OverflowMenuItemData => ({
id: assistant.id,
name: assistant.name,
}),
),
[conversationAssistants],
);
const handleTabSelect = React.useCallback(
(id: string) => {
// Find the assistant that corresponds to the selected tab
const conversationAssistant = conversationAssistants.find(
(conversationAssistant) => conversationAssistant.id === id,
);
// Set the new assistant as the active assistant
// If we can't find the assistant, we'll set the assistant to undefined
chatCanvasController.transitionToState({
selectedAssistantId: conversationAssistant?.id,
selectedAssistantStateId: undefined,
});
},
[chatCanvasController, conversationAssistants],
);
const assistant = React.useMemo(
() => selectedAssistant ?? conversationAssistants[0],
[selectedAssistant, conversationAssistants],
);
if (conversationAssistants.length === 1) {
// Only one assistant, no need to show tabs, just show the single assistant
return <AssistantCanvas assistant={conversationAssistants[0]} conversationId={conversation.id} />;
}
// Multiple assistants, show tabs
return (
<div className={classes.root}>
<div className={classes.header}>
<Overflow minimumVisible={1}>
<TabList
selectedValue={assistant.id}
onTabSelect={(_, data) => handleTabSelect(data.value as string)}
size="small"
>
{tabItems.map((tabItem) => (
<OverflowItem
key={tabItem.id}
id={tabItem.id}
priority={tabItem.id === assistant.id ? 2 : 1}
>
<Tab value={tabItem.id}>{tabItem.name}</Tab>
</OverflowItem>
))}
<OverflowMenu items={tabItems} onItemSelect={handleTabSelect} />
</TabList>
</Overflow>
</div>
<AssistantCanvas assistant={assistant} conversationId={conversation.id} />
</div>
);
};
```