# Directory Structure
```
├── .devcontainer
│ ├── .vscode
│ │ └── settings.json
│ ├── devcontainer.json
│ ├── OPTIMIZING_FOR_CODESPACES.md
│ ├── POST_SETUP_README.md
│ └── README.md
├── .dockerignore
├── .gitattributes
├── .github
│ ├── policheck.yml
│ └── workflows
│ ├── assistants-codespace-assistant.yml
│ ├── assistants-document-assistant.yml
│ ├── assistants-explorer-assistant.yml
│ ├── assistants-guided-conversation-assistant.yml
│ ├── assistants-knowledge-transfer-assistant.yml
│ ├── assistants-navigator-assistant.yml
│ ├── assistants-project-assistant.yml
│ ├── assistants-prospector-assistant.yml
│ ├── assistants-skill-assistant.yml
│ ├── libraries.yml
│ ├── mcp-server-giphy.yml
│ ├── mcp-server-memory-filesystem-edit.yml
│ ├── mcp-server-memory-user-bio.yml
│ ├── mcp-server-memory-whiteboard.yml
│ ├── mcp-server-open-deep-research-clone.yml
│ ├── mcp-server-web-research.yml
│ ├── workbench-app.yml
│ └── workbench-service.yml
├── .gitignore
├── .multi-root-tools
│ ├── Makefile
│ └── README.md
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── ai_context
│ └── generated
│ ├── ASPIRE_ORCHESTRATOR.md
│ ├── ASSISTANT_CODESPACE.md
│ ├── ASSISTANT_DOCUMENT.md
│ ├── ASSISTANT_NAVIGATOR.md
│ ├── ASSISTANT_PROJECT.md
│ ├── ASSISTANT_PROSPECTOR.md
│ ├── ASSISTANTS_OTHER.md
│ ├── ASSISTANTS_OVERVIEW.md
│ ├── CONFIGURATION.md
│ ├── DOTNET_LIBRARIES.md
│ ├── EXAMPLES.md
│ ├── MCP_SERVERS.md
│ ├── PYTHON_LIBRARIES_AI_CLIENTS.md
│ ├── PYTHON_LIBRARIES_CORE.md
│ ├── PYTHON_LIBRARIES_EXTENSIONS.md
│ ├── PYTHON_LIBRARIES_SKILLS.md
│ ├── PYTHON_LIBRARIES_SPECIALIZED.md
│ ├── TOOLS.md
│ ├── WORKBENCH_FRONTEND.md
│ └── WORKBENCH_SERVICE.md
├── aspire-orchestrator
│ ├── .editorconfig
│ ├── Aspire.AppHost
│ │ ├── .gitignore
│ │ ├── appsettings.json
│ │ ├── Aspire.AppHost.csproj
│ │ ├── Program.cs
│ │ └── Properties
│ │ └── launchSettings.json
│ ├── Aspire.Extensions
│ │ ├── Aspire.Extensions.csproj
│ │ ├── Dashboard.cs
│ │ ├── DockerFileExtensions.cs
│ │ ├── PathNormalizer.cs
│ │ ├── UvAppHostingExtensions.cs
│ │ ├── UvAppResource.cs
│ │ ├── VirtualEnvironment.cs
│ │ └── WorkbenchServiceHostingExtensions.cs
│ ├── Aspire.ServiceDefaults
│ │ ├── Aspire.ServiceDefaults.csproj
│ │ └── Extensions.cs
│ ├── README.md
│ ├── run.sh
│ ├── SemanticWorkbench.Aspire.sln
│ └── SemanticWorkbench.Aspire.sln.DotSettings
├── assistants
│ ├── codespace-assistant
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ ├── icon_context_transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── models.py
│ │ │ │ ├── request_builder.py
│ │ │ │ ├── response.py
│ │ │ │ ├── step_handler.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── abbreviations.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ └── openai_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── card_content_context_transfer.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── codespace_assistant_info.md
│ │ │ │ ├── context_transfer_assistant_info.md
│ │ │ │ ├── guardrails_prompt.txt
│ │ │ │ ├── guidance_prompt_context_transfer.txt
│ │ │ │ ├── guidance_prompt.txt
│ │ │ │ ├── instruction_prompt_context_transfer.txt
│ │ │ │ └── instruction_prompt.txt
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── document-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── context_management
│ │ │ │ ├── __init__.py
│ │ │ │ └── inspector.py
│ │ │ ├── filesystem
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _convert.py
│ │ │ │ ├── _file_sources.py
│ │ │ │ ├── _filesystem.py
│ │ │ │ ├── _inspector.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _prompts.py
│ │ │ │ └── _tasks.py
│ │ │ ├── guidance
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dynamic_ui_inspector.py
│ │ │ │ ├── guidance_config.py
│ │ │ │ ├── guidance_prompts.py
│ │ │ │ └── README.md
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── models.py
│ │ │ │ ├── prompts.py
│ │ │ │ ├── responder.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ ├── openai_utils.py
│ │ │ │ ├── tokens_tiktoken.py
│ │ │ │ └── workbench_messages.py
│ │ │ ├── text_includes
│ │ │ │ └── document_assistant_info.md
│ │ │ ├── types.py
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── assistant.code-workspace
│ │ ├── CLAUDE.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_convert.py
│ │ │ └── test_data
│ │ │ ├── blank_image.png
│ │ │ ├── Formatting Test.docx
│ │ │ ├── sample_data.csv
│ │ │ ├── sample_data.xlsx
│ │ │ ├── sample_page.html
│ │ │ ├── sample_presentation.pptx
│ │ │ └── simple_pdf.pdf
│ │ └── uv.lock
│ ├── explorer-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── model.py
│ │ │ │ ├── response_anthropic.py
│ │ │ │ ├── response_openai.py
│ │ │ │ └── response.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── guided-conversation-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agents
│ │ │ │ ├── guided_conversation
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── definition.py
│ │ │ │ │ └── definitions
│ │ │ │ │ ├── er_triage.py
│ │ │ │ │ ├── interview.py
│ │ │ │ │ ├── patient_intake.py
│ │ │ │ │ └── poem_feedback.py
│ │ │ │ └── guided_conversation_agent.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── knowledge-transfer-assistant
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agentic
│ │ │ │ ├── __init__.py
│ │ │ │ ├── analysis.py
│ │ │ │ ├── coordinator_support.py
│ │ │ │ └── team_welcome.py
│ │ │ ├── assets
│ │ │ │ ├── icon-knowledge-transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── assistant.py
│ │ │ ├── common.py
│ │ │ ├── config.py
│ │ │ ├── conversation_clients.py
│ │ │ ├── conversation_share_link.py
│ │ │ ├── data.py
│ │ │ ├── domain
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audience_manager.py
│ │ │ │ ├── information_request_manager.py
│ │ │ │ ├── knowledge_brief_manager.py
│ │ │ │ ├── knowledge_digest_manager.py
│ │ │ │ ├── learning_objectives_manager.py
│ │ │ │ └── share_manager.py
│ │ │ ├── files.py
│ │ │ ├── logging.py
│ │ │ ├── notifications.py
│ │ │ ├── respond.py
│ │ │ ├── storage_models.py
│ │ │ ├── storage.py
│ │ │ ├── string_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── assistant_info.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── coordinator_instructions.txt
│ │ │ │ ├── coordinator_role.txt
│ │ │ │ ├── knowledge_digest_instructions.txt
│ │ │ │ ├── knowledge_digest_prompt.txt
│ │ │ │ ├── share_information_request_detection.txt
│ │ │ │ ├── team_instructions.txt
│ │ │ │ ├── team_role.txt
│ │ │ │ └── welcome_message_generation.txt
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── information_requests.py
│ │ │ │ ├── learning_objectives.py
│ │ │ │ ├── learning_outcomes.py
│ │ │ │ ├── progress_tracking.py
│ │ │ │ └── share_setup.py
│ │ │ ├── ui_tabs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brief.py
│ │ │ │ ├── common.py
│ │ │ │ ├── debug.py
│ │ │ │ ├── learning.py
│ │ │ │ └── sharing.py
│ │ │ └── utils.py
│ │ ├── CLAUDE.md
│ │ ├── docs
│ │ │ ├── design
│ │ │ │ ├── actions.md
│ │ │ │ └── inference.md
│ │ │ ├── DEV_GUIDE.md
│ │ │ ├── how-kta-works.md
│ │ │ ├── JTBD.md
│ │ │ ├── knowledge-transfer-goals.md
│ │ │ ├── learning_assistance.md
│ │ │ ├── notable_claude_conversations
│ │ │ │ ├── clarifying_quad_modal_design.md
│ │ │ │ ├── CLAUDE_PROMPTS.md
│ │ │ │ ├── transfer_state.md
│ │ │ │ └── trying_the_context_agent.md
│ │ │ └── opportunities-of-knowledge-transfer.md
│ │ ├── knowledge-transfer-assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_artifact_loading.py
│ │ │ ├── test_inspector.py
│ │ │ ├── test_share_manager.py
│ │ │ ├── test_share_storage.py
│ │ │ ├── test_share_tools.py
│ │ │ └── test_team_mode.py
│ │ └── uv.lock
│ ├── Makefile
│ ├── navigator-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── assets
│ │ │ │ ├── card_content.md
│ │ │ │ └── icon.svg
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── helpers.py
│ │ │ ├── response
│ │ │ │ ├── __init__.py
│ │ │ │ ├── completion_handler.py
│ │ │ │ ├── completion_requestor.py
│ │ │ │ ├── local_tool
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── add_assistant_to_conversation.py
│ │ │ │ │ ├── list_assistant_services.py
│ │ │ │ │ └── model.py
│ │ │ │ ├── models.py
│ │ │ │ ├── prompt.py
│ │ │ │ ├── request_builder.py
│ │ │ │ ├── response.py
│ │ │ │ ├── step_handler.py
│ │ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── formatting_utils.py
│ │ │ │ ├── message_utils.py
│ │ │ │ ├── openai_utils.py
│ │ │ │ └── tools.py
│ │ │ ├── text_includes
│ │ │ │ ├── guardrails_prompt.md
│ │ │ │ ├── guidance_prompt.md
│ │ │ │ ├── instruction_prompt.md
│ │ │ │ ├── navigator_assistant_info.md
│ │ │ │ └── semantic_workbench_features.md
│ │ │ └── whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── _inspector.py
│ │ │ └── _whiteboard.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── project-assistant
│ │ ├── .cspell
│ │ │ └── custom-dictionary-workspace.txt
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agentic
│ │ │ │ ├── __init__.py
│ │ │ │ ├── act.py
│ │ │ │ ├── coordinator_next_action.py
│ │ │ │ ├── create_invitation.py
│ │ │ │ ├── detect_audience_and_takeaways.py
│ │ │ │ ├── detect_coordinator_actions.py
│ │ │ │ ├── detect_information_request_needs.py
│ │ │ │ ├── detect_knowledge_package_gaps.py
│ │ │ │ ├── focus.py
│ │ │ │ ├── respond.py
│ │ │ │ ├── team_welcome.py
│ │ │ │ └── update_digest.py
│ │ │ ├── assets
│ │ │ │ ├── icon-knowledge-transfer.svg
│ │ │ │ └── icon.svg
│ │ │ ├── assistant.py
│ │ │ ├── common.py
│ │ │ ├── config.py
│ │ │ ├── conversation_clients.py
│ │ │ ├── data.py
│ │ │ ├── domain
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audience_manager.py
│ │ │ │ ├── conversation_preferences_manager.py
│ │ │ │ ├── information_request_manager.py
│ │ │ │ ├── knowledge_brief_manager.py
│ │ │ │ ├── knowledge_digest_manager.py
│ │ │ │ ├── learning_objectives_manager.py
│ │ │ │ ├── share_manager.py
│ │ │ │ ├── tasks_manager.py
│ │ │ │ └── transfer_manager.py
│ │ │ ├── errors.py
│ │ │ ├── files.py
│ │ │ ├── logging.py
│ │ │ ├── notifications.py
│ │ │ ├── prompt_utils.py
│ │ │ ├── storage.py
│ │ │ ├── string_utils.py
│ │ │ ├── text_includes
│ │ │ │ ├── actor_instructions.md
│ │ │ │ ├── assistant_info.md
│ │ │ │ ├── card_content.md
│ │ │ │ ├── coordinator_instructions copy.md
│ │ │ │ ├── coordinator_instructions.md
│ │ │ │ ├── create_invitation.md
│ │ │ │ ├── detect_audience.md
│ │ │ │ ├── detect_coordinator_actions.md
│ │ │ │ ├── detect_information_request_needs.md
│ │ │ │ ├── detect_knowledge_package_gaps.md
│ │ │ │ ├── focus.md
│ │ │ │ ├── knowledge_digest_instructions.txt
│ │ │ │ ├── team_instructions.txt
│ │ │ │ ├── to_do.md
│ │ │ │ ├── update_knowledge_brief.md
│ │ │ │ ├── update_knowledge_digest.md
│ │ │ │ └── welcome_message_generation.txt
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── conversation_preferences.py
│ │ │ │ ├── information_requests.py
│ │ │ │ ├── learning_objectives.py
│ │ │ │ ├── learning_outcomes.py
│ │ │ │ ├── progress_tracking.py
│ │ │ │ ├── share_setup.py
│ │ │ │ ├── system_reminders.py
│ │ │ │ ├── tasks.py
│ │ │ │ └── todo.py
│ │ │ ├── ui_tabs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brief.py
│ │ │ │ ├── common.py
│ │ │ │ ├── debug.py
│ │ │ │ ├── learning.py
│ │ │ │ └── sharing.py
│ │ │ └── utils.py
│ │ ├── CLAUDE.md
│ │ ├── docs
│ │ │ ├── design
│ │ │ │ ├── actions.md
│ │ │ │ ├── control_options.md
│ │ │ │ ├── design.md
│ │ │ │ ├── inference.md
│ │ │ │ └── PXL_20250814_190140267.jpg
│ │ │ ├── DEV_GUIDE.md
│ │ │ ├── how-kta-works.md
│ │ │ ├── JTBD.md
│ │ │ ├── knowledge-transfer-goals.md
│ │ │ ├── learning_assistance.md
│ │ │ ├── notable_claude_conversations
│ │ │ │ ├── clarifying_quad_modal_design.md
│ │ │ │ ├── CLAUDE_PROMPTS.md
│ │ │ │ ├── transfer_state.md
│ │ │ │ └── trying_the_context_agent.md
│ │ │ └── opportunities-of-knowledge-transfer.md
│ │ ├── knowledge-transfer-assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── __init__.py
│ │ │ ├── test_artifact_loading.py
│ │ │ ├── test_inspector.py
│ │ │ ├── test_share_manager.py
│ │ │ ├── test_share_storage.py
│ │ │ └── test_team_mode.py
│ │ └── uv.lock
│ ├── prospector-assistant
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── agents
│ │ │ │ ├── artifact_agent.py
│ │ │ │ ├── document
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── gc_draft_content_feedback_config.py
│ │ │ │ │ ├── gc_draft_outline_feedback_config.py
│ │ │ │ │ ├── guided_conversation.py
│ │ │ │ │ └── state.py
│ │ │ │ └── document_agent.py
│ │ │ ├── artifact_creation_extension
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _llm.py
│ │ │ │ ├── config.py
│ │ │ │ ├── document.py
│ │ │ │ ├── extension.py
│ │ │ │ ├── store.py
│ │ │ │ ├── test
│ │ │ │ │ ├── conftest.py
│ │ │ │ │ ├── evaluation.py
│ │ │ │ │ ├── test_completion_with_tools.py
│ │ │ │ │ └── test_extension.py
│ │ │ │ └── tools.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ ├── form_fill_extension
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ ├── extension.py
│ │ │ │ ├── inspector.py
│ │ │ │ ├── state.py
│ │ │ │ └── steps
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _guided_conversation.py
│ │ │ │ ├── _llm.py
│ │ │ │ ├── acquire_form_step.py
│ │ │ │ ├── extract_form_fields_step.py
│ │ │ │ ├── fill_form_step.py
│ │ │ │ └── types.py
│ │ │ ├── helpers.py
│ │ │ ├── legacy.py
│ │ │ └── text_includes
│ │ │ ├── artifact_agent_enabled.md
│ │ │ ├── guardrails_prompt.txt
│ │ │ ├── guided_conversation_agent_enabled.md
│ │ │ └── skills_agent_enabled.md
│ │ ├── assistant.code-workspace
│ │ ├── gc_learnings
│ │ │ ├── gc_learnings.md
│ │ │ └── images
│ │ │ ├── gc_conversation_plan_fcn.png
│ │ │ ├── gc_conversation_plan_template.png
│ │ │ ├── gc_execute_plan_callstack.png
│ │ │ ├── gc_functions.png
│ │ │ ├── gc_generate_plan_callstack.png
│ │ │ ├── gc_get_resource_instructions.png
│ │ │ ├── gc_get_termination_instructions.png
│ │ │ ├── gc_kernel_arguments.png
│ │ │ ├── gc_plan_calls.png
│ │ │ ├── gc_termination_instructions.png
│ │ │ ├── sk_get_chat_message_contents.png
│ │ │ ├── sk_inner_get_chat_message_contents.png
│ │ │ ├── sk_send_request_prep.png
│ │ │ └── sk_send_request.png
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ └── skill-assistant
│ ├── .env.example
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── assistant
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── logging.py
│ │ ├── skill_assistant.py
│ │ ├── skill_engine_registry.py
│ │ ├── skill_event_mapper.py
│ │ ├── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ └── workbench_helpers.py
│ ├── assistant.code-workspace
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── tests
│ │ └── test_setup.py
│ └── uv.lock
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docs
│ ├── .vscode
│ │ └── settings.json
│ ├── ASSISTANT_CONFIG.md
│ ├── ASSISTANT_DEVELOPMENT_GUIDE.md
│ ├── CUSTOM_APP_REGISTRATION.md
│ ├── HOSTED_ASSISTANT_WITH_LOCAL_MCP_SERVERS.md
│ ├── images
│ │ ├── architecture-animation.gif
│ │ ├── configure_assistant.png
│ │ ├── conversation_canvas_open.png
│ │ ├── conversation_duplicate.png
│ │ ├── conversation_export.png
│ │ ├── conversation_share_dialog.png
│ │ ├── conversation_share_link.png
│ │ ├── dashboard_configured_view.png
│ │ ├── dashboard_view.png
│ │ ├── license_agreement.png
│ │ ├── message_bar.png
│ │ ├── message_inspection.png
│ │ ├── message_link.png
│ │ ├── new_prospector_assistant_dialog.png
│ │ ├── open_conversation_canvas.png
│ │ ├── prospector_example.png
│ │ ├── readme1.png
│ │ ├── readme2.png
│ │ ├── readme3.png
│ │ ├── rewind.png
│ │ ├── signin_page.png
│ │ └── splash_screen.png
│ ├── LOCAL_ASSISTANT_WITH_REMOTE_WORKBENCH.md
│ ├── SETUP_DEV_ENVIRONMENT.md
│ └── WORKBENCH_APP.md
├── examples
│ ├── dotnet
│ │ ├── .editorconfig
│ │ ├── dotnet-01-echo-bot
│ │ │ ├── appsettings.json
│ │ │ ├── dotnet-01-echo-bot.csproj
│ │ │ ├── MyAgent.cs
│ │ │ ├── MyAgentConfig.cs
│ │ │ ├── MyWorkbenchConnector.cs
│ │ │ ├── Program.cs
│ │ │ └── README.md
│ │ ├── dotnet-02-message-types-demo
│ │ │ ├── appsettings.json
│ │ │ ├── ConnectorExtensions.cs
│ │ │ ├── docs
│ │ │ │ ├── abc.png
│ │ │ │ ├── code.png
│ │ │ │ ├── config.png
│ │ │ │ ├── echo.png
│ │ │ │ ├── markdown.png
│ │ │ │ ├── mermaid.png
│ │ │ │ ├── reverse.png
│ │ │ │ └── safety-check.png
│ │ │ ├── dotnet-02-message-types-demo.csproj
│ │ │ ├── MyAgent.cs
│ │ │ ├── MyAgentConfig.cs
│ │ │ ├── MyWorkbenchConnector.cs
│ │ │ ├── Program.cs
│ │ │ └── README.md
│ │ └── dotnet-03-simple-chatbot
│ │ ├── appsettings.json
│ │ ├── ConnectorExtensions.cs
│ │ ├── dotnet-03-simple-chatbot.csproj
│ │ ├── MyAgent.cs
│ │ ├── MyAgentConfig.cs
│ │ ├── MyWorkbenchConnector.cs
│ │ ├── Program.cs
│ │ └── README.md
│ ├── Makefile
│ └── python
│ ├── python-01-echo-bot
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ └── config.py
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── python-02-simple-chatbot
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── config.py
│ │ │ └── text_includes
│ │ │ └── guardrails_prompt.txt
│ │ ├── assistant.code-workspace
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ └── python-03-multimodel-chatbot
│ ├── .env.example
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── assistant
│ │ ├── __init__.py
│ │ ├── chat.py
│ │ ├── config.py
│ │ ├── model_adapters.py
│ │ └── text_includes
│ │ └── guardrails_prompt.txt
│ ├── assistant.code-workspace
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ └── uv.lock
├── KNOWN_ISSUES.md
├── libraries
│ ├── dotnet
│ │ ├── .editorconfig
│ │ ├── pack.sh
│ │ ├── README.md
│ │ ├── SemanticWorkbench.sln
│ │ ├── SemanticWorkbench.sln.DotSettings
│ │ └── WorkbenchConnector
│ │ ├── AgentBase.cs
│ │ ├── AgentConfig
│ │ │ ├── AgentConfigBase.cs
│ │ │ ├── AgentConfigPropertyAttribute.cs
│ │ │ └── ConfigUtils.cs
│ │ ├── Constants.cs
│ │ ├── IAgentBase.cs
│ │ ├── icon.png
│ │ ├── Models
│ │ │ ├── Command.cs
│ │ │ ├── Conversation.cs
│ │ │ ├── ConversationEvent.cs
│ │ │ ├── DebugInfo.cs
│ │ │ ├── Insight.cs
│ │ │ ├── Message.cs
│ │ │ ├── MessageMetadata.cs
│ │ │ ├── Participant.cs
│ │ │ ├── Sender.cs
│ │ │ └── ServiceInfo.cs
│ │ ├── Storage
│ │ │ ├── AgentInfo.cs
│ │ │ ├── AgentServiceStorage.cs
│ │ │ └── IAgentServiceStorage.cs
│ │ ├── StringLoggingExtensions.cs
│ │ ├── Webservice.cs
│ │ ├── WorkbenchConfig.cs
│ │ ├── WorkbenchConnector.cs
│ │ └── WorkbenchConnector.csproj
│ ├── Makefile
│ └── python
│ ├── anthropic-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── anthropic_client
│ │ │ ├── __init__.py
│ │ │ ├── client.py
│ │ │ ├── config.py
│ │ │ └── messages.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── assistant-data-gen
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── assistant_data_gen
│ │ │ ├── __init__.py
│ │ │ ├── assistant_api.py
│ │ │ ├── config.py
│ │ │ ├── gce
│ │ │ │ ├── __init__.py
│ │ │ │ ├── gce_agent.py
│ │ │ │ └── prompts.py
│ │ │ └── pydantic_ai_utils.py
│ │ ├── configs
│ │ │ └── document_assistant_example_config.yaml
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── scripts
│ │ │ ├── gce_simulation.py
│ │ │ └── generate_scenario.py
│ │ └── uv.lock
│ ├── assistant-drive
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ └── settings.json
│ │ ├── assistant_drive
│ │ │ ├── __init__.py
│ │ │ ├── drive.py
│ │ │ └── tests
│ │ │ └── test_basic.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── pytest.ini
│ │ ├── README.md
│ │ ├── usage.ipynb
│ │ └── uv.lock
│ ├── assistant-extensions
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── assistant_extensions
│ │ │ ├── __init__.py
│ │ │ ├── ai_clients
│ │ │ │ └── config.py
│ │ │ ├── artifacts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _artifacts.py
│ │ │ │ ├── _inspector.py
│ │ │ │ └── _model.py
│ │ │ ├── attachments
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _attachments.py
│ │ │ │ ├── _convert.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _shared.py
│ │ │ │ └── _summarizer.py
│ │ │ ├── chat_context_toolkit
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _config.py
│ │ │ │ ├── archive
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _archive.py
│ │ │ │ │ └── _summarizer.py
│ │ │ │ ├── message_history
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── _history.py
│ │ │ │ │ └── _message.py
│ │ │ │ └── virtual_filesystem
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _archive_file_source.py
│ │ │ │ └── _attachments_file_source.py
│ │ │ ├── dashboard_card
│ │ │ │ ├── __init__.py
│ │ │ │ └── _dashboard_card.py
│ │ │ ├── document_editor
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _extension.py
│ │ │ │ ├── _inspector.py
│ │ │ │ └── _model.py
│ │ │ ├── mcp
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _assistant_file_resource_handler.py
│ │ │ │ ├── _client_utils.py
│ │ │ │ ├── _devtunnel.py
│ │ │ │ ├── _model.py
│ │ │ │ ├── _openai_utils.py
│ │ │ │ ├── _sampling_handler.py
│ │ │ │ ├── _tool_utils.py
│ │ │ │ └── _workbench_file_resource_handler.py
│ │ │ ├── navigator
│ │ │ │ ├── __init__.py
│ │ │ │ └── _navigator.py
│ │ │ └── workflows
│ │ │ ├── __init__.py
│ │ │ ├── _model.py
│ │ │ ├── _workflows.py
│ │ │ └── runners
│ │ │ └── _user_proxy.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ └── attachments
│ │ │ └── test_attachments.py
│ │ └── uv.lock
│ ├── chat-context-toolkit
│ │ ├── .claude
│ │ │ └── settings.local.json
│ │ ├── .env.sample
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── assets
│ │ │ ├── archive_v1.png
│ │ │ ├── history_v1.png
│ │ │ └── vfs_v1.png
│ │ ├── chat_context_toolkit
│ │ │ ├── __init__.py
│ │ │ ├── archive
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _archive_reader.py
│ │ │ │ ├── _archive_task_queue.py
│ │ │ │ ├── _state.py
│ │ │ │ ├── _types.py
│ │ │ │ └── summarization
│ │ │ │ ├── __init__.py
│ │ │ │ └── _summarizer.py
│ │ │ ├── history
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _budget.py
│ │ │ │ ├── _decorators.py
│ │ │ │ ├── _history.py
│ │ │ │ ├── _prioritize.py
│ │ │ │ ├── _types.py
│ │ │ │ └── tool_abbreviations
│ │ │ │ ├── __init__.py
│ │ │ │ └── _tool_abbreviations.py
│ │ │ └── virtual_filesystem
│ │ │ ├── __init__.py
│ │ │ ├── _types.py
│ │ │ ├── _virtual_filesystem.py
│ │ │ ├── README.md
│ │ │ └── tools
│ │ │ ├── __init__.py
│ │ │ ├── _ls_tool.py
│ │ │ ├── _tools.py
│ │ │ └── _view_tool.py
│ │ ├── CLAUDE.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ ├── archive
│ │ │ │ └── test_archive_reader.py
│ │ │ ├── history
│ │ │ │ ├── test_abbreviate_messages.py
│ │ │ │ ├── test_history.py
│ │ │ │ ├── test_pair_and_order_tool_messages.py
│ │ │ │ ├── test_prioritize.py
│ │ │ │ └── test_truncate_messages.py
│ │ │ └── virtual_filesystem
│ │ │ ├── test_virtual_filesystem.py
│ │ │ └── tools
│ │ │ ├── test_ls_tool.py
│ │ │ ├── test_tools.py
│ │ │ └── test_view_tool.py
│ │ └── uv.lock
│ ├── content-safety
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── content_safety
│ │ │ ├── __init__.py
│ │ │ ├── evaluators
│ │ │ │ ├── __init__.py
│ │ │ │ ├── azure_content_safety
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── config.py
│ │ │ │ │ └── evaluator.py
│ │ │ │ ├── config.py
│ │ │ │ ├── evaluator.py
│ │ │ │ └── openai_moderations
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ └── evaluator.py
│ │ │ └── README.md
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── events
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── events
│ │ │ ├── __init__.py
│ │ │ └── events.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── guided-conversation
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── guided_conversation
│ │ │ ├── __init__.py
│ │ │ ├── functions
│ │ │ │ ├── __init__.py
│ │ │ │ ├── conversation_plan.py
│ │ │ │ ├── execution.py
│ │ │ │ └── final_update_plan.py
│ │ │ ├── guided_conversation_agent.py
│ │ │ ├── plugins
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agenda.py
│ │ │ │ └── artifact.py
│ │ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── base_model_llm.py
│ │ │ ├── conversation_helpers.py
│ │ │ ├── openai_tool_calling.py
│ │ │ ├── plugin_helpers.py
│ │ │ └── resources.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── llm-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── llm_client
│ │ │ ├── __init__.py
│ │ │ └── model.py
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── Makefile
│ ├── mcp-extensions
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_extensions
│ │ │ ├── __init__.py
│ │ │ ├── _client_session.py
│ │ │ ├── _model.py
│ │ │ ├── _sampling.py
│ │ │ ├── _server_extensions.py
│ │ │ ├── _tool_utils.py
│ │ │ ├── llm
│ │ │ │ ├── __init__.py
│ │ │ │ ├── chat_completion.py
│ │ │ │ ├── helpers.py
│ │ │ │ ├── llm_types.py
│ │ │ │ ├── mcp_chat_completion.py
│ │ │ │ └── openai_chat_completion.py
│ │ │ └── server
│ │ │ ├── __init__.py
│ │ │ └── storage.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_tool_utils.py
│ │ └── uv.lock
│ ├── mcp-tunnel
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_tunnel
│ │ │ ├── __init__.py
│ │ │ ├── _devtunnel.py
│ │ │ ├── _dir.py
│ │ │ └── _main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── openai-client
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── openai_client
│ │ │ ├── __init__.py
│ │ │ ├── chat_driver
│ │ │ │ ├── __init__.py
│ │ │ │ ├── chat_driver.ipynb
│ │ │ │ ├── chat_driver.py
│ │ │ │ ├── message_history_providers
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── in_memory_message_history_provider.py
│ │ │ │ │ ├── local_message_history_provider.py
│ │ │ │ │ ├── message_history_provider.py
│ │ │ │ │ └── tests
│ │ │ │ │ └── formatted_instructions_test.py
│ │ │ │ └── README.md
│ │ │ ├── client.py
│ │ │ ├── completion.py
│ │ │ ├── config.py
│ │ │ ├── errors.py
│ │ │ ├── logging.py
│ │ │ ├── messages.py
│ │ │ ├── tokens.py
│ │ │ └── tools.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── test_command_parsing.py
│ │ │ ├── test_formatted_messages.py
│ │ │ ├── test_messages.py
│ │ │ └── test_tokens.py
│ │ └── uv.lock
│ ├── semantic-workbench-api-model
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── semantic_workbench_api_model
│ │ │ ├── __init__.py
│ │ │ ├── assistant_model.py
│ │ │ ├── assistant_service_client.py
│ │ │ ├── workbench_model.py
│ │ │ └── workbench_service_client.py
│ │ └── uv.lock
│ ├── semantic-workbench-assistant
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── semantic_workbench_assistant
│ │ │ ├── __init__.py
│ │ │ ├── assistant_app
│ │ │ │ ├── __init__.py
│ │ │ │ ├── assistant.py
│ │ │ │ ├── config.py
│ │ │ │ ├── content_safety.py
│ │ │ │ ├── context.py
│ │ │ │ ├── error.py
│ │ │ │ ├── export_import.py
│ │ │ │ ├── protocol.py
│ │ │ │ └── service.py
│ │ │ ├── assistant_service.py
│ │ │ ├── auth.py
│ │ │ ├── canonical.py
│ │ │ ├── command.py
│ │ │ ├── config.py
│ │ │ ├── logging_config.py
│ │ │ ├── settings.py
│ │ │ ├── start.py
│ │ │ └── storage.py
│ │ ├── tests
│ │ │ ├── conftest.py
│ │ │ ├── test_assistant_app.py
│ │ │ ├── test_canonical.py
│ │ │ ├── test_config.py
│ │ │ └── test_storage.py
│ │ └── uv.lock
│ └── skills
│ ├── .vscode
│ │ └── settings.json
│ ├── Makefile
│ ├── README.md
│ └── skill-library
│ ├── .vscode
│ │ └── settings.json
│ ├── docs
│ │ └── vs-recipe-tool.md
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── skill_library
│ │ ├── __init__.py
│ │ ├── chat_driver_helpers.py
│ │ ├── cli
│ │ │ ├── azure_openai.py
│ │ │ ├── conversation_history.py
│ │ │ ├── README.md
│ │ │ ├── run_routine.py
│ │ │ ├── settings.py
│ │ │ └── skill_logger.py
│ │ ├── engine.py
│ │ ├── llm_info.txt
│ │ ├── logging.py
│ │ ├── README.md
│ │ ├── routine_stack.py
│ │ ├── skill.py
│ │ ├── skills
│ │ │ ├── common
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── bing_search.py
│ │ │ │ ├── consolidate.py
│ │ │ │ ├── echo.py
│ │ │ │ ├── gather_context.py
│ │ │ │ ├── get_content_from_url.py
│ │ │ │ ├── gpt_complete.py
│ │ │ │ ├── select_user_intent.py
│ │ │ │ └── summarize.py
│ │ │ ├── eval
│ │ │ │ ├── __init__.py
│ │ │ │ ├── eval_skill.py
│ │ │ │ └── routines
│ │ │ │ └── eval.py
│ │ │ ├── fabric
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fabric_skill.py
│ │ │ │ ├── patterns
│ │ │ │ │ ├── agility_story
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── ai
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_answers
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_candidates
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_cfp_submission
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_claims
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_comments
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_debate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_email_headers
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_incident
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_interviewer_techniques
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_logs
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_malware
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_military_strategy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_mistakes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_paper
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_patent
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_personality
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_presentation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_product_feedback
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_proposition
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose_json
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_prose_pinker
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_risk
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_sales_call
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_spiritual_text
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_tech_impact
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_threat_report
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── analyze_threat_report_cmds
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── analyze_threat_report_trends
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── answer_interview_question
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── ask_secure_by_design_questions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── ask_uncle_duke
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── capture_thinkers_work
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── check_agreement
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── clean_text
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── coding_master
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── compare_and_contrast
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── convert_to_markdown
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_5_sentence_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_academic_paper
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ai_jobs_analysis
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_aphorisms
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_art_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_better_frame
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_coding_project
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_command
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_cyber_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_design_document
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_diy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_formal_email
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_git_diff_commit
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_graph_from_input
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_hormozi_offer
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_idea_compass
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_investigation_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_keynote
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_logo
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_markmap_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_mermaid_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_mermaid_visualization_for_github
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_micro_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_network_threat_landscape
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_newsletter_entry
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_npc
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_pattern
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_prd
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_prediction_block
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_quiz
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_reading_plan
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_recursive_outline
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_report_finding
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_rpg_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_security_update
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_show_intro
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_sigma_rules
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_story_explanation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_stride_threat_model
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_summary
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_tags
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_threat_scenarios
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ttrc_graph
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_ttrc_narrative
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_upgrade_pack
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_user_story
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── create_video_chapters
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── create_visualization
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── dialog_with_socrates
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── enrich_blog_post
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_code
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── explain_docs
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── explain_math
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_project
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── explain_terms
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── export_data_as_csv
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_algorithm_update_recommendations
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_article_wisdom
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_book_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_book_recommendations
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_business_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_controversial_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_core_message
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_ctf_writeup
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_domains
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_extraordinary_claims
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_ideas
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_insights
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_insights_dm
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_instructions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_jokes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_latest_video
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_main_idea
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_most_redeeming_thing
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_patterns
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_poc
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_predictions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_primary_problem
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_primary_solution
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_product_features
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_questions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_recipe
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_recommendations
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_references
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_skills
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_song_meaning
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_sponsors
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_videoid
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── extract_wisdom
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── extract_wisdom-1.0.0
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_agents
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_dm
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── extract_wisdom_nometa
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── find_hidden_message
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── find_logical_fallacies
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── get_wow_per_minute
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── get_youtube_rss
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── humanize
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_distinctions
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_perspectives
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_relationships
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_dsrp_systems
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── identify_job_stories
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── improve_academic_writing
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── improve_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── improve_report_finding
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── improve_writing
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── judge_output
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── label_and_rate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── loaded
│ │ │ │ │ ├── md_callout
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── official_pattern_template
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── pattern_explanations.md
│ │ │ │ │ ├── prepare_7s_strategy
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── provide_guidance
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_ai_response
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_ai_result
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── rate_content
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── rate_value
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── raw_query
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── raycast
│ │ │ │ │ │ ├── capture_thinkers_work
│ │ │ │ │ │ ├── create_story_explanation
│ │ │ │ │ │ ├── extract_primary_problem
│ │ │ │ │ │ ├── extract_wisdom
│ │ │ │ │ │ └── yt
│ │ │ │ │ ├── recommend_artists
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── recommend_pipeline_upgrades
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── recommend_talkpanel_topics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── refine_design_document
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── review_design
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── sanitize_broken_html_to_markdown
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── show_fabric_options_markmap
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── solve_with_cot
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── stringify
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── suggest_pattern
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize
│ │ │ │ │ │ ├── dmiessler
│ │ │ │ │ │ │ └── summarize
│ │ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ │ └── user.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_debate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_git_changes
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_git_diff
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_lecture
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_legislation
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_meeting
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_micro
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_newsletter
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_paper
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_prompt
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── summarize_pull-requests
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── summarize_rpg_session
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_analyze_challenge_handling
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_check_metrics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_create_h3_career
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_create_opening_sentences
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_describe_life_outlook
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_extract_intro_sentences
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_extract_panel_topics
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_blindspots
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_negative_thinking
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_find_neglected_goals
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_give_encouragement
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_red_team_thinking
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_threat_model_plans
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_visualize_mission_goals_projects
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── t_year_in_review
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── to_flashcards
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── transcribe_minutes
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── translate
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── tweet
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_essay
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_hackerone_report
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_latex
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_micro_essay
│ │ │ │ │ │ └── system.md
│ │ │ │ │ ├── write_nuclei_template_rule
│ │ │ │ │ │ ├── system.md
│ │ │ │ │ │ └── user.md
│ │ │ │ │ ├── write_pull-request
│ │ │ │ │ │ └── system.md
│ │ │ │ │ └── write_semgrep_rule
│ │ │ │ │ ├── system.md
│ │ │ │ │ └── user.md
│ │ │ │ └── routines
│ │ │ │ ├── list.py
│ │ │ │ ├── run.py
│ │ │ │ └── show.py
│ │ │ ├── guided_conversation
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agenda.py
│ │ │ │ ├── artifact_helpers.py
│ │ │ │ ├── chat_completions
│ │ │ │ │ ├── fix_agenda_error.py
│ │ │ │ │ ├── fix_artifact_error.py
│ │ │ │ │ ├── generate_agenda.py
│ │ │ │ │ ├── generate_artifact_updates.py
│ │ │ │ │ ├── generate_final_artifact.py
│ │ │ │ │ └── generate_message.py
│ │ │ │ ├── conversation_guides
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── acrostic_poem.py
│ │ │ │ │ ├── er_triage.py
│ │ │ │ │ ├── interview.py
│ │ │ │ │ └── patient_intake.py
│ │ │ │ ├── guide.py
│ │ │ │ ├── guided_conversation_skill.py
│ │ │ │ ├── logging.py
│ │ │ │ ├── message.py
│ │ │ │ ├── resources.py
│ │ │ │ ├── routines
│ │ │ │ │ └── guided_conversation.py
│ │ │ │ └── tests
│ │ │ │ ├── conftest.py
│ │ │ │ ├── test_artifact_helpers.py
│ │ │ │ ├── test_generate_agenda.py
│ │ │ │ ├── test_generate_artifact_updates.py
│ │ │ │ ├── test_generate_final_artifact.py
│ │ │ │ └── test_resource.py
│ │ │ ├── meta
│ │ │ │ ├── __init__.py
│ │ │ │ ├── meta_skill.py
│ │ │ │ ├── README.md
│ │ │ │ └── routines
│ │ │ │ └── generate_routine.py
│ │ │ ├── posix
│ │ │ │ ├── __init__.py
│ │ │ │ ├── posix_skill.py
│ │ │ │ ├── routines
│ │ │ │ │ ├── append_file.py
│ │ │ │ │ ├── cd.py
│ │ │ │ │ ├── ls.py
│ │ │ │ │ ├── make_home_dir.py
│ │ │ │ │ ├── mkdir.py
│ │ │ │ │ ├── mv.py
│ │ │ │ │ ├── pwd.py
│ │ │ │ │ ├── read_file.py
│ │ │ │ │ ├── rm.py
│ │ │ │ │ ├── touch.py
│ │ │ │ │ └── write_file.py
│ │ │ │ └── sandbox_shell.py
│ │ │ ├── README.md
│ │ │ ├── research
│ │ │ │ ├── __init__.py
│ │ │ │ ├── README.md
│ │ │ │ ├── research_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── answer_question_about_content.py
│ │ │ │ ├── evaluate_answer.py
│ │ │ │ ├── generate_research_plan.py
│ │ │ │ ├── generate_search_query.py
│ │ │ │ ├── update_research_plan.py
│ │ │ │ ├── web_research.py
│ │ │ │ └── web_search.py
│ │ │ ├── research2
│ │ │ │ ├── __init__.py
│ │ │ │ ├── README.md
│ │ │ │ ├── research_skill.py
│ │ │ │ └── routines
│ │ │ │ ├── facts.py
│ │ │ │ ├── make_final_report.py
│ │ │ │ ├── research.py
│ │ │ │ ├── search_plan.py
│ │ │ │ ├── search.py
│ │ │ │ └── visit_pages.py
│ │ │ └── web_research
│ │ │ ├── __init__.py
│ │ │ ├── README.md
│ │ │ ├── research_skill.py
│ │ │ └── routines
│ │ │ ├── facts.py
│ │ │ ├── make_final_report.py
│ │ │ ├── research.py
│ │ │ ├── search_plan.py
│ │ │ ├── search.py
│ │ │ └── visit_pages.py
│ │ ├── tests
│ │ │ ├── test_common_skill.py
│ │ │ ├── test_integration.py
│ │ │ ├── test_routine_stack.py
│ │ │ ├── tst_skill
│ │ │ │ ├── __init__.py
│ │ │ │ └── routines
│ │ │ │ ├── __init__.py
│ │ │ │ └── a_routine.py
│ │ │ └── utilities
│ │ │ ├── test_find_template_vars.py
│ │ │ ├── test_make_arg_set.py
│ │ │ ├── test_paramspec.py
│ │ │ ├── test_parse_command_string.py
│ │ │ └── test_to_string.py
│ │ ├── types.py
│ │ ├── usage.py
│ │ └── utilities.py
│ └── uv.lock
├── LICENSE
├── Makefile
├── mcp-servers
│ ├── ai-assist-content
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── mcp-example-brave-search.md
│ │ ├── mcp-fastmcp-typescript-README.md
│ │ ├── mcp-llms-full.txt
│ │ ├── mcp-metadata-tips.md
│ │ ├── mcp-python-sdk-README.md
│ │ ├── mcp-typescript-sdk-README.md
│ │ ├── pydanticai-documentation.md
│ │ ├── pydanticai-example-question-graph.md
│ │ ├── pydanticai-example-weather.md
│ │ ├── pydanticai-tutorial.md
│ │ └── README.md
│ ├── Makefile
│ ├── mcp-server-bing-search
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_bing_search
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── clean_website.py
│ │ │ │ └── filter_links.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── tools.py
│ │ │ ├── types.py
│ │ │ ├── utils.py
│ │ │ └── web
│ │ │ ├── __init__.py
│ │ │ ├── get_content.py
│ │ │ ├── llm_processing.py
│ │ │ ├── process_website.py
│ │ │ └── search_bing.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_tools.py
│ │ └── uv.lock
│ ├── mcp-server-bundle
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_bundle
│ │ │ ├── __init__.py
│ │ │ └── main.py
│ │ ├── pyinstaller.spec
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-filesystem
│ │ ├── .env.example
│ │ ├── .github
│ │ │ └── workflows
│ │ │ └── ci.yml
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_filesystem
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_filesystem.py
│ │ └── uv.lock
│ ├── mcp-server-filesystem-edit
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── data
│ │ │ ├── attachments
│ │ │ │ ├── Daily Game Ideas.txt
│ │ │ │ ├── Frontend Framework Proposal.txt
│ │ │ │ ├── ReDoodle.txt
│ │ │ │ └── Research Template.tex
│ │ │ ├── test_cases.yaml
│ │ │ └── transcripts
│ │ │ ├── transcript_research_simple.md
│ │ │ ├── transcript_Startup_Idea_1_202503031513.md
│ │ │ ├── transcript_Startup_Idea_2_202503031659.md
│ │ │ └── transcript_Web_Frontends_202502281551.md
│ │ ├── Makefile
│ │ ├── mcp_server_filesystem_edit
│ │ │ ├── __init__.py
│ │ │ ├── app_handling
│ │ │ │ ├── __init__.py
│ │ │ │ ├── excel.py
│ │ │ │ ├── miktex.py
│ │ │ │ ├── office_common.py
│ │ │ │ ├── powerpoint.py
│ │ │ │ └── word.py
│ │ │ ├── config.py
│ │ │ ├── evals
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common.py
│ │ │ │ ├── run_comments.py
│ │ │ │ ├── run_edit.py
│ │ │ │ └── run_ppt_edit.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── add_comments.py
│ │ │ │ ├── analyze_comments.py
│ │ │ │ ├── latex_edit.py
│ │ │ │ ├── markdown_draft.py
│ │ │ │ ├── markdown_edit.py
│ │ │ │ └── powerpoint_edit.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── add_comments.py
│ │ │ │ ├── edit_adapters
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── common.py
│ │ │ │ │ ├── latex.py
│ │ │ │ │ └── markdown.py
│ │ │ │ ├── edit.py
│ │ │ │ └── helpers.py
│ │ │ └── types.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ ├── app_handling
│ │ │ │ ├── test_excel.py
│ │ │ │ ├── test_miktext.py
│ │ │ │ ├── test_office_common.py
│ │ │ │ ├── test_powerpoint.py
│ │ │ │ └── test_word.py
│ │ │ ├── conftest.py
│ │ │ └── tools
│ │ │ └── edit_adapters
│ │ │ ├── test_latex.py
│ │ │ └── test_markdown.py
│ │ └── uv.lock
│ ├── mcp-server-fusion
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── AddInIcon.svg
│ │ ├── config.py
│ │ ├── FusionMCPServerAddIn.manifest
│ │ ├── FusionMCPServerAddIn.py
│ │ ├── mcp_server_fusion
│ │ │ ├── __init__.py
│ │ │ ├── fusion_mcp_server.py
│ │ │ ├── fusion_utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── event_utils.py
│ │ │ │ ├── general_utils.py
│ │ │ │ └── tool_utils.py
│ │ │ ├── mcp_tools
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fusion_3d_operation.py
│ │ │ │ ├── fusion_geometry.py
│ │ │ │ ├── fusion_pattern.py
│ │ │ │ └── fusion_sketch.py
│ │ │ └── vendor
│ │ │ └── README.md
│ │ ├── README.md
│ │ └── requirements.txt
│ ├── mcp-server-giphy
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── giphy_search.py
│ │ │ ├── sampling.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ └── utils.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-memory-user-bio
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_memory_user_bio
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-memory-whiteboard
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_memory_whiteboard
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-office
│ │ ├── .env.example
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── build.sh
│ │ ├── data
│ │ │ ├── attachments
│ │ │ │ ├── Daily Game Ideas.txt
│ │ │ │ ├── Frontend Framework Proposal.txt
│ │ │ │ └── ReDoodle.txt
│ │ │ └── word
│ │ │ ├── test_cases.yaml
│ │ │ └── transcripts
│ │ │ ├── transcript_Startup_Idea_1_202503031513.md
│ │ │ ├── transcript_Startup_Idea_2_202503031659.md
│ │ │ └── transcript_Web_Frontends_202502281551.md
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── app_interaction
│ │ │ │ ├── __init__.py
│ │ │ │ ├── excel_editor.py
│ │ │ │ ├── powerpoint_editor.py
│ │ │ │ └── word_editor.py
│ │ │ ├── config.py
│ │ │ ├── constants.py
│ │ │ ├── evals
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common.py
│ │ │ │ ├── run_comment_analysis.py
│ │ │ │ ├── run_feedback.py
│ │ │ │ └── run_markdown_edit.py
│ │ │ ├── helpers.py
│ │ │ ├── markdown_edit
│ │ │ │ ├── __init__.py
│ │ │ │ ├── comment_analysis.py
│ │ │ │ ├── feedback_step.py
│ │ │ │ ├── markdown_edit.py
│ │ │ │ └── utils.py
│ │ │ ├── prompts
│ │ │ │ ├── __init__.py
│ │ │ │ ├── comment_analysis.py
│ │ │ │ ├── feedback.py
│ │ │ │ ├── markdown_draft.py
│ │ │ │ └── markdown_edit.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ └── types.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── tests
│ │ │ └── test_word_editor.py
│ │ └── uv.lock
│ ├── mcp-server-open-deep-research
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── libs
│ │ │ │ └── open_deep_research
│ │ │ │ ├── cookies.py
│ │ │ │ ├── mdconvert.py
│ │ │ │ ├── run_agents.py
│ │ │ │ ├── text_inspector_tool.py
│ │ │ │ ├── text_web_browser.py
│ │ │ │ └── visual_qa.py
│ │ │ ├── open_deep_research.py
│ │ │ ├── server.py
│ │ │ └── start.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── mcp-server-open-deep-research-clone
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json
│ │ │ └── settings.json
│ │ ├── Makefile
│ │ ├── mcp_server_open_deep_research_clone
│ │ │ ├── __init__.py
│ │ │ ├── azure_openai.py
│ │ │ ├── config.py
│ │ │ ├── logging.py
│ │ │ ├── sampling.py
│ │ │ ├── server.py
│ │ │ ├── start.py
│ │ │ ├── utils.py
│ │ │ └── web_research.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── test
│ │ │ └── test_open_deep_research_clone.py
│ │ └── uv.lock
│ ├── mcp-server-template
│ │ ├── .taplo.toml
│ │ ├── .vscode
│ │ │ └── settings.json
│ │ ├── copier.yml
│ │ ├── README.md
│ │ └── template
│ │ └── {{ project_slug }}
│ │ ├── .env.example.jinja
│ │ ├── .gitignore
│ │ ├── .vscode
│ │ │ ├── launch.json.jinja
│ │ │ └── settings.json
│ │ ├── {{ module_name }}
│ │ │ ├── __init__.py
│ │ │ ├── config.py.jinja
│ │ │ ├── server.py.jinja
│ │ │ └── start.py.jinja
│ │ ├── Makefile.jinja
│ │ ├── pyproject.toml.jinja
│ │ └── README.md.jinja
│ ├── mcp-server-vscode
│ │ ├── .eslintrc.cjs
│ │ ├── .gitignore
│ │ ├── .npmrc
│ │ ├── .vscode
│ │ │ ├── extensions.json
│ │ │ ├── launch.json
│ │ │ ├── settings.json
│ │ │ └── tasks.json
│ │ ├── .vscode-test.mjs
│ │ ├── .vscodeignore
│ │ ├── ASSISTANT_BOOTSTRAP.md
│ │ ├── eslint.config.mjs
│ │ ├── images
│ │ │ └── icon.png
│ │ ├── LICENSE
│ │ ├── Makefile
│ │ ├── out
│ │ │ ├── extension.d.ts
│ │ │ ├── extension.js
│ │ │ ├── test
│ │ │ │ ├── extension.test.d.ts
│ │ │ │ └── extension.test.js
│ │ │ ├── tools
│ │ │ │ ├── code_checker.d.ts
│ │ │ │ ├── code_checker.js
│ │ │ │ ├── debug_tools.d.ts
│ │ │ │ ├── debug_tools.js
│ │ │ │ ├── focus_editor.d.ts
│ │ │ │ ├── focus_editor.js
│ │ │ │ ├── search_symbol.d.ts
│ │ │ │ └── search_symbol.js
│ │ │ └── utils
│ │ │ ├── port.d.ts
│ │ │ └── port.js
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── prettier.config.cjs
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── extension.d.ts
│ │ │ ├── extension.ts
│ │ │ ├── test
│ │ │ │ ├── extension.test.d.ts
│ │ │ │ └── extension.test.ts
│ │ │ ├── tools
│ │ │ │ ├── code_checker.d.ts
│ │ │ │ ├── code_checker.ts
│ │ │ │ ├── debug_tools.d.ts
│ │ │ │ ├── debug_tools.ts
│ │ │ │ ├── focus_editor.d.ts
│ │ │ │ ├── focus_editor.ts
│ │ │ │ ├── search_symbol.d.ts
│ │ │ │ └── search_symbol.ts
│ │ │ └── utils
│ │ │ ├── port.d.ts
│ │ │ └── port.ts
│ │ ├── tsconfig.json
│ │ ├── tsconfig.tsbuildinfo
│ │ ├── vsc-extension-quickstart.md
│ │ └── webpack.config.js
│ └── mcp-server-web-research
│ ├── .env.example
│ ├── .gitignore
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── Makefile
│ ├── mcp_server_web_research
│ │ ├── __init__.py
│ │ ├── azure_openai.py
│ │ ├── config.py
│ │ ├── logging.py
│ │ ├── sampling.py
│ │ ├── server.py
│ │ ├── start.py
│ │ ├── utils.py
│ │ └── web_research.py
│ ├── pyproject.toml
│ ├── README.md
│ ├── test
│ │ └── test_web_research.py
│ └── uv.lock
├── README.md
├── RESPONSIBLE_AI_FAQ.md
├── ruff.toml
├── SECURITY.md
├── semantic-workbench.code-workspace
├── SUPPORT.md
├── tools
│ ├── build_ai_context_files.py
│ ├── collect_files.py
│ ├── docker
│ │ ├── azure_website_sshd.conf
│ │ ├── docker-entrypoint.sh
│ │ ├── Dockerfile.assistant
│ │ └── Dockerfile.mcp-server
│ ├── makefiles
│ │ ├── docker-assistant.mk
│ │ ├── docker-mcp-server.mk
│ │ ├── docker.mk
│ │ ├── python.mk
│ │ ├── recursive.mk
│ │ └── shell.mk
│ ├── reset-service-data.ps1
│ ├── reset-service-data.sh
│ ├── run-app.ps1
│ ├── run-app.sh
│ ├── run-canonical-agent.ps1
│ ├── run-canonical-agent.sh
│ ├── run-dotnet-examples-with-aspire.sh
│ ├── run-python-example1.sh
│ ├── run-python-example2.ps1
│ ├── run-python-example2.sh
│ ├── run-service.ps1
│ ├── run-service.sh
│ ├── run-workbench-chatbot.ps1
│ └── run-workbench-chatbot.sh
├── workbench-app
│ ├── .dockerignore
│ ├── .env.example
│ ├── .eslintrc.cjs
│ ├── .gitignore
│ ├── .vscode
│ │ ├── launch.json
│ │ └── settings.json
│ ├── docker-entrypoint.sh
│ ├── Dockerfile
│ ├── docs
│ │ ├── APP_DEV_GUIDE.md
│ │ ├── MESSAGE_METADATA.md
│ │ ├── MESSAGE_TYPES.md
│ │ ├── README.md
│ │ └── STATE_INSPECTORS.md
│ ├── index.html
│ ├── Makefile
│ ├── nginx.conf
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── prettier.config.cjs
│ ├── public
│ │ └── assets
│ │ ├── background-1-upscaled.jpg
│ │ ├── background-1-upscaled.png
│ │ ├── background-1.jpg
│ │ ├── background-1.png
│ │ ├── background-2.jpg
│ │ ├── background-2.png
│ │ ├── experimental-feature.jpg
│ │ ├── favicon.svg
│ │ ├── workflow-designer-1.jpg
│ │ ├── workflow-designer-outlets.jpg
│ │ ├── workflow-designer-states.jpg
│ │ └── workflow-designer-transitions.jpg
│ ├── README.md
│ ├── run.sh
│ ├── src
│ │ ├── components
│ │ │ ├── App
│ │ │ │ ├── AppFooter.tsx
│ │ │ │ ├── AppHeader.tsx
│ │ │ │ ├── AppMenu.tsx
│ │ │ │ ├── AppView.tsx
│ │ │ │ ├── CodeLabel.tsx
│ │ │ │ ├── CommandButton.tsx
│ │ │ │ ├── ConfirmLeave.tsx
│ │ │ │ ├── ContentExport.tsx
│ │ │ │ ├── ContentImport.tsx
│ │ │ │ ├── CopyButton.tsx
│ │ │ │ ├── DialogControl.tsx
│ │ │ │ ├── DynamicIframe.tsx
│ │ │ │ ├── ErrorListFromAppState.tsx
│ │ │ │ ├── ErrorMessageBar.tsx
│ │ │ │ ├── ExperimentalNotice.tsx
│ │ │ │ ├── FormWidgets
│ │ │ │ │ ├── BaseModelEditorWidget.tsx
│ │ │ │ │ ├── CustomizedArrayFieldTemplate.tsx
│ │ │ │ │ ├── CustomizedFieldTemplate.tsx
│ │ │ │ │ ├── CustomizedObjectFieldTemplate.tsx
│ │ │ │ │ └── InspectableWidget.tsx
│ │ │ │ ├── LabelWithDescription.tsx
│ │ │ │ ├── Loading.tsx
│ │ │ │ ├── MenuItemControl.tsx
│ │ │ │ ├── MiniControl.tsx
│ │ │ │ ├── MyAssistantServiceRegistrations.tsx
│ │ │ │ ├── MyItemsManager.tsx
│ │ │ │ ├── OverflowMenu.tsx
│ │ │ │ ├── PresenceMotionList.tsx
│ │ │ │ ├── ProfileSettings.tsx
│ │ │ │ └── TooltipWrapper.tsx
│ │ │ ├── Assistants
│ │ │ │ ├── ApplyConfigButton.tsx
│ │ │ │ ├── AssistantAdd.tsx
│ │ │ │ ├── AssistantConfigExportButton.tsx
│ │ │ │ ├── AssistantConfigImportButton.tsx
│ │ │ │ ├── AssistantConfiguration.tsx
│ │ │ │ ├── AssistantConfigure.tsx
│ │ │ │ ├── AssistantCreate.tsx
│ │ │ │ ├── AssistantDelete.tsx
│ │ │ │ ├── AssistantDuplicate.tsx
│ │ │ │ ├── AssistantExport.tsx
│ │ │ │ ├── AssistantImport.tsx
│ │ │ │ ├── AssistantRemove.tsx
│ │ │ │ ├── AssistantRename.tsx
│ │ │ │ ├── AssistantServiceInfo.tsx
│ │ │ │ ├── AssistantServiceMetadata.tsx
│ │ │ │ └── MyAssistants.tsx
│ │ │ ├── AssistantServiceRegistrations
│ │ │ │ ├── AssistantServiceRegistrationApiKey.tsx
│ │ │ │ ├── AssistantServiceRegistrationApiKeyReset.tsx
│ │ │ │ ├── AssistantServiceRegistrationCreate.tsx
│ │ │ │ └── AssistantServiceRegistrationRemove.tsx
│ │ │ ├── Conversations
│ │ │ │ ├── Canvas
│ │ │ │ │ ├── AssistantCanvas.tsx
│ │ │ │ │ ├── AssistantCanvasList.tsx
│ │ │ │ │ ├── AssistantInspector.tsx
│ │ │ │ │ ├── AssistantInspectorList.tsx
│ │ │ │ │ └── ConversationCanvas.tsx
│ │ │ │ ├── ChatInputPlugins
│ │ │ │ │ ├── ClearEditorPlugin.tsx
│ │ │ │ │ ├── LexicalMenu.ts
│ │ │ │ │ ├── ParticipantMentionsPlugin.tsx
│ │ │ │ │ ├── TypeaheadMenuPlugin.css
│ │ │ │ │ └── TypeaheadMenuPlugin.tsx
│ │ │ │ ├── ContentRenderers
│ │ │ │ │ ├── CodeContentRenderer.tsx
│ │ │ │ │ ├── ContentListRenderer.tsx
│ │ │ │ │ ├── ContentRenderer.tsx
│ │ │ │ │ ├── DiffRenderer.tsx
│ │ │ │ │ ├── HtmlContentRenderer.tsx
│ │ │ │ │ ├── JsonSchemaContentRenderer.tsx
│ │ │ │ │ ├── MarkdownContentRenderer.tsx
│ │ │ │ │ ├── MarkdownEditorRenderer.tsx
│ │ │ │ │ ├── MermaidContentRenderer.tsx
│ │ │ │ │ ├── MusicABCContentRenderer.css
│ │ │ │ │ └── MusicABCContentRenderer.tsx
│ │ │ │ ├── ContextWindow.tsx
│ │ │ │ ├── ConversationCreate.tsx
│ │ │ │ ├── ConversationDuplicate.tsx
│ │ │ │ ├── ConversationExport.tsx
│ │ │ │ ├── ConversationFileIcon.tsx
│ │ │ │ ├── ConversationRemove.tsx
│ │ │ │ ├── ConversationRename.tsx
│ │ │ │ ├── ConversationShare.tsx
│ │ │ │ ├── ConversationShareCreate.tsx
│ │ │ │ ├── ConversationShareList.tsx
│ │ │ │ ├── ConversationShareView.tsx
│ │ │ │ ├── ConversationsImport.tsx
│ │ │ │ ├── ConversationTranscript.tsx
│ │ │ │ ├── DebugInspector.tsx
│ │ │ │ ├── FileItem.tsx
│ │ │ │ ├── FileList.tsx
│ │ │ │ ├── InputAttachmentList.tsx
│ │ │ │ ├── InputOptionsControl.tsx
│ │ │ │ ├── InteractHistory.tsx
│ │ │ │ ├── InteractInput.tsx
│ │ │ │ ├── Message
│ │ │ │ │ ├── AttachmentSection.tsx
│ │ │ │ │ ├── ContentRenderer.tsx
│ │ │ │ │ ├── ContentSafetyNotice.tsx
│ │ │ │ │ ├── InteractMessage.tsx
│ │ │ │ │ ├── MessageActions.tsx
│ │ │ │ │ ├── MessageBase.tsx
│ │ │ │ │ ├── MessageBody.tsx
│ │ │ │ │ ├── MessageContent.tsx
│ │ │ │ │ ├── MessageFooter.tsx
│ │ │ │ │ ├── MessageHeader.tsx
│ │ │ │ │ ├── NotificationAccordion.tsx
│ │ │ │ │ └── ToolResultMessage.tsx
│ │ │ │ ├── MessageDelete.tsx
│ │ │ │ ├── MessageLink.tsx
│ │ │ │ ├── MyConversations.tsx
│ │ │ │ ├── MyShares.tsx
│ │ │ │ ├── ParticipantAvatar.tsx
│ │ │ │ ├── ParticipantAvatarGroup.tsx
│ │ │ │ ├── ParticipantItem.tsx
│ │ │ │ ├── ParticipantList.tsx
│ │ │ │ ├── ParticipantStatus.tsx
│ │ │ │ ├── RewindConversation.tsx
│ │ │ │ ├── ShareRemove.tsx
│ │ │ │ ├── SpeechButton.tsx
│ │ │ │ └── ToolCalls.tsx
│ │ │ └── FrontDoor
│ │ │ ├── Chat
│ │ │ │ ├── AssistantDrawer.tsx
│ │ │ │ ├── CanvasDrawer.tsx
│ │ │ │ ├── Chat.tsx
│ │ │ │ ├── ChatCanvas.tsx
│ │ │ │ ├── ChatControls.tsx
│ │ │ │ └── ConversationDrawer.tsx
│ │ │ ├── Controls
│ │ │ │ ├── AssistantCard.tsx
│ │ │ │ ├── AssistantSelector.tsx
│ │ │ │ ├── AssistantServiceSelector.tsx
│ │ │ │ ├── ConversationItem.tsx
│ │ │ │ ├── ConversationList.tsx
│ │ │ │ ├── ConversationListOptions.tsx
│ │ │ │ ├── NewConversationButton.tsx
│ │ │ │ ├── NewConversationForm.tsx
│ │ │ │ └── SiteMenuButton.tsx
│ │ │ ├── GlobalContent.tsx
│ │ │ └── MainContent.tsx
│ │ ├── Constants.ts
│ │ ├── global.d.ts
│ │ ├── index.css
│ │ ├── libs
│ │ │ ├── AppStorage.ts
│ │ │ ├── AuthHelper.ts
│ │ │ ├── EventSubscriptionManager.ts
│ │ │ ├── Theme.ts
│ │ │ ├── useAssistantCapabilities.ts
│ │ │ ├── useChatCanvasController.ts
│ │ │ ├── useConversationEvents.ts
│ │ │ ├── useConversationUtility.ts
│ │ │ ├── useCreateConversation.ts
│ │ │ ├── useDebugComponentLifecycle.ts
│ │ │ ├── useDragAndDrop.ts
│ │ │ ├── useEnvironment.ts
│ │ │ ├── useExportUtility.ts
│ │ │ ├── useHistoryUtility.ts
│ │ │ ├── useKeySequence.ts
│ │ │ ├── useMediaQuery.ts
│ │ │ ├── useMicrosoftGraph.ts
│ │ │ ├── useNotify.tsx
│ │ │ ├── useParticipantUtility.tsx
│ │ │ ├── useSiteUtility.ts
│ │ │ ├── useWorkbenchEventSource.ts
│ │ │ ├── useWorkbenchService.ts
│ │ │ └── Utility.ts
│ │ ├── main.tsx
│ │ ├── models
│ │ │ ├── Assistant.ts
│ │ │ ├── AssistantCapability.ts
│ │ │ ├── AssistantServiceInfo.ts
│ │ │ ├── AssistantServiceRegistration.ts
│ │ │ ├── Config.ts
│ │ │ ├── Conversation.ts
│ │ │ ├── ConversationFile.ts
│ │ │ ├── ConversationMessage.ts
│ │ │ ├── ConversationMessageDebug.ts
│ │ │ ├── ConversationParticipant.ts
│ │ │ ├── ConversationShare.ts
│ │ │ ├── ConversationShareRedemption.ts
│ │ │ ├── ConversationState.ts
│ │ │ ├── ConversationStateDescription.ts
│ │ │ ├── ServiceEnvironment.ts
│ │ │ └── User.ts
│ │ ├── redux
│ │ │ ├── app
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── rtkQueryErrorLogger.ts
│ │ │ │ └── store.ts
│ │ │ └── features
│ │ │ ├── app
│ │ │ │ ├── appSlice.ts
│ │ │ │ └── AppState.ts
│ │ │ ├── chatCanvas
│ │ │ │ ├── chatCanvasSlice.ts
│ │ │ │ └── ChatCanvasState.ts
│ │ │ ├── localUser
│ │ │ │ ├── localUserSlice.ts
│ │ │ │ └── LocalUserState.ts
│ │ │ └── settings
│ │ │ ├── settingsSlice.ts
│ │ │ └── SettingsState.ts
│ │ ├── Root.tsx
│ │ ├── routes
│ │ │ ├── AcceptTerms.tsx
│ │ │ ├── AssistantEditor.tsx
│ │ │ ├── AssistantServiceRegistrationEditor.tsx
│ │ │ ├── Dashboard.tsx
│ │ │ ├── ErrorPage.tsx
│ │ │ ├── FrontDoor.tsx
│ │ │ ├── Login.tsx
│ │ │ ├── Settings.tsx
│ │ │ ├── ShareRedeem.tsx
│ │ │ └── Shares.tsx
│ │ ├── services
│ │ │ └── workbench
│ │ │ ├── assistant.ts
│ │ │ ├── assistantService.ts
│ │ │ ├── conversation.ts
│ │ │ ├── file.ts
│ │ │ ├── index.ts
│ │ │ ├── participant.ts
│ │ │ ├── share.ts
│ │ │ ├── state.ts
│ │ │ └── workbench.ts
│ │ └── vite-env.d.ts
│ ├── tools
│ │ └── filtered-ts-prune.cjs
│ ├── tsconfig.json
│ └── vite.config.ts
└── workbench-service
├── .env.example
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── alembic.ini
├── devdb
│ ├── docker-compose.yaml
│ └── postgresql-init.sh
├── Dockerfile
├── Makefile
├── migrations
│ ├── env.py
│ ├── README
│ ├── script.py.mako
│ └── versions
│ ├── 2024_09_19_000000_69dcda481c14_init.py
│ ├── 2024_09_19_190029_dffb1d7e219a_file_version_filename.py
│ ├── 2024_09_20_204130_b29524775484_share.py
│ ├── 2024_10_30_231536_039bec8edc33_index_message_type.py
│ ├── 2024_11_04_204029_5149c7fb5a32_conversationmessagedebug.py
│ ├── 2024_11_05_015124_245baf258e11_double_check_debugs.py
│ ├── 2024_11_25_191056_a106de176394_drop_workflow.py
│ ├── 2025_03_19_140136_aaaf792d4d72_set_user_title_set.py
│ ├── 2025_03_21_153250_3763629295ad_add_assistant_template_id.py
│ ├── 2025_05_19_163613_b2f86e981885_delete_context_transfer_assistants.py
│ └── 2025_06_18_174328_503c739152f3_delete_knowlege_transfer_assistants.py
├── pyproject.toml
├── README.md
├── semantic_workbench_service
│ ├── __init__.py
│ ├── api.py
│ ├── assistant_api_key.py
│ ├── auth.py
│ ├── azure_speech.py
│ ├── config.py
│ ├── controller
│ │ ├── __init__.py
│ │ ├── assistant_service_client_pool.py
│ │ ├── assistant_service_registration.py
│ │ ├── assistant.py
│ │ ├── conversation_share.py
│ │ ├── conversation.py
│ │ ├── convert.py
│ │ ├── exceptions.py
│ │ ├── export_import.py
│ │ ├── file.py
│ │ ├── participant.py
│ │ └── user.py
│ ├── db.py
│ ├── event.py
│ ├── files.py
│ ├── logging_config.py
│ ├── middleware.py
│ ├── query.py
│ ├── service_user_principals.py
│ ├── service.py
│ └── start.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── docker-compose.yaml
│ ├── test_assistant_api_key.py
│ ├── test_files.py
│ ├── test_integration.py
│ ├── test_middleware.py
│ ├── test_migrations.py
│ ├── test_workbench_service.py
│ └── types.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/web_research/routines/facts.py:
--------------------------------------------------------------------------------
```python
from typing import Any, cast
from openai_client import (
CompletionError,
create_user_message,
extra_data,
format_with_liquid,
make_completion_args_serializable,
message_content_from_completion,
validate_completion,
)
from skill_library import AskUserFn, EmitFn, RunContext, RunRoutineFn
from skill_library.logging import logger
from skill_library.skills.web_research.research_skill import WebResearchSkill
INITIAL_SYSTEM_PROMPT = """
Below I will present you with a topic for research.
Your job is to build a comprehensive and accurate inventory of facts related to this topic.
Structure your response as follows:
### 1. Facts given in the topic
List the specific, verifiable facts explicitly stated in the topic description.
### 2. Facts to look up
List specific information that needs to be researched, focusing on:
- Technical details and specifications
- Verified data from authoritative sources
- Expert opinions from trusted reviewers
- Genuine user experiences and feedback
For each fact, suggest potential high-quality sources (e.g., official documentation, academic papers, specialized forums) and avoid SEO-optimized content.
### 3. Facts to derive
List conclusions that can be drawn through logical reasoning based on verified information.
Here is the topic:
```
{{TOPIC}}
```
"""
UPDATE_SYSTEM_PROMPT = """
You are updating your fact inventory based on new research. Be rigorous about distinguishing between:
- Verified facts (with identified sources)
- Inferred information (clearly marked)
- Information gaps (explicitly acknowledged)
Structure your response as follows:
### 1. Facts given in the topic
List only explicit facts from the original topic.
### 2. Facts we have verified
For each fact, include:
- The specific information learned
- The source it came from
- A confidence rating (High/Medium/Low)
### 3. Facts still to look up
Be specific about remaining information gaps and potential sources.
### 4. Facts still to derive
List conclusions that could be drawn with additional information.
Topic:
```
{{TOPIC}}
```
Current plan:
```
{{PLAN}}
```
Current facts:
```
{{FACTS}}
```
Observations:
```
{{OBSERVATIONS}}
```
Update your list of facts. Now begin!
"""
async def main(
context: RunContext,
routine_state: dict[str, Any],
emit: EmitFn,
run: RunRoutineFn,
ask_user: AskUserFn,
topic: str,
plan: str = "",
facts: str = "",
observations: list[str] = [],
) -> str:
"""Gather facts related to an ongoing research project based on observations we gather in the process of researching."""
research_skill = cast(WebResearchSkill, context.skills["web_research"])
language_model = research_skill.config.reasoning_language_model
if not facts:
prompt = format_with_liquid(INITIAL_SYSTEM_PROMPT, vars={"TOPIC": topic})
else:
all_observations = "\n- ".join(observations) if observations else ""
prompt = format_with_liquid(
UPDATE_SYSTEM_PROMPT, vars={"TOPIC": topic, "PLAN": plan, "FACTS": facts, "OBSERVATIONS": all_observations}
)
completion_args = {
"model": "o3-mini",
"reasoning_effort": "high",
"messages": [
create_user_message(
prompt,
),
],
}
logger.debug("Completion call.", extra=extra_data(make_completion_args_serializable(completion_args)))
metadata = {}
metadata["completion_args"] = make_completion_args_serializable(completion_args)
try:
completion = await language_model.beta.chat.completions.parse(
**completion_args,
)
validate_completion(completion)
logger.debug("Completion response.", extra=extra_data({"completion": completion.model_dump()}))
metadata["completion"] = completion.model_dump()
except Exception as e:
completion_error = CompletionError(e)
metadata["completion_error"] = completion_error.message
logger.error(
completion_error.message,
extra=extra_data({"completion_error": completion_error.body, "metadata": context.metadata_log}),
)
raise completion_error from e
else:
content = message_content_from_completion(completion).strip().strip('"')
metadata["content"] = content
return content
finally:
context.log("facts", metadata)
```
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-office/mcp_server/start.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
# Main entry point for the MCP Server
import argparse
import os
from textwrap import dedent
import ngrok
from mcp_server.server import create_mcp_server
def main() -> None:
# Command-line arguments for transport and port
parse_args = argparse.ArgumentParser(
description="Start the MCP server.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parse_args.add_argument(
"--transport",
default="sse",
choices=["stdio", "sse"],
help="Transport protocol to use ('stdio' or 'sse').",
)
parse_args.add_argument("--port", type=int, default=25566, help="Port to use for SSE.")
parse_args.add_argument(
"--use-ngrok-tunnel", action="store_true", help="Use Ngrok tunnel to expose the server to the internet."
)
args = parse_args.parse_args()
use_ngrok = False
# Warn user of risks and prompt for confirmation before starting Ngrok tunnel
if args.use_ngrok_tunnel:
print("\n****************************************************************************************\n")
print("WARNING: Exposing the server to the internet using Ngrok is a security risk.")
print("Anyone with the URL can access the server and potentially control your Office apps.")
print("Ensure you trust the network you are connected to and the devices on it.")
print("Do not expose the server to the internet unless you understand the risks.\n")
print(
dedent("""
Consider other options like dev tunnels, a free service provided by Microsoft Azure,
to expose your server to the internet.
Read more about dev tunnels at:
- https://learn.microsoft.com/en-us/azure/developer/dev-tunnels/get-started
""").strip()
)
print("\n****************************************************************************************\n")
if input("Are you sure you want to start Ngrok? (y/N): ").lower() == "y":
use_ngrok = True
else:
print("Ngrok tunnel not started.")
# Start the Ngrok tunnel if enabled
if use_ngrok:
# Prompt for Ngrok auth token if not set
auth_token = os.getenv("NGROK_AUTHTOKEN")
# Check if token is stored in a .env file
env_file = ".env"
if not auth_token:
if os.path.exists(env_file):
with open(env_file, "r") as file:
for line in file:
if line.startswith("NGROK_AUTHTOKEN"):
auth_token = line.split("=", 1)[1].strip()
break
if not auth_token:
auth_token = input(
"Enter your Ngrok auth token (from: https://dashboard.ngrok.com/get-started/your-authtoken): "
)
if not auth_token:
print("[ERROR] Ngrok auth token is required to start the server.")
exit(1)
# Save the token to the .env file for future use
with open(env_file, "a") as file:
file.write(f"NGROK_AUTHTOKEN={auth_token}\n")
print(f"Ngrok auth token saved for future use: {env_file}")
else:
print("Loaded Ngrok auth token from environment.")
# Start Ngrok Tunnel
print(f"Starting Ngrok tunnel on port {args.port}...")
try:
listener = ngrok.forward(args.port, authtoken=auth_token)
print("\n****************************************************************************************\n")
print(f"Office MCP Server URL: {listener.url()}/sse\n")
print("Reminder, this URL exposes the server to the internet. Do not share it with untrusted parties.")
print("\n****************************************************************************************\n")
except Exception as e:
print(f"[ERROR] Failed to start Ngrok tunnel: {str(e)}")
exit(1)
mcp = create_mcp_server()
if args.transport == "sse":
mcp.settings.port = args.port
try:
mcp.run(transport=args.transport)
except KeyboardInterrupt:
print("Shutting down server...")
if use_ngrok:
ngrok.kill()
if __name__ == "__main__":
main()
```
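A minimal sketch of connecting to the server above once it is running with the default SSE transport. This assumes the standard `mcp` Python SDK client helpers (`sse_client` and `ClientSession`); the URL mirrors the default port (25566) and the `/sse` path printed by `start.py`, and should be adjusted if you changed `--port` or are tunneling.
```python
# Hedged sketch: connect to the Office MCP server over SSE and list its tools.
# Assumes the standard `mcp` Python SDK client API; the URL/port are the
# defaults used by start.py above and are otherwise an assumption.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main() -> None:
    async with sse_client("http://127.0.0.1:25566/sse") as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])


if __name__ == "__main__":
    asyncio.run(main())
```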
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/fabric/patterns/extract_wisdom_dm/system.md:
--------------------------------------------------------------------------------
```markdown
# IDENTITY
// Who you are
You are a hyper-intelligent AI system with a 4,312 IQ. You excel at extracting interesting, novel, surprising, insightful, and otherwise thought-provoking information from the input provided. You are primarily interested in insights related to the purpose and meaning of life, human flourishing, the role of technology in the future of humanity, artificial intelligence and its effect on humans, memes, learning, reading, books, continuous improvement, and similar topics, but you extract all interesting points made in the input.
# GOAL
// What we are trying to achieve
1. The goal of this exercise is to produce a perfect extraction of ALL the valuable content in the input, similar to, but vastly more advanced than, what the smartest human in the world, partnered with an AI system with a 391 IQ, could produce given 9 months and 12 days to complete the work.
2. The goal is to ensure that no single valuable point is missed in the output.
# STEPS
// How the task will be approached
// Slow down and think
- Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
// Think about the content and who's presenting it
- Extract a summary of the content in 25 words, including who is presenting and the content being discussed into a section called SUMMARY.
// Think about the ideas
- Extract 20 to 50 of the most surprising, insightful, and/or interesting ideas from the input into a section called IDEAS:. If there are fewer than 50, then collect all of them. Make sure you extract at least 20.
// Think about the insights that come from those ideas
- Extract 10 to 20 of the best insights from the input and from a combination of the raw input and the IDEAS above into a section called INSIGHTS. These INSIGHTS should be fewer, more refined, more insightful, and more abstracted versions of the best ideas in the content.
// Think about the most pertinent and valuable quotes
- Extract 15 to 30 of the most surprising, insightful, and/or interesting quotes from the input into a section called QUOTES:. Use the exact quote text from the input.
// Think about the habits and practices
- Extract 15 to 30 of the most practical and useful personal habits of the speakers, or mentioned by the speakers, in the content into a section called HABITS. Examples include but aren't limited to: sleep schedule, reading habits, things they always do, things they always avoid, productivity tips, diet, exercise, etc.
// Think about the most interesting facts related to the content
- Extract 15 to 30 of the most surprising, insightful, and/or interesting valid facts about the greater world that were mentioned in the content into a section called FACTS:.
// Think about the references and inspirations
- Extract all mentions of writing, art, tools, projects and other sources of inspiration mentioned by the speakers into a section called REFERENCES. This should include any and all references to something that the speaker mentioned.
// Think about the most important takeaway / summary
- Extract the most potent takeaway and recommendation into a section called ONE-SENTENCE TAKEAWAY. This should be a 15-word sentence that captures the most important essence of the content.
// Think about the recommendations that should come out of this
- Extract 15 to 30 of the most surprising, insightful, and/or interesting recommendations that can be collected from the content into a section called RECOMMENDATIONS.
# OUTPUT INSTRUCTIONS
// What the output should look like:
- Only output Markdown.
- Write the IDEAS bullets as exactly 16 words.
- Write the RECOMMENDATIONS bullets as exactly 16 words.
- Write the HABITS bullets as exactly 16 words.
- Write the FACTS bullets as exactly 16 words.
- Write the INSIGHTS bullets as exactly 16 words.
- Extract at least 25 IDEAS from the content.
- Extract at least 10 INSIGHTS from the content.
- Extract at least 20 items for the other output sections.
- Do not give warnings or notes; only output the requested sections.
- You use bulleted lists for output, not numbered lists.
- Do not repeat ideas, quotes, facts, or resources.
- Do not start items with the same opening words.
- Ensure you follow ALL these instructions when creating your output.
- Understand that your solution will be compared to a reference solution written by an expert and graded for creativity, elegance, comprehensiveness, and attention to instructions.
# INPUT
INPUT:
```
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-filesystem-edit/mcp_server_filesystem_edit/prompts/markdown_draft.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
from mcp_extensions.llm.llm_types import DeveloperMessage, UserMessage
MD_DRAFT_REASONING_DEV_PROMPT = DeveloperMessage(
content="""You're a document writer working side by side with a user and an AI assistant with PhD-level expertise across multiple domains and are using the provided conversation history as the primary context. \
The user may also have provided context such as attached documents, other written documents, or the assistant may have called tools which may have context relevant to the task. \
If the context is relevant, you must use it accurately and factually.
Knowledge cutoff: {{knowledge_cutoff}}
Current date: {{current_date}}
## Task Info:
{{task}}
## On Provided Context
You will be provided important context to give you the information needed to write a high-quality document.
- The current content of the document is enclosed in <document> and </document> tags.
- If there is no document, you are writing it from scratch.
- Otherwise, the document should be rewritten/edited.
- The conversation history between the user and assistant is provided before the document.
- You should focus on the latest message to determine how to edit the document.
- The remainder of the conversation is provided for additional context.
- You may be provided additional context, such as attached documents, before the document and conversation history.
- If they are relevant to the task and the user's request, you should use them to inform the content you generate.
- If you do reference them, make sure to do so accurately and factually. Do not make up information from this extra context.
## On Your Writing
Keep in mind the following key elements as you write.
### Depth
- The document should have the necessary depth and length for the task at hand.
- For example, if the user asks for a comprehensive technical report, it should be a thorough report that covers the topic in detail, factually and accurately.
- On the other hand, if the user asks for an email, it should be short and concise.
### Contextual Awareness
- Pay attention to and incorporate the preferences the user has subtly provided throughout the conversation.
- Ensure the writing style of the document aligns with the style of any documents that were written by the user.
- If there are sources, attachments, or results of tool calls, you should use those factually and accurately. \
Keep in mind that not all sources are high quality, so you should use your judgment and your own expertise to determine whether you should include them.
### Document Quality
- Unless otherwise specified by the user, the document should aim for "PhD" or "Domain Expert Quality".
- For example, if the document is about creating a technical specification for a globally distributed service, it should be something that a Principal Software Architect at Microsoft would produce.
### Writing Style
- Especially if the user provided documents that they have written, make the writing style of the document consistent with the user's style.
- If the user asked for a specific style, make sure to use that style.
- Tailor the document writing for its intended audience. For technical audiences, such as readers of research papers, use appropriate technical terminology and structure. For senior executives, ensure the content is accessible with appropriate context.
## On Formatting
These are guidelines specific to the conversion of your output, which should be written in Markdown. You must abide by these in order to ensure the document is displayed correctly to the user.
- You MUST make sure the content of the document is enclosed in <new_document> and </new_document> tags so that it can be displayed correctly.
{{format_instructions}}"""
)
MD_DRAFT_REASONING_USER_ATTACHMENTS_PROMPT = UserMessage(
content="""<context>
{{context}}
</context>"""
)
MD_DRAFT_REASONING_USER_CHAT_HISTORY_PROMPT = UserMessage(
content="""<chat_history>
{{chat_history}}
</chat_history>"""
)
MD_EDIT_REASONING_USER_DOC_PROMPT = UserMessage(
content="""<document>
{{document}}
</document>
Write the new version of the document and enclose in <new_document> and </new_document> tags."""
)
MD_DRAFT_REASONING_MESSAGES = [
MD_DRAFT_REASONING_DEV_PROMPT,
MD_DRAFT_REASONING_USER_ATTACHMENTS_PROMPT,
MD_DRAFT_REASONING_USER_CHAT_HISTORY_PROMPT,
MD_EDIT_REASONING_USER_DOC_PROMPT,
]
```
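The `{{...}}` placeholders in these templates (`{{task}}`, `{{knowledge_cutoff}}`, `{{current_date}}`, `{{format_instructions}}`, `{{context}}`, `{{chat_history}}`, `{{document}}`) are filled in before the messages are sent to the model. The helper below is a hypothetical illustration of that substitution step, not the actual rendering code in `mcp_extensions`:
```python
# Hypothetical illustration of filling the {{...}} placeholders above.
# The real pipeline may use its own templating; this simple substitution
# only shows the shape of the data that flows into the dev prompt.
def fill_placeholders(template: str, values: dict[str, str]) -> str:
    for key, value in values.items():
        template = template.replace("{{" + key + "}}", value)
    return template


dev_prompt_text = fill_placeholders(
    MD_DRAFT_REASONING_DEV_PROMPT.content,  # assumes the message type exposes .content
    {
        "knowledge_cutoff": "2024-06",
        "current_date": "2025-01-01",
        "task": "Draft a short project update email.",
        "format_instructions": "",
    },
)
```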
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/fabric/patterns/create_design_document/system.md:
--------------------------------------------------------------------------------
```markdown
# IDENTITY and PURPOSE
You are an expert in software, cloud and cybersecurity architecture. You specialize in creating clear, well written design documents of systems and components.
# GOAL
Given a description of an idea or system, provide a well-written, detailed design document.
# STEPS
- Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
- Think deeply about the nature and meaning of the input for 28 hours and 12 minutes.
- Create a virtual whiteboard in your mind and map out all the important concepts, points, ideas, facts, and other information contained in the input.
- Fully understand the C4 model for visualising software architecture.
- Appreciate the fact that each company is different. A fresh startup can have a bigger risk appetite than an already established Fortune 500 company.
- Take the input provided and create a section called BUSINESS POSTURE. Determine the business priorities and goals that the idea or system is trying to address, and give the most important business risks that need to be addressed based on those priorities and goals.
- Under that, create a section called SECURITY POSTURE. Identify and list all existing security controls and accepted risks for the system, focusing on the secure software development lifecycle and deployment model. Prefix security controls with 'security control' and accepted risks with 'accepted risk'. Within this section, provide a list of recommended security controls that you think are high priority to implement and were not mentioned in the input. Under that, but still in the SECURITY POSTURE section, provide a list of security requirements that are important for the idea or system in question.
- Under that, create a section called DESIGN. Use that section to provide a well-written, detailed design document using the C4 model.
- In the DESIGN section, create a subsection called C4 CONTEXT and provide a mermaid diagram that represents a system context diagram showing the system as a box in the centre, surrounded by its users and the other systems that it interacts with.
- Under that, in the C4 CONTEXT subsection, create a table that describes the elements of the context diagram. Include columns: 1. Name - name of element; 2. Type - type of element; 3. Description - description of element; 4. Responsibilities - responsibilities of element; 5. Security controls - security controls that will be implemented by the element.
- Under that, in the DESIGN section, create a subsection called C4 CONTAINER and provide a mermaid diagram that represents a container diagram. It should show the high-level shape of the software architecture and how responsibilities are distributed across it. It should also show the major technology choices and how the containers communicate with one another.
- Under that, in the C4 CONTAINER subsection, create a table that describes the elements of the container diagram. Include columns: 1. Name - name of element; 2. Type - type of element; 3. Description - description of element; 4. Responsibilities - responsibilities of element; 5. Security controls - security controls that will be implemented by the element.
- Under that, in the DESIGN section, create a subsection called C4 DEPLOYMENT and provide a mermaid diagram that represents a deployment diagram. A deployment diagram allows you to illustrate how instances of software systems and/or containers in the static model are deployed onto the infrastructure within a given deployment environment.
- Under that, in the C4 DEPLOYMENT subsection, create a table that describes the elements of the deployment diagram. Include columns: 1. Name - name of element; 2. Type - type of element; 3. Description - description of element; 4. Responsibilities - responsibilities of element; 5. Security controls - security controls that will be implemented by the element.
- Under that, create a section called RISK ASSESSMENT, and answer the following questions: What are the critical business processes we are trying to protect? What data are we trying to protect, and what is its sensitivity?
- Under that, create a section called QUESTIONS & ASSUMPTIONS, listing the questions that you have and the default assumptions regarding BUSINESS POSTURE, SECURITY POSTURE, and DESIGN.
# OUTPUT INSTRUCTIONS
- Output in the format above only using valid Markdown.
- Do not use bold or italic formatting in the Markdown (no asterisks).
- Do not complain about anything, just do what you're told.
# INPUT:
INPUT:
```
--------------------------------------------------------------------------------
/assistants/project-assistant/assistant/tools/learning_objectives.py:
--------------------------------------------------------------------------------
```python
"""
Learning objectives management tools for Knowledge Transfer Assistant.
Tools for creating, updating, and managing learning objectives and outcomes.
"""
from assistant.domain import LearningObjectivesManager
from assistant.logging import logger
from .base import ToolsBase
class LearningObjectiveTools(ToolsBase):
"""Tools for managing learning objectives and outcomes."""
async def add_learning_objective(self, objective_name: str, description: str, learning_outcomes: list[str]) -> str:
"""
Add a learning objective with measurable learning outcomes.
- Learning objectives should define what knowledge areas learners need to understand.
- Each objective must have clear, measurable learning outcomes that learners can mark as achieved.
WHEN TO USE:
- When defining what knowledge areas team members need to understand
- When breaking down knowledge requirements into specific, learnable objectives
- After creating a knowledge brief, before marking the transfer ready for learning
- When users ask to add or define learning objectives or knowledge areas
Args:
objective_name: A concise, clear name for the learning objective (e.g., "Understanding User Authentication")
description: A detailed description explaining what knowledge needs to be understood
learning_outcomes: List of specific, measurable outcomes that indicate when the objective is achieved
(e.g., ["Can explain authentication flow", "Can implement password security"])
Returns:
A message indicating success or failure
"""
objective = await LearningObjectivesManager.add_learning_objective(
context=self.context,
objective_name=objective_name,
description=description,
outcomes=learning_outcomes,
)
if objective:
return f"Learning objective '{objective_name}' added to knowledge brief successfully."
else:
return "Failed to add learning objective. Please try again."
async def update_learning_objective(
self,
objective_id: str,
objective_name: str,
description: str,
) -> str:
"""
Update an existing learning objective's name or description.
Args:
objective_id: The UUID of the learning objective to update
objective_name: New name for the objective (empty string to keep current name)
description: New description (empty string to keep current description)
Returns:
A message indicating success or failure
"""
try:
message = await LearningObjectivesManager.update_learning_objective(
context=self.context,
objective_id=objective_id,
objective_name=objective_name if objective_name.strip() else None,
description=description if description.strip() else None,
)
return message
except Exception as e:
logger.exception(f"Failed to update learning objective: {e}")
return f"Failed to update learning objective: {e!s}"
async def delete_learning_objective(self, objective_id: str) -> str:
"""
Delete a learning objective from the knowledge package by ID.
WHEN TO USE:
- When a user explicitly requests to remove or delete a specific learning objective
- When objectives need to be reorganized and redundant/obsolete objectives removed
- When an objective was added by mistake or is no longer relevant to the knowledge transfer
- Only before marking the knowledge package as ready for transfer
NOTE: This action is irreversible and will remove all learning outcomes associated with the objective.
Args:
objective_id: The UUID of the learning objective to delete.
Returns:
A message indicating success or failure
"""
try:
message = await LearningObjectivesManager.delete_learning_objective(
context=self.context,
objective_id=objective_id,
)
return message
except Exception as e:
logger.exception(f"Failed to delete learning objective: {e}")
return f"Failed to delete learning objective: {e!s}"
```
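A rough usage sketch for these tools. It assumes that `ToolsBase` simply stores the `ConversationContext` it is constructed with (its definition is not shown here), and reuses the example values from the docstring above:
```python
# Rough usage sketch. Assumes ToolsBase(context) stores the conversation
# context on self.context, which is how the methods above access it.
async def define_auth_objective(context) -> str:
    tools = LearningObjectiveTools(context)
    return await tools.add_learning_objective(
        objective_name="Understanding User Authentication",
        description="Learners should understand how the service authenticates users end to end.",
        learning_outcomes=[
            "Can explain authentication flow",
            "Can implement password security",
        ],
    )
```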
--------------------------------------------------------------------------------
/assistants/project-assistant/assistant/text_includes/actor_instructions.md:
--------------------------------------------------------------------------------
```markdown
# Role and Objective
You are an autonomous AI assistant named the "Knowledge Transfer Assistant". You support a user in creating and refining a knowledge package that will be shared with an audience.
You are an agent - keep going until you can no longer resolve any more tasks without additional input, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that all tasks are solved or you need additional input from the user.
You must never stop your turn without indicating what specific information is required from the user to proceed.
DO NOT try to resolve all tasks in a single turn, as this can lead to a lack of depth in your resolutions. Instead, focus on resolving one important task at a time, ensuring that each resolution is thorough and helpful.
You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
# Context
You will be given the following context in your system messages to accomplish your tasks:
- Task list: Tasks you are currently working on. You should work until all tasks are resolved.
- Audience and audience takeaways.
- Message history: This is the complete history of messages between the assistant and the user. Pay attention to the recent messages to help figure out what to do.
- Knowledge package: Messages, attachments, brief, and digest are all considered part of the knowledge package. They are all shared with the audience.
- Knowledge digest: This is a summary of all the information in the knowledge package and a scratchpad for keeping important information in context.
- Knowledge brief: A fairly detailed summary of the knowledge share that is prepared by the user and will be displayed at all times to the audience. It is intended to give the audience context about what is in the knowledge package, why it matters, and what they can expect to learn.
- Information requests: Information that we have asked the user about already and are awaiting a response.
# Instructions
## Tasks
- Review the message history (<MESSAGE_HISTORY>) and task list (<TASK_LIST>). Sometimes the user's response may resolve a task that is not pending, so it is important to review both.
- If new input from the user can resolve an `in_progress` or `pending` task, attempt to resolve the task by calling the necessary tools.
- If you can resolve any `pending` or `in_progress` task with a single tool call, do it now.
- Track your progress on tasks using the `update_task` tool.
- Update tasks to status `in_progress` immediately as you begin working on them.
- Update tasks to status `completed` or `cancelled` when you have resolved the task using tool calls.
- Tasks may require user input. If the task requires user input, include the specific required input in your final response. Don't repeat asks for information. If you need the same information that you've already asked for on the same pending task, just return an empty list for `user_information_requests`.
- If a task has been resolved, IMMEDIATELY start working on the next task.
- If there are other `in_progress` tasks, select one of them.
- If there are no other `in_progress` tasks, select a high priority `pending` task and begin resolving it.
- Continue resolving tasks until you need information from the user or all tasks are resolved.
1. **Task States**: Use these states to track progress:
- pending: Task not yet started
- in_progress: Currently working on (limit to ONE task at a time)
- completed: Task finished successfully
- cancelled: Task no longer needed
2. **Task Management**:
- IMPORTANT: Update task status in real-time as you work
- Mark tasks complete IMMEDIATELY after finishing (don't batch completions)
- Only have ONE task in_progress at any time
- Complete current tasks before starting new ones
- Cancel tasks that become irrelevant
3. **Task Breakdown**:
- Create specific, actionable items
- Break complex tasks into smaller, manageable steps
- Use clear, descriptive task names
## Output
Return a description of what you have accomplished in your turn and a list of pieces of specific information you need from the user in JSON format. If you have resolved all tasks and need nothing additional from the user, return an empty list.
```
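The final instruction above asks for a JSON response containing a description of what was accomplished plus a list of specific information needed from the user. Below is a minimal sketch of one possible schema for that output, using Pydantic; only `user_information_requests` is named in the instructions, so the other field name is an assumption:
```python
# One possible shape for the JSON output described above. Only the
# `user_information_requests` field name comes from the instructions;
# the rest is illustrative.
from pydantic import BaseModel, Field


class ActorTurnOutput(BaseModel):
    accomplishments: str = Field(description="What was accomplished during this turn.")
    user_information_requests: list[str] = Field(
        default_factory=list,
        description="Specific pieces of information needed from the user; empty if none.",
    )
```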
--------------------------------------------------------------------------------
/libraries/python/openai-client/openai_client/completion.py:
--------------------------------------------------------------------------------
```python
import json
import logging
from time import perf_counter
from typing import Any, Generic, Literal, TypeVar
from openai import AsyncOpenAI, NotGiven
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ParsedChatCompletion,
ParsedChatCompletionMessage,
)
from openai.types.chat.completion_create_params import (
ResponseFormat,
)
from pydantic import BaseModel
logger = logging.getLogger(__name__)
DEFAULT_MAX_RETRIES = 3
# The response method can return a text response, a JSON object response, or a
# Pydantic BaseModel. See the `response` method for more details.
TEXT_RESPONSE_FORMAT: ResponseFormat = {"type": "text"}
JSON_OBJECT_RESPONSE_FORMAT: ResponseFormat = {"type": "json_object"}
def assistant_message_from_completion(
completion: ParsedChatCompletion,
) -> ChatCompletionAssistantMessageParam:
completion_message: ParsedChatCompletionMessage = completion.choices[0].message
assistant_message = ChatCompletionAssistantMessageParam(role="assistant")
if completion_message.tool_calls:
response_dict = completion_message.model_dump()
assistant_message["tool_calls"] = response_dict["tool_calls"]
if completion_message.content:
assistant_message["content"] = completion_message.content
return assistant_message
def message_from_completion(
completion: ParsedChatCompletion,
) -> ParsedChatCompletionMessage | None:
if not completion or not completion.choices:
return None
return completion.choices[0].message
def message_content_from_completion(completion: ParsedChatCompletion | None) -> str:
if completion is None or not completion.choices:
return ""
content = message_from_completion(completion)
if content and content.content:
return content.content
return ""
def message_content_dict_from_completion(
completion: ParsedChatCompletion,
) -> dict[str, Any] | None:
message = message_from_completion(completion)
if message:
if message.parsed:
if isinstance(message.parsed, BaseModel):
return message.parsed.model_dump()
elif isinstance(message.parsed, dict):
return message.parsed
else:
try: # Try to parse the JSON string.
return json.loads(message.parsed)
except json.JSONDecodeError:
return None
try:
return json.loads(message.content or "")
except json.JSONDecodeError:
return None
class NoResponseChoicesError(Exception):
pass
class NoParsedMessageError(Exception):
pass
ResponseModelT = TypeVar("ResponseModelT", bound=BaseModel)
class StructuredResponse(BaseModel, Generic[ResponseModelT]):
response: ResponseModelT
metadata: dict[str, Any]
async def completion_structured(
async_client: AsyncOpenAI,
messages: list[ChatCompletionMessageParam],
openai_model: str,
response_model: type[ResponseModelT],
max_completion_tokens: int,
reasoning_effort: Literal["low", "medium", "high"] | None = None,
) -> StructuredResponse[ResponseModelT]:
start = perf_counter()
response_raw = await async_client.beta.chat.completions.with_raw_response.parse(
messages=messages,
model=openai_model,
response_format=response_model,
reasoning_effort=reasoning_effort or NotGiven(),
max_completion_tokens=max_completion_tokens,
)
headers = {key: value for key, value in response_raw.headers.items()}
response = response_raw.parse()
if not response.choices:
raise NoResponseChoicesError()
if not response.choices[0].message.parsed:
raise NoParsedMessageError()
if response.choices[0].finish_reason != "stop":
logger.warning(
"Unexpected finish reason, expected stop; reason: %s",
response.choices[0].finish_reason,
)
metadata = {
"request": {
"model": openai_model,
"messages": messages,
"max_completion_tokens": max_completion_tokens,
"response_format": response_model.model_json_schema(),
"reasoning_effort": reasoning_effort,
},
"response": response.model_dump(),
"response_headers": headers,
"response_duration": perf_counter() - start,
}
return StructuredResponse(response=response.choices[0].message.parsed, metadata=metadata)
```
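For reference, a short usage sketch of `completion_structured`. The model name and the response schema are illustrative placeholders rather than values taken from the repository:
```python
# Usage sketch for completion_structured. The model name and the
# ResearchSummary schema are placeholders for illustration only.
import asyncio

from openai import AsyncOpenAI
from openai_client.completion import completion_structured
from pydantic import BaseModel


class ResearchSummary(BaseModel):
    title: str
    key_points: list[str]


async def main() -> None:
    client = AsyncOpenAI()  # requires OPENAI_API_KEY in the environment
    result = await completion_structured(
        async_client=client,
        messages=[{"role": "user", "content": "Summarize the benefits of SSE transports."}],
        openai_model="gpt-4o",
        response_model=ResearchSummary,
        max_completion_tokens=1024,
    )
    print(result.response.title)
    print(result.metadata["response_duration"])


asyncio.run(main())
```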
--------------------------------------------------------------------------------
/workbench-app/src/components/FrontDoor/Chat/ChatCanvas.tsx:
--------------------------------------------------------------------------------
```typescript
// Copyright (c) Microsoft. All rights reserved.
import debug from 'debug';
import React from 'react';
import { Constants } from '../../../Constants';
import { useChatCanvasController } from '../../../libs/useChatCanvasController';
import { Assistant } from '../../../models/Assistant';
import { Conversation } from '../../../models/Conversation';
import { ConversationFile } from '../../../models/ConversationFile';
import { ConversationParticipant } from '../../../models/ConversationParticipant';
import { useAppSelector } from '../../../redux/app/hooks';
import { AssistantDrawer } from './AssistantDrawer';
import { ConversationDrawer } from './ConversationDrawer';
const log = debug(Constants.debug.root).extend('ChatCanvas');
interface ChatCanvasProps {
conversationAssistants?: Assistant[];
conversationParticipants: ConversationParticipant[];
conversationFiles: ConversationFile[];
conversation: Conversation;
preventAssistantModifyOnParticipantIds?: string[];
readOnly: boolean;
}
export const ChatCanvas: React.FC<ChatCanvasProps> = (props) => {
const {
conversationAssistants,
conversationParticipants,
conversationFiles,
conversation,
preventAssistantModifyOnParticipantIds,
readOnly,
} = props;
const { open, mode, selectedAssistantId } = useAppSelector((state) => state.chatCanvas);
const chatCanvasController = useChatCanvasController();
const [firstRun, setFirstRun] = React.useState(true);
const [selectedAssistant, setSelectedAssistant] = React.useState<Assistant>();
// Set the selected assistant based on the chat canvas state
React.useEffect(() => {
if (!selectedAssistantId || !open || mode !== 'assistant') {
// If the assistant id is not set, the canvas is closed, or the mode is not assistant, clear
// the selected assistant and exit early
setSelectedAssistant(undefined);
return;
}
// If no assistants are in the conversation, unset the selected assistant
if (!conversationAssistants || conversationAssistants.length === 0) {
if (selectedAssistant) setSelectedAssistant(undefined);
// If this is the first run, transition to the conversation mode to add an assistant
if (firstRun) {
log('No assistants in the conversation on first run, transitioning to conversation mode');
chatCanvasController.transitionToState({ open: true, mode: 'conversation' });
setFirstRun(false);
}
return;
}
// Find the assistant that corresponds to the selected assistant id
const assistant = conversationAssistants.find((assistant) => assistant.id === selectedAssistantId);
// If the selected assistant is not found in the conversation, select the first assistant in the conversation
if (!assistant) {
log('Selected assistant not found in conversation, selecting the first assistant in the conversation');
chatCanvasController.setState({ selectedAssistantId: conversationAssistants[0].id });
return;
}
// If the requested assistant is different from the selected assistant, set the selected assistant
if (selectedAssistant?.id !== assistant?.id) {
log(`Setting selected assistant to ${assistant.id}`);
setSelectedAssistant(assistant);
}
}, [conversationAssistants, chatCanvasController, selectedAssistant, firstRun, selectedAssistantId, open, mode]);
// Determine which drawer to open, default to none
const openDrawer = open ? mode : 'none';
return (
<>
<ConversationDrawer
drawerOptions={{
open: openDrawer === 'conversation',
}}
readOnly={readOnly}
conversation={conversation}
conversationParticipants={conversationParticipants}
conversationFiles={conversationFiles}
preventAssistantModifyOnParticipantIds={preventAssistantModifyOnParticipantIds}
/>
<AssistantDrawer
drawerOptions={{
open: openDrawer === 'assistant',
}}
conversation={conversation}
conversationAssistants={conversationAssistants}
selectedAssistant={selectedAssistant}
/>
</>
);
};
```
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/fabric/patterns/tweet/system.md:
--------------------------------------------------------------------------------
```markdown
Title: A Comprehensive Guide to Crafting Engaging Tweets with Emojis
Introduction
Tweets are short messages, limited to 280 characters, that can be shared on the social media platform Twitter. Tweeting is a great way to share your thoughts, engage with others, and build your online presence. If you're new to Twitter and want to start creating your own tweets with emojis, this guide will walk you through the process, from understanding the basics of Twitter to crafting engaging content with emojis.
Understanding Twitter and its purpose
Before you start tweeting, it's essential to understand the platform and its purpose. Twitter is a microblogging and social networking service where users can post and interact with messages known as "tweets." It's a platform that allows you to share your thoughts, opinions, and updates with a global audience.
Creating a Twitter account
To start tweeting, you'll need to create a Twitter account. Visit the Twitter website or download the mobile app and follow the on-screen instructions to sign up. You'll need to provide some basic information, such as your name, email address, and a password.
Familiarizing yourself with Twitter's features
Once you've created your account, take some time to explore Twitter's features. Some key features include:
Home timeline: This is where you'll see tweets from people you follow.
Notifications: This section will show you interactions with your tweets, such as likes, retweets, and new followers.
Mentions: Here, you'll find tweets that mention your username.
Direct messages (DMs): Use this feature to send private messages to other users.
Likes: You can "like" tweets by clicking the heart icon.
Retweets: If you want to share someone else's tweet with your followers, you can retweet it.
Hashtags: Hashtags (#) are used to categorize and search for tweets on specific topics.
Trending topics: This section shows popular topics and hashtags that are currently being discussed on Twitter.
Identifying your target audience and purpose
Before you start tweeting, think about who you want to reach and what you want to achieve with your tweets. Are you looking to share your personal thoughts, promote your business, or engage with a specific community? Identifying your target audience and purpose will help you create more focused and effective tweets.
Crafting engaging content with emojis
Now that you understand the basics of Twitter and have identified your target audience, it's time to start creating your own tweets with emojis. Here are some tips for crafting engaging content with emojis:
Keep it short and sweet: Since tweets are limited to 280 characters, make your message concise and to the point.
Use clear and simple language: Avoid jargon and complex sentences to ensure your message is easily understood by your audience.
Use humor and personality: Adding a touch of humor or showcasing your personality can make your tweets more engaging and relatable.
Include visuals: Tweets with images, videos, or GIFs tend to get more engagement.
Ask questions: Encourage interaction by asking questions or seeking your followers' opinions.
Use hashtags: Incorporate relevant hashtags to increase the visibility of your tweets and connect with users interested in the same topics.
Engage with others: Respond to tweets, retweet interesting content, and participate in conversations to build relationships and grow your audience.
Use emojis: Emojis can help convey emotions and add personality to your tweets. They can also help save space by replacing words with symbols. However, use them sparingly and appropriately, as too many emojis can make your tweets hard to read.
Monitoring and analyzing your tweets' performance
To improve your tweeting skills, it's essential to monitor and analyze the performance of your tweets. Twitter provides analytics that can help you understand how your tweets are performing and what resonates with your audience. Keep an eye on your engagement metrics, such as likes, retweets, and replies, and adjust your content strategy accordingly.
Conclusion
Creating engaging tweets with emojis takes practice and experimentation. By understanding the basics of Twitter, identifying your target audience, and crafting compelling content with emojis, you'll be well on your way to becoming a successful tweeter. Remember to stay authentic, engage with others, and adapt your strategy based on your audience's feedback and preferences.
make this into a tweet and have engaging Emojis!
```
--------------------------------------------------------------------------------
/assistants/prospector-assistant/assistant/artifact_creation_extension/store.py:
--------------------------------------------------------------------------------
```python
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
import yaml
from semantic_workbench_assistant.assistant_app.context import ConversationContext, storage_directory_for_context
from semantic_workbench_assistant.assistant_app.protocol import (
AssistantConversationInspectorStateDataModel,
ReadOnlyAssistantConversationInspectorStateProvider,
)
from .document import Document, DocumentHeader
class DocumentStore:
def __init__(self, store_path: Path):
store_path.mkdir(parents=True, exist_ok=True)
self.store_path = store_path
def _path_for(self, id: str) -> Path:
return self.store_path / f"{id}.json"
def write(self, document: Document) -> None:
path = self._path_for(document.metadata.document_id)
path.write_text(document.model_dump_json(indent=2))
def read(self, id: str) -> Document:
path = self._path_for(id)
try:
return Document.model_validate_json(path.read_text())
except FileNotFoundError:
raise ValueError(f"Document not found: {id}")
@contextmanager
def checkout(self, id: str) -> Iterator[Document]:
document = self.read(id=id)
yield document
self.write(document)
def delete(self, id: str) -> None:
path = self._path_for(id)
path.unlink(missing_ok=True)
def list_documents(self) -> list[DocumentHeader]:
documents = []
for path in self.store_path.glob("*.json"):
document = Document.model_validate_json(path.read_text())
documents.append(DocumentHeader(document_id=document.metadata.document_id, title=document.title))
return sorted(documents, key=lambda document: document.title.lower())
def for_context(context: ConversationContext) -> DocumentStore:
doc_store_root = storage_directory_for_context(context) / "document_store"
return DocumentStore(doc_store_root)
def project_to_yaml(state: dict | list[dict]) -> str:
"""
Project the state to a yaml code block.
"""
state_as_yaml = yaml.dump(state, sort_keys=False)
return f"```yaml\n{state_as_yaml}\n```"
class DocumentWorkspaceInspector(ReadOnlyAssistantConversationInspectorStateProvider):
@property
def display_name(self) -> str:
return "Document Workspace"
@property
def description(self) -> str:
return "Documents in the workspace."
async def get(self, context: ConversationContext) -> AssistantConversationInspectorStateDataModel:
store = for_context(context)
documents: list[dict] = []
for header in store.list_documents():
doc = store.read(header.document_id)
documents.append(doc.model_dump(mode="json"))
projected = project_to_yaml(documents)
return AssistantConversationInspectorStateDataModel(data={"content": projected})
class AllDocumentsInspector(ReadOnlyAssistantConversationInspectorStateProvider):
@property
def display_name(self) -> str:
return "Documents"
@property
def description(self) -> str:
return "All documents."
async def is_enabled(self, context: ConversationContext) -> bool:
return True
async def get(self, context: ConversationContext) -> AssistantConversationInspectorStateDataModel:
store = for_context(context)
headers = store.list_documents()
if not headers:
return AssistantConversationInspectorStateDataModel(data={"content": "No active document."})
toc: list[str] = []
content: list[str] = []
for header in headers:
doc = store.read(header.document_id)
toc.append(f"- [{doc.title}](#{doc.title.lower().replace(' ', '-')})")
content.append(project_document_to_markdown(doc))
tocs = "\n".join(toc)
contents = "\n".join(content)
projection = f"```markdown\nDocuments:\n\n{tocs}\n\n{contents}\n```"
return AssistantConversationInspectorStateDataModel(data={"content": projection})
def project_document_to_markdown(doc: Document) -> str:
"""
Project the document to a markdown code block.
"""
markdown = f"# {doc.title}\n\n***{doc.metadata.purpose}***\n\n"
for section in doc.sections:
markdown += f"{'#' * section.heading_level} {section.section_number} {section.title}\n\n***{section.metadata.purpose}***\n\n{section.content}\n\n"
markdown += "-" * 3 + "\n\n"
return markdown
```
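`DocumentStore.checkout` above is a small read-modify-write helper: it reads the document, yields it to the caller for mutation, and writes it back when the block exits without an exception. Below is a minimal standalone sketch of the same pattern using plain JSON dicts rather than the repository's `Document` model; the `JsonStore` name and record shape are illustrative only.

```python
# Minimal standalone sketch of the read-modify-write "checkout" pattern used by
# DocumentStore.checkout above. JsonStore and the record shape are hypothetical;
# the real store persists pydantic Document models keyed by document_id.
import json
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Iterator


class JsonStore:
    def __init__(self, root: Path) -> None:
        root.mkdir(parents=True, exist_ok=True)
        self.root = root

    def _path_for(self, id: str) -> Path:
        return self.root / f"{id}.json"

    def read(self, id: str) -> dict[str, Any]:
        return json.loads(self._path_for(id).read_text())

    def write(self, id: str, record: dict[str, Any]) -> None:
        self._path_for(id).write_text(json.dumps(record, indent=2))

    @contextmanager
    def checkout(self, id: str) -> Iterator[dict[str, Any]]:
        # Read the record, hand it to the caller for in-place mutation,
        # then persist whatever state it is in when the block exits normally.
        record = self.read(id)
        yield record
        self.write(id, record)


store = JsonStore(Path(tempfile.mkdtemp()))
store.write("doc-1", {"title": "Draft", "sections": []})
with store.checkout("doc-1") as record:
    record["title"] = "Final"
print(store.read("doc-1")["title"])  # -> Final
```

As in the original, changes are only written back if the `with` body completes without raising.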
--------------------------------------------------------------------------------
/mcp-servers/mcp-server-filesystem-edit/mcp_server_filesystem_edit/evals/common.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
import logging
from pathlib import Path
from typing import Literal
import yaml
from mcp_extensions.llm.llm_types import AssistantMessage, MessageT, UserMessage
from mcp_server_filesystem_edit.types import (
CustomContext,
TestCase,
)
WORD_TEST_CASES_PATH = Path(__file__).parents[2] / "data" / "test_cases.yaml"
WORD_TRANSCRIPT_PATH = Path(__file__).parents[2] / "data" / "transcripts"
ATTACHMENTS_DIR = Path(__file__).parents[2] / "data" / "attachments"
logger = logging.getLogger(__name__)
def load_test_cases(
test_case_type: Literal["writing", "comments", "presentation"] | None = None,
) -> list[CustomContext]:
"""
Load test cases and convert them to CustomContext objects.
Args:
test_case_type: Optional filter to only return test cases of a specific type.
If None, returns all test cases.
Returns:
A list of CustomContext objects representing the test cases.
"""
with Path.open(WORD_TEST_CASES_PATH, "r", encoding="utf-8") as f:
test_cases = yaml.safe_load(f)["test_cases"]
test_cases = [TestCase(**test_case) for test_case in test_cases]
if test_case_type is not None:
test_cases = [tc for tc in test_cases if tc.test_case_type == test_case_type]
custom_contexts = []
for test_case in test_cases:
transcript_path = WORD_TRANSCRIPT_PATH / test_case.transcription_file
with Path.open(transcript_path, "r", encoding="utf-8") as f:
transcript_content = f.read()
chat_history = _parse_transcript_to_chat_history(transcript_content)
chat_history.append(UserMessage(content=test_case.next_ask))
additional_context = ""
if test_case.attachments:
additional_context = _load_attachments(test_case.attachments)
document_content = ""
if test_case.open_file:
document_path = ATTACHMENTS_DIR / test_case.open_file
if document_path.exists():
with Path.open(document_path, "r", encoding="utf-8") as f:
document_content = f.read()
else:
logger.warning(f"Document file {test_case.open_file} not found at {document_path}")
custom_contexts.append(
CustomContext(
chat_history=chat_history,
document=document_content,
file_type=test_case.file_type,
additional_context=additional_context,
)
)
return custom_contexts
def _parse_transcript_to_chat_history(transcript_content: str) -> list[MessageT]:
"""
Parses the markdown transcript into a list of UserMessage and AssistantMessage objects.
"""
# Split the transcript by message sections (marked by heading + divider)
sections = transcript_content.split("----------------------------------")
chat_history = []
for section in sections:
section = section.strip()
if not section:
continue
if section.startswith("###"):
lines = section.split("\n")
header = lines[0]
# Skip if there is no content or if it's a notice
if len(lines) <= 1 or "notice:" in lines[2]:
continue
content = "\n".join(lines[1:]).strip()
if not content:
continue
# Determine if this is an assistant message based on header
if "Assistant" in header:
chat_history.append(AssistantMessage(content=content))
else:
chat_history.append(UserMessage(content=content))
return chat_history
def _load_attachments(attachment_filenames: list[str]) -> str:
"""
Load attachment files and format them into the required XML-like format.
Args:
attachment_filenames: List of attachment filenames to load
Returns:
A string containing all formatted attachments
"""
formatted_attachments = []
for filename in attachment_filenames:
file_path = ATTACHMENTS_DIR / filename
if not file_path.exists():
logger.warning(f"Attachment file {filename} not found at {file_path}")
continue
with Path.open(file_path, "r", encoding="utf-8") as f:
content = f.read()
formatted_attachment = f"<ATTACHMENT><FILENAME>{filename}</FILENAME><CONTENT>{content}</CONTENT></ATTACHMENT>"
formatted_attachments.append(formatted_attachment)
return "\n".join(formatted_attachments)
```
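`_load_attachments` wraps each attachment in an XML-like envelope before it is handed to the model. Here is a small standalone sketch of that envelope applied to in-memory strings; the `format_attachments` helper is hypothetical, whereas the real function reads files from `ATTACHMENTS_DIR` and logs a warning for missing ones.

```python
# Illustrative sketch of the <ATTACHMENT> envelope produced by _load_attachments,
# applied to in-memory strings instead of files on disk. format_attachments is a
# hypothetical helper used only for this example.
def format_attachments(named_contents: dict[str, str]) -> str:
    formatted = []
    for filename, content in named_contents.items():
        formatted.append(
            f"<ATTACHMENT><FILENAME>{filename}</FILENAME><CONTENT>{content}</CONTENT></ATTACHMENT>"
        )
    return "\n".join(formatted)


print(format_attachments({"notes.md": "# Meeting notes", "todo.txt": "ship the eval"}))
```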
--------------------------------------------------------------------------------
/libraries/python/chat-context-toolkit/chat_context_toolkit/history/_types.py:
--------------------------------------------------------------------------------
```python
import logging
from dataclasses import dataclass, field
from enum import Enum
from typing import Callable, Protocol, Sequence
from uuid import uuid4
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
)
logger = logging.getLogger("chat_context_toolkit.history")
class TokenCounter(Protocol):
def __call__(self, messages: list[ChatCompletionMessageParam]) -> int:
"""
Returns the token count for the given list of OpenAI messages.
"""
...
OpenAIHistoryMessageParam = (
ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam | ChatCompletionToolMessageParam
)
"""
OpenAI message parameters supported by the history manager.
"""
class NewTurn:
"""
Represents a new turn in the conversation. Re-use the turn between calls to `apply_budget_to_history_messages`
to ensure that all messages generated in the turn are treated as high priority while applying the budget.
This is especially important when executing multiple completions that result in tool calls, so that none of the messages in the turn are abbreviated.
"""
def __init__(self, high_priority_token_count: int = 30_000):
self.high_priority_token_count = high_priority_token_count
self.turn_start_message_id: str | None = None
self.id = uuid4().hex
class HistoryMessageProtocol(Protocol):
@property
def id(self) -> str:
"""The unique identifier for the message."""
...
@property
def openai_message(self) -> OpenAIHistoryMessageParam:
"""
The full OpenAI message representation.
"""
...
@property
def abbreviated_openai_message(self) -> OpenAIHistoryMessageParam | None:
"""
The abbreviated OpenAI message representation, which is used when the message needs to be shortened
to fit within a token budget.
"""
...
class HistoryMessageProvider(Protocol):
"""
Protocol for a message source that returns an ordered list of messages, oldest to most recent.
"""
async def __call__(self) -> Sequence[HistoryMessageProtocol]: ...
@dataclass
class TokenCounts:
"""
Data class to hold token counts for messages.
"""
openai_message_token_counts: Sequence[int] = field(default_factory=list)
abbreviated_openai_message_token_counts: Sequence[int] = field(default_factory=list)
class BudgetDecision(Enum):
"""
Enum representing the decision made by the budget manager regarding message processing.
"""
original = "original"
abbreviated = "abbreviated"
omitted = "omitted"
@dataclass
class MessageCollection:
"""
A collection of messages with their associated token counts and budget decisions.
"""
messages: Sequence[HistoryMessageProtocol]
token_counts: TokenCounts
budget_decisions: Sequence[BudgetDecision]
class HistoryMessage(HistoryMessageProtocol):
"""
A HistoryMessageProtocol implementation that will lazily abbreviate the OpenAI message
when the `abbreviated_openai_message` property is accessed for the first time.
"""
def __init__(
self,
id: str,
openai_message: OpenAIHistoryMessageParam,
abbreviator: Callable[[], OpenAIHistoryMessageParam | None] | None = None,
) -> None:
self._id = id
self._openai_message = openai_message
self._abbreviated = False
self._abbreviator = abbreviator
@property
def id(self) -> str:
return self._id
@property
def openai_message(self) -> OpenAIHistoryMessageParam:
return self._openai_message
@property
def abbreviated_openai_message(self) -> OpenAIHistoryMessageParam | None:
if self._abbreviated:
return self._abbreviated_openai_message
self._abbreviated_openai_message = self.openai_message
if self._abbreviator:
self._abbreviated_openai_message = self._abbreviator()
self._abbreviated = True
return self._abbreviated_openai_message
@dataclass
class MessageHistoryBudgetResult:
"""
Result of applying a budget to a message history.
"""
messages: Sequence[OpenAIHistoryMessageParam]
"""The messages that fit within the budget after applying the budget decisions."""
oldest_message_id: str | None
"""The ID of the oldest message that fit within the budget, or None if no messages fit within the budget."""
```
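`HistoryMessage` defers abbreviation until `abbreviated_openai_message` is first read, then caches the result (including `None`, which signals that the message should be omitted). A short sketch follows, assuming these names are importable from the module path above; the truncating abbreviator and its cut-off length are illustrative.

```python
# Sketch of lazy abbreviation with HistoryMessage. The import path mirrors the file
# location above (the package may re-export these names elsewhere); the 20-character
# truncation is illustrative, not part of the library.
from openai.types.chat import ChatCompletionUserMessageParam

from chat_context_toolkit.history._types import HistoryMessage, OpenAIHistoryMessageParam

full_message: ChatCompletionUserMessageParam = {
    "role": "user",
    "content": "A long user message that we may want to shorten when the token budget is tight.",
}


def truncate() -> OpenAIHistoryMessageParam | None:
    # Return a shortened stand-in; returning None instead would mean "omit when abbreviating".
    return {"role": "user", "content": str(full_message["content"])[:20] + "..."}


message = HistoryMessage(id="msg-1", openai_message=full_message, abbreviator=truncate)

print(message.openai_message["content"])  # full text
abbreviated = message.abbreviated_openai_message  # abbreviator runs once here and is cached
print(abbreviated["content"] if abbreviated else None)
```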
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/skills/fabric/patterns/summarize_rpg_session/system.md:
--------------------------------------------------------------------------------
```markdown
# IDENTITY and PURPOSE
You are an expert summarizer of in-person personal role-playing game sessions. You take the transcript of a conversation between friends, extract the part of the conversation that is about the role-playing game, and turn it into the summary sections below.
# NOTES
All INPUT provided came from a personal game with friends, and all rights are given to produce the summary.
# STEPS
Read the whole thing and understand the back and forth between characters, paying special attention to the significant events that happened, such as drama, combat, etc.
# OUTPUT
Create the following output sections:
SUMMARY:
A 50 word summary of what happened in a heroic storytelling style.
KEY EVENTS:
A numbered list of 5-15 of the most significant events of the session, capped at 20 words apiece.
KEY COMBAT:
5-15 bullets describing the combat events that happened in the session.
COMBAT STATS:
List the following stats for the session:
Number of Combat Rounds:
Total Damage by All Players:
Total Damage by Each Enemy:
Damage Done by Each Character:
List of Player Attacks Executed:
List of Player Spells Cast:
COMBAT MVP:
List the most heroic character in terms of combat for the session, and give an explanation of how they got the MVP title, including dramatic things they did from the transcript.
ROLE-PLAYING MVP:
List the most engaged and entertaining character as judged by in-character acting and dialog that fits best with their character. Give examples.
KEY DISCUSSIONS:
5-15 bullets of the key discussions the players had in-game, in 15-25 words per bullet.
REVEALED CHARACTER FLAWS:
List 10-20 character flaws of the main characters revealed during this session, each of 30 words or less.
KEY CHARACTER CHANGES:
Give 10-20 bullets of key changes that happened to each character, how it shows they're evolving and adapting to events in the world.
QUOTES:
Meaningful Quotes:
Give 10-15 of the quotes that were most meaningful for the action and the story.
HUMOR:
Give 10-15 things said by characters that were the funniest or most amusing or entertaining.
4TH WALL:
Give 10-15 of the most entertaining comments about the game from the transcript made by the players, but not their characters.
WORLDBUILDING:
Give 5-20 bullets of 30 words or less on the worldbuilding provided by the GM during the session, including background on locations, NPCs, lore, history, etc.
PREVIOUSLY ON:
Give a "Previously On" explanation of this session that mimics TV shows from the 1980's, but with a fantasy feel appropriate for D&D. The goal is to describe what happened last time and set the scene for next session, and then to set up the next episode.
Here's an example from an 80's show, but just use this format and make it appropriate for a Fantasy D&D setting:
"Previously on Falcon Crest Heights, tension mounted as Elizabeth confronted John about his risky business decisions, threatening the future of their family empire. Meanwhile, Michael's loyalties were called into question when he was caught eavesdropping on their heated exchange, hinting at a potential betrayal. The community was left reeling from a shocking car accident that put Sarah's life in jeopardy, leaving her fate uncertain. Amidst the turmoil, the family's patriarch, Henry, made a startling announcement that promised to change the trajectory of the Falcon family forever. Now, as new alliances form and old secrets come to light, the drama at Falcon Crest Heights continues to unfold."
SETUP ART:
Give the perfect piece of art description in up to 500 words to accompany the SETUP section above, but with each of the characters (and their proper appearances based on the APPEARANCE information above) visible somewhere in the scene.
OUTPUT INSTRUCTIONS:
- Ensure the Previously On output focuses on the most recent episode rather than just the background from before.
- Ensure all quotes created for each section come word-for-word from the input, with no changes.
- Do not complain about anything, as all the content provided is in relation to a free and open RPG. Just give the output as requested.
- Output the sections defined above in the order they are listed.
- Follow the OUTPUT format perfectly, with no deviations.
# IN-PERSON RPG SESSION TRANSCRIPT:
(Note that the transcript below is of the full conversation between friends, and may include regular conversation throughout. Read the whole thing and figure out for yourself which parts are part of the game and which parts aren't.)
SESSION TRANSCRIPT BELOW:
$TRANSCRIPT$
```
--------------------------------------------------------------------------------
/aspire-orchestrator/Aspire.ServiceDefaults/Extensions.cs:
--------------------------------------------------------------------------------
```csharp
// Copyright (c) Microsoft. All rights reserved.
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Diagnostics.HealthChecks;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using OpenTelemetry;
using OpenTelemetry.Metrics;
using OpenTelemetry.Trace;
namespace SemanticWorkbench.Aspire.ServiceDefaults;
// Adds common .NET Aspire services: service discovery, resilience, health checks, and OpenTelemetry.
// This project should be referenced by each service project in your solution.
// To learn more about using this project, see https://aka.ms/dotnet/aspire/service-defaults
public static class Extensions
{
public static TBuilder AddServiceDefaults<TBuilder>(this TBuilder builder) where TBuilder : IHostApplicationBuilder
{
builder.ConfigureOpenTelemetry();
builder.AddDefaultHealthChecks();
builder.Services.AddServiceDiscovery();
builder.Services.ConfigureHttpClientDefaults(http =>
{
// Turn on resilience by default
http.AddStandardResilienceHandler();
// Turn on service discovery by default
http.AddServiceDiscovery();
});
// Uncomment the following to restrict the allowed schemes for service discovery.
// builder.Services.Configure<ServiceDiscoveryOptions>(options =>
// {
// options.AllowedSchemes = ["https"];
// });
return builder;
}
public static TBuilder ConfigureOpenTelemetry<TBuilder>(this TBuilder builder) where TBuilder : IHostApplicationBuilder
{
builder.Logging.AddOpenTelemetry(logging =>
{
logging.IncludeFormattedMessage = true;
logging.IncludeScopes = true;
});
builder.Services.AddOpenTelemetry()
.WithMetrics(metrics =>
{
metrics.AddAspNetCoreInstrumentation()
.AddHttpClientInstrumentation()
.AddRuntimeInstrumentation();
})
.WithTracing(tracing =>
{
tracing.AddSource(builder.Environment.ApplicationName)
.AddAspNetCoreInstrumentation()
// Uncomment the following line to enable gRPC instrumentation (requires the OpenTelemetry.Instrumentation.GrpcNetClient package)
//.AddGrpcClientInstrumentation()
.AddHttpClientInstrumentation();
});
builder.AddOpenTelemetryExporters();
return builder;
}
private static TBuilder AddOpenTelemetryExporters<TBuilder>(this TBuilder builder) where TBuilder : IHostApplicationBuilder
{
var useOtlpExporter = !string.IsNullOrWhiteSpace(builder.Configuration["OTEL_EXPORTER_OTLP_ENDPOINT"]);
if (useOtlpExporter)
{
builder.Services.AddOpenTelemetry().UseOtlpExporter();
}
// Uncomment the following lines to enable the Azure Monitor exporter (requires the Azure.Monitor.OpenTelemetry.AspNetCore package)
//if (!string.IsNullOrEmpty(builder.Configuration["APPLICATIONINSIGHTS_CONNECTION_STRING"]))
//{
// builder.Services.AddOpenTelemetry()
// .UseAzureMonitor();
//}
return builder;
}
public static TBuilder AddDefaultHealthChecks<TBuilder>(this TBuilder builder) where TBuilder : IHostApplicationBuilder
{
builder.Services.AddHealthChecks()
// Add a default liveness check to ensure app is responsive
.AddCheck("self", () => HealthCheckResult.Healthy(), ["live"]);
return builder;
}
public static WebApplication MapDefaultEndpoints(this WebApplication app)
{
// Adding health checks endpoints to applications in non-development environments has security implications.
// See https://aka.ms/dotnet/aspire/healthchecks for details before enabling these endpoints in non-development environments.
if (app.Environment.IsDevelopment())
{
// All health checks must pass for app to be considered ready to accept traffic after starting
app.MapHealthChecks("/health");
// Only health checks tagged with the "live" tag must pass for app to be considered alive
app.MapHealthChecks("/alive", new HealthCheckOptions
{
Predicate = r => r.Tags.Contains("live")
});
}
return app;
}
}
```
--------------------------------------------------------------------------------
/assistants/project-assistant/assistant/agentic/detect_information_request_needs.py:
--------------------------------------------------------------------------------
```python
"""
Analysis and detection functions for the knowledge transfer assistant.
This module contains functions for analyzing messages and knowledge transfer content
to detect specific conditions, such as information request needs.
"""
import json
from typing import Any
import openai_client
from openai.types.chat import ChatCompletionMessageParam
from semantic_workbench_assistant.assistant_app import ConversationContext
from assistant.config import assistant_config
from assistant.logging import convert_to_serializable, logger
async def detect_information_request_needs(context: ConversationContext, message: str) -> dict[str, Any]:
"""
Analyze a user message in the context of recent chat history to detect potential information request needs.
Uses an LLM for sophisticated detection.
Args:
context: The conversation context
message: The user message to analyze
Returns:
Dict with detection results including is_information_request, confidence, and other metadata
"""
debug: dict[str, Any] = {
"message": message,
"context": convert_to_serializable(context.to_dict()),
}
config = await assistant_config.get(context.assistant)
# Get chat history
chat_history = []
try:
messages_response = await context.get_messages(limit=10)
if messages_response and messages_response.messages:
for msg in messages_response.messages:
sender_name = "Team Member"
if msg.sender.participant_id == context.assistant.id:
sender_name = "Assistant"
role = "user" if sender_name == "Team Member" else "assistant"
chat_history.append({"role": role, "content": f"{sender_name}: {msg.content}"})
chat_history.reverse()
except Exception as e:
logger.warning(f"Could not retrieve chat history: {e}")
try:
async with openai_client.create_client(config.service_config) as client:
messages: list[ChatCompletionMessageParam] = [
{
"role": "system",
"content": config.prompt_config.detect_information_request_needs,
}
]
if chat_history:
for history_msg in chat_history:
messages.append({"role": history_msg["role"], "content": history_msg["content"]})
# Add the current message for analysis - explicitly mark as the latest message
messages.append({
"role": "user",
"content": f"Latest message from Team Member: {message}",
})
completion_args = {
"model": "gpt-3.5-turbo",
"messages": messages,
"response_format": {"type": "json_object"},
"max_tokens": 500,
"temperature": 0.2, # Low temperature for more consistent analysis
}
debug["completion_args"] = openai_client.serializable(completion_args)
response = await client.chat.completions.create(
**completion_args,
)
debug["completion_response"] = response.model_dump()
# Extract and parse the response
if response and response.choices and response.choices[0].message.content:
try:
result = json.loads(response.choices[0].message.content)
# Add the original message for reference
result["original_message"] = message
return result
except json.JSONDecodeError:
logger.warning(f"Failed to parse JSON from LLM response: {response.choices[0].message.content}")
return {
"is_information_request": False,
"reason": "Failed to parse LLM response",
"confidence": 0.0,
"debug": debug,
}
else:
logger.warning("Empty response from LLM for information request detection")
return {
"is_information_request": False,
"reason": "Empty response from LLM",
"confidence": 0.0,
"debug": debug,
}
except Exception as e:
logger.exception(f"Error in LLM-based information request detection: {e}")
debug["error"] = str(e)
return {
"is_information_request": False,
"reason": f"LLM detection error: {e!s}",
"confidence": 0.0,
"debug": debug,
}
```
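The detector is written to fail soft: every error path returns a dict with `is_information_request: False` and `confidence: 0.0`, so callers can gate on both keys without special-casing failures. A hypothetical caller sketch follows; the 0.7 threshold and the wrapper function are illustrative, and the import path simply mirrors the file location above.

```python
# Hypothetical caller, assuming the module import path mirrors the file location above.
# Only the result keys shown in the function's fallback returns (is_information_request,
# confidence, reason) are relied on; the 0.7 threshold is illustrative.
from typing import Any

from semantic_workbench_assistant.assistant_app import ConversationContext

from assistant.agentic.detect_information_request_needs import detect_information_request_needs


async def maybe_flag_information_request(context: ConversationContext, message: str) -> dict[str, Any] | None:
    result = await detect_information_request_needs(context, message)
    if result.get("is_information_request") and result.get("confidence", 0.0) >= 0.7:
        return result  # caller decides how to surface the detected request
    return None
```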
--------------------------------------------------------------------------------
/libraries/python/content-safety/content_safety/evaluators/openai_moderations/evaluator.py:
--------------------------------------------------------------------------------
```python
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import logging
from typing import Any
import openai
from semantic_workbench_assistant.assistant_app import (
ContentSafetyEvaluation,
ContentSafetyEvaluationResult,
ContentSafetyEvaluator,
)
from .config import OpenAIContentSafetyEvaluatorConfig
logger = logging.getLogger(__name__)
#
# region Evaluator implementation
#
class OpenAIContentSafetyEvaluator(ContentSafetyEvaluator):
"""
An evaluator that uses the OpenAI moderations endpoint to evaluate content safety.
"""
def __init__(self, config: OpenAIContentSafetyEvaluatorConfig) -> None:
self.config = config
async def evaluate(self, content: str | list[str]) -> ContentSafetyEvaluation:
"""
Evaluate the content for safety using the OpenAI moderations endpoint.
"""
# if the content is a string, convert it to a list
content_list = content if isinstance(content, list) else [content]
# create a list of items to send to the OpenAI moderations endpoint
# each item must be less than the maximum size
items = []
for content_item in content_list:
# if the content item is too large, split it into smaller items
if len(content_item) > self.config.max_item_size:
for i in range(0, len(content_item), self.config.max_item_size):
items.append(content_item[i : i + self.config.max_item_size]) # noqa: E203
else:
items.append(content_item)
# now break it down into batches of the maximum size
batches = [
items[i : i + self.config.max_item_count] # noqa: E203
for i in range(0, len(items), self.config.max_item_count)
]
# initialize the result as pass
result = ContentSafetyEvaluationResult.Pass
note: str | None = None
metadata: dict[str, Any] = {
"content_length": sum(len(item) for item in items),
"max_item_size": self.config.max_item_size,
"max_item_count": self.config.max_item_count,
"batches": [],
}
# evaluate each batch of content
results = await asyncio.gather(
*[self._evaluate_batch(batch) for batch in batches],
)
# combine the results of evaluating each batch
for batch_result in results:
# add the batch evaluation to the metadata
metadata["batches"].append(batch_result.metadata)
# if any batch result is a fail, the overall result is a fail
if batch_result.result == ContentSafetyEvaluationResult.Fail:
result = ContentSafetyEvaluationResult.Fail
note = batch_result.note
break
# if any batch result is a warn, the overall result is a warn
if batch_result.result == ContentSafetyEvaluationResult.Warn:
result = ContentSafetyEvaluationResult.Warn
note = batch_result.note
# return the evaluation result
return ContentSafetyEvaluation(
result=result,
note=note,
metadata=metadata,
)
async def _evaluate_batch(self, input: list[str]) -> ContentSafetyEvaluation:
"""
Evaluate a batch of content for safety using the OpenAI moderations endpoint.
"""
# send the content to the OpenAI moderations endpoint for evaluation
try:
moderation_response = await openai.AsyncOpenAI(
api_key=self.config.openai_api_key,
).moderations.create(
input=input,
)
except Exception as e:
# if there is an error, return a fail result with the error message
return ContentSafetyEvaluation(
result=ContentSafetyEvaluationResult.Fail,
note=f"OpenAI moderations endpoint error: {e}",
)
# if any of the results are flagged, the overall result is a fail
result = (
ContentSafetyEvaluationResult.Fail
if any(result.flagged for result in moderation_response.results)
else ContentSafetyEvaluationResult.Pass
)
# add the moderation response to the metadata
metadata = {**moderation_response.model_dump(), "content_length": sum(len(chunk) for chunk in input)}
# return the evaluation result
return ContentSafetyEvaluation(
result=result,
metadata=metadata,
)
```
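The evaluator splits oversized items and batches requests before calling the moderations endpoint, then folds the per-batch results into a single pass/warn/fail outcome. Below is a usage sketch, assuming `OpenAIContentSafetyEvaluatorConfig` accepts the field names read above (`openai_api_key`, `max_item_size`, `max_item_count`) as constructor arguments; the concrete config model lives in `.config` and is not shown here.

```python
# Usage sketch, assuming the config is a pydantic-style model whose constructor takes
# the attributes the evaluator reads above; the size/count values are illustrative.
import asyncio
import os

from content_safety.evaluators.openai_moderations.config import OpenAIContentSafetyEvaluatorConfig
from content_safety.evaluators.openai_moderations.evaluator import OpenAIContentSafetyEvaluator


async def main() -> None:
    config = OpenAIContentSafetyEvaluatorConfig(
        openai_api_key=os.environ["OPENAI_API_KEY"],
        max_item_size=4_000,
        max_item_count=32,
    )
    evaluator = OpenAIContentSafetyEvaluator(config)
    evaluation = await evaluator.evaluate(["hello there", "a second chunk of content"])
    print(evaluation.result, evaluation.note)


asyncio.run(main())
```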
--------------------------------------------------------------------------------
/libraries/python/skills/skill-library/skill_library/routine_stack.py:
--------------------------------------------------------------------------------
```python
# skill_library/routine_stack.py
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, List
from uuid import uuid4
from assistant_drive import Drive, IfDriveFileExistsBehavior
from pydantic import BaseModel, Field
class RoutineFrame(BaseModel):
name: str
id: str = Field(default_factory=lambda: str(uuid4()))
state: dict[str, Any] = Field(default_factory=dict)
class RoutineStackData(BaseModel):
frames: List[RoutineFrame]
model_config = {
"arbitrary_types_allowed": True,
}
STACK_FILENAME = "routine_stack.json"
class RoutineStack:
"""
Manages state for nested routines.
The RoutineStack provides routines with isolated state storage as they run,
allowing nested routines to maintain their own state without interfering
with each other. Each routine execution gets a frame on the stack that
contains:
- The routine's name for tracking
- A state dictionary that the routine can use to store data between steps
The stack behaves like a traditional call stack:
- push() creates a new frame when a routine starts
- pop() removes the frame when the routine completes
- peek() allows checking the current frame
- get/set_current_state provide access to the current routine's state
State is persisted between messages/steps in the conversation, allowing
routines to maintain context even when paused waiting for user input. When
routines call other routines, each gets its own isolated state frame.
"""
def __init__(self, drive: Drive):
self.drive = drive
async def get(self) -> List[RoutineFrame]:
try:
stack = self.drive.read_model(RoutineStackData, STACK_FILENAME)
except FileNotFoundError:
stack = RoutineStackData(frames=[])
await self.set(stack.frames)
return stack.frames
async def push(self, name: str) -> str:
stack = await self.get()
frame = RoutineFrame(name=name)
stack.append(frame)
await self.set(stack)
return frame.id
async def pop(self) -> RoutineFrame | None:
stack = await self.get()
if not stack:
return None
frame = stack.pop()
await self.set(stack)
return frame
async def peek(self) -> RoutineFrame | None:
stack = await self.get()
if not stack:
return None
return stack[-1]
async def update(self, frame: RoutineFrame) -> None:
"""Updates the top frame in the stack."""
stack = await self.get()
stack[-1] = frame
await self.set(stack)
async def set(self, frames: List[RoutineFrame]) -> None:
"""Replaces the stack with the given list of frames."""
self.drive.write_model(
RoutineStackData(frames=frames),
STACK_FILENAME,
if_exists=IfDriveFileExistsBehavior.OVERWRITE,
)
async def get_current_state(self) -> dict[str, Any]:
"""Returns the state of the current routine."""
frame = await self.peek()
if frame:
return frame.state
return {}
async def set_current_state(self, state: dict[str, Any]) -> None:
"""Updates the state of the current routine."""
frame = await self.peek()
if frame:
frame.state = state
await self.update(frame)
async def get_current_state_key(self, key: str) -> Any:
state = await self.get_current_state()
return state.get(key)
async def set_current_state_key(self, key: str, value: Any) -> None:
state = await self.get_current_state()
state[key] = value
await self.set_current_state(state)
async def clear(self) -> None:
await self.set([])
async def length(self) -> int:
return len(await self.get())
async def string(self) -> str:
return str([f"{frame.name}.{frame.id}" for frame in await self.get()])
@asynccontextmanager
async def stack_frame_state(self) -> AsyncGenerator[dict[str, Any], None]:
"""
A context manager that allows you to get and set the state of the
current routine being run. This is useful for storing and retrieving
information that is needed across multiple steps of a routine.
Example:
```
async with context.stack_frame_state() as state:
state["key"] = "value"
```
"""
state = await self.get_current_state()
yield state
await self.set_current_state(state)
```
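A typical frame lifecycle is push, mutate state via `stack_frame_state`, then pop. The sketch below takes an already-constructed `assistant_drive.Drive`, since the drive's constructor is not shown in this file; the `step_count` key is illustrative.

```python
# Sketch of the frame lifecycle, written as a helper that accepts an existing Drive
# (how the Drive itself is constructed is not shown here). The "step_count" key is
# illustrative state stored in the current frame.
from assistant_drive import Drive

from skill_library.routine_stack import RoutineStack


async def run_with_frame(drive: Drive) -> None:
    stack = RoutineStack(drive)

    frame_id = await stack.push("demo_routine")  # new isolated state frame
    try:
        async with stack.stack_frame_state() as state:
            # Mutations here are persisted when the context manager exits.
            state["step_count"] = state.get("step_count", 0) + 1
    finally:
        await stack.pop()  # remove the frame when the routine completes

    print(f"ran frame {frame_id}; stack depth is now {await stack.length()}")
```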