#
tokens: 49255/50000 85/194 files (page 1/10)
lines: off (toggle) GitHub
raw markdown copy
This is page 1 of 10. Use http://codebase.md/sooperset/mcp-atlassian?page={x} to view the full context.

# Directory Structure

```
├── .devcontainer
│   ├── devcontainer.json
│   ├── Dockerfile
│   ├── post-create.sh
│   └── post-start.sh
├── .dockerignore
├── .env.example
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   └── feature_request.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── docker-publish.yml
│       ├── lint.yml
│       ├── publish.yml
│       ├── stale.yml
│       └── tests.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AGENTS.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── pyproject.toml
├── README.md
├── scripts
│   ├── oauth_authorize.py
│   └── test_with_real_data.sh
├── SECURITY.md
├── smithery.yaml
├── src
│   └── mcp_atlassian
│       ├── __init__.py
│       ├── confluence
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── comments.py
│       │   ├── config.py
│       │   ├── constants.py
│       │   ├── labels.py
│       │   ├── pages.py
│       │   ├── search.py
│       │   ├── spaces.py
│       │   ├── users.py
│       │   ├── utils.py
│       │   └── v2_adapter.py
│       ├── exceptions.py
│       ├── jira
│       │   ├── __init__.py
│       │   ├── attachments.py
│       │   ├── boards.py
│       │   ├── client.py
│       │   ├── comments.py
│       │   ├── config.py
│       │   ├── constants.py
│       │   ├── epics.py
│       │   ├── fields.py
│       │   ├── formatting.py
│       │   ├── issues.py
│       │   ├── links.py
│       │   ├── projects.py
│       │   ├── protocols.py
│       │   ├── search.py
│       │   ├── sprints.py
│       │   ├── transitions.py
│       │   ├── users.py
│       │   └── worklog.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── confluence
│       │   │   ├── __init__.py
│       │   │   ├── comment.py
│       │   │   ├── common.py
│       │   │   ├── label.py
│       │   │   ├── page.py
│       │   │   ├── search.py
│       │   │   ├── space.py
│       │   │   └── user_search.py
│       │   ├── constants.py
│       │   └── jira
│       │       ├── __init__.py
│       │       ├── agile.py
│       │       ├── comment.py
│       │       ├── common.py
│       │       ├── issue.py
│       │       ├── link.py
│       │       ├── project.py
│       │       ├── search.py
│       │       ├── version.py
│       │       ├── workflow.py
│       │       └── worklog.py
│       ├── preprocessing
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── confluence.py
│       │   └── jira.py
│       ├── servers
│       │   ├── __init__.py
│       │   ├── confluence.py
│       │   ├── context.py
│       │   ├── dependencies.py
│       │   ├── jira.py
│       │   └── main.py
│       └── utils
│           ├── __init__.py
│           ├── date.py
│           ├── decorators.py
│           ├── env.py
│           ├── environment.py
│           ├── io.py
│           ├── lifecycle.py
│           ├── logging.py
│           ├── oauth_setup.py
│           ├── oauth.py
│           ├── ssl.py
│           ├── tools.py
│           └── urls.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── fixtures
│   │   ├── __init__.py
│   │   ├── confluence_mocks.py
│   │   └── jira_mocks.py
│   ├── integration
│   │   ├── conftest.py
│   │   ├── README.md
│   │   ├── test_authentication.py
│   │   ├── test_content_processing.py
│   │   ├── test_cross_service.py
│   │   ├── test_mcp_protocol.py
│   │   ├── test_proxy.py
│   │   ├── test_real_api.py
│   │   ├── test_ssl_verification.py
│   │   ├── test_stdin_monitoring_fix.py
│   │   └── test_transport_lifecycle.py
│   ├── README.md
│   ├── test_preprocessing.py
│   ├── test_real_api_validation.py
│   ├── unit
│   │   ├── confluence
│   │   │   ├── __init__.py
│   │   │   ├── conftest.py
│   │   │   ├── test_client_oauth.py
│   │   │   ├── test_client.py
│   │   │   ├── test_comments.py
│   │   │   ├── test_config.py
│   │   │   ├── test_constants.py
│   │   │   ├── test_custom_headers.py
│   │   │   ├── test_labels.py
│   │   │   ├── test_pages.py
│   │   │   ├── test_search.py
│   │   │   ├── test_spaces.py
│   │   │   ├── test_users.py
│   │   │   ├── test_utils.py
│   │   │   └── test_v2_adapter.py
│   │   ├── jira
│   │   │   ├── conftest.py
│   │   │   ├── test_attachments.py
│   │   │   ├── test_boards.py
│   │   │   ├── test_client_oauth.py
│   │   │   ├── test_client.py
│   │   │   ├── test_comments.py
│   │   │   ├── test_config.py
│   │   │   ├── test_constants.py
│   │   │   ├── test_custom_headers.py
│   │   │   ├── test_epics.py
│   │   │   ├── test_fields.py
│   │   │   ├── test_formatting.py
│   │   │   ├── test_issues_markdown.py
│   │   │   ├── test_issues.py
│   │   │   ├── test_links.py
│   │   │   ├── test_projects.py
│   │   │   ├── test_protocols.py
│   │   │   ├── test_search.py
│   │   │   ├── test_sprints.py
│   │   │   ├── test_transitions.py
│   │   │   ├── test_users.py
│   │   │   └── test_worklog.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   ├── conftest.py
│   │   │   ├── test_base_models.py
│   │   │   ├── test_confluence_models.py
│   │   │   ├── test_constants.py
│   │   │   └── test_jira_models.py
│   │   ├── servers
│   │   │   ├── __init__.py
│   │   │   ├── test_confluence_server.py
│   │   │   ├── test_context.py
│   │   │   ├── test_dependencies.py
│   │   │   ├── test_jira_server.py
│   │   │   └── test_main_server.py
│   │   ├── test_exceptions.py
│   │   ├── test_main_transport_selection.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── test_custom_headers.py
│   │       ├── test_date.py
│   │       ├── test_decorators.py
│   │       ├── test_env.py
│   │       ├── test_environment.py
│   │       ├── test_io.py
│   │       ├── test_lifecycle.py
│   │       ├── test_logging.py
│   │       ├── test_masking.py
│   │       ├── test_oauth_setup.py
│   │       ├── test_oauth.py
│   │       ├── test_ssl.py
│   │       ├── test_tools.py
│   │       └── test_urls.py
│   └── utils
│       ├── __init__.py
│       ├── assertions.py
│       ├── base.py
│       ├── factories.py
│       └── mocks.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------

```
# Version control
.git
.gitignore
.github

# Virtual environments
.venv*
venv*
env*

# Python bytecode/cache
__pycache__/
*.py[cod]
*$py.class
.pytest_cache
.coverage
.mypy_cache
.ruff_cache

# Build artifacts
dist/
build/
*.egg-info/

# IDE and editor files
.vscode/
.idea/
*.swp
*.swo
.DS_Store

# Environment variables and secrets
.env*
*.env
*.key

# Logs
*.log

# Docker related
Dockerfile*
docker-compose*
.dockerignore

# Documentation
docs/
*.md
!README.md

# Temporary and backup files
*.tmp
*.bak
*.backup

```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Environments
.env
.env.*
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# uv/uvx specific
.uv/
.uvx/
.venv.uv/

# IDEs and editors
.idea/
.vscode/
*.swp
*.swo
*~

# OS specific
.DS_Store
Thumbs.db

# Debug and local development
debug/
local/

# Credentials
.pypirc

# Pytest
.pytest_cache/
.coverage/
.coverage

# Pre-commit
.pre-commit-config.yaml.cache

# debug
playground/

# cursor
.cursor*

# Claude
.claude/

```

--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------

```yaml
repos:
-   repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
    -   id: trailing-whitespace
    -   id: end-of-file-fixer
    -   id: check-yaml
    -   id: check-added-large-files
    -   id: check-toml
    -   id: debug-statements

-   repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.9.7
    hooks:
    -   id: ruff-format
    -   id: ruff
        args: [
            "--fix",
            "--exit-non-zero-on-fix",
            "--ignore=BLE001,EM102,FBT001,FBT002,E501,F841,S112,S113,B904"
        ]

-   repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.8.0
    hooks:
    -   id: mypy
        # TODO: Fix these type errors in future PRs:
        # - Union-attr errors in server.py (None checks needed)
        # - Index error in jira_mocks.py (tests/fixtures/jira_mocks.py:421)
        # - Assignment type errors in jira.py (str to int assignments)
        # - Unreachable statements in preprocessing.py and jira.py
        args: [
            "--ignore-missing-imports",
            "--no-strict-optional",
            "--disable-error-code=index",
            "--disable-error-code=unreachable",
            "--disable-error-code=assignment",
            "--disable-error-code=arg-type",
            "--disable-error-code=return-value",
            "--disable-error-code=has-type",
            "--disable-error-code=no-any-return",
            "--disable-error-code=misc",
            "--disable-error-code=var-annotated",
            "--disable-error-code=no-untyped-def",
            "--disable-error-code=annotation-unchecked",
        ]
        additional_dependencies: [
            'types-beautifulsoup4',
            'types-requests',
            'types-setuptools',
            'types-urllib3',
            'types-cachetools',
            'atlassian-python-api',
            'beautifulsoup4',
            'httpx',
            'python-dotenv',
            'markdownify',
            'python-dateutil',
            'types-python-dateutil',
        ]

```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
# MCP-ATLASSIAN CONFIGURATION EXAMPLE
# -------------------------------------
# Copy this file to .env and fill in your details.
# - Do not use double quotes for string values in this file, unless the value itself contains spaces.

# =============================================
# ESSENTIAL ATLASSIAN INSTANCE URLS
# =============================================
# REQUIRED: Replace with your Atlassian instance URLs.
# Example Cloud: https://your-company.atlassian.net
# Example Server/DC: https://jira.your-internal-domain.com
JIRA_URL=https://your-company.atlassian.net
# Example Cloud: https://your-company.atlassian.net/wiki
# Example Server/DC: https://confluence.your-internal-domain.com
CONFLUENCE_URL=https://your-company.atlassian.net/wiki

# =============================================
# AUTHENTICATION: CHOOSE ONE METHOD PER PRODUCT (Jira/Confluence)
# =============================================
# mcp-atlassian will attempt to auto-detect the auth method based on the credentials you provide below.
# Precedence for auto-detection:
#   1. Username/API Token (Basic Auth) - Recommended for Cloud
#   2. Personal Access Token (if URL is Server/DC and PAT is set)
#   3. OAuth BYOT (if ATLASSIAN_OAUTH_CLOUD_ID and ATLASSIAN_OAUTH_ACCESS_TOKEN are set)
#   4. OAuth Standard (if OAuth Client ID/Secret are set)

# --- METHOD 1: API TOKEN (Recommended for Atlassian Cloud) ---
# Get API tokens from: https://id.atlassian.com/manage-profile/security/api-tokens
# This is the simplest and most reliable authentication method for Cloud deployments.
#[email protected]
#JIRA_API_TOKEN=your_jira_api_token

#[email protected]
#CONFLUENCE_API_TOKEN=your_confluence_api_token

# --- METHOD 2: PERSONAL ACCESS TOKEN (PAT) (Server / Data Center - Recommended) ---
# Create PATs in your Jira/Confluence profile settings (usually under "Personal Access Tokens").
#JIRA_PERSONAL_TOKEN=your_jira_personal_access_token

#CONFLUENCE_PERSONAL_TOKEN=your_confluence_personal_access_token

# --- METHOD 3: USERNAME/PASSWORD (Server / Data Center - Uses Basic Authentication) ---
#JIRA_USERNAME=your_server_dc_username
#JIRA_API_TOKEN=your_jira_server_dc_password # For Server/DC Basic Auth, API_TOKEN holds the actual password

#CONFLUENCE_USERNAME=your_server_dc_username
#CONFLUENCE_API_TOKEN=your_confluence_server_dc_password

# --- METHOD 4: OAUTH 2.0 (Advanced - Atlassian Cloud Only) ---
# OAuth 2.0 provides enhanced security but is more complex to set up.
# For most users, Method 1 (API Token) is simpler and sufficient.
# 1. Create an OAuth 2.0 (3LO) app in Atlassian Developer Console:
#    https://developer.atlassian.com/console/myapps/
# 2. Set the Callback/Redirect URI in your app (e.g., http://localhost:8080/callback).
# 3. Grant necessary scopes (see ATLASSIAN_OAUTH_SCOPE below).
# 4. Run 'mcp-atlassian --oauth-setup -v' (or 'uvx mcp-atlassian@latest --oauth-setup -v').
#    This wizard will guide you through authorization and provide your ATLASSIAN_OAUTH_CLOUD_ID.
#    Tokens are stored securely (keyring or a local file in ~/.mcp-atlassian/).

# Required for --oauth-setup and for the server to use OAuth:
#ATLASSIAN_OAUTH_CLIENT_ID=your_oauth_client_id
#ATLASSIAN_OAUTH_CLIENT_SECRET=your_oauth_client_secret
#ATLASSIAN_OAUTH_REDIRECT_URI=http://localhost:8080/callback # Must match your app's redirect URI
#ATLASSIAN_OAUTH_SCOPE=read:jira-work write:jira-work read:confluence-space.summary read:confluence-content.all write:confluence-content offline_access # IMPORTANT: 'offline_access' is crucial for refresh tokens

# Required for the server AFTER running --oauth-setup (this ID is printed by the setup wizard):
#ATLASSIAN_OAUTH_CLOUD_ID=your_atlassian_cloud_id_from_oauth_setup

# --- METHOD 5: BRING YOUR OWN TOKEN (BYOT) OAUTH (Advanced - Atlassian Cloud Only) ---
# Use this method when you have an externally managed OAuth access token.
# This is useful for integration with larger systems (e.g., MCP Gateway) that manage OAuth tokens.
# No token refresh will be performed - the external system is responsible for token management.
#ATLASSIAN_OAUTH_CLOUD_ID=your_atlassian_cloud_id
#ATLASSIAN_OAUTH_ACCESS_TOKEN=your_pre_existing_oauth_access_token

# =============================================
# SERVER/DATA CENTER SPECIFIC SETTINGS
# =============================================
# Only applicable if your JIRA_URL/CONFLUENCE_URL points to a Server/DC instance (not *.atlassian.net).
# Default is true. Set to false if using self-signed certificates (not recommended for production environments).
#JIRA_SSL_VERIFY=true
#CONFLUENCE_SSL_VERIFY=true


# =============================================
# OPTIONAL CONFIGURATION
# =============================================

# --- General Server Settings ---
# Transport mode for the MCP server. Default is 'stdio'.
# Options: stdio, sse
#TRANSPORT=stdio
# Port for 'sse' transport. Default is 8000.
#PORT=8000
# Host for 'sse' transport. Default is '0.0.0.0'.
#HOST=0.0.0.0

# --- Read-Only Mode ---
# Disables all write operations (create, update, delete). Default is false.
#READ_ONLY_MODE=false

# --- Logging Verbosity ---
# MCP_VERBOSE=true        # Enables INFO level logging (equivalent to 'mcp-atlassian -v')
# MCP_VERY_VERBOSE=true   # Enables DEBUG level logging (equivalent to 'mcp-atlassian -vv')
# MCP_LOGGING_STDOUT=true # Enables logging to stdout (logging.StreamHandler defaults to stderr)
# Default logging level is WARNING (minimal output).

# --- Tool Filtering ---
# Comma-separated list of tool names to enable. If not set, all tools are enabled
# (subject to read-only mode and configured services).
# Example: ENABLED_TOOLS=confluence_search,jira_get_issue
#ENABLED_TOOLS=

# --- Content Filtering ---
# Optional: Comma-separated list of Confluence space keys to limit searches and other operations to.
#CONFLUENCE_SPACES_FILTER=DEV,TEAM,DOC
# Optional: Comma-separated list of Jira project keys to limit searches and other operations to.
#JIRA_PROJECTS_FILTER=PROJ,DEVOPS

# --- Proxy Configuration (Advanced) ---
# Global proxy settings (applies to both Jira and Confluence unless overridden by service-specific proxy settings below).
#HTTP_PROXY=http://proxy.example.com:8080
#HTTPS_PROXY=https://user:[email protected]:8443 # Credentials can be included
#SOCKS_PROXY=socks5://proxy.example.com:1080 # Requires 'requests[socks]' to be installed
#NO_PROXY=localhost,127.0.0.1,.internal.example.com # Comma-separated list of hosts/domains to bypass proxy

# Jira-specific proxy settings (these override global proxy settings for Jira requests).
#JIRA_HTTP_PROXY=http://jira-proxy.example.com:8080
#JIRA_HTTPS_PROXY=https://jira-proxy.example.com:8443
#JIRA_SOCKS_PROXY=socks5://jira-proxy.example.com:1080
#JIRA_NO_PROXY=localhost,127.0.0.1,.internal.jira.com

# Confluence-specific proxy settings (these override global proxy settings for Confluence requests).
#CONFLUENCE_HTTP_PROXY=http://confluence-proxy.example.com:8080
#CONFLUENCE_HTTPS_PROXY=https://confluence-proxy.example.com:8443
#CONFLUENCE_SOCKS_PROXY=socks5://confluence-proxy.example.com:1080
#CONFLUENCE_NO_PROXY=localhost,127.0.0.1,.internal.confluence.com

# --- Custom HTTP Headers (Advanced) ---
# Jira-specific custom headers.
#JIRA_CUSTOM_HEADERS=X-Jira-Service=mcp-integration,X-Custom-Auth=jira-token,X-Forwarded-User=service-account

# Confluence-specific custom headers.
#CONFLUENCE_CUSTOM_HEADERS=X-Confluence-Service=mcp-integration,X-Custom-Auth=confluence-token,X-ALB-Token=secret-token

```

--------------------------------------------------------------------------------
/tests/integration/README.md:
--------------------------------------------------------------------------------

```markdown
# Integration Tests

This directory contains integration tests for the MCP Atlassian project. These tests validate the interaction between different components and services.

## Test Categories

### 1. Authentication Integration (`test_authentication.py`)
Tests various authentication flows including OAuth, Basic Auth, and PAT tokens.

- **OAuth Token Refresh**: Validates token refresh on expiration
- **Basic Auth**: Tests username/password authentication for both services
- **PAT Tokens**: Tests Personal Access Token authentication
- **Fallback Patterns**: Tests authentication fallback (OAuth → Basic → PAT)
- **Mixed Scenarios**: Tests different authentication combinations

### 2. Cross-Service Integration (`test_cross_service.py`)
Tests integration between Jira and Confluence services.

- **User Resolution**: Consistent user handling across services
- **Shared Authentication**: Auth context sharing between services
- **Error Handling**: Service isolation during failures
- **Configuration Sharing**: SSL and proxy settings consistency
- **Service Discovery**: Dynamic service availability detection

### 3. MCP Protocol Integration (`test_mcp_protocol.py`)
Tests the FastMCP server implementation and tool management.

- **Tool Discovery**: Dynamic tool listing based on configuration
- **Tool Filtering**: Read-only mode and enabled tools filtering
- **Middleware**: Authentication token extraction and validation
- **Concurrent Execution**: Parallel tool execution support
- **Error Propagation**: Proper error handling through the stack

### 4. Content Processing Integration (`test_content_processing.py`)
Tests HTML/Markdown conversion and content preprocessing.

- **Roundtrip Conversion**: HTML ↔ Markdown accuracy
- **Macro Preservation**: Confluence macro handling
- **Performance**: Large content processing (>1MB)
- **Edge Cases**: Empty content, malformed HTML, Unicode
- **Cross-Platform**: Content sharing between services

### 5. SSL Verification (`test_ssl_verification.py`)
Tests SSL certificate handling and verification.

- **SSL Configuration**: Enable/disable verification
- **Custom CA Bundles**: Support for custom certificates
- **Multiple Domains**: SSL adapter mounting for various domains
- **Error Handling**: Certificate validation failures

### 6. Proxy Configuration (`test_proxy.py`)
Tests HTTP/HTTPS/SOCKS proxy support.

- **Proxy Types**: HTTP, HTTPS, and SOCKS5 proxies
- **Authentication**: Proxy credentials in URLs
- **NO_PROXY**: Bypass patterns for internal domains
- **Environment Variables**: Proxy configuration from environment
- **Mixed Configuration**: Proxy + SSL settings

### 7. Real API Tests (`test_real_api.py`)
Tests with actual Atlassian APIs (requires `--use-real-data` flag).

- **Complete Lifecycles**: Create/update/delete workflows
- **Attachments**: File upload/download operations
- **Search Operations**: JQL and CQL queries
- **Bulk Operations**: Multiple item creation
- **Rate Limiting**: API throttling behavior
- **Cross-Service Linking**: Jira-Confluence integration

## Running Integration Tests

### Basic Execution
```bash
# Run all integration tests (mocked)
uv run pytest tests/integration/ --integration

# Run specific test file
uv run pytest tests/integration/test_authentication.py --integration

# Run with coverage
uv run pytest tests/integration/ --integration --cov=src/mcp_atlassian
```

### Real API Testing
```bash
# Run tests against real Atlassian APIs
uv run pytest tests/integration/test_real_api.py --integration --use-real-data

# Required environment variables for real API tests:
export JIRA_URL=https://your-domain.atlassian.net
export [email protected]
export JIRA_API_TOKEN=your-api-token
export JIRA_TEST_PROJECT_KEY=TEST

export CONFLUENCE_URL=https://your-domain.atlassian.net/wiki
export [email protected]
export CONFLUENCE_API_TOKEN=your-api-token
export CONFLUENCE_TEST_SPACE_KEY=TEST
```

### Test Markers
- `@pytest.mark.integration` - All integration tests
- `@pytest.mark.anyio` - Async tests supporting multiple backends

## Environment Setup

### For Mocked Tests
No special setup required. Tests use the utilities from `tests/utils/` for mocking.

### For Real API Tests
1. Create a test project in Jira (e.g., "TEST")
2. Create a test space in Confluence (e.g., "TEST")
3. Generate API tokens from your Atlassian account
4. Set environment variables as shown above
5. Ensure your account has permissions to create/delete in test areas

## Test Data Management

### Automatic Cleanup
Real API tests implement automatic cleanup using pytest fixtures:
- Created issues are tracked and deleted after each test
- Created pages are tracked and deleted after each test
- Attachments are cleaned up with their parent items

### Manual Cleanup
If tests fail and leave data behind:
```text
# Use JQL to find test issues
project = TEST AND summary ~ "Integration Test*"

# Use CQL to find test pages
space = TEST AND title ~ "Integration Test*"
```

## Writing New Integration Tests

### Best Practices
1. **Use Test Utilities**: Leverage helpers from `tests/utils/`
2. **Mark Appropriately**: Use `@pytest.mark.integration`
3. **Mock by Default**: Only use real APIs with explicit flag
4. **Clean Up**: Always clean up created test data
5. **Unique Identifiers**: Use UUIDs to avoid conflicts
6. **Error Handling**: Test both success and failure paths

### Example Test Structure
```python
import pytest
from tests.utils.base import BaseAuthTest
from tests.utils.mocks import MockEnvironment

@pytest.mark.integration
class TestNewIntegration(BaseAuthTest):
    def test_feature(self):
        with MockEnvironment.basic_auth_env():
            # Test implementation
            pass
```

## Troubleshooting

### Common Issues

1. **SSL Errors**: Set `JIRA_SSL_VERIFY=false` or `CONFLUENCE_SSL_VERIFY=false`
2. **Proxy Issues**: Check `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` settings
3. **Rate Limiting**: Add delays between requests or reduce test frequency
4. **Permission Errors**: Ensure test user has appropriate permissions
5. **Cleanup Failures**: Manually delete test data using JQL/CQL queries

### Debug Mode
```bash
# Run with verbose output
uv run pytest tests/integration/ --integration -v

# Run with debug logging
uv run pytest tests/integration/ --integration --log-cli-level=DEBUG
```

## CI/CD Integration

### GitHub Actions Example
```yaml
- name: Run Integration Tests
  env:
    JIRA_URL: ${{ secrets.JIRA_URL }}
    JIRA_USERNAME: ${{ secrets.JIRA_USERNAME }}
    JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
  run: |
    uv run pytest tests/integration/ --integration
```

### Skip Patterns
- Integration tests are skipped by default without `--integration` flag
- Real API tests require both `--integration` and `--use-real-data` flags
- Tests skip gracefully when required environment variables are missing

```

--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------

```markdown
# MCP Atlassian Test Fixtures Documentation

This document describes the enhanced test fixture system implemented for the MCP Atlassian project.

## Overview

The test fixture system has been significantly improved to provide:

- **Session-scoped fixtures** for expensive operations
- **Factory-based fixtures** for customizable test data
- **Better fixture composition** and reusability
- **Backward compatibility** with existing tests
- **Integration with test utilities** framework

## Architecture

```
tests/
├── conftest.py                 # Root fixtures with session-scoped data
├── unit/
│   ├── jira/conftest.py       # Jira-specific fixtures
│   ├── confluence/conftest.py # Confluence-specific fixtures
│   └── models/conftest.py     # Model testing fixtures
├── utils/                     # Test utilities framework
│   ├── factories.py          # Data factories
│   ├── mocks.py              # Mock utilities
│   ├── base.py               # Base test classes
│   └── assertions.py         # Custom assertions
└── fixtures/                  # Legacy mock data
    ├── jira_mocks.py         # Static Jira mock data
    └── confluence_mocks.py   # Static Confluence mock data
```

## Key Features

### 1. Session-Scoped Fixtures

These fixtures are computed once per test session to improve performance:

- `session_auth_configs`: Authentication configuration templates
- `session_mock_data`: Mock data templates for API responses
- `session_jira_field_definitions`: Jira field definitions
- `session_jira_projects`: Jira project data
- `session_confluence_spaces`: Confluence space definitions

```python
# Example usage
def test_with_session_data(session_jira_field_definitions):
    # Uses cached field definitions, computed once per session
    assert len(session_jira_field_definitions) > 0
```

### 2. Factory-Based Fixtures

These fixtures return factory functions for creating customizable test data:

- `make_jira_issue`: Create Jira issues with custom properties
- `make_confluence_page`: Create Confluence pages with custom properties
- `make_auth_config`: Create authentication configurations
- `make_api_error`: Create API error responses

```python
# Example usage
def test_custom_issue(make_jira_issue):
    issue = make_jira_issue(
        key="CUSTOM-123",
        fields={"priority": {"name": "High"}}
    )
    assert issue["key"] == "CUSTOM-123"
    assert issue["fields"]["priority"]["name"] == "High"
```

### 3. Environment Management

Enhanced environment fixtures for testing different authentication scenarios:

- `clean_environment`: No authentication variables
- `oauth_environment`: OAuth setup
- `basic_auth_environment`: Basic auth setup
- `parametrized_auth_env`: Parameterized auth testing

```python
# Example usage
@pytest.mark.parametrize("parametrized_auth_env",
                       ["oauth", "basic_auth"], indirect=True)
def test_auth_scenarios(parametrized_auth_env):
    # Test runs once for OAuth and once for basic auth
    pass
```

### 4. Enhanced Mock Clients

Improved mock clients with better integration:

- `mock_jira_client`: Pre-configured mock Jira client
- `mock_confluence_client`: Pre-configured mock Confluence client
- `enhanced_mock_jira_client`: Factory-integrated Jira client
- `enhanced_mock_confluence_client`: Factory-integrated Confluence client

### 5. Specialized Data Fixtures

Domain-specific fixtures for complex testing scenarios:

- `make_jira_issue_with_worklog`: Issues with worklog data
- `make_jira_search_results`: JQL search results
- `make_confluence_page_with_content`: Pages with rich content
- `make_confluence_search_results`: CQL search results

## Migration Guide

### For New Tests

Use the enhanced factory-based fixtures:

```python
def test_new_functionality(make_jira_issue, make_confluence_page):
    # Create custom test data
    issue = make_jira_issue(key="NEW-123")
    page = make_confluence_page(title="New Test Page")

    # Test your functionality
    assert issue["key"] == "NEW-123"
    assert page["title"] == "New Test Page"
```

### For Existing Tests

Existing tests continue to work without changes due to backward compatibility:

```python
def test_existing_functionality(jira_issue_data, confluence_page_data):
    # These fixtures still work as before
    assert jira_issue_data["key"] == "TEST-123"
    assert confluence_page_data["title"] == "Test Page"
```

### Performance Testing

Use large dataset fixtures for performance tests:

```python
def test_performance(large_jira_dataset, large_confluence_dataset):
    # 100 issues and pages for performance testing
    assert len(large_jira_dataset) == 100
    assert len(large_confluence_dataset) == 100
```

## Best Practices

### 1. Choose the Right Fixture

- Use **factory fixtures** for customizable data
- Use **session-scoped fixtures** for static, expensive data
- Use **legacy fixtures** only for backward compatibility

### 2. Session-Scoped Data

Take advantage of session-scoped fixtures for data that doesn't change:

```python
# Good: Uses session-scoped data
def test_field_parsing(session_jira_field_definitions):
    parser = FieldParser(session_jira_field_definitions)
    assert parser.is_valid()

# Avoid: Creates new data every time
def test_field_parsing():
    fields = create_field_definitions()  # Expensive operation
    parser = FieldParser(fields)
    assert parser.is_valid()
```

### 3. Factory Customization

Use factories to create exactly the data you need:

```python
# Good: Creates minimal required data
def test_issue_key_validation(make_jira_issue):
    issue = make_jira_issue(key="VALID-123")
    assert validate_key(issue["key"])

# Avoid: Uses complex data when simple would do
def test_issue_key_validation(complete_jira_issue_data):
    assert validate_key(complete_jira_issue_data["key"])
```

### 4. Environment Testing

Use parametrized fixtures for testing multiple scenarios:

```python
@pytest.mark.parametrize("parametrized_auth_env",
                       ["oauth", "basic_auth", "clean"], indirect=True)
def test_auth_detection(parametrized_auth_env):
    # Test with different auth environments
    detector = AuthDetector()
    auth_type = detector.detect_auth_type()
    assert auth_type in ["oauth", "basic", None]
```

## Backward Compatibility

All existing tests continue to work without modification. The enhanced fixtures:

1. **Maintain existing interfaces**: Old fixture names and return types unchanged
2. **Preserve mock data**: Original mock responses still available
3. **Support gradual migration**: Teams can adopt new fixtures incrementally

## Performance Improvements

The enhanced fixture system provides significant performance improvements:

1. **Session-scoped caching**: Expensive data created once per session
2. **Lazy loading**: Data only created when needed
3. **Efficient factories**: Minimal object creation overhead
4. **Reduced duplication**: Shared fixtures across test modules

## Examples

### Basic Usage

```python
def test_jira_issue_creation(make_jira_issue):
    # Create a custom issue
    issue = make_jira_issue(
        key="TEST-456",
        fields={"summary": "Custom test issue"}
    )

    # Test the issue
    model = JiraIssue.from_dict(issue)
    assert model.key == "TEST-456"
    assert model.summary == "Custom test issue"
```

### Advanced Usage

```python
def test_complex_workflow(
    make_jira_issue_with_worklog,
    make_confluence_page_with_content,
    oauth_environment
):
    # Create issue with worklog
    issue = make_jira_issue_with_worklog(
        key="WORKFLOW-123",
        worklog_hours=8,
        worklog_comment="Development work"
    )

    # Create page with content
    page = make_confluence_page_with_content(
        title="Workflow Documentation",
        content="<h1>Workflow</h1><p>Process documentation</p>",
        labels=["workflow", "documentation"]
    )

    # Test workflow with OAuth environment
    workflow = ComplexWorkflow(issue, page)
    result = workflow.execute()

    assert result.success
    assert result.issue_key == "WORKFLOW-123"
    assert "Workflow Documentation" in result.documentation
```

### Integration Testing

```python
def test_real_api_integration(
    jira_integration_client,
    confluence_integration_client,
    use_real_jira_data,
    use_real_confluence_data
):
    if not use_real_jira_data:
        pytest.skip("Real Jira data not available")

    if not use_real_confluence_data:
        pytest.skip("Real Confluence data not available")

    # Test with real API clients
    issues = jira_integration_client.search_issues("project = TEST")
    pages = confluence_integration_client.get_space_pages("TEST")

    assert len(issues) >= 0
    assert len(pages) >= 0
```

## Conclusion

The enhanced fixture system provides a powerful, flexible, and efficient foundation for testing the MCP Atlassian project. It maintains backward compatibility while offering significant improvements in performance, reusability, and developer experience.

Key benefits:

- ⚡ **Faster test execution** through session-scoped caching
- 🔧 **More flexible test data** through factory functions
- 🔄 **Better reusability** across test modules
- 📈 **Improved maintainability** with clear separation of concerns
- 🛡️ **Backward compatibility** with existing tests

For questions or suggestions about the fixture system, please refer to the test utilities documentation in `tests/utils/`.

```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
# MCP Atlassian

![PyPI Version](https://img.shields.io/pypi/v/mcp-atlassian)
![PyPI - Downloads](https://img.shields.io/pypi/dm/mcp-atlassian)
![PePy - Total Downloads](https://static.pepy.tech/personalized-badge/mcp-atlassian?period=total&units=international_system&left_color=grey&right_color=blue&left_text=Total%20Downloads)
[![Run Tests](https://github.com/sooperset/mcp-atlassian/actions/workflows/tests.yml/badge.svg)](https://github.com/sooperset/mcp-atlassian/actions/workflows/tests.yml)
![License](https://img.shields.io/github/license/sooperset/mcp-atlassian)

Model Context Protocol (MCP) server for Atlassian products (Confluence and Jira). This integration supports both Confluence & Jira Cloud and Server/Data Center deployments.

## Example Usage

Ask your AI assistant to:

- **📝 Automatic Jira Updates** - "Update Jira from our meeting notes"
- **🔍 AI-Powered Confluence Search** - "Find our OKR guide in Confluence and summarize it"
- **🐛 Smart Jira Issue Filtering** - "Show me urgent bugs in PROJ project from last week"
- **📄 Content Creation & Management** - "Create a tech design doc for XYZ feature"

### Feature Demo

https://github.com/user-attachments/assets/35303504-14c6-4ae4-913b-7c25ea511c3e

<details> <summary>Confluence Demo</summary>

https://github.com/user-attachments/assets/7fe9c488-ad0c-4876-9b54-120b666bb785

</details>

### Compatibility

| Product        | Deployment Type    | Support Status              |
|----------------|--------------------|-----------------------------|
| **Confluence** | Cloud              | ✅ Fully supported           |
| **Confluence** | Server/Data Center | ✅ Supported (version 6.0+)  |
| **Jira**       | Cloud              | ✅ Fully supported           |
| **Jira**       | Server/Data Center | ✅ Supported (version 8.14+) |

## Quick Start Guide

### 🔐 1. Authentication Setup

MCP Atlassian supports three authentication methods:

#### A. API Token Authentication (Cloud) - **Recommended**

1. Go to https://id.atlassian.com/manage-profile/security/api-tokens
2. Click **Create API token**, name it
3. Copy the token immediately

#### B. Personal Access Token (Server/Data Center)

1. Go to your profile (avatar) → **Profile** → **Personal Access Tokens**
2. Click **Create token**, name it, set expiry
3. Copy the token immediately

#### C. OAuth 2.0 Authentication (Cloud) - **Advanced**

> [!NOTE]
> OAuth 2.0 is more complex to set up but provides enhanced security features. For most users, API Token authentication (Method A) is simpler and sufficient.

1. Go to [Atlassian Developer Console](https://developer.atlassian.com/console/myapps/)
2. Create an "OAuth 2.0 (3LO) integration" app
3. Configure **Permissions** (scopes) for Jira/Confluence
4. Set **Callback URL** (e.g., `http://localhost:8080/callback`)
5. Run setup wizard:
   ```bash
   docker run --rm -i \
     -p 8080:8080 \
     -v "${HOME}/.mcp-atlassian:/home/app/.mcp-atlassian" \
     ghcr.io/sooperset/mcp-atlassian:latest --oauth-setup -v
   ```
6. Follow prompts for `Client ID`, `Secret`, `URI`, and `Scope`
7. Complete browser authorization
8. Add obtained credentials to `.env` or IDE config:
   - `ATLASSIAN_OAUTH_CLOUD_ID` (from wizard)
   - `ATLASSIAN_OAUTH_CLIENT_ID`
   - `ATLASSIAN_OAUTH_CLIENT_SECRET`
   - `ATLASSIAN_OAUTH_REDIRECT_URI`
   - `ATLASSIAN_OAUTH_SCOPE`

> [!IMPORTANT]
> For the standard OAuth flow described above, include `offline_access` in your scope (e.g., `read:jira-work write:jira-work offline_access`). This allows the server to refresh the access token automatically.

<details>
<summary>Alternative: Using a Pre-existing OAuth Access Token (BYOT)</summary>

If you are running mcp-atlassian as part of a larger system that manages Atlassian OAuth 2.0 access tokens externally (e.g., through a central identity provider or another application), you can provide an access token directly to this MCP server. This method bypasses the interactive setup wizard and the server's internal token management (including refresh capabilities).

**Requirements:**
- A valid Atlassian OAuth 2.0 Access Token with the necessary scopes for the intended operations.
- The corresponding `ATLASSIAN_OAUTH_CLOUD_ID` for your Atlassian instance.

**Configuration:**
To use this method, set the following environment variables (or use the corresponding command-line flags when starting the server):
- `ATLASSIAN_OAUTH_CLOUD_ID`: Your Atlassian Cloud ID. (CLI: `--oauth-cloud-id`)
- `ATLASSIAN_OAUTH_ACCESS_TOKEN`: Your pre-existing OAuth 2.0 access token. (CLI: `--oauth-access-token`)

**Important Considerations for BYOT:**
- **Token Lifecycle Management:** When using BYOT, the MCP server **does not** handle token refresh. The responsibility for obtaining, refreshing (before expiry), and revoking the access token lies entirely with you or the external system providing the token.
- **Unused Variables:** The standard OAuth client variables (`ATLASSIAN_OAUTH_CLIENT_ID`, `ATLASSIAN_OAUTH_CLIENT_SECRET`, `ATLASSIAN_OAUTH_REDIRECT_URI`, `ATLASSIAN_OAUTH_SCOPE`) are **not** used and can be omitted when configuring for BYOT.
- **No Setup Wizard:** The `--oauth-setup` wizard is not applicable and should not be used for this approach.
- **No Token Cache Volume:** The Docker volume mount for token storage (e.g., `-v "${HOME}/.mcp-atlassian:/home/app/.mcp-atlassian"`) is also not necessary if you are exclusively using the BYOT method, as no tokens are stored or managed by this server.
- **Scope:** The provided access token must already have the necessary permissions (scopes) for the Jira/Confluence operations you intend to perform.

This option is useful in scenarios where OAuth credential management is centralized or handled by other infrastructure components.
</details>

> [!TIP]
> **Multi-Cloud OAuth Support**: If you're building a multi-tenant application where users provide their own OAuth tokens, see the [Multi-Cloud OAuth Support](#multi-cloud-oauth-support) section for minimal configuration setup.

### 📦 2. Installation

MCP Atlassian is distributed as a Docker image. This is the recommended way to run the server, especially for IDE integration. Ensure you have Docker installed.

```bash
# Pull Pre-built Image
docker pull ghcr.io/sooperset/mcp-atlassian:latest
```

## 🛠️ IDE Integration

MCP Atlassian is designed to be used with AI assistants through IDE integration.

> [!TIP]
> **For Claude Desktop**: Locate and edit the configuration file directly:
> - **Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
> - **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
> - **Linux**: `~/.config/Claude/claude_desktop_config.json`
>
> **For Cursor**: Open Settings → MCP → + Add new global MCP server

### ⚙️ Configuration Methods

There are two main approaches to configure the Docker container:

1. **Passing Variables Directly** (shown in examples below)
2. **Using an Environment File** with `--env-file` flag (shown in collapsible sections)

> [!NOTE]
> Common environment variables include:
>
> - `CONFLUENCE_SPACES_FILTER`: Filter by space keys (e.g., "DEV,TEAM,DOC")
> - `JIRA_PROJECTS_FILTER`: Filter by project keys (e.g., "PROJ,DEV,SUPPORT")
> - `READ_ONLY_MODE`: Set to "true" to disable write operations
> - `MCP_VERBOSE`: Set to "true" for more detailed logging
> - `MCP_LOGGING_STDOUT`: Set to "true" to log to stdout instead of stderr
> - `ENABLED_TOOLS`: Comma-separated list of tool names to enable (e.g., "confluence_search,jira_get_issue")
>
> See the [.env.example](https://github.com/sooperset/mcp-atlassian/blob/main/.env.example) file for all available options.


### 📝 Configuration Examples

**Method 1 (Passing Variables Directly):**
```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "-e", "CONFLUENCE_URL",
        "-e", "CONFLUENCE_USERNAME",
        "-e", "CONFLUENCE_API_TOKEN",
        "-e", "JIRA_URL",
        "-e", "JIRA_USERNAME",
        "-e", "JIRA_API_TOKEN",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "CONFLUENCE_URL": "https://your-company.atlassian.net/wiki",
        "CONFLUENCE_USERNAME": "[email protected]",
        "CONFLUENCE_API_TOKEN": "your_confluence_api_token",
        "JIRA_URL": "https://your-company.atlassian.net",
        "JIRA_USERNAME": "[email protected]",
        "JIRA_API_TOKEN": "your_jira_api_token"
      }
    }
  }
}
```

<details>
<summary>Alternative: Using Environment File</summary>

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "--env-file",
        "/path/to/your/mcp-atlassian.env",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ]
    }
  }
}
```
</details>

<details>
<summary>Server/Data Center Configuration</summary>

For Server/Data Center deployments, use direct variable passing:

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-e", "CONFLUENCE_URL",
        "-e", "CONFLUENCE_PERSONAL_TOKEN",
        "-e", "CONFLUENCE_SSL_VERIFY",
        "-e", "JIRA_URL",
        "-e", "JIRA_PERSONAL_TOKEN",
        "-e", "JIRA_SSL_VERIFY",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "CONFLUENCE_URL": "https://confluence.your-company.com",
        "CONFLUENCE_PERSONAL_TOKEN": "your_confluence_pat",
        "CONFLUENCE_SSL_VERIFY": "false",
        "JIRA_URL": "https://jira.your-company.com",
        "JIRA_PERSONAL_TOKEN": "your_jira_pat",
        "JIRA_SSL_VERIFY": "false"
      }
    }
  }
}
```

> [!NOTE]
> Set `CONFLUENCE_SSL_VERIFY` and `JIRA_SSL_VERIFY` to "false" only if you have self-signed certificates.

</details>

<details>
<summary>OAuth 2.0 Configuration (Cloud Only)</summary>
<a name="oauth-20-configuration-example-cloud-only"></a>

These examples show how to configure `mcp-atlassian` in your IDE (like Cursor or Claude Desktop) when using OAuth 2.0 for Atlassian Cloud.

**Example for Standard OAuth 2.0 Flow (using Setup Wizard):**

This configuration is for when you use the server's built-in OAuth client and have completed the [OAuth setup wizard](#c-oauth-20-authentication-cloud---advanced).

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-v", "<path_to_your_home>/.mcp-atlassian:/home/app/.mcp-atlassian",
        "-e", "JIRA_URL",
        "-e", "CONFLUENCE_URL",
        "-e", "ATLASSIAN_OAUTH_CLIENT_ID",
        "-e", "ATLASSIAN_OAUTH_CLIENT_SECRET",
        "-e", "ATLASSIAN_OAUTH_REDIRECT_URI",
        "-e", "ATLASSIAN_OAUTH_SCOPE",
        "-e", "ATLASSIAN_OAUTH_CLOUD_ID",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "JIRA_URL": "https://your-company.atlassian.net",
        "CONFLUENCE_URL": "https://your-company.atlassian.net/wiki",
        "ATLASSIAN_OAUTH_CLIENT_ID": "YOUR_OAUTH_APP_CLIENT_ID",
        "ATLASSIAN_OAUTH_CLIENT_SECRET": "YOUR_OAUTH_APP_CLIENT_SECRET",
        "ATLASSIAN_OAUTH_REDIRECT_URI": "http://localhost:8080/callback",
        "ATLASSIAN_OAUTH_SCOPE": "read:jira-work write:jira-work read:confluence-content.all write:confluence-content offline_access",
        "ATLASSIAN_OAUTH_CLOUD_ID": "YOUR_CLOUD_ID_FROM_SETUP_WIZARD"
      }
    }
  }
}
```

> [!NOTE]
> - For the Standard Flow:
>   - `ATLASSIAN_OAUTH_CLOUD_ID` is obtained from the `--oauth-setup` wizard output or is known for your instance.
>   - Other `ATLASSIAN_OAUTH_*` client variables are from your OAuth app in the Atlassian Developer Console.
>   - `JIRA_URL` and `CONFLUENCE_URL` for your Cloud instances are always required.
>   - The volume mount (`-v .../.mcp-atlassian:/home/app/.mcp-atlassian`) is crucial for persisting the OAuth tokens obtained by the wizard, enabling automatic refresh.

**Example for Pre-existing Access Token (BYOT - Bring Your Own Token):**

This configuration is for when you are providing your own externally managed OAuth 2.0 access token.

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-e", "JIRA_URL",
        "-e", "CONFLUENCE_URL",
        "-e", "ATLASSIAN_OAUTH_CLOUD_ID",
        "-e", "ATLASSIAN_OAUTH_ACCESS_TOKEN",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "JIRA_URL": "https://your-company.atlassian.net",
        "CONFLUENCE_URL": "https://your-company.atlassian.net/wiki",
        "ATLASSIAN_OAUTH_CLOUD_ID": "YOUR_KNOWN_CLOUD_ID",
        "ATLASSIAN_OAUTH_ACCESS_TOKEN": "YOUR_PRE_EXISTING_OAUTH_ACCESS_TOKEN"
      }
    }
  }
}
```

> [!NOTE]
> - For the BYOT Method:
>   - You primarily need `JIRA_URL`, `CONFLUENCE_URL`, `ATLASSIAN_OAUTH_CLOUD_ID`, and `ATLASSIAN_OAUTH_ACCESS_TOKEN`.
>   - Standard OAuth client variables (`ATLASSIAN_OAUTH_CLIENT_ID`, `CLIENT_SECRET`, `REDIRECT_URI`, `SCOPE`) are **not** used.
>   - Token lifecycle (e.g., refreshing the token before it expires and restarting mcp-atlassian) is your responsibility, as the server will not refresh BYOT tokens.

</details>

<details>
<summary>Proxy Configuration</summary>

MCP Atlassian supports routing API requests through standard HTTP/HTTPS/SOCKS proxies. Configure using environment variables:

- Supports standard `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`, `SOCKS_PROXY`.
- Service-specific overrides are available (e.g., `JIRA_HTTPS_PROXY`, `CONFLUENCE_NO_PROXY`).
- Service-specific variables override global ones for that service.

Add the relevant proxy variables to the `args` (using `-e`) and `env` sections of your MCP configuration:

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "-e", "... existing Confluence/Jira vars",
        "-e", "HTTP_PROXY",
        "-e", "HTTPS_PROXY",
        "-e", "NO_PROXY",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "... existing Confluence/Jira vars": "...",
        "HTTP_PROXY": "http://proxy.internal:8080",
        "HTTPS_PROXY": "http://proxy.internal:8080",
        "NO_PROXY": "localhost,.your-company.com"
      }
    }
  }
}
```

Credentials in proxy URLs are masked in logs. If you set `NO_PROXY`, it will be respected for requests to matching hosts.

</details>
<details>
<summary>Custom HTTP Headers Configuration</summary>

MCP Atlassian supports adding custom HTTP headers to all API requests. This feature is particularly useful in corporate environments where additional headers are required for security, authentication, or routing purposes.

Custom headers are configured using environment variables with comma-separated key=value pairs:

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "-e", "CONFLUENCE_URL",
        "-e", "CONFLUENCE_USERNAME",
        "-e", "CONFLUENCE_API_TOKEN",
        "-e", "CONFLUENCE_CUSTOM_HEADERS",
        "-e", "JIRA_URL",
        "-e", "JIRA_USERNAME",
        "-e", "JIRA_API_TOKEN",
        "-e", "JIRA_CUSTOM_HEADERS",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "CONFLUENCE_URL": "https://your-company.atlassian.net/wiki",
        "CONFLUENCE_USERNAME": "[email protected]",
        "CONFLUENCE_API_TOKEN": "your_confluence_api_token",
        "CONFLUENCE_CUSTOM_HEADERS": "X-Confluence-Service=mcp-integration,X-Custom-Auth=confluence-token,X-ALB-Token=secret-token",
        "JIRA_URL": "https://your-company.atlassian.net",
        "JIRA_USERNAME": "[email protected]",
        "JIRA_API_TOKEN": "your_jira_api_token",
        "JIRA_CUSTOM_HEADERS": "X-Forwarded-User=service-account,X-Company-Service=mcp-atlassian,X-Jira-Client=mcp-integration"
      }
    }
  }
}
```

**Security Considerations:**

- Custom header values are masked in debug logs to protect sensitive information
- Ensure custom headers don't conflict with standard HTTP or Atlassian API headers
- Avoid including sensitive authentication tokens in custom headers if already using basic auth or OAuth
- Headers are sent with every API request - verify they don't interfere with API functionality

</details>


<details>
<summary>Multi-Cloud OAuth Support</summary>

MCP Atlassian supports multi-cloud OAuth scenarios where each user connects to their own Atlassian cloud instance. This is useful for multi-tenant applications, chatbots, or services where users provide their own OAuth tokens.

**Minimal OAuth Configuration:**

1. Enable minimal OAuth mode (no client credentials required):
   ```bash
   docker run -e ATLASSIAN_OAUTH_ENABLE=true -p 9000:9000 \
     ghcr.io/sooperset/mcp-atlassian:latest \
     --transport streamable-http --port 9000
   ```

2. Users provide authentication via HTTP headers:
   - `Authorization: Bearer <user_oauth_token>`
   - `X-Atlassian-Cloud-Id: <user_cloud_id>`

**Example Integration (Python):**
```python
import asyncio
from mcp.client.streamable_http import streamablehttp_client
from mcp import ClientSession

user_token = "user-specific-oauth-token"
user_cloud_id = "user-specific-cloud-id"

async def main():
    # Connect to streamable HTTP server with custom headers
    async with streamablehttp_client(
        "http://localhost:9000/mcp",
        headers={
            "Authorization": f"Bearer {user_token}",
            "X-Atlassian-Cloud-Id": user_cloud_id
        }
    ) as (read_stream, write_stream, _):
        # Create a session using the client streams
        async with ClientSession(read_stream, write_stream) as session:
            # Initialize the connection
            await session.initialize()

            # Example: Get a Jira issue
            result = await session.call_tool(
                "jira_get_issue",
                {"issue_key": "PROJ-123"}
            )
            print(result)

asyncio.run(main())
```

**Configuration Notes:**
- Each request can use a different cloud instance via the `X-Atlassian-Cloud-Id` header
- User tokens are isolated per request - no cross-tenant data leakage
- Falls back to global `ATLASSIAN_OAUTH_CLOUD_ID` if header not provided
- Compatible with standard OAuth 2.0 bearer token authentication

</details>

<details> <summary>Single Service Configurations</summary>

**For Confluence Cloud only:**

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-e", "CONFLUENCE_URL",
        "-e", "CONFLUENCE_USERNAME",
        "-e", "CONFLUENCE_API_TOKEN",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "CONFLUENCE_URL": "https://your-company.atlassian.net/wiki",
        "CONFLUENCE_USERNAME": "[email protected]",
        "CONFLUENCE_API_TOKEN": "your_api_token"
      }
    }
  }
}
```

For Confluence Server/DC, use:
```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-e", "CONFLUENCE_URL",
        "-e", "CONFLUENCE_PERSONAL_TOKEN",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "CONFLUENCE_URL": "https://confluence.your-company.com",
        "CONFLUENCE_PERSONAL_TOKEN": "your_personal_token"
      }
    }
  }
}
```

**For Jira Cloud only:**

```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-e", "JIRA_URL",
        "-e", "JIRA_USERNAME",
        "-e", "JIRA_API_TOKEN",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "JIRA_URL": "https://your-company.atlassian.net",
        "JIRA_USERNAME": "[email protected]",
        "JIRA_API_TOKEN": "your_api_token"
      }
    }
  }
}
```

For Jira Server/DC, use:
```json
{
  "mcpServers": {
    "mcp-atlassian": {
      "command": "docker",
      "args": [
        "run",
        "--rm",
        "-i",
        "-e", "JIRA_URL",
        "-e", "JIRA_PERSONAL_TOKEN",
        "ghcr.io/sooperset/mcp-atlassian:latest"
      ],
      "env": {
        "JIRA_URL": "https://jira.your-company.com",
        "JIRA_PERSONAL_TOKEN": "your_personal_token"
      }
    }
  }
}
```

</details>

### 👥 HTTP Transport Configuration

Instead of using `stdio`, you can run the server as a persistent HTTP service using either:
- `sse` (Server-Sent Events) transport at `/sse` endpoint
- `streamable-http` transport at `/mcp` endpoint

Both transport types support single-user and multi-user authentication:

**Authentication Options:**
- **Single-User**: Use server-level authentication configured via environment variables
- **Multi-User**: Each user provides their own authentication:
  - Cloud: OAuth 2.0 Bearer tokens
  - Server/Data Center: Personal Access Tokens (PATs)

<details> <summary>Basic HTTP Transport Setup</summary>

1. Start the server with your chosen transport:

    ```bash
    # For SSE transport
    docker run --rm -p 9000:9000 \
      --env-file /path/to/your/.env \
      ghcr.io/sooperset/mcp-atlassian:latest \
      --transport sse --port 9000 -vv

    # OR for streamable-http transport
    docker run --rm -p 9000:9000 \
      --env-file /path/to/your/.env \
      ghcr.io/sooperset/mcp-atlassian:latest \
      --transport streamable-http --port 9000 -vv
    ```

2. Configure your IDE (single-user example):

    **SSE Transport Example:**
    ```json
    {
      "mcpServers": {
        "mcp-atlassian-http": {
          "url": "http://localhost:9000/sse"
        }
      }
    }
    ```

    **Streamable-HTTP Transport Example:**
    ```json
    {
      "mcpServers": {
        "mcp-atlassian-service": {
          "url": "http://localhost:9000/mcp"
        }
      }
    }
    ```
</details>

<details> <summary>Multi-User Authentication Setup</summary>

Here's a complete example of setting up multi-user authentication with streamable-HTTP transport:

1. First, run the OAuth setup wizard to configure the server's OAuth credentials:
   ```bash
   docker run --rm -i \
     -p 8080:8080 \
     -v "${HOME}/.mcp-atlassian:/home/app/.mcp-atlassian" \
     ghcr.io/sooperset/mcp-atlassian:latest --oauth-setup -v
   ```

2. Start the server with streamable-HTTP transport:
   ```bash
   docker run --rm -p 9000:9000 \
     --env-file /path/to/your/.env \
     ghcr.io/sooperset/mcp-atlassian:latest \
     --transport streamable-http --port 9000 -vv
   ```

3. Configure your IDE's MCP settings:

**Choose the appropriate Authorization method for your Atlassian deployment:**

- **Cloud (OAuth 2.0):** Use this if your organization is on Atlassian Cloud and you have an OAuth access token for each user.
- **Server/Data Center (PAT):** Use this if you are on Atlassian Server or Data Center and each user has a Personal Access Token (PAT).

**Cloud (OAuth 2.0) Example:**
```json
{
  "mcpServers": {
    "mcp-atlassian-service": {
      "url": "http://localhost:9000/mcp",
      "headers": {
        "Authorization": "Bearer <USER_OAUTH_ACCESS_TOKEN>"
      }
    }
  }
}
```

**Server/Data Center (PAT) Example:**
```json
{
  "mcpServers": {
    "mcp-atlassian-service": {
      "url": "http://localhost:9000/mcp",
      "headers": {
        "Authorization": "Token <USER_PERSONAL_ACCESS_TOKEN>"
      }
    }
  }
}
```

4. Required environment variables in `.env`:
   ```bash
   JIRA_URL=https://your-company.atlassian.net
   CONFLUENCE_URL=https://your-company.atlassian.net/wiki
   ATLASSIAN_OAUTH_CLIENT_ID=your_oauth_app_client_id
   ATLASSIAN_OAUTH_CLIENT_SECRET=your_oauth_app_client_secret
   ATLASSIAN_OAUTH_REDIRECT_URI=http://localhost:8080/callback
   ATLASSIAN_OAUTH_SCOPE=read:jira-work write:jira-work read:confluence-content.all write:confluence-content offline_access
   ATLASSIAN_OAUTH_CLOUD_ID=your_cloud_id_from_setup_wizard
   ```

> [!NOTE]
> - The server should have its own fallback authentication configured (e.g., via environment variables for API token, PAT, or its own OAuth setup using --oauth-setup). This is used if a request doesn't include user-specific authentication.
> - **OAuth**: Each user needs their own OAuth access token from your Atlassian OAuth app.
> - **PAT**: Each user provides their own Personal Access Token.
> - **Multi-Cloud**: For OAuth users, optionally include `X-Atlassian-Cloud-Id` header to specify which Atlassian cloud instance to use
> - The server will use the user's token for API calls when provided, falling back to server auth if not
> - User tokens should have appropriate scopes for their needed operations

</details>

## Tools

### Key Tools

#### Jira Tools

- `jira_get_issue`: Get details of a specific issue
- `jira_search`: Search issues using JQL
- `jira_create_issue`: Create a new issue
- `jira_update_issue`: Update an existing issue
- `jira_transition_issue`: Transition an issue to a new status
- `jira_add_comment`: Add a comment to an issue

#### Confluence Tools

- `confluence_search`: Search Confluence content using CQL
- `confluence_get_page`: Get content of a specific page
- `confluence_create_page`: Create a new page
- `confluence_update_page`: Update an existing page

<details> <summary>View All Tools</summary>

| Operation | Jira Tools                          | Confluence Tools               |
|-----------|-------------------------------------|--------------------------------|
| **Read**  | `jira_search`                       | `confluence_search`            |
|           | `jira_get_issue`                    | `confluence_get_page`          |
|           | `jira_get_all_projects`             | `confluence_get_page_children` |
|           | `jira_get_project_issues`           | `confluence_get_comments`      |
|           | `jira_get_worklog`                  | `confluence_get_labels`        |
|           | `jira_get_transitions`              | `confluence_search_user`       |
|           | `jira_search_fields`                |                                |
|           | `jira_get_agile_boards`             |                                |
|           | `jira_get_board_issues`             |                                |
|           | `jira_get_sprints_from_board`       |                                |
|           | `jira_get_sprint_issues`            |                                |
|           | `jira_get_issue_link_types`         |                                |
|           | `jira_batch_get_changelogs`*        |                                |
|           | `jira_get_user_profile`             |                                |
|           | `jira_download_attachments`         |                                |
|           | `jira_get_project_versions`         |                                |
| **Write** | `jira_create_issue`                 | `confluence_create_page`       |
|           | `jira_update_issue`                 | `confluence_update_page`       |
|           | `jira_delete_issue`                 | `confluence_delete_page`       |
|           | `jira_batch_create_issues`          | `confluence_add_label`         |
|           | `jira_add_comment`                  | `confluence_add_comment`       |
|           | `jira_transition_issue`             |                                |
|           | `jira_add_worklog`                  |                                |
|           | `jira_link_to_epic`                 |                                |
|           | `jira_create_sprint`                |                                |
|           | `jira_update_sprint`                |                                |
|           | `jira_create_issue_link`            |                                |
|           | `jira_remove_issue_link`            |                                |
|           | `jira_create_version`               |                                |
|           | `jira_batch_create_versions`        |                                |

*Tool only available on Jira Cloud

</details>

### Tool Filtering and Access Control

The server provides two ways to control tool access:

1. **Tool Filtering**: Use `--enabled-tools` flag or `ENABLED_TOOLS` environment variable to specify which tools should be available:

   ```bash
   # Via environment variable
   ENABLED_TOOLS="confluence_search,jira_get_issue,jira_search"

   # Or via command line flag
   docker run ... --enabled-tools "confluence_search,jira_get_issue,jira_search" ...
   ```

2. **Read/Write Control**: Tools are categorized as read or write operations. When `READ_ONLY_MODE` is enabled, only read operations are available regardless of `ENABLED_TOOLS` setting.

## Troubleshooting & Debugging

### Common Issues

- **Authentication Failures**:
    - For Cloud: Check your API tokens (not your account password)
    - For Server/Data Center: Verify your personal access token is valid and not expired
    - For older Confluence servers: Some older versions require basic authentication with `CONFLUENCE_USERNAME` and `CONFLUENCE_API_TOKEN` (where token is your password)
- **SSL Certificate Issues**: If using Server/Data Center and encounter SSL errors, set `CONFLUENCE_SSL_VERIFY=false` or `JIRA_SSL_VERIFY=false`
- **Permission Errors**: Ensure your Atlassian account has sufficient permissions to access the spaces/projects
- **Custom Headers Issues**: See the ["Debugging Custom Headers"](#debugging-custom-headers) section below to analyze and resolve issues with custom headers

### Debugging Custom Headers

To verify custom headers are being applied correctly:

1. **Enable Debug Logging**: Set `MCP_VERY_VERBOSE=true` to see detailed request logs
   ```bash
   # In your .env file or environment
   MCP_VERY_VERBOSE=true
   MCP_LOGGING_STDOUT=true
   ```

2. **Check Header Parsing**: Custom headers appear in logs with masked values for security:
   ```
   DEBUG Custom headers applied: {'X-Forwarded-User': '***', 'X-ALB-Token': '***'}
   ```

3. **Verify Service-Specific Headers**: Check logs to confirm the right headers are being used:
   ```
   DEBUG Jira request headers: service-specific headers applied
   DEBUG Confluence request headers: service-specific headers applied
   ```

4. **Test Header Format**: Ensure your header string format is correct:
   ```bash
   # Correct format
   JIRA_CUSTOM_HEADERS=X-Custom=value1,X-Other=value2
   CONFLUENCE_CUSTOM_HEADERS=X-Custom=value1,X-Other=value2

   # Incorrect formats (will be ignored)
   JIRA_CUSTOM_HEADERS="X-Custom=value1,X-Other=value2"  # Extra quotes
   JIRA_CUSTOM_HEADERS=X-Custom: value1,X-Other: value2  # Colon instead of equals
   JIRA_CUSTOM_HEADERS=X-Custom = value1               # Spaces around equals
   ```

**Security Note**: Header values containing sensitive information (tokens, passwords) are automatically masked in logs to prevent accidental exposure.

### Debugging Tools

```bash
# Using MCP Inspector for testing
npx @modelcontextprotocol/inspector uvx mcp-atlassian ...

# For local development version
npx @modelcontextprotocol/inspector uv --directory /path/to/your/mcp-atlassian run mcp-atlassian ...

# View logs
# macOS
tail -n 20 -f ~/Library/Logs/Claude/mcp*.log
# Windows
type %APPDATA%\Claude\logs\mcp*.log | more
```

## Security

- Never share API tokens
- Keep .env files secure and private
- See [SECURITY.md](SECURITY.md) for best practices

## Contributing

We welcome contributions to MCP Atlassian! If you'd like to contribute:

1. Check out our [CONTRIBUTING.md](CONTRIBUTING.md) guide for detailed development setup instructions.
2. Make changes and submit a pull request.

We use pre-commit hooks for code quality and follow semantic versioning for releases.

## License

Licensed under MIT - see [LICENSE](LICENSE) file. This is not an official Atlassian product.

```

--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------

```markdown
# Security Policy

## Reporting Issues

Please report security vulnerabilities privately to the maintainers — for example via GitHub's "Report a vulnerability" feature on the repository's Security tab — rather than opening a public issue.

## Best Practices

1. **API Tokens**
   - Never commit tokens to version control
   - Rotate tokens regularly
   - Use minimal required permissions

2. **Environment Variables**
   - Keep .env files secure and private
   - Use separate tokens for development/production

3. **Access Control**
   - Regularly audit Confluence space access
   - Follow principle of least privilege

4. **OAuth Client Credentials**
   - Never share your client secret publicly
   - Be aware that printing client secrets to console output poses a security risk
   - Console output can be logged, screen-captured, or viewed by others with access to your environment
   - If client secrets are exposed, regenerate them immediately in your Atlassian developer console
   - Consider using environment variables or secure credential storage instead of direct console output

```

--------------------------------------------------------------------------------
/AGENTS.md:
--------------------------------------------------------------------------------

```markdown
# AGENTS

> **Audience**: LLM-driven engineering agents

This file provides guidance for autonomous coding agents working inside the **MCP Atlassian** repository.

---

## Repository map

| Path | Purpose |
| --- | --- |
| `src/mcp_atlassian/` | Library source code (Python ≥ 3.10) |
| `  ├─ jira/` | Jira client, mixins, and operations |
| `  ├─ confluence/` | Confluence client, mixins, and operations |
| `  ├─ models/` | Pydantic data models for API responses |
| `  ├─ servers/` | FastMCP server implementations |
| `  └─ utils/` | Shared utilities (auth, logging, SSL) |
| `tests/` | Pytest test suite with fixtures |
| `scripts/` | OAuth setup and testing scripts |

---

## Mandatory dev workflow

```bash
uv sync --frozen --all-extras --dev  # install dependencies
pre-commit install                    # setup hooks
pre-commit run --all-files           # Ruff + Prettier + Pyright
uv run pytest                        # run full test suite
```

*Tests must pass* and *lint/typing must be clean* before committing.

---

## Core MCP patterns

**Tool naming**: `{service}_{action}` (e.g., `jira_create_issue`)

**Architecture**:
- **Mixins**: Functionality split into focused mixins extending base clients
- **Models**: All data structures extend `ApiModel` base class
- **Auth**: Supports API tokens, PAT tokens, and OAuth 2.0

---

## Development rules

1. **Package management**: ONLY use `uv`, NEVER `pip`
2. **Branching**: NEVER work on `main`, always create feature branches
3. **Type safety**: All functions require type hints
4. **Testing**: New features need tests, bug fixes need regression tests
5. **Commits**: Use trailers for attribution, never mention tools/AI

---

## Code conventions

* **Language**: Python ≥ 3.10
* **Line length**: 88 characters maximum
* **Imports**: Absolute imports, sorted by ruff
* **Naming**: `snake_case` functions, `PascalCase` classes
* **Docstrings**: Google-style for all public APIs
* **Error handling**: Specific exceptions only

---

## Development guidelines

1. Do what has been asked; nothing more, nothing less
2. NEVER create files unless absolutely necessary
3. Always prefer editing existing files
4. Follow established patterns and maintain consistency
5. Run `pre-commit run --all-files` before committing
6. Fix bugs immediately when reported

---

## Quick reference

```bash
# Running the server
uv run mcp-atlassian                 # Start server
uv run mcp-atlassian --oauth-setup   # OAuth wizard
uv run mcp-atlassian -v              # Verbose mode

# Git workflow
git checkout -b feature/description   # New feature
git checkout -b fix/issue-description # Bug fix
git commit --trailer "Reported-by:<name>"      # Attribution
git commit --trailer "Github-Issue:#<number>"  # Issue reference
```

```

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------

```markdown
# Contributing to MCP Atlassian

Thank you for your interest in contributing to MCP Atlassian! This document provides guidelines and instructions for contributing to this project.

## Development Setup

1. Make sure you have Python 3.10+ installed
1. Install [uv](https://docs.astral.sh/uv/getting-started/installation/)
1. Fork the repository
1. Clone your fork: `git clone https://github.com/YOUR-USERNAME/mcp-atlassian.git`
1. Add the upstream remote: `git remote add upstream https://github.com/sooperset/mcp-atlassian.git`
1. Install dependencies:

    ```sh
    uv sync
    uv sync --frozen --all-extras --dev
    ```

1. Activate the virtual environment:

    __macOS and Linux__:

    ```sh
    source .venv/bin/activate
    ```

    __Windows__:

    ```powershell
    .venv\Scripts\activate.ps1
    ```

1. Set up pre-commit hooks:

    ```sh
    pre-commit install
    ```

1. Set up environment variables (copy from .env.example):

    ```bash
    cp .env.example .env
    ```

## Development Setup with local VSCode devcontainer

1. Clone your fork: `git clone https://github.com/YOUR-USERNAME/mcp-atlassian.git`
1. Add the upstream remote: `git remote add upstream https://github.com/sooperset/mcp-atlassian.git`
1. Open the project with VSCode and open with devcontainer
1. Add this bit of config to your `.vscode/settings.json`:

    ```json
    {
        "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
        "[python]": {
        "editor.defaultFormatter": "charliermarsh.ruff",
        "editor.formatOnSave": true
        }
    }
    ```

## Development Workflow

1. Create a feature or fix branch:

    ```sh
    git checkout -b feature/your-feature-name
    # or
    git checkout -b fix/issue-description
    ```

1. Make your changes

1. Ensure tests pass:

    ```sh
    uv run pytest

    # With coverage
    uv run pytest --cov=mcp_atlassian
    ```

1. Run code quality checks using pre-commit:

    ```bash
    pre-commit run --all-files
    ```

1. Commit your changes with clear, concise commit messages referencing issues when applicable

1. Submit a pull request to the main branch

## Code Style

- Run `pre-commit run --all-files` before committing
- Code quality tools (managed by pre-commit):
  - `ruff` for formatting and linting (88 char line limit)
  - `pyright` for type checking (preferred over mypy)
  - `prettier` for YAML/JSON formatting
  - Additional checks for trailing whitespace, file endings, YAML/TOML validity
- Follow type annotation patterns:
  - `type[T]` for class types
  - Union types with pipe syntax: `str | None`
  - Standard collection types with subscripts: `list[str]`, `dict[str, Any]`
- Add docstrings to all public modules, functions, classes, and methods using Google-style format:

  ```python
  def function_name(param1: str, param2: int) -> bool:
      """Summary of function purpose.

      More detailed description if needed.

      Args:
          param1: Description of param1
          param2: Description of param2

      Returns:
          Description of return value

      Raises:
          ValueError: When and why this exception is raised
      """
  ```

## Pull Request Process

1. Fill out the PR template with a description of your changes
2. Ensure all CI checks pass
3. Request review from maintainers
4. Address review feedback if requested

## Release Process

Releases follow semantic versioning:
- **MAJOR** version for incompatible API changes
- **MINOR** version for backwards-compatible functionality additions
- **PATCH** version for backwards-compatible bug fixes

---

Thank you for contributing to MCP Atlassian!

```

--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------

```python

```

--------------------------------------------------------------------------------
/tests/fixtures/__init__.py:
--------------------------------------------------------------------------------

```python

```

--------------------------------------------------------------------------------
/tests/unit/models/__init__.py:
--------------------------------------------------------------------------------

```python

```

--------------------------------------------------------------------------------
/tests/unit/servers/__init__.py:
--------------------------------------------------------------------------------

```python

```

--------------------------------------------------------------------------------
/tests/unit/confluence/__init__.py:
--------------------------------------------------------------------------------

```python
"""Unit tests for the Confluence module."""

```

--------------------------------------------------------------------------------
/tests/utils/__init__.py:
--------------------------------------------------------------------------------

```python
"""Test utilities for MCP Atlassian test suite."""

```

--------------------------------------------------------------------------------
/tests/unit/utils/__init__.py:
--------------------------------------------------------------------------------

```python
"""Unit tests for the MCP Atlassian utils module."""

```

--------------------------------------------------------------------------------
/.devcontainer/post-create.sh:
--------------------------------------------------------------------------------

```bash
#! /bin/bash
set -xe

uv venv
source .venv/bin/activate

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/servers/__init__.py:
--------------------------------------------------------------------------------

```python
"""MCP Atlassian Servers Package."""

from .main import main_mcp

__all__ = ["main_mcp"]

```

--------------------------------------------------------------------------------
/.devcontainer/post-start.sh:
--------------------------------------------------------------------------------

```bash
#! /bin/bash

set -xe

source .venv/bin/activate

uv sync --frozen --all-extras --dev
pre-commit install

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/exceptions.py:
--------------------------------------------------------------------------------

```python
class MCPAtlassianAuthenticationError(Exception):
    """Raised when Atlassian API authentication fails (401/403).

    Signals that the configured credentials (API token, PAT, or OAuth)
    were rejected by the Jira or Confluence API.
    """

    pass

```

--------------------------------------------------------------------------------
/.devcontainer/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
FROM "mcr.microsoft.com/devcontainers/python:3.10"
RUN curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="/usr/local/bin" sh
ENV UV_LINK_MODE=copy

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/jira/constants.py:
--------------------------------------------------------------------------------

```python
"""Constants specific to Jira operations."""

# Set of default fields returned by Jira read operations when no specific fields are requested.
DEFAULT_READ_JIRA_FIELDS: set[str] = {
    "summary",
    "description",
    "status",
    "assignee",
    "reporter",
    "labels",
    "priority",
    "created",
    "updated",
    "issuetype",
}

```

--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------

```yaml
blank_issues_enabled: false
contact_links:
  - name: "\U0001F4AC Ask a Question or Discuss"
    url: https://github.com/sooperset/mcp-atlassian/discussions # GitHub Discussions link (if enabled)
    about: Please ask and answer questions here, or start a general discussion.
  - name: "\U0001F4DA Read the Documentation"
    url: https://github.com/sooperset/mcp-atlassian/blob/main/README.md
    about: Check the README for setup and usage instructions.

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/preprocessing/__init__.py:
--------------------------------------------------------------------------------

```python
"""Preprocessing modules for handling text conversion between different formats."""

# Re-export the TextPreprocessor and other utilities
# Backward compatibility
from .base import BasePreprocessor
from .base import BasePreprocessor as TextPreprocessor
from .confluence import ConfluencePreprocessor
from .jira import JiraPreprocessor

__all__ = [
    "BasePreprocessor",
    "ConfluencePreprocessor",
    "JiraPreprocessor",
    "TextPreprocessor",  # For backwards compatibility
]

```

--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------

```yaml
name: Lint

on:
  pull_request:
    branches: [ main ]
  push:
    branches: [ main ]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[dev]

      - name: Run pre-commit
        uses: pre-commit/[email protected]

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/io.py:
--------------------------------------------------------------------------------

```python
"""I/O utility functions for MCP Atlassian."""

from mcp_atlassian.utils.env import is_env_extended_truthy


def is_read_only_mode() -> bool:
    """Report whether the server should refuse all write operations.

    In read-only mode every create/update/delete operation is blocked while
    read operations continue to work. This is useful when pointing at
    production Atlassian instances where accidental modifications must be
    prevented.

    Returns:
        True if the READ_ONLY_MODE environment flag is truthy, False otherwise.
    """
    flag_enabled = is_env_extended_truthy("READ_ONLY_MODE", "false")
    return flag_enabled

```

--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------

```markdown
<!-- Thank you for your contribution! Please provide a brief summary. -->

## Description

<!-- What does this PR do? Why is it needed? -->
<!-- Link related issues: Fixes #<issue_number> -->

Fixes: #

## Changes

<!-- Briefly list the key changes made. -->

-
-
-

## Testing

<!-- How did you test these changes? (e.g., unit tests, integration tests, manual checks) -->

- [ ] Unit tests added/updated
- [ ] Integration tests passed
- [ ] Manual checks performed: `[briefly describe]`

## Checklist

- [ ] Code follows project style guidelines (linting passes).
- [ ] Tests added/updated for changes.
- [ ] All tests pass locally.
- [ ] Documentation updated (if needed).

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/servers/context.py:
--------------------------------------------------------------------------------

```python
from __future__ import annotations

from dataclasses import dataclass
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from mcp_atlassian.confluence.config import ConfluenceConfig
    from mcp_atlassian.jira.config import JiraConfig


@dataclass(frozen=True)
class MainAppContext:
    """
    Context holding fully configured Jira and Confluence configurations
    loaded from environment variables at server startup.
    These configurations include any global/default authentication details.
    """

    # Fully-resolved Jira configuration; None when Jira is not configured.
    full_jira_config: JiraConfig | None = None
    # Fully-resolved Confluence configuration; None when Confluence is not configured.
    full_confluence_config: ConfluenceConfig | None = None
    # When True, write operations are disabled server-wide.
    read_only: bool = False
    # Names of tools to expose; None presumably means "no filtering" — confirm in server setup.
    enabled_tools: list[str] | None = None

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/confluence/__init__.py:
--------------------------------------------------------------------------------

```python
"""Confluence API integration module.

This module provides access to Confluence content through the Model Context Protocol.
"""

from .client import ConfluenceClient
from .comments import CommentsMixin
from .config import ConfluenceConfig
from .labels import LabelsMixin
from .pages import PagesMixin
from .search import SearchMixin
from .spaces import SpacesMixin
from .users import UsersMixin


class ConfluenceFetcher(
    SearchMixin, SpacesMixin, PagesMixin, CommentsMixin, LabelsMixin, UsersMixin
):
    """Main entry point for Confluence operations, providing backward compatibility.

    This class combines functionality from various mixins to maintain the same
    API as the original ConfluenceFetcher class. Each mixin contributes a
    focused slice of functionality (search, spaces, pages, comments, labels,
    and user lookups).
    """

    pass


__all__ = ["ConfluenceFetcher", "ConfluenceConfig", "ConfluenceClient"]

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/confluence/constants.py:
--------------------------------------------------------------------------------

```python
"""Constants specific to Confluence and CQL."""

# Based on https://developer.atlassian.com/cloud/confluence/cql-functions/#reserved-words
# List might need refinement based on actual parser behavior
# Using lowercase for case-insensitive matching
RESERVED_CQL_WORDS = {
    "after",
    "and",
    "as",
    "avg",
    "before",
    "begin",
    "by",
    "commit",
    "contains",
    "count",
    "distinct",
    "else",
    "empty",
    "end",
    "explain",
    "from",
    "having",
    "if",
    "in",
    "inner",
    "insert",
    "into",
    "is",
    "isnull",
    "left",
    "like",
    "limit",
    "max",
    "min",
    "not",
    "null",
    "or",
    "order",
    "outer",
    "right",
    "select",
    "sum",
    "then",
    "was",
    "where",
    "update",
}

# Add other Confluence-specific constants here if needed in the future.

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/date.py:
--------------------------------------------------------------------------------

```python
"""Utility functions for date operations."""

import logging
from datetime import datetime, timezone

import dateutil.parser

logger = logging.getLogger("mcp-atlassian")


def parse_date(date_str: str | int | None) -> datetime | None:
    """
    Parse a date string from any format to a datetime object for type consistency.

    The input string `date_str` accepts:
    - None
    - Epoch timestamp (only contains digits and is in milliseconds)
    - Other formats supported by `dateutil.parser` (ISO 8601, RFC 3339, etc.)

    Args:
        date_str: Date string

    Returns:
        Parsed date string or None if date_str is None / empty string
    """

    if not date_str:
        return None
    if isinstance(date_str, int) or date_str.isdigit():
        return datetime.fromtimestamp(int(date_str) / 1000, tz=timezone.utc)
    return dateutil.parser.parse(date_str)

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/__init__.py:
--------------------------------------------------------------------------------

```python
"""
Utility functions for the MCP Atlassian integration.
This package provides various utility functions used throughout the codebase.
"""

from .date import parse_date
from .io import is_read_only_mode

# Export lifecycle utilities
from .lifecycle import (
    ensure_clean_exit,
    setup_signal_handlers,
)
from .logging import setup_logging

# Export OAuth utilities
from .oauth import OAuthConfig, configure_oauth_session
from .ssl import SSLIgnoreAdapter, configure_ssl_verification
from .urls import is_atlassian_cloud_url

# Export all utility functions for backward compatibility.
# NOTE: "parse_iso8601_date" was previously listed here but is never imported
# by this module, which made `from mcp_atlassian.utils import *` raise
# AttributeError; it has been removed.
__all__ = [
    "SSLIgnoreAdapter",
    "configure_ssl_verification",
    "is_atlassian_cloud_url",
    "is_read_only_mode",
    "setup_logging",
    "parse_date",
    "OAuthConfig",
    "configure_oauth_session",
    "setup_signal_handlers",
    "ensure_clean_exit",
]

```

--------------------------------------------------------------------------------
/tests/integration/conftest.py:
--------------------------------------------------------------------------------

```python
"""Configuration for integration tests."""

import pytest


def pytest_configure(config):
    """Register the custom ``integration`` marker with pytest."""
    marker_description = (
        "integration: mark test as requiring integration with real services"
    )
    config.addinivalue_line("markers", marker_description)


def pytest_collection_modifyitems(config, items):
    """Skip integration tests unless explicitly requested."""
    if config.getoption("--integration", default=False):
        # Integration explicitly enabled: leave the collected items untouched.
        return
    # Integration tests are opt-in; mark every such item skipped by default.
    skip_marker = pytest.mark.skip(reason="Need --integration option to run")
    for item in items:
        if "integration" in item.keywords:
            item.add_marker(skip_marker)


def pytest_addoption(parser):
    """Add integration option to pytest.

    Registers the ``--integration`` command-line flag (default: off) so that
    integration tests can be opted into explicitly.
    """
    parser.addoption(
        "--integration",
        action="store_true",
        default=False,
        help="run integration tests",
    )
```

--------------------------------------------------------------------------------
/tests/unit/utils/test_decorators.py:
--------------------------------------------------------------------------------

```python
from unittest.mock import MagicMock

import pytest

from mcp_atlassian.utils.decorators import check_write_access


class DummyContext:
    """Minimal stand-in for the context object read by check_write_access."""

    def __init__(self, read_only):
        # Mirror the nesting the decorator traverses:
        # ctx.request_context.lifespan_context["app_lifespan_context"].read_only
        lifespan = {"app_lifespan_context": MagicMock(read_only=read_only)}
        self.request_context = MagicMock()
        self.request_context.lifespan_context = lifespan


@pytest.mark.asyncio
async def test_check_write_access_blocks_in_read_only():
    """The decorator must reject tool calls when the app is read-only."""

    @check_write_access
    async def sample_tool(ctx, value):
        return value * 2

    context = DummyContext(read_only=True)
    with pytest.raises(ValueError) as exc_info:
        await sample_tool(context, 3)
    assert "read-only mode" in str(exc_info.value)


@pytest.mark.asyncio
async def test_check_write_access_allows_in_writable():
    """The decorator must pass tool calls through when writes are allowed."""

    @check_write_access
    async def sample_tool(ctx, value):
        return value * 2

    context = DummyContext(read_only=False)
    assert await sample_tool(context, 4) == 8

```

--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------

```yaml
# .github/workflows/tests.yml

name: Run Tests

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    name: Run pytest on Python ${{ matrix.python-version }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11", "3.12"]

    steps:
    - name: Checkout repository
      uses: actions/checkout@v4

    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v5
      with:
        python-version: ${{ matrix.python-version }}

    - name: Install uv
      uses: astral-sh/setup-uv@v5
      with:
        version: "0.6.10"
        cache: true

    - name: Install dependencies
      run: uv sync --frozen --all-extras --dev

    - name: Run tests with pytest
      # Add -v for verbose output, helpful in CI
      # Add basic coverage reporting to terminal logs
      # Skip real API validation tests as they require credentials
      run: uv run pytest -v -k "not test_real_api_validation" --cov=src/mcp_atlassian --cov-report=term-missing

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/urls.py:
--------------------------------------------------------------------------------

```python
"""URL-related utility functions for MCP Atlassian."""

import re
from urllib.parse import urlparse


def is_atlassian_cloud_url(url: str) -> bool:
    """Determine if a URL belongs to Atlassian Cloud or Server/Data Center.

    Args:
        url: The URL to check

    Returns:
        True if the URL is for an Atlassian Cloud instance, False for
        Server/Data Center deployments (including empty or None input)
    """
    # `not url` already covers both None and the empty string; the previous
    # `url is None or not url` check was redundant.
    if not url:
        return False

    parsed_url = urlparse(url)
    hostname = parsed_url.hostname or ""

    # Localhost and private-range IP hosts are always Server/Data Center
    if (
        hostname == "localhost"
        or re.match(r"^127\.", hostname)
        or re.match(r"^192\.168\.", hostname)
        or re.match(r"^10\.", hostname)
        or re.match(r"^172\.(1[6-9]|2[0-9]|3[0-1])\.", hostname)
    ):
        return False

    # The standard check for Atlassian cloud domains
    return (
        ".atlassian.net" in hostname
        or ".jira.com" in hostname
        or ".jira-dev.com" in hostname
        or "api.atlassian.com" in hostname
    )

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/confluence/__init__.py:
--------------------------------------------------------------------------------

```python
"""
Confluence data models for the MCP Atlassian integration.
This package provides Pydantic models for Confluence API data structures,
organized by entity type.

Key models:
- ConfluencePage: Complete model for Confluence page content and metadata
- ConfluenceSpace: Space information and settings
- ConfluenceUser: User account details
- ConfluenceSearchResult: Container for Confluence search (CQL) results
- ConfluenceComment: Page and inline comments
- ConfluenceVersion: Content versioning information
"""

from .comment import ConfluenceComment
from .common import ConfluenceAttachment, ConfluenceUser
from .label import ConfluenceLabel
from .page import ConfluencePage, ConfluenceVersion
from .search import ConfluenceSearchResult
from .space import ConfluenceSpace
from .user_search import ConfluenceUserSearchResult, ConfluenceUserSearchResults

__all__ = [
    "ConfluenceUser",
    "ConfluenceAttachment",
    "ConfluenceSpace",
    "ConfluenceVersion",
    "ConfluenceComment",
    "ConfluenceLabel",
    "ConfluencePage",
    "ConfluenceSearchResult",
    "ConfluenceUserSearchResult",
    "ConfluenceUserSearchResults",
]

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/constants.py:
--------------------------------------------------------------------------------

```python
"""
Constants and default values for model conversions.

This module centralizes all default values and fallbacks used when
converting API responses to models, eliminating "magic strings" in
the codebase and providing a single source of truth for defaults.
"""

#
# Common defaults
#
EMPTY_STRING = ""
UNKNOWN = "Unknown"
UNASSIGNED = "Unassigned"
NONE_VALUE = "None"

#
# Jira defaults
#
JIRA_DEFAULT_ID = "0"
JIRA_DEFAULT_KEY = "UNKNOWN-0"

# Status defaults
JIRA_DEFAULT_STATUS = {
    "name": UNKNOWN,
    "id": JIRA_DEFAULT_ID,
}

# Priority defaults
JIRA_DEFAULT_PRIORITY = {
    "name": NONE_VALUE,
    "id": JIRA_DEFAULT_ID,
}

# Issue type defaults
JIRA_DEFAULT_ISSUE_TYPE = {
    "name": UNKNOWN,
    "id": JIRA_DEFAULT_ID,
}

# Project defaults
JIRA_DEFAULT_PROJECT = JIRA_DEFAULT_ID

#
# Confluence defaults
#
CONFLUENCE_DEFAULT_ID = "0"

# Space defaults
CONFLUENCE_DEFAULT_SPACE = {
    "key": EMPTY_STRING,
    "name": UNKNOWN,
    "id": CONFLUENCE_DEFAULT_ID,
}

# Version defaults
CONFLUENCE_DEFAULT_VERSION = {
    "number": 0,
    "when": EMPTY_STRING,
}

# Date/Time defaults
DEFAULT_TIMESTAMP = "1970-01-01T00:00:00.000+0000"

```

--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------

```json
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
	"name": "Python 3 Project",
	// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
	//"image": "mcr.microsoft.com/devcontainers/python:3",
	"build": {
		"dockerfile": "Dockerfile"
	},
	"containerEnv": {
		"PYTHONPATH": "./src"
	},
	// Features to add to the dev container. More info: https://containers.dev/features.
	"features": {
		"ghcr.io/devcontainers/features/node:1": {}
	},
	// Use 'forwardPorts' to make a list of ports inside the container available locally.
	"forwardPorts": [
		3000
	],
	// Use 'postCreateCommand' to run commands after the container is created.
	"postCreateCommand": ".devcontainer/post-create.sh",
	"postStartCommand": ".devcontainer/post-start.sh",
	// Configure tool-specific properties.
	"customizations": {
		"vscode": {
			"extensions": [
				"ms-python.mypy-type-checker",
				"charliermarsh.ruff"
			]
		}
	}
	// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
	// "remoteUser": "root"
}

```

--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------

```yaml
# .github/workflows/publish.yml
name: Publish MCP-Atlassian to PyPI

on:
  release:
    types: [published] # Triggers when a GitHub Release is published
  workflow_dispatch:   # Allows manual triggering

jobs:
  pypi-publish:
    name: Upload release to PyPI
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/mcp-atlassian # Link to your PyPI package
    permissions:
      id-token: write # Necessary for PyPI's trusted publishing

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Required for uv-dynamic-versioning to get tags

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10" # Or your minimum supported Python

      - name: Install uv
        uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true

      - name: Build package
        run: uv build

      - name: Publish package to PyPI
        run: uv publish --token ${{ secrets.PYPI_API_TOKEN }} dist/*
        # If using trusted publishing (recommended), remove --token and configure it in PyPI:
        # run: uv publish dist/*

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/__init__.py:
--------------------------------------------------------------------------------

```python
"""
Jira data models for the MCP Atlassian integration.

This package provides Pydantic models for Jira API data structures,
organized by entity type for better maintainability and clarity.
"""

from .agile import JiraBoard, JiraSprint
from .comment import JiraComment
from .common import (
    JiraAttachment,
    JiraIssueType,
    JiraPriority,
    JiraResolution,
    JiraStatus,
    JiraStatusCategory,
    JiraTimetracking,
    JiraUser,
)
from .issue import JiraIssue
from .link import (
    JiraIssueLink,
    JiraIssueLinkType,
    JiraLinkedIssue,
    JiraLinkedIssueFields,
)
from .project import JiraProject
from .search import JiraSearchResult
from .workflow import JiraTransition
from .worklog import JiraWorklog

__all__ = [
    # Common models
    "JiraUser",
    "JiraStatusCategory",
    "JiraStatus",
    "JiraIssueType",
    "JiraPriority",
    "JiraAttachment",
    "JiraResolution",
    "JiraTimetracking",
    # Entity-specific models
    "JiraComment",
    "JiraWorklog",
    "JiraProject",
    "JiraTransition",
    "JiraBoard",
    "JiraSprint",
    "JiraIssue",
    "JiraSearchResult",
    "JiraIssueLinkType",
    "JiraIssueLink",
    "JiraLinkedIssue",
    "JiraLinkedIssueFields",
]

```

--------------------------------------------------------------------------------
/tests/unit/utils/test_date.py:
--------------------------------------------------------------------------------

```python
"Tests for the date utility functions."

import pytest

from mcp_atlassian.utils import parse_date


def test_parse_date_invalid_input():
    """Test that parse_date raises ValueError for unparseable date strings."""
    with pytest.raises(ValueError):
        parse_date("invalid")


def test_parse_date_valid():
    """Test that parse_date returns the correct date for valid dates."""
    parsed = parse_date("2021-01-01")
    assert str(parsed) == "2021-01-01 00:00:00"


def test_parse_date_epoch_as_str():
    """Test that parse_date returns the correct date for epoch timestamps as str."""
    parsed = parse_date("1612156800000")
    assert str(parsed) == "2021-02-01 05:20:00+00:00"


def test_parse_date_epoch_as_int():
    """Test that parse_date returns the correct date for epoch timestamps as int."""
    parsed = parse_date(1612156800000)
    assert str(parsed) == "2021-02-01 05:20:00+00:00"


def test_parse_date_iso8601():
    """Test that parse_date returns the correct date for ISO 8601."""
    parsed = parse_date("2021-01-01T00:00:00Z")
    assert str(parsed) == "2021-01-01 00:00:00+00:00"


def test_parse_date_rfc3339():
    """Test that parse_date returns the correct date for RFC 3339."""
    parsed = parse_date("1937-01-01T12:00:27.87+00:20")
    assert str(parsed) == "1937-01-01 12:00:27.870000+00:20"

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/version.py:
--------------------------------------------------------------------------------

```python
from typing import Any

from ..base import ApiModel


class JiraVersion(ApiModel):
    """
    Model representing a Jira project version (fix version).
    """

    id: str
    name: str
    description: str | None = None
    startDate: str | None = None  # noqa: N815
    releaseDate: str | None = None  # noqa: N815
    released: bool = False
    archived: bool = False

    @classmethod
    def from_api_response(cls, data: dict[str, Any], **kwargs: Any) -> "JiraVersion":
        """Create JiraVersion from API response."""
        # Required identifiers are coerced to str; flags to bool; the rest
        # pass through unchanged (missing keys become None).
        text_values = {key: str(data.get(key, "")) for key in ("id", "name")}
        flag_values = {key: bool(data.get(key, False)) for key in ("released", "archived")}
        optional_values = {
            key: data.get(key) for key in ("description", "startDate", "releaseDate")
        }
        return cls(**text_values, **flag_values, **optional_values)

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simple dict for API output."""
        simplified: dict[str, Any] = {
            "id": self.id,
            "name": self.name,
            "released": self.released,
            "archived": self.archived,
        }
        # Optional fields are only emitted when they carry a value.
        for attr in ("description", "startDate", "releaseDate"):
            value = getattr(self, attr)
            if value is not None:
                simplified[attr] = value
        return simplified

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/confluence/label.py:
--------------------------------------------------------------------------------

```python
"""
Confluence label models.
This module provides Pydantic models for Confluence page labels.
"""

import logging
from typing import Any

from ..base import ApiModel
from ..constants import (
    CONFLUENCE_DEFAULT_ID,
    EMPTY_STRING,
)

logger = logging.getLogger(__name__)


class ConfluenceLabel(ApiModel):
    """
    Model representing a Confluence label.
    """

    id: str = CONFLUENCE_DEFAULT_ID
    name: str = EMPTY_STRING
    prefix: str = "global"
    label: str = EMPTY_STRING
    type: str = "label"

    @classmethod
    def from_api_response(
        cls, data: dict[str, Any], **kwargs: Any
    ) -> "ConfluenceLabel":
        """
        Create a ConfluenceLabel from a Confluence API response.

        Args:
            data: The label data from the Confluence API

        Returns:
            A ConfluenceLabel instance
        """
        if not data:
            return cls()

        # Pull each field with its model default as the fallback.
        fallbacks = {
            "id": CONFLUENCE_DEFAULT_ID,
            "name": EMPTY_STRING,
            "prefix": "global",
            "label": EMPTY_STRING,
            "type": "label",
        }
        values = {key: data.get(key, default) for key, default in fallbacks.items()}
        values["id"] = str(values["id"])  # API may return a numeric id
        return cls(**values)

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        return {
            "id": self.id,
            "name": self.name,
            "prefix": self.prefix,
            "label": self.label,
        }

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/confluence/space.py:
--------------------------------------------------------------------------------

```python
"""
Confluence space models.
This module provides Pydantic models for Confluence spaces.
"""

import logging
from typing import Any

from ..base import ApiModel
from ..constants import CONFLUENCE_DEFAULT_ID, EMPTY_STRING, UNKNOWN

logger = logging.getLogger(__name__)


class ConfluenceSpace(ApiModel):
    """
    Model representing a Confluence space.
    """

    id: str = CONFLUENCE_DEFAULT_ID
    key: str = EMPTY_STRING
    name: str = UNKNOWN
    type: str = "global"  # "global", "personal", etc.
    status: str = "current"  # "current", "archived", etc.

    @classmethod
    def from_api_response(
        cls, data: dict[str, Any], **kwargs: Any
    ) -> "ConfluenceSpace":
        """
        Create a ConfluenceSpace from a Confluence API response.

        Args:
            data: The space data from the Confluence API

        Returns:
            A ConfluenceSpace instance
        """
        if not data:
            return cls()

        # The id may come back numeric; everything else is taken verbatim
        # with the model defaults as fallbacks.
        space_id = str(data.get("id", CONFLUENCE_DEFAULT_ID))
        return cls(
            id=space_id,
            key=data.get("key", EMPTY_STRING),
            name=data.get("name", UNKNOWN),
            type=data.get("type", "global"),
            status=data.get("status", "current"),
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        # Note: the internal id is deliberately omitted from the simplified view.
        return {
            attr: getattr(self, attr) for attr in ("key", "name", "type", "status")
        }

```

--------------------------------------------------------------------------------
/tests/unit/jira/test_constants.py:
--------------------------------------------------------------------------------

```python
"""Tests for Jira constants.

Focused tests for Jira constants, validating correct values and business logic.
"""

from mcp_atlassian.jira.constants import DEFAULT_READ_JIRA_FIELDS


class TestDefaultReadJiraFields:
    """Test suite for DEFAULT_READ_JIRA_FIELDS constant."""

    def test_type_and_structure(self):
        """Test that DEFAULT_READ_JIRA_FIELDS is a set of strings."""
        assert isinstance(DEFAULT_READ_JIRA_FIELDS, set)
        assert len(DEFAULT_READ_JIRA_FIELDS) == 10
        for field in DEFAULT_READ_JIRA_FIELDS:
            assert isinstance(field, str)

    def test_contains_expected_jira_fields(self):
        """Test that DEFAULT_READ_JIRA_FIELDS contains the correct Jira fields."""
        expected_fields = {
            "summary",
            "description",
            "status",
            "assignee",
            "reporter",
            "labels",
            "priority",
            "created",
            "updated",
            "issuetype",
        }
        assert DEFAULT_READ_JIRA_FIELDS == expected_fields

    def test_essential_fields_present(self):
        """Test that essential Jira fields are included."""
        for essential in ("summary", "status", "issuetype"):
            assert essential in DEFAULT_READ_JIRA_FIELDS

    def test_field_format_validity(self):
        """Test that field names are valid for API usage."""
        for field in DEFAULT_READ_JIRA_FIELDS:
            # Fields should be non-empty, lowercase, no spaces,
            # and free of leading/trailing underscores.
            assert field and field.islower()
            assert " " not in field
            assert not (field.startswith("_") or field.endswith("_"))

```

--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------

```yaml
name: "\U0001F680 Feature Request"
description: Suggest an idea or enhancement for mcp-atlassian
title: "[Feature]: "
labels: ["enhancement"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for suggesting an idea! Please describe your proposal clearly.
  - type: textarea
    id: problem
    attributes:
      label: Is your feature request related to a problem? Please describe.
      description: A clear and concise description of what the problem is.
      placeholder: "e.g., I'm always frustrated when [...] because [...]"
    validations:
      required: true
  - type: textarea
    id: solution
    attributes:
      label: Describe the solution you'd like
      description: A clear and concise description of what you want to happen. How should the feature work?
      placeholder: "Add a new tool `confluence_move_page` that takes `page_id` and `target_parent_id` arguments..."
    validations:
      required: true
  - type: textarea
    id: alternatives
    attributes:
      label: Describe alternatives you've considered
      description: A clear and concise description of any alternative solutions or features you've considered.
      placeholder: "I considered modifying the `confluence_update_page` tool, but..."
  - type: textarea
    id: use-case
    attributes:
      label: Use Case
      description: How would this feature benefit users? Who is the target audience?
      placeholder: "This would allow AI agents to automatically organize documentation..."
  - type: textarea
    id: additional-context
    attributes:
      label: Additional Context
      description: Add any other context, mockups, or links about the feature request here.

```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
# ---- Builder stage ----
# Use a Python image with uv pre-installed
FROM ghcr.io/astral-sh/uv:python3.10-alpine AS uv

# Install the project into `/app`
WORKDIR /app

# Enable bytecode compilation
ENV UV_COMPILE_BYTECODE=1

# Copy from the cache instead of linking since it's a mounted volume
ENV UV_LINK_MODE=copy

# Generate proper TOML lockfile first
# NOTE(review): this regenerates uv.lock from pyproject.toml inside the image,
# so the build is not pinned to a repository-committed lockfile — confirm this
# is intentional.
RUN --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    --mount=type=bind,source=README.md,target=README.md \
    uv lock

# Install the project's dependencies using the lockfile
# (--no-install-project defers installing the package itself until sources exist)
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    --mount=type=bind,source=uv.lock,target=uv.lock \
    uv sync --frozen --no-install-project --no-dev --no-editable

# Then, add the rest of the project source code and install it
ADD . /app
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=uv.lock,target=uv.lock \
    uv sync --frozen --no-dev --no-editable

# Remove unnecessary files from the virtual environment before copying
RUN find /app/.venv -name '__pycache__' -type d -exec rm -rf {} + && \
    find /app/.venv -name '*.pyc' -delete && \
    find /app/.venv -name '*.pyo' -delete && \
    echo "Cleaned up .venv"

# ---- Runtime stage ----
# Final stage
FROM python:3.10-alpine

# Create a non-root user 'app'
RUN adduser -D -h /home/app -s /bin/sh app
WORKDIR /app
USER app

# Only the pre-built virtual environment is copied from the builder stage;
# the project is already installed into it.
COPY --from=uv --chown=app:app /app/.venv /app/.venv

# Place executables in the environment at the front of the path
ENV PATH="/app/.venv/bin:$PATH"

# For minimal OAuth setup without environment variables, use:
# docker run -e ATLASSIAN_OAUTH_ENABLE=true -p 8000:8000 your-image
# Then provide authentication via headers:
# Authorization: Bearer <your_oauth_token>
# X-Atlassian-Cloud-Id: <your_cloud_id>

# `mcp-atlassian` is the console script installed by the package into the venv.
ENTRYPOINT ["mcp-atlassian"]

```

--------------------------------------------------------------------------------
/tests/unit/utils/test_logging.py:
--------------------------------------------------------------------------------

```python
import io
import logging

from mcp_atlassian.utils.logging import setup_logging


def test_setup_logging_default_level():
    """Test setup_logging with default WARNING level"""
    logger = setup_logging()
    root_logger = logging.getLogger()

    # Both the returned logger and the root logger default to WARNING.
    assert logger.level == logging.WARNING
    assert root_logger.level == logging.WARNING

    # Exactly one handler is installed, carrying the expected format string.
    assert len(root_logger.handlers) == 1
    handler = root_logger.handlers[0]
    assert isinstance(handler, logging.Handler)
    assert handler.formatter._fmt == "%(levelname)s - %(name)s - %(message)s"


def test_setup_logging_custom_level():
    """Test setup_logging with custom DEBUG level"""
    logger = setup_logging(logging.DEBUG)

    # The returned logger and the root logger both pick up the custom level.
    assert logger.level == logging.DEBUG
    assert logging.getLogger().level == logging.DEBUG


def test_setup_logging_removes_existing_handlers():
    """Test that setup_logging removes existing handlers.

    After setup_logging() runs, the root logger must hold exactly one
    handler (the one setup_logging installed), and any handler attached
    beforehand must be gone.
    """
    # Attach a throwaway handler that setup_logging is expected to remove.
    root_logger = logging.getLogger()
    test_handler = logging.StreamHandler()
    root_logger.addHandler(test_handler)

    # Setup logging should remove existing handler
    setup_logging()

    # Verify only one handler remains and it is not the pre-existing one.
    assert len(root_logger.handlers) == 1
    assert test_handler not in root_logger.handlers


def test_setup_logging_logger_name():
    """Test that setup_logging creates logger with correct name"""
    assert setup_logging().name == "mcp-atlassian"


def test_setup_logging_logging_stream():
    """Test that setup_logging uses the correct stream"""
    buffer = io.StringIO()
    logger = setup_logging(logging.DEBUG, buffer)
    logger.debug("test")
    # The record must land in the supplied stream, fully formatted.
    assert buffer.getvalue() == f"DEBUG - {logger.name} - test\n"

```

--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------

```yaml
name: 'Manage Stale Issues and PRs'

on:
  schedule:
    # Runs daily at midnight UTC
    - cron: '0 0 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9
        with:
          # === Core Timing Settings ===
          days-before-stale: 14 # Start with 2 weeks
          days-before-close: -1 # Disable automatic closing initially

          # === Labels ===
          stale-issue-label: 'stale'
          stale-pr-label: 'stale'

          # === Bot Messages ===
          stale-issue-message: >
            This issue has been automatically marked as stale because it has not had
            recent activity for 14 days. It will be closed if no further activity occurs.
            Please leave a comment or remove the 'stale' label if you believe this issue is still relevant.
            Thank you for your contributions!
          stale-pr-message: >
            This pull request has been automatically marked as stale because it has not had
            recent activity for 14 days. It will be closed if no further activity occurs.
            Please leave a comment or remove the 'stale' label if you believe this PR is still relevant.
            Thank you for your contributions!

          # === Exemptions ===
          exempt-issue-labels: 'pinned,security,good first issue,help wanted,bug,enhancement,feature request,documentation,awaiting-user-feedback,needs-investigation'
          exempt-pr-labels: 'pinned,security,work-in-progress,awaiting-review,do-not-merge'
          exempt-all-milestones: true
          exempt-all-assignees: false # Consider if most issues should have assignees
          exempt-draft-pr: true

          # === Behavior Control ===
          remove-stale-when-updated: true
          operations-per-run: 30
          debug-only: false
          delete-branch: false

          # --- Statistics ---
          enable-statistics: true

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/jira/__init__.py:
--------------------------------------------------------------------------------

```python
"""Jira API module for mcp_atlassian.

This module provides various Jira API client implementations.
"""

# flake8: noqa

# Re-export the Jira class for backward compatibility
from atlassian.jira import Jira

from .client import JiraClient
from .comments import CommentsMixin
from .config import JiraConfig
from .epics import EpicsMixin
from .fields import FieldsMixin
from .formatting import FormattingMixin
from .issues import IssuesMixin
from .links import LinksMixin
from .projects import ProjectsMixin
from .search import SearchMixin
from .sprints import SprintsMixin
from .transitions import TransitionsMixin
from .users import UsersMixin
from .worklog import WorklogMixin
from .boards import BoardsMixin
from .attachments import AttachmentsMixin


class JiraFetcher(
    ProjectsMixin,
    FieldsMixin,
    FormattingMixin,
    TransitionsMixin,
    WorklogMixin,
    EpicsMixin,
    CommentsMixin,
    SearchMixin,
    IssuesMixin,
    UsersMixin,
    BoardsMixin,
    SprintsMixin,
    AttachmentsMixin,
    LinksMixin,
):
    """
    The main Jira client class providing access to all Jira operations.

    This class inherits from multiple mixins that provide specific functionality:
    - ProjectsMixin: Project-related operations
    - FieldsMixin: Field-related operations
    - FormattingMixin: Content formatting utilities
    - TransitionsMixin: Issue transition operations
    - WorklogMixin: Worklog operations
    - EpicsMixin: Epic operations
    - CommentsMixin: Comment operations
    - SearchMixin: Search operations
    - IssuesMixin: Issue operations
    - UsersMixin: User operations
    - BoardsMixin: Board operations
    - SprintsMixin: Sprint operations
    - AttachmentsMixin: Attachment download operations
    - LinksMixin: Issue link operations

    The class structure is designed to maintain backward compatibility while
    improving code organization and maintainability.

    Note: if multiple mixins define a method with the same name, Python's
    left-to-right method resolution order means the mixin listed first in
    the base list above provides the implementation.
    """

    pass


__all__ = ["JiraFetcher", "JiraConfig", "JiraClient", "Jira"]

```

--------------------------------------------------------------------------------
/tests/utils/base.py:
--------------------------------------------------------------------------------

```python
"""Base test classes and utilities for MCP Atlassian tests."""

from unittest.mock import AsyncMock, MagicMock

import pytest


class BaseMixinTest:
    """Base class for mixin tests with common setup patterns."""

    @pytest.fixture
    def mock_config(self):
        """Mock configuration for testing."""
        return MagicMock()

    @pytest.fixture
    def mock_client(self):
        """Mock client with common methods."""
        client = MagicMock()
        # Attach async stubs for the usual HTTP verb methods.
        for verb in ("get", "post", "put", "delete"):
            setattr(client, verb, AsyncMock())
        return client


class BaseAuthTest:
    """Base class for authentication-related tests."""

    @pytest.fixture
    def oauth_env_vars(self):
        """Standard OAuth environment variables."""
        return dict(
            ATLASSIAN_OAUTH_CLIENT_ID="test-client-id",
            ATLASSIAN_OAUTH_CLIENT_SECRET="test-client-secret",
            ATLASSIAN_OAUTH_REDIRECT_URI="http://localhost:8080/callback",
            ATLASSIAN_OAUTH_SCOPE="read:jira-work write:jira-work",
            ATLASSIAN_OAUTH_CLOUD_ID="test-cloud-id",
        )

    @pytest.fixture
    def basic_auth_env_vars(self):
        """Standard basic auth environment variables."""
        return dict(
            JIRA_URL="https://test.atlassian.net",
            JIRA_USERNAME="[email protected]",
            JIRA_API_TOKEN="test-token",
            CONFLUENCE_URL="https://test.atlassian.net/wiki",
            CONFLUENCE_USERNAME="[email protected]",
            CONFLUENCE_API_TOKEN="test-token",
        )


class BaseServerTest:
    """Base class for server-related tests."""

    @pytest.fixture
    def mock_request(self):
        """Mock FastMCP request object."""
        # A mock request whose .state attribute is itself a mock.
        return MagicMock(state=MagicMock())

    @pytest.fixture
    def mock_context(self):
        """Mock FastMCP context object."""
        return MagicMock()

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/tools.py:
--------------------------------------------------------------------------------

```python
"""Tool-related utility functions for MCP Atlassian."""

import logging
import os

logger = logging.getLogger(__name__)


def get_enabled_tools() -> list[str] | None:
    """Get the list of enabled tools from environment variable.

    This function reads and parses the ENABLED_TOOLS environment variable
    to determine which tools should be available in the server.

    The environment variable should contain a comma-separated list of tool names.
    Whitespace around tool names is stripped.

    Returns:
        List of enabled tool names if ENABLED_TOOLS is set and non-empty,
        None if ENABLED_TOOLS is not set or empty after stripping whitespace.

    Examples:
        ENABLED_TOOLS="tool1,tool2" -> ["tool1", "tool2"]
        ENABLED_TOOLS="tool1, tool2 , tool3" -> ["tool1", "tool2", "tool3"]
        ENABLED_TOOLS="" -> None
        ENABLED_TOOLS not set -> None
        ENABLED_TOOLS=" , " -> None
    """
    enabled_tools_str = os.getenv("ENABLED_TOOLS")
    if not enabled_tools_str:
        logger.debug("ENABLED_TOOLS environment variable not set or empty.")
        return None

    # Split by comma and strip whitespace
    tools = [tool.strip() for tool in enabled_tools_str.split(",")]
    # Filter out empty strings
    tools = [tool for tool in tools if tool]

    logger.debug(f"Parsed enabled tools from environment: {tools}")

    return tools if tools else None


def should_include_tool(tool_name: str, enabled_tools: list[str] | None) -> bool:
    """Check if a tool should be included based on the enabled tools list.

    Args:
        tool_name: The name of the tool to check.
        enabled_tools: List of enabled tool names, or None to include all tools.

    Returns:
        True if the tool should be included, False otherwise.
    """
    if enabled_tools is None:
        logger.debug(
            f"Including tool '{tool_name}' because enabled_tools filter is None."
        )
        return True
    should_include = tool_name in enabled_tools
    logger.debug(
        f"Tool '{tool_name}' included: {should_include} (based on enabled_tools: {enabled_tools})"
    )
    return should_include

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/confluence/search.py:
--------------------------------------------------------------------------------

```python
"""
Confluence search result models.
This module provides Pydantic models for Confluence search (CQL) results.
"""

import logging
from typing import Any

from pydantic import Field, model_validator

from ..base import ApiModel, TimestampMixin

# Import other necessary models using relative imports
from .page import ConfluencePage

logger = logging.getLogger(__name__)


class ConfluenceSearchResult(ApiModel, TimestampMixin):
    """
    Model representing a Confluence search (CQL) result.
    """

    total_size: int = 0
    start: int = 0
    limit: int = 0
    results: list[ConfluencePage] = Field(default_factory=list)
    cql_query: str | None = None
    search_duration: int | None = None

    @classmethod
    def from_api_response(
        cls, data: dict[str, Any], **kwargs: Any
    ) -> "ConfluenceSearchResult":
        """
        Create a ConfluenceSearchResult from a Confluence API response.

        Args:
            data: The search result data from the Confluence API
            **kwargs: Additional context parameters, including:
                - base_url: Base URL for constructing page URLs
                - is_cloud: Whether this is a cloud instance (affects URL format)

        Returns:
            A ConfluenceSearchResult instance
        """
        if not data:
            return cls()

        # Each search hit nests its page data under "content";
        # hits without content are skipped.
        pages = [
            ConfluencePage.from_api_response(content, **kwargs)
            for item in data.get("results", [])
            if (content := item.get("content"))
        ]

        return cls(
            total_size=data.get("totalSize", 0),
            start=data.get("start", 0),
            limit=data.get("limit", 0),
            results=pages,
            cql_query=data.get("cqlQuery"),
            search_duration=data.get("searchDuration"),
        )

    @model_validator(mode="after")
    def validate_search_result(self) -> "ConfluenceSearchResult":
        """Validate the search result and log warnings if needed."""
        if self.total_size > 0 and not self.results:
            logger.warning(
                "Search found %d pages but no content data was returned",
                self.total_size,
            )
        return self

```

--------------------------------------------------------------------------------
/tests/unit/utils/test_io.py:
--------------------------------------------------------------------------------

```python
"""Tests for the I/O utilities module."""

import os
from unittest.mock import patch

from mcp_atlassian.utils.io import is_read_only_mode


def test_is_read_only_mode_default():
    """Test that is_read_only_mode returns False by default."""
    # With READ_ONLY_MODE absent from the environment, expect False.
    with patch.dict(os.environ, clear=True):
        assert is_read_only_mode() is False


def test_is_read_only_mode_true():
    """Test that is_read_only_mode returns True when environment variable is set to true."""
    with patch.dict(os.environ, {"READ_ONLY_MODE": "true"}):
        assert is_read_only_mode() is True


def test_is_read_only_mode_yes():
    """Test that is_read_only_mode returns True when environment variable is set to yes."""
    with patch.dict(os.environ, {"READ_ONLY_MODE": "yes"}):
        assert is_read_only_mode() is True


def test_is_read_only_mode_one():
    """Test that is_read_only_mode returns True when environment variable is set to 1."""
    with patch.dict(os.environ, {"READ_ONLY_MODE": "1"}):
        assert is_read_only_mode() is True


def test_is_read_only_mode_on():
    """Test that is_read_only_mode returns True when environment variable is set to on."""
    with patch.dict(os.environ, {"READ_ONLY_MODE": "on"}):
        assert is_read_only_mode() is True


def test_is_read_only_mode_uppercase():
    """Test that is_read_only_mode is case-insensitive."""
    # "TRUE" must be treated the same as "true".
    with patch.dict(os.environ, {"READ_ONLY_MODE": "TRUE"}):
        assert is_read_only_mode() is True


def test_is_read_only_mode_false():
    """Test that is_read_only_mode returns False when environment variable is set to false."""
    with patch.dict(os.environ, {"READ_ONLY_MODE": "false"}):
        assert is_read_only_mode() is False

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/__init__.py:
--------------------------------------------------------------------------------

```python
"""
Pydantic models for Jira and Confluence API responses.

This package provides type-safe models for working with Atlassian API data,
including conversion methods from API responses to structured models and
simplified dictionaries for API responses.
"""

# Re-export models for easier imports
from .base import ApiModel, TimestampMixin

# Confluence models (Import from the new structure)
from .confluence import (
    ConfluenceAttachment,
    ConfluenceComment,
    ConfluenceLabel,
    ConfluencePage,
    ConfluenceSearchResult,
    ConfluenceSpace,
    ConfluenceUser,
    ConfluenceVersion,
)
from .constants import (  # noqa: F401 - Keep constants available
    CONFLUENCE_DEFAULT_ID,
    CONFLUENCE_DEFAULT_SPACE,
    CONFLUENCE_DEFAULT_VERSION,
    DEFAULT_TIMESTAMP,
    EMPTY_STRING,
    JIRA_DEFAULT_ID,
    JIRA_DEFAULT_ISSUE_TYPE,
    JIRA_DEFAULT_KEY,
    JIRA_DEFAULT_PRIORITY,
    JIRA_DEFAULT_PROJECT,
    JIRA_DEFAULT_STATUS,
    NONE_VALUE,
    UNASSIGNED,
    UNKNOWN,
)

# Jira models (Keep existing imports)
from .jira import (
    JiraAttachment,
    JiraBoard,
    JiraComment,
    JiraIssue,
    JiraIssueType,
    JiraPriority,
    JiraProject,
    JiraResolution,
    JiraSearchResult,
    JiraSprint,
    JiraStatus,
    JiraStatusCategory,
    JiraTimetracking,
    JiraTransition,
    JiraUser,
    JiraWorklog,
)

# Additional models will be added as they are implemented

__all__ = [
    # Base models
    "ApiModel",
    "TimestampMixin",
    # Constants
    "CONFLUENCE_DEFAULT_ID",
    "CONFLUENCE_DEFAULT_SPACE",
    "CONFLUENCE_DEFAULT_VERSION",
    "DEFAULT_TIMESTAMP",
    "EMPTY_STRING",
    "JIRA_DEFAULT_ID",
    "JIRA_DEFAULT_ISSUE_TYPE",
    "JIRA_DEFAULT_KEY",
    "JIRA_DEFAULT_PRIORITY",
    "JIRA_DEFAULT_PROJECT",
    "JIRA_DEFAULT_STATUS",
    "NONE_VALUE",
    "UNASSIGNED",
    "UNKNOWN",
    # Jira models
    "JiraUser",
    "JiraStatus",
    "JiraStatusCategory",
    "JiraIssueType",
    "JiraPriority",
    "JiraComment",
    "JiraIssue",
    "JiraProject",
    "JiraResolution",
    "JiraTransition",
    "JiraWorklog",
    "JiraSearchResult",
    "JiraAttachment",
    "JiraTimetracking",
    "JiraBoard",
    "JiraSprint",
    # Confluence models
    "ConfluenceUser",
    "ConfluenceSpace",
    "ConfluencePage",
    "ConfluenceComment",
    "ConfluenceLabel",
    "ConfluenceVersion",
    "ConfluenceSearchResult",
    "ConfluenceAttachment",
]

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/confluence/utils.py:
--------------------------------------------------------------------------------

```python
"""Utility functions specific to Confluence operations."""

import logging

from .constants import RESERVED_CQL_WORDS

logger = logging.getLogger(__name__)


def quote_cql_identifier_if_needed(identifier: str) -> str:
    """
    Quote a Confluence identifier for safe use as a CQL literal when required.

    An identifier is quoted when it:
    - starts with '~' (personal space key),
    - matches a reserved CQL word (case-insensitive),
    - starts with a digit, or
    - contains '"' or '\\' (these are escaped before quoting).

    Args:
        identifier: The identifier string (e.g., a space key).

    Returns:
        The identifier, quoted and escaped if any rule applies; otherwise
        the original string unchanged.
    """
    identifier_lower = identifier.lower()

    # Determine the (first) reason this identifier needs quoting, if any.
    reason: str | None = None
    if identifier.startswith("~"):
        reason = "starts with ~"
    elif identifier_lower in RESERVED_CQL_WORDS:
        reason = "reserved word"
    elif identifier and identifier[0].isdigit():
        reason = "starts with digit"
    elif '"' in identifier or "\\" in identifier:
        reason = "contains quotes/backslashes"

    # Add more rules here if other characters prove problematic
    # (e.g., spaces or hyphens).

    if reason is None:
        logger.debug(f"Identifier '{identifier}' does not need quoting.")
        return identifier

    logger.debug(f"Identifier '{identifier}' needs quoting ({reason}).")
    # Escape internal backslashes first, then double quotes, then wrap.
    escaped_identifier = identifier.replace("\\", "\\\\").replace('"', '\\"')
    quoted_escaped = f'"{escaped_identifier}"'
    logger.debug(f"Quoted and escaped identifier: {quoted_escaped}")
    return quoted_escaped

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/confluence/labels.py:
--------------------------------------------------------------------------------

```python
"""Module for Confluence label operations."""

import logging

from ..models.confluence import ConfluenceLabel
from .client import ConfluenceClient

logger = logging.getLogger("mcp-atlassian")


class LabelsMixin(ConfluenceClient):
    """Mixin for Confluence label operations."""

    def get_page_labels(self, page_id: str) -> list[ConfluenceLabel]:
        """
        Get all labels for a specific page.

        Args:
            page_id: The ID of the page to get labels from

        Returns:
            List of ConfluenceLabel models containing label content and metadata

        Raises:
            Exception: If there is an error getting the labels
        """
        try:
            labels_response = self.confluence.get_page_labels(page_id=page_id)

            # Default to an empty list so a payload without "results" does
            # not crash with a TypeError when iterating None.
            label_models = [
                ConfluenceLabel.from_api_response(
                    label_data,
                    base_url=self.config.url,
                )
                for label_data in labels_response.get("results", [])
            ]

            return label_models

        except Exception as e:
            logger.error(f"Failed fetching labels from page {page_id}: {str(e)}")
            raise Exception(
                f"Failed fetching labels from page {page_id}: {str(e)}"
            ) from e

    def add_page_label(self, page_id: str, name: str) -> list[ConfluenceLabel]:
        """
        Add a label to a Confluence page.

        Args:
            page_id: The ID of the page to update
            name: The name of the label

        Returns:
            The page's updated list of labels after the addition

        Raises:
            Exception: If there is an error adding the label
        """
        try:
            logger.debug(f"Adding label with name '{name}' to page {page_id}")

            # The API call's own response is not useful here; the refreshed
            # label list below is the authoritative result.
            self.confluence.set_page_label(page_id=page_id, label=name)

            return self.get_page_labels(page_id)
        except Exception as e:
            logger.error(f"Error adding label '{name}' to page {page_id}: {str(e)}")
            raise Exception(
                f"Failed to add label '{name}' to page {page_id}: {str(e)}"
            ) from e
```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/confluence/comment.py:
--------------------------------------------------------------------------------

```python
"""
Confluence comment models.
This module provides Pydantic models for Confluence page comments.
"""

import logging
from typing import Any

from ..base import ApiModel, TimestampMixin
from ..constants import (
    CONFLUENCE_DEFAULT_ID,
    EMPTY_STRING,
)

# Import other necessary models using relative imports
from .common import ConfluenceUser

logger = logging.getLogger(__name__)


class ConfluenceComment(ApiModel, TimestampMixin):
    """
    Model representing a comment on a Confluence page.
    """

    id: str = CONFLUENCE_DEFAULT_ID
    title: str | None = None
    body: str = EMPTY_STRING
    created: str = EMPTY_STRING
    updated: str = EMPTY_STRING
    author: ConfluenceUser | None = None
    type: str = "comment"  # "comment", "page", etc.

    @classmethod
    def from_api_response(
        cls, data: dict[str, Any], **kwargs: Any
    ) -> "ConfluenceComment":
        """
        Build a ConfluenceComment from a Confluence API payload.

        Args:
            data: Raw comment data from the Confluence API.

        Returns:
            A populated ConfluenceComment, or a default instance when
            data is empty.
        """
        if not data:
            return cls()

        # Prefer the direct author; otherwise fall back to version.by.
        author = None
        author_data = data.get("author")
        if author_data:
            author = ConfluenceUser.from_api_response(author_data)
        else:
            version_data = data.get("version")
            if version_data:
                by_data = version_data.get("by")
                if by_data:
                    author = ConfluenceUser.from_api_response(by_data)

        # Use the container's title when the comment itself has none.
        title = data.get("title")
        if not title:
            container = data.get("container")
            if container:
                title = container.get("title")

        return cls(
            id=str(data.get("id", CONFLUENCE_DEFAULT_ID)),
            title=title,
            body=data.get("body", {}).get("view", {}).get("value", EMPTY_STRING),
            created=data.get("created", EMPTY_STRING),
            updated=data.get("updated", EMPTY_STRING),
            author=author,
            type=data.get("type", "comment"),
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to a simplified dictionary for API responses."""
        simplified: dict[str, Any] = {
            "id": self.id,
            "body": self.body,
            "created": self.format_timestamp(self.created),
            "updated": self.format_timestamp(self.updated),
        }

        if self.title:
            simplified["title"] = self.title
        if self.author:
            simplified["author"] = self.author.display_name

        return simplified

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/comment.py:
--------------------------------------------------------------------------------

```python
"""
Jira comment models.

This module provides Pydantic models for Jira comments.
"""

import logging
from typing import Any

from ..base import ApiModel, TimestampMixin
from ..constants import (
    EMPTY_STRING,
    JIRA_DEFAULT_ID,
)
from .common import JiraUser

logger = logging.getLogger(__name__)


class JiraComment(ApiModel, TimestampMixin):
    """
    Model representing a comment on a Jira issue.
    """

    id: str = JIRA_DEFAULT_ID
    body: str = EMPTY_STRING
    created: str = EMPTY_STRING
    updated: str = EMPTY_STRING
    author: JiraUser | None = None

    @classmethod
    def from_api_response(cls, data: dict[str, Any], **kwargs: Any) -> "JiraComment":
        """
        Build a JiraComment from a Jira API payload.

        Args:
            data: Raw comment data from the Jira API.

        Returns:
            A populated JiraComment, or a default instance for empty or
            non-dict input.
        """
        if not data:
            return cls()
        if not isinstance(data, dict):
            logger.debug("Received non-dictionary data, returning default instance")
            return cls()

        # Author sub-object, when present.
        author_data = data.get("author")
        author = JiraUser.from_api_response(author_data) if author_data else None

        # Normalize the ID to a string (API may return an int).
        raw_id = data.get("id", JIRA_DEFAULT_ID)
        comment_id = str(raw_id) if raw_id is not None else raw_id

        # Body may be plain text/HTML or an Atlassian Document Format dict;
        # the ADF case is stringified rather than fully parsed.
        body = data.get("body")
        if isinstance(body, dict) and "content" in body:
            body_content = str(body.get("content", EMPTY_STRING))
        elif body:
            body_content = str(body)
        else:
            body_content = EMPTY_STRING

        return cls(
            id=comment_id,
            body=body_content,
            created=str(data.get("created", EMPTY_STRING)),
            updated=str(data.get("updated", EMPTY_STRING)),
            author=author,
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to a simplified dictionary for API responses."""
        simplified: dict[str, Any] = {"body": self.body}

        if self.author:
            simplified["author"] = self.author.to_simplified_dict()
        if self.created:
            simplified["created"] = self.created
        if self.updated:
            simplified["updated"] = self.updated

        return simplified

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/lifecycle.py:
--------------------------------------------------------------------------------

```python
"""Lifecycle management utilities for graceful shutdown and signal handling."""

import logging
import signal
import sys
import threading
from typing import Any

logger = logging.getLogger("mcp-atlassian.utils.lifecycle")

# Global shutdown event for signal-safe handling
_shutdown_event = threading.Event()


def setup_signal_handlers() -> None:
    """Install handlers so termination signals trigger a graceful shutdown.

    SIGTERM and SIGINT are always handled; SIGPIPE is handled where the
    platform provides it (it occurs when a parent process closes the pipe,
    which matters for Docker containers run with -i).
    """

    def _on_shutdown_signal(signum: int, frame: Any) -> None:
        """Minimal, signal-safe handler: only flag the shutdown event.

        Signal handlers must avoid complex operations, so all real
        shutdown work happens outside the handler.
        """
        _shutdown_event.set()

    # Always-available termination signals.
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, _on_shutdown_signal)

    # SIGPIPE is not defined on every platform (e.g., Windows).
    try:
        signal.signal(signal.SIGPIPE, _on_shutdown_signal)
        logger.debug("SIGPIPE handler registered")
    except AttributeError:
        logger.debug("SIGPIPE not available on this platform")


def ensure_clean_exit() -> None:
    """Flush stdout and stderr before exit.

    In containerized environments buffered output can be lost if it is not
    flushed before the process exits. Streams may already be closed by the
    parent process (notably on Windows or as a child process), so flushing
    is attempted defensively.
    """
    logger.info("Server stopped, flushing output streams...")

    # Flush each standard stream, tolerating closed/redirected streams.
    for stream_name in ("stdout", "stderr"):
        stream = getattr(sys, stream_name)
        try:
            if hasattr(stream, "closed") and not stream.closed:
                stream.flush()
        except (ValueError, OSError, AttributeError) as e:
            logger.debug(f"Could not flush {stream_name}: {e}")

    logger.debug("Output streams flushed, exiting gracefully")

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/workflow.py:
--------------------------------------------------------------------------------

```python
"""
Jira workflow models.

This module provides Pydantic models for Jira workflow entities,
such as transitions between statuses.
"""

import logging
from typing import Any

from ..base import ApiModel
from ..constants import (
    EMPTY_STRING,
    JIRA_DEFAULT_ID,
)
from .common import JiraStatus

logger = logging.getLogger(__name__)


class JiraTransition(ApiModel):
    """
    Model representing a Jira issue status transition.

    Captures the transition's identity, its destination status, and the
    metadata flags Jira reports for it.
    """

    id: str = JIRA_DEFAULT_ID
    name: str = EMPTY_STRING
    to_status: JiraStatus | None = None
    has_screen: bool = False
    is_global: bool = False
    is_initial: bool = False
    is_conditional: bool = False

    @classmethod
    def from_api_response(cls, data: dict[str, Any], **kwargs: Any) -> "JiraTransition":
        """
        Build a JiraTransition from a Jira API payload.

        Args:
            data: Raw transition data from the Jira API.

        Returns:
            A populated JiraTransition, or a default instance for empty
            or non-dict input.
        """
        if not data:
            return cls()
        if not isinstance(data, dict):
            logger.debug("Received non-dictionary data, returning default instance")
            return cls()

        # Destination status, only when "to" is a non-empty dict.
        to_payload = data.get("to")
        to_status = (
            JiraStatus.from_api_response(to_payload)
            if to_payload and isinstance(to_payload, dict)
            else None
        )

        # Normalize the ID to a string (API may return an int).
        raw_id = data.get("id", JIRA_DEFAULT_ID)
        transition_id = str(raw_id) if raw_id is not None else raw_id

        return cls(
            id=transition_id,
            name=str(data.get("name", EMPTY_STRING)),
            to_status=to_status,
            has_screen=bool(data.get("hasScreen", False)),
            is_global=bool(data.get("isGlobal", False)),
            is_initial=bool(data.get("isInitial", False)),
            is_conditional=bool(data.get("isConditional", False)),
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to a simplified dictionary for API responses."""
        simplified: dict[str, Any] = {
            "id": self.id,
            "name": self.name,
        }

        if self.to_status:
            simplified["to_status"] = self.to_status.to_simplified_dict()

        return simplified

```

--------------------------------------------------------------------------------
/tests/unit/utils/test_tools.py:
--------------------------------------------------------------------------------

```python
"""Tests for tool utility functions."""

import os
from unittest.mock import patch

from mcp_atlassian.utils.tools import get_enabled_tools, should_include_tool


def test_get_enabled_tools_not_set():
    """get_enabled_tools returns None when ENABLED_TOOLS is absent."""
    with patch.dict(os.environ, {}, clear=True):
        result = get_enabled_tools()
    assert result is None


def test_get_enabled_tools_empty_string():
    """An empty ENABLED_TOOLS value yields None."""
    with patch.dict(os.environ, {"ENABLED_TOOLS": ""}, clear=True):
        result = get_enabled_tools()
    assert result is None


def test_get_enabled_tools_only_whitespace():
    """A whitespace-only ENABLED_TOOLS value yields None."""
    with patch.dict(os.environ, {"ENABLED_TOOLS": "   "}, clear=True):
        result = get_enabled_tools()
    assert result is None


def test_get_enabled_tools_only_commas():
    """A commas-only ENABLED_TOOLS value yields None."""
    with patch.dict(os.environ, {"ENABLED_TOOLS": ",,,,"}, clear=True):
        result = get_enabled_tools()
    assert result is None


def test_get_enabled_tools_whitespace_and_commas():
    """Whitespace and commas with no tool names yield None."""
    with patch.dict(os.environ, {"ENABLED_TOOLS": " , , , "}, clear=True):
        result = get_enabled_tools()
    assert result is None


def test_get_enabled_tools_single_tool():
    """A single tool name is parsed into a one-element list."""
    with patch.dict(os.environ, {"ENABLED_TOOLS": "tool1"}, clear=True):
        result = get_enabled_tools()
    assert result == ["tool1"]


def test_get_enabled_tools_multiple_tools():
    """Comma-separated tool names are parsed in order."""
    with patch.dict(os.environ, {"ENABLED_TOOLS": "tool1,tool2,tool3"}, clear=True):
        result = get_enabled_tools()
    assert result == ["tool1", "tool2", "tool3"]


def test_get_enabled_tools_with_whitespace():
    """Whitespace around tool names is stripped during parsing."""
    env_value = " tool1 , tool2 , tool3 "
    with patch.dict(os.environ, {"ENABLED_TOOLS": env_value}, clear=True):
        result = get_enabled_tools()
    assert result == ["tool1", "tool2", "tool3"]


def test_should_include_tool_none_enabled():
    """With no filter list, every tool is included."""
    assert should_include_tool("any_tool", None) is True


def test_should_include_tool_tool_enabled():
    """A tool present in the filter list is included."""
    filter_list = ["tool1", "tool2", "tool3"]
    assert should_include_tool("tool2", filter_list) is True


def test_should_include_tool_tool_not_enabled():
    """A tool absent from the filter list is excluded."""
    filter_list = ["tool1", "tool2", "tool3"]
    assert should_include_tool("tool4", filter_list) is False

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/jira/boards.py:
--------------------------------------------------------------------------------

```python
"""Module for Jira boards operations."""

import logging
from typing import Any

import requests

from ..models.jira import JiraBoard
from .client import JiraClient

logger = logging.getLogger("mcp-jira")


class BoardsMixin(JiraClient):
    """Mixin for Jira agile board operations."""

    def get_all_agile_boards(
        self,
        board_name: str | None = None,
        project_key: str | None = None,
        board_type: str | None = None,
        start: int = 0,
        limit: int = 50,
    ) -> list[dict[str, Any]]:
        """
        Get boards from Jira by name, project key, or type.

        Args:
            board_name: The name of board, support fuzzy search
            project_key: Project key (e.g., PROJECT-123)
            board_type: Board type (e.g., scrum, kanban)
            start: Start index
            limit: Maximum number of boards to return

        Returns:
            List of board information; an empty list on any error
        """
        try:
            boards = self.jira.get_all_agile_boards(
                board_name=board_name,
                project_key=project_key,
                board_type=board_type,
                start=start,
                limit=limit,
            )
            return boards.get("values", []) if isinstance(boards, dict) else []
        except requests.HTTPError as e:
            # e.response can be None (e.g., errors raised before a response
            # exists), so guard before dereferencing it for error detail.
            detail = e.response.content if e.response is not None else str(e)
            logger.error(f"Error getting all agile boards: {str(detail)}")
            return []
        except Exception as e:
            logger.error(f"Error getting all agile boards: {str(e)}")
            return []

    def get_all_agile_boards_model(
        self,
        board_name: str | None = None,
        project_key: str | None = None,
        board_type: str | None = None,
        start: int = 0,
        limit: int = 50,
    ) -> list[JiraBoard]:
        """
        Get boards as JiraBoard models from Jira by name, project key, or type.

        Args:
            board_name: The name of board, support fuzzy search
            project_key: Project key (e.g., PROJECT-123)
            board_type: Board type (e.g., scrum, kanban)
            start: Start index
            limit: Maximum number of boards to return

        Returns:
            List of JiraBoard models with board information
        """
        boards = self.get_all_agile_boards(
            board_name=board_name,
            project_key=project_key,
            board_type=board_type,
            start=start,
            limit=limit,
        )
        return [JiraBoard.from_api_response(board) for board in boards]

```

--------------------------------------------------------------------------------
/tests/unit/confluence/test_constants.py:
--------------------------------------------------------------------------------

```python
"""Tests for Confluence constants.

Focused tests for Confluence constants, validating correct values and business logic.
"""

from mcp_atlassian.confluence.constants import RESERVED_CQL_WORDS


class TestReservedCqlWords:
    """Test suite for the RESERVED_CQL_WORDS constant."""

    def test_type_and_structure(self):
        """RESERVED_CQL_WORDS is a set of exactly 41 strings."""
        assert isinstance(RESERVED_CQL_WORDS, set)
        for word in RESERVED_CQL_WORDS:
            assert isinstance(word, str)
        assert len(RESERVED_CQL_WORDS) == 41

    def test_contains_expected_cql_words(self):
        """The constant matches the exact expected word set."""
        expected_words = set(
            "after and as avg before begin by commit contains count distinct "
            "else empty end explain from having if in inner insert into is "
            "isnull left like limit max min not null or order outer right "
            "select sum then was where update".split()
        )
        assert RESERVED_CQL_WORDS == expected_words

    def test_sql_keywords_coverage(self):
        """Common SQL keywords are all present."""
        sql_keywords = set(
            "select from where and or not in like is null order by having count".split()
        )
        assert sql_keywords.issubset(RESERVED_CQL_WORDS)

    def test_cql_specific_keywords(self):
        """CQL-specific keywords are all present."""
        for word in ("contains", "after", "before", "was", "empty"):
            assert word in RESERVED_CQL_WORDS

    def test_word_format_validity(self):
        """Every reserved word is lowercase, alphabetic, and >= 2 chars."""
        for word in RESERVED_CQL_WORDS:
            # Words should be non-empty, lowercase, alphabetic only
            assert word and word.islower() and word.isalpha()
            assert len(word) >= 2  # Shortest valid words like "as", "by"
            assert " " not in word and "\t" not in word
```

--------------------------------------------------------------------------------
/tests/unit/confluence/test_utils.py:
--------------------------------------------------------------------------------

```python
"""Tests for the Confluence utility functions."""

from mcp_atlassian.confluence.constants import RESERVED_CQL_WORDS
from mcp_atlassian.confluence.utils import quote_cql_identifier_if_needed


class TestCQLQuoting:
    """Tests for the CQL identifier quoting utility."""

    def test_quote_personal_space_key(self):
        """Personal space keys (leading '~') are wrapped in quotes."""
        for key in ("~username", "~admin", "~user.name"):
            assert quote_cql_identifier_if_needed(key) == f'"{key}"'

    def test_quote_reserved_words(self):
        """Reserved CQL words are quoted regardless of case."""
        for word in list(RESERVED_CQL_WORDS)[:5]:  # subset for brevity
            for variant in (word, word.upper(), word.capitalize()):
                assert quote_cql_identifier_if_needed(variant) == f'"{variant}"'

    def test_quote_numeric_keys(self):
        """Keys starting with a digit are quoted."""
        for key in ("123space", "42", "1test"):
            assert quote_cql_identifier_if_needed(key) == f'"{key}"'

    def test_quote_special_characters(self):
        """Quotes and backslashes are escaped, then the key is quoted."""
        assert quote_cql_identifier_if_needed('my"space') == '"my\\"space"'
        assert quote_cql_identifier_if_needed("test\\space") == '"test\\\\space"'

        # Combined quotes and backslashes in one identifier.
        result = quote_cql_identifier_if_needed('quote"and\\slash')
        assert result == '"quote\\"and\\\\slash"'

        # Spot-check the structure of the escaped result.
        assert result.startswith('"')  # opening quote
        assert result.endswith('"')  # closing quote
        assert "\\\\" in result  # escaped backslash
        assert '\\"' in result  # escaped quote

    def test_no_quote_regular_keys(self):
        """Plain space keys pass through unchanged."""
        for key in ("DEV", "MYSPACE", "documentation"):
            assert quote_cql_identifier_if_needed(key) == key
```

--------------------------------------------------------------------------------
/tests/unit/utils/test_masking.py:
--------------------------------------------------------------------------------

```python
"""Test the masking utility functions."""

import logging
from unittest.mock import patch

from mcp_atlassian.utils.logging import log_config_param, mask_sensitive


class TestMaskSensitive:
    """Tests for the mask_sensitive helper."""

    def test_none_value(self):
        """None is rendered as the literal placeholder."""
        assert mask_sensitive(None) == "Not Provided"

    def test_short_value(self):
        """Values too short to partially reveal are fully masked."""
        assert mask_sensitive("abc") == "***"
        assert mask_sensitive("abcdef") == "******"
        assert mask_sensitive("abcdefgh", keep_chars=4) == "********"

    def test_normal_value(self):
        """Long values keep the first/last keep_chars and mask the middle."""
        assert mask_sensitive("abcdefghijkl", keep_chars=2) == "ab********kl"
        assert mask_sensitive("abcdefghijkl") == "abcd****ijkl"
        expected = "abcde" + "*" * 16 + "vwxyz"
        assert mask_sensitive("abcdefghijklmnopqrstuvwxyz", keep_chars=5) == expected


class TestLogConfigParam:
    """Tests for the log_config_param helper."""

    @patch("mcp_atlassian.utils.logging.logging.Logger")
    def test_normal_param(self, logger_mock):
        """Plain parameters are logged verbatim."""
        log_config_param(logger_mock, "Jira", "URL", "https://jira.example.com")
        logger_mock.info.assert_called_once_with("Jira URL: https://jira.example.com")

    @patch("mcp_atlassian.utils.logging.logging.Logger")
    def test_none_param(self, logger_mock):
        """None values are logged as 'Not Provided'."""
        log_config_param(logger_mock, "Jira", "Projects Filter", None)
        logger_mock.info.assert_called_once_with("Jira Projects Filter: Not Provided")

    @patch("mcp_atlassian.utils.logging.logging.Logger")
    def test_sensitive_param(self, logger_mock):
        """Sensitive values are masked before logging."""
        token = "abcdefghijklmnop"
        log_config_param(logger_mock, "Jira", "API Token", token, sensitive=True)
        logger_mock.info.assert_called_once_with("Jira API Token: abcd********mnop")

    def test_log_config_param_masks_proxy_url(self, caplog):
        """Credentials in proxy URLs are masked when sensitive=True."""
        logger = logging.getLogger("test-proxy-logger")
        proxy_url = "socks5://user:[email protected]:1080"
        with caplog.at_level(logging.INFO, logger="test-proxy-logger"):
            log_config_param(logger, "Jira", "SOCKS_PROXY", proxy_url, sensitive=True)
        # The middle of the URL must be masked; user:pass must not appear.
        matching = [
            rec
            for rec in caplog.records
            if rec.message.startswith("Jira SOCKS_PROXY: sock")
            and rec.message.endswith("1080")
            and "user:pass" not in rec.message
        ]
        assert matching

```

--------------------------------------------------------------------------------
/.github/workflows/docker-publish.yml:
--------------------------------------------------------------------------------

```yaml
name: Docker Publish

on:
  push:
    tags:
      - 'v*.*.*' # Trigger on version tag pushes (e.g., v0.7.1)
  pull_request:
    branches:
      - main     # Trigger on PRs targeting main branch
  workflow_dispatch: {} # Allow manual triggering from the Actions tab

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read  # Needed for checkout
      packages: write # Needed to push packages to ghcr.io

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
        with:
          platforms: arm64,amd64

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Extract metadata (tags, labels) for Docker
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            # Create a branch tag (e.g. 'main') on branch pushes — NOTE(review): the 'on:' triggers above include no branch-push event, so this rule only takes effect if one is added
            type=ref,event=branch,branch=main
            # Create 'X.Y.Z', 'X.Y', 'X' tags on tag push (e.g., v1.2.3 -> 1.2.3, 1.2, 1)
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            # Only create 'latest' tag if it's a tag push AND it's a non-prerelease version tag (no '-')
            type=raw,value=latest,enable=${{ github.ref_type == 'tag' && startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-') }}
            # Create a tag for PRs (e.g., pr-123) - useful for testing PR builds
            type=ref,event=pr
            # For manual workflow runs, use branch name with 'manual' suffix
            type=raw,value={{branch}}-manual,enable=${{ github.event_name == 'workflow_dispatch' }}

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        # Only login for pushes and manual runs, not for PRs
        if: github.event_name != 'pull_request'
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          # Only push for non-PR events
          push: ${{ github.event_name != 'pull_request' }}
          # Use tags and labels from metadata action
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # Enable Docker layer caching
          cache-from: type=gha
          cache-to: type=gha,mode=max

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/env.py:
--------------------------------------------------------------------------------

```python
"""Environment variable utility functions for MCP Atlassian."""

import os


def is_env_truthy(env_var_name: str, default: str = "") -> bool:
    """Return True when the named environment variable holds a truthy value.

    Truthy values are 'true', '1', and 'yes' (case-insensitive). This is the
    standard check used by most MCP environment variables.

    Args:
        env_var_name: Name of the environment variable to check
        default: Value used when the variable is not set

    Returns:
        True if the resolved value is truthy, False otherwise
    """
    value = os.getenv(env_var_name, default)
    return value.lower() in {"true", "1", "yes"}


def is_env_extended_truthy(env_var_name: str, default: str = "") -> bool:
    """Return True when the named variable holds an extended truthy value.

    Accepts 'true', '1', 'yes', 'y', and 'on' (case-insensitive) — a wider
    set than is_env_truthy. Used for READ_ONLY_MODE and similar flags.

    Args:
        env_var_name: Name of the environment variable to check
        default: Value used when the variable is not set

    Returns:
        True if the resolved value is truthy, False otherwise
    """
    value = os.getenv(env_var_name, default)
    return value.lower() in {"true", "1", "yes", "y", "on"}


def is_env_ssl_verify(env_var_name: str, default: str = "true") -> bool:
    """Check an SSL-verification flag with a secure-by-default policy.

    Verification stays enabled unless the variable is explicitly set to
    'false', '0', or 'no' (case-insensitive). Used for SSL_VERIFY variables.

    Args:
        env_var_name: Name of the environment variable to check
        default: Value used when the variable is not set

    Returns:
        True unless explicitly set to a false value
    """
    value = os.getenv(env_var_name, default)
    return value.lower() not in {"false", "0", "no"}


def get_custom_headers(env_var_name: str) -> dict[str, str]:
    """Parse custom headers from an environment variable of comma-separated key=value pairs.

    Pairs without an '=' or with an empty key are skipped. Values may contain
    further '=' characters; only the first one separates key from value.

    Args:
        env_var_name: Name of the environment variable to read

    Returns:
        Dictionary of parsed headers

    Examples:
        >>> # With CUSTOM_HEADERS="X-Custom=value1,X-Other=value2"
        >>> get_custom_headers("CUSTOM_HEADERS")
        {'X-Custom': 'value1', 'X-Other': 'value2'}
        >>> # With unset environment variable
        >>> get_custom_headers("UNSET_VAR")
        {}
    """
    header_string = os.getenv(env_var_name)
    if not header_string or not header_string.strip():
        return {}

    headers: dict[str, str] = {}
    for pair in header_string.split(","):
        pair = pair.strip()
        # Skip blank entries and malformed pairs lacking a separator.
        if not pair or "=" not in pair:
            continue

        key, value = pair.split("=", 1)  # Split on first = only
        key = key.strip()
        if key:  # Only add if key is not empty
            headers[key] = value.strip()

    return headers

```

--------------------------------------------------------------------------------
/tests/unit/jira/test_boards.py:
--------------------------------------------------------------------------------

```python
"""Tests for the Jira BoardMixin"""

from unittest.mock import MagicMock

import pytest
import requests

from mcp_atlassian.jira import JiraConfig
from mcp_atlassian.jira.boards import BoardsMixin
from mcp_atlassian.models.jira import JiraBoard


@pytest.fixture
def mock_config():
    """Provide a MagicMock standing in for a JiraConfig instance."""
    cfg = MagicMock(spec=JiraConfig)
    cfg.url = "https://test.atlassian.net"
    cfg.username = "[email protected]"
    cfg.api_token = "test-token"
    cfg.auth_type = "pat"
    return cfg


@pytest.fixture
def boards_mixin(mock_config):
    """Provide a BoardsMixin whose underlying Jira client is mocked."""
    instance = BoardsMixin(config=mock_config)
    instance.jira = MagicMock()
    return instance


@pytest.fixture
def mock_boards():
    """Return a canned paginated boards payload shaped like the Jira Agile API."""
    board_one = {
        "id": 1000,
        "self": "https://test.atlassian.net/rest/agile/1.0/board/1000",
        "name": " Board One",
        "type": "scrum",
    }
    board_two = {
        "id": 1001,
        "self": "https://test.atlassian.net/rest/agile/1.0/board/1001",
        "name": " Board Two",
        "type": "kanban",
    }
    return {
        "maxResults": 2,
        "startAt": 0,
        "total": 2,
        "isLast": True,
        "values": [board_one, board_two],
    }


def test_get_all_agile_boards(boards_mixin, mock_boards):
    """The mixin should return the raw 'values' list from the API payload."""
    boards_mixin.jira.get_all_agile_boards.return_value = mock_boards

    assert boards_mixin.get_all_agile_boards() == mock_boards["values"]


def test_get_all_agile_boards_exception(boards_mixin):
    """A generic API failure should yield an empty list, not propagate."""
    boards_mixin.jira.get_all_agile_boards.side_effect = Exception("API Error")

    assert boards_mixin.get_all_agile_boards() == []
    boards_mixin.jira.get_all_agile_boards.assert_called_once()


def test_get_all_agile_boards_http_error(boards_mixin):
    """An HTTPError from the client should yield an empty list."""
    error_response = MagicMock(content="API Error content")
    boards_mixin.jira.get_all_agile_boards.side_effect = requests.HTTPError(
        response=error_response
    )

    assert boards_mixin.get_all_agile_boards() == []
    boards_mixin.jira.get_all_agile_boards.assert_called_once()


def test_get_all_agile_boards_non_dict_response(boards_mixin):
    """A payload that is not a dict should be treated as having no boards."""
    boards_mixin.jira.get_all_agile_boards.return_value = "not a dict"

    assert boards_mixin.get_all_agile_boards() == []
    boards_mixin.jira.get_all_agile_boards.assert_called_once()


def test_get_all_agile_boards_model(boards_mixin, mock_boards):
    """The model variant should wrap every raw board in a JiraBoard."""
    boards_mixin.jira.get_all_agile_boards.return_value = mock_boards

    expected = [JiraBoard.from_api_response(raw) for raw in mock_boards["values"]]
    assert boards_mixin.get_all_agile_boards_model() == expected

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/search.py:
--------------------------------------------------------------------------------

```python
"""
Jira search result models.

This module provides Pydantic models for Jira search (JQL) results.
"""

import logging
from typing import Any

from pydantic import Field, model_validator

from ..base import ApiModel
from .issue import JiraIssue

logger = logging.getLogger(__name__)


class JiraSearchResult(ApiModel):
    """
    Model representing a Jira search (JQL) result.

    Holds the pagination metadata reported by the API along with the parsed
    issues. Pagination fields are set to -1 when the API response omits them
    or supplies non-numeric values.
    """

    total: int = 0
    start_at: int = 0
    max_results: int = 0
    issues: list[JiraIssue] = Field(default_factory=list)

    @classmethod
    def from_api_response(
        cls, data: dict[str, Any], **kwargs: Any
    ) -> "JiraSearchResult":
        """
        Create a JiraSearchResult from a Jira API response.

        Args:
            data: The search result data from the Jira API
            **kwargs: Additional arguments; ``requested_fields`` is forwarded
                to each ``JiraIssue.from_api_response`` call

        Returns:
            A JiraSearchResult instance
        """
        if not data:
            return cls()

        if not isinstance(data, dict):
            logger.debug("Received non-dictionary data, returning default instance")
            return cls()

        def _to_int(raw: Any) -> int:
            """Coerce a pagination value to int, using -1 for missing/invalid."""
            if raw is None:
                return -1
            try:
                return int(raw)
            except (ValueError, TypeError):
                return -1

        # requested_fields is loop-invariant; resolve it once up front.
        requested_fields = kwargs.get("requested_fields")
        issues: list[JiraIssue] = []
        issues_data = data.get("issues", [])
        if isinstance(issues_data, list):
            issues = [
                JiraIssue.from_api_response(
                    issue_data, requested_fields=requested_fields
                )
                for issue_data in issues_data
                if issue_data
            ]

        return cls(
            total=_to_int(data.get("total")),
            start_at=_to_int(data.get("startAt")),
            max_results=_to_int(data.get("maxResults")),
            issues=issues,
        )

    @model_validator(mode="after")
    def validate_search_result(self) -> "JiraSearchResult":
        """
        Validate the search result.

        Currently a no-op hook kept as an extension point for pagination
        consistency checks.

        Returns:
            The validated JiraSearchResult instance
        """
        return self

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        return {
            "total": self.total,
            "start_at": self.start_at,
            "max_results": self.max_results,
            "issues": [issue.to_simplified_dict() for issue in self.issues],
        }

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/preprocessing/confluence.py:
--------------------------------------------------------------------------------

```python
"""Confluence-specific text preprocessing module."""

import logging
import shutil
import tempfile
from pathlib import Path

from md2conf.converter import (
    ConfluenceConverterOptions,
    ConfluenceStorageFormatConverter,
    elements_from_string,
    elements_to_string,
    markdown_to_html,
)

from .base import BasePreprocessor

logger = logging.getLogger("mcp-atlassian")


class ConfluencePreprocessor(BasePreprocessor):
    """Handles text preprocessing for Confluence content."""

    def __init__(self, base_url: str) -> None:
        """
        Initialize the Confluence text preprocessor.

        Args:
            base_url: Base URL for Confluence API
        """
        super().__init__(base_url=base_url)

    def markdown_to_confluence_storage(
        self, markdown_content: str, *, enable_heading_anchors: bool = False
    ) -> str:
        """
        Convert Markdown content to Confluence storage format (XHTML).

        Args:
            markdown_content: Markdown text to convert
            enable_heading_anchors: Whether to enable automatic heading
                anchor generation (default: False)

        Returns:
            Confluence storage format (XHTML) string
        """
        try:
            # Step 1: render the markdown as plain HTML.
            intermediate_html = markdown_to_html(markdown_content)

            # md2conf's converter needs a working directory for any
            # potential attachments; give it a throwaway one.
            scratch_dir = tempfile.mkdtemp()
            try:
                dom_root = elements_from_string(intermediate_html)

                converter_options = ConfluenceConverterOptions(
                    ignore_invalid_url=True,
                    heading_anchors=enable_heading_anchors,
                    render_mermaid=False,
                )
                converter = ConfluenceStorageFormatConverter(
                    options=converter_options,
                    path=Path(scratch_dir) / "temp.md",
                    root_dir=Path(scratch_dir),
                    page_metadata={},
                )

                # Step 2: rewrite the DOM in place into storage format,
                # then serialize it back to a string.
                converter.visit(dom_root)
                return str(elements_to_string(dom_root))
            finally:
                # Always remove the scratch directory, even on failure.
                shutil.rmtree(scratch_dir, ignore_errors=True)

        except Exception as exc:
            logger.error(f"Error converting markdown to Confluence storage format: {exc}")
            logger.exception(exc)

            # Fallback: wrap the raw HTML rendering in a paragraph so we
            # still return a valid storage-format document.
            fallback_html = markdown_to_html(markdown_content)
            return str(f"""<p>{fallback_html}</p>""")

    # Confluence-specific methods can be added here

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/project.py:
--------------------------------------------------------------------------------

```python
"""
Jira project models.

This module provides Pydantic models for Jira projects.
"""

import logging
from typing import Any

from ..base import ApiModel
from ..constants import (
    EMPTY_STRING,
    JIRA_DEFAULT_PROJECT,
    UNKNOWN,
)
from .common import JiraUser

logger = logging.getLogger(__name__)


class JiraProject(ApiModel):
    """
    Model representing a Jira project.

    Carries the basic project attributes: key, name, and optional
    description, lead, URL, category, and avatar.
    """

    id: str = JIRA_DEFAULT_PROJECT
    key: str = EMPTY_STRING
    name: str = UNKNOWN
    description: str | None = None
    lead: JiraUser | None = None
    url: str | None = None
    category_name: str | None = None
    avatar_url: str | None = None

    @classmethod
    def from_api_response(cls, data: dict[str, Any], **kwargs: Any) -> "JiraProject":
        """
        Create a JiraProject from a Jira API response.

        Args:
            data: The project data from the Jira API

        Returns:
            A JiraProject instance
        """
        if not data:
            return cls()
        if not isinstance(data, dict):
            logger.debug("Received non-dictionary data, returning default instance")
            return cls()

        lead_data = data.get("lead")
        lead = JiraUser.from_api_response(lead_data) if lead_data else None

        # Prefer the largest avatar (48x48) when avatars are present.
        avatars = data.get("avatarUrls")
        avatar_url = avatars.get("48x48") if isinstance(avatars, dict) else None

        project_category = data.get("projectCategory")
        category_name = (
            project_category.get("name")
            if isinstance(project_category, dict)
            else None
        )

        # The API may return the id as an int; normalize it to a string.
        raw_id = data.get("id", JIRA_DEFAULT_PROJECT)
        project_id = str(raw_id) if raw_id is not None else raw_id

        return cls(
            id=project_id,
            key=str(data.get("key", EMPTY_STRING)),
            name=str(data.get("name", UNKNOWN)),
            description=data.get("description"),
            lead=lead,
            url=data.get("self"),  # API URL for the project
            category_name=category_name,
            avatar_url=avatar_url,
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        simplified: dict[str, Any] = {
            "key": self.key,
            "name": self.name,
        }
        if self.description:
            simplified["description"] = self.description
        if self.category_name:
            simplified["category"] = self.category_name
        if self.avatar_url:
            simplified["avatar_url"] = self.avatar_url
        if self.lead:
            simplified["lead"] = self.lead.to_simplified_dict()
        return simplified

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/jira/worklog.py:
--------------------------------------------------------------------------------

```python
"""
Jira worklog models.

This module provides Pydantic models for Jira worklogs (time tracking entries).
"""

import logging
from typing import Any

from ..base import ApiModel, TimestampMixin
from ..constants import (
    EMPTY_STRING,
    JIRA_DEFAULT_ID,
)
from .common import JiraUser

logger = logging.getLogger(__name__)


class JiraWorklog(ApiModel, TimestampMixin):
    """
    Model representing a Jira worklog entry.

    Captures who logged time on an issue, how much time was spent, and the
    related timestamps.
    """

    id: str = JIRA_DEFAULT_ID
    author: JiraUser | None = None
    comment: str | None = None
    created: str = EMPTY_STRING
    updated: str = EMPTY_STRING
    started: str = EMPTY_STRING
    time_spent: str = EMPTY_STRING
    time_spent_seconds: int = 0

    @classmethod
    def from_api_response(cls, data: dict[str, Any], **kwargs: Any) -> "JiraWorklog":
        """
        Create a JiraWorklog from a Jira API response.

        Args:
            data: The worklog data from the Jira API

        Returns:
            A JiraWorklog instance
        """
        if not data:
            return cls()
        if not isinstance(data, dict):
            logger.debug("Received non-dictionary data, returning default instance")
            return cls()

        author_data = data.get("author")
        author = JiraUser.from_api_response(author_data) if author_data else None

        # Normalize the id to a string (the API may return an int).
        raw_id = data.get("id", JIRA_DEFAULT_ID)
        worklog_id = str(raw_id) if raw_id is not None else raw_id

        # Coerce the seconds counter defensively; fall back to 0 on bad data.
        raw_seconds = data.get("timeSpentSeconds", 0)
        try:
            seconds = int(raw_seconds) if raw_seconds is not None else 0
        except (ValueError, TypeError):
            seconds = 0

        return cls(
            id=worklog_id,
            author=author,
            comment=data.get("comment"),
            created=str(data.get("created", EMPTY_STRING)),
            updated=str(data.get("updated", EMPTY_STRING)),
            started=str(data.get("started", EMPTY_STRING)),
            time_spent=str(data.get("timeSpent", EMPTY_STRING)),
            time_spent_seconds=seconds,
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        simplified: dict[str, Any] = {
            "time_spent": self.time_spent,
            "time_spent_seconds": self.time_spent_seconds,
        }
        if self.author:
            simplified["author"] = self.author.to_simplified_dict()
        if self.comment:
            simplified["comment"] = self.comment
        if self.started:
            simplified["started"] = self.started
        if self.created:
            simplified["created"] = self.created
        if self.updated:
            simplified["updated"] = self.updated
        return simplified

```

--------------------------------------------------------------------------------
/tests/unit/utils/test_urls.py:
--------------------------------------------------------------------------------

```python
"""Tests for the URL utilities module."""

from mcp_atlassian.utils.urls import is_atlassian_cloud_url


def test_is_atlassian_cloud_url_empty():
    """Empty or missing URLs are never classified as cloud."""
    for empty_value in ("", None):
        assert is_atlassian_cloud_url(empty_value) is False


def test_is_atlassian_cloud_url_cloud():
    """Atlassian Cloud domains are recognized regardless of path or scheme."""
    cloud_urls = [
        # Standard Atlassian Cloud URLs
        "https://example.atlassian.net",
        "https://company.atlassian.net/wiki",
        "https://subdomain.atlassian.net/jira",
        "http://other.atlassian.net",
        # Jira Cloud specific domains
        "https://company.jira.com",
        "https://team.jira-dev.com",
    ]
    for url in cloud_urls:
        assert is_atlassian_cloud_url(url) is True


def test_is_atlassian_cloud_url_multi_cloud_oauth():
    """api.atlassian.com URLs (Multi-Cloud OAuth) are classified as cloud."""
    oauth_urls = (
        "https://api.atlassian.com/ex/jira/abc123/rest/api/2/",
        "https://api.atlassian.com/ex/confluence/xyz789/",
        "http://api.atlassian.com/ex/jira/test/",
        "https://api.atlassian.com",
    )
    for url in oauth_urls:
        assert is_atlassian_cloud_url(url) is True


def test_is_atlassian_cloud_url_server():
    """Server/Data Center hostnames are not classified as cloud."""
    server_urls = (
        "https://jira.example.com",
        "https://confluence.company.org",
        "https://jira.internal",
    )
    for url in server_urls:
        assert is_atlassian_cloud_url(url) is False


def test_is_atlassian_cloud_url_localhost():
    """localhost URLs are not classified as cloud."""
    local_urls = (
        "http://localhost",
        "http://localhost:8080",
        "https://localhost/jira",
    )
    for url in local_urls:
        assert is_atlassian_cloud_url(url) is False


def test_is_atlassian_cloud_url_ip_addresses():
    """IP-based URLs (loopback and private ranges) are not cloud."""
    ip_urls = (
        "http://127.0.0.1",
        "http://127.0.0.1:8080",
        "https://192.168.1.100",
        "https://10.0.0.1",
        "https://172.16.0.1",
        "https://172.31.255.254",
    )
    for url in ip_urls:
        assert is_atlassian_cloud_url(url) is False


def test_is_atlassian_cloud_url_with_protocols():
    """The URL scheme does not affect classification; only the host matters."""
    for url in (
        "https://example.atlassian.net",
        "http://example.atlassian.net",
        "ftp://example.atlassian.net",  # URL parsing still works
    ):
        assert is_atlassian_cloud_url(url) is True

```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
[project]
name = "mcp-atlassian"
dynamic = ["version"]
description = "The Model Context Protocol (MCP) Atlassian integration is an open-source implementation that bridges Atlassian products (Jira and Confluence) with AI language models following Anthropic's MCP specification. This project enables secure, contextual AI interactions with Atlassian tools while maintaining data privacy and security."
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
    "atlassian-python-api>=4.0.0",
    "requests[socks]>=2.31.0",
    "beautifulsoup4>=4.12.3",
    "httpx>=0.28.0",
    "mcp>=1.8.0,<2.0.0",
    "fastmcp>=2.3.4,<2.4.0",
    "python-dotenv>=1.0.1",
    "markdownify>=0.11.6",
    "markdown>=3.7.0",
    "markdown-to-confluence>=0.3.0,<0.4.0",
    "pydantic>=2.10.6",
    "trio>=0.29.0",
    "click>=8.1.7",
    "uvicorn>=0.27.1",
    "starlette>=0.37.1",
    "thefuzz>=0.22.1",
    "python-dateutil>=2.9.0.post0",
    "types-python-dateutil>=2.9.0.20241206",
    "keyring>=25.6.0",
    "cachetools>=5.0.0",
    "types-cachetools>=5.5.0.20240820",
]
[[project.authors]]
name = "sooperset"
email = "[email protected]"

[build-system]
requires = ["hatchling", "uv-dynamic-versioning>=0.7.0"]
build-backend = "hatchling.build"

[project.scripts]
mcp-atlassian = "mcp_atlassian:main"

[dependency-groups]
dev = [
    "uv>=0.1.0",
    "pytest>=8.0.0",
    "pytest-cov>=4.1.0",
    "pytest-asyncio>=0.23.0",
    "pre-commit>=3.6.0",
    "ruff>=0.3.0",
    "black>=24.2.0",
    "mypy>=1.8.0",
    "mcp[cli]>=1.3.0"
]

[tool.ruff]
exclude = [
    ".bzr",
    ".direnv",
    ".eggs",
    ".git",
    ".git-rewrite",
    ".hg",
    ".mypy_cache",
    ".nox",
    ".pants.d",
    ".pytype",
    ".ruff_cache",
    ".svn",
    ".tox",
    ".venv",
    "__pypackages__",
    "_build",
    "buck-out",
    "build",
    "dist",
    "node_modules",
    "venv",
]
line-length = 88
indent-width = 4
target-version = "py310"

[tool.ruff.lint]
select = ["E", "F", "B", "W", "I", "N", "UP", "ANN", "S", "BLE", "FBT", "C4", "DTZ", "T10", "EM", "ISC", "ICN"]
ignore = ["ANN401", "EM101"]
fixable = ["ALL"]
unfixable = []
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"

[tool.ruff.lint.per-file-ignores]
"tests/**/*.py" = ["S", "ANN", "B017"]
"tests/fixtures/*.py" = ["E501"]
"src/mcp_atlassian/server.py" = ["E501"]

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
skip-magic-trailing-comma = false
line-ending = "auto"

[tool.mypy]
python_version = "3.10"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
check_untyped_defs = true
disallow_untyped_decorators = false
no_implicit_optional = true
warn_redundant_casts = true
warn_unused_ignores = false
warn_no_return = true
warn_unreachable = true
strict_equality = true
strict_optional = true
disallow_subclassing_any = true
warn_incomplete_stub = true
exclude = "^src/"
explicit_package_bases = true

[[tool.mypy.overrides]]
module = "tests.*"
disallow_untyped_defs = false
check_untyped_defs = false

[[tool.mypy.overrides]]
module = "atlassian.*"
ignore_missing_imports = true

[[tool.mypy.overrides]]
module = "markdownify.*"
ignore_missing_imports = true

[[tool.mypy.overrides]]
module = "src.mcp_atlassian.*"
disallow_untyped_defs = false

[tool.hatch.version]
source = "uv-dynamic-versioning"

[tool.uv-dynamic-versioning]
vcs = "git"
style = "pep440"
bump = true
fallback-version = "0.0.0"

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/models/confluence/common.py:
--------------------------------------------------------------------------------

```python
"""
Common Confluence entity models.
This module provides Pydantic models for common Confluence entities like users
and attachments.
"""

import logging
import warnings
from typing import Any

from ..base import ApiModel
from ..constants import (
    UNASSIGNED,
)

logger = logging.getLogger(__name__)


class ConfluenceUser(ApiModel):
    """
    Model representing a Confluence user.
    """

    account_id: str | None = None
    display_name: str = UNASSIGNED
    email: str | None = None
    profile_picture: str | None = None
    is_active: bool = True
    locale: str | None = None

    @property
    def name(self) -> str:
        """
        Alias for display_name, kept for test compatibility.

        Deprecated: Use display_name instead.
        """
        warnings.warn(
            "The 'name' property is deprecated. Use 'display_name' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.display_name

    @classmethod
    def from_api_response(cls, data: dict[str, Any], **kwargs: Any) -> "ConfluenceUser":
        """
        Create a ConfluenceUser from a Confluence API response.

        Args:
            data: The user data from the Confluence API

        Returns:
            A ConfluenceUser instance
        """
        if not data:
            return cls()

        # The profile picture payload carries a relative "path"; expose it as-is.
        picture_data = data.get("profilePicture")
        picture_path = picture_data.get("path") if picture_data else None

        return cls(
            account_id=data.get("accountId"),
            display_name=data.get("displayName", UNASSIGNED),
            email=data.get("email"),
            profile_picture=picture_path,
            is_active=data.get("accountStatus") == "active",
            locale=data.get("locale"),
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        return {
            "display_name": self.display_name,
            "email": self.email,
            "profile_picture": self.profile_picture,
        }


class ConfluenceAttachment(ApiModel):
    """
    Model representing a Confluence attachment.
    """

    id: str | None = None
    type: str | None = None
    status: str | None = None
    title: str | None = None
    media_type: str | None = None
    file_size: int | None = None

    @classmethod
    def from_api_response(
        cls, data: dict[str, Any], **kwargs: Any
    ) -> "ConfluenceAttachment":
        """
        Create a ConfluenceAttachment from a Confluence API response.

        Args:
            data: The attachment data from the Confluence API

        Returns:
            A ConfluenceAttachment instance
        """
        if not data:
            return cls()

        # Look up "extensions" once; the `or {}` also guards against an
        # explicit null value, which data.get("extensions", {}) would pass
        # through as None and crash the .get() calls below.
        extensions = data.get("extensions") or {}

        return cls(
            id=data.get("id"),
            type=data.get("type"),
            status=data.get("status"),
            title=data.get("title"),
            media_type=extensions.get("mediaType"),
            file_size=extensions.get("fileSize"),
        )

    def to_simplified_dict(self) -> dict[str, Any]:
        """Convert to simplified dictionary for API response."""
        return {
            "id": self.id,
            "type": self.type,
            "status": self.status,
            "title": self.title,
            "media_type": self.media_type,
            "file_size": self.file_size,
        }

```

--------------------------------------------------------------------------------
/tests/unit/models/test_base_models.py:
--------------------------------------------------------------------------------

```python
"""
Tests for the base models and utility classes.
"""

from typing import Any

import pytest

from src.mcp_atlassian.models.base import ApiModel, TimestampMixin
from src.mcp_atlassian.models.constants import EMPTY_STRING


class TestApiModel:
    """Tests for the ApiModel base class."""

    def test_base_from_api_response_not_implemented(self):
        """Test that from_api_response raises NotImplementedError if not overridden."""
        with pytest.raises(NotImplementedError):
            ApiModel.from_api_response({})

    def test_base_to_simplified_dict(self):
        """Test that to_simplified_dict returns a dictionary with non-None values."""

        # Create a test subclass with some fields
        class TestModel(ApiModel):
            field1: str = "test"
            field2: int = 123
            # The annotation must admit None; a bare `str = None` default is
            # an invalid annotation and breaks stricter validation settings.
            field3: str | None = None

            @classmethod
            def from_api_response(cls, data: dict[str, Any], **kwargs):
                return cls()

        model = TestModel()
        result = model.to_simplified_dict()

        assert isinstance(result, dict)
        assert "field1" in result
        assert "field2" in result
        assert "field3" not in result  # None values should be excluded
        assert result["field1"] == "test"
        assert result["field2"] == 123


class TestTimestampMixin:
    """Tests for the TimestampMixin utility class."""

    def test_format_timestamp_valid(self):
        """A +0000-offset ISO 8601 timestamp renders as 'YYYY-MM-DD HH:MM:SS'."""
        mixin = TimestampMixin()
        assert (
            mixin.format_timestamp("2024-01-01T12:34:56.789+0000")
            == "2024-01-01 12:34:56"
        )

    def test_format_timestamp_with_z(self):
        """A Z-suffixed (UTC) timestamp renders the same way."""
        mixin = TimestampMixin()
        assert (
            mixin.format_timestamp("2024-01-01T12:34:56.789Z")
            == "2024-01-01 12:34:56"
        )

    def test_format_timestamp_none(self):
        """None input yields the EMPTY_STRING sentinel."""
        assert TimestampMixin().format_timestamp(None) == EMPTY_STRING

    def test_format_timestamp_invalid(self):
        """An unparseable string is returned unchanged."""
        bad = "not-a-timestamp"
        assert TimestampMixin().format_timestamp(bad) == bad

    def test_is_valid_timestamp_valid(self):
        """A +0000-offset ISO 8601 timestamp validates as True."""
        mixin = TimestampMixin()
        assert mixin.is_valid_timestamp("2024-01-01T12:34:56.789+0000") is True

    def test_is_valid_timestamp_with_z(self):
        """A Z-suffixed (UTC) timestamp validates as True."""
        mixin = TimestampMixin()
        assert mixin.is_valid_timestamp("2024-01-01T12:34:56.789Z") is True

    def test_is_valid_timestamp_none(self):
        """None is never a valid timestamp."""
        assert TimestampMixin().is_valid_timestamp(None) is False

    def test_is_valid_timestamp_invalid(self):
        """A non-timestamp string is not valid."""
        assert TimestampMixin().is_valid_timestamp("not-a-timestamp") is False

```

--------------------------------------------------------------------------------
/tests/utils/assertions.py:
--------------------------------------------------------------------------------

```python
"""Custom assertions and helpers for MCP Atlassian tests."""

from typing import Any
from unittest.mock import MagicMock


def assert_api_called_with(mock: MagicMock, method: str, **expected_kwargs) -> None:
    """Assert API method was called exactly once with expected parameters.

    Args:
        mock: Mock standing in for the API method.
        method: Human-readable API method name; previously unused, now included
            in failure messages so assertion errors identify the call.
        **expected_kwargs: Keyword arguments the call must have included
            (extra kwargs on the call are ignored).
    """
    mock.assert_called_once()
    actual_kwargs = mock.call_args.kwargs if mock.call_args else {}

    for key, expected_value in expected_kwargs.items():
        assert key in actual_kwargs, (
            f"Expected parameter '{key}' not found in call to '{method}'"
        )
        assert actual_kwargs[key] == expected_value, (
            f"Parameter '{key}' of '{method}': "
            f"expected {expected_value}, got {actual_kwargs[key]}"
        )


def assert_mock_called_with_partial(mock: MagicMock, **partial_kwargs) -> None:
    """Verify the mock's recorded call included each given keyword argument.

    Extra keyword arguments on the call are allowed; only the ones passed
    here are checked.
    """
    assert mock.called, "Mock was not called"

    call = mock.call_args
    if call is None:
        raise AssertionError("Mock was called but call_args is None")

    seen = call.kwargs
    for name, wanted in partial_kwargs.items():
        assert name in seen, f"Expected parameter '{name}' not found"
        assert seen[name] == wanted, (
            f"Parameter '{name}': expected {wanted}, got {seen[name]}"
        )


def assert_environment_vars_set(env_dict: dict[str, str], **expected_vars) -> None:
    """Check that each expected environment variable is present with the right value."""
    for name, wanted in expected_vars.items():
        assert name in env_dict, f"Environment variable '{name}' not set"
        actual = env_dict[name]
        assert actual == wanted, (
            f"Environment variable '{name}': expected '{wanted}', "
            f"got '{actual}'"
        )


def assert_config_contains(config: dict[str, Any], **expected_config) -> None:
    """Check that the configuration dict holds every expected key-value pair."""
    for key, wanted in expected_config.items():
        assert key in config, f"Configuration key '{key}' not found"
        actual = config[key]
        assert actual == wanted, (
            f"Configuration '{key}': expected {wanted}, got {actual}"
        )


def assert_exception_chain(
    exception: Exception, expected_cause: type | None = None
) -> None:
    """Assert exception has expected cause in chain."""
    if expected_cause is None:
        assert exception.__cause__ is None, "Expected no exception cause"
    else:
        assert exception.__cause__ is not None, (
            "Expected exception cause but found none"
        )
        assert isinstance(exception.__cause__, expected_cause), (
            f"Expected cause type {expected_cause}, got {type(exception.__cause__)}"
        )


def assert_log_contains(caplog, level: str, message: str) -> None:
    """Assert that some log record at `level` contains `message` as a substring."""
    wanted_level = level.upper()
    messages = [
        record.message
        for record in caplog.records
        if record.levelname == wanted_level
    ]

    assert any(message in msg for msg in messages), (
        f"Expected log message containing '{message}' at level {level}, "
        f"got messages: {messages}"
    )


def assert_dict_subset(subset: dict[str, Any], full_dict: dict[str, Any]) -> None:
    """Recursively assert that every key/value of `subset` appears in `full_dict`."""
    for key, expected in subset.items():
        assert key in full_dict, f"Key '{key}' not found in dictionary"
        actual = full_dict[key]
        # Nested dicts are compared recursively so partial nesting works.
        if isinstance(expected, dict) and isinstance(actual, dict):
            assert_dict_subset(expected, actual)
            continue
        assert actual == expected, (
            f"Key '{key}': expected {expected}, got {actual}"
        )

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/logging.py:
--------------------------------------------------------------------------------

```python
"""Logging utilities for MCP Atlassian.

This module provides enhanced logging capabilities for MCP Atlassian,
including level-dependent stream handling to route logs to the appropriate
output stream based on their level.
"""

import logging
import sys
from typing import TextIO


def setup_logging(
    level: int = logging.WARNING, stream: TextIO = sys.stderr
) -> logging.Logger:
    """
    Configure MCP-Atlassian logging.

    Installs a single stream handler on the root logger (replacing any
    handlers already present so repeated calls don't duplicate output) and
    applies the requested level to the root logger and the known
    application/server loggers.

    Args:
        level: The minimum logging level to display (default: WARNING)
        stream: The stream to write logs to (default: sys.stderr)

    Returns:
        The configured "mcp-atlassian" logger instance
    """
    root = logging.getLogger()
    root.setLevel(level)

    # Drop previously-installed handlers to avoid duplicated log lines.
    for existing in list(root.handlers):
        root.removeHandler(existing)

    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(
        logging.Formatter("%(levelname)s - %(name)s - %(message)s")
    )
    root.addHandler(stream_handler)

    # Apply the level to the named loggers used across the application.
    named_loggers = (
        "mcp-atlassian",
        "mcp.server",
        "mcp.server.lowlevel.server",
        "mcp-jira",
    )
    for name in named_loggers:
        logging.getLogger(name).setLevel(level)

    return logging.getLogger("mcp-atlassian")


def mask_sensitive(value: str | None, keep_chars: int = 4) -> str:
    """Masks sensitive strings for logging.

    Args:
        value: The string to mask
        keep_chars: Number of characters to keep visible at start and end

    Returns:
        Masked string with most characters replaced by asterisks
    """
    if not value:
        return "Not Provided"
    if len(value) <= keep_chars * 2:
        return "*" * len(value)
    start = value[:keep_chars]
    end = value[-keep_chars:]
    middle = "*" * (len(value) - keep_chars * 2)
    return f"{start}{middle}{end}"


def get_masked_session_headers(headers: dict[str, str]) -> dict[str, str]:
    """Get session headers with sensitive values masked for safe logging.

    Args:
        headers: Dictionary of HTTP headers

    Returns:
        Dictionary with sensitive headers masked
    """
    sensitive_headers = {"Authorization", "Cookie", "Set-Cookie", "Proxy-Authorization"}
    masked_headers = {}

    for key, value in headers.items():
        if key in sensitive_headers:
            if key == "Authorization":
                # Preserve auth type but mask the credentials
                if value.startswith("Basic "):
                    masked_headers[key] = f"Basic {mask_sensitive(value[6:])}"
                elif value.startswith("Bearer "):
                    masked_headers[key] = f"Bearer {mask_sensitive(value[7:])}"
                else:
                    masked_headers[key] = mask_sensitive(value)
            else:
                masked_headers[key] = mask_sensitive(value)
        else:
            masked_headers[key] = str(value)

    return masked_headers


def log_config_param(
    logger: logging.Logger,
    service: str,
    param: str,
    value: str | None,
    sensitive: bool = False,
) -> None:
    """Logs a configuration parameter, masking if sensitive.

    Args:
        logger: The logger to use
        service: The service name (Jira or Confluence)
        param: The parameter name
        value: The parameter value
        sensitive: Whether the value should be masked
    """
    if sensitive:
        display_value = mask_sensitive(value)
    else:
        # Empty/None values are reported explicitly rather than as blanks.
        display_value = value or "Not Provided"
    logger.info(f"{service} {param}: {display_value}")

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/utils/ssl.py:
--------------------------------------------------------------------------------

```python
"""SSL-related utility functions for MCP Atlassian."""

import logging
import ssl
from typing import Any
from urllib.parse import urlparse

from requests.adapters import HTTPAdapter
from requests.sessions import Session
from urllib3.poolmanager import PoolManager

logger = logging.getLogger("mcp-atlassian")


class SSLIgnoreAdapter(HTTPAdapter):
    """HTTP adapter that ignores SSL verification.

    A custom transport adapter that disables SSL certificate verification for
    specific domains. Both check_hostname and verify_mode (set to CERT_NONE)
    are turned off, which is what is actually required to fully ignore
    certificates.

    Legacy SSL renegotiation is also enabled, which some older servers need.
    This reduces security and should only be used when absolutely necessary.
    """

    def init_poolmanager(
        self, connections: int, maxsize: int, block: bool = False, **pool_kwargs: Any
    ) -> None:
        """Initialize the connection pool manager with SSL verification disabled.

        Called when the adapter is created; this is the proper place to
        disable SSL verification completely.

        Args:
            connections: Number of connections to save in the pool
            maxsize: Maximum number of connections in the pool
            block: Whether to block when the pool is full
            pool_kwargs: Additional arguments for the pool manager
        """
        # Build a context that skips certificate and hostname checks entirely.
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

        # Permit legacy renegotiation for older servers.
        ssl_context.options |= 0x4  # SSL_OP_LEGACY_SERVER_CONNECT
        ssl_context.options |= 0x40000  # SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION

        self.poolmanager = PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            ssl_context=ssl_context,
            **pool_kwargs,
        )

    def cert_verify(self, conn: Any, url: str, verify: bool, cert: Any | None) -> None:
        """Force certificate verification off regardless of the caller's flag.

        Kept for backward compatibility; the main SSL disabling happens in
        init_poolmanager.

        Args:
            conn: The connection
            url: The URL being requested
            verify: The original verify parameter (ignored)
            cert: Client certificate path
        """
        super().cert_verify(conn, url, verify=False, cert=cert)


def configure_ssl_verification(
    service_name: str, url: str, session: Session, ssl_verify: bool
) -> None:
    """Configure SSL verification for a specific service.

    When SSL verification is disabled, mounts a certificate-ignoring adapter
    on the session for the service's domain (both http and https schemes).
    When verification is enabled, the session is left untouched.

    Args:
        service_name: Name of the service for logging (e.g., "Confluence", "Jira")
        url: The base URL of the service
        session: The requests session to configure
        ssl_verify: Whether SSL verification should be enabled
    """
    if ssl_verify:
        return

    logger.warning(
        f"{service_name} SSL verification disabled. This is insecure and should only be used in testing environments."
    )

    # Route every request to this host through the no-verify adapter.
    domain = urlparse(url).netloc
    insecure_adapter = SSLIgnoreAdapter()
    session.mount(f"https://{domain}", insecure_adapter)
    session.mount(f"http://{domain}", insecure_adapter)

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/jira/comments.py:
--------------------------------------------------------------------------------

```python
"""Module for Jira comment operations."""

import logging
from typing import Any

from ..utils import parse_date
from .client import JiraClient

logger = logging.getLogger("mcp-jira")


class CommentsMixin(JiraClient):
    """Mixin for Jira comment operations."""

    def get_issue_comments(
        self, issue_key: str, limit: int = 50
    ) -> list[dict[str, Any]]:
        """
        Get comments for a specific issue.

        Args:
            issue_key: The issue key (e.g. 'PROJ-123')
            limit: Maximum number of comments to return

        Returns:
            List of comments with author, creation date, and content

        Raises:
            Exception: If there is an error getting comments
        """
        try:
            response = self.jira.issue_get_comments(issue_key)

            if not isinstance(response, dict):
                msg = f"Unexpected return value type from `jira.issue_get_comments`: {type(response)}"
                logger.error(msg)
                raise TypeError(msg)

            # Truncate to `limit` first, then reduce each comment to the
            # fields callers actually consume.
            return [
                {
                    "id": raw.get("id"),
                    "body": self._clean_text(raw.get("body", "")),
                    "created": str(parse_date(raw.get("created"))),
                    "updated": str(parse_date(raw.get("updated"))),
                    "author": raw.get("author", {}).get("displayName", "Unknown"),
                }
                for raw in response.get("comments", [])[:limit]
            ]
        except Exception as e:
            logger.error(f"Error getting comments for issue {issue_key}: {str(e)}")
            raise Exception(f"Error getting comments: {str(e)}") from e

    def add_comment(self, issue_key: str, comment: str) -> dict[str, Any]:
        """
        Add a comment to an issue.

        Args:
            issue_key: The issue key (e.g. 'PROJ-123')
            comment: Comment text to add (in Markdown format)

        Returns:
            The created comment details

        Raises:
            Exception: If there is an error adding the comment
        """
        try:
            # Jira expects its own wiki markup, not Markdown.
            payload = self._markdown_to_jira(comment)

            response = self.jira.issue_add_comment(issue_key, payload)
            if not isinstance(response, dict):
                msg = f"Unexpected return value type from `jira.issue_add_comment`: {type(response)}"
                logger.error(msg)
                raise TypeError(msg)

            return {
                "id": response.get("id"),
                "body": self._clean_text(response.get("body", "")),
                "created": str(parse_date(response.get("created"))),
                "author": response.get("author", {}).get("displayName", "Unknown"),
            }
        except Exception as e:
            logger.error(f"Error adding comment to issue {issue_key}: {str(e)}")
            raise Exception(f"Error adding comment: {str(e)}") from e

    def _markdown_to_jira(self, markdown_text: str) -> str:
        """
        Convert Markdown syntax to Jira markup syntax.

        Delegates to the shared TextPreprocessor implementation so conversion
        between Markdown and Jira markup stays consistent.

        Args:
            markdown_text: Text in Markdown format

        Returns:
            Text in Jira markup format
        """
        if not markdown_text:
            return ""

        try:
            return self.preprocessor.markdown_to_jira(markdown_text)
        except Exception as e:
            logger.warning(f"Error converting markdown to Jira format: {str(e)}")
            # Fall back to the untranslated text rather than failing the comment.
            return markdown_text

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/confluence/users.py:
--------------------------------------------------------------------------------

```python
"""Module for Confluence user operations."""

import logging
from typing import Any

from requests.exceptions import HTTPError

from ..exceptions import MCPAtlassianAuthenticationError
from .client import ConfluenceClient

logger = logging.getLogger("mcp-atlassian")


class UsersMixin(ConfluenceClient):
    """Mixin for Confluence user operations."""

    def get_user_details_by_accountid(
        self, account_id: str, expand: str | None = None
    ) -> dict[str, Any]:
        """Get user details by account ID.

        Typically used on Confluence Cloud, where users are identified by
        account ID.

        Args:
            account_id: The account ID of the user.
            expand: Optional expand for get status of user. Possible param is "status". Results are "Active, Deactivated".

        Returns:
            User details as a dictionary.

        Raises:
            Various exceptions from the Atlassian API if user doesn't exist or if there are permission issues.
        """
        return self.confluence.get_user_details_by_accountid(account_id, expand)

    def get_user_details_by_username(
        self, username: str, expand: str | None = None
    ) -> dict[str, Any]:
        """Get user details by username.

        This is typically used for Confluence Server/DC instances where username
        might be used as an identifier.

        Args:
            username: The username of the user.
            expand: Optional expand for get status of user. Possible param is "status". Results are "Active, Deactivated".

        Returns:
            User details as a dictionary.

        Raises:
            Various exceptions from the Atlassian API if user doesn't exist or if there are permission issues.
        """
        return self.confluence.get_user_details_by_username(username, expand)

    def get_current_user_info(self) -> dict[str, Any]:
        """
        Retrieve details for the currently authenticated user by calling Confluence's '/rest/api/user/current' endpoint.

        Also serves as a token-validity check: any failure (non-dict payload,
        HTTP 401/403, or other error) is surfaced as an authentication error.

        Returns:
            dict[str, Any]: The user details as returned by the API.

        Raises:
            MCPAtlassianAuthenticationError: If authentication fails or the response is not valid user data.
        """
        try:
            user_data = self.confluence.get("rest/api/user/current")
            # The endpoint may return non-JSON (e.g. an HTML login page) when
            # the token is invalid; treat anything but a dict as a failure.
            if not isinstance(user_data, dict):
                logger.error(
                    f"Confluence /rest/api/user/current endpoint returned non-dict data type: {type(user_data)}. "
                    f"Response text (partial): {str(user_data)[:500]}"
                )
                raise MCPAtlassianAuthenticationError(
                    "Confluence token validation failed: Did not receive valid JSON user data from /rest/api/user/current endpoint."
                )
            return user_data
        except HTTPError as http_err:
            # 401/403 are definitive auth failures; log at warning only.
            if http_err.response is not None and http_err.response.status_code in [
                401,
                403,
            ]:
                logger.warning(
                    f"Confluence token validation failed with HTTP {http_err.response.status_code} for /rest/api/user/current."
                )
                raise MCPAtlassianAuthenticationError(
                    f"Confluence token validation failed: {http_err.response.status_code} from /rest/api/user/current"
                ) from http_err
            # Other HTTP errors are unexpected; keep the full traceback.
            logger.error(
                f"HTTPError when calling Confluence /rest/api/user/current: {http_err}",
                exc_info=True,
            )
            raise MCPAtlassianAuthenticationError(
                f"Confluence token validation failed with HTTPError: {http_err}"
            ) from http_err
        except Exception as e:
            # Catch-all: any other failure is reported as an auth error so
            # callers have a single exception type to handle.
            logger.error(
                f"Unexpected error fetching current Confluence user details: {e}",
                exc_info=True,
            )
            raise MCPAtlassianAuthenticationError(
                f"Confluence token validation failed: {e}"
            ) from e

```

--------------------------------------------------------------------------------
/src/mcp_atlassian/confluence/spaces.py:
--------------------------------------------------------------------------------

```python
"""Module for Confluence space operations."""

import logging
from typing import cast

import requests

from .client import ConfluenceClient

logger = logging.getLogger("mcp-atlassian")


class SpacesMixin(ConfluenceClient):
    """Mixin for Confluence space operations."""

    def get_spaces(self, start: int = 0, limit: int = 10) -> dict[str, object]:
        """
        Get all available spaces.

        Args:
            start: The starting index for pagination
            limit: Maximum number of spaces to return

        Returns:
            Dictionary containing space information with results and metadata
        """
        spaces = self.confluence.get_all_spaces(start=start, limit=limit)
        # Cast the return value to the expected type
        return cast(dict[str, object], spaces)

    def get_user_contributed_spaces(self, limit: int = 250) -> dict:
        """
        Get spaces the current user has contributed to.

        Runs a CQL search for content the current user contributed to, then
        extracts and deduplicates the containing spaces. Any error yields an
        empty dict (logged, never raised).

        Args:
            limit: Maximum number of CQL results to scan for spaces

        Returns:
            Dictionary of space keys to space information
            (each value is {"key": ..., "name": ...})
        """
        try:
            # Use CQL to find content the user has contributed to
            cql = "contributor = currentUser() order by lastmodified DESC"
            results = self.confluence.cql(cql=cql, limit=limit)

            # Extract and deduplicate spaces
            spaces = {}
            for result in results.get("results", []):
                space_key = None
                space_name = None

                # Strategy 1: read title/key from the result's global
                # container; displayUrl looks like ".../spaces/<KEY>/...".
                if "resultGlobalContainer" in result:
                    container = result.get("resultGlobalContainer", {})
                    space_name = container.get("title")
                    display_url = container.get("displayUrl", "")
                    if display_url and "/spaces/" in display_url:
                        space_key = display_url.split("/spaces/")[1].split("/")[0]

                # Strategy 2: fall back to the content's _expandable space
                # link, shaped like "/rest/api/space/<KEY>".
                if (
                    not space_key
                    and "content" in result
                    and "_expandable" in result["content"]
                ):
                    expandable = result["content"].get("_expandable", {})
                    space_path = expandable.get("space", "")
                    if space_path and space_path.startswith("/rest/api/space/"):
                        space_key = space_path.split("/rest/api/space/")[1]

                # Strategy 3: fall back to the result URL ("/spaces/<KEY>/...").
                if not space_key and "url" in result:
                    url = result.get("url", "")
                    if url and url.startswith("/spaces/"):
                        space_key = url.split("/spaces/")[1].split("/")[0]

                # Only add if we found a space key and it's not already in our results
                if space_key and space_key not in spaces:
                    # Add some defaults if we couldn't extract all fields
                    space_name = space_name or f"Space {space_key}"
                    spaces[space_key] = {"key": space_key, "name": space_name}

            return spaces

        except KeyError as e:
            logger.error(f"Missing key in Confluence spaces data: {str(e)}")
            return {}
        except ValueError as e:
            logger.error(f"Invalid value in Confluence spaces: {str(e)}")
            return {}
        except TypeError as e:
            logger.error(f"Type error when processing Confluence spaces: {str(e)}")
            return {}
        except requests.RequestException as e:
            logger.error(f"Network error when fetching spaces: {str(e)}")
            return {}
        except Exception as e:  # noqa: BLE001 - Intentional fallback with logging
            logger.error(f"Unexpected error fetching Confluence spaces: {str(e)}")
            logger.debug("Full exception details for Confluence spaces:", exc_info=True)
            return {}

```
Page 1/10FirstPrevNextLast