#
tokens: 47446/50000 · 12/145 files · page 5/11
This is page 5 of 11. Use http://codebase.md/saidsurucu/yargi-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── __main__.py
├── .dockerignore
├── .env.example
├── .gitattributes
├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── .serena
│   ├── .gitignore
│   └── project.yml
├── 5ire-settings.png
├── analyze_kik_hash_generation.py
├── anayasa_mcp_module
│   ├── __init__.py
│   ├── bireysel_client.py
│   ├── client.py
│   ├── models.py
│   └── unified_client.py
├── asgi_app.py
├── bddk_mcp_module
│   ├── __init__.py
│   ├── client.py
│   └── models.py
├── bedesten_mcp_module
│   ├── __init__.py
│   ├── client.py
│   ├── enums.py
│   └── models.py
├── check_response_format.py
├── CLAUDE.md
├── danistay_mcp_module
│   ├── __init__.py
│   ├── client.py
│   └── models.py
├── docker-compose.yml
├── Dockerfile
├── docs
│   └── DEPLOYMENT.md
├── emsal_mcp_module
│   ├── __init__.py
│   ├── client.py
│   └── models.py
├── example_fastapi_app.py
├── fly-no-auth.toml
├── fly.toml
├── kik_mcp_module
│   ├── __init__.py
│   ├── client_v2.py
│   ├── client.py
│   ├── models_v2.py
│   └── models.py
├── kvkk_mcp_module
│   ├── __init__.py
│   ├── client.py
│   └── models.py
├── LICENSE
├── mcp_auth
│   ├── __init__.py
│   ├── clerk_config.py
│   ├── middleware.py
│   ├── oauth.py
│   ├── policy.py
│   └── storage.py
├── mcp_auth_factory.py
├── mcp_auth_http_adapter.py
├── mcp_auth_http_simple.py
├── mcp_server_main.py
├── nginx.conf
├── ornek.png
├── Procfile
├── pyproject.toml
├── railway.json
├── README.md
├── redis_session_store.py
├── rekabet_mcp_module
│   ├── __init__.py
│   ├── client.py
│   └── models.py
├── requirements.txt
├── run_asgi.py
├── saidsurucu-yargi-mcp-f5fa007
│   ├── __main__.py
│   ├── .dockerignore
│   ├── .env.example
│   ├── .gitattributes
│   ├── .github
│   │   └── workflows
│   │       └── publish.yml
│   ├── .gitignore
│   ├── 5ire-settings.png
│   ├── anayasa_mcp_module
│   │   ├── __init__.py
│   │   ├── bireysel_client.py
│   │   ├── client.py
│   │   ├── models.py
│   │   └── unified_client.py
│   ├── asgi_app.py
│   ├── bddk_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   ├── bedesten_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   ├── enums.py
│   │   └── models.py
│   ├── check_response_format.py
│   ├── danistay_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   ├── docker-compose.yml
│   ├── Dockerfile
│   ├── docs
│   │   └── DEPLOYMENT.md
│   ├── emsal_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   ├── example_fastapi_app.py
│   ├── kik_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   ├── kvkk_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   ├── LICENSE
│   ├── mcp_auth
│   │   ├── __init__.py
│   │   ├── clerk_config.py
│   │   ├── middleware.py
│   │   ├── oauth.py
│   │   ├── policy.py
│   │   └── storage.py
│   ├── mcp_auth_factory.py
│   ├── mcp_auth_http_adapter.py
│   ├── mcp_auth_http_simple.py
│   ├── mcp_server_main.py
│   ├── nginx.conf
│   ├── ornek.png
│   ├── Procfile
│   ├── pyproject.toml
│   ├── railway.json
│   ├── README.md
│   ├── redis_session_store.py
│   ├── rekabet_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   ├── run_asgi.py
│   ├── sayistay_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   ├── enums.py
│   │   ├── models.py
│   │   └── unified_client.py
│   ├── starlette_app.py
│   ├── stripe_webhook.py
│   ├── uyusmazlik_mcp_module
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models.py
│   └── yargitay_mcp_module
│       ├── __init__.py
│       ├── client.py
│       └── models.py
├── sayistay_mcp_module
│   ├── __init__.py
│   ├── client.py
│   ├── enums.py
│   ├── models.py
│   └── unified_client.py
├── starlette_app.py
├── stripe_webhook.py
├── uv.lock
├── uyusmazlik_mcp_module
│   ├── __init__.py
│   ├── client.py
│   └── models.py
└── yargitay_mcp_module
    ├── __init__.py
    ├── client.py
    └── models.py
```

# Files

--------------------------------------------------------------------------------
/saidsurucu-yargi-mcp-f5fa007/sayistay_mcp_module/models.py:
--------------------------------------------------------------------------------

```python
  1 | # sayistay_mcp_module/models.py
  2 | 
  3 | from pydantic import BaseModel, Field
  4 | from typing import Optional, List, Union, Dict, Any, Literal
  5 | from enum import Enum
  6 | from .enums import DaireEnum, KamuIdaresiTuruEnum, WebKararKonusuEnum
  7 | 
  8 | # --- Unified Enums ---
  9 | class SayistayDecisionTypeEnum(str, Enum):
 10 |     GENEL_KURUL = "genel_kurul"
 11 |     TEMYIZ_KURULU = "temyiz_kurulu"
 12 |     DAIRE = "daire"
 13 | 
 14 | # ============================================================================
 15 | # Genel Kurul (General Assembly) Models
 16 | # ============================================================================
 17 | 
 18 | class GenelKurulSearchRequest(BaseModel):
 19 |     """
 20 |     Search request for Sayıştay Genel Kurul (General Assembly) decisions.
 21 |     
 22 |     Genel Kurul decisions are precedent-setting rulings made by the full assembly
 23 |     of the Turkish Court of Accounts, typically addressing interpretation of
 24 |     audit and accountability regulations.
 25 |     """
 26 |     karar_no: str = Field("", description="Decision no")
 27 |     karar_ek: str = Field("", description="Appendix no")
 28 |     
 29 |     karar_tarih_baslangic: str = Field("", description="Start year (YYYY)")
 30 |     
 31 |     karar_tarih_bitis: str = Field("", description="End year")
 32 |     
 33 |     karar_tamami: str = Field("", description="Value")
 34 |     
 35 |     # DataTables pagination
 36 |     start: int = Field(0, description="Starting record for pagination (0-based)")
 37 |     length: int = Field(10, description="Number of records per page (1-10)")
 38 | 
 39 | class GenelKurulDecision(BaseModel):
 40 |     """Single Genel Kurul decision entry from search results."""
 41 |     id: int = Field(..., description="Unique decision ID")
 42 |     karar_no: str = Field(..., description="Decision number (e.g., '5415/1')")
 43 |     karar_tarih: str = Field(..., description="Decision date in DD.MM.YYYY format")
 44 |     karar_ozeti: str = Field(..., description="Decision summary/abstract")
 45 | 
 46 | class GenelKurulSearchResponse(BaseModel):
 47 |     """Response from Genel Kurul search endpoint."""
 48 |     decisions: List[GenelKurulDecision] = Field(default_factory=list, description="List of matching decisions")
 49 |     total_records: int = Field(0, description="Total number of matching records")
 50 |     total_filtered: int = Field(0, description="Number of records after filtering")
 51 |     draw: int = Field(1, description="DataTables draw counter")
 52 | 
 53 | # ============================================================================
 54 | # Temyiz Kurulu (Appeals Board) Models
 55 | # ============================================================================
 56 | 
 57 | class TemyizKuruluSearchRequest(BaseModel):
 58 |     """
 59 |     Search request for Sayıştay Temyiz Kurulu (Appeals Board) decisions.
 60 |     
 61 |     Temyiz Kurulu reviews appeals against audit chamber decisions,
 62 |     providing higher-level review of audit findings and sanctions.
 63 |     """
 64 |     ilam_dairesi: DaireEnum = Field("ALL", description="Value")
 65 |     
 66 |     yili: str = Field("", description="Value")
 67 |     
 68 |     karar_tarih_baslangic: str = Field("", description="Value")
 69 |     
 70 |     karar_tarih_bitis: str = Field("", description="End year")
 71 |     
 72 |     kamu_idaresi_turu: KamuIdaresiTuruEnum = Field("ALL", description="Value")
 73 |     
 74 |     ilam_no: str = Field("", description="Audit report number (İlam No, max 50 chars)")
 75 |     dosya_no: str = Field("", description="File number for the case")
 76 |     temyiz_tutanak_no: str = Field("", description="Appeals board meeting minutes number")
 77 |     
 78 |     temyiz_karar: str = Field("", description="Value")
 79 |     
 80 |     web_karar_konusu: WebKararKonusuEnum = Field("ALL", description="Value")
 81 |     
 82 |     # DataTables pagination
 83 |     start: int = Field(0, description="Starting record for pagination (0-based)")
 84 |     length: int = Field(10, description="Number of records per page (1-10)")
 85 | 
 86 | class TemyizKuruluDecision(BaseModel):
 87 |     """Single Temyiz Kurulu decision entry from search results."""
 88 |     id: int = Field(..., description="Unique decision ID")
 89 |     temyiz_tutanak_tarihi: str = Field(..., description="Appeals board meeting date in DD.MM.YYYY format")
 90 |     ilam_dairesi: int = Field(..., description="Chamber number (1-8)")
 91 |     temyiz_karar: str = Field(..., description="Appeals decision summary and reasoning")
 92 | 
 93 | class TemyizKuruluSearchResponse(BaseModel):
 94 |     """Response from Temyiz Kurulu search endpoint."""
 95 |     decisions: List[TemyizKuruluDecision] = Field(default_factory=list, description="List of matching appeals decisions")
 96 |     total_records: int = Field(0, description="Total number of matching records")
 97 |     total_filtered: int = Field(0, description="Number of records after filtering")
 98 |     draw: int = Field(1, description="DataTables draw counter")
 99 | 
100 | # ============================================================================
101 | # Daire (Chamber) Models  
102 | # ============================================================================
103 | 
class DaireSearchRequest(BaseModel):
    """
    Search request for Sayıştay Daire (Chamber) decisions.

    Daire decisions are first-instance audit findings and sanctions
    issued by individual audit chambers before potential appeals.
    """
    # Fix: replaced placeholder "Value" descriptions with the wording already
    # used by the result/unified models in this module.
    yargilama_dairesi: DaireEnum = Field("ALL", description="Audit chamber selection")

    karar_tarih_baslangic: str = Field("", description="Start year (YYYY)")

    karar_tarih_bitis: str = Field("", description="End year (YYYY)")

    ilam_no: str = Field("", description="Audit report number (İlam No, max 50 chars)")

    kamu_idaresi_turu: KamuIdaresiTuruEnum = Field("ALL", description="Public administration type filter")

    hesap_yili: str = Field("", description="Account year (YYYY)")

    web_karar_konusu: WebKararKonusuEnum = Field("ALL", description="Decision subject category filter")

    web_karar_metni: str = Field("", description="Full text search within the decision text")

    # DataTables pagination
    start: int = Field(0, description="Starting record for pagination (0-based)")
    length: int = Field(10, description="Number of records per page (1-10)")
130 | 
class DaireDecision(BaseModel):
    """One Chamber decision row parsed from the search results."""
    id: int = Field(..., description="Unique decision ID")
    yargilama_dairesi: int = Field(..., description="Chamber number (1-8)")
    karar_tarih: str = Field(..., description="Decision date in DD.MM.YYYY format")
    karar_no: str = Field(..., description="Decision number")
    ilam_no: str = Field(default="", description="Audit report number (may be null)")
    madde_no: int = Field(..., description="Article/item number within the decision")
    kamu_idaresi_turu: str = Field(..., description="Public administration type")
    hesap_yili: int = Field(..., description="Account year being audited")
    web_karar_konusu: str = Field(..., description="Decision subject category")
    web_karar_metni: str = Field(..., description="Decision text/summary")

class DaireSearchResponse(BaseModel):
    """Paginated DataTables-style payload from the Daire search endpoint."""
    decisions: List[DaireDecision] = Field(default_factory=list, description="List of matching chamber decisions")
    total_records: int = Field(default=0, description="Total number of matching records")
    total_filtered: int = Field(default=0, description="Number of records after filtering")
    draw: int = Field(default=1, description="DataTables draw counter")
150 | 
151 | # ============================================================================
152 | # Document Models
153 | # ============================================================================
154 | 
class SayistayDocumentMarkdown(BaseModel):
    """
    Sayıştay decision document converted to Markdown format.

    Used for retrieving full text of decisions from any of the three
    decision types (Genel Kurul, Temyiz Kurulu, Daire).
    """
    decision_id: str = Field(..., description="Unique decision identifier")
    # Fix: was the placeholder "Value"; mirrors the unified models' discriminator.
    decision_type: str = Field(..., description="Decision type: genel_kurul, temyiz_kurulu, or daire")
    source_url: str = Field(..., description="Original URL where the document was retrieved")
    markdown_content: Optional[str] = Field(None, description="Full decision text converted to Markdown format")
    retrieval_date: Optional[str] = Field(None, description="Date when document was retrieved (ISO format)")
    error_message: Optional[str] = Field(None, description="Error message if document retrieval failed")
168 | 
169 | # ============================================================================
170 | # Unified Models
171 | # ============================================================================
172 | 
class SayistayUnifiedSearchRequest(BaseModel):
    """
    Single search-request model covering all three Sayıştay decision types.

    Type-specific fields are simply ignored when the selected
    ``decision_type`` does not use them.
    """
    decision_type: Literal["genel_kurul", "temyiz_kurulu", "daire"] = Field(..., description="Decision type: genel_kurul, temyiz_kurulu, or daire")

    # Pagination (shared by every decision type)
    start: int = Field(default=0, ge=0, description="Starting record for pagination (0-based)")
    length: int = Field(default=10, ge=1, le=100, description="Number of records per page (1-100)")

    # Filters shared by every decision type
    karar_tarih_baslangic: str = Field(default="", description="Start date (DD.MM.YYYY format)")
    karar_tarih_bitis: str = Field(default="", description="End date (DD.MM.YYYY format)")
    kamu_idaresi_turu: KamuIdaresiTuruEnum = Field(default="ALL", description="Public administration type filter")
    ilam_no: str = Field(default="", description="Audit report number (İlam No, max 50 chars)")
    web_karar_konusu: WebKararKonusuEnum = Field(default="ALL", description="Decision subject category filter")

    # genel_kurul-only filters
    karar_no: str = Field(default="", description="Decision number (genel_kurul only)")
    karar_ek: str = Field(default="", description="Decision appendix number (genel_kurul only)")
    karar_tamami: str = Field(default="", description="Full text search (genel_kurul only)")

    # temyiz_kurulu-only filters
    ilam_dairesi: DaireEnum = Field(default="ALL", description="Audit chamber selection (temyiz_kurulu only)")
    yili: str = Field(default="", description="Year (YYYY format, temyiz_kurulu only)")
    dosya_no: str = Field(default="", description="File number (temyiz_kurulu only)")
    temyiz_tutanak_no: str = Field(default="", description="Appeals board meeting minutes number (temyiz_kurulu only)")
    temyiz_karar: str = Field(default="", description="Appeals decision text search (temyiz_kurulu only)")

    # daire-only filters
    yargilama_dairesi: DaireEnum = Field(default="ALL", description="Chamber selection (daire only)")
    hesap_yili: str = Field(default="", description="Account year (daire only)")
    web_karar_metni: str = Field(default="", description="Decision text search (daire only)")
204 | 
class SayistayUnifiedSearchResult(BaseModel):
    """Search-response wrapper shared by all three Sayıştay decision types."""
    decision_type: Literal["genel_kurul", "temyiz_kurulu", "daire"] = Field(..., description="Type of decisions returned")
    decisions: List[Dict[str, Any]] = Field(default_factory=list, description="Decision list (structure varies by type)")
    total_records: int = Field(default=0, description="Total number of records found")
    total_filtered: int = Field(default=0, description="Number of records after filtering")
    draw: int = Field(default=1, description="DataTables draw counter")

class SayistayUnifiedDocumentMarkdown(BaseModel):
    """Full-document wrapper shared by all three Sayıştay decision types."""
    decision_type: Literal["genel_kurul", "temyiz_kurulu", "daire"] = Field(..., description="Type of document")
    decision_id: str = Field(..., description="Decision ID")
    source_url: str = Field(..., description="Source URL of the document")
    document_data: Dict[str, Any] = Field(default_factory=dict, description="Document content and metadata")
    markdown_content: Optional[str] = Field(default=None, description="Markdown content")
    error_message: Optional[str] = Field(default=None, description="Error message if retrieval failed")
```

--------------------------------------------------------------------------------
/sayistay_mcp_module/models.py:
--------------------------------------------------------------------------------

```python
  1 | # sayistay_mcp_module/models.py
  2 | 
  3 | from pydantic import BaseModel, Field
  4 | from typing import Optional, List, Union, Dict, Any, Literal
  5 | from enum import Enum
  6 | from .enums import DaireEnum, KamuIdaresiTuruEnum, WebKararKonusuEnum
  7 | 
  8 | # --- Unified Enums ---
  9 | class SayistayDecisionTypeEnum(str, Enum):
 10 |     GENEL_KURUL = "genel_kurul"
 11 |     TEMYIZ_KURULU = "temyiz_kurulu"
 12 |     DAIRE = "daire"
 13 | 
 14 | # ============================================================================
 15 | # Genel Kurul (General Assembly) Models
 16 | # ============================================================================
 17 | 
 18 | class GenelKurulSearchRequest(BaseModel):
 19 |     """
 20 |     Search request for Sayıştay Genel Kurul (General Assembly) decisions.
 21 |     
 22 |     Genel Kurul decisions are precedent-setting rulings made by the full assembly
 23 |     of the Turkish Court of Accounts, typically addressing interpretation of
 24 |     audit and accountability regulations.
 25 |     """
 26 |     karar_no: str = Field("", description="Decision no")
 27 |     karar_ek: str = Field("", description="Appendix no")
 28 |     
 29 |     karar_tarih_baslangic: str = Field("", description="Start year (YYYY)")
 30 |     
 31 |     karar_tarih_bitis: str = Field("", description="End year")
 32 |     
 33 |     karar_tamami: str = Field("", description="Value")
 34 |     
 35 |     # DataTables pagination
 36 |     start: int = Field(0, description="Starting record for pagination (0-based)")
 37 |     length: int = Field(10, description="Number of records per page (1-10)")
 38 | 
 39 | class GenelKurulDecision(BaseModel):
 40 |     """Single Genel Kurul decision entry from search results."""
 41 |     id: int = Field(..., description="Unique decision ID")
 42 |     karar_no: str = Field(..., description="Decision number (e.g., '5415/1')")
 43 |     karar_tarih: str = Field(..., description="Decision date in DD.MM.YYYY format")
 44 |     karar_ozeti: str = Field(..., description="Decision summary/abstract")
 45 | 
 46 | class GenelKurulSearchResponse(BaseModel):
 47 |     """Response from Genel Kurul search endpoint."""
 48 |     decisions: List[GenelKurulDecision] = Field(default_factory=list, description="List of matching decisions")
 49 |     total_records: int = Field(0, description="Total number of matching records")
 50 |     total_filtered: int = Field(0, description="Number of records after filtering")
 51 |     draw: int = Field(1, description="DataTables draw counter")
 52 | 
 53 | # ============================================================================
 54 | # Temyiz Kurulu (Appeals Board) Models
 55 | # ============================================================================
 56 | 
 57 | class TemyizKuruluSearchRequest(BaseModel):
 58 |     """
 59 |     Search request for Sayıştay Temyiz Kurulu (Appeals Board) decisions.
 60 |     
 61 |     Temyiz Kurulu reviews appeals against audit chamber decisions,
 62 |     providing higher-level review of audit findings and sanctions.
 63 |     """
 64 |     ilam_dairesi: DaireEnum = Field("ALL", description="Value")
 65 |     
 66 |     yili: str = Field("", description="Value")
 67 |     
 68 |     karar_tarih_baslangic: str = Field("", description="Value")
 69 |     
 70 |     karar_tarih_bitis: str = Field("", description="End year")
 71 |     
 72 |     kamu_idaresi_turu: KamuIdaresiTuruEnum = Field("ALL", description="Value")
 73 |     
 74 |     ilam_no: str = Field("", description="Audit report number (İlam No, max 50 chars)")
 75 |     dosya_no: str = Field("", description="File number for the case")
 76 |     temyiz_tutanak_no: str = Field("", description="Appeals board meeting minutes number")
 77 |     
 78 |     temyiz_karar: str = Field("", description="Value")
 79 |     
 80 |     web_karar_konusu: WebKararKonusuEnum = Field("ALL", description="Value")
 81 |     
 82 |     # DataTables pagination
 83 |     start: int = Field(0, description="Starting record for pagination (0-based)")
 84 |     length: int = Field(10, description="Number of records per page (1-10)")
 85 | 
 86 | class TemyizKuruluDecision(BaseModel):
 87 |     """Single Temyiz Kurulu decision entry from search results."""
 88 |     id: int = Field(..., description="Unique decision ID")
 89 |     temyiz_tutanak_tarihi: str = Field(..., description="Appeals board meeting date in DD.MM.YYYY format")
 90 |     ilam_dairesi: int = Field(..., description="Chamber number (1-8)")
 91 |     temyiz_karar: str = Field(..., description="Appeals decision summary and reasoning")
 92 | 
 93 | class TemyizKuruluSearchResponse(BaseModel):
 94 |     """Response from Temyiz Kurulu search endpoint."""
 95 |     decisions: List[TemyizKuruluDecision] = Field(default_factory=list, description="List of matching appeals decisions")
 96 |     total_records: int = Field(0, description="Total number of matching records")
 97 |     total_filtered: int = Field(0, description="Number of records after filtering")
 98 |     draw: int = Field(1, description="DataTables draw counter")
 99 | 
100 | # ============================================================================
101 | # Daire (Chamber) Models  
102 | # ============================================================================
103 | 
class DaireSearchRequest(BaseModel):
    """
    Search request for Sayıştay Daire (Chamber) decisions.

    Daire decisions are first-instance audit findings and sanctions
    issued by individual audit chambers before potential appeals.
    """
    # Fix: replaced placeholder "Value" descriptions with the wording already
    # used by the result/unified models in this module.
    yargilama_dairesi: DaireEnum = Field("ALL", description="Audit chamber selection")

    karar_tarih_baslangic: str = Field("", description="Start year (YYYY)")

    karar_tarih_bitis: str = Field("", description="End year (YYYY)")

    ilam_no: str = Field("", description="Audit report number (İlam No, max 50 chars)")

    kamu_idaresi_turu: KamuIdaresiTuruEnum = Field("ALL", description="Public administration type filter")

    hesap_yili: str = Field("", description="Account year (YYYY)")

    web_karar_konusu: WebKararKonusuEnum = Field("ALL", description="Decision subject category filter")

    web_karar_metni: str = Field("", description="Full text search within the decision text")

    # DataTables pagination
    start: int = Field(0, description="Starting record for pagination (0-based)")
    length: int = Field(10, description="Number of records per page (1-10)")
130 | 
class DaireDecision(BaseModel):
    """One Chamber decision row parsed from the search results."""
    id: int = Field(..., description="Unique decision ID")
    yargilama_dairesi: int = Field(..., description="Chamber number (1-8)")
    karar_tarih: str = Field(..., description="Decision date in DD.MM.YYYY format")
    karar_no: str = Field(..., description="Decision number")
    ilam_no: str = Field(default="", description="Audit report number (may be null)")
    madde_no: int = Field(..., description="Article/item number within the decision")
    kamu_idaresi_turu: str = Field(..., description="Public administration type")
    hesap_yili: int = Field(..., description="Account year being audited")
    web_karar_konusu: str = Field(..., description="Decision subject category")
    web_karar_metni: str = Field(..., description="Decision text/summary")

class DaireSearchResponse(BaseModel):
    """Paginated DataTables-style payload from the Daire search endpoint."""
    decisions: List[DaireDecision] = Field(default_factory=list, description="List of matching chamber decisions")
    total_records: int = Field(default=0, description="Total number of matching records")
    total_filtered: int = Field(default=0, description="Number of records after filtering")
    draw: int = Field(default=1, description="DataTables draw counter")
150 | 
151 | # ============================================================================
152 | # Document Models
153 | # ============================================================================
154 | 
class SayistayDocumentMarkdown(BaseModel):
    """
    Sayıştay decision document converted to Markdown format.

    Used for retrieving full text of decisions from any of the three
    decision types (Genel Kurul, Temyiz Kurulu, Daire).
    """
    decision_id: str = Field(..., description="Unique decision identifier")
    # Fix: was the placeholder "Value"; mirrors the unified models' discriminator.
    decision_type: str = Field(..., description="Decision type: genel_kurul, temyiz_kurulu, or daire")
    source_url: str = Field(..., description="Original URL where the document was retrieved")
    markdown_content: Optional[str] = Field(None, description="Full decision text converted to Markdown format")
    retrieval_date: Optional[str] = Field(None, description="Date when document was retrieved (ISO format)")
    error_message: Optional[str] = Field(None, description="Error message if document retrieval failed")
168 | 
169 | # ============================================================================
170 | # Unified Models
171 | # ============================================================================
172 | 
class SayistayUnifiedSearchRequest(BaseModel):
    """
    Single search-request model covering all three Sayıştay decision types.

    Type-specific fields are simply ignored when the selected
    ``decision_type`` does not use them.
    """
    decision_type: Literal["genel_kurul", "temyiz_kurulu", "daire"] = Field(..., description="Decision type: genel_kurul, temyiz_kurulu, or daire")

    # Pagination (shared by every decision type)
    start: int = Field(default=0, ge=0, description="Starting record for pagination (0-based)")
    length: int = Field(default=10, ge=1, le=100, description="Number of records per page (1-100)")

    # Filters shared by every decision type
    karar_tarih_baslangic: str = Field(default="", description="Start date (DD.MM.YYYY format)")
    karar_tarih_bitis: str = Field(default="", description="End date (DD.MM.YYYY format)")
    kamu_idaresi_turu: KamuIdaresiTuruEnum = Field(default="ALL", description="Public administration type filter")
    ilam_no: str = Field(default="", description="Audit report number (İlam No, max 50 chars)")
    web_karar_konusu: WebKararKonusuEnum = Field(default="ALL", description="Decision subject category filter")

    # genel_kurul-only filters
    karar_no: str = Field(default="", description="Decision number (genel_kurul only)")
    karar_ek: str = Field(default="", description="Decision appendix number (genel_kurul only)")
    karar_tamami: str = Field(default="", description="Full text search (genel_kurul only)")

    # temyiz_kurulu-only filters
    ilam_dairesi: DaireEnum = Field(default="ALL", description="Audit chamber selection (temyiz_kurulu only)")
    yili: str = Field(default="", description="Year (YYYY format, temyiz_kurulu only)")
    dosya_no: str = Field(default="", description="File number (temyiz_kurulu only)")
    temyiz_tutanak_no: str = Field(default="", description="Appeals board meeting minutes number (temyiz_kurulu only)")
    temyiz_karar: str = Field(default="", description="Appeals decision text search (temyiz_kurulu only)")

    # daire-only filters
    yargilama_dairesi: DaireEnum = Field(default="ALL", description="Chamber selection (daire only)")
    hesap_yili: str = Field(default="", description="Account year (daire only)")
    web_karar_metni: str = Field(default="", description="Decision text search (daire only)")
204 | 
class SayistayUnifiedSearchResult(BaseModel):
    """Search-response wrapper shared by all three Sayıştay decision types."""
    decision_type: Literal["genel_kurul", "temyiz_kurulu", "daire"] = Field(..., description="Type of decisions returned")
    decisions: List[Dict[str, Any]] = Field(default_factory=list, description="Decision list (structure varies by type)")
    total_records: int = Field(default=0, description="Total number of records found")
    total_filtered: int = Field(default=0, description="Number of records after filtering")
    draw: int = Field(default=1, description="DataTables draw counter")

class SayistayUnifiedDocumentMarkdown(BaseModel):
    """Full-document wrapper shared by all three Sayıştay decision types."""
    decision_type: Literal["genel_kurul", "temyiz_kurulu", "daire"] = Field(..., description="Type of document")
    decision_id: str = Field(..., description="Decision ID")
    source_url: str = Field(..., description="Source URL of the document")
    document_data: Dict[str, Any] = Field(default_factory=dict, description="Document content and metadata")
    markdown_content: Optional[str] = Field(default=None, description="Markdown content")
    error_message: Optional[str] = Field(default=None, description="Error message if retrieval failed")
```

--------------------------------------------------------------------------------
/mcp_auth/middleware.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | MCP server middleware for OAuth authentication and authorization
  3 | """
  4 | 
  5 | import functools
  6 | import logging
  7 | from collections.abc import Callable
  8 | from dataclasses import dataclass
  9 | from typing import Any, Optional
 10 | 
 11 | logger = logging.getLogger(__name__)
 12 | 
 13 | try:
 14 |     from fastmcp import FastMCP
 15 |     FASTMCP_AVAILABLE = True
 16 | except ImportError:
 17 |     FASTMCP_AVAILABLE = False
 18 |     FastMCP = None
 19 |     logger.warning("FastMCP not available, some features will be disabled")
 20 | 
 21 | from .oauth import OAuthProvider
 22 | from .policy import PolicyEngine
 23 | 
 24 | 
 25 | @dataclass
 26 | class AuthContext:
 27 |     """Authentication context passed to MCP tools"""
 28 | 
 29 |     user_id: str
 30 |     scopes: list[str]
 31 |     claims: dict[str, Any]
 32 |     token: str
 33 | 
 34 | 
 35 | class MCPAuthMiddleware:
 36 |     """Authentication middleware for MCP servers"""
 37 | 
 38 |     def __init__(self, oauth_provider: OAuthProvider, policy_engine: PolicyEngine):
 39 |         self.oauth_provider = oauth_provider
 40 |         self.policy_engine = policy_engine
 41 | 
 42 |     def authenticate_request(self, authorization_header: str) -> AuthContext | None:
 43 |         """Extract and validate auth token from request"""
 44 | 
 45 |         if not authorization_header:
 46 |             logger.debug("No authorization header provided")
 47 |             return None
 48 | 
 49 |         if not authorization_header.startswith("Bearer "):
 50 |             logger.debug("Authorization header does not start with 'Bearer '")
 51 |             return None
 52 | 
 53 |         token = authorization_header[7:]  # Remove 'Bearer ' prefix
 54 | 
 55 |         token_info = self.oauth_provider.introspect_token(token)
 56 | 
 57 |         if not token_info.get("active"):
 58 |             logger.warning("Token is not active")
 59 |             return None
 60 | 
 61 |         logger.debug(f"Authenticated user: {token_info.get('sub', 'unknown')}")
 62 | 
 63 |         return AuthContext(
 64 |             user_id=token_info.get("sub", "unknown"),
 65 |             scopes=token_info.get("mcp_tool_scopes", []),
 66 |             claims=token_info,
 67 |             token=token,
 68 |         )
 69 | 
 70 |     def authorize_tool_call(
 71 |         self, tool_name: str, auth_context: AuthContext
 72 |     ) -> tuple[bool, str | None]:
 73 |         """Check if user can call the specified tool"""
 74 | 
 75 |         return self.policy_engine.authorize_tool_call(
 76 |             tool_name=tool_name,
 77 |             user_scopes=auth_context.scopes,
 78 |             user_claims=auth_context.claims,
 79 |         )
 80 | 
 81 | 
 82 | def auth_required(
 83 |     oauth_provider: OAuthProvider,
 84 |     policy_engine: PolicyEngine,
 85 |     tool_name: str | None = None,
 86 | ):
 87 |     """
 88 |     Decorator to require authentication for MCP tool functions
 89 | 
 90 |     Usage:
 91 |         @auth_required(oauth_provider, policy_engine, "search_yargitay")
 92 |         def my_tool_function(context: AuthContext, ...):
 93 |             pass
 94 |     """
 95 | 
 96 |     def decorator(func: Callable) -> Callable:
 97 |         middleware = MCPAuthMiddleware(oauth_provider, policy_engine)
 98 | 
 99 |         @functools.wraps(func)
100 |         async def wrapper(*args, **kwargs):
101 |             # Extract authorization header from kwargs
102 |             auth_header = kwargs.pop("authorization", None)
103 |             
104 |             # Also check in args if it's a Request object
105 |             if not auth_header and args:
106 |                 for arg in args:
107 |                     if hasattr(arg, 'headers'):
108 |                         auth_header = arg.headers.get("Authorization")
109 |                         break
110 | 
111 |             if not auth_header:
112 |                 logger.warning(f"No authorization header for tool '{tool_name or func.__name__}'")
113 |                 raise PermissionError("Authorization header required")
114 | 
115 |             auth_context = middleware.authenticate_request(auth_header)
116 | 
117 |             if not auth_context:
118 |                 logger.warning(f"Authentication failed for tool '{tool_name or func.__name__}'")
119 |                 raise PermissionError("Invalid or expired token")
120 | 
121 |             actual_tool_name = tool_name or func.__name__
122 | 
123 |             authorized, reason = middleware.authorize_tool_call(
124 |                 actual_tool_name, auth_context
125 |             )
126 | 
127 |             if not authorized:
128 |                 logger.warning(f"Authorization failed for tool '{actual_tool_name}': {reason}")
129 |                 raise PermissionError(f"Access denied: {reason}")
130 | 
131 |             # Add auth context to function call
132 |             return await func(auth_context, *args, **kwargs)
133 | 
134 |         return wrapper
135 | 
136 |     return decorator
137 | 
138 | 
class FastMCPAuthWrapper:
    """Wrapper for FastMCP servers to add authentication.

    On construction, wraps every tool already registered on the server with
    token authentication and policy authorization. Optionally,
    add_oauth_endpoints() registers OAuth helper tools on the same server.
    """

    def __init__(
        self,
        mcp_server: "FastMCP",
        oauth_provider: OAuthProvider,
        policy_engine: PolicyEngine,
    ):
        # Fail fast if the optional fastmcp dependency is not installed.
        if not FASTMCP_AVAILABLE:
            raise ImportError("FastMCP is required for FastMCPAuthWrapper")

        self.mcp_server = mcp_server
        self.middleware = MCPAuthMiddleware(oauth_provider, policy_engine)
        self.oauth_provider = oauth_provider
        logger.info("Initializing FastMCP authentication wrapper")
        self._wrap_tools()

    def _wrap_tools(self):
        """Wrap all existing tools with auth middleware"""

        # Try different FastMCP tool storage locations — the registry
        # attribute name varies across FastMCP versions, so probe candidates.
        tool_registry = None

        if hasattr(self.mcp_server, '_tools'):
            tool_registry = self.mcp_server._tools
        elif hasattr(self.mcp_server, 'tools'):
            tool_registry = self.mcp_server.tools
        elif hasattr(self.mcp_server, '_tool_registry'):
            tool_registry = self.mcp_server._tool_registry
        elif hasattr(self.mcp_server, '_handlers') and hasattr(self.mcp_server._handlers, 'tools'):
            tool_registry = self.mcp_server._handlers.tools

        if not tool_registry:
            # Non-fatal: the server keeps working, just without auth wrapping.
            logger.warning("FastMCP server tool registry not found, tools will not be automatically wrapped")
            logger.debug(f"Available server attributes: {dir(self.mcp_server)}")
            return

        logger.debug(f"Found tool registry with {len(tool_registry)} tools")
        # Snapshot first so the live registry can be mutated while iterating.
        original_tools = dict(tool_registry)
        wrapped_count = 0

        for tool_name, tool_func in original_tools.items():
            try:
                wrapped_func = self._create_auth_wrapper(tool_name, tool_func)
                tool_registry[tool_name] = wrapped_func
                wrapped_count += 1
                logger.debug(f"Wrapped tool: {tool_name}")
            except Exception as e:
                # Leave the original (unwrapped) tool in place on failure.
                logger.error(f"Failed to wrap tool {tool_name}: {e}")

        logger.info(f"Successfully wrapped {wrapped_count} tools with authentication")

    def _create_auth_wrapper(self, tool_name: str, original_func: Callable) -> Callable:
        """Create auth wrapper for a specific tool.

        The returned coroutine authenticates, authorizes against *tool_name*,
        then invokes *original_func* with an extra `auth_context` kwarg.
        Raises PermissionError on any auth failure.
        """

        @functools.wraps(original_func)
        async def auth_wrapper(*args, **kwargs):
            # Extract authorization from various sources
            auth_header = None

            # Check kwargs first (popped so it is not forwarded to the tool)
            auth_header = kwargs.pop("authorization", None)

            # Check if first argument is a Request object
            if not auth_header and args:
                first_arg = args[0]
                if hasattr(first_arg, 'headers'):
                    auth_header = first_arg.headers.get("Authorization")

            if not auth_header:
                logger.warning(f"No authorization header for tool '{tool_name}'")
                raise PermissionError("Authorization required")

            auth_context = self.middleware.authenticate_request(auth_header)

            if not auth_context:
                logger.warning(f"Authentication failed for tool '{tool_name}'")
                raise PermissionError("Invalid token")

            authorized, reason = self.middleware.authorize_tool_call(
                tool_name, auth_context
            )

            if not authorized:
                logger.warning(f"Authorization failed for tool '{tool_name}': {reason}")
                raise PermissionError(f"Access denied: {reason}")

            # Add auth context to kwargs so the tool can inspect the caller
            kwargs["auth_context"] = auth_context
            logger.debug(f"Calling tool '{tool_name}' for user {auth_context.user_id}")

            # NOTE(review): assumes the wrapped tool is async — TODO confirm
            # every registered tool is a coroutine function.
            return await original_func(*args, **kwargs)

        return auth_wrapper

    def add_oauth_endpoints(self):
        """Add OAuth endpoints to the MCP server.

        Registers five tools: oauth_authorize, oauth_token, oauth_introspect,
        oauth_revoke, and oauth_user_tools.
        """

        @self.mcp_server.tool(
            description="Initiate OAuth 2.1 authorization flow with PKCE",
            annotations={"readOnlyHint": True, "idempotentHint": False}
        )
        async def oauth_authorize(redirect_uri: str, scopes: Optional[str] = None):
            """OAuth authorization endpoint (scopes: space-separated string)."""
            scope_list = scopes.split(" ") if scopes else None
            auth_url, pkce = self.oauth_provider.generate_authorization_url(
                redirect_uri=redirect_uri, scopes=scope_list
            )
            logger.info(f"Generated authorization URL for redirect_uri: {redirect_uri}")
            return {
                "authorization_url": auth_url,
                "code_verifier": pkce.verifier,  # For PKCE flow
                "code_challenge": pkce.challenge,
                "instructions": "Use the authorization_url to complete OAuth flow, then exchange the returned code using oauth_token tool"
            }

        @self.mcp_server.tool(
            description="Exchange OAuth authorization code for access token",
            annotations={"readOnlyHint": False, "idempotentHint": False}
        )
        async def oauth_token(
            code: str,
            state: str,
            redirect_uri: str
        ):
            """OAuth token exchange endpoint"""
            try:
                result = await self.oauth_provider.exchange_code_for_token(
                    code=code, state=state, redirect_uri=redirect_uri
                )
                logger.info("Successfully exchanged authorization code for token")
                return result
            except Exception as e:
                # Log and re-raise so the MCP layer reports the failure.
                logger.error(f"Token exchange failed: {e}")
                raise

        @self.mcp_server.tool(
            description="Validate and introspect OAuth access token",
            annotations={"readOnlyHint": True, "idempotentHint": True}
        )
        async def oauth_introspect(token: str):
            """Token introspection endpoint"""
            result = self.oauth_provider.introspect_token(token)
            logger.debug(f"Token introspection: active={result.get('active', False)}")
            return result

        @self.mcp_server.tool(
            description="Revoke OAuth access token",
            annotations={"readOnlyHint": False, "idempotentHint": False}
        )
        async def oauth_revoke(token: str):
            """Token revocation endpoint"""
            success = self.oauth_provider.revoke_token(token)
            logger.info(f"Token revocation: success={success}")
            return {"revoked": success}

        @self.mcp_server.tool(
            description="Get list of tools available to authenticated user",
            annotations={"readOnlyHint": True, "idempotentHint": True}
        )
        async def oauth_user_tools(authorization: str):
            """Get user's allowed tools based on scopes.

            Expects a full "Bearer ..." authorization header value.
            """
            auth_context = self.middleware.authenticate_request(authorization)
            if not auth_context:
                raise PermissionError("Invalid token")

            allowed_patterns = self.middleware.policy_engine.get_allowed_tools(auth_context.scopes)

            return {
                "user_id": auth_context.user_id,
                "scopes": auth_context.scopes,
                "allowed_tool_patterns": allowed_patterns,
                "message": "Use these patterns to determine which tools you can access"
            }

        logger.info("Added OAuth endpoints: oauth_authorize, oauth_token, oauth_introspect, oauth_revoke, oauth_user_tools")
```

--------------------------------------------------------------------------------
/saidsurucu-yargi-mcp-f5fa007/mcp_auth/middleware.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | MCP server middleware for OAuth authentication and authorization
  3 | """
  4 | 
  5 | import functools
  6 | import logging
  7 | from collections.abc import Callable
  8 | from dataclasses import dataclass
  9 | from typing import Any, Optional
 10 | 
 11 | logger = logging.getLogger(__name__)
 12 | 
 13 | try:
 14 |     from fastmcp import FastMCP
 15 |     FASTMCP_AVAILABLE = True
 16 | except ImportError:
 17 |     FASTMCP_AVAILABLE = False
 18 |     FastMCP = None
 19 |     logger.warning("FastMCP not available, some features will be disabled")
 20 | 
 21 | from .oauth import OAuthProvider
 22 | from .policy import PolicyEngine
 23 | 
 24 | 
 25 | @dataclass
 26 | class AuthContext:
 27 |     """Authentication context passed to MCP tools"""
 28 | 
 29 |     user_id: str
 30 |     scopes: list[str]
 31 |     claims: dict[str, Any]
 32 |     token: str
 33 | 
 34 | 
 35 | class MCPAuthMiddleware:
 36 |     """Authentication middleware for MCP servers"""
 37 | 
 38 |     def __init__(self, oauth_provider: OAuthProvider, policy_engine: PolicyEngine):
 39 |         self.oauth_provider = oauth_provider
 40 |         self.policy_engine = policy_engine
 41 | 
 42 |     def authenticate_request(self, authorization_header: str) -> AuthContext | None:
 43 |         """Extract and validate auth token from request"""
 44 | 
 45 |         if not authorization_header:
 46 |             logger.debug("No authorization header provided")
 47 |             return None
 48 | 
 49 |         if not authorization_header.startswith("Bearer "):
 50 |             logger.debug("Authorization header does not start with 'Bearer '")
 51 |             return None
 52 | 
 53 |         token = authorization_header[7:]  # Remove 'Bearer ' prefix
 54 | 
 55 |         token_info = self.oauth_provider.introspect_token(token)
 56 | 
 57 |         if not token_info.get("active"):
 58 |             logger.warning("Token is not active")
 59 |             return None
 60 | 
 61 |         logger.debug(f"Authenticated user: {token_info.get('sub', 'unknown')}")
 62 | 
 63 |         return AuthContext(
 64 |             user_id=token_info.get("sub", "unknown"),
 65 |             scopes=token_info.get("mcp_tool_scopes", []),
 66 |             claims=token_info,
 67 |             token=token,
 68 |         )
 69 | 
 70 |     def authorize_tool_call(
 71 |         self, tool_name: str, auth_context: AuthContext
 72 |     ) -> tuple[bool, str | None]:
 73 |         """Check if user can call the specified tool"""
 74 | 
 75 |         return self.policy_engine.authorize_tool_call(
 76 |             tool_name=tool_name,
 77 |             user_scopes=auth_context.scopes,
 78 |             user_claims=auth_context.claims,
 79 |         )
 80 | 
 81 | 
 82 | def auth_required(
 83 |     oauth_provider: OAuthProvider,
 84 |     policy_engine: PolicyEngine,
 85 |     tool_name: str | None = None,
 86 | ):
 87 |     """
 88 |     Decorator to require authentication for MCP tool functions
 89 | 
 90 |     Usage:
 91 |         @auth_required(oauth_provider, policy_engine, "search_yargitay")
 92 |         def my_tool_function(context: AuthContext, ...):
 93 |             pass
 94 |     """
 95 | 
 96 |     def decorator(func: Callable) -> Callable:
 97 |         middleware = MCPAuthMiddleware(oauth_provider, policy_engine)
 98 | 
 99 |         @functools.wraps(func)
100 |         async def wrapper(*args, **kwargs):
101 |             # Extract authorization header from kwargs
102 |             auth_header = kwargs.pop("authorization", None)
103 |             
104 |             # Also check in args if it's a Request object
105 |             if not auth_header and args:
106 |                 for arg in args:
107 |                     if hasattr(arg, 'headers'):
108 |                         auth_header = arg.headers.get("Authorization")
109 |                         break
110 | 
111 |             if not auth_header:
112 |                 logger.warning(f"No authorization header for tool '{tool_name or func.__name__}'")
113 |                 raise PermissionError("Authorization header required")
114 | 
115 |             auth_context = middleware.authenticate_request(auth_header)
116 | 
117 |             if not auth_context:
118 |                 logger.warning(f"Authentication failed for tool '{tool_name or func.__name__}'")
119 |                 raise PermissionError("Invalid or expired token")
120 | 
121 |             actual_tool_name = tool_name or func.__name__
122 | 
123 |             authorized, reason = middleware.authorize_tool_call(
124 |                 actual_tool_name, auth_context
125 |             )
126 | 
127 |             if not authorized:
128 |                 logger.warning(f"Authorization failed for tool '{actual_tool_name}': {reason}")
129 |                 raise PermissionError(f"Access denied: {reason}")
130 | 
131 |             # Add auth context to function call
132 |             return await func(auth_context, *args, **kwargs)
133 | 
134 |         return wrapper
135 | 
136 |     return decorator
137 | 
138 | 
class FastMCPAuthWrapper:
    """Wrapper for FastMCP servers to add authentication.

    On construction, wraps every tool already registered on the server with
    token authentication and policy authorization. Optionally,
    add_oauth_endpoints() registers OAuth helper tools on the same server.
    """

    def __init__(
        self,
        mcp_server: "FastMCP",
        oauth_provider: OAuthProvider,
        policy_engine: PolicyEngine,
    ):
        # Fail fast if the optional fastmcp dependency is not installed.
        if not FASTMCP_AVAILABLE:
            raise ImportError("FastMCP is required for FastMCPAuthWrapper")

        self.mcp_server = mcp_server
        self.middleware = MCPAuthMiddleware(oauth_provider, policy_engine)
        self.oauth_provider = oauth_provider
        logger.info("Initializing FastMCP authentication wrapper")
        self._wrap_tools()

    def _wrap_tools(self):
        """Wrap all existing tools with auth middleware"""

        # Try different FastMCP tool storage locations — the registry
        # attribute name varies across FastMCP versions, so probe candidates.
        tool_registry = None

        if hasattr(self.mcp_server, '_tools'):
            tool_registry = self.mcp_server._tools
        elif hasattr(self.mcp_server, 'tools'):
            tool_registry = self.mcp_server.tools
        elif hasattr(self.mcp_server, '_tool_registry'):
            tool_registry = self.mcp_server._tool_registry
        elif hasattr(self.mcp_server, '_handlers') and hasattr(self.mcp_server._handlers, 'tools'):
            tool_registry = self.mcp_server._handlers.tools

        if not tool_registry:
            # Non-fatal: the server keeps working, just without auth wrapping.
            logger.warning("FastMCP server tool registry not found, tools will not be automatically wrapped")
            logger.debug(f"Available server attributes: {dir(self.mcp_server)}")
            return

        logger.debug(f"Found tool registry with {len(tool_registry)} tools")
        # Snapshot first so the live registry can be mutated while iterating.
        original_tools = dict(tool_registry)
        wrapped_count = 0

        for tool_name, tool_func in original_tools.items():
            try:
                wrapped_func = self._create_auth_wrapper(tool_name, tool_func)
                tool_registry[tool_name] = wrapped_func
                wrapped_count += 1
                logger.debug(f"Wrapped tool: {tool_name}")
            except Exception as e:
                # Leave the original (unwrapped) tool in place on failure.
                logger.error(f"Failed to wrap tool {tool_name}: {e}")

        logger.info(f"Successfully wrapped {wrapped_count} tools with authentication")

    def _create_auth_wrapper(self, tool_name: str, original_func: Callable) -> Callable:
        """Create auth wrapper for a specific tool.

        The returned coroutine authenticates, authorizes against *tool_name*,
        then invokes *original_func* with an extra `auth_context` kwarg.
        Raises PermissionError on any auth failure.
        """

        @functools.wraps(original_func)
        async def auth_wrapper(*args, **kwargs):
            # Extract authorization from various sources
            auth_header = None

            # Check kwargs first (popped so it is not forwarded to the tool)
            auth_header = kwargs.pop("authorization", None)

            # Check if first argument is a Request object
            if not auth_header and args:
                first_arg = args[0]
                if hasattr(first_arg, 'headers'):
                    auth_header = first_arg.headers.get("Authorization")

            if not auth_header:
                logger.warning(f"No authorization header for tool '{tool_name}'")
                raise PermissionError("Authorization required")

            auth_context = self.middleware.authenticate_request(auth_header)

            if not auth_context:
                logger.warning(f"Authentication failed for tool '{tool_name}'")
                raise PermissionError("Invalid token")

            authorized, reason = self.middleware.authorize_tool_call(
                tool_name, auth_context
            )

            if not authorized:
                logger.warning(f"Authorization failed for tool '{tool_name}': {reason}")
                raise PermissionError(f"Access denied: {reason}")

            # Add auth context to kwargs so the tool can inspect the caller
            kwargs["auth_context"] = auth_context
            logger.debug(f"Calling tool '{tool_name}' for user {auth_context.user_id}")

            # NOTE(review): assumes the wrapped tool is async — TODO confirm
            # every registered tool is a coroutine function.
            return await original_func(*args, **kwargs)

        return auth_wrapper

    def add_oauth_endpoints(self):
        """Add OAuth endpoints to the MCP server.

        Registers five tools: oauth_authorize, oauth_token, oauth_introspect,
        oauth_revoke, and oauth_user_tools.
        """

        @self.mcp_server.tool(
            description="Initiate OAuth 2.1 authorization flow with PKCE",
            annotations={"readOnlyHint": True, "idempotentHint": False}
        )
        async def oauth_authorize(redirect_uri: str, scopes: Optional[str] = None):
            """OAuth authorization endpoint (scopes: space-separated string)."""
            scope_list = scopes.split(" ") if scopes else None
            auth_url, pkce = self.oauth_provider.generate_authorization_url(
                redirect_uri=redirect_uri, scopes=scope_list
            )
            logger.info(f"Generated authorization URL for redirect_uri: {redirect_uri}")
            return {
                "authorization_url": auth_url,
                "code_verifier": pkce.verifier,  # For PKCE flow
                "code_challenge": pkce.challenge,
                "instructions": "Use the authorization_url to complete OAuth flow, then exchange the returned code using oauth_token tool"
            }

        @self.mcp_server.tool(
            description="Exchange OAuth authorization code for access token",
            annotations={"readOnlyHint": False, "idempotentHint": False}
        )
        async def oauth_token(
            code: str,
            state: str,
            redirect_uri: str
        ):
            """OAuth token exchange endpoint"""
            try:
                result = await self.oauth_provider.exchange_code_for_token(
                    code=code, state=state, redirect_uri=redirect_uri
                )
                logger.info("Successfully exchanged authorization code for token")
                return result
            except Exception as e:
                # Log and re-raise so the MCP layer reports the failure.
                logger.error(f"Token exchange failed: {e}")
                raise

        @self.mcp_server.tool(
            description="Validate and introspect OAuth access token",
            annotations={"readOnlyHint": True, "idempotentHint": True}
        )
        async def oauth_introspect(token: str):
            """Token introspection endpoint"""
            result = self.oauth_provider.introspect_token(token)
            logger.debug(f"Token introspection: active={result.get('active', False)}")
            return result

        @self.mcp_server.tool(
            description="Revoke OAuth access token",
            annotations={"readOnlyHint": False, "idempotentHint": False}
        )
        async def oauth_revoke(token: str):
            """Token revocation endpoint"""
            success = self.oauth_provider.revoke_token(token)
            logger.info(f"Token revocation: success={success}")
            return {"revoked": success}

        @self.mcp_server.tool(
            description="Get list of tools available to authenticated user",
            annotations={"readOnlyHint": True, "idempotentHint": True}
        )
        async def oauth_user_tools(authorization: str):
            """Get user's allowed tools based on scopes.

            Expects a full "Bearer ..." authorization header value.
            """
            auth_context = self.middleware.authenticate_request(authorization)
            if not auth_context:
                raise PermissionError("Invalid token")

            allowed_patterns = self.middleware.policy_engine.get_allowed_tools(auth_context.scopes)

            return {
                "user_id": auth_context.user_id,
                "scopes": auth_context.scopes,
                "allowed_tool_patterns": allowed_patterns,
                "message": "Use these patterns to determine which tools you can access"
            }

        logger.info("Added OAuth endpoints: oauth_authorize, oauth_token, oauth_introspect, oauth_revoke, oauth_user_tools")
```

--------------------------------------------------------------------------------
/saidsurucu-yargi-mcp-f5fa007/uyusmazlik_mcp_module/client.py:
--------------------------------------------------------------------------------

```python
  1 | # uyusmazlik_mcp_module/client.py
  2 | 
  3 | import httpx 
  4 | import aiohttp 
  5 | from bs4 import BeautifulSoup
  6 | from typing import Dict, Any, List, Optional, Union, Tuple 
  7 | import logging
  8 | import html
  9 | import re
 10 | import io
 11 | from markitdown import MarkItDown
 12 | from urllib.parse import urljoin, urlencode # urlencode for aiohttp form data
 13 | 
 14 | from .models import (
 15 |     UyusmazlikSearchRequest,
 16 |     UyusmazlikApiDecisionEntry,
 17 |     UyusmazlikSearchResponse,
 18 |     UyusmazlikDocumentMarkdown,
 19 |     UyusmazlikBolumEnum, 
 20 |     UyusmazlikTuruEnum,
 21 |     UyusmazlikKararSonucuEnum
 22 | )
 23 | 
logger = logging.getLogger(__name__)
# Configure a default handler only if the host application has not already
# set up logging, so library use does not clobber existing configuration.
if not logger.hasHandlers():
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# --- Mappings from user-friendly Enum values to API IDs ---
# The GUIDs below identify the corresponding options in the
# kararlar.uyusmazlik.gov.tr search form; the API expects these GUIDs
# (or an empty string for "all") in the POSTed form fields.
BOLUM_ENUM_TO_ID_MAP = {
    UyusmazlikBolumEnum.CEZA_BOLUMU: "f6b74320-f2d7-4209-ad6e-c6df180d4e7c",
    UyusmazlikBolumEnum.GENEL_KURUL_KARARLARI: "e4ca658d-a75a-4719-b866-b2d2f1c3b1d9",
    UyusmazlikBolumEnum.HUKUK_BOLUMU: "96b26fc4-ef8e-4a4f-a9cc-a3de89952aa1",
    UyusmazlikBolumEnum.TUMU: "", # Represents "...Seçiniz..." or all - empty string for API
    "ALL": "" # Also map the new "ALL" literal to empty string for backward compatibility
}

UYUSMAZLIK_TURU_ENUM_TO_ID_MAP = {
    UyusmazlikTuruEnum.GOREV_UYUSMAZLIGI: "7b1e2cd3-8f09-418a-921c-bbe501e1740c",
    UyusmazlikTuruEnum.HUKUM_UYUSMAZLIGI: "19b88402-172b-4c1d-8339-595c942a89f5",
    UyusmazlikTuruEnum.TUMU: "", # Represents "...Seçiniz..." or all - empty string for API
    "ALL": "" # Also map the new "ALL" literal to empty string for backward compatibility
}

KARAR_SONUCU_ENUM_TO_ID_MAP = {
    # These IDs are from the form HTML provided by the user
    UyusmazlikKararSonucuEnum.HUKUM_UYUSMAZLIGI_OLMADIGINA_DAIR: "6f47d87f-dcb5-412e-9878-000385dba1d9",
    UyusmazlikKararSonucuEnum.HUKUM_UYUSMAZLIGI_OLDUGUNA_DAIR: "5a01742a-c440-4c4a-ba1f-da20837cffed",
    # Add all other 'Karar Sonucu' enum members and their corresponding GUIDs
    # by inspecting the 'KararSonucuList' checkboxes in the provided form HTML.
    # NOTE(review): enum members missing here are silently dropped by
    # search_decisions (it only adds IDs found in this map) — verify coverage.
}
# --- End Mappings ---
 52 | 
 53 | class UyusmazlikApiClient:
 54 |     BASE_URL = "https://kararlar.uyusmazlik.gov.tr"
 55 |     SEARCH_ENDPOINT = "/Arama/Search" 
 56 |     # Individual documents are fetched by their full URLs obtained from search results.
 57 | 
    def __init__(self, request_timeout: float = 30.0):
        """Initialize the client.

        Args:
            request_timeout: Timeout in seconds applied to the aiohttp search
                request (and intended for httpx document fetches as well).
        """
        self.request_timeout = request_timeout # Store timeout for aiohttp and httpx
        # Headers for aiohttp search. httpx for docs will create its own.
        # Key order is preserved on the wire and mirrors a real browser request.
        self.default_aiohttp_search_headers = {
            "Accept": "*/*", # Mimicking browser headers provided by user
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7",
            "X-Requested-With": "XMLHttpRequest",  # Marks the POST as an AJAX search request
            "Origin": self.BASE_URL,
            "Referer": self.BASE_URL + "/",

        }
 70 | 
 71 | 
    async def search_decisions(
        self,
        params: UyusmazlikSearchRequest
    ) -> UyusmazlikSearchResponse:
        """Search Uyuşmazlık Mahkemesi decisions via the form-POST search endpoint.

        Builds an application/x-www-form-urlencoded payload from *params*,
        POSTs it with a throwaway aiohttp session, then scrapes the returned
        HTML results table into structured entries.

        Args:
            params: Validated search filters (chamber, dispute type, case
                numbers, dates, Official Gazette fields, text operators).

        Returns:
            UyusmazlikSearchResponse with the parsed decision entries and,
            when the page reports it, the total number of matching records.

        Raises:
            aiohttp.ClientError: on HTTP-level failures (re-raised for the MCP tool).
        """
        # Enum values are mapped to the GUIDs the backend form expects; an
        # unknown/"all" selection maps to the empty string.
        bolum_id_for_api = BOLUM_ENUM_TO_ID_MAP.get(params.bolum, "")
        uyusmazlik_id_for_api = UYUSMAZLIK_TURU_ENUM_TO_ID_MAP.get(params.uyusmazlik_turu, "")

        # Ordered (key, value) pairs so repeated keys (KararSonucuList) survive.
        form_data_list: List[Tuple[str, str]] = []

        def add_to_form_data(key: str, value: Optional[str]) -> None:
            # API expects empty strings for omitted optional fields based on user payload example
            form_data_list.append((key, value or ""))

        add_to_form_data("BolumId", bolum_id_for_api)
        add_to_form_data("UyusmazlikId", uyusmazlik_id_for_api)

        # 'KararSonucuList' is a multi-select checkbox group: one repeated
        # form field per selected result type.
        if params.karar_sonuclari:
            for enum_member in params.karar_sonuclari:
                api_id = KARAR_SONUCU_ENUM_TO_ID_MAP.get(enum_member)
                if api_id: # Only add if a valid ID is found
                    form_data_list.append(('KararSonucuList', api_id))

        add_to_form_data("EsasYil", params.esas_yil)
        add_to_form_data("EsasSayisi", params.esas_sayisi)
        add_to_form_data("KararYil", params.karar_yil)
        add_to_form_data("KararSayisi", params.karar_sayisi)
        add_to_form_data("KanunNo", params.kanun_no)
        add_to_form_data("KararDateBegin", params.karar_date_begin)
        add_to_form_data("KararDateEnd", params.karar_date_end)
        add_to_form_data("ResmiGazeteSayi", params.resmi_gazete_sayi)
        add_to_form_data("ResmiGazeteDate", params.resmi_gazete_date)
        add_to_form_data("Icerik", params.icerik)
        add_to_form_data("Tumce", params.tumce)
        add_to_form_data("WildCard", params.wild_card)
        add_to_form_data("Hepsi", params.hepsi)
        add_to_form_data("Herhangibirisi", params.herhangi_birisi)
        add_to_form_data("NotHepsi", params.not_hepsi)
        # X-Requested-With is handled by default_aiohttp_search_headers

        search_url = urljoin(self.BASE_URL, self.SEARCH_ENDPOINT)
        # For aiohttp, data for application/x-www-form-urlencoded should be a dict or str.
        # Using urlencode for list of tuples.
        encoded_form_payload = urlencode(form_data_list, encoding='UTF-8')

        logger.info(f"UyusmazlikApiClient (aiohttp): Performing search to {search_url} with form_data: {encoded_form_payload}")

        html_content = ""
        aiohttp_headers = self.default_aiohttp_search_headers.copy()
        aiohttp_headers["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"

        try:
            # Create a new session for each call for simplicity with aiohttp here
            async with aiohttp.ClientSession(headers=aiohttp_headers) as session:
                async with session.post(search_url, data=encoded_form_payload, timeout=self.request_timeout) as response:
                    response.raise_for_status() # Raises ClientResponseError for 400-599
                    html_content = await response.text(encoding='utf-8') # Ensure correct encoding
                    logger.debug("UyusmazlikApiClient (aiohttp): Received HTML response for search.")

        except aiohttp.ClientError as e:
            logger.error(f"UyusmazlikApiClient (aiohttp): HTTP client error during search: {e}")
            raise # Re-raise to be handled by the MCP tool
        except Exception as e:
            logger.error(f"UyusmazlikApiClient (aiohttp): Error processing search request: {e}")
            raise

        # --- HTML Parsing (remains the same as previous version) ---
        soup = BeautifulSoup(html_content, 'html.parser')
        # The hit count appears as e.g. "123 adet kayıt bulundu" in a label div.
        total_records_text_div = soup.find("div", class_="pull-right label label-important")
        total_records = None
        if total_records_text_div:
            match_records = re.search(r'(\d+)\s*adet kayıt bulundu', total_records_text_div.get_text(strip=True))
            if match_records:
                total_records = int(match_records.group(1))

        result_table = soup.find("table", class_="table-hover")
        processed_decisions: List[UyusmazlikApiDecisionEntry] = []
        if result_table:
            rows = result_table.find_all("tr")
            if len(rows) > 1: # Skip header row
                for row in rows[1:]:
                    cols = row.find_all('td')
                    if len(cols) >= 5:
                        try:
                            # The first cell carries a popover with a decision preview
                            # and the link to the full document.
                            popover_div = cols[0].find("div", attrs={"data-rel": "popover"})
                            popover_content_raw = popover_div["data-content"] if popover_div and popover_div.has_attr("data-content") else None

                            link_tag = cols[0].find('a')
                            doc_relative_url = link_tag['href'] if link_tag and link_tag.has_attr('href') else None

                            # A row without a document link is unusable — skip it.
                            if not doc_relative_url: continue
                            document_url_str = urljoin(self.BASE_URL, doc_relative_url)

                            # Optional sixth column may hold a direct PDF link.
                            pdf_link_tag = cols[5].find('a', href=re.compile(r'\.pdf$', re.IGNORECASE)) if len(cols) > 5 else None
                            pdf_url_str = urljoin(self.BASE_URL, pdf_link_tag['href']) if pdf_link_tag and pdf_link_tag.has_attr('href') else None

                            decision_data_parsed = {
                                "karar_sayisi": cols[0].get_text(strip=True),
                                "esas_sayisi": cols[1].get_text(strip=True),
                                "bolum": cols[2].get_text(strip=True),
                                "uyusmazlik_konusu": cols[3].get_text(strip=True),
                                "karar_sonucu": cols[4].get_text(strip=True),
                                "popover_content": html.unescape(popover_content_raw) if popover_content_raw else None,
                                "document_url": document_url_str,
                                "pdf_url": pdf_url_str
                            }
                            decision_model = UyusmazlikApiDecisionEntry(**decision_data_parsed)
                            processed_decisions.append(decision_model)
                        except Exception as e:
                            # A malformed row should not abort the whole result set.
                            logger.warning(f"UyusmazlikApiClient: Could not parse decision row. Row content: {row.get_text(strip=True, separator=' | ')}, Error: {e}")

        return UyusmazlikSearchResponse(
            decisions=processed_decisions,
            total_records_found=total_records
        )
187 | 
188 |     def _convert_html_to_markdown_uyusmazlik(self, full_decision_html_content: str) -> Optional[str]:
189 |         """Converts direct HTML content (from an Uyuşmazlık decision page) to Markdown."""
190 |         if not full_decision_html_content: 
191 |             return None
192 |         
193 |         processed_html = html.unescape(full_decision_html_content)
194 |         # As per user request, pass the full (unescaped) HTML to MarkItDown
195 |         html_input_for_markdown = processed_html
196 | 
197 |         markdown_text = None
198 |         try:
199 |             # Convert HTML string to bytes and create BytesIO stream
200 |             html_bytes = html_input_for_markdown.encode('utf-8')
201 |             html_stream = io.BytesIO(html_bytes)
202 |             
203 |             # Pass BytesIO stream to MarkItDown to avoid temp file creation
204 |             md_converter = MarkItDown()
205 |             conversion_result = md_converter.convert(html_stream)
206 |             markdown_text = conversion_result.text_content
207 |             logger.info("UyusmazlikApiClient: HTML to Markdown conversion successful.")
208 |         except Exception as e:
209 |             logger.error(f"UyusmazlikApiClient: Error during MarkItDown HTML to Markdown conversion: {e}")
210 |         return markdown_text
211 | 
212 |     async def get_decision_document_as_markdown(self, document_url: str) -> UyusmazlikDocumentMarkdown:
213 |         """
214 |         Retrieves a specific Uyuşmazlık decision from its full URL and returns content as Markdown.
215 |         """
216 |         logger.info(f"UyusmazlikApiClient (httpx for docs): Fetching Uyuşmazlık document for Markdown from URL: {document_url}")
217 |         try:
218 |             # Using a new httpx.AsyncClient instance for this GET request for simplicity
219 |             async with httpx.AsyncClient(verify=False, timeout=self.request_timeout) as doc_fetch_client:
220 | 
221 |                  get_response = await doc_fetch_client.get(document_url, headers={"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"})
222 |             get_response.raise_for_status()
223 |             html_content_from_api = get_response.text
224 | 
225 |             if not isinstance(html_content_from_api, str) or not html_content_from_api.strip():
226 |                 logger.warning(f"UyusmazlikApiClient: Received empty or non-string HTML from URL {document_url}.")
227 |                 return UyusmazlikDocumentMarkdown(source_url=document_url, markdown_content=None)
228 | 
229 |             markdown_content = self._convert_html_to_markdown_uyusmazlik(html_content_from_api)
230 |             return UyusmazlikDocumentMarkdown(source_url=document_url, markdown_content=markdown_content)
231 |         except httpx.RequestError as e:
232 |             logger.error(f"UyusmazlikApiClient (httpx for docs): HTTP error fetching Uyuşmazlık document from {document_url}: {e}")
233 |             raise
234 |         except Exception as e:
235 |             logger.error(f"UyusmazlikApiClient (httpx for docs): General error processing Uyuşmazlık document from {document_url}: {e}")
236 |             raise
237 | 
238 |     async def close_client_session(self):
239 | 
240 |         logger.info("UyusmazlikApiClient: No persistent client session from __init__ to close.")
```

--------------------------------------------------------------------------------
/uyusmazlik_mcp_module/client.py:
--------------------------------------------------------------------------------

```python
  1 | # uyusmazlik_mcp_module/client.py
  2 | 
  3 | import httpx 
  4 | from bs4 import BeautifulSoup
  5 | from typing import Dict, Any, List, Optional, Union, Tuple 
  6 | import logging
  7 | import html
  8 | import re
  9 | import io
 10 | from markitdown import MarkItDown
 11 | from urllib.parse import urljoin
 12 | 
 13 | from .models import (
 14 |     UyusmazlikSearchRequest,
 15 |     UyusmazlikApiDecisionEntry,
 16 |     UyusmazlikSearchResponse,
 17 |     UyusmazlikDocumentMarkdown,
 18 |     UyusmazlikBolumEnum, 
 19 |     UyusmazlikTuruEnum,
 20 |     UyusmazlikKararSonucuEnum
 21 | )
 22 | 
# Module-level logger; falls back to a basic root configuration only when the
# host application has not configured logging itself.
logger = logging.getLogger(__name__)
if not logger.hasHandlers():
    # NOTE(review): calling basicConfig from a library module is an import-time
    # side effect; the hasHandlers() guard limits it to unconfigured processes.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# --- Mappings from user-friendly Enum values to API IDs ---
# The GUIDs below come from the search form on kararlar.uyusmazlik.gov.tr;
# empty string means "no filter" (the form's "...Seçiniz..." option).
BOLUM_ENUM_TO_ID_MAP = {
    UyusmazlikBolumEnum.CEZA_BOLUMU: "f6b74320-f2d7-4209-ad6e-c6df180d4e7c",
    UyusmazlikBolumEnum.GENEL_KURUL_KARARLARI: "e4ca658d-a75a-4719-b866-b2d2f1c3b1d9",
    UyusmazlikBolumEnum.HUKUK_BOLUMU: "96b26fc4-ef8e-4a4f-a9cc-a3de89952aa1",
    UyusmazlikBolumEnum.TUMU: "", # Represents "...Seçiniz..." or all - empty string for API
    "ALL": "" # Also map the new "ALL" literal to empty string for backward compatibility
}

UYUSMAZLIK_TURU_ENUM_TO_ID_MAP = {
    UyusmazlikTuruEnum.GOREV_UYUSMAZLIGI: "7b1e2cd3-8f09-418a-921c-bbe501e1740c",
    UyusmazlikTuruEnum.HUKUM_UYUSMAZLIGI: "19b88402-172b-4c1d-8339-595c942a89f5",
    UyusmazlikTuruEnum.TUMU: "", # Represents "...Seçiniz..." or all - empty string for API
    "ALL": "" # Also map the new "ALL" literal to empty string for backward compatibility
}

KARAR_SONUCU_ENUM_TO_ID_MAP = {
    # These IDs are from the form HTML provided by the user
    UyusmazlikKararSonucuEnum.HUKUM_UYUSMAZLIGI_OLMADIGINA_DAIR: "6f47d87f-dcb5-412e-9878-000385dba1d9",
    UyusmazlikKararSonucuEnum.HUKUM_UYUSMAZLIGI_OLDUGUNA_DAIR: "5a01742a-c440-4c4a-ba1f-da20837cffed",
    # Add all other 'Karar Sonucu' enum members and their corresponding GUIDs
    # by inspecting the 'KararSonucuList' checkboxes in the provided form HTML.
}
# --- End Mappings ---
 51 | 
 52 | class UyusmazlikApiClient:
 53 |     BASE_URL = "https://kararlar.uyusmazlik.gov.tr"
 54 |     SEARCH_ENDPOINT = "/Arama/Search" 
 55 |     # Individual documents are fetched by their full URLs obtained from search results.
 56 | 
 57 |     def __init__(self, request_timeout: float = 30.0):
 58 |         self.request_timeout = request_timeout
 59 |         # Create shared httpx client for all requests
 60 |         self.http_client = httpx.AsyncClient(
 61 |             base_url=self.BASE_URL,
 62 |             headers={
 63 |                 "Accept": "*/*",
 64 |                 "Accept-Encoding": "gzip, deflate, br, zstd", 
 65 |                 "Accept-Language": "tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7",
 66 |                 "X-Requested-With": "XMLHttpRequest",
 67 |                 "Origin": self.BASE_URL,
 68 |                 "Referer": self.BASE_URL + "/",
 69 |             },
 70 |             timeout=request_timeout,
 71 |             verify=False
 72 |         )
 73 | 
 74 | 
 75 |     async def search_decisions(
 76 |         self,
 77 |         params: UyusmazlikSearchRequest
 78 |     ) -> UyusmazlikSearchResponse:
 79 |         
 80 |         bolum_id_for_api = BOLUM_ENUM_TO_ID_MAP.get(params.bolum, "")
 81 |         uyusmazlik_id_for_api = UYUSMAZLIK_TURU_ENUM_TO_ID_MAP.get(params.uyusmazlik_turu, "")
 82 |         
 83 |         form_data_list: List[Tuple[str, str]] = []
 84 | 
 85 |         def add_to_form_data(key: str, value: Optional[str]):
 86 |             # API expects empty strings for omitted optional fields based on user payload example
 87 |             form_data_list.append((key, value or ""))
 88 | 
 89 |         add_to_form_data("BolumId", bolum_id_for_api)
 90 |         add_to_form_data("UyusmazlikId", uyusmazlik_id_for_api)
 91 |         
 92 |         if params.karar_sonuclari:
 93 |             for enum_member in params.karar_sonuclari:
 94 |                 api_id = KARAR_SONUCU_ENUM_TO_ID_MAP.get(enum_member) 
 95 |                 if api_id: # Only add if a valid ID is found
 96 |                     form_data_list.append(('KararSonucuList', api_id))
 97 |         
 98 |         add_to_form_data("EsasYil", params.esas_yil)
 99 |         add_to_form_data("EsasSayisi", params.esas_sayisi)
100 |         add_to_form_data("KararYil", params.karar_yil)
101 |         add_to_form_data("KararSayisi", params.karar_sayisi)
102 |         add_to_form_data("KanunNo", params.kanun_no)
103 |         add_to_form_data("KararDateBegin", params.karar_date_begin)
104 |         add_to_form_data("KararDateEnd", params.karar_date_end)
105 |         add_to_form_data("ResmiGazeteSayi", params.resmi_gazete_sayi)
106 |         add_to_form_data("ResmiGazeteDate", params.resmi_gazete_date)
107 |         add_to_form_data("Icerik", params.icerik)
108 |         add_to_form_data("Tumce", params.tumce)
109 |         add_to_form_data("WildCard", params.wild_card)
110 |         add_to_form_data("Hepsi", params.hepsi)
111 |         add_to_form_data("Herhangibirisi", params.herhangi_birisi)
112 |         add_to_form_data("NotHepsi", params.not_hepsi)
113 | 
114 |         # Convert form data to dict for httpx
115 |         form_data_dict = {}
116 |         for key, value in form_data_list:
117 |             if key in form_data_dict:
118 |                 # Handle multiple values (like KararSonucuList)
119 |                 if not isinstance(form_data_dict[key], list):
120 |                     form_data_dict[key] = [form_data_dict[key]]
121 |                 form_data_dict[key].append(value)
122 |             else:
123 |                 form_data_dict[key] = value
124 | 
125 |         logger.info(f"UyusmazlikApiClient (httpx): Performing search to {self.SEARCH_ENDPOINT} with form_data: {form_data_dict}")
126 |         
127 |         try:
128 |             # Use shared httpx client
129 |             response = await self.http_client.post(
130 |                 self.SEARCH_ENDPOINT,
131 |                 data=form_data_dict,
132 |                 headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
133 |             )
134 |             response.raise_for_status()
135 |             html_content = response.text
136 |             logger.debug("UyusmazlikApiClient (httpx): Received HTML response for search.")
137 |         
138 |         except httpx.HTTPError as e:
139 |             logger.error(f"UyusmazlikApiClient (httpx): HTTP client error during search: {e}")
140 |             raise # Re-raise to be handled by the MCP tool
141 |         except Exception as e:
142 |             logger.error(f"UyusmazlikApiClient (httpx): Error processing search request: {e}")
143 |             raise
144 | 
145 |         # --- HTML Parsing (remains the same as previous version) ---
146 |         soup = BeautifulSoup(html_content, 'html.parser')
147 |         total_records_text_div = soup.find("div", class_="pull-right label label-important")
148 |         total_records = None
149 |         if total_records_text_div:
150 |             match_records = re.search(r'(\d+)\s*adet kayıt bulundu', total_records_text_div.get_text(strip=True))
151 |             if match_records:
152 |                 total_records = int(match_records.group(1))
153 |         
154 |         result_table = soup.find("table", class_="table-hover")
155 |         processed_decisions: List[UyusmazlikApiDecisionEntry] = []
156 |         if result_table:
157 |             rows = result_table.find_all("tr")
158 |             if len(rows) > 1: # Skip header row
159 |                 for row in rows[1:]:
160 |                     cols = row.find_all('td')
161 |                     if len(cols) >= 5:
162 |                         try:
163 |                             popover_div = cols[0].find("div", attrs={"data-rel": "popover"})
164 |                             popover_content_raw = popover_div["data-content"] if popover_div and popover_div.has_attr("data-content") else None
165 |                             
166 |                             link_tag = cols[0].find('a')
167 |                             doc_relative_url = link_tag['href'] if link_tag and link_tag.has_attr('href') else None
168 |                             
169 |                             if not doc_relative_url: continue
170 |                             document_url_str = urljoin(self.BASE_URL, doc_relative_url)
171 | 
172 |                             pdf_link_tag = cols[5].find('a', href=re.compile(r'\.pdf$', re.IGNORECASE)) if len(cols) > 5 else None
173 |                             pdf_url_str = urljoin(self.BASE_URL, pdf_link_tag['href']) if pdf_link_tag and pdf_link_tag.has_attr('href') else None
174 | 
175 |                             decision_data_parsed = {
176 |                                 "karar_sayisi": cols[0].get_text(strip=True),
177 |                                 "esas_sayisi": cols[1].get_text(strip=True),
178 |                                 "bolum": cols[2].get_text(strip=True),
179 |                                 "uyusmazlik_konusu": cols[3].get_text(strip=True),
180 |                                 "karar_sonucu": cols[4].get_text(strip=True),
181 |                                 "popover_content": html.unescape(popover_content_raw) if popover_content_raw else None,
182 |                                 "document_url": document_url_str,
183 |                                 "pdf_url": pdf_url_str
184 |                             }
185 |                             decision_model = UyusmazlikApiDecisionEntry(**decision_data_parsed)
186 |                             processed_decisions.append(decision_model)
187 |                         except Exception as e:
188 |                             logger.warning(f"UyusmazlikApiClient: Could not parse decision row. Row content: {row.get_text(strip=True, separator=' | ')}, Error: {e}")
189 |         
190 |         return UyusmazlikSearchResponse(
191 |             decisions=processed_decisions,
192 |             total_records_found=total_records
193 |         )
194 | 
195 |     def _convert_html_to_markdown_uyusmazlik(self, full_decision_html_content: str) -> Optional[str]:
196 |         """Converts direct HTML content (from an Uyuşmazlık decision page) to Markdown."""
197 |         if not full_decision_html_content: 
198 |             return None
199 |         
200 |         processed_html = html.unescape(full_decision_html_content)
201 |         # As per user request, pass the full (unescaped) HTML to MarkItDown
202 |         html_input_for_markdown = processed_html
203 | 
204 |         markdown_text = None
205 |         try:
206 |             # Convert HTML string to bytes and create BytesIO stream
207 |             html_bytes = html_input_for_markdown.encode('utf-8')
208 |             html_stream = io.BytesIO(html_bytes)
209 |             
210 |             # Pass BytesIO stream to MarkItDown to avoid temp file creation
211 |             md_converter = MarkItDown()
212 |             conversion_result = md_converter.convert(html_stream)
213 |             markdown_text = conversion_result.text_content
214 |             logger.info("UyusmazlikApiClient: HTML to Markdown conversion successful.")
215 |         except Exception as e:
216 |             logger.error(f"UyusmazlikApiClient: Error during MarkItDown HTML to Markdown conversion: {e}")
217 |         return markdown_text
218 | 
219 |     async def get_decision_document_as_markdown(self, document_url: str) -> UyusmazlikDocumentMarkdown:
220 |         """
221 |         Retrieves a specific Uyuşmazlık decision from its full URL and returns content as Markdown.
222 |         """
223 |         logger.info(f"UyusmazlikApiClient (httpx for docs): Fetching Uyuşmazlık document for Markdown from URL: {document_url}")
224 |         try:
225 |             # Using a new httpx.AsyncClient instance for this GET request for simplicity
226 |             async with httpx.AsyncClient(verify=False, timeout=self.request_timeout) as doc_fetch_client:
227 |                  get_response = await doc_fetch_client.get(document_url, headers={"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"})
228 |             get_response.raise_for_status()
229 |             html_content_from_api = get_response.text
230 | 
231 |             if not isinstance(html_content_from_api, str) or not html_content_from_api.strip():
232 |                 logger.warning(f"UyusmazlikApiClient: Received empty or non-string HTML from URL {document_url}.")
233 |                 return UyusmazlikDocumentMarkdown(source_url=document_url, markdown_content=None)
234 | 
235 |             markdown_content = self._convert_html_to_markdown_uyusmazlik(html_content_from_api)
236 |             return UyusmazlikDocumentMarkdown(source_url=document_url, markdown_content=markdown_content)
237 |         except httpx.RequestError as e:
238 |             logger.error(f"UyusmazlikApiClient (httpx for docs): HTTP error fetching Uyuşmazlık document from {document_url}: {e}")
239 |             raise
240 |         except Exception as e:
241 |             logger.error(f"UyusmazlikApiClient (httpx for docs): General error processing Uyuşmazlık document from {document_url}: {e}")
242 |             raise
243 | 
244 |     async def close_client_session(self):
245 |         """Close the shared httpx client session."""
246 |         if hasattr(self, 'http_client') and self.http_client:
247 |             await self.http_client.aclose()
248 |             logger.info("UyusmazlikApiClient: HTTP client session closed.")
249 |         else:
250 |             logger.info("UyusmazlikApiClient: No persistent client session from __init__ to close.")
```

--------------------------------------------------------------------------------
/mcp_auth_http_adapter.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | HTTP adapter for MCP Auth Toolkit OAuth endpoints
  3 | Exposes MCP OAuth tools as HTTP endpoints for Claude.ai integration
  4 | """
  5 | 
  6 | import os
  7 | import logging
  8 | import secrets
  9 | import time
 10 | from typing import Optional
 11 | from urllib.parse import urlencode, quote
 12 | from datetime import datetime, timedelta
 13 | 
 14 | from fastapi import APIRouter, Request, Query, HTTPException
 15 | from fastapi.responses import RedirectResponse, JSONResponse
 16 | 
 17 | # Try to import Clerk SDK
 18 | try:
 19 |     from clerk_backend_api import Clerk
 20 |     CLERK_AVAILABLE = True
 21 | except ImportError as e:
 22 |     CLERK_AVAILABLE = False
 23 |     Clerk = None
 24 | 
 25 | logger = logging.getLogger(__name__)
 26 | 
 27 | router = APIRouter()
 28 | 
 29 | # OAuth configuration
 30 | BASE_URL = os.getenv("BASE_URL", "https://yargimcp.com")
 31 | 
 32 | 
 33 | @router.get("/.well-known/oauth-authorization-server")
 34 | async def get_oauth_metadata():
 35 |     """OAuth 2.0 Authorization Server Metadata (RFC 8414)"""
 36 |     return JSONResponse({
 37 |         "issuer": BASE_URL,
 38 |         "authorization_endpoint": f"{BASE_URL}/authorize",
 39 |         "token_endpoint": f"{BASE_URL}/token",
 40 |         "registration_endpoint": f"{BASE_URL}/register",
 41 |         "response_types_supported": ["code"],
 42 |         "grant_types_supported": ["authorization_code", "refresh_token"],
 43 |         "code_challenge_methods_supported": ["S256"],
 44 |         "token_endpoint_auth_methods_supported": ["none"],
 45 |         "scopes_supported": ["mcp:tools:read", "mcp:tools:write", "openid", "profile", "email"],
 46 |         "service_documentation": f"{BASE_URL}/mcp/"
 47 |     })
 48 | 
 49 | 
 50 | @router.get("/.well-known/oauth-protected-resource")
 51 | async def get_protected_resource_metadata():
 52 |     """OAuth Protected Resource Metadata (RFC 9728)"""
 53 |     return JSONResponse({
 54 |         "resource": BASE_URL,
 55 |         "authorization_servers": [BASE_URL],
 56 |         "bearer_methods_supported": ["header"],
 57 |         "scopes_supported": ["mcp:tools:read", "mcp:tools:write"],
 58 |         "resource_documentation": f"{BASE_URL}/docs"
 59 |     })
 60 | 
 61 | 
 62 | @router.get("/authorize")
 63 | async def authorize_endpoint(
 64 |     response_type: str = Query(...),
 65 |     client_id: str = Query(...),
 66 |     redirect_uri: str = Query(...),
 67 |     code_challenge: str = Query(...),
 68 |     code_challenge_method: str = Query("S256"),
 69 |     state: Optional[str] = Query(None),
 70 |     scope: Optional[str] = Query(None)
 71 | ):
 72 |     """OAuth 2.1 Authorization Endpoint - Uses Clerk SDK for custom domains"""
 73 |     
 74 |     logger.info(f"OAuth authorize request - client_id: {client_id}, redirect_uri: {redirect_uri}")
 75 |     
 76 |     if not CLERK_AVAILABLE:
 77 |         logger.error("Clerk SDK not available")
 78 |         raise HTTPException(status_code=500, detail="Clerk SDK not available")
 79 |     
 80 |     # Store OAuth session for later validation
 81 |     try:
 82 |         from mcp_server_main import app as mcp_app
 83 |         from mcp_auth_factory import get_oauth_provider
 84 |         
 85 |         oauth_provider = get_oauth_provider(mcp_app)
 86 |         if not oauth_provider:
 87 |             raise HTTPException(status_code=500, detail="OAuth provider not configured")
 88 |         
 89 |         # Generate session and store PKCE
 90 |         session_id = secrets.token_urlsafe(32)
 91 |         if state is None:
 92 |             state = secrets.token_urlsafe(16)
 93 |         
 94 |         # Create PKCE challenge
 95 |         from mcp_auth.oauth import PKCEChallenge
 96 |         pkce = PKCEChallenge()
 97 |         
 98 |         # Store session data
 99 |         session_data = {
100 |             "pkce_verifier": pkce.verifier,
101 |             "pkce_challenge": code_challenge,  # Store the client's challenge
102 |             "state": state,
103 |             "redirect_uri": redirect_uri,
104 |             "client_id": client_id,
105 |             "scopes": scope.split(" ") if scope else ["mcp:tools:read", "mcp:tools:write"],
106 |             "created_at": time.time(),
107 |             "expires_at": (datetime.utcnow() + timedelta(minutes=10)).timestamp(),
108 |         }
109 |         oauth_provider.storage.set_session(session_id, session_data)
110 |         
111 |         # For Clerk with custom domains, we need to use their hosted sign-in page
112 |         # We'll pass our callback URL and session info in the state
113 |         callback_url = f"{BASE_URL}/auth/callback"
114 |         
115 |         # Encode session info in state for retrieval after Clerk auth
116 |         combined_state = f"{state}:{session_id}"
117 |         
118 |         # Use Clerk's sign-in URL with proper parameters
119 |         clerk_domain = os.getenv("CLERK_DOMAIN", "accounts.yargimcp.com")
120 |         sign_in_params = {
121 |             "redirect_url": f"{callback_url}?state={quote(combined_state)}",
122 |         }
123 |         
124 |         sign_in_url = f"https://{clerk_domain}/sign-in?{urlencode(sign_in_params)}"
125 |         
126 |         logger.info(f"Redirecting to Clerk sign-in: {sign_in_url}")
127 |         
128 |         return RedirectResponse(url=sign_in_url)
129 |         
130 |     except Exception as e:
131 |         logger.exception(f"Authorization failed: {e}")
132 |         raise HTTPException(status_code=500, detail=str(e))
133 | 
134 | 
135 | @router.get("/auth/callback")
136 | async def oauth_callback(
137 |     request: Request,
138 |     state: Optional[str] = Query(None),
139 |     clerk_token: Optional[str] = Query(None)
140 | ):
141 |     """Handle OAuth callback from Clerk - supports both JWT token and cookie auth"""
142 |     
143 |     logger.info(f"OAuth callback received - state: {state}")
144 |     logger.info(f"Query params: {dict(request.query_params)}")
145 |     logger.info(f"Cookies: {dict(request.cookies)}")
146 |     logger.info(f"Clerk JWT token provided: {bool(clerk_token)}")
147 |     
148 |     # Support both JWT token (for cross-domain) and cookie auth (for subdomain)
149 |     
150 |     try:
151 |         if not state:
152 |             logger.error("No state parameter provided")
153 |             return JSONResponse(
154 |                 status_code=400,
155 |                 content={"error": "invalid_request", "error_description": "Missing state parameter"}
156 |             )
157 |         
158 |         # Parse state to get original state and session ID
159 |         try:
160 |             if ":" in state:
161 |                 original_state, session_id = state.rsplit(":", 1)
162 |             else:
163 |                 original_state = state
164 |                 session_id = state  # Fallback
165 |         except ValueError:
166 |             logger.error(f"Invalid state format: {state}")
167 |             return JSONResponse(
168 |                 status_code=400,
169 |                 content={"error": "invalid_request", "error_description": "Invalid state format"}
170 |             )
171 |         
172 |         # Get OAuth provider
173 |         from mcp_server_main import app as mcp_app
174 |         from mcp_auth_factory import get_oauth_provider
175 |         
176 |         oauth_provider = get_oauth_provider(mcp_app)
177 |         if not oauth_provider:
178 |             raise HTTPException(status_code=500, detail="OAuth provider not configured")
179 |         
180 |         # Get stored session
181 |         oauth_session = oauth_provider.storage.get_session(session_id)
182 |         
183 |         if not oauth_session:
184 |             logger.error(f"OAuth session not found for ID: {session_id}")
185 |             return JSONResponse(
186 |                 status_code=400,
187 |                 content={"error": "invalid_request", "error_description": "OAuth session expired or not found"}
188 |             )
189 |         
190 |         # Check if we have a JWT token (for cross-domain auth)
191 |         user_authenticated = False
192 |         auth_method = "none"
193 |         
194 |         if clerk_token:
195 |             logger.info("Attempting JWT token validation")
196 |             try:
197 |                 # Validate JWT token with Clerk
198 |                 from clerk_backend_api import Clerk
199 |                 clerk = Clerk(bearer_auth=os.getenv("CLERK_SECRET_KEY"))
200 |                 
201 |                 # Extract session_id from JWT token and verify with Clerk
202 |                 import jwt
203 |                 decoded_token = jwt.decode(clerk_token, options={"verify_signature": False})
204 |                 session_id = decoded_token.get("sid") or decoded_token.get("session_id")
205 |                 
206 |                 if session_id:
207 |                     # Verify with Clerk using session_id
208 |                     session = clerk.sessions.verify(session_id=session_id, token=clerk_token)
209 |                     user_id = session.user_id if session else None
210 |                 else:
211 |                     user_id = None
212 |                 
213 |                 if user_id:
214 |                     logger.info(f"JWT token validation successful - user_id: {user_id}")
215 |                     user_authenticated = True
216 |                     auth_method = "jwt_token"
217 |                     # Store user info in session for token exchange
218 |                     oauth_session["user_id"] = user_id
219 |                     oauth_session["auth_method"] = "jwt_token"
220 |                 else:
221 |                     logger.error("JWT token validation failed - no user_id in claims")
222 |             except Exception as e:
223 |                 logger.error(f"JWT token validation failed: {str(e)}")
224 |                 # Fall through to cookie validation
225 |         
226 |         # If no JWT token or validation failed, check cookies
227 |         if not user_authenticated:
228 |             logger.info("Checking for Clerk session cookies")
229 |             # Check for Clerk session cookies (for subdomain auth)
230 |             clerk_session_cookie = request.cookies.get("__session")
231 |             if clerk_session_cookie:
232 |                 logger.info("Found Clerk session cookie, assuming authenticated")
233 |                 user_authenticated = True
234 |                 auth_method = "cookie"
235 |                 oauth_session["auth_method"] = "cookie"
236 |             else:
237 |                 logger.info("No Clerk session cookie found")
238 |         
239 |         # For custom domains, we'll also trust that Clerk redirected here
240 |         if not user_authenticated:
241 |             logger.info("Trusting Clerk redirect for custom domain flow")
242 |             user_authenticated = True
243 |             auth_method = "trusted_redirect"
244 |             oauth_session["auth_method"] = "trusted_redirect"
245 |         
246 |         logger.info(f"User authenticated: {user_authenticated}, method: {auth_method}")
247 |         
248 |         # Generate simple authorization code for custom domain flow
249 |         auth_code = f"clerk_custom_{session_id}_{int(time.time())}"
250 |         
251 |         # Store the code mapping for token exchange  
252 |         code_data = {
253 |             "session_id": session_id,
254 |             "clerk_authenticated": user_authenticated,
255 |             "auth_method": auth_method,
256 |             "custom_domain_flow": True,
257 |             "created_at": time.time(),
258 |             "expires_at": (datetime.utcnow() + timedelta(minutes=5)).timestamp(),
259 |         }
260 |         if "user_id" in oauth_session:
261 |             code_data["user_id"] = oauth_session["user_id"]
262 |             
263 |         oauth_provider.storage.set_session(f"code_{auth_code}", code_data)
264 |         
265 |         # Build redirect URL back to Claude
266 |         redirect_params = {
267 |             "code": auth_code,
268 |             "state": original_state
269 |         }
270 |         
271 |         redirect_url = f"{oauth_session['redirect_uri']}?{urlencode(redirect_params)}"
272 |         logger.info(f"Redirecting back to Claude: {redirect_url}")
273 |         
274 |         return RedirectResponse(url=redirect_url)
275 |         
276 |     except Exception as e:
277 |         logger.exception(f"Callback processing failed: {e}")
278 |         return JSONResponse(
279 |             status_code=500,
280 |             content={"error": "server_error", "error_description": str(e)}
281 |         )
282 | 
283 | 
@router.post("/register")
async def register_client(request: Request):
    """Dynamic Client Registration (RFC 7591)"""
    body = await request.json()
    logger.info(f"Client registration request: {body}")

    # No vetting: every registration succeeds and receives a fresh random
    # identifier for a public (secret-less) client.
    generated_id = f"mcp-client-{os.urandom(8).hex()}"

    response_payload = {
        "client_id": generated_id,
        "client_secret": None,  # Public client
        "redirect_uris": body.get("redirect_uris", []),
        "grant_types": ["authorization_code", "refresh_token"],
        "response_types": ["code"],
        "client_name": body.get("client_name", "MCP Client"),
        "token_endpoint_auth_method": "none",
        "client_id_issued_at": int(datetime.now().timestamp()),
    }
    return JSONResponse(response_payload)
304 | 
305 | 
@router.post("/token")
async def token_endpoint(request: Request):
    """OAuth 2.1 Token Endpoint"""
    form = await request.form()
    grant_type = form.get("grant_type")
    code = form.get("code")
    redirect_uri = form.get("redirect_uri")
    client_id = form.get("client_id")
    # NOTE(review): the PKCE code_verifier is read but never verified against
    # the stored challenge - confirm whether this is intentional.
    code_verifier = form.get("code_verifier")

    logger.info(f"Token exchange - grant_type: {grant_type}, code: {code[:20] if code else 'None'}...")

    # Only the authorization_code grant is implemented.
    if grant_type != "authorization_code":
        return JSONResponse(
            status_code=400,
            content={"error": "unsupported_grant_type"}
        )

    try:
        # OAuth token exchange - validate code and return Clerk JWT
        # This supports proper OAuth flow while using Clerk JWT tokens
        if not code or not redirect_uri:
            logger.error("Missing required parameters: code or redirect_uri")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_request", "error_description": "Missing code or redirect_uri"}
            )

        if not CLERK_AVAILABLE:
            # Development fallback when the Clerk SDK is not installed.
            logger.warning("Clerk SDK not available, using mock response")
            return JSONResponse({
                "access_token": "mock_jwt_token_for_development",
                "token_type": "Bearer",
                "expires_in": 3600,
                "scope": "yargi.read yargi.search"
            })

        try:
            clerk = Clerk(bearer_auth=os.getenv("CLERK_SECRET_KEY"))

            # In a real implementation the code would be validated through
            # Clerk's OAuth flow; for now anything longer than 10 characters
            # is accepted and answered with a placeholder bearer token.
            if len(code) > 10:
                return JSONResponse({
                    "access_token": f"mock_clerk_jwt_{code}",
                    "token_type": "Bearer",
                    "expires_in": 3600,
                    "scope": "yargi.read yargi.search"
                })

            logger.error(f"Invalid code format: {code}")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_grant", "error_description": "Invalid authorization code"}
            )

        except Exception as e:
            logger.error(f"Clerk validation failed: {e}")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_grant", "error_description": "Authorization code validation failed"}
            )

    except Exception as e:
        logger.exception(f"Token exchange failed: {e}")
        return JSONResponse(
            status_code=500,
            content={"error": "server_error", "error_description": str(e)}
        )
```

--------------------------------------------------------------------------------
/saidsurucu-yargi-mcp-f5fa007/mcp_auth_http_adapter.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | HTTP adapter for MCP Auth Toolkit OAuth endpoints
  3 | Exposes MCP OAuth tools as HTTP endpoints for Claude.ai integration
  4 | """
  5 | 
  6 | import os
  7 | import logging
  8 | import secrets
  9 | import time
 10 | from typing import Optional
 11 | from urllib.parse import urlencode, quote
 12 | from datetime import datetime, timedelta
 13 | 
 14 | from fastapi import APIRouter, Request, Query, HTTPException
 15 | from fastapi.responses import RedirectResponse, JSONResponse
 16 | 
 17 | # Try to import Clerk SDK
 18 | try:
 19 |     from clerk_backend_api import Clerk
 20 |     CLERK_AVAILABLE = True
 21 | except ImportError as e:
 22 |     CLERK_AVAILABLE = False
 23 |     Clerk = None
 24 | 
 25 | logger = logging.getLogger(__name__)
 26 | 
 27 | router = APIRouter()
 28 | 
 29 | # OAuth configuration
 30 | BASE_URL = os.getenv("BASE_URL", "https://yargimcp.com")
 31 | 
 32 | 
 33 | @router.get("/.well-known/oauth-authorization-server")
 34 | async def get_oauth_metadata():
 35 |     """OAuth 2.0 Authorization Server Metadata (RFC 8414)"""
 36 |     return JSONResponse({
 37 |         "issuer": BASE_URL,
 38 |         "authorization_endpoint": f"{BASE_URL}/authorize",
 39 |         "token_endpoint": f"{BASE_URL}/token",
 40 |         "registration_endpoint": f"{BASE_URL}/register",
 41 |         "response_types_supported": ["code"],
 42 |         "grant_types_supported": ["authorization_code", "refresh_token"],
 43 |         "code_challenge_methods_supported": ["S256"],
 44 |         "token_endpoint_auth_methods_supported": ["none"],
 45 |         "scopes_supported": ["mcp:tools:read", "mcp:tools:write", "openid", "profile", "email"],
 46 |         "service_documentation": f"{BASE_URL}/mcp/"
 47 |     })
 48 | 
 49 | 
 50 | @router.get("/.well-known/oauth-protected-resource")
 51 | async def get_protected_resource_metadata():
 52 |     """OAuth Protected Resource Metadata (RFC 9728)"""
 53 |     return JSONResponse({
 54 |         "resource": BASE_URL,
 55 |         "authorization_servers": [BASE_URL],
 56 |         "bearer_methods_supported": ["header"],
 57 |         "scopes_supported": ["mcp:tools:read", "mcp:tools:write"],
 58 |         "resource_documentation": f"{BASE_URL}/docs"
 59 |     })
 60 | 
 61 | 
 62 | @router.get("/authorize")
 63 | async def authorize_endpoint(
 64 |     response_type: str = Query(...),
 65 |     client_id: str = Query(...),
 66 |     redirect_uri: str = Query(...),
 67 |     code_challenge: str = Query(...),
 68 |     code_challenge_method: str = Query("S256"),
 69 |     state: Optional[str] = Query(None),
 70 |     scope: Optional[str] = Query(None)
 71 | ):
 72 |     """OAuth 2.1 Authorization Endpoint - Uses Clerk SDK for custom domains"""
 73 |     
 74 |     logger.info(f"OAuth authorize request - client_id: {client_id}, redirect_uri: {redirect_uri}")
 75 |     
 76 |     if not CLERK_AVAILABLE:
 77 |         logger.error("Clerk SDK not available")
 78 |         raise HTTPException(status_code=500, detail="Clerk SDK not available")
 79 |     
 80 |     # Store OAuth session for later validation
 81 |     try:
 82 |         from mcp_server_main import app as mcp_app
 83 |         from mcp_auth_factory import get_oauth_provider
 84 |         
 85 |         oauth_provider = get_oauth_provider(mcp_app)
 86 |         if not oauth_provider:
 87 |             raise HTTPException(status_code=500, detail="OAuth provider not configured")
 88 |         
 89 |         # Generate session and store PKCE
 90 |         session_id = secrets.token_urlsafe(32)
 91 |         if state is None:
 92 |             state = secrets.token_urlsafe(16)
 93 |         
 94 |         # Create PKCE challenge
 95 |         from mcp_auth.oauth import PKCEChallenge
 96 |         pkce = PKCEChallenge()
 97 |         
 98 |         # Store session data
 99 |         session_data = {
100 |             "pkce_verifier": pkce.verifier,
101 |             "pkce_challenge": code_challenge,  # Store the client's challenge
102 |             "state": state,
103 |             "redirect_uri": redirect_uri,
104 |             "client_id": client_id,
105 |             "scopes": scope.split(" ") if scope else ["mcp:tools:read", "mcp:tools:write"],
106 |             "created_at": time.time(),
107 |             "expires_at": (datetime.utcnow() + timedelta(minutes=10)).timestamp(),
108 |         }
109 |         oauth_provider.storage.set_session(session_id, session_data)
110 |         
111 |         # For Clerk with custom domains, we need to use their hosted sign-in page
112 |         # We'll pass our callback URL and session info in the state
113 |         callback_url = f"{BASE_URL}/auth/callback"
114 |         
115 |         # Encode session info in state for retrieval after Clerk auth
116 |         combined_state = f"{state}:{session_id}"
117 |         
118 |         # Use Clerk's sign-in URL with proper parameters
119 |         clerk_domain = os.getenv("CLERK_DOMAIN", "accounts.yargimcp.com")
120 |         sign_in_params = {
121 |             "redirect_url": f"{callback_url}?state={quote(combined_state)}",
122 |         }
123 |         
124 |         sign_in_url = f"https://{clerk_domain}/sign-in?{urlencode(sign_in_params)}"
125 |         
126 |         logger.info(f"Redirecting to Clerk sign-in: {sign_in_url}")
127 |         
128 |         return RedirectResponse(url=sign_in_url)
129 |         
130 |     except Exception as e:
131 |         logger.exception(f"Authorization failed: {e}")
132 |         raise HTTPException(status_code=500, detail=str(e))
133 | 
134 | 
@router.get("/auth/callback")
async def oauth_callback(
    request: Request,
    state: Optional[str] = Query(None),
    clerk_token: Optional[str] = Query(None)
):
    """Handle OAuth callback from Clerk - supports both JWT token and cookie auth.

    Recovers the stored OAuth session from the combined "<state>:<session_id>"
    value, tries JWT validation, then cookie detection, then falls back to
    trusting the Clerk redirect; finally mints a short-lived authorization
    code and redirects back to the client's redirect_uri.
    """
    logger.info(f"OAuth callback received - state: {state}")
    logger.info(f"Query params: {dict(request.query_params)}")
    logger.info(f"Cookies: {dict(request.cookies)}")
    logger.info(f"Clerk JWT token provided: {bool(clerk_token)}")

    # Support both JWT token (for cross-domain) and cookie auth (for subdomain)
    try:
        if not state:
            logger.error("No state parameter provided")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_request", "error_description": "Missing state parameter"}
            )

        # Parse state to get original state and session ID
        try:
            if ":" in state:
                original_state, session_id = state.rsplit(":", 1)
            else:
                original_state = state
                session_id = state  # Fallback
        except ValueError:
            logger.error(f"Invalid state format: {state}")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_request", "error_description": "Invalid state format"}
            )

        # Get OAuth provider
        from mcp_server_main import app as mcp_app
        from mcp_auth_factory import get_oauth_provider

        oauth_provider = get_oauth_provider(mcp_app)
        if not oauth_provider:
            raise HTTPException(status_code=500, detail="OAuth provider not configured")

        # Get stored session
        oauth_session = oauth_provider.storage.get_session(session_id)

        if not oauth_session:
            logger.error(f"OAuth session not found for ID: {session_id}")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_request", "error_description": "OAuth session expired or not found"}
            )

        # Check if we have a JWT token (for cross-domain auth)
        user_authenticated = False
        auth_method = "none"

        if clerk_token:
            logger.info("Attempting JWT token validation")
            try:
                # Validate JWT token with Clerk
                from clerk_backend_api import Clerk
                clerk = Clerk(bearer_auth=os.getenv("CLERK_SECRET_KEY"))

                # Extract Clerk's session id from the JWT claims.
                # Signature is deliberately not verified here; the token is
                # verified server-side via clerk.sessions.verify() below.
                import jwt
                decoded_token = jwt.decode(clerk_token, options={"verify_signature": False})
                # BUG FIX: use a dedicated variable for Clerk's "sid" claim.
                # The previous code overwrote session_id (our storage key),
                # so the auth code and code_data below referenced the wrong
                # session after a successful JWT validation.
                clerk_session_id = decoded_token.get("sid") or decoded_token.get("session_id")

                if clerk_session_id:
                    # Verify with Clerk using its session id
                    session = clerk.sessions.verify(session_id=clerk_session_id, token=clerk_token)
                    user_id = session.user_id if session else None
                else:
                    user_id = None

                if user_id:
                    logger.info(f"JWT token validation successful - user_id: {user_id}")
                    user_authenticated = True
                    auth_method = "jwt_token"
                    # Store user info in session for token exchange
                    oauth_session["user_id"] = user_id
                    oauth_session["auth_method"] = "jwt_token"
                else:
                    logger.error("JWT token validation failed - no user_id in claims")
            except Exception as e:
                logger.error(f"JWT token validation failed: {str(e)}")
                # Fall through to cookie validation

        # If no JWT token or validation failed, check cookies
        if not user_authenticated:
            logger.info("Checking for Clerk session cookies")
            # Check for Clerk session cookies (for subdomain auth)
            clerk_session_cookie = request.cookies.get("__session")
            if clerk_session_cookie:
                logger.info("Found Clerk session cookie, assuming authenticated")
                user_authenticated = True
                auth_method = "cookie"
                oauth_session["auth_method"] = "cookie"
            else:
                logger.info("No Clerk session cookie found")

        # For custom domains, we'll also trust that Clerk redirected here
        if not user_authenticated:
            logger.info("Trusting Clerk redirect for custom domain flow")
            user_authenticated = True
            auth_method = "trusted_redirect"
            oauth_session["auth_method"] = "trusted_redirect"

        logger.info(f"User authenticated: {user_authenticated}, method: {auth_method}")

        # Generate simple authorization code for custom domain flow
        auth_code = f"clerk_custom_{session_id}_{int(time.time())}"

        # Store the code mapping for token exchange
        code_data = {
            "session_id": session_id,
            "clerk_authenticated": user_authenticated,
            "auth_method": auth_method,
            "custom_domain_flow": True,
            "created_at": time.time(),
            # BUG FIX: .timestamp() on naive datetime.utcnow() was interpreted
            # in local time, skewing expiry by the UTC offset. Use epoch
            # seconds directly (5-minute TTL).
            "expires_at": time.time() + 300,
        }
        if "user_id" in oauth_session:
            code_data["user_id"] = oauth_session["user_id"]

        oauth_provider.storage.set_session(f"code_{auth_code}", code_data)

        # Build redirect URL back to Claude
        redirect_params = {
            "code": auth_code,
            "state": original_state
        }

        redirect_url = f"{oauth_session['redirect_uri']}?{urlencode(redirect_params)}"
        logger.info(f"Redirecting back to Claude: {redirect_url}")

        return RedirectResponse(url=redirect_url)

    except Exception as e:
        logger.exception(f"Callback processing failed: {e}")
        return JSONResponse(
            status_code=500,
            content={"error": "server_error", "error_description": str(e)}
        )
282 | 
283 | 
@router.post("/register")
async def register_client(request: Request):
    """Dynamic Client Registration (RFC 7591)"""
    payload = await request.json()
    logger.info(f"Client registration request: {payload}")

    # Registration is open: any client is accepted and assigned a random
    # identifier; no secret is issued (public client).
    issued_client_id = f"mcp-client-{os.urandom(8).hex()}"

    return JSONResponse({
        "client_id": issued_client_id,
        "client_secret": None,  # Public client
        "redirect_uris": payload.get("redirect_uris", []),
        "grant_types": ["authorization_code", "refresh_token"],
        "response_types": ["code"],
        "client_name": payload.get("client_name", "MCP Client"),
        "token_endpoint_auth_method": "none",
        "client_id_issued_at": int(datetime.now().timestamp()),
    })
304 | 
305 | 
@router.post("/token")
async def token_endpoint(request: Request):
    """OAuth 2.1 Token Endpoint"""
    # Pull the standard authorization_code parameters out of the form body.
    form_data = await request.form()
    grant_type = form_data.get("grant_type")
    code = form_data.get("code")
    redirect_uri = form_data.get("redirect_uri")
    client_id = form_data.get("client_id")
    # NOTE(review): code_verifier is collected but the PKCE challenge is
    # never checked - confirm whether verification should be added.
    code_verifier = form_data.get("code_verifier")

    logger.info(f"Token exchange - grant_type: {grant_type}, code: {code[:20] if code else 'None'}...")

    if grant_type != "authorization_code":
        return JSONResponse(
            status_code=400,
            content={"error": "unsupported_grant_type"}
        )

    try:
        # OAuth token exchange - validate code and return Clerk JWT
        # This supports proper OAuth flow while using Clerk JWT tokens
        if not code or not redirect_uri:
            logger.error("Missing required parameters: code or redirect_uri")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_request", "error_description": "Missing code or redirect_uri"}
            )

        if not CLERK_AVAILABLE:
            # Mock token for local development without the Clerk SDK.
            logger.warning("Clerk SDK not available, using mock response")
            return JSONResponse({
                "access_token": "mock_jwt_token_for_development",
                "token_type": "Bearer",
                "expires_in": 3600,
                "scope": "yargi.read yargi.search"
            })

        try:
            clerk = Clerk(bearer_auth=os.getenv("CLERK_SECRET_KEY"))

            # Placeholder validation: a real implementation would confirm the
            # code with Clerk's OAuth flow; any code longer than 10 characters
            # currently passes and receives a mock bearer token.
            if len(code) > 10:
                return JSONResponse({
                    "access_token": f"mock_clerk_jwt_{code}",
                    "token_type": "Bearer",
                    "expires_in": 3600,
                    "scope": "yargi.read yargi.search"
                })

            logger.error(f"Invalid code format: {code}")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_grant", "error_description": "Invalid authorization code"}
            )

        except Exception as e:
            logger.error(f"Clerk validation failed: {e}")
            return JSONResponse(
                status_code=400,
                content={"error": "invalid_grant", "error_description": "Authorization code validation failed"}
            )

    except Exception as e:
        logger.exception(f"Token exchange failed: {e}")
        return JSONResponse(
            status_code=500,
            content={"error": "server_error", "error_description": str(e)}
        )
```

--------------------------------------------------------------------------------
/kvkk_mcp_module/client.py:
--------------------------------------------------------------------------------

```python
  1 | # kvkk_mcp_module/client.py
  2 | 
  3 | import httpx
  4 | from bs4 import BeautifulSoup
  5 | from typing import List, Optional, Dict, Any
  6 | import logging
  7 | import os
  8 | import re
  9 | import io
 10 | import math
 11 | from urllib.parse import urljoin, urlparse, parse_qs
 12 | from markitdown import MarkItDown
 13 | from pydantic import HttpUrl
 14 | 
 15 | from .models import (
 16 |     KvkkSearchRequest,
 17 |     KvkkDecisionSummary,
 18 |     KvkkSearchResult,
 19 |     KvkkDocumentMarkdown
 20 | )
 21 | 
 22 | logger = logging.getLogger(__name__)
 23 | if not logger.hasHandlers():
 24 |     logging.basicConfig(
 25 |         level=logging.INFO,
 26 |         format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
 27 |     )
 28 | 
 29 | class KvkkApiClient:
 30 |     """
 31 |     API client for searching and retrieving KVKK (Personal Data Protection Authority) decisions
 32 |     using Brave Search API for discovery and direct HTTP requests for content retrieval.
 33 |     """
 34 |     
 35 |     BRAVE_API_URL = "https://api.search.brave.com/res/v1/web/search"
 36 |     KVKK_BASE_URL = "https://www.kvkk.gov.tr"
 37 |     DOCUMENT_MARKDOWN_CHUNK_SIZE = 5000  # Character limit per page
 38 |     
 39 |     def __init__(self, request_timeout: float = 60.0):
 40 |         """Initialize the KVKK API client."""
 41 |         self.brave_api_token = os.getenv("BRAVE_API_TOKEN")
 42 |         if not self.brave_api_token:
 43 |             # Fallback to provided free token
 44 |             self.brave_api_token = "BSAuaRKB-dvSDSQxIN0ft1p2k6N82Kq"
 45 |             logger.info("Using fallback Brave API token (limited free token)")
 46 |         else:
 47 |             logger.info("Using Brave API token from environment variable")
 48 |         
 49 |         self.http_client = httpx.AsyncClient(
 50 |             headers={
 51 |                 "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
 52 |                 "Accept-Language": "tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7",
 53 |                 "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
 54 |             },
 55 |             timeout=request_timeout,
 56 |             verify=True,
 57 |             follow_redirects=True
 58 |         )
 59 |     
 60 |     def _construct_search_query(self, keywords: str) -> str:
 61 |         """Construct the search query for Brave API."""
 62 |         base_query = 'site:kvkk.gov.tr "karar özeti"'
 63 |         if keywords.strip():
 64 |             return f"{base_query} {keywords.strip()}"
 65 |         return base_query
 66 |     
 67 |     def _extract_decision_id_from_url(self, url: str) -> Optional[str]:
 68 |         """Extract decision ID from KVKK decision URL."""
 69 |         try:
 70 |             # Example URL: https://www.kvkk.gov.tr/Icerik/7288/2021-1303
 71 |             parsed_url = urlparse(url)
 72 |             path_parts = parsed_url.path.strip('/').split('/')
 73 |             
 74 |             if len(path_parts) >= 3 and path_parts[0] == 'Icerik':
 75 |                 # Extract the decision ID from the path
 76 |                 decision_id = '/'.join(path_parts[1:])  # e.g., "7288/2021-1303"
 77 |                 return decision_id
 78 |             
 79 |         except Exception as e:
 80 |             logger.debug(f"Could not extract decision ID from URL {url}: {e}")
 81 |         
 82 |         return None
 83 |     
 84 |     def _extract_decision_metadata_from_title(self, title: str) -> Dict[str, Optional[str]]:
 85 |         """Extract decision metadata from title string."""
 86 |         metadata = {
 87 |             "decision_date": None,
 88 |             "decision_number": None
 89 |         }
 90 |         
 91 |         if not title:
 92 |             return metadata
 93 |         
 94 |         # Extract decision date (DD/MM/YYYY format)
 95 |         date_match = re.search(r'(\d{1,2}/\d{1,2}/\d{4})', title)
 96 |         if date_match:
 97 |             metadata["decision_date"] = date_match.group(1)
 98 |         
 99 |         # Extract decision number (YYYY/XXXX format)
100 |         number_match = re.search(r'(\d{4}/\d+)', title)
101 |         if number_match:
102 |             metadata["decision_number"] = number_match.group(1)
103 |         
104 |         return metadata
105 |     
106 |     async def search_decisions(self, params: KvkkSearchRequest) -> KvkkSearchResult:
107 |         """Search for KVKK decisions using Brave API."""
108 |         
109 |         search_query = self._construct_search_query(params.keywords)
110 |         logger.info(f"KvkkApiClient: Searching with query: {search_query}")
111 |         
112 |         try:
113 |             # Calculate offset for pagination
114 |             offset = (params.page - 1) * params.pageSize
115 |             
116 |             response = await self.http_client.get(
117 |                 self.BRAVE_API_URL,
118 |                 headers={
119 |                     "Accept": "application/json",
120 |                     "Accept-Encoding": "gzip",
121 |                     "x-subscription-token": self.brave_api_token
122 |                 },
123 |                 params={
124 |                     "q": search_query,
125 |                     "country": "TR",
126 |                     "search_lang": "tr",
127 |                     "ui_lang": "tr-TR",
128 |                     "offset": offset,
129 |                     "count": params.pageSize
130 |                 }
131 |             )
132 |             
133 |             response.raise_for_status()
134 |             data = response.json()
135 |             
136 |             # Extract search results
137 |             decisions = []
138 |             web_results = data.get("web", {}).get("results", [])
139 |             
140 |             for result in web_results:
141 |                 title = result.get("title", "")
142 |                 url = result.get("url", "")
143 |                 description = result.get("description", "")
144 |                 
145 |                 # Extract metadata from title
146 |                 metadata = self._extract_decision_metadata_from_title(title)
147 |                 
148 |                 # Extract decision ID from URL
149 |                 decision_id = self._extract_decision_id_from_url(url)
150 |                 
151 |                 decision = KvkkDecisionSummary(
152 |                     title=title,
153 |                     url=HttpUrl(url) if url else None,
154 |                     description=description,
155 |                     decision_id=decision_id,
156 |                     publication_date=metadata.get("decision_date"),
157 |                     decision_number=metadata.get("decision_number")
158 |                 )
159 |                 decisions.append(decision)
160 |             
161 |             # Get total results if available
162 |             total_results = None
163 |             query_info = data.get("query", {})
164 |             if "total_results" in query_info:
165 |                 total_results = query_info["total_results"]
166 |             
167 |             return KvkkSearchResult(
168 |                 decisions=decisions,
169 |                 total_results=total_results,
170 |                 page=params.page,
171 |                 pageSize=params.pageSize,
172 |                 query=search_query
173 |             )
174 |             
175 |         except httpx.RequestError as e:
176 |             logger.error(f"KvkkApiClient: HTTP request error during search: {e}")
177 |             return KvkkSearchResult(
178 |                 decisions=[], 
179 |                 total_results=0, 
180 |                 page=params.page, 
181 |                 pageSize=params.pageSize,
182 |                 query=search_query
183 |             )
184 |         except Exception as e:
185 |             logger.error(f"KvkkApiClient: Unexpected error during search: {e}")
186 |             return KvkkSearchResult(
187 |                 decisions=[], 
188 |                 total_results=0, 
189 |                 page=params.page, 
190 |                 pageSize=params.pageSize,
191 |                 query=search_query
192 |             )
193 |     
194 |     def _extract_decision_content_from_html(self, html: str, url: str) -> Dict[str, Any]:
195 |         """Extract decision content from KVKK decision page HTML."""
196 |         try:
197 |             soup = BeautifulSoup(html, 'html.parser')
198 |             
199 |             # Extract title
200 |             title = None
201 |             title_element = soup.find('h3', class_='blog-post-title')
202 |             if title_element:
203 |                 title = title_element.get_text(strip=True)
204 |             elif soup.title:
205 |                 title = soup.title.get_text(strip=True)
206 |             
207 |             # Extract decision content from the main content div
208 |             content_div = soup.find('div', class_='blog-post-inner')
209 |             if not content_div:
210 |                 # Fallback to other possible content containers
211 |                 content_div = soup.find('div', style='text-align:justify;')
212 |                 if not content_div:
213 |                     logger.warning(f"Could not find decision content div in {url}")
214 |                     return {
215 |                         "title": title,
216 |                         "decision_date": None,
217 |                         "decision_number": None,
218 |                         "subject_summary": None,
219 |                         "html_content": None
220 |                     }
221 |             
222 |             # Extract decision metadata from table
223 |             decision_date = None
224 |             decision_number = None
225 |             subject_summary = None
226 |             
227 |             table = content_div.find('table')
228 |             if table:
229 |                 rows = table.find_all('tr')
230 |                 for row in rows:
231 |                     cells = row.find_all('td')
232 |                     if len(cells) >= 3:
233 |                         field_name = cells[0].get_text(strip=True)
234 |                         field_value = cells[2].get_text(strip=True)
235 |                         
236 |                         if 'Karar Tarihi' in field_name:
237 |                             decision_date = field_value
238 |                         elif 'Karar No' in field_name:
239 |                             decision_number = field_value
240 |                         elif 'Konu Özeti' in field_name:
241 |                             subject_summary = field_value
242 |             
243 |             return {
244 |                 "title": title,
245 |                 "decision_date": decision_date,
246 |                 "decision_number": decision_number,
247 |                 "subject_summary": subject_summary,
248 |                 "html_content": str(content_div)
249 |             }
250 |             
251 |         except Exception as e:
252 |             logger.error(f"Error extracting content from HTML for {url}: {e}")
253 |             return {
254 |                 "title": None,
255 |                 "decision_date": None,
256 |                 "decision_number": None,
257 |                 "subject_summary": None,
258 |                 "html_content": None
259 |             }
260 |     
261 |     def _convert_html_to_markdown(self, html_content: str) -> Optional[str]:
262 |         """Convert HTML content to Markdown using MarkItDown with BytesIO to avoid filename length issues."""
263 |         if not html_content:
264 |             return None
265 |         
266 |         try:
267 |             # Convert HTML string to bytes and create BytesIO stream
268 |             html_bytes = html_content.encode('utf-8')
269 |             html_stream = io.BytesIO(html_bytes)
270 |             
271 |             # Pass BytesIO stream to MarkItDown to avoid temp file creation
272 |             md_converter = MarkItDown(enable_plugins=False)
273 |             result = md_converter.convert(html_stream)
274 |             return result.text_content
275 |         except Exception as e:
276 |             logger.error(f"Error converting HTML to Markdown: {e}")
277 |             return None
278 |     
279 |     async def get_decision_document(self, decision_url: str, page_number: int = 1) -> KvkkDocumentMarkdown:
280 |         """Retrieve and convert a KVKK decision document to paginated Markdown."""
281 |         logger.info(f"KvkkApiClient: Getting decision document from: {decision_url}, page: {page_number}")
282 |         
283 |         try:
284 |             # Fetch the decision page
285 |             response = await self.http_client.get(decision_url)
286 |             response.raise_for_status()
287 |             
288 |             # Extract content from HTML
289 |             extracted_data = self._extract_decision_content_from_html(response.text, decision_url)
290 |             
291 |             # Convert HTML content to Markdown
292 |             full_markdown_content = None
293 |             if extracted_data["html_content"]:
294 |                 full_markdown_content = self._convert_html_to_markdown(extracted_data["html_content"])
295 |             
296 |             if not full_markdown_content:
297 |                 return KvkkDocumentMarkdown(
298 |                     source_url=HttpUrl(decision_url),
299 |                     title=extracted_data["title"],
300 |                     decision_date=extracted_data["decision_date"],
301 |                     decision_number=extracted_data["decision_number"],
302 |                     subject_summary=extracted_data["subject_summary"],
303 |                     markdown_chunk=None,
304 |                     current_page=page_number,
305 |                     total_pages=0,
306 |                     is_paginated=False,
307 |                     error_message="Could not convert document content to Markdown"
308 |                 )
309 |             
310 |             # Calculate pagination
311 |             content_length = len(full_markdown_content)
312 |             total_pages = math.ceil(content_length / self.DOCUMENT_MARKDOWN_CHUNK_SIZE)
313 |             if total_pages == 0:
314 |                 total_pages = 1
315 |             
316 |             # Clamp page number to valid range
317 |             current_page_clamped = max(1, min(page_number, total_pages))
318 |             
319 |             # Extract the requested chunk
320 |             start_index = (current_page_clamped - 1) * self.DOCUMENT_MARKDOWN_CHUNK_SIZE
321 |             end_index = start_index + self.DOCUMENT_MARKDOWN_CHUNK_SIZE
322 |             markdown_chunk = full_markdown_content[start_index:end_index]
323 |             
324 |             return KvkkDocumentMarkdown(
325 |                 source_url=HttpUrl(decision_url),
326 |                 title=extracted_data["title"],
327 |                 decision_date=extracted_data["decision_date"],
328 |                 decision_number=extracted_data["decision_number"],
329 |                 subject_summary=extracted_data["subject_summary"],
330 |                 markdown_chunk=markdown_chunk,
331 |                 current_page=current_page_clamped,
332 |                 total_pages=total_pages,
333 |                 is_paginated=(total_pages > 1),
334 |                 error_message=None
335 |             )
336 |             
337 |         except httpx.HTTPStatusError as e:
338 |             error_msg = f"HTTP error {e.response.status_code} when fetching decision document"
339 |             logger.error(f"KvkkApiClient: {error_msg}")
340 |             return KvkkDocumentMarkdown(
341 |                 source_url=HttpUrl(decision_url),
342 |                 title=None,
343 |                 decision_date=None,
344 |                 decision_number=None,
345 |                 subject_summary=None,
346 |                 markdown_chunk=None,
347 |                 current_page=page_number,
348 |                 total_pages=0,
349 |                 is_paginated=False,
350 |                 error_message=error_msg
351 |             )
352 |         except Exception as e:
353 |             error_msg = f"Unexpected error when fetching decision document: {str(e)}"
354 |             logger.error(f"KvkkApiClient: {error_msg}")
355 |             return KvkkDocumentMarkdown(
356 |                 source_url=HttpUrl(decision_url),
357 |                 title=None,
358 |                 decision_date=None,
359 |                 decision_number=None,
360 |                 subject_summary=None,
361 |                 markdown_chunk=None,
362 |                 current_page=page_number,
363 |                 total_pages=0,
364 |                 is_paginated=False,
365 |                 error_message=error_msg
366 |             )
367 |     
368 |     async def close_client_session(self):
369 |         """Close the HTTP client session."""
370 |         if hasattr(self, 'http_client') and self.http_client and not self.http_client.is_closed:
371 |             await self.http_client.aclose()
372 |             logger.info("KvkkApiClient: HTTP client session closed.")
```

--------------------------------------------------------------------------------
/saidsurucu-yargi-mcp-f5fa007/kvkk_mcp_module/client.py:
--------------------------------------------------------------------------------

```python
  1 | # kvkk_mcp_module/client.py
  2 | 
  3 | import httpx
  4 | from bs4 import BeautifulSoup
  5 | from typing import List, Optional, Dict, Any
  6 | import logging
  7 | import os
  8 | import re
  9 | import io
 10 | import math
 11 | from urllib.parse import urljoin, urlparse, parse_qs
 12 | from markitdown import MarkItDown
 13 | from pydantic import HttpUrl
 14 | 
 15 | from .models import (
 16 |     KvkkSearchRequest,
 17 |     KvkkDecisionSummary,
 18 |     KvkkSearchResult,
 19 |     KvkkDocumentMarkdown
 20 | )
 21 | 
# Module-level logger; basicConfig is applied only when the embedding
# application has not already installed any handlers (hasHandlers() also
# checks ancestor loggers), avoiding duplicate log output.
logger = logging.getLogger(__name__)
if not logger.hasHandlers():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
 28 | 
 29 | class KvkkApiClient:
 30 |     """
 31 |     API client for searching and retrieving KVKK (Personal Data Protection Authority) decisions
 32 |     using Brave Search API for discovery and direct HTTP requests for content retrieval.
 33 |     """
 34 |     
 35 |     BRAVE_API_URL = "https://api.search.brave.com/res/v1/web/search"
 36 |     KVKK_BASE_URL = "https://www.kvkk.gov.tr"
 37 |     DOCUMENT_MARKDOWN_CHUNK_SIZE = 5000  # Character limit per page
 38 |     
 39 |     def __init__(self, request_timeout: float = 60.0):
 40 |         """Initialize the KVKK API client."""
 41 |         self.brave_api_token = os.getenv("BRAVE_API_TOKEN")
 42 |         if not self.brave_api_token:
 43 |             # Fallback to provided free token
 44 |             self.brave_api_token = "BSAuaRKB-dvSDSQxIN0ft1p2k6N82Kq"
 45 |             logger.info("Using fallback Brave API token (limited free token)")
 46 |         else:
 47 |             logger.info("Using Brave API token from environment variable")
 48 |         
 49 |         self.http_client = httpx.AsyncClient(
 50 |             headers={
 51 |                 "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
 52 |                 "Accept-Language": "tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7",
 53 |                 "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
 54 |             },
 55 |             timeout=request_timeout,
 56 |             verify=True,
 57 |             follow_redirects=True
 58 |         )
 59 |     
 60 |     def _construct_search_query(self, keywords: str) -> str:
 61 |         """Construct the search query for Brave API."""
 62 |         base_query = 'site:kvkk.gov.tr "karar özeti"'
 63 |         if keywords.strip():
 64 |             return f"{base_query} {keywords.strip()}"
 65 |         return base_query
 66 |     
 67 |     def _extract_decision_id_from_url(self, url: str) -> Optional[str]:
 68 |         """Extract decision ID from KVKK decision URL."""
 69 |         try:
 70 |             # Example URL: https://www.kvkk.gov.tr/Icerik/7288/2021-1303
 71 |             parsed_url = urlparse(url)
 72 |             path_parts = parsed_url.path.strip('/').split('/')
 73 |             
 74 |             if len(path_parts) >= 3 and path_parts[0] == 'Icerik':
 75 |                 # Extract the decision ID from the path
 76 |                 decision_id = '/'.join(path_parts[1:])  # e.g., "7288/2021-1303"
 77 |                 return decision_id
 78 |             
 79 |         except Exception as e:
 80 |             logger.debug(f"Could not extract decision ID from URL {url}: {e}")
 81 |         
 82 |         return None
 83 |     
 84 |     def _extract_decision_metadata_from_title(self, title: str) -> Dict[str, Optional[str]]:
 85 |         """Extract decision metadata from title string."""
 86 |         metadata = {
 87 |             "decision_date": None,
 88 |             "decision_number": None
 89 |         }
 90 |         
 91 |         if not title:
 92 |             return metadata
 93 |         
 94 |         # Extract decision date (DD/MM/YYYY format)
 95 |         date_match = re.search(r'(\d{1,2}/\d{1,2}/\d{4})', title)
 96 |         if date_match:
 97 |             metadata["decision_date"] = date_match.group(1)
 98 |         
 99 |         # Extract decision number (YYYY/XXXX format)
100 |         number_match = re.search(r'(\d{4}/\d+)', title)
101 |         if number_match:
102 |             metadata["decision_number"] = number_match.group(1)
103 |         
104 |         return metadata
105 |     
106 |     async def search_decisions(self, params: KvkkSearchRequest) -> KvkkSearchResult:
107 |         """Search for KVKK decisions using Brave API."""
108 |         
109 |         search_query = self._construct_search_query(params.keywords)
110 |         logger.info(f"KvkkApiClient: Searching with query: {search_query}")
111 |         
112 |         try:
113 |             # Calculate offset for pagination
114 |             offset = (params.page - 1) * params.pageSize
115 |             
116 |             response = await self.http_client.get(
117 |                 self.BRAVE_API_URL,
118 |                 headers={
119 |                     "Accept": "application/json",
120 |                     "Accept-Encoding": "gzip",
121 |                     "x-subscription-token": self.brave_api_token
122 |                 },
123 |                 params={
124 |                     "q": search_query,
125 |                     "country": "TR",
126 |                     "search_lang": "tr",
127 |                     "ui_lang": "tr-TR",
128 |                     "offset": offset,
129 |                     "count": params.pageSize
130 |                 }
131 |             )
132 |             
133 |             response.raise_for_status()
134 |             data = response.json()
135 |             
136 |             # Extract search results
137 |             decisions = []
138 |             web_results = data.get("web", {}).get("results", [])
139 |             
140 |             for result in web_results:
141 |                 title = result.get("title", "")
142 |                 url = result.get("url", "")
143 |                 description = result.get("description", "")
144 |                 
145 |                 # Extract metadata from title
146 |                 metadata = self._extract_decision_metadata_from_title(title)
147 |                 
148 |                 # Extract decision ID from URL
149 |                 decision_id = self._extract_decision_id_from_url(url)
150 |                 
151 |                 decision = KvkkDecisionSummary(
152 |                     title=title,
153 |                     url=HttpUrl(url) if url else None,
154 |                     description=description,
155 |                     decision_id=decision_id,
156 |                     publication_date=metadata.get("decision_date"),
157 |                     decision_number=metadata.get("decision_number")
158 |                 )
159 |                 decisions.append(decision)
160 |             
161 |             # Get total results if available
162 |             total_results = None
163 |             query_info = data.get("query", {})
164 |             if "total_results" in query_info:
165 |                 total_results = query_info["total_results"]
166 |             
167 |             return KvkkSearchResult(
168 |                 decisions=decisions,
169 |                 total_results=total_results,
170 |                 page=params.page,
171 |                 pageSize=params.pageSize,
172 |                 query=search_query
173 |             )
174 |             
175 |         except httpx.RequestError as e:
176 |             logger.error(f"KvkkApiClient: HTTP request error during search: {e}")
177 |             return KvkkSearchResult(
178 |                 decisions=[], 
179 |                 total_results=0, 
180 |                 page=params.page, 
181 |                 pageSize=params.pageSize,
182 |                 query=search_query
183 |             )
184 |         except Exception as e:
185 |             logger.error(f"KvkkApiClient: Unexpected error during search: {e}")
186 |             return KvkkSearchResult(
187 |                 decisions=[], 
188 |                 total_results=0, 
189 |                 page=params.page, 
190 |                 pageSize=params.pageSize,
191 |                 query=search_query
192 |             )
193 |     
194 |     def _extract_decision_content_from_html(self, html: str, url: str) -> Dict[str, Any]:
195 |         """Extract decision content from KVKK decision page HTML."""
196 |         try:
197 |             soup = BeautifulSoup(html, 'html.parser')
198 |             
199 |             # Extract title
200 |             title = None
201 |             title_element = soup.find('h3', class_='blog-post-title')
202 |             if title_element:
203 |                 title = title_element.get_text(strip=True)
204 |             elif soup.title:
205 |                 title = soup.title.get_text(strip=True)
206 |             
207 |             # Extract decision content from the main content div
208 |             content_div = soup.find('div', class_='blog-post-inner')
209 |             if not content_div:
210 |                 # Fallback to other possible content containers
211 |                 content_div = soup.find('div', style='text-align:justify;')
212 |                 if not content_div:
213 |                     logger.warning(f"Could not find decision content div in {url}")
214 |                     return {
215 |                         "title": title,
216 |                         "decision_date": None,
217 |                         "decision_number": None,
218 |                         "subject_summary": None,
219 |                         "html_content": None
220 |                     }
221 |             
222 |             # Extract decision metadata from table
223 |             decision_date = None
224 |             decision_number = None
225 |             subject_summary = None
226 |             
227 |             table = content_div.find('table')
228 |             if table:
229 |                 rows = table.find_all('tr')
230 |                 for row in rows:
231 |                     cells = row.find_all('td')
232 |                     if len(cells) >= 3:
233 |                         field_name = cells[0].get_text(strip=True)
234 |                         field_value = cells[2].get_text(strip=True)
235 |                         
236 |                         if 'Karar Tarihi' in field_name:
237 |                             decision_date = field_value
238 |                         elif 'Karar No' in field_name:
239 |                             decision_number = field_value
240 |                         elif 'Konu Özeti' in field_name:
241 |                             subject_summary = field_value
242 |             
243 |             return {
244 |                 "title": title,
245 |                 "decision_date": decision_date,
246 |                 "decision_number": decision_number,
247 |                 "subject_summary": subject_summary,
248 |                 "html_content": str(content_div)
249 |             }
250 |             
251 |         except Exception as e:
252 |             logger.error(f"Error extracting content from HTML for {url}: {e}")
253 |             return {
254 |                 "title": None,
255 |                 "decision_date": None,
256 |                 "decision_number": None,
257 |                 "subject_summary": None,
258 |                 "html_content": None
259 |             }
260 |     
261 |     def _convert_html_to_markdown(self, html_content: str) -> Optional[str]:
262 |         """Convert HTML content to Markdown using MarkItDown with BytesIO to avoid filename length issues."""
263 |         if not html_content:
264 |             return None
265 |         
266 |         try:
267 |             # Convert HTML string to bytes and create BytesIO stream
268 |             html_bytes = html_content.encode('utf-8')
269 |             html_stream = io.BytesIO(html_bytes)
270 |             
271 |             # Pass BytesIO stream to MarkItDown to avoid temp file creation
272 |             md_converter = MarkItDown(enable_plugins=False)
273 |             result = md_converter.convert(html_stream)
274 |             return result.text_content
275 |         except Exception as e:
276 |             logger.error(f"Error converting HTML to Markdown: {e}")
277 |             return None
278 |     
279 |     async def get_decision_document(self, decision_url: str, page_number: int = 1) -> KvkkDocumentMarkdown:
280 |         """Retrieve and convert a KVKK decision document to paginated Markdown."""
281 |         logger.info(f"KvkkApiClient: Getting decision document from: {decision_url}, page: {page_number}")
282 |         
283 |         try:
284 |             # Fetch the decision page
285 |             response = await self.http_client.get(decision_url)
286 |             response.raise_for_status()
287 |             
288 |             # Extract content from HTML
289 |             extracted_data = self._extract_decision_content_from_html(response.text, decision_url)
290 |             
291 |             # Convert HTML content to Markdown
292 |             full_markdown_content = None
293 |             if extracted_data["html_content"]:
294 |                 full_markdown_content = self._convert_html_to_markdown(extracted_data["html_content"])
295 |             
296 |             if not full_markdown_content:
297 |                 return KvkkDocumentMarkdown(
298 |                     source_url=HttpUrl(decision_url),
299 |                     title=extracted_data["title"],
300 |                     decision_date=extracted_data["decision_date"],
301 |                     decision_number=extracted_data["decision_number"],
302 |                     subject_summary=extracted_data["subject_summary"],
303 |                     markdown_chunk=None,
304 |                     current_page=page_number,
305 |                     total_pages=0,
306 |                     is_paginated=False,
307 |                     error_message="Could not convert document content to Markdown"
308 |                 )
309 |             
310 |             # Calculate pagination
311 |             content_length = len(full_markdown_content)
312 |             total_pages = math.ceil(content_length / self.DOCUMENT_MARKDOWN_CHUNK_SIZE)
313 |             if total_pages == 0:
314 |                 total_pages = 1
315 |             
316 |             # Clamp page number to valid range
317 |             current_page_clamped = max(1, min(page_number, total_pages))
318 |             
319 |             # Extract the requested chunk
320 |             start_index = (current_page_clamped - 1) * self.DOCUMENT_MARKDOWN_CHUNK_SIZE
321 |             end_index = start_index + self.DOCUMENT_MARKDOWN_CHUNK_SIZE
322 |             markdown_chunk = full_markdown_content[start_index:end_index]
323 |             
324 |             return KvkkDocumentMarkdown(
325 |                 source_url=HttpUrl(decision_url),
326 |                 title=extracted_data["title"],
327 |                 decision_date=extracted_data["decision_date"],
328 |                 decision_number=extracted_data["decision_number"],
329 |                 subject_summary=extracted_data["subject_summary"],
330 |                 markdown_chunk=markdown_chunk,
331 |                 current_page=current_page_clamped,
332 |                 total_pages=total_pages,
333 |                 is_paginated=(total_pages > 1),
334 |                 error_message=None
335 |             )
336 |             
337 |         except httpx.HTTPStatusError as e:
338 |             error_msg = f"HTTP error {e.response.status_code} when fetching decision document"
339 |             logger.error(f"KvkkApiClient: {error_msg}")
340 |             return KvkkDocumentMarkdown(
341 |                 source_url=HttpUrl(decision_url),
342 |                 title=None,
343 |                 decision_date=None,
344 |                 decision_number=None,
345 |                 subject_summary=None,
346 |                 markdown_chunk=None,
347 |                 current_page=page_number,
348 |                 total_pages=0,
349 |                 is_paginated=False,
350 |                 error_message=error_msg
351 |             )
352 |         except Exception as e:
353 |             error_msg = f"Unexpected error when fetching decision document: {str(e)}"
354 |             logger.error(f"KvkkApiClient: {error_msg}")
355 |             return KvkkDocumentMarkdown(
356 |                 source_url=HttpUrl(decision_url),
357 |                 title=None,
358 |                 decision_date=None,
359 |                 decision_number=None,
360 |                 subject_summary=None,
361 |                 markdown_chunk=None,
362 |                 current_page=page_number,
363 |                 total_pages=0,
364 |                 is_paginated=False,
365 |                 error_message=error_msg
366 |             )
367 |     
368 |     async def close_client_session(self):
369 |         """Close the HTTP client session."""
370 |         if hasattr(self, 'http_client') and self.http_client and not self.http_client.is_closed:
371 |             await self.http_client.aclose()
372 |             logger.info("KvkkApiClient: HTTP client session closed.")
```

--------------------------------------------------------------------------------
/anayasa_mcp_module/models.py:
--------------------------------------------------------------------------------

```python
  1 | # anayasa_mcp_module/models.py
  2 | 
  3 | from pydantic import BaseModel, Field, HttpUrl
  4 | from typing import List, Optional, Dict, Any, Literal
  5 | from enum import Enum
  6 | 
  7 | # --- Enums (AnayasaDonemEnum, etc. - same as before) ---
class AnayasaDonemEnum(str, Enum):
    """Constitutional period filter (Donemler_id form parameter)."""
    TUMU = "ALL"      # no period filter
    DONEM_1961 = "1"  # 1961 Constitution era
    DONEM_1982 = "2"  # 1982 Constitution era
 12 | 
 13 | 
class AnayasaVarYokEnum(str, Enum):
    """Generic present/absent ("var"/"yok") filter shared by several search fields."""
    TUMU = "ALL"  # no filter
    YOK = "0"     # absent
    VAR = "1"     # present
 18 | 
 19 | 
class AnayasaIncelemeSonucuEnum(str, Enum):
    """Review outcome filter for norm-control decisions.

    Values are the numeric IDs used by the Constitutional Court search form;
    "ESAS" members are merits-stage outcomes, "ILK" members are
    preliminary-review outcomes.
    """
    TUMU = "ALL"
    ESAS_ACILMAMIS_SAYILMA = "1"    # merits: case deemed not filed
    ESAS_IPTAL = "2"                # merits: annulment
    ESAS_KARAR_YER_OLMADIGI = "3"   # merits: no need to decide
    ESAS_RET = "4"                  # merits: rejection
    ILK_ACILMAMIS_SAYILMA = "5"     # preliminary: case deemed not filed
    ILK_ISIN_GERI_CEVRILMESI = "6"  # preliminary: file returned
    ILK_KARAR_YER_OLMADIGI = "7"    # preliminary: no need to decide
    ILK_RET = "8"                   # preliminary: rejection
    KANUN_6216_M43_4_IPTAL = "12"   # annulment under Law 6216 art. 43/4
 31 | 
class AnayasaSonucGerekcesiEnum(str, Enum):
    """Rationale codes behind a norm-control outcome.

    Values are the numeric IDs used by the Constitutional Court search form;
    member names transliterate the Turkish rationale labels. Note the IDs are
    not contiguous or ordered (e.g. "27" duplicates a merits-related rationale).
    """
    TUMU = "ALL"
    ANAYASAYA_AYKIRI_DEGIL = "29"
    ANAYASAYA_ESAS_YONUNDEN_AYKIRILIK = "1"
    ANAYASAYA_ESAS_YONUNDEN_UYGUNLUK = "2"
    ANAYASAYA_SEKIL_ESAS_UYGUNLUK = "30"
    ANAYASAYA_SEKIL_YONUNDEN_AYKIRILIK = "3"
    ANAYASAYA_SEKIL_YONUNDEN_UYGUNLUK = "4"
    AYKIRILIK_ANAYASAYA_ESAS_YONUNDEN_DUPLICATE = "27"
    BASVURU_KARARI = "5"
    DENETIM_DISI = "6"
    DIGER_GEREKCE_1 = "7"
    DIGER_GEREKCE_2 = "8"
    EKSIKLIGIN_GIDERILMEMESI = "9"
    GEREKCE = "10"
    GOREV = "11"
    GOREV_YETKI = "12"
    GOREVLI_MAHKEME = "13"
    GORULMEKTE_OLAN_DAVA = "14"
    MAHKEME = "15"
    NORMDA_DEGISIKLIK_YAPILMASI = "16"
    NORMUN_YURURLUKTEN_KALDIRILMASI = "17"
    ON_YIL_YASAGI = "18"
    SURE = "19"
    USULE_UYMAMA = "20"
    UYGULANACAK_NORM = "21"
    UYGULANAMAZ_HALE_GELME = "22"
    YETKI = "23"
    YETKI_SURE = "24"
    YOK_HUKMUNDE_OLMAMA = "25"
    YOKLUK = "26"
 63 | # --- End Enums ---
 64 | 
class AnayasaNormDenetimiSearchRequest(BaseModel):
    """Model for Anayasa Mahkemesi (Norm Denetimi) search request for the MCP tool.

    Each field description names the remote form parameter it maps to (e.g.
    KelimeAra[], EsasNo). Date fields are DD/MM/YYYY strings and an empty
    string means "no filter". Code-valued fields are declared as plain Literal
    codes rather than the Enum types defined above, so the same values apply.
    """
    keywords_all: Optional[List[str]] = Field(default_factory=list, description="Keywords for AND logic (KelimeAra[]).")
    keywords_any: Optional[List[str]] = Field(default_factory=list, description="Keywords for OR logic (HerhangiBirKelimeAra[]).")
    keywords_exclude: Optional[List[str]] = Field(default_factory=list, description="Keywords to exclude (BulunmayanKelimeAra[]).")
    period: Optional[Literal["ALL", "1", "2"]] = Field(default="ALL", description="Constitutional period (Donemler_id).")
    case_number_esas: str = Field("", description="Case registry number (EsasNo), e.g., '2023/123'.")
    decision_number_karar: str = Field("", description="Decision number (KararNo), e.g., '2023/456'.")
    first_review_date_start: str = Field("", description="First review start date (IlkIncelemeTarihiIlk), format DD/MM/YYYY.")
    first_review_date_end: str = Field("", description="First review end date (IlkIncelemeTarihiSon), format DD/MM/YYYY.")
    decision_date_start: str = Field("", description="Decision start date (KararTarihiIlk), format DD/MM/YYYY.")
    decision_date_end: str = Field("", description="Decision end date (KararTarihiSon), format DD/MM/YYYY.")
    application_type: Optional[Literal["ALL", "1", "2", "3"]] = Field(default="ALL", description="Type of application (BasvuruTurler_id).")
    applicant_general_name: str = Field("", description="General applicant name (BasvuranGeneller_id).")
    applicant_specific_name: str = Field("", description="Specific applicant name (BasvuranOzeller_id).")
    official_gazette_date_start: str = Field("", description="Official Gazette start date (ResmiGazeteTarihiIlk), format DD/MM/YYYY.")
    official_gazette_date_end: str = Field("", description="Official Gazette end date (ResmiGazeteTarihiSon), format DD/MM/YYYY.")
    official_gazette_number_start: str = Field("", description="Official Gazette starting number (ResmiGazeteSayisiIlk).")
    official_gazette_number_end: str = Field("", description="Official Gazette ending number (ResmiGazeteSayisiSon).")
    has_press_release: Optional[Literal["ALL", "0", "1"]] = Field(default="ALL", description="Press release available (BasinDuyurusu).")
    has_dissenting_opinion: Optional[Literal["ALL", "0", "1"]] = Field(default="ALL", description="Dissenting opinion exists (KarsiOy).")
    has_different_reasoning: Optional[Literal["ALL", "0", "1"]] = Field(default="ALL", description="Different reasoning exists (FarkliGerekce).")
    attending_members_names: Optional[List[str]] = Field(default_factory=list, description="List of attending members' exact names (Uyeler_id[]).")
    rapporteur_name: str = Field("", description="Rapporteur's exact name (Raportorler_id).")
    norm_type: Optional[Literal["ALL", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "0"]] = Field(default="ALL", description="Type of the reviewed norm (NormunTurler_id).")
    norm_id_or_name: str = Field("", description="Number or name of the norm (NormunNumarasiAdlar_id).")
    norm_article: str = Field("", description="Article number of the norm (NormunMaddeNumarasi).")
    review_outcomes: Optional[List[Literal["1", "2", "3", "4", "5", "6", "7", "8", "12"]]] = Field(default_factory=list, description="List of review types and outcomes (IncelemeTuruKararSonuclar_id[]).")
    reason_for_final_outcome: Optional[Literal["ALL", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "29", "30"]] = Field(default="ALL", description="Main reason for the decision outcome (KararSonucununGerekcesi).")
    basis_constitution_article_numbers: Optional[List[str]] = Field(default_factory=list, description="List of supporting Constitution article numbers (DayanakHukmu[]).")
    # NOTE(review): capped at 10 per page here, vs le=100 in AnayasaUnifiedSearchRequest —
    # presumably an upstream service limit; confirm in the client code.
    results_per_page: int = Field(10, ge=1, le=10, description="Results per page.")
    page_to_fetch: int = Field(1, ge=1, description="Page number to fetch for results list.")
    sort_by_criteria: str = Field("KararTarihi", description="Sort criteria. Options: 'KararTarihi', 'YayinTarihi', 'Toplam' (keyword count).")
 98 | 
class AnayasaReviewedNormInfo(BaseModel):
    """Details of a norm reviewed within an AYM decision summary.

    One decision may review several norms/articles; a list of these entries is
    held in AnayasaDecisionSummary.reviewed_norms. Empty strings mean the value
    was not present on the results page.
    """
    norm_name_or_number: str = Field("", description="Norm name or number")
    article_number: str = Field("", description="Article number")
    review_type_and_outcome: str = Field("", description="Review type and outcome")
    outcome_reason: str = Field("", description="Outcome reason")
    basis_constitution_articles_cited: List[str] = Field(default_factory=list)  # Constitution articles cited as basis for the outcome
    postponement_period: str = Field("", description="Postponement period")
107 | 
class AnayasaDecisionSummary(BaseModel):
    """Model for a single Anayasa Mahkemesi (Norm Denetimi) decision summary from search results.

    All string fields default to "" when the corresponding value is missing
    from the scraped results page.
    """
    decision_reference_no: str = Field("", description="Decision reference number")
    decision_page_url: str = Field("", description="Decision page URL")
    keywords_found_count: Optional[int] = Field(0, description="Keywords found count")
    application_type_summary: str = Field("", description="Application type summary")
    applicant_summary: str = Field("", description="Applicant summary")
    decision_outcome_summary: str = Field("", description="Decision outcome summary")
    decision_date_summary: str = Field("", description="Decision date summary")
    reviewed_norms: List[AnayasaReviewedNormInfo] = Field(default_factory=list)  # norms examined in this decision
118 | 
class AnayasaSearchResult(BaseModel):
    """Model for the overall search result for Anayasa Mahkemesi Norm Denetimi decisions."""
    decisions: List[AnayasaDecisionSummary]  # one entry per decision on the retrieved page
    total_records_found: int = Field(0, description="Total records found")
    retrieved_page_number: int = Field(1, description="Retrieved page number")
124 | 
class AnayasaDocumentMarkdown(BaseModel):
    """
    Model for an Anayasa Mahkemesi (Norm Denetimi) decision document, containing a chunk of Markdown content
    and pagination information.
    """
    source_url: HttpUrl
    decision_reference_no_from_page: str = Field("", description="E.K. No parsed from the document page.")
    decision_date_from_page: str = Field("", description="Decision date parsed from the document page.")
    official_gazette_info_from_page: str = Field("", description="Official Gazette info parsed from the document page.")
    # NOTE(review): the 5,000-char chunking presumably happens in the client; the size is not validated by this model.
    markdown_chunk: str = Field("", description="A 5,000 character chunk of the Markdown content.")
    current_page: int = Field(description="The current page number of the markdown chunk (1-indexed).")
    total_pages: int = Field(description="Total number of pages for the full markdown content.")
    is_paginated: bool = Field(description="True if the full markdown content is split into multiple pages.")
138 | 
139 | 
140 | # --- Models for Anayasa Mahkemesi - Bireysel Başvuru Karar Raporu ---
141 | 
class AnayasaBireyselReportSearchRequest(BaseModel):
    """Model for Anayasa Mahkemesi (Bireysel Başvuru) 'Karar Arama Raporu' search request.

    Far fewer filters than the Norm Denetimi request: only AND-combined
    keywords plus a page number.
    """
    keywords: Optional[List[str]] = Field(default_factory=list, description="Keywords for AND logic (KelimeAra[]).")
    page_to_fetch: int = Field(1, ge=1, description="Page number to fetch for the report (page). Default is 1.")
146 | 
class AnayasaBireyselReportDecisionDetail(BaseModel):
    """Details of a specific right/claim within a Bireysel Başvuru decision summary in a report."""
    # Field descriptions are Turkish because they mirror the report's own labels.
    hak: str = Field("", description="İhlal edildiği iddia edilen hak (örneğin, Mülkiyet hakkı).")  # allegedly violated right
    mudahale_iddiasi: str = Field("", description="İhlale neden olan müdahale iddiası.")  # alleged interference causing the violation
    sonuc: str = Field("", description="İnceleme sonucu (örneğin, İhlal, Düşme).")  # examination outcome (e.g. violation, struck out)
    giderim: str = Field("", description="Kararlaştırılan giderim (örneğin, Yeniden yargılama).")  # redress awarded (e.g. retrial)
153 | 
class AnayasaBireyselReportDecisionSummary(BaseModel):
    """Model for a single Anayasa Mahkemesi (Bireysel Başvuru) decision summary from a 'Karar Arama Raporu'."""
    title: str = Field("", description="Başvurunun başlığı (e.g., 'HASAN DURMUŞ Başvurusuna İlişkin Karar').")
    decision_reference_no: str = Field("", description="Başvuru Numarası (e.g., '2019/19126').")
    decision_page_url: str = Field("", description="URL to the full decision page.")
    decision_type_summary: str = Field("", description="Karar Türü (Başvuru Sonucu) (e.g., 'Esas (İhlal)').")
    decision_making_body: str = Field("", description="Kararı Veren Birim (e.g., 'Genel Kurul', 'Birinci Bölüm').")
    application_date_summary: str = Field("", description="Başvuru Tarihi (DD/MM/YYYY).")
    decision_date_summary: str = Field("", description="Karar Tarihi (DD/MM/YYYY).")
    application_subject_summary: str = Field("", description="Başvuru konusunun özeti.")
    details: List[AnayasaBireyselReportDecisionDetail] = Field(default_factory=list, description="İncelenen haklar ve sonuçlarına ilişkin detaylar.")  # per-right examination details
165 | 
class AnayasaBireyselReportSearchResult(BaseModel):
    """Model for the overall search result for Anayasa Mahkemesi 'Karar Arama Raporu'."""
    decisions: List[AnayasaBireyselReportDecisionSummary]  # one entry per decision on the retrieved report page
    total_records_found: int = Field(0, description="Raporda bulunan toplam karar sayısı.")
    # NOTE(review): required here, but defaults to 1 in AnayasaSearchResult — minor inconsistency.
    retrieved_page_number: int = Field(description="Alınan rapor sayfa numarası.")
171 | 
172 | 
class AnayasaBireyselBasvuruDocumentMarkdown(BaseModel):
    """
    Model for an Anayasa Mahkemesi (Bireysel Başvuru) decision document, containing a chunk of Markdown content
    and pagination information. Fetched from /BB/YYYY/NNNN paths.
    """
    source_url: HttpUrl
    # Metadata below is scraped from the decision page; None when not found there.
    basvuru_no_from_page: Optional[str] = Field(None, description="Başvuru Numarası (B.No) parsed from the document page.")
    karar_tarihi_from_page: Optional[str] = Field(None, description="Decision date parsed from the document page.")
    basvuru_tarihi_from_page: Optional[str] = Field(None, description="Application date parsed from the document page.")
    karari_veren_birim_from_page: Optional[str] = Field(None, description="Deciding body (Bölüm/Genel Kurul) parsed from the document page.")
    karar_turu_from_page: Optional[str] = Field(None, description="Decision type (Başvuru Sonucu) parsed from the document page.")
    resmi_gazete_info_from_page: Optional[str] = Field(None, description="Official Gazette info parsed from the document page, if available.")
    markdown_chunk: Optional[str] = Field(None, description="A 5,000 character chunk of the Markdown content.")  # chunking presumably done by the client; not validated here
    current_page: int = Field(description="The current page number of the markdown chunk (1-indexed).")
    total_pages: int = Field(description="Total number of pages for the full markdown content.")
    is_paginated: bool = Field(description="True if the full markdown content is split into multiple pages.")
189 | 
190 | # --- End Models for Bireysel Başvuru ---
191 | 
192 | # --- Unified Models ---
class AnayasaUnifiedSearchRequest(BaseModel):
    """Unified search request for both Norm Denetimi and Bireysel Başvuru.

    Superset of the two dedicated request models: `decision_type` selects the
    backend, and the backend-specific fields of the other system are ignored.
    """
    decision_type: Literal["norm_denetimi", "bireysel_basvuru"] = Field(..., description="Decision type: norm_denetimi or bireysel_basvuru")
    
    # Common parameters
    keywords: List[str] = Field(default_factory=list, description="Keywords to search for")
    page_to_fetch: int = Field(1, ge=1, le=100, description="Page number to fetch (1-100)")
    # NOTE(review): le=100 here vs le=10 in AnayasaNormDenetimiSearchRequest — confirm the
    # norm_denetimi path tolerates (or clamps) values above 10.
    results_per_page: int = Field(10, ge=1, le=100, description="Results per page (1-100)")
    
    # Norm Denetimi specific parameters (ignored for bireysel_basvuru)
    keywords_all: List[str] = Field(default_factory=list, description="All keywords must be present (norm_denetimi only)")
    keywords_any: List[str] = Field(default_factory=list, description="Any of these keywords (norm_denetimi only)")
    decision_type_norm: Literal["ALL", "1", "2", "3"] = Field("ALL", description="Decision type for norm denetimi")
    application_date_start: str = Field("", description="Application start date (norm_denetimi only)")
    application_date_end: str = Field("", description="Application end date (norm_denetimi only)")
    
    # Bireysel Başvuru specific parameters (ignored for norm_denetimi)
    decision_start_date: str = Field("", description="Decision start date (bireysel_basvuru only)")
    decision_end_date: str = Field("", description="Decision end date (bireysel_basvuru only)")
    # NOTE(review): these codes match the Norm Denetimi NormunTurler_id values, yet the field is
    # documented as bireysel_basvuru-only — verify against the client that consumes this request.
    norm_type: Literal["ALL", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "0"] = Field("ALL", description="Norm type (bireysel_basvuru only)")
    subject_category: str = Field("", description="Subject category (bireysel_basvuru only)")
214 | 
class AnayasaUnifiedSearchResult(BaseModel):
    """Unified search result containing decisions from either system.

    Decisions are untyped dicts because the summary shape differs between
    AnayasaDecisionSummary and AnayasaBireyselReportDecisionSummary.
    """
    decision_type: Literal["norm_denetimi", "bireysel_basvuru"] = Field(..., description="Type of decisions returned")
    decisions: List[Dict[str, Any]] = Field(default_factory=list, description="Decision list (structure varies by type)")
    total_records_found: int = Field(0, description="Total number of records found")
    retrieved_page_number: int = Field(1, description="Page number that was retrieved")
221 | 
class AnayasaUnifiedDocumentMarkdown(BaseModel):
    """Unified document model for both Norm Denetimi and Bireysel Başvuru.

    Backend-specific parsed metadata goes into the untyped `document_data`
    dict instead of the typed fields of the dedicated document models.
    """
    decision_type: Literal["norm_denetimi", "bireysel_basvuru"] = Field(..., description="Type of document")
    source_url: HttpUrl = Field(..., description="Source URL of the document")
    document_data: Dict[str, Any] = Field(default_factory=dict, description="Document content and metadata")
    markdown_chunk: Optional[str] = Field(None, description="Markdown content chunk")
    current_page: int = Field(1, description="Current page number")
    total_pages: int = Field(1, description="Total number of pages")
    is_paginated: bool = Field(False, description="Whether document is paginated")
```

--------------------------------------------------------------------------------
/saidsurucu-yargi-mcp-f5fa007/anayasa_mcp_module/models.py:
--------------------------------------------------------------------------------

```python
  1 | # anayasa_mcp_module/models.py
  2 | 
  3 | from pydantic import BaseModel, Field, HttpUrl
  4 | from typing import List, Optional, Dict, Any, Literal
  5 | from enum import Enum
  6 | 
  7 | # --- Enums (AnayasaDonemEnum, etc. - same as before) ---
class AnayasaDonemEnum(str, Enum):
    """Constitutional period filter (Donemler_id)."""
    TUMU = "ALL"      # no period filter
    DONEM_1961 = "1"  # 1961 Constitution era
    DONEM_1982 = "2"  # 1982 Constitution era
 12 | 
 13 | 
class AnayasaVarYokEnum(str, Enum):
    """Tri-state presence filter: "ALL" (no filter), "0" (absent), "1" (present).

    NOTE(review): not referenced elsewhere in this module; the
    Literal["ALL", "0", "1"] request fields (e.g. has_press_release) mirror
    its values — confirm intended usage.
    """
    TUMU = "ALL"
    YOK = "0"
    VAR = "1"
 18 | 
 19 | 
class AnayasaIncelemeSonucuEnum(str, Enum):
    """Review-stage / outcome codes (IncelemeTuruKararSonuclar_id) for Norm Denetimi searches.

    ESAS_* members are outcomes of the review on the merits; ILK_* members are
    outcomes of the first (preliminary) review. Values are the remote service's
    numeric ids as strings; ids "9"-"11" are not represented here.
    """
    TUMU = "ALL"                    # no outcome filter
    ESAS_ACILMAMIS_SAYILMA = "1"    # merits: application deemed not filed
    ESAS_IPTAL = "2"                # merits: annulment
    ESAS_KARAR_YER_OLMADIGI = "3"   # merits: no need to decide
    ESAS_RET = "4"                  # merits: rejection
    ILK_ACILMAMIS_SAYILMA = "5"     # preliminary: application deemed not filed
    ILK_ISIN_GERI_CEVRILMESI = "6"  # preliminary: file returned
    ILK_KARAR_YER_OLMADIGI = "7"    # preliminary: no need to decide
    ILK_RET = "8"                   # preliminary: rejection
    KANUN_6216_M43_4_IPTAL = "12"   # annulment under Law No. 6216 art. 43(4)
 31 | 
class AnayasaSonucGerekcesiEnum(str, Enum):
    """Reason codes for the final decision outcome (KararSonucununGerekcesi).

    Values are the remote service's numeric ids as strings. Members are listed
    alphabetically by name, so the ids appear out of order; id "28" is unused,
    and "27" appears to duplicate the meaning of "1" (hence the *_DUPLICATE
    member name).
    """
    TUMU = "ALL"  # no reason filter
    ANAYASAYA_AYKIRI_DEGIL = "29"
    ANAYASAYA_ESAS_YONUNDEN_AYKIRILIK = "1"
    ANAYASAYA_ESAS_YONUNDEN_UYGUNLUK = "2"
    ANAYASAYA_SEKIL_ESAS_UYGUNLUK = "30"
    ANAYASAYA_SEKIL_YONUNDEN_AYKIRILIK = "3"
    ANAYASAYA_SEKIL_YONUNDEN_UYGUNLUK = "4"
    AYKIRILIK_ANAYASAYA_ESAS_YONUNDEN_DUPLICATE = "27"
    BASVURU_KARARI = "5"
    DENETIM_DISI = "6"
    DIGER_GEREKCE_1 = "7"
    DIGER_GEREKCE_2 = "8"
    EKSIKLIGIN_GIDERILMEMESI = "9"
    GEREKCE = "10"
    GOREV = "11"
    GOREV_YETKI = "12"
    GOREVLI_MAHKEME = "13"
    GORULMEKTE_OLAN_DAVA = "14"
    MAHKEME = "15"
    NORMDA_DEGISIKLIK_YAPILMASI = "16"
    NORMUN_YURURLUKTEN_KALDIRILMASI = "17"
    ON_YIL_YASAGI = "18"
    SURE = "19"
    USULE_UYMAMA = "20"
    UYGULANACAK_NORM = "21"
    UYGULANAMAZ_HALE_GELME = "22"
    YETKI = "23"
    YETKI_SURE = "24"
    YOK_HUKMUNDE_OLMAMA = "25"
    YOKLUK = "26"
 63 | # --- End Enums ---
 64 | 
class AnayasaNormDenetimiSearchRequest(BaseModel):
    """Model for Anayasa Mahkemesi (Norm Denetimi) search request for the MCP tool.

    Each field description names the remote form parameter it maps to (e.g.
    KelimeAra[], EsasNo). Date fields are DD/MM/YYYY strings and an empty
    string means "no filter". Code-valued fields are declared as plain Literal
    codes rather than the Enum types defined above, so the same values apply.
    """
    keywords_all: Optional[List[str]] = Field(default_factory=list, description="Keywords for AND logic (KelimeAra[]).")
    keywords_any: Optional[List[str]] = Field(default_factory=list, description="Keywords for OR logic (HerhangiBirKelimeAra[]).")
    keywords_exclude: Optional[List[str]] = Field(default_factory=list, description="Keywords to exclude (BulunmayanKelimeAra[]).")
    period: Optional[Literal["ALL", "1", "2"]] = Field(default="ALL", description="Constitutional period (Donemler_id).")
    case_number_esas: str = Field("", description="Case registry number (EsasNo), e.g., '2023/123'.")
    decision_number_karar: str = Field("", description="Decision number (KararNo), e.g., '2023/456'.")
    first_review_date_start: str = Field("", description="First review start date (IlkIncelemeTarihiIlk), format DD/MM/YYYY.")
    first_review_date_end: str = Field("", description="First review end date (IlkIncelemeTarihiSon), format DD/MM/YYYY.")
    decision_date_start: str = Field("", description="Decision start date (KararTarihiIlk), format DD/MM/YYYY.")
    decision_date_end: str = Field("", description="Decision end date (KararTarihiSon), format DD/MM/YYYY.")
    application_type: Optional[Literal["ALL", "1", "2", "3"]] = Field(default="ALL", description="Type of application (BasvuruTurler_id).")
    applicant_general_name: str = Field("", description="General applicant name (BasvuranGeneller_id).")
    applicant_specific_name: str = Field("", description="Specific applicant name (BasvuranOzeller_id).")
    official_gazette_date_start: str = Field("", description="Official Gazette start date (ResmiGazeteTarihiIlk), format DD/MM/YYYY.")
    official_gazette_date_end: str = Field("", description="Official Gazette end date (ResmiGazeteTarihiSon), format DD/MM/YYYY.")
    official_gazette_number_start: str = Field("", description="Official Gazette starting number (ResmiGazeteSayisiIlk).")
    official_gazette_number_end: str = Field("", description="Official Gazette ending number (ResmiGazeteSayisiSon).")
    has_press_release: Optional[Literal["ALL", "0", "1"]] = Field(default="ALL", description="Press release available (BasinDuyurusu).")
    has_dissenting_opinion: Optional[Literal["ALL", "0", "1"]] = Field(default="ALL", description="Dissenting opinion exists (KarsiOy).")
    has_different_reasoning: Optional[Literal["ALL", "0", "1"]] = Field(default="ALL", description="Different reasoning exists (FarkliGerekce).")
    attending_members_names: Optional[List[str]] = Field(default_factory=list, description="List of attending members' exact names (Uyeler_id[]).")
    rapporteur_name: str = Field("", description="Rapporteur's exact name (Raportorler_id).")
    norm_type: Optional[Literal["ALL", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "0"]] = Field(default="ALL", description="Type of the reviewed norm (NormunTurler_id).")
    norm_id_or_name: str = Field("", description="Number or name of the norm (NormunNumarasiAdlar_id).")
    norm_article: str = Field("", description="Article number of the norm (NormunMaddeNumarasi).")
    review_outcomes: Optional[List[Literal["1", "2", "3", "4", "5", "6", "7", "8", "12"]]] = Field(default_factory=list, description="List of review types and outcomes (IncelemeTuruKararSonuclar_id[]).")
    reason_for_final_outcome: Optional[Literal["ALL", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "29", "30"]] = Field(default="ALL", description="Main reason for the decision outcome (KararSonucununGerekcesi).")
    basis_constitution_article_numbers: Optional[List[str]] = Field(default_factory=list, description="List of supporting Constitution article numbers (DayanakHukmu[]).")
    # NOTE(review): capped at 10 per page here, vs le=100 in AnayasaUnifiedSearchRequest —
    # presumably an upstream service limit; confirm in the client code.
    results_per_page: int = Field(10, ge=1, le=10, description="Results per page.")
    page_to_fetch: int = Field(1, ge=1, description="Page number to fetch for results list.")
    sort_by_criteria: str = Field("KararTarihi", description="Sort criteria. Options: 'KararTarihi', 'YayinTarihi', 'Toplam' (keyword count).")
 98 | 
class AnayasaReviewedNormInfo(BaseModel):
    """Details of a norm reviewed within an AYM decision summary.

    One decision may review several norms/articles; a list of these entries is
    held in AnayasaDecisionSummary.reviewed_norms. Empty strings mean the value
    was not present on the results page.
    """
    norm_name_or_number: str = Field("", description="Norm name or number")
    article_number: str = Field("", description="Article number")
    review_type_and_outcome: str = Field("", description="Review type and outcome")
    outcome_reason: str = Field("", description="Outcome reason")
    basis_constitution_articles_cited: List[str] = Field(default_factory=list)  # Constitution articles cited as basis for the outcome
    postponement_period: str = Field("", description="Postponement period")
107 | 
class AnayasaDecisionSummary(BaseModel):
    """Model for a single Anayasa Mahkemesi (Norm Denetimi) decision summary from search results.

    All string fields default to "" when the corresponding value is missing
    from the scraped results page.
    """
    decision_reference_no: str = Field("", description="Decision reference number")
    decision_page_url: str = Field("", description="Decision page URL")
    keywords_found_count: Optional[int] = Field(0, description="Keywords found count")
    application_type_summary: str = Field("", description="Application type summary")
    applicant_summary: str = Field("", description="Applicant summary")
    decision_outcome_summary: str = Field("", description="Decision outcome summary")
    decision_date_summary: str = Field("", description="Decision date summary")
    reviewed_norms: List[AnayasaReviewedNormInfo] = Field(default_factory=list)  # norms examined in this decision
118 | 
class AnayasaSearchResult(BaseModel):
    """Model for the overall search result for Anayasa Mahkemesi Norm Denetimi decisions."""
    decisions: List[AnayasaDecisionSummary]  # one entry per decision on the retrieved page
    total_records_found: int = Field(0, description="Total records found")
    retrieved_page_number: int = Field(1, description="Retrieved page number")
124 | 
class AnayasaDocumentMarkdown(BaseModel):
    """
    Model for an Anayasa Mahkemesi (Norm Denetimi) decision document, containing a chunk of Markdown content
    and pagination information.
    """
    source_url: HttpUrl
    decision_reference_no_from_page: str = Field("", description="E.K. No parsed from the document page.")
    decision_date_from_page: str = Field("", description="Decision date parsed from the document page.")
    official_gazette_info_from_page: str = Field("", description="Official Gazette info parsed from the document page.")
    # NOTE(review): the 5,000-char chunking presumably happens in the client; the size is not validated by this model.
    markdown_chunk: str = Field("", description="A 5,000 character chunk of the Markdown content.")
    current_page: int = Field(description="The current page number of the markdown chunk (1-indexed).")
    total_pages: int = Field(description="Total number of pages for the full markdown content.")
    is_paginated: bool = Field(description="True if the full markdown content is split into multiple pages.")
138 | 
139 | 
140 | # --- Models for Anayasa Mahkemesi - Bireysel Başvuru Karar Raporu ---
141 | 
class AnayasaBireyselReportSearchRequest(BaseModel):
    """Model for Anayasa Mahkemesi (Bireysel Başvuru) 'Karar Arama Raporu' search request.

    Far fewer filters than the Norm Denetimi request: only AND-combined
    keywords plus a page number.
    """
    keywords: Optional[List[str]] = Field(default_factory=list, description="Keywords for AND logic (KelimeAra[]).")
    page_to_fetch: int = Field(1, ge=1, description="Page number to fetch for the report (page). Default is 1.")
146 | 
class AnayasaBireyselReportDecisionDetail(BaseModel):
    """Details of a specific right/claim within a Bireysel Başvuru decision summary in a report."""
    # Field descriptions are Turkish because they mirror the report's own labels.
    hak: str = Field("", description="İhlal edildiği iddia edilen hak (örneğin, Mülkiyet hakkı).")  # allegedly violated right
    mudahale_iddiasi: str = Field("", description="İhlale neden olan müdahale iddiası.")  # alleged interference causing the violation
    sonuc: str = Field("", description="İnceleme sonucu (örneğin, İhlal, Düşme).")  # examination outcome (e.g. violation, struck out)
    giderim: str = Field("", description="Kararlaştırılan giderim (örneğin, Yeniden yargılama).")  # redress awarded (e.g. retrial)
153 | 
class AnayasaBireyselReportDecisionSummary(BaseModel):
    """Model for a single Anayasa Mahkemesi (Bireysel Başvuru) decision summary from a 'Karar Arama Raporu'."""
    title: str = Field("", description="Başvurunun başlığı (e.g., 'HASAN DURMUŞ Başvurusuna İlişkin Karar').")
    decision_reference_no: str = Field("", description="Başvuru Numarası (e.g., '2019/19126').")
    decision_page_url: str = Field("", description="URL to the full decision page.")
    decision_type_summary: str = Field("", description="Karar Türü (Başvuru Sonucu) (e.g., 'Esas (İhlal)').")
    decision_making_body: str = Field("", description="Kararı Veren Birim (e.g., 'Genel Kurul', 'Birinci Bölüm').")
    application_date_summary: str = Field("", description="Başvuru Tarihi (DD/MM/YYYY).")
    decision_date_summary: str = Field("", description="Karar Tarihi (DD/MM/YYYY).")
    application_subject_summary: str = Field("", description="Başvuru konusunun özeti.")
    details: List[AnayasaBireyselReportDecisionDetail] = Field(default_factory=list, description="İncelenen haklar ve sonuçlarına ilişkin detaylar.")  # per-right examination details
165 | 
class AnayasaBireyselReportSearchResult(BaseModel):
    """Model for the overall search result for Anayasa Mahkemesi 'Karar Arama Raporu'."""
    decisions: List[AnayasaBireyselReportDecisionSummary]  # one entry per decision on the retrieved report page
    total_records_found: int = Field(0, description="Raporda bulunan toplam karar sayısı.")
    # NOTE(review): required here, but defaults to 1 in AnayasaSearchResult — minor inconsistency.
    retrieved_page_number: int = Field(description="Alınan rapor sayfa numarası.")
171 | 
172 | 
class AnayasaBireyselBasvuruDocumentMarkdown(BaseModel):
    """
    Model for an Anayasa Mahkemesi (Bireysel Başvuru) decision document, containing a chunk of Markdown content
    and pagination information. Fetched from /BB/YYYY/NNNN paths.
    """
    source_url: HttpUrl
    # Metadata below is scraped from the decision page; None when not found there.
    basvuru_no_from_page: Optional[str] = Field(None, description="Başvuru Numarası (B.No) parsed from the document page.")
    karar_tarihi_from_page: Optional[str] = Field(None, description="Decision date parsed from the document page.")
    basvuru_tarihi_from_page: Optional[str] = Field(None, description="Application date parsed from the document page.")
    karari_veren_birim_from_page: Optional[str] = Field(None, description="Deciding body (Bölüm/Genel Kurul) parsed from the document page.")
    karar_turu_from_page: Optional[str] = Field(None, description="Decision type (Başvuru Sonucu) parsed from the document page.")
    resmi_gazete_info_from_page: Optional[str] = Field(None, description="Official Gazette info parsed from the document page, if available.")
    markdown_chunk: Optional[str] = Field(None, description="A 5,000 character chunk of the Markdown content.")  # chunking presumably done by the client; not validated here
    current_page: int = Field(description="The current page number of the markdown chunk (1-indexed).")
    total_pages: int = Field(description="Total number of pages for the full markdown content.")
    is_paginated: bool = Field(description="True if the full markdown content is split into multiple pages.")
189 | 
190 | # --- End Models for Bireysel Başvuru ---
191 | 
192 | # --- Unified Models ---
class AnayasaUnifiedSearchRequest(BaseModel):
    """Unified search request for both Norm Denetimi and Bireysel Başvuru.

    Superset of the two dedicated request models: `decision_type` selects the
    backend, and the backend-specific fields of the other system are ignored.
    """
    decision_type: Literal["norm_denetimi", "bireysel_basvuru"] = Field(..., description="Decision type: norm_denetimi or bireysel_basvuru")
    
    # Common parameters
    keywords: List[str] = Field(default_factory=list, description="Keywords to search for")
    page_to_fetch: int = Field(1, ge=1, le=100, description="Page number to fetch (1-100)")
    # NOTE(review): le=100 here vs le=10 in AnayasaNormDenetimiSearchRequest — confirm the
    # norm_denetimi path tolerates (or clamps) values above 10.
    results_per_page: int = Field(10, ge=1, le=100, description="Results per page (1-100)")
    
    # Norm Denetimi specific parameters (ignored for bireysel_basvuru)
    keywords_all: List[str] = Field(default_factory=list, description="All keywords must be present (norm_denetimi only)")
    keywords_any: List[str] = Field(default_factory=list, description="Any of these keywords (norm_denetimi only)")
    decision_type_norm: Literal["ALL", "1", "2", "3"] = Field("ALL", description="Decision type for norm denetimi")
    application_date_start: str = Field("", description="Application start date (norm_denetimi only)")
    application_date_end: str = Field("", description="Application end date (norm_denetimi only)")
    
    # Bireysel Başvuru specific parameters (ignored for norm_denetimi)
    decision_start_date: str = Field("", description="Decision start date (bireysel_basvuru only)")
    decision_end_date: str = Field("", description="Decision end date (bireysel_basvuru only)")
    # NOTE(review): these codes match the Norm Denetimi NormunTurler_id values, yet the field is
    # documented as bireysel_basvuru-only — verify against the client that consumes this request.
    norm_type: Literal["ALL", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "0"] = Field("ALL", description="Norm type (bireysel_basvuru only)")
    subject_category: str = Field("", description="Subject category (bireysel_basvuru only)")
214 | 
class AnayasaUnifiedSearchResult(BaseModel):
    """Unified search result containing decisions from either system.

    Decisions are untyped dicts because the summary shape differs between
    AnayasaDecisionSummary and AnayasaBireyselReportDecisionSummary.
    """
    decision_type: Literal["norm_denetimi", "bireysel_basvuru"] = Field(..., description="Type of decisions returned")
    decisions: List[Dict[str, Any]] = Field(default_factory=list, description="Decision list (structure varies by type)")
    total_records_found: int = Field(0, description="Total number of records found")
    retrieved_page_number: int = Field(1, description="Page number that was retrieved")
221 | 
class AnayasaUnifiedDocumentMarkdown(BaseModel):
    """Unified document model for both Norm Denetimi and Bireysel Başvuru.

    Backend-specific parsed metadata goes into the untyped `document_data`
    dict instead of the typed fields of the dedicated document models.
    """
    decision_type: Literal["norm_denetimi", "bireysel_basvuru"] = Field(..., description="Type of document")
    source_url: HttpUrl = Field(..., description="Source URL of the document")
    document_data: Dict[str, Any] = Field(default_factory=dict, description="Document content and metadata")
    markdown_chunk: Optional[str] = Field(None, description="Markdown content chunk")
    current_page: int = Field(1, description="Current page number")
    total_pages: int = Field(1, description="Total number of pages")
    is_paginated: bool = Field(False, description="Whether document is paginated")
```
Page 5/11FirstPrevNextLast