This is page 12 of 15. Use http://codebase.md/genomoncology/biomcp?page={x} to view the full context.

# Directory Structure

```
├── .github
│   ├── actions
│   │   └── setup-python-env
│   │       └── action.yml
│   ├── dependabot.yml
│   └── workflows
│       ├── ci.yml
│       ├── deploy-docs.yml
│       ├── main.yml.disabled
│       ├── on-release-main.yml
│       └── validate-codecov-config.yml
├── .gitignore
├── .pre-commit-config.yaml
├── BIOMCP_DATA_FLOW.md
├── CHANGELOG.md
├── CNAME
├── codecov.yaml
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── apis
│   │   ├── error-codes.md
│   │   ├── overview.md
│   │   └── python-sdk.md
│   ├── assets
│   │   ├── biomcp-cursor-locations.png
│   │   ├── favicon.ico
│   │   ├── icon.png
│   │   ├── logo.png
│   │   ├── mcp_architecture.txt
│   │   └── remote-connection
│   │       ├── 00_connectors.png
│   │       ├── 01_add_custom_connector.png
│   │       ├── 02_connector_enabled.png
│   │       ├── 03_connect_to_biomcp.png
│   │       ├── 04_select_google_oauth.png
│   │       └── 05_success_connect.png
│   ├── backend-services-reference
│   │   ├── 01-overview.md
│   │   ├── 02-biothings-suite.md
│   │   ├── 03-cbioportal.md
│   │   ├── 04-clinicaltrials-gov.md
│   │   ├── 05-nci-cts-api.md
│   │   ├── 06-pubtator3.md
│   │   └── 07-alphagenome.md
│   ├── blog
│   │   ├── ai-assisted-clinical-trial-search-analysis.md
│   │   ├── images
│   │   │   ├── deep-researcher-video.png
│   │   │   ├── researcher-announce.png
│   │   │   ├── researcher-drop-down.png
│   │   │   ├── researcher-prompt.png
│   │   │   ├── trial-search-assistant.png
│   │   │   └── what_is_biomcp_thumbnail.png
│   │   └── researcher-persona-resource.md
│   ├── changelog.md
│   ├── CNAME
│   ├── concepts
│   │   ├── 01-what-is-biomcp.md
│   │   ├── 02-the-deep-researcher-persona.md
│   │   └── 03-sequential-thinking-with-the-think-tool.md
│   ├── developer-guides
│   │   ├── 01-server-deployment.md
│   │   ├── 02-contributing-and-testing.md
│   │   ├── 03-third-party-endpoints.md
│   │   ├── 04-transport-protocol.md
│   │   ├── 05-error-handling.md
│   │   ├── 06-http-client-and-caching.md
│   │   ├── 07-performance-optimizations.md
│   │   └── generate_endpoints.py
│   ├── faq-condensed.md
│   ├── FDA_SECURITY.md
│   ├── genomoncology.md
│   ├── getting-started
│   │   ├── 01-quickstart-cli.md
│   │   ├── 02-claude-desktop-integration.md
│   │   └── 03-authentication-and-api-keys.md
│   ├── how-to-guides
│   │   ├── 01-find-articles-and-cbioportal-data.md
│   │   ├── 02-find-trials-with-nci-and-biothings.md
│   │   ├── 03-get-comprehensive-variant-annotations.md
│   │   ├── 04-predict-variant-effects-with-alphagenome.md
│   │   ├── 05-logging-and-monitoring-with-bigquery.md
│   │   └── 06-search-nci-organizations-and-interventions.md
│   ├── index.md
│   ├── policies.md
│   ├── reference
│   │   ├── architecture-diagrams.md
│   │   ├── quick-architecture.md
│   │   ├── quick-reference.md
│   │   └── visual-architecture.md
│   ├── robots.txt
│   ├── stylesheets
│   │   ├── announcement.css
│   │   └── extra.css
│   ├── troubleshooting.md
│   ├── tutorials
│   │   ├── biothings-prompts.md
│   │   ├── claude-code-biomcp-alphagenome.md
│   │   ├── nci-prompts.md
│   │   ├── openfda-integration.md
│   │   ├── openfda-prompts.md
│   │   ├── pydantic-ai-integration.md
│   │   └── remote-connection.md
│   ├── user-guides
│   │   ├── 01-command-line-interface.md
│   │   ├── 02-mcp-tools-reference.md
│   │   └── 03-integrating-with-ides-and-clients.md
│   └── workflows
│       └── all-workflows.md
├── example_scripts
│   ├── mcp_integration.py
│   └── python_sdk.py
├── glama.json
├── LICENSE
├── lzyank.toml
├── Makefile
├── mkdocs.yml
├── package-lock.json
├── package.json
├── pyproject.toml
├── README.md
├── scripts
│   ├── check_docs_in_mkdocs.py
│   ├── check_http_imports.py
│   └── generate_endpoints_doc.py
├── smithery.yaml
├── src
│   └── biomcp
│       ├── __init__.py
│       ├── __main__.py
│       ├── articles
│       │   ├── __init__.py
│       │   ├── autocomplete.py
│       │   ├── fetch.py
│       │   ├── preprints.py
│       │   ├── search_optimized.py
│       │   ├── search.py
│       │   └── unified.py
│       ├── biomarkers
│       │   ├── __init__.py
│       │   └── search.py
│       ├── cbioportal_helper.py
│       ├── circuit_breaker.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── articles.py
│       │   ├── biomarkers.py
│       │   ├── diseases.py
│       │   ├── health.py
│       │   ├── interventions.py
│       │   ├── main.py
│       │   ├── openfda.py
│       │   ├── organizations.py
│       │   ├── server.py
│       │   ├── trials.py
│       │   └── variants.py
│       ├── connection_pool.py
│       ├── constants.py
│       ├── core.py
│       ├── diseases
│       │   ├── __init__.py
│       │   ├── getter.py
│       │   └── search.py
│       ├── domain_handlers.py
│       ├── drugs
│       │   ├── __init__.py
│       │   └── getter.py
│       ├── exceptions.py
│       ├── genes
│       │   ├── __init__.py
│       │   └── getter.py
│       ├── http_client_simple.py
│       ├── http_client.py
│       ├── individual_tools.py
│       ├── integrations
│       │   ├── __init__.py
│       │   ├── biothings_client.py
│       │   └── cts_api.py
│       ├── interventions
│       │   ├── __init__.py
│       │   ├── getter.py
│       │   └── search.py
│       ├── logging_filter.py
│       ├── metrics_handler.py
│       ├── metrics.py
│       ├── openfda
│       │   ├── __init__.py
│       │   ├── adverse_events_helpers.py
│       │   ├── adverse_events.py
│       │   ├── cache.py
│       │   ├── constants.py
│       │   ├── device_events_helpers.py
│       │   ├── device_events.py
│       │   ├── drug_approvals.py
│       │   ├── drug_labels_helpers.py
│       │   ├── drug_labels.py
│       │   ├── drug_recalls_helpers.py
│       │   ├── drug_recalls.py
│       │   ├── drug_shortages_detail_helpers.py
│       │   ├── drug_shortages_helpers.py
│       │   ├── drug_shortages.py
│       │   ├── exceptions.py
│       │   ├── input_validation.py
│       │   ├── rate_limiter.py
│       │   ├── utils.py
│       │   └── validation.py
│       ├── organizations
│       │   ├── __init__.py
│       │   ├── getter.py
│       │   └── search.py
│       ├── parameter_parser.py
│       ├── prefetch.py
│       ├── query_parser.py
│       ├── query_router.py
│       ├── rate_limiter.py
│       ├── render.py
│       ├── request_batcher.py
│       ├── resources
│       │   ├── __init__.py
│       │   ├── getter.py
│       │   ├── instructions.md
│       │   └── researcher.md
│       ├── retry.py
│       ├── router_handlers.py
│       ├── router.py
│       ├── shared_context.py
│       ├── thinking
│       │   ├── __init__.py
│       │   ├── sequential.py
│       │   └── session.py
│       ├── thinking_tool.py
│       ├── thinking_tracker.py
│       ├── trials
│       │   ├── __init__.py
│       │   ├── getter.py
│       │   ├── nci_getter.py
│       │   ├── nci_search.py
│       │   └── search.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cancer_types_api.py
│       │   ├── cbio_http_adapter.py
│       │   ├── endpoint_registry.py
│       │   ├── gene_validator.py
│       │   ├── metrics.py
│       │   ├── mutation_filter.py
│       │   ├── query_utils.py
│       │   ├── rate_limiter.py
│       │   └── request_cache.py
│       ├── variants
│       │   ├── __init__.py
│       │   ├── alphagenome.py
│       │   ├── cancer_types.py
│       │   ├── cbio_external_client.py
│       │   ├── cbioportal_mutations.py
│       │   ├── cbioportal_search_helpers.py
│       │   ├── cbioportal_search.py
│       │   ├── constants.py
│       │   ├── external.py
│       │   ├── filters.py
│       │   ├── getter.py
│       │   ├── links.py
│       │   └── search.py
│       └── workers
│           ├── __init__.py
│           ├── worker_entry_stytch.js
│           ├── worker_entry.js
│           └── worker.py
├── tests
│   ├── bdd
│   │   ├── cli_help
│   │   │   ├── help.feature
│   │   │   └── test_help.py
│   │   ├── conftest.py
│   │   ├── features
│   │   │   └── alphagenome_integration.feature
│   │   ├── fetch_articles
│   │   │   ├── fetch.feature
│   │   │   └── test_fetch.py
│   │   ├── get_trials
│   │   │   ├── get.feature
│   │   │   └── test_get.py
│   │   ├── get_variants
│   │   │   ├── get.feature
│   │   │   └── test_get.py
│   │   ├── search_articles
│   │   │   ├── autocomplete.feature
│   │   │   ├── search.feature
│   │   │   ├── test_autocomplete.py
│   │   │   └── test_search.py
│   │   ├── search_trials
│   │   │   ├── search.feature
│   │   │   └── test_search.py
│   │   ├── search_variants
│   │   │   ├── search.feature
│   │   │   └── test_search.py
│   │   └── steps
│   │       └── test_alphagenome_steps.py
│   ├── config
│   │   └── test_smithery_config.py
│   ├── conftest.py
│   ├── data
│   │   ├── ct_gov
│   │   │   ├── clinical_trials_api_v2.yaml
│   │   │   ├── trials_NCT04280705.json
│   │   │   └── trials_NCT04280705.txt
│   │   ├── myvariant
│   │   │   ├── myvariant_api.yaml
│   │   │   ├── myvariant_field_descriptions.csv
│   │   │   ├── variants_full_braf_v600e.json
│   │   │   ├── variants_full_braf_v600e.txt
│   │   │   └── variants_part_braf_v600_multiple.json
│   │   ├── openfda
│   │   │   ├── drugsfda_detail.json
│   │   │   ├── drugsfda_search.json
│   │   │   ├── enforcement_detail.json
│   │   │   └── enforcement_search.json
│   │   └── pubtator
│   │       ├── pubtator_autocomplete.json
│   │       └── pubtator3_paper.txt
│   ├── integration
│   │   ├── test_openfda_integration.py
│   │   ├── test_preprints_integration.py
│   │   ├── test_simple.py
│   │   └── test_variants_integration.py
│   ├── tdd
│   │   ├── articles
│   │   │   ├── test_autocomplete.py
│   │   │   ├── test_cbioportal_integration.py
│   │   │   ├── test_fetch.py
│   │   │   ├── test_preprints.py
│   │   │   ├── test_search.py
│   │   │   └── test_unified.py
│   │   ├── conftest.py
│   │   ├── drugs
│   │   │   ├── __init__.py
│   │   │   └── test_drug_getter.py
│   │   ├── openfda
│   │   │   ├── __init__.py
│   │   │   ├── test_adverse_events.py
│   │   │   ├── test_device_events.py
│   │   │   ├── test_drug_approvals.py
│   │   │   ├── test_drug_labels.py
│   │   │   ├── test_drug_recalls.py
│   │   │   ├── test_drug_shortages.py
│   │   │   └── test_security.py
│   │   ├── test_biothings_integration_real.py
│   │   ├── test_biothings_integration.py
│   │   ├── test_circuit_breaker.py
│   │   ├── test_concurrent_requests.py
│   │   ├── test_connection_pool.py
│   │   ├── test_domain_handlers.py
│   │   ├── test_drug_approvals.py
│   │   ├── test_drug_recalls.py
│   │   ├── test_drug_shortages.py
│   │   ├── test_endpoint_documentation.py
│   │   ├── test_error_scenarios.py
│   │   ├── test_europe_pmc_fetch.py
│   │   ├── test_mcp_integration.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_metrics.py
│   │   ├── test_nci_integration.py
│   │   ├── test_nci_mcp_tools.py
│   │   ├── test_network_policies.py
│   │   ├── test_offline_mode.py
│   │   ├── test_openfda_unified.py
│   │   ├── test_pten_r173_search.py
│   │   ├── test_render.py
│   │   ├── test_request_batcher.py.disabled
│   │   ├── test_retry.py
│   │   ├── test_router.py
│   │   ├── test_shared_context.py.disabled
│   │   ├── test_unified_biothings.py
│   │   ├── thinking
│   │   │   ├── __init__.py
│   │   │   └── test_sequential.py
│   │   ├── trials
│   │   │   ├── test_backward_compatibility.py
│   │   │   ├── test_getter.py
│   │   │   └── test_search.py
│   │   ├── utils
│   │   │   ├── test_gene_validator.py
│   │   │   ├── test_mutation_filter.py
│   │   │   ├── test_rate_limiter.py
│   │   │   └── test_request_cache.py
│   │   ├── variants
│   │   │   ├── constants.py
│   │   │   ├── test_alphagenome_api_key.py
│   │   │   ├── test_alphagenome_comprehensive.py
│   │   │   ├── test_alphagenome.py
│   │   │   ├── test_cbioportal_mutations.py
│   │   │   ├── test_cbioportal_search.py
│   │   │   ├── test_external_integration.py
│   │   │   ├── test_external.py
│   │   │   ├── test_extract_gene_aa_change.py
│   │   │   ├── test_filters.py
│   │   │   ├── test_getter.py
│   │   │   ├── test_links.py
│   │   │   └── test_search.py
│   │   └── workers
│   │       └── test_worker_sanitization.js
│   └── test_pydantic_ai_integration.py
├── THIRD_PARTY_ENDPOINTS.md
├── tox.ini
├── uv.lock
└── wrangler.toml
```

# Files

--------------------------------------------------------------------------------
/tests/data/myvariant/myvariant_field_descriptions.csv:
--------------------------------------------------------------------------------

```
field,"description"
"cadd._license","License information URL for the CADD data source."
"cadd.alt","Alternate allele for the variant in CADD."
"cadd.anc","Ancestral allele according to CADD analysis."
"cadd.annotype","Annotation type (e.g., CodingTranscript) from CADD."
"cadd.bstatistic","B-statistic score from CADD, related to conservation."
"cadd.chmm.bivflnk","ChromHMM state score: Flanking Bivalent TSS/Enh."
"cadd.chmm.enh","ChromHMM state score: Active Enhancer."
"cadd.chmm.enhbiv","ChromHMM state score: Bivalent Enhancer."
"cadd.chmm.het","ChromHMM state score: Heterochromatin."
"cadd.chmm.quies","ChromHMM state score: Quiescent/Low activity."
"cadd.chmm.reprpc","ChromHMM state score: Repressed Polycomb."
"cadd.chmm.reprpcwk","ChromHMM state score: Weak Repressed Polycomb."
"cadd.chmm.tssa","ChromHMM state score: Active TSS."
"cadd.chmm.tssaflnk","ChromHMM state score: Flanking Active TSS."
"cadd.chmm.tssbiv","ChromHMM state score: Bivalent TSS."
"cadd.chmm.tx","ChromHMM state score: Strong transcription."
"cadd.chmm.txflnk","ChromHMM state score: Transcribed at gene 5' and 3'."
"cadd.chmm.txwk","ChromHMM state score: Weak transcription."
"cadd.chmm.znfrpts","ChromHMM state score: ZNF genes & repeats."
"cadd.chrom","Chromosome number for the variant in CADD."
"cadd.consdetail","Detailed consequence of the variant (e.g., missense) from CADD."
"cadd.consequence","General consequence category (e.g., NON_SYNONYMOUS) from CADD."
"cadd.consscore","Conservation score from CADD."
"cadd.cpg","Indicator if the variant is in a CpG island (0 or 1)."
"cadd.dna.helt","DNA physical property: Helix twist value."
"cadd.dna.mgw","DNA physical property: Minor groove width value."
"cadd.dna.prot","DNA physical property: Propeller twist value."
"cadd.dna.roll","DNA physical property: Roll value."
"cadd.encode.exp","ENCODE gene expression value."
"cadd.encode.h3k27ac","ENCODE histone modification H3K27ac signal value."
"cadd.encode.h3k4me1","ENCODE histone modification H3K4me1 signal value."
"cadd.encode.h3k4me3","ENCODE histone modification H3K4me3 signal value."
"cadd.encode.nucleo","ENCODE nucleosome occupancy signal value."
"cadd.exon","Exon number and total exons (e.g., 15/18)."
"cadd.fitcons","FitCons score indicating functional impact based on evolutionary data."
"cadd.gc","GC content in the surrounding region."
"cadd.gene.ccds_id","Consensus CDS (CCDS) identifier for the gene."
"cadd.gene.cds.cdna_pos","Position of the variant within the cDNA sequence."
"cadd.gene.cds.cds_pos","Position of the variant within the coding sequence (CDS)."
"cadd.gene.cds.rel_cdna_pos","Relative position within the cDNA sequence."
"cadd.gene.cds.rel_cds_pos","Relative position within the coding sequence (CDS)."
"cadd.gene.feature_id","Ensembl transcript identifier (ENST)."
"cadd.gene.gene_id","Ensembl gene identifier (ENSG)."
"cadd.gene.genename","Gene symbol (e.g., BRAF)."
"cadd.gene.prot.domain","Protein domain affected by the variant."
"cadd.gene.prot.protpos","Amino acid position within the protein."
"cadd.gene.prot.rel_prot_pos","Relative position within the protein sequence."
"cadd.gerp.n","GERP++ Neutral rate score."
"cadd.gerp.rs","GERP++ Rejected Substitutions score (conservation score)."
"cadd.gerp.rs_pval","P-value associated with the GERP++ RS score."
"cadd.gerp.s","GERP++ S score, equivalent to RS score."
"cadd.grantham","Grantham score measuring physicochemical difference between amino acids."
"cadd.isderived","Indicates if the alternate allele is derived (TRUE/FALSE)."
"cadd.isknownvariant","Indicates if the variant is known in dbSNP (TRUE/FALSE)."
"cadd.istv","Indicates if the variant is a transversion (TRUE/FALSE)."
"cadd.length","Length of the variant (0 for SNVs)."
"cadd.mapability.20bp","Mapability score based on 20bp reads."
"cadd.mapability.35bp","Mapability score based on 35bp reads."
"cadd.min_dist_tse","Minimum distance to the nearest transcription start site end (TSE)."
"cadd.min_dist_tss","Minimum distance to the nearest transcription start site (TSS)."
"cadd.mutindex","Mutation index score from CADD."
"cadd.naa","New amino acid resulting from the variant."
"cadd.oaa","Original amino acid affected by the variant."
"cadd.phast_cons.mammalian","PhastCons conservation score across mammals."
"cadd.phast_cons.primate","PhastCons conservation score across primates."
"cadd.phast_cons.vertebrate","PhastCons conservation score across vertebrates."
hadd.phred,"CADD PHRED-like scaled score indicating deleteriousness."
"cadd.phylop.mammalian","PhyloP conservation score across mammals."
"cadd.phylop.primate",PhyloP conservation score across primates.
"cadd.phylop.vertebrate","PhyloP conservation score across vertebrates."
"cadd.polyphen.cat","PolyPhen-2 qualitative prediction (e.g., probably_damaging)."
"cadd.polyphen.val","PolyPhen-2 quantitative score (0 to 1)."
"cadd.pos","Genomic position of the variant (hg19)."
"cadd.rawscore","CADD raw score before scaling."
"cadd.ref","Reference allele for the variant in CADD."
"cadd.segway","Segway annotation for the genomic region."
"cadd.sift.cat","SIFT qualitative prediction (e.g., deleterious)."
"cadd.sift.val","SIFT quantitative score (0 to 1)."
"cadd.type","Type of variant (e.g., SNV)."
"cgi._license","License information URL for the CGI data source."
"cgi.association","Drug association type (Responsive, Resistant) from CGI."
"cgi.cdna","cDNA change notation (e.g., c.1799T>A) from CGI."
"cgi.drug","Drug name associated with the variant from CGI."
"cgi.evidence_level","Level of evidence for the drug association from CGI."
"cgi.gene","Gene symbol associated with the variant from CGI."
"cgi.primary_tumor_type",Primary tumor type associated with the CGI entry.
"cgi.protein_change","Protein change notation (e.g., BRAF:V600E) from CGI."
"cgi.region","Genomic region description from CGI."
"cgi.source","Source identifier (e.g., PubMed ID, ASCO abstract) from CGI."
"cgi.transcript","Transcript identifier associated with the CGI entry."
"chrom","Chromosome number for the variant."
"civic._license","License information URL for the CIViC data source."
"civic.alleleRegistryId","Allele Registry ID associated with the variant in CIViC."
"civic.clinvarIds","List of associated ClinVar Variation IDs."
"civic.comments.totalCount","Total number of comments associated with the CIViC variant entry."
"civic.contributors.curators.lastActionDate","Timestamp of the last action by a CIViC curator."
"civic.contributors.curators.totalActionCount","Total number of actions performed by a CIViC curator."
"civic.contributors.curators.uniqueActions.action","Type of action performed by a CIViC curator (e.g., REVISION_SUGGESTED)."
"civic.contributors.curators.uniqueActions.count","Count of a specific unique action by a CIViC curator."
"civic.contributors.curators.user.id","User ID of the CIViC curator."
"civic.contributors.editors.lastActionDate","Timestamp of the last action by a CIViC editor."
"civic.contributors.editors.totalActionCount","Total number of actions performed by a CIViC editor."
"civic.contributors.editors.uniqueActions.action","Type of action performed by a CIViC editor (e.g., REVISION_ACCEPTED)."
"civic.contributors.editors.uniqueActions.count","Count of a specific unique action by a CIViC editor."
"civic.contributors.editors.user.id","User ID of the CIViC editor."
"civic.coordinates.chromosome","Chromosome for the variant according to CIViC coordinates."
"civic.coordinates.coordinateType","Type of coordinate system used in CIViC (e.g., GENE_VARIANT_COORDINATE)."
"civic.coordinates.ensemblVersion","Ensembl version used for CIViC coordinates."
"civic.coordinates.referenceBases","Reference bases for the variant in CIViC coordinates."
"civic.coordinates.referenceBuild","Reference genome build used for CIViC coordinates (e.g., GRCH37)."
"civic.coordinates.representativeTranscript","Representative transcript ID used for CIViC coordinates."
"civic.coordinates.start","Start position of the variant in CIViC coordinates."
"civic.coordinates.stop","Stop position of the variant in CIViC coordinates."
"civic.coordinates.variantBases","Variant bases for the variant in CIViC coordinates."
"civic.creationActivity.createdAt","Timestamp when the CIViC variant entry was created."
"civic.creationActivity.user.displayName","Display name of the user who created the CIViC entry."
"civic.creationActivity.user.id","User ID of the creator of the CIViC entry."
"civic.creationActivity.user.role","Role of the user who created the CIViC entry (e.g., ADMIN)."
"civic.deprecated","Boolean indicating if the CIViC variant entry is deprecated."
"civic.feature.deprecated","Boolean indicating if the associated CIViC feature (gene) is deprecated."
"civic.feature.flagged","Boolean indicating if the associated CIViC feature (gene) is flagged."
"civic.feature.id","Internal CIViC ID for the associated feature (gene)."
"civic.feature.link","URL link to the associated CIViC feature page."
"civic.feature.name","Name of the associated CIViC feature (gene symbol)."
"civic.flags.totalCount","Total number of flags associated with the CIViC variant entry."
"civic.hgvsDescriptions","List of HGVS descriptions for the variant from CIViC."
"civic.id","Internal CIViC ID for the variant."
"civic.lastAcceptedRevisionEvent.originatingUser.displayName","Display name of the user whose revision was last accepted."
"civic.lastAcceptedRevisionEvent.originatingUser.id","User ID of the user whose revision was last accepted."
"civic.lastAcceptedRevisionEvent.originatingUser.role","Role of the user whose revision was last accepted."
"civic.lastSubmittedRevisionEvent.originatingUser.displayName","Display name of the user who last submitted a revision."
"civic.lastSubmittedRevisionEvent.originatingUser.id","User ID of the user who last submitted a revision."
"civic.lastSubmittedRevisionEvent.originatingUser.role","Role of the user who last submitted a revision."
"civic.maneSelectTranscript","MANE Select transcript HGVS description from CIViC."
"civic.molecularProfiles.evidenceItems.description","Textual description of the evidence item in CIViC."
"civic.molecularProfiles.evidenceItems.disease.diseaseAliases","List of aliases for the associated disease in CIViC."
"civic.molecularProfiles.evidenceItems.disease.diseaseUrl","URL link to the disease ontology page."
"civic.molecularProfiles.evidenceItems.disease.displayName","Display name of the associated disease in CIViC."
"civic.molecularProfiles.evidenceItems.disease.doid","Disease Ontology ID (DOID) for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.id","Internal CIViC ID for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.link","URL link to the associated CIViC disease page."
"civic.molecularProfiles.evidenceItems.disease.myDiseaseInfo.doDef","Disease Ontology definition for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.myDiseaseInfo.icd10","ICD-10 code(s) for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.myDiseaseInfo.icdo","ICD-O code for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.myDiseaseInfo.mesh","MeSH ID(s) for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.myDiseaseInfo.mondoId","Mondo Disease Ontology ID for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.myDiseaseInfo.ncit","NCI Thesaurus code(s) for the associated disease."
"civic.molecularProfiles.evidenceItems.disease.name","Name of the associated disease in CIViC."
"civic.molecularProfiles.evidenceItems.evidenceDirection","Direction of evidence (SUPPORTS, DOES_NOT_SUPPORT) in CIViC."
"civic.molecularProfiles.evidenceItems.evidenceLevel","Level of evidence (A, B, C, D, E) in CIViC."
"civic.molecularProfiles.evidenceItems.evidenceRating","Rating of the evidence (1-5 stars) in CIViC."
"civic.molecularProfiles.evidenceItems.evidenceType","Type of evidence (e.g., PREDICTIVE, DIAGNOSTIC) in CIViC."
"civic.molecularProfiles.evidenceItems.flagged","Boolean indicating if the CIViC evidence item is flagged."
"civic.molecularProfiles.evidenceItems.id","Internal CIViC ID for the evidence item."
"civic.molecularProfiles.evidenceItems.molecularProfile.id","Internal CIViC ID for the associated molecular profile."
"civic.molecularProfiles.evidenceItems.name","Name of the evidence item (e.g., EID79)."
"civic.molecularProfiles.evidenceItems.significance","Clinical significance of the evidence (e.g., SENSITIVITYRESPONSE, RESISTANCE) in CIViC."
"civic.molecularProfiles.evidenceItems.source.abstract","Abstract of the source publication from CIViC."
"civic.molecularProfiles.evidenceItems.source.authorString","Author list from the source publication."
"civic.molecularProfiles.evidenceItems.source.citation","Short citation format for the source publication."
"civic.molecularProfiles.evidenceItems.source.citationId","PubMed ID (PMID) or ASCO ID for the source."
"civic.molecularProfiles.evidenceItems.source.id","Internal CIViC ID for the source."
"civic.molecularProfiles.evidenceItems.source.journal","Journal name of the source publication."
"civic.molecularProfiles.evidenceItems.source.link","URL link to the associated CIViC source page."
"civic.molecularProfiles.evidenceItems.source.name","Formatted name of the source (e.g., PubMed: Howell et al., 2011)."
"civic.molecularProfiles.evidenceItems.source.openAccess","Boolean indicating if the source is open access."
"civic.molecularProfiles.evidenceItems.source.pmcId","PubMed Central ID (PMCID) if available."
"civic.molecularProfiles.evidenceItems.source.publicationDate","Publication date of the source."
"civic.molecularProfiles.evidenceItems.source.retracted","Boolean indicating if the source has been retracted."
"civic.molecularProfiles.evidenceItems.source.retractionDate","Date the source was retracted, if applicable."
"civic.molecularProfiles.evidenceItems.source.retractionNature","Nature of the retraction, if applicable."
"civic.molecularProfiles.evidenceItems.source.retractionReasons","Reason(s) for retraction, if applicable."
"civic.molecularProfiles.evidenceItems.source.sourceType","Type of source (e.g., PUBMED, ASCO)."
"civic.molecularProfiles.evidenceItems.source.sourceUrl","URL link to the original source."
"civic.molecularProfiles.evidenceItems.source.title","Title of the source publication."
"civic.molecularProfiles.evidenceItems.therapies.deprecated","Boolean indicating if the therapy entry is deprecated in CIViC."
"civic.molecularProfiles.evidenceItems.therapies.id","Internal CIViC ID for the therapy."
"civic.molecularProfiles.evidenceItems.therapies.link","URL link to the associated CIViC therapy page."
"civic.molecularProfiles.evidenceItems.therapies.name","Name of the therapy in CIViC."
"civic.molecularProfiles.evidenceItems.variantOrigin","Origin of the variant (SOMATIC, GERMLINE, NA) for the evidence item."
"civic.molecularProfiles.id","Internal CIViC ID for the molecular profile."
"civic.molecularProfiles.molecularProfileAliases","List of aliases for the molecular profile in CIViC."
"civic.molecularProfiles.molecularProfileScore","Score associated with the molecular profile in CIViC."
"civic.molecularProfiles.name","Name of the molecular profile in CIViC."
"civic.molecularProfiles.variants.id","Internal CIViC ID for the variant within the profile."
"civic.molecularProfiles.variants.link","URL link to the associated CIViC variant page."
"civic.molecularProfiles.variants.name","Name of the variant within the profile."
"civic.name","Name of the variant in CIViC (e.g., V600E)."
"civic.openCravatUrl","URL link to the OpenCRAVAT report for the variant."
"civic.openRevisionCount","Number of open revisions for the CIViC variant entry."
"civic.revisions.totalCount","Total number of revisions for the CIViC variant entry."
"civic.variantAliases","List of aliases for the variant in CIViC."
"civic.variantTypes.id","Internal CIViC ID for the variant type."
"civic.variantTypes.link","URL link to the associated CIViC variant type page."
"civic.variantTypes.name","Name of the variant type (e.g., Missense Variant)."
"civic.variantTypes.soid","Sequence Ontology ID for the variant type."
"clinvar._license","License information URL for the ClinVar data source."
"clinvar.allele_id","ClinVar Allele ID."
"clinvar.alt","Alternate allele in ClinVar."
"clinvar.chrom","Chromosome number in ClinVar."
"clinvar.cytogenic","Cytogenetic location (e.g., 7q34)."
"clinvar.gene.id","Entrez Gene ID associated with the ClinVar record."
"clinvar.gene.symbol","Gene symbol associated with the ClinVar record."
"clinvar.hg19.end","End position of the variant in hg19 assembly."
"clinvar.hg19.start","Start position of the variant in hg19 assembly."
"clinvar.hg38.end","End position of the variant in hg38 assembly."
"clinvar.hg38.start","Start position of the variant in hg38 assembly."
"clinvar.hgvs.coding","List of HGVS coding sequence notations."
"clinvar.hgvs.genomic","List of HGVS genomic sequence notations."
"clinvar.hgvs.protein","List of HGVS protein sequence notations."
"clinvar.omim","Associated Online Mendelian Inheritance in Man (OMIM) ID(s)."
"clinvar.rcv.accession","ClinVar RCV accession number (identifies a submitted interpretation)."
"clinvar.rcv.clinical_significance","Clinical significance assertion for the RCV record."
"clinvar.rcv.conditions.identifiers.human_phenotype_ontology","Associated Human Phenotype Ontology (HPO) IDs."
"clinvar.rcv.conditions.identifiers.medgen","Associated MedGen Concept Unique Identifier (CUI)."
"clinvar.rcv.conditions.identifiers.mesh","Associated Medical Subject Headings (MeSH) ID(s)."
"clinvar.rcv.conditions.identifiers.mondo","Associated Mondo Disease Ontology ID(s)."
"clinvar.rcv.conditions.identifiers.omim","Associated OMIM ID(s) for the condition."
"clinvar.rcv.conditions.identifiers.orphanet","Associated Orphanet ID(s)."
"clinvar.rcv.conditions.name","Name of the condition associated with the RCV record."
"clinvar.rcv.conditions.synonyms","Synonyms for the condition associated with the RCV record."
"clinvar.rcv.last_evaluated","Date the RCV record was last evaluated by the submitter."
"clinvar.rcv.number_submitters","Number of submitters for this interpretation."
"clinvar.rcv.origin","Origin of the allele (somatic, germline, etc.)."
"clinvar.rcv.preferred_name","Submitter's preferred name for the variant."
"clinvar.rcv.review_status","Review status of the ClinVar RCV record."
"clinvar.ref","Reference allele in ClinVar."
"clinvar.rsid","Associated dbSNP Reference SNP (rs) identifier."
"clinvar.type","Type of variant (e.g., single nucleotide variant)."
"clinvar.variant_id","ClinVar Variation ID."
"cosmic._license","License information URL for the COSMIC data source."
"cosmic.alt","Alternate allele in COSMIC (relative to reference)."
"cosmic.chrom","Chromosome number in COSMIC."
"cosmic.cosmic_id","COSMIC mutation identifier (e.g., COSM476)."
"cosmic.hg19.end","End position of the variant in hg19 assembly (COSMIC)."
"cosmic.hg19.start","Start position of the variant in hg19 assembly (COSMIC)."
"cosmic.mut_freq","Mutation frequency reported in COSMIC samples (?). UNKNOWN."
"cosmic.mut_nt","Nucleotide change reported in COSMIC (e.g., T>A)."
"cosmic.ref","Reference allele in COSMIC."
"cosmic.tumor_site","Primary tumor site where the mutation was observed in COSMIC."
"dbnsfp._license","License information URL for the dbNSFP data source."
"dbnsfp.aa.alt","Alternate amino acid predicted by dbNSFP."
"dbnsfp.aa.codon_degeneracy","Codon degeneracy value. UNKNOWN significance here."
"dbnsfp.aa.codonpos","Position within the codon (1, 2, or 3)."
"dbnsfp.aa.pos","Amino acid position in the protein sequence (for different transcripts)."
"dbnsfp.aa.ref","Reference amino acid from dbNSFP."
"dbnsfp.aa.refcodon","Reference codon sequence(s)."
"dbnsfp.alphamissense.pred","AlphaMissense prediction (Pathogenic/Benign) for different transcripts."
"dbnsfp.alphamissense.rankscore","AlphaMissense rank score (0-1, higher is more pathogenic)."
"dbnsfp.alphamissense.score","AlphaMissense raw score for different transcripts."
"dbnsfp.alt","Alternate allele in dbNSFP."
"dbnsfp.ancestral_allele","Predicted ancestral allele from dbNSFP."
"dbnsfp.appris","APPRIS annotation for the transcript (e.g., principal, alternative)."
"dbnsfp.bayesdel.add_af.pred","BayesDel prediction (Deleterious/Tolerated) incorporating allele frequency."
"dbnsfp.bayesdel.add_af.rankscore","BayesDel rank score incorporating allele frequency."
"dbnsfp.bayesdel.add_af.score","BayesDel score incorporating allele frequency."
"dbnsfp.bayesdel.no_af.pred","BayesDel prediction (Deleterious/Tolerated) without allele frequency."
"dbnsfp.bayesdel.no_af.rankscore","BayesDel rank score without allele frequency."
"dbnsfp.bayesdel.no_af.score","BayesDel score without allele frequency."
"dbnsfp.bstatistic.converted_rankscore","BStatistic converted rank score."
"dbnsfp.bstatistic.score","BStatistic raw score."
"dbnsfp.chrom","Chromosome number in dbNSFP."
"dbnsfp.clinpred.pred","ClinPred prediction (Deleterious/Benign)."
"dbnsfp.clinpred.rankscore","ClinPred rank score."
"dbnsfp.clinpred.score","ClinPred raw score."
"dbnsfp.clinvar.clinvar_id","Associated ClinVar Variation ID in dbNSFP."
"dbnsfp.clinvar.clnsig","ClinVar clinical significance assertions from dbNSFP."
"dbnsfp.clinvar.hgvs","HGVS genomic notation from ClinVar via dbNSFP."
"dbnsfp.clinvar.medgen","Associated MedGen CUIs from ClinVar via dbNSFP."
"dbnsfp.clinvar.omim","Associated OMIM IDs from ClinVar via dbNSFP."
"dbnsfp.clinvar.orphanet","Associated Orphanet IDs from ClinVar via dbNSFP."
"dbnsfp.clinvar.review","ClinVar review status from dbNSFP."
"dbnsfp.clinvar.trait","Associated traits/diseases from ClinVar via dbNSFP."
"dbnsfp.clinvar.var_source","Sources cited for the ClinVar entry via dbNSFP."
"dbnsfp.dann.rankscore","DANN rank score."
"dbnsfp.dann.score","DANN raw score for predicting deleteriousness."
"dbnsfp.deogen2.pred","DEOGEN2 prediction (Deleterious/Tolerated)."
"dbnsfp.deogen2.rankscore","DEOGEN2 rank score."
"dbnsfp.deogen2.score","DEOGEN2 raw score."
"dbnsfp.eigen.phred_coding","Eigen Phred-scaled score for coding variants."
"dbnsfp.eigen.raw_coding","Eigen raw score for coding variants."
"dbnsfp.eigen.raw_coding_rankscore","Eigen rank score for coding variants."
"dbnsfp.eigen-pc.phred_coding","Eigen-PC Phred-scaled score for coding variants (principal components)."
"dbnsfp.eigen-pc.raw_coding","Eigen-PC raw score for coding variants (principal components)."
"dbnsfp.eigen-pc.raw_coding_rankscore","Eigen-PC rank score for coding variants (principal components)."
"dbnsfp.ensembl.geneid","Ensembl Gene ID from dbNSFP."
"dbnsfp.ensembl.proteinid","Ensembl Protein ID from dbNSFP."
"dbnsfp.ensembl.transcriptid","Ensembl Transcript ID from dbNSFP."
"dbnsfp.esm1b.pred","ESM-1b prediction (Deleterious/Benign)."
"dbnsfp.esm1b.rankscore","ESM-1b rank score."
"dbnsfp.esm1b.score","ESM-1b raw score."
"dbnsfp.eve.class10_pred","EVE prediction class (Pathogenic/Benign/Uncertain) at 10% FDR."
"dbnsfp.eve.class20_pred","EVE prediction class at 20% FDR."
"dbnsfp.eve.class25_pred","EVE prediction class at 25% FDR."
"dbnsfp.eve.class30_pred","EVE prediction class at 30% FDR."
"dbnsfp.eve.class40_pred","EVE prediction class at 40% FDR."
"dbnsfp.eve.class50_pred","EVE prediction class at 50% FDR."
"dbnsfp.eve.class60_pred","EVE prediction class at 60% FDR."
"dbnsfp.eve.class70_pred","EVE prediction class at 70% FDR."
"dbnsfp.eve.class75_pred","EVE prediction class at 75% FDR."
"dbnsfp.eve.class80_pred","EVE prediction class at 80% FDR."
"dbnsfp.eve.class90_pred","EVE prediction class at 90% FDR."
"dbnsfp.eve.rankscore","EVE rank score."
"dbnsfp.eve.score","EVE raw score."
"dbnsfp.exac.ac","Allele count in ExAC database from dbNSFP."
"dbnsfp.exac.adj_ac","Adjusted allele count in ExAC from dbNSFP."
"dbnsfp.exac.adj_af","Adjusted allele frequency in ExAC from dbNSFP."
"dbnsfp.exac.af","Allele frequency in ExAC database from dbNSFP."
"dbnsfp.exac.afr.ac","Allele count in ExAC African population."
"dbnsfp.exac.afr.af","Allele frequency in ExAC African population."
"dbnsfp.exac.amr.ac","Allele count in ExAC American population."
"dbnsfp.exac.amr.af","Allele frequency in ExAC American population."
"dbnsfp.exac.eas.ac","Allele count in ExAC East Asian population."
"dbnsfp.exac.eas.af","Allele frequency in ExAC East Asian population."
"dbnsfp.exac.fin.ac","Allele count in ExAC Finnish population."
"dbnsfp.exac.fin.af","Allele frequency in ExAC Finnish population."
"dbnsfp.exac.nfe.ac","Allele count in ExAC Non-Finnish European population."
"dbnsfp.exac.nfe.af","Allele frequency in ExAC Non-Finnish European population."
"dbnsfp.exac.sas.ac","Allele count in ExAC South Asian population."
"dbnsfp.exac.sas.af","Allele frequency in ExAC South Asian population."
"dbnsfp.exac_nonpsych.ac","Allele count in ExAC non-psychiatric subset."
"dbnsfp.exac_nonpsych.adj_ac","Adjusted allele count in ExAC non-psychiatric subset."
"dbnsfp.exac_nonpsych.adj_af","Adjusted allele frequency in ExAC non-psychiatric subset."
"dbnsfp.exac_nonpsych.af","Allele frequency in ExAC non-psychiatric subset."
"dbnsfp.exac_nonpsych.afr.ac","Allele count in ExAC non-psych African population."
"dbnsfp.exac_nonpsych.afr.af","Allele frequency in ExAC non-psych African population."
"dbnsfp.exac_nonpsych.amr.ac","Allele count in ExAC non-psych American population."
"dbnsfp.exac_nonpsych.amr.af","Allele frequency in ExAC non-psych American population."
"dbnsfp.exac_nonpsych.eas.ac","Allele count in ExAC non-psych East Asian population."
"dbnsfp.exac_nonpsych.eas.af","Allele frequency in ExAC non-psych East Asian population."
"dbnsfp.exac_nonpsych.fin.ac","Allele count in ExAC non-psych Finnish population."
"dbnsfp.exac_nonpsych.fin.af","Allele frequency in ExAC non-psych Finnish population."
"dbnsfp.exac_nonpsych.nfe.ac","Allele count in ExAC non-psych Non-Finnish European population."
"dbnsfp.exac_nonpsych.nfe.af","Allele frequency in ExAC non-psych Non-Finnish European population."
"dbnsfp.exac_nonpsych.sas.ac","Allele count in ExAC non-psych South Asian population."
"dbnsfp.exac_nonpsych.sas.af","Allele frequency in ExAC non-psych South Asian population."
"dbnsfp.exac_nontcga.ac","Allele count in ExAC non-TCGA subset."
"dbnsfp.exac_nontcga.adj_ac","Adjusted allele count in ExAC non-TCGA subset."
"dbnsfp.exac_nontcga.adj_af","Adjusted allele frequency in ExAC non-TCGA subset."
"dbnsfp.exac_nontcga.af","Allele frequency in ExAC non-TCGA subset."
"dbnsfp.exac_nontcga.afr.ac","Allele count in ExAC non-TCGA African population."
"dbnsfp.exac_nontcga.afr.af","Allele frequency in ExAC non-TCGA African population."
"dbnsfp.exac_nontcga.amr.ac","Allele count in ExAC non-TCGA American population."
"dbnsfp.exac_nontcga.amr.af","Allele frequency in ExAC non-TCGA American population."
"dbnsfp.exac_nontcga.eas.ac","Allele count in ExAC non-TCGA East Asian population."
"dbnsfp.exac_nontcga.eas.af","Allele frequency in ExAC non-TCGA East Asian population."
"dbnsfp.exac_nontcga.fin.ac","Allele count in ExAC non-TCGA Finnish population."
"dbnsfp.exac_nontcga.fin.af","Allele frequency in ExAC non-TCGA Finnish population."
"dbnsfp.exac_nontcga.nfe.ac","Allele count in ExAC non-TCGA Non-Finnish European population."
"dbnsfp.exac_nontcga.nfe.af","Allele frequency in ExAC non-TCGA Non-Finnish European population."
"dbnsfp.exac_nontcga.sas.ac","Allele count in ExAC non-TCGA South Asian population."
"dbnsfp.exac_nontcga.sas.af","Allele frequency in ExAC non-TCGA South Asian population."
"dbnsfp.fathmm-mkl.coding_group","FATHMM-MKL coding group assignment."
"dbnsfp.fathmm-mkl.coding_pred","FATHMM-MKL prediction (Deleterious/Neutral) for coding variants."
"dbnsfp.fathmm-mkl.coding_rankscore","FATHMM-MKL rank score for coding variants."
"dbnsfp.fathmm-mkl.coding_score","FATHMM-MKL raw score for coding variants."
"dbnsfp.fathmm-xf.coding_pred","FATHMM-XF prediction (Deleterious/Neutral) for coding variants."
"dbnsfp.fathmm-xf.coding_rankscore","FATHMM-XF rank score for coding variants."
"dbnsfp.fathmm-xf.coding_score","FATHMM-XF raw score for coding variants."
"dbnsfp.fitcons.gm12878.confidence_value","FitCons confidence value in GM12878 cell line."
"dbnsfp.fitcons.gm12878.rankscore","FitCons rank score in GM12878 cell line."
"dbnsfp.fitcons.gm12878.score","FitCons raw score in GM12878 cell line."
"dbnsfp.fitcons.h1-hesc.confidence_value","FitCons confidence value in H1-hESC cell line."
"dbnsfp.fitcons.h1-hesc.rankscore","FitCons rank score in H1-hESC cell line."
"dbnsfp.fitcons.h1-hesc.score","FitCons raw score in H1-hESC cell line."
"dbnsfp.fitcons.huvec.confidence_value","FitCons confidence value in HUVEC cell line."
"dbnsfp.fitcons.huvec.rankscore","FitCons rank score in HUVEC cell line."
"dbnsfp.fitcons.huvec.score","FitCons raw score in HUVEC cell line."
"dbnsfp.fitcons.integrated.confidence_value","Integrated FitCons confidence value across cell lines."
"dbnsfp.fitcons.integrated.rankscore","Integrated FitCons rank score across cell lines."
"dbnsfp.fitcons.integrated.score","Integrated FitCons raw score across cell lines."
"dbnsfp.gencode_basic","Indicates if transcript is part of GENCODE basic set (Y/N)."
"dbnsfp.genename","Gene name(s) from dbNSFP."
"dbnsfp.genocanyon.rankscore","GenoCanyon rank score."
"dbnsfp.genocanyon.score","GenoCanyon raw score for functional prediction."
"dbnsfp.gerp++.nr","GERP++ Neutral Rate score."
"dbnsfp.gerp++.rs","GERP++ Rejected Substitutions score (conservation)."
"dbnsfp.gerp++.rs_rankscore","GERP++ RS rank score."
"dbnsfp.gmvp.rankscore","GMVP (Genome-Wide Missense Variant Pathogenicity) rank score."
"dbnsfp.gmvp.score","GMVP raw score."
"dbnsfp.hg18.end","End position in hg18 assembly."
"dbnsfp.hg18.start","Start position in hg18 assembly."
"dbnsfp.hg19.end","End position in hg19 assembly."
"dbnsfp.hg19.start","Start position in hg19 assembly."
"dbnsfp.hg38.end","End position in hg38 assembly."
"dbnsfp.hg38.start","Start position in hg38 assembly."
"dbnsfp.hgvsc","HGVS coding sequence notation(s) from dbNSFP."
"dbnsfp.hgvsp","HGVS protein sequence notation(s) from dbNSFP."
"dbnsfp.interpro.domain","InterPro protein domain annotation(s)."
"dbnsfp.list-s2.pred","LIST-S2 prediction (Tolerated/Damaging)."
"dbnsfp.list-s2.rankscore","LIST-S2 rank score."
"dbnsfp.list-s2.score","LIST-S2 raw score."
"dbnsfp.lrt.converted_rankscore","LRT converted rank score."
"dbnsfp.lrt.omega","LRT omega value (dN/dS ratio)."
"dbnsfp.lrt.pred","LRT prediction (Deleterious/Neutral/Unknown)."
"dbnsfp.lrt.score","LRT raw score (likelihood ratio test)."
"dbnsfp.m-cap.pred","M-CAP prediction (Deleterious/Tolerated)."
"dbnsfp.m-cap.rankscore","M-CAP rank score."
"dbnsfp.m-cap.score","M-CAP raw score."
"dbnsfp.metalr.pred","MetaLR prediction (Tolerated/Damaging)."
"dbnsfp.metalr.rankscore","MetaLR rank score."
"dbnsfp.metalr.score","MetaLR raw score."
"dbnsfp.metarnn.pred","MetaRNN prediction (Deleterious/Benign)."
"dbnsfp.metarnn.rankscore","MetaRNN rank score."
"dbnsfp.metarnn.score","MetaRNN raw score."
"dbnsfp.metasvm.pred","MetaSVM prediction (Tolerated/Damaging)."
"dbnsfp.metasvm.rankscore","MetaSVM rank score."
"dbnsfp.metasvm.score","MetaSVM raw score."
"dbnsfp.mpc.rankscore","MPC (Missense badness, PolyPhen-2, and Constraint) rank score."
"dbnsfp.mpc.score","MPC raw score."
"dbnsfp.mutationassessor.pred","MutationAssessor prediction (high/medium/low/neutral functional impact)."
"dbnsfp.mutationassessor.rankscore","MutationAssessor rank score."
"dbnsfp.mutationassessor.score","MutationAssessor raw score (functional impact score)."
"dbnsfp.mutationtaster.aae","Amino acid change predicted by MutationTaster."
"dbnsfp.mutationtaster.converted_rankscore","MutationTaster converted rank score."
"dbnsfp.mutationtaster.model","MutationTaster model used for prediction."
"dbnsfp.mutationtaster.pred","MutationTaster prediction (disease_causing_automatic/polymorphism_automatic)."
"dbnsfp.mutationtaster.score","MutationTaster raw score (probability of being deleterious)."
"dbnsfp.mutformer.rankscore","MutFormer rank score."
"dbnsfp.mutformer.score","MutFormer raw score."
"dbnsfp.mutpred.aa_change","Amino acid change considered by MutPred."
"dbnsfp.mutpred.accession","UniProt accession used by MutPred."
"dbnsfp.mutpred.pred.mechanism","Molecular mechanism predicted by MutPred to be affected."
"dbnsfp.mutpred.pred.p_val","P-value associated with the MutPred mechanism prediction."
"dbnsfp.mutpred.rankscore","MutPred rank score."
"dbnsfp.mutpred.score","MutPred raw score (probability of being deleterious)."
"dbnsfp.mvp.rankscore","MVP (Missense Variant Pathogenicity) rank score."
"dbnsfp.mvp.score","MVP raw score."
"dbnsfp.phactboost.rankscore","phACTboost rank score."
"dbnsfp.phactboost.score","phACTboost raw score."
"dbnsfp.phastcons.100way_vertebrate.rankscore","PhastCons 100-way vertebrate conservation rank score."
"dbnsfp.phastcons.100way_vertebrate.score","PhastCons 100-way vertebrate conservation score."
"dbnsfp.phastcons.17way_primate.rankscore","PhastCons 17-way primate conservation rank score."
"dbnsfp.phastcons.17way_primate.score","PhastCons 17-way primate conservation score."
"dbnsfp.phastcons.470way_mammalian.rankscore","PhastCons 470-way mammalian conservation rank score."
"dbnsfp.phastcons.470way_mammalian.score","PhastCons 470-way mammalian conservation score."
"dbnsfp.phylop.100way_vertebrate.rankscore","PhyloP 100-way vertebrate conservation rank score."
"dbnsfp.phylop.100way_vertebrate.score","PhyloP 100-way vertebrate conservation score."
"dbnsfp.phylop.17way_primate.rankscore","PhyloP 17-way primate conservation rank score."
"dbnsfp.phylop.17way_primate.score","PhyloP 17-way primate conservation score."
"dbnsfp.phylop.470way_mammalian.rankscore","PhyloP 470-way mammalian conservation rank score."
"dbnsfp.phylop.470way_mammalian.score","PhyloP 470-way mammalian conservation score."
"dbnsfp.polyphen2.hdiv.pred","PolyPhen-2 HDIV prediction (Probably_damaging/Possibly_damaging/Benign)."
"dbnsfp.polyphen2.hdiv.rankscore","PolyPhen-2 HDIV rank score."
"dbnsfp.polyphen2.hdiv.score","PolyPhen-2 HDIV raw score."
"dbnsfp.polyphen2.hvar.pred","PolyPhen-2 HVAR prediction (Probably_damaging/Possibly_damaging/Benign)."
"dbnsfp.polyphen2.hvar.rankscore","PolyPhen-2 HVAR rank score."
"dbnsfp.polyphen2.hvar.score","PolyPhen-2 HVAR raw score."
"dbnsfp.primateai.pred","PrimateAI prediction (Deleterious/Tolerated)."
"dbnsfp.primateai.rankscore","PrimateAI rank score."
"dbnsfp.primateai.score","PrimateAI raw score."
"dbnsfp.ref","Reference allele in dbNSFP."
"dbnsfp.reliability_index","dbNSFP internal reliability index. UNKNOWN significance."
"dbnsfp.revel.rankscore","REVEL (Rare Exome Variant Ensemble Learner) rank score."
"dbnsfp.revel.score","REVEL raw score."
"dbnsfp.rsid","Associated dbSNP rsID from dbNSFP."
"dbnsfp.siphy_29way.logodds_rankscore","SiPhy 29-way log-odds rank score."
"dbnsfp.siphy_29way.logodds_score","SiPhy 29-way log-odds conservation score."
"dbnsfp.siphy_29way.pi.a","SiPhy estimated probability of A at this position."
"dbnsfp.siphy_29way.pi.c","SiPhy estimated probability of C at this position."
"dbnsfp.siphy_29way.pi.g","SiPhy estimated probability of G at this position."
"dbnsfp.siphy_29way.pi.t","SiPhy estimated probability of T at this position."
"dbnsfp.tsl","Transcript Support Level from Ensembl."
"dbnsfp.uniprot.acc","UniProt accession number(s)."
"dbnsfp.uniprot.entry","UniProt entry name(s)."
"dbnsfp.varity.er.rankscore","VARITY_ER (evidence-based ranking) rank score."
"dbnsfp.varity.er.score","VARITY_ER raw score."
"dbnsfp.varity.er_loo.rankscore","VARITY_ER_LOO (leave-one-out) rank score."
"dbnsfp.varity.er_loo.score","VARITY_ER_LOO raw score."
"dbnsfp.varity.r.rankscore","VARITY_R (rule-based ranking) rank score."
"dbnsfp.varity.r.score","VARITY_R raw score."
"dbnsfp.varity.r_loo.rankscore","VARITY_R_LOO (leave-one-out) rank score."
"dbnsfp.varity.r_loo.score","VARITY_R_LOO raw score."
"dbnsfp.vep_canonical","Indicates if the transcript is the VEP canonical transcript (YES/NO)."
"dbsnp._license","License information URL for the dbSNP data source."
"dbsnp.alleles.allele","Allele base (A, C, G, or T)."
"dbsnp.alleles.freq.exac","Allele frequency in ExAC as reported by dbSNP."
"dbsnp.alleles.freq.gnomad_exomes","Allele frequency in gnomAD exomes as reported by dbSNP."
"dbsnp.alt","Alternate allele(s) in dbSNP."
"dbsnp.chrom","Chromosome number in dbSNP."
"dbsnp.citations","List of PubMed IDs citing this dbSNP entry."
"dbsnp.dbsnp_build","dbSNP build number when the information was extracted."
"dbsnp.gene.geneid","Entrez Gene ID associated with the dbSNP record."
"dbsnp.gene.is_pseudo","Boolean indicating if the associated gene is a pseudogene."
"dbsnp.gene.name","Full name of the associated gene."
"dbsnp.gene.rnas.codon_aligned_transcript_change.deleted_sequence","Deleted sequence in codon-aligned transcript context."
"dbsnp.gene.rnas.codon_aligned_transcript_change.inserted_sequence","Inserted sequence in codon-aligned transcript context."
"dbsnp.gene.rnas.codon_aligned_transcript_change.position","Position of change in codon-aligned transcript context."
"dbsnp.gene.rnas.codon_aligned_transcript_change.seq_id","Sequence ID for codon-aligned transcript context."
"dbsnp.gene.rnas.hgvs","HGVS notation for the specific RNA transcript."
"dbsnp.gene.rnas.protein.variant.spdi.deleted_sequence","Deleted sequence in SPDI protein context."
"dbsnp.gene.rnas.protein.variant.spdi.inserted_sequence","Inserted sequence in SPDI protein context."
"dbsnp.gene.rnas.protein.variant.spdi.position","Position of change in SPDI protein context."
"dbsnp.gene.rnas.protein.variant.spdi.seq_id","Sequence ID for SPDI protein context."
"dbsnp.gene.rnas.protein_product.refseq","RefSeq protein product identifier (NP_)."
"dbsnp.gene.rnas.refseq","RefSeq RNA transcript identifier (NM_ or XM_)."
"dbsnp.gene.rnas.so.accession","Sequence Ontology term accession (SO:...)."
"dbsnp.gene.rnas.so.name","Sequence Ontology term name (e.g., coding_sequence_variant)."
"dbsnp.gene.strand","Gene strand (+ or -)."
"dbsnp.gene.symbol","Gene symbol (e.g., BRAF)."
"dbsnp.hg19.end","End position in hg19 assembly (dbSNP)."
"dbsnp.hg19.start","Start position in hg19 assembly (dbSNP)."
"dbsnp.ref","Reference allele in dbSNP."
"dbsnp.rsid","dbSNP Reference SNP (rs) identifier."
"dbsnp.vartype","Type of variation (e.g., snv)."
"docm.aa_change","Amino acid change notation (e.g., p.V600E) from DOCM."
"docm.all_domains","All protein domains overlapping the variant position from DOCM."
"docm.alt","Alternate allele in DOCM."
"docm.c_position","cDNA position notation (e.g., c.1799) from DOCM."
"docm.chrom","Chromosome number in DOCM."
"docm.default_gene_name","Default gene name used in DOCM."
"docm.deletion_substructures","Substructure information for deletions (often '-'). UNKNOWN."
"docm.disease","Disease associated with the variant in DOCM."
"docm.doid","Disease Ontology ID (DOID) associated with the variant in DOCM."
"docm.domain","Specific protein domain containing the variant from DOCM."
"docm.ensembl_gene_id","Ensembl gene ID from DOCM."
"docm.genename","Gene name from DOCM."
"docm.genename_source","Source of the gene name (e.g., HGNC) in DOCM."
"docm.hg19.end","End position in hg19 assembly (DOCM)."
"docm.hg19.start","Start position in hg19 assembly (DOCM)."
"docm.primary","Indicates if this is the primary transcript used (?). UNKNOWN."
"docm.pubmed_id","Associated PubMed IDs from DOCM."
"docm.ref","Reference allele in DOCM."
"docm.source","Original data source cited by DOCM (e.g., MyCancerGenome)."
"docm.strand","Genomic strand (+ or -) in DOCM."
"docm.transcript_error","Indicates errors found during transcript mapping in DOCM."
"docm.transcript_name","Transcript name used for annotation in DOCM."
"docm.transcript_source","Source of the transcript information (e.g., ensembl) in DOCM."
"docm.transcript_species","Species of the transcript (e.g., human) in DOCM."
"docm.transcript_status","Status of the transcript (e.g., known) in DOCM."
"docm.transcript_version","Version of the transcript used in DOCM."
"docm.trv_type","Type of transcript variation (e.g., missense) in DOCM."
"docm.type","Type of variant (e.g., SNP) in DOCM."
"docm.ucsc_cons","UCSC conservation score (?). UNKNOWN."
"docm.url","URL link to the source entry in DOCM."
"emv._license","License information URL for the EMV data source."
"emv.egl_classification","EGL classification of the variant (e.g., Pathogenic)."
"emv.egl_classification_date","Date of the EGL classification."
"emv.egl_protein","Protein change notation used by EGL."
"emv.egl_variant","Variant notation used by EGL (often HGVS coding)."
"emv.exon","Exon number containing the variant from EMV."
"emv.gene","Gene symbol from EMV."
"emv.hgvs","List of HGVS notations associated with the variant in EMV."
"emv.variant_id","Internal EMV variant identifier."
"exac._license","License information URL for the ExAC data source."
"exac.ac.ac","Total allele count in ExAC."
"exac.ac.ac_adj","Adjusted total allele count in ExAC (after filtering)."
"exac.ac.ac_afr","Allele count in ExAC African/African American population."
"exac.ac.ac_amr","Allele count in ExAC American population."
"exac.ac.ac_eas","Allele count in ExAC East Asian population."
"exac.ac.ac_female","Allele count in ExAC female population."
"exac.ac.ac_fin","Allele count in ExAC Finnish population."
"exac.ac.ac_het","Heterozygous allele count in ExAC."
"exac.ac.ac_hom","Homozygous allele count in ExAC."
"exac.ac.ac_male","Allele count in ExAC male population."
"exac.ac.ac_nfe","Allele count in ExAC Non-Finnish European population."
"exac.ac.ac_oth","Allele count in ExAC Other population."
"exac.ac.ac_sas","Allele count in ExAC South Asian population."
"exac.af","Allele frequency in ExAC."
"exac.alleles","Alternate allele(s) observed in ExAC."
"exac.alt","Alternate allele in ExAC format."
"exac.an.an","Total number of alleles genotyped in ExAC."
"exac.an.an_adj","Adjusted total number of alleles in ExAC (after filtering)."
"exac.an.an_afr","Number of alleles in ExAC African/African American population."
"exac.an.an_amr","Number of alleles in ExAC American population."
"exac.an.an_eas","Number of alleles in ExAC East Asian population."
"exac.an.an_female","Number of alleles in ExAC female population."
"exac.an.an_fin","Number of alleles in ExAC Finnish population."
"exac.an.an_male","Number of alleles in ExAC male population."
"exac.an.an_nfe","Number of alleles in ExAC Non-Finnish European population."
"exac.an.an_oth","Number of alleles in ExAC Other population."
"exac.an.an_sas","Number of alleles in ExAC South Asian population."
"exac.baseqranksum","ExAC BaseQRankSum test statistic (mapping quality difference)."
"exac.chrom","Chromosome number in ExAC."
"exac.clippingranksum","ExAC ClippingRankSum test statistic."
"exac.culprit","ExAC VQSR culprit annotation."
"exac.fs","ExAC FisherStrand bias score."
"exac.het.het_afr","Heterozygous count in ExAC African/African American population."
"exac.het.het_amr","Heterozygous count in ExAC American population."
"exac.het.het_eas","Heterozygous count in ExAC East Asian population."
"exac.het.het_fin","Heterozygous count in ExAC Finnish population."
"exac.het.het_nfe","Heterozygous count in ExAC Non-Finnish European population."
"exac.het.het_oth","Heterozygous count in ExAC Other population."
"exac.het.het_sas","Heterozygous count in ExAC South Asian population."
"exac.hom.hom_afr","Homozygous count in ExAC African/African American population."
"exac.hom.hom_amr","Homozygous count in ExAC American population."
"exac.hom.hom_eas","Homozygous count in ExAC East Asian population."
"exac.hom.hom_fin","Homozygous count in ExAC Finnish population."
"exac.hom.hom_nfe","Homozygous count in ExAC Non-Finnish European population."
"exac.hom.hom_oth","Homozygous count in ExAC Other population."
"exac.hom.hom_sas","Homozygous count in ExAC South Asian population."
"exac.inbreedingcoeff","ExAC Inbreeding Coefficient."
"exac.mq.mq","ExAC root mean square Mapping Quality."
"exac.mq.mq0","ExAC count of reads with mapping quality 0."
"exac.mq.mqranksum","ExAC MQRankSum test statistic (mapping quality difference ref vs alt)."
"exac.ncc","ExAC number of chromosomes carrying the variant in hemizygous state. UNKNOWN."
"exac.pos","Genomic position in ExAC (hg19)."
"exac.qd","ExAC Quality by Depth score."
"exac.readposranksum","ExAC ReadPosRankSum test statistic (position bias)."
"exac.ref","Reference allele in ExAC format."
"exac.type","Variant type in ExAC (e.g., snp)."
"exac.vqslod","ExAC Variant Quality Score Log-Odds."
"exac_nontcga._license","License information URL for the ExAC non-TCGA data source."
"exac_nontcga.ac.ac","Total allele count in ExAC non-TCGA subset."
"exac_nontcga.ac.ac_adj","Adjusted total allele count in ExAC non-TCGA subset."
"exac_nontcga.ac.ac_afr","Allele count in ExAC non-TCGA African/African American population."
"exac_nontcga.ac.ac_amr","Allele count in ExAC non-TCGA American population."
"exac_nontcga.ac.ac_eas","Allele count in ExAC non-TCGA East Asian population."
"exac_nontcga.ac.ac_female","Allele count in ExAC non-TCGA female population."
"exac_nontcga.ac.ac_fin","Allele count in ExAC non-TCGA Finnish population."
"exac_nontcga.ac.ac_het","Heterozygous allele count in ExAC non-TCGA subset."
"exac_nontcga.ac.ac_hom","Homozygous allele count in ExAC non-TCGA subset."
"exac_nontcga.ac.ac_male","Allele count in ExAC non-TCGA male population."
"exac_nontcga.ac.ac_nfe","Allele count in ExAC non-TCGA Non-Finnish European population."
"exac_nontcga.ac.ac_oth","Allele count in ExAC non-TCGA Other population."
"exac_nontcga.ac.ac_sas","Allele count in ExAC non-TCGA South Asian population."
"exac_nontcga.af","Allele frequency in ExAC non-TCGA subset."
"exac_nontcga.alleles","Alternate allele(s) observed in ExAC non-TCGA subset."
"exac_nontcga.alt","Alternate allele in ExAC non-TCGA format."
"exac_nontcga.an.an","Total number of alleles genotyped in ExAC non-TCGA subset."
"exac_nontcga.an.an_adj","Adjusted total number of alleles in ExAC non-TCGA subset."
"exac_nontcga.an.an_afr","Number of alleles in ExAC non-TCGA African/African American population."
"exac_nontcga.an.an_amr","Number of alleles in ExAC non-TCGA American population."
"exac_nontcga.an.an_eas","Number of alleles in ExAC non-TCGA East Asian population."
"exac_nontcga.an.an_female","Number of alleles in ExAC non-TCGA female population."
"exac_nontcga.an.an_fin","Number of alleles in ExAC non-TCGA Finnish population."
"exac_nontcga.an.an_male","Number of alleles in ExAC non-TCGA male population."
"exac_nontcga.an.an_nfe","Number of alleles in ExAC non-TCGA Non-Finnish European population."
"exac_nontcga.an.an_oth","Number of alleles in ExAC non-TCGA Other population."
"exac_nontcga.an.an_sas","Number of alleles in ExAC non-TCGA South Asian population."
"exac_nontcga.baseqranksum","ExAC non-TCGA BaseQRankSum test statistic."
"exac_nontcga.chrom","Chromosome number in ExAC non-TCGA subset."
"exac_nontcga.clippingranksum","ExAC non-TCGA ClippingRankSum test statistic."
"exac_nontcga.culprit","ExAC non-TCGA VQSR culprit annotation."
"exac_nontcga.fs","ExAC non-TCGA FisherStrand bias score."
"exac_nontcga.het.het_afr","Heterozygous count in ExAC non-TCGA African/African American population."
"exac_nontcga.het.het_amr","Heterozygous count in ExAC non-TCGA American population."
"exac_nontcga.het.het_eas","Heterozygous count in ExAC non-TCGA East Asian population."
"exac_nontcga.het.het_fin","Heterozygous count in ExAC non-TCGA Finnish population."
"exac_nontcga.het.het_nfe","Heterozygous count in ExAC non-TCGA Non-Finnish European population."
"exac_nontcga.het.het_oth","Heterozygous count in ExAC non-TCGA Other population."
"exac_nontcga.het.het_sas","Heterozygous count in ExAC non-TCGA South Asian population."
"exac_nontcga.hom.hom_afr","Homozygous count in ExAC non-TCGA African/African American population."
"exac_nontcga.hom.hom_amr","Homozygous count in ExAC non-TCGA American population."
"exac_nontcga.hom.hom_eas","Homozygous count in ExAC non-TCGA East Asian population."
"exac_nontcga.hom.hom_fin","Homozygous count in ExAC non-TCGA Finnish population."
"exac_nontcga.hom.hom_nfe","Homozygous count in ExAC non-TCGA Non-Finnish European population."
"exac_nontcga.hom.hom_oth","Homozygous count in ExAC non-TCGA Other population."
"exac_nontcga.hom.hom_sas","Homozygous count in ExAC non-TCGA South Asian population."
"exac_nontcga.inbreedingcoeff","ExAC non-TCGA Inbreeding Coefficient."
"exac_nontcga.mq.mq","ExAC non-TCGA root mean square Mapping Quality."
"exac_nontcga.mq.mq0","ExAC non-TCGA count of reads with mapping quality 0."
"exac_nontcga.mq.mqranksum","ExAC non-TCGA MQRankSum test statistic."
"exac_nontcga.ncc","ExAC non-TCGA number of hemizygous chromosomes. UNKNOWN."
"exac_nontcga.pos","Genomic position in ExAC non-TCGA (hg19)."
"exac_nontcga.qd","ExAC non-TCGA Quality by Depth score."
"exac_nontcga.readposranksum","ExAC non-TCGA ReadPosRankSum test statistic."
"exac_nontcga.ref","Reference allele in ExAC non-TCGA format."
"exac_nontcga.type","Variant type in ExAC non-TCGA (e.g., snp)."
"exac_nontcga.vqslod","ExAC non-TCGA Variant Quality Score Log-Odds."
"gnomad_exome._license","License information URL for the gnomAD exome data source."
"gnomad_exome.ac.ac","Total allele count in gnomAD exomes."
"gnomad_exome.ac.ac_afr","Allele count in gnomAD exomes African/African American population."
"gnomad_exome.ac.ac_afr_female","Allele count in gnomAD exomes African/African American female population."
"gnomad_exome.ac.ac_afr_male","Allele count in gnomAD exomes African/African American male population."
"gnomad_exome.ac.ac_amr","Allele count in gnomAD exomes American population."
"gnomad_exome.ac.ac_amr_female","Allele count in gnomAD exomes American female population."
"gnomad_exome.ac.ac_amr_male","Allele count in gnomAD exomes American male population."
"gnomad_exome.ac.ac_asj","Allele count in gnomAD exomes Ashkenazi Jewish population."
"gnomad_exome.ac.ac_asj_female","Allele count in gnomAD exomes Ashkenazi Jewish female population."
"gnomad_exome.ac.ac_asj_male","Allele count in gnomAD exomes Ashkenazi Jewish male population."
"gnomad_exome.ac.ac_eas","Allele count in gnomAD exomes East Asian population."
"gnomad_exome.ac.ac_eas_female","Allele count in gnomAD exomes East Asian female population."
"gnomad_exome.ac.ac_eas_jpn","Allele count in gnomAD exomes East Asian Japanese population."
"gnomad_exome.ac.ac_eas_kor","Allele count in gnomAD exomes East Asian Korean population."
"gnomad_exome.ac.ac_eas_male","Allele count in gnomAD exomes East Asian male population."
"gnomad_exome.ac.ac_eas_oea","Allele count in gnomAD exomes East Asian Other population."
"gnomad_exome.ac.ac_female","Total allele count in gnomAD exomes female population."
"gnomad_exome.ac.ac_fin","Allele count in gnomAD exomes Finnish population."
"gnomad_exome.ac.ac_fin_female","Allele count in gnomAD exomes Finnish female population."
"gnomad_exome.ac.ac_fin_male","Allele count in gnomAD exomes Finnish male population."
"gnomad_exome.ac.ac_male","Total allele count in gnomAD exomes male population."
"gnomad_exome.ac.ac_nfe","Allele count in gnomAD exomes Non-Finnish European population."
"gnomad_exome.ac.ac_nfe_bgr","Allele count in gnomAD exomes NFE Bulgarian population."
"gnomad_exome.ac.ac_nfe_est","Allele count in gnomAD exomes NFE Estonian population."
"gnomad_exome.ac.ac_nfe_female","Allele count in gnomAD exomes NFE female population."
"gnomad_exome.ac.ac_nfe_male","Allele count in gnomAD exomes NFE male population."
"gnomad_exome.ac.ac_nfe_nwe","Allele count in gnomAD exomes NFE North-Western European population."
"gnomad_exome.ac.ac_nfe_onf","Allele count in gnomAD exomes NFE Other Non-Finnish European population."
"gnomad_exome.ac.ac_nfe_seu","Allele count in gnomAD exomes NFE Southern European population."
"gnomad_exome.ac.ac_nfe_swe","Allele count in gnomAD exomes NFE Swedish population."
"gnomad_exome.ac.ac_oth","Allele count in gnomAD exomes Other population."
"gnomad_exome.ac.ac_oth_female","Allele count in gnomAD exomes Other female population."
"gnomad_exome.ac.ac_oth_male","Allele count in gnomAD exomes Other male population."
"gnomad_exome.ac.ac_sas","Allele count in gnomAD exomes South Asian population."
"gnomad_exome.ac.ac_sas_female","Allele count in gnomAD exomes South Asian female population."
"gnomad_exome.ac.ac_sas_male","Allele count in gnomAD exomes South Asian male population."
"gnomad_exome.af.af","Overall allele frequency in gnomAD exomes."
"gnomad_exome.af.af_afr","Allele frequency in gnomAD exomes African/African American population."
"gnomad_exome.af.af_afr_female","Allele frequency in gnomAD exomes African/African American female population."
"gnomad_exome.af.af_afr_male","Allele frequency in gnomAD exomes African/African American male population."
"gnomad_exome.af.af_amr","Allele frequency in gnomAD exomes American population."
"gnomad_exome.af.af_amr_female","Allele frequency in gnomAD exomes American female population."
"gnomad_exome.af.af_amr_male","Allele frequency in gnomAD exomes American male population."
"gnomad_exome.af.af_asj","Allele frequency in gnomAD exomes Ashkenazi Jewish population."
"gnomad_exome.af.af_asj_female","Allele frequency in gnomAD exomes Ashkenazi Jewish female population."
"gnomad_exome.af.af_asj_male","Allele frequency in gnomAD exomes Ashkenazi Jewish male population."
"gnomad_exome.af.af_eas","Allele frequency in gnomAD exomes East Asian population."
"gnomad_exome.af.af_eas_female","Allele frequency in gnomAD exomes East Asian female population."
"gnomad_exome.af.af_eas_jpn","Allele frequency in gnomAD exomes East Asian Japanese population."
"gnomad_exome.af.af_eas_kor","Allele frequency in gnomAD exomes East Asian Korean population."
"gnomad_exome.af.af_eas_male","Allele frequency in gnomAD exomes East Asian male population."
"gnomad_exome.af.af_eas_oea","Allele frequency in gnomAD exomes East Asian Other population."
"gnomad_exome.af.af_female","Overall allele frequency in gnomAD exomes female population."
"gnomad_exome.af.af_fin","Allele frequency in gnomAD exomes Finnish population."
"gnomad_exome.af.af_fin_female","Allele frequency in gnomAD exomes Finnish female population."
"gnomad_exome.af.af_fin_male","Allele frequency in gnomAD exomes Finnish male population."
"gnomad_exome.af.af_male","Overall allele frequency in gnomAD exomes male population."
"gnomad_exome.af.af_nfe","Allele frequency in gnomAD exomes Non-Finnish European population."
"gnomad_exome.af.af_nfe_bgr","Allele frequency in gnomAD exomes NFE Bulgarian population."
"gnomad_exome.af.af_nfe_est","Allele frequency in gnomAD exomes NFE Estonian population."
"gnomad_exome.af.af_nfe_female","Allele frequency in gnomAD exomes NFE female population."
"gnomad_exome.af.af_nfe_male","Allele frequency in gnomAD exomes NFE male population."
"gnomad_exome.af.af_nfe_nwe","Allele frequency in gnomAD exomes NFE North-Western European population."
"gnomad_exome.af.af_nfe_onf","Allele frequency in gnomAD exomes NFE Other Non-Finnish European population."
"gnomad_exome.af.af_nfe_seu","Allele frequency in gnomAD exomes NFE Southern European population."
"gnomad_exome.af.af_nfe_swe","Allele frequency in gnomAD exomes NFE Swedish population."
"gnomad_exome.af.af_oth","Allele frequency in gnomAD exomes Other population."
"gnomad_exome.af.af_oth_female","Allele frequency in gnomAD exomes Other female population."
"gnomad_exome.af.af_oth_male","Allele frequency in gnomAD exomes Other male population."
"gnomad_exome.af.af_sas","Allele frequency in gnomAD exomes South Asian population."
"gnomad_exome.af.af_sas_female","Allele frequency in gnomAD exomes South Asian female population."
"gnomad_exome.af.af_sas_male","Allele frequency in gnomAD exomes South Asian male population."
"gnomad_exome.alleles","Alternate allele(s) observed in gnomAD exomes."
"gnomad_exome.alt","Alternate allele in gnomAD exome format."
"gnomad_exome.an.an","Total number of alleles genotyped in gnomAD exomes."
"gnomad_exome.an.an_afr","Number of alleles in gnomAD exomes African/African American population."
"gnomad_exome.an.an_afr_female","Number of alleles in gnomAD exomes African/African American female population."
"gnomad_exome.an.an_afr_male","Number of alleles in gnomAD exomes African/African American male population."
"gnomad_exome.an.an_amr","Number of alleles in gnomAD exomes American population."
"gnomad_exome.an.an_amr_female","Number of alleles in gnomAD exomes American female population."
"gnomad_exome.an.an_amr_male","Number of alleles in gnomAD exomes American male population."
"gnomad_exome.an.an_asj","Number of alleles in gnomAD exomes Ashkenazi Jewish population."
"gnomad_exome.an.an_asj_female","Number of alleles in gnomAD exomes Ashkenazi Jewish female population."
"gnomad_exome.an.an_asj_male","Number of alleles in gnomAD exomes Ashkenazi Jewish male population."
"gnomad_exome.an.an_eas","Number of alleles in gnomAD exomes East Asian population."
"gnomad_exome.an.an_eas_female","Number of alleles in gnomAD exomes East Asian female population."
"gnomad_exome.an.an_eas_jpn","Number of alleles in gnomAD exomes East Asian Japanese population."
"gnomad_exome.an.an_eas_kor","Number of alleles in gnomAD exomes East Asian Korean population."
"gnomad_exome.an.an_eas_male","Number of alleles in gnomAD exomes East Asian male population."
"gnomad_exome.an.an_eas_oea","Number of alleles in gnomAD exomes East Asian Other population."
"gnomad_exome.an.an_female","Total number of alleles in gnomAD exomes female population."
"gnomad_exome.an.an_fin","Number of alleles in gnomAD exomes Finnish population."
"gnomad_exome.an.an_fin_female","Number of alleles in gnomAD exomes Finnish female population."
"gnomad_exome.an.an_fin_male","Number of alleles in gnomAD exomes Finnish male population."
"gnomad_exome.an.an_male","Total number of alleles in gnomAD exomes male population."
"gnomad_exome.an.an_nfe","Number of alleles in gnomAD exomes Non-Finnish European population."
"gnomad_exome.an.an_nfe_bgr","Number of alleles in gnomAD exomes NFE Bulgarian population."
"gnomad_exome.an.an_nfe_est","Number of alleles in gnomAD exomes NFE Estonian population."
"gnomad_exome.an.an_nfe_female","Number of alleles in gnomAD exomes NFE female population."
"gnomad_exome.an.an_nfe_male","Number of alleles in gnomAD exomes NFE male population."
"gnomad_exome.an.an_nfe_nwe","Number of alleles in gnomAD exomes NFE North-Western European population."
"gnomad_exome.an.an_nfe_onf","Number of alleles in gnomAD exomes NFE Other Non-Finnish European population."
"gnomad_exome.an.an_nfe_seu","Number of alleles in gnomAD exomes NFE Southern European population."
"gnomad_exome.an.an_nfe_swe","Number of alleles in gnomAD exomes NFE Swedish population."
"gnomad_exome.an.an_oth","Number of alleles in gnomAD exomes Other population."
"gnomad_exome.an.an_oth_female","Number of alleles in gnomAD exomes Other female population."
"gnomad_exome.an.an_oth_male","Number of alleles in gnomAD exomes Other male population."
"gnomad_exome.an.an_sas","Number of alleles in gnomAD exomes South Asian population."
"gnomad_exome.an.an_sas_female","Number of alleles in gnomAD exomes South Asian female population."
"gnomad_exome.an.an_sas_male","Number of alleles in gnomAD exomes South Asian male population."
"gnomad_exome.baseqranksum","gnomAD exome BaseQRankSum test statistic."
"gnomad_exome.chrom","Chromosome number in gnomAD exomes."
"gnomad_exome.clippingranksum","gnomAD exome ClippingRankSum test statistic."
"gnomad_exome.dp","Total read depth at the variant position in gnomAD exomes."
"gnomad_exome.fs","gnomAD exome FisherStrand bias score."
"gnomad_exome.hom.hom","Total homozygous count in gnomAD exomes."
"gnomad_exome.hom.hom_afr","Homozygous count in gnomAD exomes African/African American population."
"gnomad_exome.hom.hom_afr_female","Homozygous count in gnomAD exomes African/African American female population."
"gnomad_exome.hom.hom_afr_male","Homozygous count in gnomAD exomes African/African American male population."
"gnomad_exome.hom.hom_amr","Homozygous count in gnomAD exomes American population."
"gnomad_exome.hom.hom_amr_female","Homozygous count in gnomAD exomes American female population."
"gnomad_exome.hom.hom_amr_male","Homozygous count in gnomAD exomes American male population."
"gnomad_exome.hom.hom_asj","Homozygous count in gnomAD exomes Ashkenazi Jewish population."
"gnomad_exome.hom.hom_asj_female","Homozygous count in gnomAD exomes Ashkenazi Jewish female population."
"gnomad_exome.hom.hom_asj_male","Homozygous count in gnomAD exomes Ashkenazi Jewish male population."
"gnomad_exome.hom.hom_eas","Homozygous count in gnomAD exomes East Asian population."
"gnomad_exome.hom.hom_eas_female","Homozygous count in gnomAD exomes East Asian female population."
"gnomad_exome.hom.hom_eas_jpn","Homozygous count in gnomAD exomes East Asian Japanese population."
"gnomad_exome.hom.hom_eas_kor","Homozygous count in gnomAD exomes East Asian Korean population."
"gnomad_exome.hom.hom_eas_male","Homozygous count in gnomAD exomes East Asian male population."
"gnomad_exome.hom.hom_eas_oea","Homozygous count in gnomAD exomes East Asian Other population."
"gnomad_exome.hom.hom_female","Total homozygous count in gnomAD exomes female population."
"gnomad_exome.hom.hom_fin","Homozygous count in gnomAD exomes Finnish population."
"gnomad_exome.hom.hom_fin_female","Homozygous count in gnomAD exomes Finnish female population."
"gnomad_exome.hom.hom_fin_male","Homozygous count in gnomAD exomes Finnish male population."
"gnomad_exome.hom.hom_male","Total homozygous count in gnomAD exomes male population."
"gnomad_exome.hom.hom_nfe","Homozygous count in gnomAD exomes Non-Finnish European population."
"gnomad_exome.hom.hom_nfe_bgr","Homozygous count in gnomAD exomes NFE Bulgarian population."
"gnomad_exome.hom.hom_nfe_est","Homozygous count in gnomAD exomes NFE Estonian population."
"gnomad_exome.hom.hom_nfe_female","Homozygous count in gnomAD exomes NFE female population."
"gnomad_exome.hom.hom_nfe_male","Homozygous count in gnomAD exomes NFE male population."
"gnomad_exome.hom.hom_nfe_nwe","Homozygous count in gnomAD exomes NFE North-Western European population."
"gnomad_exome.hom.hom_nfe_onf","Homozygous count in gnomAD exomes NFE Other Non-Finnish European population."
"gnomad_exome.hom.hom_nfe_seu","Homozygous count in gnomAD exomes NFE Southern European population."
"gnomad_exome.hom.hom_nfe_swe","Homozygous count in gnomAD exomes NFE Swedish population."
"gnomad_exome.hom.hom_oth","Homozygous count in gnomAD exomes Other population."
"gnomad_exome.hom.hom_oth_female","Homozygous count in gnomAD exomes Other female population."
"gnomad_exome.hom.hom_oth_male","Homozygous count in gnomAD exomes Other male population."
"gnomad_exome.hom.hom_sas","Homozygous count in gnomAD exomes South Asian population."
"gnomad_exome.hom.hom_sas_female","Homozygous count in gnomAD exomes South Asian female population."
"gnomad_exome.hom.hom_sas_male","Homozygous count in gnomAD exomes South Asian male population."
"gnomad_exome.inbreedingcoeff","gnomAD exome Inbreeding Coefficient."
"gnomad_exome.mq.mq","gnomAD exome root mean square Mapping Quality."
"gnomad_exome.mq.mqranksum","gnomAD exome MQRankSum test statistic."
"gnomad_exome.pab_max","Maximum P(AB) value from gnomAD exomes. UNKNOWN significance."
"gnomad_exome.pos","Genomic position in gnomAD exomes (hg19)."
"gnomad_exome.qd","gnomAD exome Quality by Depth score."
"gnomad_exome.readposranksum","gnomAD exome ReadPosRankSum test statistic."
"gnomad_exome.ref","Reference allele in gnomAD exome format."
"gnomad_exome.rf","Random Forest probability score from gnomAD exomes. UNKNOWN usage."
"gnomad_exome.rsid","Associated dbSNP rsID from gnomAD exomes."
"gnomad_exome.sor","gnomAD exome Strand Odds Ratio score."
"gnomad_exome.type","Variant type in gnomAD exomes (e.g., snp)."
"gnomad_exome.vqslod","gnomAD exome Variant Quality Score Log-Odds."
"gnomad_exome.vqsr_culprit","gnomAD exome VQSR culprit annotation."
"hg19.end","End position in hg19 assembly."
"hg19.start","Start position in hg19 assembly."
"mutdb._license","License information URL for the MutDB data source."
"mutdb.alt","Alternate allele in MutDB."
"mutdb.chrom","Chromosome number in MutDB."
"mutdb.cosmic_id","Associated COSMIC ID(s) from MutDB."
"mutdb.hg19.end","End position in hg19 assembly (MutDB)."
"mutdb.hg19.start","Start position in hg19 assembly (MutDB)."
"mutdb.mutpred_score","MutPred score reported by MutDB."
"mutdb.ref","Reference allele in MutDB."
"mutdb.rsid","Associated dbSNP rsID from MutDB."
"mutdb.strand","Genomic strand reported by MutDB (m indicates '-')."
"mutdb.uniprot_id","Associated UniProt variant ID from MutDB."
"observed","Boolean indicating if the variant is observed in aggregated datasets."
"snpeff._license","License information URL for the SnpEff data source."
"snpeff.ann.cdna.length","Length of the cDNA sequence for the annotated transcript."
"snpeff.ann.cdna.position","Position of the variant within the cDNA sequence."
"snpeff.ann.cds.length","Length of the coding sequence (CDS) for the annotated transcript."
"snpeff.ann.cds.position","Position of the variant within the coding sequence (CDS)."
"snpeff.ann.effect","Predicted sequence ontology effect of the variant (e.g., missense_variant)."
"snpeff.ann.feature_id","Feature ID (usually transcript ID like NM_004333.4) for the annotation."
"snpeff.ann.feature_type","Type of feature annotated (e.g., transcript)."
"snpeff.ann.gene_id","Gene symbol or ID associated with the annotation."
"snpeff.ann.genename","Gene name associated with the annotation."
"snpeff.ann.hgvs_c","HGVS coding sequence notation from SnpEff."
"snpeff.ann.hgvs_p","HGVS protein sequence notation from SnpEff."
"snpeff.ann.protein.length","Length of the protein sequence for the annotated transcript."
"snpeff.ann.protein.position","Position of the amino acid change within the protein."
"snpeff.ann.putative_impact","SnpEff predicted impact category (e.g., MODERATE, HIGH)."
"snpeff.ann.rank","Rank of the annotation (exon/intron rank)."
"snpeff.ann.total","Total number of exons/introns in the transcript."
"snpeff.ann.transcript_biotype","Biotype of the transcript (e.g., protein_coding)."
"vcf.alt","Alternate allele in VCF format."
"vcf.position","Position of the variant in VCF format (hg19)."
"vcf.ref","Reference allele in VCF format."

```
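
A minimal, hedged sketch (not part of the repository): the dotted keys above follow MyVariant.info field paths, and the snippet below shows how a handful of them could be requested from the public MyVariant.info variant endpoint via its `fields` parameter. The HGVS identifier is purely illustrative (the same BRAF example that appears in the fetch tool docstring later in this document).

```python
# Hedged illustration only: fetch a few of the fields documented above from
# MyVariant.info using the standard library (requires network access).
import json
from urllib.parse import quote, urlencode
from urllib.request import urlopen

variant_id = "chr7:g.140453136A>T"  # illustrative BRAF V600E HGVS id
fields = ",".join(["exac.af", "gnomad_exome.af.af", "snpeff.ann.hgvs_p"])
url = (
    "https://myvariant.info/v1/variant/"
    + quote(variant_id, safe="")
    + "?"
    + urlencode({"fields": fields})
)

with urlopen(url) as resp:
    data = json.load(resp)  # keys mirror the top levels of the dotted paths

print(json.dumps(data, indent=2))
```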

--------------------------------------------------------------------------------
/src/biomcp/router.py:
--------------------------------------------------------------------------------

```python
"""Unified search and fetch tools for BioMCP.

This module provides the main MCP tools for searching and fetching biomedical data
across different domains (articles, trials, variants) with integrated sequential
thinking capabilities.
"""

import json
import logging
from typing import Annotated, Any, Literal

from pydantic import Field

from biomcp.constants import (
    DEFAULT_PAGE_NUMBER,
    DEFAULT_PAGE_SIZE,
    DEFAULT_TITLE,
    ERROR_DOMAIN_REQUIRED,
    ESTIMATED_ADDITIONAL_RESULTS,
    MAX_RESULTS_PER_DOMAIN_DEFAULT,
    TRIAL_DETAIL_SECTIONS,
    VALID_DOMAINS,
)
from biomcp.core import mcp_app
from biomcp.domain_handlers import get_domain_handler
from biomcp.exceptions import (
    InvalidDomainError,
    InvalidParameterError,
    QueryParsingError,
    ResultParsingError,
    SearchExecutionError,
)
from biomcp.integrations.biothings_client import BioThingsClient
from biomcp.metrics import track_performance
from biomcp.parameter_parser import ParameterParser
from biomcp.query_parser import QueryParser
from biomcp.query_router import QueryRouter, execute_routing_plan
from biomcp.thinking_tracker import get_thinking_reminder
from biomcp.trials import getter as trial_getter

logger = logging.getLogger(__name__)


def format_results(
    results: list[dict], domain: str, page: int, page_size: int, total: int
) -> dict:
    """Format search results according to OpenAI MCP search semantics.

    Converts domain-specific result formats into a standardized structure with:
    - id: Unique identifier for the result (required)
    - title: Human-readable title (required)
    - text: Brief preview or summary of the content (required)
    - url: Link to the full resource (optional but recommended for citations)

    Note: The OpenAI MCP specification does NOT require metadata in search results.
    Metadata should only be included in fetch results.

    Args:
        results: Raw results from domain-specific search
        domain: Type of results ('article', 'trial', or 'variant')
        page: Current page number (for internal tracking only)
        page_size: Number of results per page (for internal tracking only)
        total: Total number of results available (for internal tracking only)

    Returns:
        Dictionary with results array following OpenAI MCP format:
        {"results": [{"id", "title", "text", "url"}, ...]}

    Raises:
        InvalidDomainError: If domain is not recognized
    """
    logger.debug(f"Formatting {len(results)} results for domain: {domain}")

    formatted_data = []

    # Get the appropriate handler
    try:
        handler_class = get_domain_handler(domain)
    except ValueError:
        raise InvalidDomainError(domain, VALID_DOMAINS) from None

    # Format each result
    for result in results:
        try:
            formatted_result = handler_class.format_result(result)
            # Ensure the result has the required OpenAI MCP fields
            openai_result = {
                "id": formatted_result.get("id", ""),
                "title": formatted_result.get("title", DEFAULT_TITLE),
                "text": formatted_result.get(
                    "snippet", formatted_result.get("text", "")
                ),
                "url": formatted_result.get("url", ""),
            }
            # Note: OpenAI MCP spec doesn't require metadata in search results
            # Only include it if explicitly needed for enhanced functionality
            formatted_data.append(openai_result)
        except Exception as e:
            logger.warning(f"Failed to format result in domain {domain}: {e}")
            # Skip malformed results
            continue

    # Add thinking reminder if needed (as first result)
    reminder = get_thinking_reminder()
    if reminder and formatted_data:
        reminder_result = {
            "id": "thinking-reminder",
            "title": "⚠️ Research Best Practice Reminder",
            "text": reminder,
            "url": "",
        }
        formatted_data.insert(0, reminder_result)

    # Return OpenAI MCP compliant format
    return {"results": formatted_data}


# ────────────────────────────
# Unified SEARCH tool
# ────────────────────────────
@mcp_app.tool()
@track_performance("biomcp.search")
async def search(  # noqa: C901
    query: Annotated[
        str,
        "Unified search query (e.g., 'gene:BRAF AND trials.condition:melanoma'). If provided, other parameters are ignored.",
    ],
    call_benefit: Annotated[
        str | None,
        Field(
            description="Brief explanation of why this search is being performed and expected benefit. Helps improve search accuracy and provides context for analytics. Highly recommended for better results."
        ),
    ] = None,
    domain: Annotated[
        Literal[
            "article",
            "trial",
            "variant",
            "gene",
            "drug",
            "disease",
            "nci_organization",
            "nci_intervention",
            "nci_biomarker",
            "nci_disease",
            "fda_adverse",
            "fda_label",
            "fda_device",
            "fda_approval",
            "fda_recall",
            "fda_shortage",
        ]
        | None,
        Field(
            description="Domain to search: 'article' for papers/literature ABOUT genes/variants/diseases, 'trial' for clinical studies, 'variant' for genetic variant DATABASE RECORDS, 'gene' for gene information from MyGene.info, 'drug' for drug/chemical information from MyChem.info, 'disease' for disease information from MyDisease.info, 'nci_organization' for NCI cancer centers/sponsors, 'nci_intervention' for NCI drugs/devices/procedures, 'nci_biomarker' for NCI trial eligibility biomarkers, 'nci_disease' for NCI cancer vocabulary, 'fda_adverse' for FDA adverse event reports, 'fda_label' for FDA drug labels, 'fda_device' for FDA device events, 'fda_approval' for FDA drug approvals, 'fda_recall' for FDA drug recalls, 'fda_shortage' for FDA drug shortages"
        ),
    ] = None,
    genes: Annotated[list[str] | str | None, "Gene symbols"] = None,
    diseases: Annotated[list[str] | str | None, "Disease terms"] = None,
    variants: Annotated[list[str] | str | None, "Variant strings"] = None,
    chemicals: Annotated[list[str] | str | None, "Drug/chemical terms"] = None,
    keywords: Annotated[list[str] | str | None, "Free-text keywords"] = None,
    conditions: Annotated[list[str] | str | None, "Trial conditions"] = None,
    interventions: Annotated[
        list[str] | str | None, "Trial interventions"
    ] = None,
    recruiting_status: Annotated[
        str | None, "Trial status filter (OPEN, CLOSED, or ANY)"
    ] = None,
    phase: Annotated[str | None, "Trial phase filter"] = None,
    significance: Annotated[
        str | None, "Variant clinical significance"
    ] = None,
    lat: Annotated[
        float | None,
        "Latitude for trial location search. AI agents should geocode city names (e.g., 'Cleveland' → 41.4993) before using.",
    ] = None,
    long: Annotated[
        float | None,
        "Longitude for trial location search. AI agents should geocode city names (e.g., 'Cleveland' → -81.6944) before using.",
    ] = None,
    distance: Annotated[
        int | None,
        "Distance in miles from lat/long for trial search (default: 50 miles if lat/long provided)",
    ] = None,
    page: Annotated[int, "Page number (minimum: 1)"] = DEFAULT_PAGE_NUMBER,
    page_size: Annotated[int, "Results per page (1-100)"] = DEFAULT_PAGE_SIZE,
    max_results_per_domain: Annotated[
        int | None, "Max results per domain (unified search only)"
    ] = None,
    explain_query: Annotated[
        bool, "Return query explanation (unified search only)"
    ] = False,
    get_schema: Annotated[
        bool, "Return searchable fields schema instead of results"
    ] = False,
    api_key: Annotated[
        str | None,
        Field(
            description="NCI API key for searching NCI domains (nci_organization, nci_intervention, nci_biomarker, nci_disease). Required for NCI searches. Get a free key at: https://clinicaltrialsapi.cancer.gov/"
        ),
    ] = None,
) -> dict:
    """Search biomedical literature, clinical trials, genetic variants, genes, drugs, and diseases.

    ⚠️ IMPORTANT: Have you used the 'think' tool first? If not, STOP and use it NOW!
    The 'think' tool is REQUIRED for proper research planning and should be your FIRST step.

    This tool provides access to biomedical data from PubMed/PubTator3, ClinicalTrials.gov,
    MyVariant.info, and the BioThings suite (MyGene.info, MyChem.info, MyDisease.info).
    It supports two search modes:

    ## 1. UNIFIED QUERY LANGUAGE
    Use the 'query' parameter with field-based syntax for precise cross-domain searches.

    Syntax:
    - Basic: "gene:BRAF"
    - AND logic: "gene:BRAF AND disease:melanoma"
    - OR logic: "gene:PTEN AND (R173 OR Arg173 OR 'position 173')"
    - Domain-specific: "trials.condition:melanoma AND trials.phase:3"

    Common fields:
    - Cross-domain: gene, disease, variant, chemical/drug
    - Articles: pmid, title, abstract, journal, author
    - Trials: trials.condition, trials.intervention, trials.phase, trials.status
    - Variants: variants.hgvs, variants.rsid, variants.significance

    Example:
    ```
    await search(
        query="gene:BRAF AND disease:melanoma AND trials.phase:3",
        max_results_per_domain=20
    )
    ```

    ## 2. DOMAIN-SPECIFIC SEARCH
    Use the 'domain' parameter with specific filters for targeted searches.

    Domains:
    - "article": Search PubMed/PubTator3 for research articles and preprints ABOUT genes, variants, diseases, or chemicals
    - "trial": Search ClinicalTrials.gov for clinical studies
    - "variant": Search MyVariant.info for genetic variant DATABASE RECORDS (population frequency, clinical significance, etc.) - NOT for articles about variants!
    - "gene": Search MyGene.info for gene information (symbol, name, function, aliases)
    - "drug": Search MyChem.info for drug/chemical information (names, formulas, indications)
    - "disease": Search MyDisease.info for disease information (names, definitions, synonyms)
    - "nci_organization": Search NCI database for cancer centers, hospitals, and research sponsors (requires API key)
    - "nci_intervention": Search NCI database for drugs, devices, procedures used in cancer trials (requires API key)
    - "nci_biomarker": Search NCI database for biomarkers used in trial eligibility criteria (requires API key)
    - "nci_disease": Search NCI controlled vocabulary for cancer conditions and terms (requires API key)

    Example:
    ```
    await search(
        domain="article",
        genes=["BRAF", "NRAS"],
        diseases=["melanoma"],
        page_size=50
    )
    ```

    ## DOMAIN SELECTION EXAMPLES:
    - To find ARTICLES about BRAF V600E mutation: domain="article", genes=["BRAF"], variants=["V600E"]
    - To find VARIANT DATA for BRAF mutations: domain="variant", gene="BRAF"
    - To find articles about ERBB2 p.D277Y: domain="article", genes=["ERBB2"], variants=["p.D277Y"]
    - Common mistake: Using domain="variant" when you want articles about a variant

    ## IMPORTANT NOTES:
    - For complex research questions, use the separate 'think' tool for systematic analysis
    - The tool returns results in OpenAI MCP format: {"results": [{"id", "title", "text", "url"}, ...]}
    - Search results do NOT include metadata (per OpenAI MCP specification)
    - Use the fetch tool to get detailed metadata for specific records
    - Use get_schema=True to explore available search fields
    - Use explain_query=True to understand query parsing (unified mode)
    - Domain-specific searches use AND logic for multiple values
    - For OR logic, use the unified query language
    - NEW: Article search keywords support OR with pipe separator: "R173|Arg173|p.R173"
    - Remember: domain="article" finds LITERATURE, domain="variant" finds DATABASE RECORDS

    ## RETURN FORMAT:
    All search modes return results in this format:
    ```json
    {
        "results": [
            {
                "id": "unique_identifier",
                "title": "Human-readable title",
                "text": "Summary or snippet of content",
                "url": "Link to full resource"
            }
        ]
    }
    ```
    """
    logger.info(f"Search called with domain={domain}, query={query}")

    # Return schema if requested
    if get_schema:
        parser = QueryParser()
        return parser.get_schema()

    # Determine search mode
    if query and query.strip():
        # Check if this is a unified query (contains field syntax like "gene:" or "AND")
        is_unified_query = any(
            marker in query for marker in [":", " AND ", " OR "]
        )

        # Check if this is an NCI domain
        nci_domains = [
            "nci_biomarker",
            "nci_organization",
            "nci_intervention",
            "nci_disease",
        ]
        is_nci_domain = domain in nci_domains if domain else False

        if not domain or (domain and is_unified_query and not is_nci_domain):
            # Use unified query mode if:
            # 1. No domain specified, OR
            # 2. Domain specified but query has field syntax AND it's not an NCI domain
            logger.info(f"Using unified query mode: {query}")
            return await _unified_search(
                query=query,
                max_results_per_domain=max_results_per_domain
                or MAX_RESULTS_PER_DOMAIN_DEFAULT,
                domains=None,
                explain_query=explain_query,
            )
        elif domain:
            # Domain-specific search with query as keyword
            logger.info(
                f"Domain-specific search with query as keyword: domain={domain}, query={query}"
            )
            # Convert query to keywords parameter for domain-specific search
            keywords = [query]

    # Legacy domain-based search
    if not domain:
        raise InvalidParameterError(
            "query or domain", None, ERROR_DOMAIN_REQUIRED
        )

    # Validate pagination parameters
    try:
        page, page_size = ParameterParser.validate_page_params(page, page_size)
    except InvalidParameterError as e:
        logger.error(f"Invalid pagination parameters: {e}")
        raise

    # Parse parameters using ParameterParser
    genes = ParameterParser.parse_list_param(genes, "genes")
    diseases = ParameterParser.parse_list_param(diseases, "diseases")
    variants = ParameterParser.parse_list_param(variants, "variants")
    chemicals = ParameterParser.parse_list_param(chemicals, "chemicals")
    keywords = ParameterParser.parse_list_param(keywords, "keywords")
    conditions = ParameterParser.parse_list_param(conditions, "conditions")
    interventions = ParameterParser.parse_list_param(
        interventions, "interventions"
    )

    logger.debug(
        f"Parsed parameters for domain {domain}: "
        f"genes={genes}, diseases={diseases}, variants={variants}"
    )

    if domain == "article":
        from .router_handlers import handle_article_search

        items, total = await handle_article_search(
            genes=genes,
            diseases=diseases,
            variants=variants,
            chemicals=chemicals,
            keywords=keywords,
            page=page,
            page_size=page_size,
        )

        return format_results(
            items,
            domain="article",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "trial":
        logger.info("Executing trial search")
        # Build the trial search parameters
        search_params: dict[str, Any] = {}
        if conditions:
            search_params["conditions"] = conditions
        if interventions:
            search_params["interventions"] = interventions
        if recruiting_status:
            search_params["recruiting_status"] = recruiting_status
        if phase:
            try:
                search_params["phase"] = ParameterParser.normalize_phase(phase)
            except InvalidParameterError:
                raise
        if keywords:
            search_params["keywords"] = keywords
        if lat is not None:
            search_params["lat"] = lat
        if long is not None:
            search_params["long"] = long
        if distance is not None:
            search_params["distance"] = distance

        try:
            from biomcp.trials.search import TrialQuery, search_trials

            # Convert search_params to TrialQuery
            trial_query = TrialQuery(**search_params, page_size=page_size)
            result_str = await search_trials(trial_query, output_json=True)
        except Exception as e:
            logger.error(f"Trial search failed: {e}")
            raise SearchExecutionError("trial", e) from e

        # Parse the JSON results
        try:
            results = json.loads(result_str)
        except (json.JSONDecodeError, TypeError) as e:
            logger.error(f"Failed to parse trial results: {e}")
            raise ResultParsingError("trial", e) from e

        # Handle different response formats from the trials API
        # The API can return either a dict with 'studies' key or a direct list
        if isinstance(results, dict):
            # ClinicalTrials.gov API v2 format with studies array
            if "studies" in results:
                items = results["studies"]
                total = len(items)  # API doesn't provide total count
            # Legacy format or error
            elif "error" in results:
                logger.warning(
                    f"Trial API returned error: {results.get('error')}"
                )
                return format_results(
                    [], domain="trial", page=page, page_size=page_size, total=0
                )
            else:
                # Assume the dict itself is a single result
                items = [results]
                total = 1
        elif isinstance(results, list):
            # Direct list of results
            items = results
            total = len(items)
        else:
            items = []
            total = 0

        logger.info(f"Trial search returned {total} total results")

        return format_results(
            items, domain="trial", page=page, page_size=page_size, total=total
        )

    elif domain == "variant":
        logger.info("Executing variant search")
        # Build the variant search parameters
        # Note: variant searcher expects single gene, not list
        gene = genes[0] if genes else None

        # Use keywords to search for significance if provided
        keyword_list = keywords or []
        if significance:
            keyword_list.append(significance)

        try:
            from biomcp.variants.search import VariantQuery, search_variants

            variant_query = VariantQuery(
                gene=gene,
                significance=significance,
                size=page_size,
                offset=(page - 1) * page_size,
            )
            result_str = await search_variants(variant_query, output_json=True)
        except Exception as e:
            logger.error(f"Variant search failed: {e}")
            raise SearchExecutionError("variant", e) from e

        # Parse the JSON results
        try:
            all_results = json.loads(result_str)
        except (json.JSONDecodeError, TypeError) as e:
            logger.error(f"Failed to parse variant results: {e}")
            raise ResultParsingError("variant", e) from e

        # For variants, the results are already paginated by the API
        # We need to estimate total based on whether we got a full page
        items = all_results if isinstance(all_results, list) else []
        # Rough estimate: if we got a full page, there might be more
        total = len(items) + (
            ESTIMATED_ADDITIONAL_RESULTS if len(items) == page_size else 0
        )
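        # Illustration (hypothetical numbers, comment only): with page_size=10 and
        # exactly 10 items returned, total is reported as 10 + ESTIMATED_ADDITIONAL_RESULTS
        # to signal that more pages likely exist; a partial page of 7 items reports total=7.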

        logger.info(f"Variant search returned {len(items)} results")

        return format_results(
            items,
            domain="variant",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "gene":
        logger.info("Executing gene search")
        # Build the gene search query
        query_str = keywords[0] if keywords else genes[0] if genes else ""

        if not query_str:
            raise InvalidParameterError(
                "keywords or genes", None, "a gene symbol or search term"
            )

        try:
            client = BioThingsClient()
            # For search, query by symbol/name
            results = await client._query_gene(query_str)

            if not results:
                items = []
                total = 0
            else:
                # Fetch full details for each result (limited by page_size)
                items = []
                for result in results[:page_size]:
                    gene_id = result.get("_id")
                    if gene_id:
                        full_gene = await client._get_gene_by_id(gene_id)
                        if full_gene:
                            items.append(full_gene.model_dump())

                total = len(results)

        except Exception as e:
            logger.error(f"Gene search failed: {e}")
            raise SearchExecutionError("gene", e) from e

        logger.info(f"Gene search returned {len(items)} results")

        return format_results(
            items,
            domain="gene",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "drug":
        logger.info("Executing drug search")
        # Build the drug search query
        query_str = (
            keywords[0] if keywords else chemicals[0] if chemicals else ""
        )

        if not query_str:
            raise InvalidParameterError(
                "keywords or chemicals", None, "a drug name or search term"
            )

        try:
            client = BioThingsClient()
            # For search, query by name
            results = await client._query_drug(query_str)

            if not results:
                items = []
                total = 0
            else:
                # Fetch full details for each result (limited by page_size)
                items = []
                for result in results[:page_size]:
                    drug_id = result.get("_id")
                    if drug_id:
                        full_drug = await client._get_drug_by_id(drug_id)
                        if full_drug:
                            items.append(full_drug.model_dump(by_alias=True))

                total = len(results)

        except Exception as e:
            logger.error(f"Drug search failed: {e}")
            raise SearchExecutionError("drug", e) from e

        logger.info(f"Drug search returned {len(items)} results")

        return format_results(
            items,
            domain="drug",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "disease":
        logger.info("Executing disease search")
        # Build the disease search query
        query_str = (
            keywords[0] if keywords else diseases[0] if diseases else ""
        )

        if not query_str:
            raise InvalidParameterError(
                "keywords or diseases", None, "a disease name or search term"
            )

        try:
            client = BioThingsClient()
            # For search, query by name
            results = await client._query_disease(query_str)

            if not results:
                items = []
                total = 0
            else:
                # Fetch full details for each result (limited by page_size)
                items = []
                for result in results[:page_size]:
                    disease_id = result.get("_id")
                    if disease_id:
                        full_disease = await client._get_disease_by_id(
                            disease_id
                        )
                        if full_disease:
                            items.append(
                                full_disease.model_dump(by_alias=True)
                            )

                total = len(results)

        except Exception as e:
            logger.error(f"Disease search failed: {e}")
            raise SearchExecutionError("disease", e) from e

        logger.info(f"Disease search returned {len(items)} results")

        return format_results(
            items,
            domain="disease",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "nci_organization":
        from .router_handlers import handle_nci_organization_search

        # Extract NCI-specific parameters
        organization_type = keywords[0] if keywords else None
        city = None
        state = None
        name = keywords[0] if keywords else None

        # Try to parse location from keywords
        if keywords and len(keywords) >= 2:
            # Assume last two keywords might be city, state
            city = keywords[-2]
            state = keywords[-1]
            if len(state) == 2 and state.isupper():
                # Likely a state code
                name = " ".join(keywords[:-2]) if len(keywords) > 2 else None
            else:
                # Not a state code, use all as name
                city = None
                state = None
                name = " ".join(keywords)

        items, total = await handle_nci_organization_search(
            name=name,
            organization_type=organization_type,
            city=city,
            state=state,
            api_key=api_key,
            page=page,
            page_size=page_size,
        )

        return format_results(
            items,
            domain="nci_organization",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "nci_intervention":
        from .router_handlers import handle_nci_intervention_search

        # Extract parameters
        name = keywords[0] if keywords else None
        intervention_type = None  # Could be parsed from additional params

        items, total = await handle_nci_intervention_search(
            name=name,
            intervention_type=intervention_type,
            synonyms=True,
            api_key=api_key,
            page=page,
            page_size=page_size,
        )

        return format_results(
            items,
            domain="nci_intervention",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "nci_biomarker":
        from .router_handlers import handle_nci_biomarker_search

        # Extract parameters
        name = keywords[0] if keywords else None
        gene = genes[0] if genes else None

        items, total = await handle_nci_biomarker_search(
            name=name,
            gene=gene,
            biomarker_type=None,
            assay_type=None,
            api_key=api_key,
            page=page,
            page_size=page_size,
        )

        return format_results(
            items,
            domain="nci_biomarker",
            page=page,
            page_size=page_size,
            total=total,
        )

    elif domain == "nci_disease":
        from .router_handlers import handle_nci_disease_search

        # Extract parameters
        name = diseases[0] if diseases else keywords[0] if keywords else None

        items, total = await handle_nci_disease_search(
            name=name,
            include_synonyms=True,
            category=None,
            api_key=api_key,
            page=page,
            page_size=page_size,
        )

        return format_results(
            items,
            domain="nci_disease",
            page=page,
            page_size=page_size,
            total=total,
        )

    # OpenFDA domains
    elif domain == "fda_adverse":
        from biomcp.openfda import search_adverse_events

        drug_name = (
            chemicals[0] if chemicals else keywords[0] if keywords else None
        )
        skip = (page - 1) * page_size
        fda_result = await search_adverse_events(
            drug=drug_name,
            limit=page_size,
            skip=skip,
            api_key=api_key,
        )
        # The openFDA helper returns formatted markdown; for simplicity,
        # return it as a single result item rather than parsing it into items.
        return {"results": [{"content": fda_result}]}

    elif domain == "fda_label":
        from biomcp.openfda import search_drug_labels

        drug_name = (
            chemicals[0] if chemicals else keywords[0] if keywords else None
        )
        skip = (page - 1) * page_size
        fda_result = await search_drug_labels(
            name=drug_name,
            limit=page_size,
            skip=skip,
            api_key=api_key,
        )
        return {"results": [{"content": fda_result}]}

    elif domain == "fda_device":
        from biomcp.openfda import search_device_events

        device_name = keywords[0] if keywords else None
        skip = (page - 1) * page_size
        fda_result = await search_device_events(
            device=device_name,
            limit=page_size,
            skip=skip,
            api_key=api_key,
        )
        return {"results": [{"content": fda_result}]}

    elif domain == "fda_approval":
        from biomcp.openfda import search_drug_approvals

        drug_name = (
            chemicals[0] if chemicals else keywords[0] if keywords else None
        )
        skip = (page - 1) * page_size
        fda_result = await search_drug_approvals(
            drug=drug_name,
            limit=page_size,
            skip=skip,
            api_key=api_key,
        )
        return {"results": [{"content": fda_result}]}

    elif domain == "fda_recall":
        from biomcp.openfda import search_drug_recalls

        drug_name = (
            chemicals[0] if chemicals else keywords[0] if keywords else None
        )
        skip = (page - 1) * page_size
        fda_result = await search_drug_recalls(
            drug=drug_name,
            limit=page_size,
            skip=skip,
            api_key=api_key,
        )
        return {"results": [{"content": fda_result}]}

    elif domain == "fda_shortage":
        from biomcp.openfda import search_drug_shortages

        drug_name = (
            chemicals[0] if chemicals else keywords[0] if keywords else None
        )
        skip = (page - 1) * page_size
        fda_result = await search_drug_shortages(
            drug=drug_name,
            limit=page_size,
            skip=skip,
            api_key=api_key,
        )
        return {"results": [{"content": fda_result}]}

    else:
        raise InvalidDomainError(domain, VALID_DOMAINS)


# ────────────────────────────
# Unified FETCH tool
# ────────────────────────────
@mcp_app.tool()
@track_performance("biomcp.fetch")
async def fetch(  # noqa: C901
    id: Annotated[  # noqa: A002
        str,
        "PMID / NCT ID / Variant ID / DOI / Gene ID / Drug ID / Disease ID / NCI Organization ID / NCI Intervention ID / NCI Disease ID / FDA Report ID / FDA Set ID / FDA MDR Key / FDA Application Number / FDA Recall Number",
    ],
    domain: Annotated[
        Literal[
            "article",
            "trial",
            "variant",
            "gene",
            "drug",
            "disease",
            "nci_organization",
            "nci_intervention",
            "nci_biomarker",
            "nci_disease",
            "fda_adverse",
            "fda_label",
            "fda_device",
            "fda_approval",
            "fda_recall",
            "fda_shortage",
        ]
        | None,
        Field(
            description="Domain of the record (auto-detected if not provided)"
        ),
    ] = None,
    call_benefit: Annotated[
        str | None,
        Field(
            description="Brief explanation of why this fetch is being performed and expected benefit. Helps provide context for analytics and improves result relevance."
        ),
    ] = None,
    detail: Annotated[
        Literal[
            "protocol", "locations", "outcomes", "references", "all", "full"
        ]
        | None,
        "Specific section to retrieve (trials) or 'full' (articles)",
    ] = None,
    api_key: Annotated[
        str | None,
        Field(
            description="NCI API key for fetching NCI records (nci_organization, nci_intervention, nci_disease). Required for NCI fetches. Get a free key at: https://clinicaltrialsapi.cancer.gov/"
        ),
    ] = None,
) -> dict:
    """Fetch comprehensive details for a specific biomedical record.

    This tool retrieves full information for articles, clinical trials, genetic variants,
    genes, drugs, or diseases using their unique identifiers. It returns data in a
    standardized format suitable for detailed analysis and research.

    ## IDENTIFIER FORMATS:
    - Articles: PMID (PubMed ID) - e.g., "35271234" OR DOI - e.g., "10.1101/2024.01.20.23288905"
    - Trials: NCT ID (ClinicalTrials.gov ID) - e.g., "NCT04280705"
    - Variants: HGVS notation or dbSNP ID - e.g., "chr7:g.140453136A>T" or "rs121913254"
    - Genes: Gene symbol or Entrez ID - e.g., "BRAF" or "673"
    - Drugs: Drug name or ID - e.g., "imatinib" or "DB00619"
    - Diseases: Disease name or ID - e.g., "melanoma" or "MONDO:0005105"
    - NCI Organizations: NCI organization ID - e.g., "NCI-2011-03337"
    - NCI Interventions: NCI intervention ID - e.g., "INT123456"
    - NCI Diseases: NCI disease ID - e.g., "C4872"

    The domain is automatically detected from the ID format if not provided:
    - NCT* → trial
    - Contains "/" with numeric prefix (DOI) → article
    - Pure numeric → article (PMID)
    - rs* or contains ':' or 'g.' → variant
    - For genes, drugs, diseases: manual specification recommended

    ## DOMAIN-SPECIFIC OPTIONS:

    ### Articles (domain="article"):
    - Returns full article metadata, abstract, and full text when available
    - Supports both PubMed articles (via PMID) and Europe PMC preprints (via DOI)
    - Includes annotations for genes, diseases, chemicals, and variants (PubMed only)
    - detail="full" attempts to retrieve full text content (PubMed only)

    ### Clinical Trials (domain="trial"):
    - detail=None or "protocol": Core study information
    - detail="locations": Study sites and contact information
    - detail="outcomes": Primary/secondary outcomes and results
    - detail="references": Related publications and citations
    - detail="all": Complete trial record with all sections

    ### Variants (domain="variant"):
    - Returns comprehensive variant information including:
      - Clinical significance and interpretations
      - Population frequencies
      - Gene/protein effects
      - External database links
    - detail parameter is ignored (always returns full data)

    ### Genes (domain="gene"):
    - Returns gene information from MyGene.info including:
      - Gene symbol, name, and type
      - Entrez ID and Ensembl IDs
      - Gene summary and aliases
      - RefSeq information
    - detail parameter is ignored (always returns full data)

    ### Drugs (domain="drug"):
    - Returns drug/chemical information from MyChem.info including:
      - Drug name and trade names
      - Chemical formula and structure IDs
      - Clinical indications
      - Mechanism of action
      - External database links (DrugBank, PubChem, ChEMBL)
    - detail parameter is ignored (always returns full data)

    ### Diseases (domain="disease"):
    - Returns disease information from MyDisease.info including:
      - Disease name and definition
      - MONDO ontology ID
      - Disease synonyms
      - Cross-references to other databases
      - Associated phenotypes
    - detail parameter is ignored (always returns full data)

    ### NCI Organizations (domain="nci_organization"):
    - Returns organization information from NCI database including:
      - Organization name and type
      - Full address and contact information
      - Research focus areas
      - Associated clinical trials
    - Requires NCI API key
    - detail parameter is ignored (always returns full data)

    ### NCI Interventions (domain="nci_intervention"):
    - Returns intervention information from NCI database including:
      - Intervention name and type
      - Synonyms and alternative names
      - Mechanism of action (for drugs)
      - FDA approval status
      - Associated clinical trials
    - Requires NCI API key
    - detail parameter is ignored (always returns full data)

    ### NCI Diseases (domain="nci_disease"):
    - Returns disease information from NCI controlled vocabulary including:
      - Preferred disease name
      - Disease category and classification
      - All known synonyms
      - Cross-reference codes (ICD, SNOMED)
    - Requires NCI API key
    - detail parameter is ignored (always returns full data)
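
    ### FDA Records (domain="fda_adverse", "fda_label", "fda_device", "fda_approval", "fda_recall", "fda_shortage"):
    - Fetch by the matching openFDA identifier: adverse event report ID, label Set ID,
      device MDR report key, application number, recall number, or drug name (shortages)
    - The api_key value, if provided, is forwarded to the openFDA request
    - detail parameter is ignored (always returns full data)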

    ## RETURN FORMAT:
    All fetch operations return a standardized format:
    ```json
    {
        "id": "unique_identifier",
        "title": "Record title or name",
        "text": "Full content or comprehensive description",
        "url": "Link to original source",
        "metadata": {
            // Domain-specific additional fields
        }
    }
    ```

    ## EXAMPLES:

    Fetch article by PMID (domain auto-detected):
    ```
    await fetch(id="35271234")
    ```

    Fetch article by DOI (domain auto-detected):
    ```
    await fetch(id="10.1101/2024.01.20.23288905")
    ```

    Fetch complete trial information (domain auto-detected):
    ```
    await fetch(
        id="NCT04280705",
        detail="all"
    )
    ```

    Fetch variant with clinical interpretations:
    ```
    await fetch(id="rs121913254")
    ```

    Explicitly specify domain (optional):
    ```
    await fetch(
        domain="variant",
        id="chr7:g.140453136A>T"
    )
    ```
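
    Fetch gene information (specify the domain explicitly for genes):
    ```
    await fetch(domain="gene", id="BRAF")
    ```

    Fetch drug information:
    ```
    await fetch(domain="drug", id="imatinib")
    ```

    Fetch an NCI organization record (requires an NCI API key; placeholder key shown):
    ```
    await fetch(
        domain="nci_organization",
        id="NCI-2011-03337",
        api_key="your-nci-api-key"
    )
    ```

    Fetch an FDA drug label (placeholder Set ID shown):
    ```
    await fetch(domain="fda_label", id="<label-set-id>")
    ```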
    """
    # Auto-detect domain if not provided
    if domain is None:
        # Try to infer domain from ID format
        if id.upper().startswith("NCT"):
            domain = "trial"
            logger.info(f"Auto-detected domain 'trial' from NCT ID: {id}")
        elif "/" in id and id.split("/")[0].replace(".", "").isdigit():
            # DOI format (e.g., 10.1038/nature12373) - treat as article
            domain = "article"
            logger.info(f"Auto-detected domain 'article' from DOI: {id}")
        elif id.isdigit():
            # Numeric ID - likely PMID
            domain = "article"
            logger.info(
                f"Auto-detected domain 'article' from numeric ID: {id}"
            )
        elif id.startswith("rs") or ":" in id or "g." in id:
            # rsID or HGVS notation
            domain = "variant"
            logger.info(f"Auto-detected domain 'variant' from ID format: {id}")
        else:
            # Default to article if we can't determine
            domain = "article"
            logger.warning(
                f"Could not auto-detect domain for ID '{id}', defaulting to 'article'"
            )

    logger.info(f"Fetch called for {domain} with id={id}, detail={detail}")

    if domain == "article":
        logger.debug("Fetching article details")
        try:
            from biomcp.articles.fetch import _article_details

            # The _article_details function handles both PMIDs and DOIs
            result_str = await _article_details(
                call_benefit=call_benefit
                or "Fetching article details via MCP tool",
                pmid=id,
            )
        except Exception as e:
            logger.error(f"Article fetch failed: {e}")
            raise SearchExecutionError("article", e) from e

        # Parse and return the first article
        try:
            articles = (
                json.loads(result_str)
                if isinstance(result_str, str)
                else result_str
            )
        except (json.JSONDecodeError, TypeError) as e:
            logger.error(f"Failed to parse article fetch results: {e}")
            raise ResultParsingError("article", e) from e

        if not articles:
            return {"error": "Article not found"}

        article = articles[0]

        # Check if the article is actually an error response
        if "error" in article:
            return {"error": article["error"]}

        # Format according to OpenAI MCP standard
        full_text = article.get("full_text", "")
        abstract = article.get("abstract", "")
        text_content = full_text if full_text else abstract

        return {
            "id": str(article.get("pmid", id)),
            "title": article.get("title", DEFAULT_TITLE),
            "text": text_content,
            "url": article.get(
                "url", f"https://pubmed.ncbi.nlm.nih.gov/{id}/"
            ),
            "metadata": {
                "pmid": article.get("pmid"),
                "journal": article.get("journal"),
                "authors": article.get("authors"),
                "year": article.get("year"),
                "doi": article.get("doi"),
                "annotations": article.get("annotations", {}),
                "is_preprint": article.get("is_preprint", False),
                "preprint_source": article.get("preprint_source"),
            },
        }

    elif domain == "trial":
        logger.debug(f"Fetching trial details for section: {detail}")

        # Validate detail parameter
        if detail is not None and detail not in TRIAL_DETAIL_SECTIONS:
            raise InvalidParameterError(
                "detail",
                detail,
                f"one of: {', '.join(TRIAL_DETAIL_SECTIONS)} or None",
            )

        try:
            # Always fetch protocol for basic info - get JSON format
            protocol_json = await trial_getter.get_trial(
                nct_id=id,
                module=trial_getter.Module.PROTOCOL,
                output_json=True,
            )

            # Parse the JSON response
            try:
                protocol_data = json.loads(protocol_json)
            except json.JSONDecodeError as e:
                logger.error(f"Failed to parse protocol JSON for {id}: {e}")
                return {
                    "id": id,
                    "title": f"Clinical Trial {id}",
                    "text": f"Error parsing trial data: {e}",
                    "url": f"https://clinicaltrials.gov/study/{id}",
                    "metadata": {
                        "nct_id": id,
                        "error": f"JSON parse error: {e}",
                    },
                }

            # Check for errors in the response
            if "error" in protocol_data:
                return {
                    "id": id,
                    "title": f"Clinical Trial {id}",
                    "text": protocol_data.get(
                        "details",
                        protocol_data.get("error", "Trial not found"),
                    ),
                    "url": f"https://clinicaltrials.gov/study/{id}",
                    "metadata": {
                        "nct_id": id,
                        "error": protocol_data.get("error"),
                    },
                }

            # Build comprehensive text description
            text_parts = []

            # Extract protocol section data from the API response
            protocol_section = protocol_data.get("protocolSection", {})

            # Extract basic info from the protocol section
            id_module = protocol_section.get("identificationModule", {})
            status_module = protocol_section.get("statusModule", {})
            desc_module = protocol_section.get("descriptionModule", {})
            conditions_module = protocol_section.get("conditionsModule", {})
            design_module = protocol_section.get("designModule", {})
            arms_module = protocol_section.get("armsInterventionsModule", {})

            # Add basic protocol info to text
            title = id_module.get("briefTitle", f"Clinical Trial {id}")
            text_parts.append(f"Study Title: {title}")

            # Conditions
            conditions = conditions_module.get("conditions", [])
            if conditions:
                text_parts.append(f"\nConditions: {', '.join(conditions)}")

            # Interventions
            interventions = []
            for intervention in arms_module.get("interventions", []):
                interventions.append(intervention.get("name", ""))
            if interventions:
                text_parts.append(f"Interventions: {', '.join(interventions)}")

            # Phase
            phases = design_module.get("phases", [])
            if phases:
                text_parts.append(f"Phase: {', '.join(phases)}")

            # Status
            overall_status = status_module.get("overallStatus", "N/A")
            text_parts.append(f"Status: {overall_status}")

            # Summary
            brief_summary = desc_module.get(
                "briefSummary", "No summary available"
            )
            text_parts.append(f"\nSummary: {brief_summary}")

            # Prepare metadata
            metadata = {"nct_id": id, "protocol": protocol_data}

            if detail in ("all", "locations", "outcomes", "references"):
                # Fetch additional sections as needed
                if detail in ("all", "locations"):
                    try:
                        locations_json = await trial_getter.get_trial(
                            nct_id=id,
                            module=trial_getter.Module.LOCATIONS,
                            output_json=True,
                        )
                        locations_data = json.loads(locations_json)
                        if "error" not in locations_data:
                            # Extract locations from the protocol section
                            locations_module = locations_data.get(
                                "protocolSection", {}
                            ).get("contactsLocationsModule", {})
                            locations_list = locations_module.get(
                                "locations", []
                            )
                            metadata["locations"] = locations_list
                            if locations_list:
                                text_parts.append(
                                    f"\n\nLocations: {len(locations_list)} study sites"
                                )
                    except Exception as e:
                        logger.warning(
                            f"Failed to fetch locations for {id}: {e}"
                        )
                        metadata["locations"] = []

                if detail in ("all", "outcomes"):
                    try:
                        outcomes_json = await trial_getter.get_trial(
                            nct_id=id,
                            module=trial_getter.Module.OUTCOMES,
                            output_json=True,
                        )
                        outcomes_data = json.loads(outcomes_json)
                        if "error" not in outcomes_data:
                            # Extract outcomes from the protocol section
                            outcomes_module = outcomes_data.get(
                                "protocolSection", {}
                            ).get("outcomesModule", {})
                            primary_outcomes = outcomes_module.get(
                                "primaryOutcomes", []
                            )
                            secondary_outcomes = outcomes_module.get(
                                "secondaryOutcomes", []
                            )
                            metadata["outcomes"] = {
                                "primary_outcomes": primary_outcomes,
                                "secondary_outcomes": secondary_outcomes,
                            }
                            if primary_outcomes:
                                text_parts.append(
                                    f"\n\nPrimary Outcomes: {len(primary_outcomes)} measures"
                                )
                    except Exception as e:
                        logger.warning(
                            f"Failed to fetch outcomes for {id}: {e}"
                        )
                        metadata["outcomes"] = {}

                if detail in ("all", "references"):
                    try:
                        references_json = await trial_getter.get_trial(
                            nct_id=id,
                            module=trial_getter.Module.REFERENCES,
                            output_json=True,
                        )
                        references_data = json.loads(references_json)
                        if "error" not in references_data:
                            # Extract references from the protocol section
                            references_module = references_data.get(
                                "protocolSection", {}
                            ).get("referencesModule", {})
                            references_list = references_module.get(
                                "references", []
                            )
                            metadata["references"] = references_list
                            if references_list:
                                text_parts.append(
                                    f"\n\nReferences: {len(references_list)} publications"
                                )
                    except Exception as e:
                        logger.warning(
                            f"Failed to fetch references for {id}: {e}"
                        )
                        metadata["references"] = []

            # Return OpenAI MCP compliant format
            return {
                "id": id,
                "title": title,
                "text": "\n".join(text_parts),
                "url": f"https://clinicaltrials.gov/study/{id}",
                "metadata": metadata,
            }

        except Exception as e:
            logger.error(f"Trial fetch failed: {e}")
            raise SearchExecutionError("trial", e) from e

    elif domain == "variant":
        logger.debug("Fetching variant details")
        try:
            from biomcp.variants.getter import get_variant

            result_str = await get_variant(
                variant_id=id,
                output_json=True,
                include_external=True,
            )
        except Exception as e:
            logger.error(f"Variant fetch failed: {e}")
            raise SearchExecutionError("variant", e) from e

        try:
            variant_response = (
                json.loads(result_str)
                if isinstance(result_str, str)
                else result_str
            )
        except (json.JSONDecodeError, TypeError) as e:
            logger.error(f"Failed to parse variant fetch results: {e}")
            raise ResultParsingError("variant", e) from e

        # get_variant returns a list, extract the first variant
        if isinstance(variant_response, list) and variant_response:
            variant_data = variant_response[0]
        elif isinstance(variant_response, dict):
            variant_data = variant_response
        else:
            return {"error": "Variant not found"}

        # Build comprehensive text description
        text_parts = []

        # Basic variant info
        text_parts.append(f"Variant: {variant_data.get('_id', id)}")

        # Gene information
        if variant_data.get("gene"):
            gene_info = variant_data["gene"]
            text_parts.append(
                f"\nGene: {gene_info.get('symbol', 'Unknown')} ({gene_info.get('name', '')})"
            )

        # Clinical significance
        if variant_data.get("clinvar"):
            clinvar = variant_data["clinvar"]
            if clinvar.get("clinical_significance"):
                text_parts.append(
                    f"\nClinical Significance: {clinvar['clinical_significance']}"
                )
            if clinvar.get("review_status"):
                text_parts.append(f"Review Status: {clinvar['review_status']}")

        # dbSNP info
        if variant_data.get("dbsnp"):
            dbsnp = variant_data["dbsnp"]
            if dbsnp.get("rsid"):
                text_parts.append(f"\ndbSNP: {dbsnp['rsid']}")

        # CADD scores
        if variant_data.get("cadd"):
            cadd = variant_data["cadd"]
            if cadd.get("phred"):
                text_parts.append(f"\nCADD Score: {cadd['phred']}")

        # Allele frequencies
        if variant_data.get("gnomad_exome"):
            gnomad = variant_data["gnomad_exome"]
            if gnomad.get("af", {}).get("af"):
                text_parts.append(
                    f"\nGnomAD Allele Frequency: {gnomad['af']['af']:.6f}"
                )

        # External links
        if variant_data.get("external_links"):
            links = variant_data["external_links"]
            text_parts.append(
                f"\n\nExternal Resources: {len(links)} database links available"
            )

        # Check for external data indicators
        if variant_data.get("tcga"):
            text_parts.append("\n\nTCGA Data: Available")
        if variant_data.get("1000genomes"):
            text_parts.append("\n1000 Genomes Data: Available")

        # Determine best URL
        url = variant_data.get("url", "")
        if not url and variant_data.get("dbsnp", {}).get("rsid"):
            url = f"https://www.ncbi.nlm.nih.gov/snp/{variant_data['dbsnp']['rsid']}"
        elif not url:
            url = f"https://myvariant.info/v1/variant/{id}"

        # Return OpenAI MCP compliant format
        return {
            "id": variant_data.get("_id", id),
            "title": f"Variant {variant_data.get('_id', id)}",
            "text": "\n".join(text_parts),
            "url": url,
            "metadata": variant_data,
        }

    elif domain == "gene":
        logger.debug("Fetching gene details")
        try:
            client = BioThingsClient()
            gene_info = await client.get_gene_info(id)

            if not gene_info:
                return {"error": f"Gene {id} not found"}

            # Build comprehensive text description
            text_parts = []
            text_parts.append(f"Gene: {gene_info.symbol} ({gene_info.name})")

            if gene_info.entrezgene:
                text_parts.append(f"\nEntrez ID: {gene_info.entrezgene}")

            if gene_info.type_of_gene:
                text_parts.append(f"Type: {gene_info.type_of_gene}")

            if gene_info.summary:
                text_parts.append(f"\nSummary: {gene_info.summary}")

            if gene_info.alias:
                text_parts.append(f"\nAliases: {', '.join(gene_info.alias)}")

            # URL
            url = (
                f"https://www.genenames.org/data/gene-symbol-report/#!/symbol/{gene_info.symbol}"
                if gene_info.symbol
                else ""
            )

            # Return OpenAI MCP compliant format
            return {
                "id": str(gene_info.gene_id),
                "title": f"{gene_info.symbol}: {gene_info.name}"
                if gene_info.symbol and gene_info.name
                else gene_info.symbol or gene_info.name or DEFAULT_TITLE,
                "text": "\n".join(text_parts),
                "url": url,
                "metadata": gene_info.model_dump(),
            }

        except Exception as e:
            logger.error(f"Gene fetch failed: {e}")
            raise SearchExecutionError("gene", e) from e

    elif domain == "drug":
        logger.debug("Fetching drug details")
        try:
            client = BioThingsClient()
            drug_info = await client.get_drug_info(id)

            if not drug_info:
                return {"error": f"Drug {id} not found"}

            # Build comprehensive text description
            text_parts = []
            text_parts.append(f"Drug: {drug_info.name}")

            if drug_info.drugbank_id:
                text_parts.append(f"\nDrugBank ID: {drug_info.drugbank_id}")

            if drug_info.formula:
                text_parts.append(f"Formula: {drug_info.formula}")

            if drug_info.tradename:
                text_parts.append(
                    f"\nTrade Names: {', '.join(drug_info.tradename)}"
                )

            if drug_info.description:
                text_parts.append(f"\nDescription: {drug_info.description}")

            if drug_info.indication:
                text_parts.append(f"\nIndication: {drug_info.indication}")

            if drug_info.mechanism_of_action:
                text_parts.append(
                    f"\nMechanism of Action: {drug_info.mechanism_of_action}"
                )

            # URL
            url = ""
            if drug_info.drugbank_id:
                url = f"https://www.drugbank.ca/drugs/{drug_info.drugbank_id}"
            elif drug_info.pubchem_cid:
                url = f"https://pubchem.ncbi.nlm.nih.gov/compound/{drug_info.pubchem_cid}"

            # Return OpenAI MCP compliant format
            return {
                "id": drug_info.drug_id,
                "title": drug_info.name or drug_info.drug_id or DEFAULT_TITLE,
                "text": "\n".join(text_parts),
                "url": url,
                "metadata": drug_info.model_dump(),
            }

        except Exception as e:
            logger.error(f"Drug fetch failed: {e}")
            raise SearchExecutionError("drug", e) from e

    elif domain == "disease":
        logger.debug("Fetching disease details")
        try:
            client = BioThingsClient()
            disease_info = await client.get_disease_info(id)

            if not disease_info:
                return {"error": f"Disease {id} not found"}

            # Build comprehensive text description
            text_parts = []
            text_parts.append(f"Disease: {disease_info.name}")

            if disease_info.mondo and isinstance(disease_info.mondo, dict):
                mondo_id = disease_info.mondo.get("id")
                if mondo_id:
                    text_parts.append(f"\nMONDO ID: {mondo_id}")

            if disease_info.definition:
                text_parts.append(f"\nDefinition: {disease_info.definition}")

            if disease_info.synonyms:
                text_parts.append(
                    f"\nSynonyms: {', '.join(disease_info.synonyms[:5])}"
                )
                if len(disease_info.synonyms) > 5:
                    text_parts.append(
                        f"  ... and {len(disease_info.synonyms) - 5} more"
                    )

            if disease_info.phenotypes:
                text_parts.append(
                    f"\nAssociated Phenotypes: {len(disease_info.phenotypes)}"
                )

            # URL
            url = ""
            if disease_info.mondo and isinstance(disease_info.mondo, dict):
                mondo_id = disease_info.mondo.get("id")
                if mondo_id:
                    url = f"https://monarchinitiative.org/disease/{mondo_id}"

            # Return OpenAI MCP compliant format
            return {
                "id": disease_info.disease_id,
                "title": disease_info.name
                or disease_info.disease_id
                or DEFAULT_TITLE,
                "text": "\n".join(text_parts),
                "url": url,
                "metadata": disease_info.model_dump(),
            }

        except Exception as e:
            logger.error(f"Disease fetch failed: {e}")
            raise SearchExecutionError("disease", e) from e

    elif domain == "nci_organization":
        logger.debug("Fetching NCI organization details")
        try:
            from biomcp.organizations import get_organization
            from biomcp.organizations.getter import format_organization_details

            org_data = await get_organization(
                org_id=id,
                api_key=api_key,
            )

            # Format the details
            formatted_text = format_organization_details(org_data)

            # Return OpenAI MCP compliant format
            return {
                "id": id,
                "title": org_data.get("name", "Unknown Organization"),
                "text": formatted_text,
                "url": "",  # NCI doesn't provide direct URLs
                "metadata": org_data,
            }

        except Exception as e:
            logger.error(f"NCI organization fetch failed: {e}")
            raise SearchExecutionError("nci_organization", e) from e

    elif domain == "nci_intervention":
        logger.debug("Fetching NCI intervention details")
        try:
            from biomcp.interventions import get_intervention
            from biomcp.interventions.getter import format_intervention_details

            intervention_data = await get_intervention(
                intervention_id=id,
                api_key=api_key,
            )

            # Format the details
            formatted_text = format_intervention_details(intervention_data)

            # Return OpenAI MCP compliant format
            return {
                "id": id,
                "title": intervention_data.get("name", "Unknown Intervention"),
                "text": formatted_text,
                "url": "",  # NCI doesn't provide direct URLs
                "metadata": intervention_data,
            }

        except Exception as e:
            logger.error(f"NCI intervention fetch failed: {e}")
            raise SearchExecutionError("nci_intervention", e) from e

    elif domain == "nci_disease":
        logger.debug("Fetching NCI disease details")
        try:
            from biomcp.diseases import get_disease_by_id

            disease_data = await get_disease_by_id(
                disease_id=id,
                api_key=api_key,
            )

            # Build text description
            text_parts = []
            text_parts.append(
                f"Disease: {disease_data.get('name', 'Unknown Disease')}"
            )

            if disease_data.get("category"):
                text_parts.append(f"\nCategory: {disease_data['category']}")

            if disease_data.get("synonyms"):
                synonyms = disease_data["synonyms"]
                if isinstance(synonyms, list) and synonyms:
                    text_parts.append(f"\nSynonyms: {', '.join(synonyms[:5])}")
                    if len(synonyms) > 5:
                        text_parts.append(
                            f"  ... and {len(synonyms) - 5} more"
                        )

            if disease_data.get("codes"):
                codes = disease_data["codes"]
                if isinstance(codes, dict):
                    code_items = [
                        f"{system}: {code}" for system, code in codes.items()
                    ]
                    if code_items:
                        text_parts.append(f"\nCodes: {', '.join(code_items)}")

            # Return OpenAI MCP compliant format
            return {
                "id": id,
                "title": disease_data.get(
                    "name",
                    disease_data.get("preferred_name", "Unknown Disease"),
                ),
                "text": "\n".join(text_parts),
                "url": "",  # NCI doesn't provide direct URLs
                "metadata": disease_data,
            }

        except Exception as e:
            logger.error(f"NCI disease fetch failed: {e}")
            raise SearchExecutionError("nci_disease", e) from e

    # Note: nci_biomarker doesn't support fetching by ID, only searching

    # OpenFDA domains
    elif domain == "fda_adverse":
        from biomcp.openfda import get_adverse_event

        result = await get_adverse_event(id, api_key=api_key)
        return {
            "id": id,
            "title": f"FDA Adverse Event Report {id}",
            "text": result,
            "url": "",
            "metadata": {"report_id": id, "domain": "fda_adverse"},
        }

    elif domain == "fda_label":
        from biomcp.openfda import get_drug_label

        result = await get_drug_label(id, api_key=api_key)
        return {
            "id": id,
            "title": f"FDA Drug Label {id}",
            "text": result,
            "url": "",
            "metadata": {"set_id": id, "domain": "fda_label"},
        }

    elif domain == "fda_device":
        from biomcp.openfda import get_device_event

        result = await get_device_event(id, api_key=api_key)
        return {
            "id": id,
            "title": f"FDA Device Event {id}",
            "text": result,
            "url": "",
            "metadata": {"mdr_report_key": id, "domain": "fda_device"},
        }

    elif domain == "fda_approval":
        from biomcp.openfda import get_drug_approval

        result = await get_drug_approval(id, api_key=api_key)
        return {
            "id": id,
            "title": f"FDA Drug Approval {id}",
            "text": result,
            "url": "",
            "metadata": {"application_number": id, "domain": "fda_approval"},
        }

    elif domain == "fda_recall":
        from biomcp.openfda import get_drug_recall

        result = await get_drug_recall(id, api_key=api_key)
        return {
            "id": id,
            "title": f"FDA Drug Recall {id}",
            "text": result,
            "url": "",
            "metadata": {"recall_number": id, "domain": "fda_recall"},
        }

    elif domain == "fda_shortage":
        from biomcp.openfda import get_drug_shortage

        result = await get_drug_shortage(id, api_key=api_key)
        return {
            "id": id,
            "title": f"FDA Drug Shortage - {id}",
            "text": result,
            "url": "",
            "metadata": {"drug": id, "domain": "fda_shortage"},
        }

    # Invalid domain
    raise InvalidDomainError(domain, VALID_DOMAINS)


# Internal function for unified search
async def _unified_search(  # noqa: C901
    query: str,
    max_results_per_domain: int = MAX_RESULTS_PER_DOMAIN_DEFAULT,
    domains: list[str] | None = None,
    explain_query: bool = False,
) -> dict:
    """Internal unified search implementation.

    Parses the unified query language and routes to appropriate domain tools.
    Supports field-based syntax like 'gene:BRAF AND trials.phase:3'.

    Args:
        query: Unified query string with field syntax
        max_results_per_domain: Limit results per domain
        domains: Optional list to filter which domains to search
        explain_query: If True, return query parsing explanation

    Returns:
        Dictionary with results organized by domain

    Raises:
        QueryParsingError: If query cannot be parsed
        SearchExecutionError: If search execution fails
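
    Example (illustrative call using the field syntax above):
        results = await _unified_search(
            query="gene:BRAF AND trials.phase:3",
            max_results_per_domain=5,
        )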
    """
    logger.info(f"Unified search with query: {query}")
    # Parse the query
    try:
        parser = QueryParser()
        parsed = parser.parse(query)
    except Exception as e:
        logger.error(f"Failed to parse query: {e}")
        raise QueryParsingError(query, e) from e

    # Route to appropriate tools
    router = QueryRouter()
    plan = router.route(parsed)

    # Filter domains if specified
    if domains:
        filtered_tools = []
        for tool in plan.tools_to_call:
            if (
                ("article" in tool and "articles" in domains)
                or ("trial" in tool and "trials" in domains)
                or ("variant" in tool and "variants" in domains)
            ):
                filtered_tools.append(tool)
        plan.tools_to_call = filtered_tools

    # Return explanation if requested
    if explain_query:
        return {
            "original_query": query,
            "parsed_structure": {
                "cross_domain_fields": parsed.cross_domain_fields,
                "domain_specific_fields": parsed.domain_specific_fields,
                "terms": [
                    {
                        "field": term.field,
                        "operator": term.operator.value,
                        "value": term.value,
                        "domain": term.domain,
                    }
                    for term in parsed.terms
                ],
            },
            "routing_plan": {
                "tools_to_call": plan.tools_to_call,
                "field_mappings": plan.field_mappings,
            },
            "schema": parser.get_schema(),
        }

    # Execute the search plan
    try:
        results = await execute_routing_plan(plan, output_json=True)
    except Exception as e:
        logger.error(f"Failed to execute search plan: {e}")
        raise SearchExecutionError("unified", e) from e

    # Format unified results - collect all results into a single array
    all_results = []

    for domain, result_str in results.items():
        if isinstance(result_str, dict) and "error" in result_str:
            logger.warning(f"Error in domain {domain}: {result_str['error']}")
            continue

        try:
            data = (
                json.loads(result_str)
                if isinstance(result_str, str)
                else result_str
            )

            # Get the appropriate handler for formatting
            handler_class = get_domain_handler(
                domain.rstrip("s")
            )  # Remove trailing 's'

            # Process and format each result
            # Handle both list format and dict format (for articles with cBioPortal data)
            items_to_process = []
            cbioportal_summary = None

            if isinstance(data, list):
                items_to_process = data[:max_results_per_domain]
            elif isinstance(data, dict):
                # Handle unified search format with cBioPortal data
                if "articles" in data:
                    items_to_process = data["articles"][
                        :max_results_per_domain
                    ]
                    cbioportal_summary = data.get("cbioportal_summary")
                else:
                    # Single item dict
                    items_to_process = [data]

            # Add cBioPortal summary as first result if available
            if cbioportal_summary and domain == "articles":
                try:
                    # Extract gene name from parsed query or summary
                    gene_name = parsed.cross_domain_fields.get("gene", "")
                    if not gene_name and "Summary for " in cbioportal_summary:
                        # Try to extract from summary title
                        import re

                        match = re.search(
                            r"Summary for (\w+)", cbioportal_summary
                        )
                        if match:
                            gene_name = match.group(1)

                    cbio_result = {
                        "id": f"cbioportal_summary_{gene_name or 'gene'}",
                        "title": f"cBioPortal Summary for {gene_name or 'Gene'}",
                        "text": cbioportal_summary[:5000],  # Limit text length
                        "url": f"https://www.cbioportal.org/results?gene_list={gene_name}"
                        if gene_name
                        else "",
                    }
                    all_results.append(cbio_result)
                except Exception as e:
                    logger.warning(f"Failed to format cBioPortal summary: {e}")

            for item in items_to_process:
                try:
                    formatted_result = handler_class.format_result(item)
                    # Ensure OpenAI MCP format
                    openai_result = {
                        "id": formatted_result.get("id", ""),
                        "title": formatted_result.get("title", DEFAULT_TITLE),
                        "text": formatted_result.get(
                            "snippet", formatted_result.get("text", "")
                        ),
                        "url": formatted_result.get("url", ""),
                    }
                    # Note: For unified search, we can optionally include domain in metadata
                    # This helps distinguish between result types
                    all_results.append(openai_result)
                except Exception as e:
                    logger.warning(
                        f"Failed to format result in domain {domain}: {e}"
                    )
                    continue

        except (json.JSONDecodeError, TypeError, ValueError) as e:
            logger.warning(f"Failed to parse results for domain {domain}: {e}")
            continue

    logger.info(
        f"Unified search completed with {len(all_results)} total results"
    )

    # Return OpenAI MCP compliant format
    return {"results": all_results}

```