Update mcp/orchestrator.py
Browse files- mcp/orchestrator.py +84 -135
mcp/orchestrator.py
CHANGED
@@ -1,150 +1,99 @@
|
|
1 |
-
|
|
|
|
|
2 |
"""
|
3 |
-
mcp/orchestrator.py β MedGenesis v5
|
4 |
-
βββββββββββββββββββββββββββββββββββ
|
5 |
-
Asynchronously fans out across >10 open biomedical APIs, then returns
|
6 |
-
one consolidated dictionary for the Streamlit UI.
|
7 |
-
|
8 |
-
Public-keyβfree by default:
|
9 |
-
β’ MyGene.info, Ensembl REST, Open Targets GraphQL
|
10 |
-
β’ PubMed (E-utils), arXiv
|
11 |
-
β’ UMLS, openFDA, DisGeNET
|
12 |
-
β’ Expression Atlas, ClinicalTrials.gov (+ WHO ICTRP fallback)
|
13 |
-
β’ cBioPortal, DrugCentral, PubChem
|
14 |
-
|
15 |
-
If you add secrets **MYGENE_KEY**, **OT_KEY**, **CBIO_KEY** or
|
16 |
-
**NCBI_EUTILS_KEY**, they are auto-detected and used β otherwise the code
|
17 |
-
runs key-less.
|
18 |
-
|
19 |
-
Returned payload keys
|
20 |
-
βββββββββββββββββββββ
|
21 |
-
papers, ai_summary, llm_used, umls, drug_safety,
|
22 |
-
genes_rich, expr_atlas, drug_meta, chem_info,
|
23 |
-
gene_disease, clinical_trials, cbio_variants
|
24 |
-
"""
|
25 |
-
|
26 |
-
from __future__ import annotations
|
27 |
import asyncio
|
28 |
from typing import Dict, Any, List
|
29 |
|
30 |
-
|
31 |
-
from mcp.
|
32 |
-
from mcp.
|
33 |
-
|
34 |
-
|
35 |
-
from mcp.
|
36 |
-
from mcp.
|
37 |
-
from mcp.
|
38 |
-
|
39 |
-
from mcp.
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
from mcp.pubchem_ext import fetch_compound
|
49 |
-
|
50 |
-
# ββ Large-language model helpers ββββββββββββββββββββββββββββββββββββ
|
51 |
-
from mcp.openai_utils import ai_summarize, ai_qa
|
52 |
-
from mcp.gemini import gemini_summarize, gemini_qa
|
53 |
-
|
54 |
-
_LLM_DEFAULT = "openai"
|
55 |
-
|
56 |
-
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
57 |
-
# LLM router
|
58 |
-
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
59 |
-
def _llm_router(name: str):
    """Return (summarise_fn, qa_fn, engine_tag) for the requested engine."""
    engine = name.lower()
    if engine == "gemini":
        return gemini_summarize, gemini_qa, "gemini"
    # Every other value (including unknown names) falls back to OpenAI.
    return ai_summarize, ai_qa, "openai"
|
64 |
|
65 |
|
66 |
-
#
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
)
|
101 |
|
102 |
-
#
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
# 4 Other single-shot APIs --------------------------------------
|
111 |
-
gene_dis = await disease_to_genes(query)
|
112 |
-
trials = await search_trials(query, max_studies=20)
|
113 |
-
|
114 |
-
# Cancer variants for first 3 gene symbols (quota safety)
|
115 |
-
cbio_jobs = [fetch_cbio(g.get("symbol", "")) for g in genes[:3]]
|
116 |
-
cbio_vars = []
|
117 |
-
if cbio_jobs:
|
118 |
-
tmp = await asyncio.gather(*cbio_jobs, return_exceptions=True)
|
119 |
-
cbio_vars = [v for v in tmp if v]
|
120 |
-
|
121 |
-
# 5 AI summary ---------------------------------------------------
|
122 |
-
summarise, _, engine_tag = _llm_router(llm)
|
123 |
-
ai_summary = await summarise(corpus) if corpus else ""
|
124 |
-
|
125 |
-
# 6 Return payload ----------------------------------------------
|
126 |
return {
|
127 |
-
"papers"
|
128 |
-
"
|
129 |
-
"
|
130 |
-
"
|
131 |
-
"
|
132 |
-
"
|
133 |
-
"
|
134 |
-
"
|
135 |
-
"chem_info" : chems,
|
136 |
-
"gene_disease" : gene_dis,
|
137 |
-
"clinical_trials" : trials,
|
138 |
-
"cbio_variants" : cbio_vars,
|
139 |
}
|
140 |
|
141 |
|
142 |
-
|
143 |
-
|
144 |
-
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
145 |
-
async def answer_ai_question(question: str, *,
|
146 |
-
context: str,
|
147 |
-
llm: str = _LLM_DEFAULT) -> Dict[str, str]:
|
148 |
-
"""Return {"answer": str} using chosen LLM."""
|
149 |
_, qa_fn, _ = _llm_router(llm)
|
150 |
-
|
|
|
|
|
|
|
|
|
|
1 |
+
# ββββββββββββββββββββββββ mcp/orchestrator.py βββββββββββββββββββββββββ
|
2 |
+
"""Dual-LLM orchestrator coordinating literature → annotation → trials.
|
3 |
+
Adds gene/variant enrichment with MyGene.info → Ensembl → OpenTargets → cBio.
|
4 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
import asyncio
|
6 |
from typing import Dict, Any, List
|
7 |
|
8 |
+
from mcp.arxiv import fetch_arxiv
|
9 |
+
from mcp.pubmed import fetch_pubmed
|
10 |
+
from mcp.nlp import extract_keywords
|
11 |
+
from mcp.umls import lookup_umls
|
12 |
+
from mcp.openfda import fetch_drug_safety
|
13 |
+
from mcp.clinicaltrials import search_trials
|
14 |
+
from mcp.gene_hub import resolve_gene # MyGeneβEnsemblβOT
|
15 |
+
from mcp.cbio import fetch_cbio_variants
|
16 |
+
|
17 |
+
from mcp.openai_utils import ai_summarize, ai_qa
|
18 |
+
from mcp.gemini import gemini_summarize, gemini_qa
|
19 |
+
|
20 |
+
_DEF = "openai"
|
21 |
+
|
22 |
+
# ------------ light LLM router ------------
|
23 |
+
|
24 |
+
def _llm_router(llm: str):
    """Map an engine name to its (summarise_fn, qa_fn, engine_tag) triple."""
    engine = llm.lower()
    if engine == "gemini":
        return gemini_summarize, gemini_qa, "gemini"
    # Anything that is not "gemini" defaults to the OpenAI helpers.
    return ai_summarize, ai_qa, "openai"
|
28 |
|
29 |
|
30 |
+
# ---------------- gene / variant enrichment --------------------------
|
31 |
+
|
32 |
+
async def _enrich_gene_block(keywords: List[str]) -> Dict[str, Any]:
    """Resolve *keywords* to gene records and attach tumour variants.

    Returns ``{"genes": [gene, ...], "variants": {keyword: [variant, ...]}}``.
    Only keywords that resolve to a gene receive a ``variants`` entry; a
    failed cBioPortal lookup maps to an empty list instead of raising.
    """
    # Resolve all keywords concurrently instead of one await per loop
    # iteration (the original comment claimed "fire & forget" but the
    # code actually serialized every lookup).  Resolution errors still
    # propagate to the caller, as before.
    resolved = await asyncio.gather(*(resolve_gene(kw) for kw in keywords))
    hits = [(kw, g) for kw, g in zip(keywords, resolved) if g]
    genes: List[Dict] = [g for _, g in hits]

    # Variant lookups are best-effort: any exception becomes [].
    raw = await asyncio.gather(
        *(fetch_cbio_variants(kw) for kw, _ in hits), return_exceptions=True
    )
    variants: Dict[str, List[Dict]] = {
        kw: ([] if isinstance(res, BaseException) else res)
        for (kw, _), res in zip(hits, raw)
    }
    return {"genes": genes, "variants": variants}
|
45 |
+
|
46 |
+
|
47 |
+
# ---------------- orchestrator entryβpoints --------------------------
|
48 |
+
|
49 |
+
async def orchestrate_search(query: str, llm: str = _DEF) -> Dict[str, Any]:
    """Fan out search + enrichment for *query* and build the UI payload.

    Parameters
    ----------
    query : free-text search string entered by the user.
    llm   : "openai" (default) or "gemini" — selects the summariser engine.

    Returns a dict with keys: papers, umls, drug_safety, genes, variants,
    clinical_trials, ai_summary, llm_used.
    """
    # literature ------------------------------------------------------
    arxiv_task = asyncio.create_task(fetch_arxiv(query, max_results=20))
    pubmed_task = asyncio.create_task(fetch_pubmed(query, max_results=20))
    paper_lists = await asyncio.gather(arxiv_task, pubmed_task)
    # Flatten with a comprehension; sum(lists, []) is quadratic.
    papers = [p for lst in paper_lists for p in lst]

    # NLP keyword extraction (blob capped so the pass stays cheap) ----
    blob = " ".join(p.get("summary", "") for p in papers)[:60_000]
    keywords = extract_keywords(blob)[:12]

    # enrichment (in parallel) ---------------------------------------
    umls_jobs = [lookup_umls(k) for k in keywords]
    fda_jobs = [fetch_drug_safety(k) for k in keywords]
    gene_task = asyncio.create_task(_enrich_gene_block(keywords))
    trials_task = asyncio.create_task(search_trials(query, max_studies=20))

    # NOTE(review): umls/fda may contain Exception instances because of
    # return_exceptions=True — presumably the UI tolerates them; verify.
    umls, fda, gene_data, trials = await asyncio.gather(
        asyncio.gather(*umls_jobs, return_exceptions=True),
        asyncio.gather(*fda_jobs, return_exceptions=True),
        gene_task,
        trials_task,
    )

    # summarise via LLM — degrade gracefully if the API fails ---------
    summarise, _, engine_name = _llm_router(llm)
    try:
        summary = await summarise(blob)
    except Exception:
        summary = "LLM summarisation unavailable."  # graceful fallback

    return {
        "papers": papers,
        "umls": umls,
        "drug_safety": fda,
        "genes": gene_data["genes"],
        "variants": gene_data["variants"],
        "clinical_trials": trials,
        "ai_summary": summary,
        "llm_used": engine_name,
    }
|
90 |
|
91 |
|
92 |
+
async def answer_ai_question(question: str, *, context: str, llm: str = _DEF) -> Dict[str, str]:
    """Run a follow-up Q&A turn through the selected LLM.

    Returns ``{"answer": <text>}``; an LLM failure is mapped to a
    human-readable fallback message instead of raising.
    """
    _, qa_fn, _ = _llm_router(llm)
    try:
        reply = await qa_fn(question, context)
    except Exception:
        reply = "LLM unavailable or quota exceeded."
    return {"answer": reply}
|