Update mcp/orchestrator.py
mcp/orchestrator.py  +3 -3
```diff
@@ -21,7 +21,7 @@ from mcp.umls import lookup_umls
 from mcp.openfda import fetch_drug_safety
 from mcp.disgenet import disease_to_genes
 from mcp.clinicaltrials import fetch_clinical_trials
-from mcp.cbio import
+from mcp.cbio import fetch_cbio
 from mcp.openai_utils import ai_summarize, ai_qa
 from mcp.gemini import gemini_summarize, gemini_qa
 
@@ -117,7 +117,7 @@ async def orchestrate_search(query: str, llm: str = _DEFAULT_LLM) -> Dict[str, A
     gene_task = asyncio.create_task(_gene_enrichment(seeds))
     trials_t = asyncio.create_task(fetch_clinical_trials(query, max_studies=10))
     cbio_t = asyncio.create_task(
-
+        fetch_cbio(seeds[0]) if seeds else asyncio.sleep(0, result=[])
     )
 
     umls_list, fda_list, gene_data, trials, variants = await asyncio.gather(
@@ -173,7 +173,7 @@ async def orchestrate_search(query: str, llm: str = _DEFAULT_LLM) -> Dict[str, A
 
 async def answer_ai_question(question: str, context: str, llm: str = _DEFAULT_LLM) -> Dict[str, str]:
     """
-    Follow-up QA: uses the
+    Follow-up QA: uses the chosen LLM’s QA function.
     """
     _, qa_fn, _ = _llm_router(llm)
     prompt = f"Q: {question}\nContext: {context}\nA:"
```
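The first hunk completes an import line that was cut off after `import` (invalid Python as rendered), so the module now pulls in `fetch_cbio` by name. The function body is outside this diff; below is a hypothetical sketch of the shape the orchestrator appears to expect — an async callable that takes one gene seed, resolves to a list, and never raises. The httpx client and the cBioPortal endpoint path are illustrative assumptions, not code from the repo.

```python
# Hypothetical sketch of mcp/cbio.py -- NOT from this diff.
# Assumes httpx for async HTTP; the endpoint and response shape
# are illustrative, not verified against the actual module.
from typing import Any, Dict, List

import httpx

_CBIO_BASE = "https://www.cbioportal.org/api"  # public cBioPortal instance

async def fetch_cbio(gene_symbol: str) -> List[Dict[str, Any]]:
    """Return a list of records for one gene seed, or [] on any failure."""
    url = f"{_CBIO_BASE}/genes/{gene_symbol}"
    try:
        async with httpx.AsyncClient(timeout=10) as client:
            resp = await client.get(url)
            resp.raise_for_status()
            data = resp.json()
            return data if isinstance(data, list) else [data]
    except httpx.HTTPError:
        return []  # the orchestrator treats cBioPortal as best-effort
```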
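The second hunk fills the empty argument to `asyncio.create_task(...)` and uses a pattern worth noting: `create_task` needs an awaitable either way, so when `seeds` is empty the code schedules `asyncio.sleep(0, result=[])` — `asyncio.sleep` resolves to its `result` argument, so this is a built-in no-op coroutine that immediately yields `[]`. That keeps `cbio_t` always defined and lets the five-way `asyncio.gather` on line 123 stay unconditional. A self-contained demo of the same fallback:

```python
# Demonstrates the fallback used at line 120: asyncio.sleep(delay, result=...)
# resolves to `result`, so sleep(0, result=[]) is a zero-cost stand-in
# coroutine that lets gather() always receive the same number of tasks.
import asyncio

async def fetch_variants(seed: str) -> list:
    await asyncio.sleep(0.1)  # stand-in for a real network call
    return [f"{seed}-variant"]

async def main(seeds: list[str]) -> None:
    # Same shape as the orchestrator: conditional coroutine, unconditional task.
    cbio_t = asyncio.create_task(
        fetch_variants(seeds[0]) if seeds else asyncio.sleep(0, result=[])
    )
    print(await cbio_t)

asyncio.run(main(["TP53"]))  # -> ['TP53-variant']
asyncio.run(main([]))        # -> []
```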
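The last hunk completes the truncated docstring, and the line below it shows the dispatch convention it documents: `_, qa_fn, _ = _llm_router(llm)` unpacks a three-tuple and keeps only the QA callable. The router body is outside this diff; here is a minimal sketch consistent with the imports at lines 25–26, where the tuple layout `(summarize_fn, qa_fn, engine_name)` is an assumption inferred from that unpacking:

```python
# Hypothetical sketch of _llm_router -- the real body is not in this diff.
# What IS visible: it takes the `llm` string, returns a 3-tuple, and the
# second slot is a QA callable (`_, qa_fn, _ = _llm_router(llm)`).
from typing import Callable, Tuple

from mcp.openai_utils import ai_summarize, ai_qa
from mcp.gemini import gemini_summarize, gemini_qa

_DEFAULT_LLM = "openai"  # assumed default; the value is not shown in the diff

def _llm_router(llm: str = _DEFAULT_LLM) -> Tuple[Callable, Callable, str]:
    """Map an engine name to (summarize_fn, qa_fn, engine_name)."""
    if llm.lower().startswith("gemini"):
        return gemini_summarize, gemini_qa, "gemini"
    return ai_summarize, ai_qa, "openai"
```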