File size: 678 Bytes
718aa48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
import asyncio

async def query_llm_agent(name: str, prompt: str, settings: dict) -> str:
    """Mock a single LLM agent call.

    Stands in for a real provider API call (OpenAI, Claude, etc.):
    simulates network latency, then echoes the prompt reversed so each
    agent produces a distinguishable, deterministic reply.
    """
    await asyncio.sleep(0.5)  # pretend we waited on the network
    reversed_prompt = prompt[::-1]
    return f"[{name}] thinks: '{reversed_prompt}'"

async def query_all_llms(prompt: str, settings: dict) -> list:
    """Fan the prompt out to every mock agent concurrently.

    Returns one response string per agent, in the same order as the
    agent names (gather preserves input order).
    """
    agent_names = ("LLM-A", "LLM-B", "LLM-C")
    responses = await asyncio.gather(
        *(query_llm_agent(agent, prompt, settings) for agent in agent_names)
    )
    return list(responses)

# Wrapper for sync Flask usage
def query_all_llms_sync(prompt: str, settings: dict) -> list:
    """Blocking bridge for synchronous callers (e.g. Flask views).

    Spins up a fresh event loop per call via asyncio.run and returns
    the list of agent responses.
    """
    responses = asyncio.run(query_all_llms(prompt, settings))
    return responses