import asyncio

async def query_llm_agent(name: str, prompt: str, settings: dict) -> str:
    # Placeholder for real LLM API calls (OpenAI, Claude, etc.)
    await asyncio.sleep(0.5)  # Simulate network latency
    return f"[{name}] thinks: '{prompt[::-1]}'"  # Reverse input for mock
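
# A real implementation might replace the mock body above with a provider
# call. Sketch only, assuming the openai>=1.0 async client; the model name
# and settings key are placeholder assumptions to adapt:
#
#   from openai import AsyncOpenAI
#   client = AsyncOpenAI()
#
#   async def query_llm_agent(name: str, prompt: str, settings: dict) -> str:
#       resp = await client.chat.completions.create(
#           model=settings.get("model", "gpt-4o-mini"),
#           messages=[{"role": "user", "content": prompt}],
#       )
#       return f"[{name}]: {resp.choices[0].message.content}"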

async def query_all_llms(prompt: str, settings: dict) -> list:
    agents = ["LLM-A", "LLM-B", "LLM-C"]
    tasks = [query_llm_agent(agent, prompt, settings) for agent in agents]
    results = await asyncio.gather(*tasks)
    return results
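
# Note: asyncio.gather() runs all three queries concurrently but returns
# results in the same order as the tasks list, so results[i] always
# corresponds to agents[i] regardless of which agent finished first.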

# Wrapper for synchronous callers (e.g. a classic sync Flask view function)
def query_all_llms_sync(prompt: str, settings: dict) -> list:
    return asyncio.run(query_all_llms(prompt, settings))
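
# Minimal usage sketch; the prompt string is just an illustrative example.
# Caveat: asyncio.run() creates a fresh event loop and raises RuntimeError
# if one is already running in the current thread, so this wrapper suits a
# sync Flask app but not an async framework or a Jupyter cell.
if __name__ == "__main__":
    for answer in query_all_llms_sync("Is concurrency hard?", settings={}):
        print(answer)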