| """ | |
| Advanced Agentic System Interface | |
| ------------------------------- | |
| Provides an interface to interact with the autonomous agent system | |
| using local LLM for improved performance. | |
| """ | |

import asyncio
import json
import logging
from datetime import datetime
from typing import Any, Dict, List

import gradio as gr

from agentic_system import AgenticSystem
from orchestrator import AgentOrchestrator
from reasoning.unified_engine import UnifiedReasoningEngine
from team_management import TeamManager

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class AgentInterface:
    """Interface for the agentic system."""

    def __init__(self):
        """Initialize the interface components."""
        self.orchestrator = AgentOrchestrator()
        self.reasoning_engine = UnifiedReasoningEngine(
            min_confidence=0.7,
            parallel_threshold=3,
            learning_rate=0.1
        )

    async def process_query(self, message: str) -> str:
        """Process a user query through the reasoning system."""
        try:
            # Prepare the reasoning context
            context = {
                'timestamp': datetime.now().isoformat(),
                'objective': 'Provide helpful and accurate responses',
                'mode': 'analytical'
            }

            # Get a response from the reasoning engine
            result = await self.reasoning_engine.reason(
                query=message,
                context=context
            )

            if result.success:
                return result.answer
            return "Error: Unable to process query. Please try again."
        except Exception as e:
            logger.error(f"Error processing query: {e}")
            return f"Error: {e}"

# Initialize the interface
interface = AgentInterface()

# Create the Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # AI Reasoning System

    This system uses advanced reasoning strategies, including a local LLM, for improved performance.

    Note: the first query may take a few seconds while the model loads.
    """)

    with gr.Row():
        with gr.Column(scale=4):
            input_text = gr.Textbox(
                label="Your question",
                placeholder="Ask me anything...",
                lines=2
            )
            output_text = gr.Textbox(
                label="Response",
                lines=10,
                interactive=False
            )
            submit_btn = gr.Button("Ask")
            clear_btn = gr.Button("Clear")

        with gr.Column(scale=1):
            gr.Markdown("""
            ### Example Questions:
            - What are the implications of artificial intelligence on society?
            - How does climate change affect global ecosystems?
            - What are the philosophical implications of quantum mechanics?
            """)

    # Set up event handlers
    submit_btn.click(
        fn=interface.process_query,
        inputs=input_text,
        outputs=output_text
    )
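    # Note: Gradio accepts coroutine functions as event callbacks, so the async
    # process_query method can be wired to the click event directly.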
    clear_btn.click(
        fn=lambda: ("", ""),
        inputs=None,
        outputs=[input_text, output_text]
    )

# Launch the interface
if __name__ == "__main__":
    demo.launch()