"""
BasicAgent - Simple fallback agent with LLM integration and rule-based answers.

This agent provides basic question answering capabilities using LLM API calls
with fallback to rule-based responses when API access is unavailable.
"""

import os
import requests
import time
from typing import Optional

from config import (
    LLAMA_API_URL, HF_API_TOKEN, HEADERS, MAX_RETRIES, RETRY_DELAY
)
from utils.text_processing import clean_llm_response, extract_final_answer


class BasicAgent:
    """
    Simple agent with LLM integration and rule-based fallbacks.
    
    Features:
    - Direct LLM API integration
    - Response cleaning and answer extraction
    - Rule-based fallback answers
    - Simple prompt formatting
    """
    
    def __init__(self):
        print("BasicAgent initialized.")
        # Set up LLM API access
        self.hf_api_url = LLAMA_API_URL
        self.headers = HEADERS
        
        # Set up caching for responses
        self.cache = {}
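        # Note: this cache is unbounded, keyed on the full prompt string,
        # and lives only for the lifetime of this agent instance.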
    
    def query_llm(self, prompt: str) -> str:
        """Send a prompt to the LLM API and return the response."""
        # Check cache first
        if prompt in self.cache:
            print("Using cached response")
            return self.cache[prompt]
            
        if not HF_API_TOKEN:
            # Fallback to rule-based approach if no API token
            return self.rule_based_answer(prompt)
            
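        # The payload follows the Hugging Face Inference API text-generation
        # schema: "inputs" carries the prompt string and "parameters" holds
        # the sampling options passed through to the model.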
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 300,
                "temperature": 0.5,
                "top_p": 0.8,
                "do_sample": True
            }
        }
        
        for attempt in range(MAX_RETRIES):
            try:
                response = requests.post(
                    self.hf_api_url, 
                    headers=self.headers, 
                    json=payload, 
                    timeout=30
                )
                response.raise_for_status()
                result = response.json()
                
                # Extract the generated text from the response
                if isinstance(result, list) and len(result) > 0:
                    generated_text = result[0].get("generated_text", "")
                    # Clean up the response to get just the answer
                    clean_response = self.clean_response(generated_text, prompt)
                    # Cache the response
                    self.cache[prompt] = clean_response
                    return clean_response
                return "I couldn't generate a proper response."
                
            except Exception as e:
                print(f"Attempt {attempt+1}/{MAX_RETRIES} failed: {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    time.sleep(RETRY_DELAY)

        # All retries failed (or MAX_RETRIES <= 0): fall back to the
        # rule-based method so the declared str return type always holds.
        return self.rule_based_answer(prompt)
    
    def clean_response(self, response: str, prompt: str) -> str:
        """Clean up the LLM response to extract the answer."""
        return clean_llm_response(response, prompt)
    
    def rule_based_answer(self, question: str) -> str:
        """Fallback method using rule-based answers for common question types."""
        question_lower = question.lower()
        
        # Simple pattern matching for common question types
        if "what is" in question_lower or "define" in question_lower:
            if "agent" in question_lower:
                return "An agent is an autonomous entity that observes and acts upon an environment using sensors and actuators, usually to achieve specific goals."
            if "gaia" in question_lower:
                return "GAIA (General AI Assistant) is a framework for creating and evaluating AI assistants that can perform a wide range of tasks."
            if "llm" in question_lower or "large language model" in question_lower:
                return "A Large Language Model (LLM) is a neural network trained on vast amounts of text data to understand and generate human language."
            if "rag" in question_lower or "retrieval" in question_lower:
                return "RAG (Retrieval-Augmented Generation) combines retrieval of relevant information with generation capabilities of language models."
        
        if "how to" in question_lower:
            return "To accomplish this task, you should first understand the requirements, then implement a solution step by step, and finally test your implementation."
        
        if "example" in question_lower:
            return "Here's an example implementation that demonstrates the concept in a practical manner."
        
        if "evaluate" in question_lower or "criteria" in question_lower:
            return "Evaluation criteria for agents typically include accuracy, relevance, factual correctness, conciseness, ability to follow instructions, and transparency in reasoning."
        
        # More specific fallback answers
        if "tools" in question_lower:
            return "Tools for AI agents include web search, content extraction, API connections, and various knowledge retrieval mechanisms."
        if "chain" in question_lower:
            return "Chain-of-thought reasoning allows AI agents to break down complex problems into sequential steps, improving accuracy and transparency."
        if "purpose" in question_lower or "goal" in question_lower:
            return "The purpose of AI agents is to assist users by answering questions, performing tasks, and providing helpful information while maintaining ethical standards."
        
        # Default response for truly unmatched questions
        return "This question relates to AI agent capabilities. To provide a more precise answer, I would need additional information or context about the specific aspect of AI agents you're interested in."
        
    def format_prompt(self, question: str) -> str:
        """Format the question into a proper prompt for the LLM."""
        return f"""You are an intelligent AI assistant. Please answer the following question accurately and concisely:

Question: {question}

Answer:"""
    
    def __call__(self, question: str) -> str:
        """Main execution method for the BasicAgent."""
        print(f"BasicAgent received question: {question}...")
        
        try:
            # Format the question as a prompt
            prompt = self.format_prompt(question)
            
            # Query the LLM
            answer = self.query_llm(prompt)
            
            # Extract final answer
            clean_answer = extract_final_answer(answer)
            
            print(f"BasicAgent returning answer: {clean_answer}...")
            return clean_answer
            
        except Exception as e:
            print(f"Error in BasicAgent: {e}")
            # Fallback to the rule-based method if anything goes wrong
            fallback_answer = self.rule_based_answer(question)
            print(f"BasicAgent returning fallback answer: {fallback_answer}...")
            return fallback_answer
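

# ---------------------------------------------------------------------------
# Minimal usage sketch. It assumes the `config` and `utils.text_processing`
# modules imported above are importable from the working directory; if
# HF_API_TOKEN is unset, the calls exercise the rule-based fallback path
# instead of the LLM API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    agent = BasicAgent()
    # First call: answered by the LLM if a token is configured, otherwise
    # by rule_based_answer().
    print(agent("What is RAG?"))
    # Second identical call: served from the in-memory cache when the LLM
    # path was used.
    print(agent("What is RAG?"))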