riokorb committed on
Commit
8ac926e
·
verified ·
1 Parent(s): 550a2ea

Revert app.py to original BasicAgent design for regression testing

Browse files
Files changed (1) hide show
  1. app.py +3 -92
app.py CHANGED
@@ -23,102 +23,13 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
23
 
24
  # --- Basic Agent Definition ---
25
  class BasicAgent:
26
- """A LlamaIndex-based agent."""
27
  def __init__(self):
28
  print("BasicAgent initialized.")
29
- # Initialize the core components
30
- self.llm = self._initialize_llm()
31
-
32
- # Import get_tools from agent.py here to avoid circular imports
33
- from agent import get_tools
34
- self.tools = get_tools()
35
-
36
- self.memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
37
- # Build the agent
38
- self.agent = self._build_agent()
39
- print("Agent setup complete.")
40
-
41
- def _initialize_llm(self) -> LLM:
42
- """Initialize the LLM based on configuration."""
43
- provider = os.getenv("DEFAULT_LLM_PROVIDER", "gemini").lower()
44
-
45
- if provider == "gemini":
46
- api_key = os.getenv("GOOGLE_API_KEY")
47
- if not api_key:
48
- raise ValueError("GOOGLE_API_KEY not found in environment variables")
49
-
50
- return Gemini(
51
- model_name="models/gemini-1.5-flash",
52
- api_key=api_key,
53
- temperature=0.1,
54
- top_p=0.95,
55
- max_tokens=1024,
56
- )
57
-
58
- elif provider == "huggingface":
59
- api_key = os.getenv("HUGGINGFACE_API_KEY")
60
- if not api_key:
61
- raise ValueError("HUGGINGFACE_API_KEY not found in environment variables")
62
-
63
- return HuggingFaceInferenceAPI(
64
- model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
65
- api_key=api_key,
66
- temperature=0.1,
67
- max_tokens=1024,
68
- )
69
- else:
70
- raise ValueError(f"Unsupported LLM provider: {provider}")
71
-
72
- def _build_agent(self) -> ReActAgent:
73
- """Build and return the agent."""
74
- # Load system prompt from file and append output format requirements
75
- try:
76
- with open("system_prompt.txt", "r", encoding="utf-8") as f:
77
- system_prompt = f.read()
78
- # Append output format to system prompt
79
- system_prompt = f"{system_prompt}\n\nIMPORTANT OUTPUT FORMAT:\n{OUTPUT_FORMAT}"
80
- except Exception as e:
81
- print(f"Error loading system prompt: {e}")
82
- system_prompt = f"You are an intelligent agent designed to answer a wide variety of questions.\n\nIMPORTANT OUTPUT FORMAT:\n{OUTPUT_FORMAT}"
83
-
84
- return ReActAgent.from_tools(
85
- tools=self.tools,
86
- llm=self.llm,
87
- memory=self.memory,
88
- system_prompt=system_prompt,
89
- verbose=True,
90
- )
91
-
92
  def __call__(self, question: str) -> str:
93
  print(f"Agent received question (first 50 chars): {question[:50]}...")
94
- try:
95
- # Process the question
96
- response = self.agent.query(question)
97
- answer_text = str(response)
98
-
99
- # Extract the FINAL ANSWER part if it exists
100
- if "FINAL ANSWER:" in answer_text:
101
- reasoning_trace = answer_text.split("FINAL ANSWER:")[0].strip()
102
- model_answer = answer_text.split("FINAL ANSWER:")[1].strip()
103
-
104
- # Include the reasoning trace in the response but formatted for JSON
105
- result = {
106
- "model_answer": model_answer,
107
- "reasoning_trace": reasoning_trace
108
- }
109
-
110
- # Return just the answer part for direct evaluation
111
- print(f"Agent generated answer: {model_answer[:50]}..." if len(model_answer) > 50 else f"Agent generated answer: {model_answer}")
112
- return json.dumps(result)
113
- else:
114
- # If no FINAL ANSWER pattern, return the whole response
115
- print(f"No 'FINAL ANSWER' found in response. Returning full response.")
116
- return json.dumps({"model_answer": answer_text, "reasoning_trace": ""})
117
-
118
- except Exception as e:
119
- print(f"Error generating answer: {e}")
120
- error_msg = f"I encountered an error while answering your question: {str(e)}"
121
- return json.dumps({"model_answer": error_msg, "reasoning_trace": ""})
122
 
123
  def run_and_submit_all(profile: gr.OAuthProfile | None):
124
  """
 
23
 
24
  # --- Basic Agent Definition ---
25
  class BasicAgent:
 
26
  def __init__(self):
27
  print("BasicAgent initialized.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  def __call__(self, question: str) -> str:
29
  print(f"Agent received question (first 50 chars): {question[:50]}...")
30
+ fixed_answer = "This is a default answer."
31
+ print(f"Agent returning fixed answer: {fixed_answer}")
32
+ return fixed_answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  def run_and_submit_all(profile: gr.OAuthProfile | None):
35
  """