Guillaume Fradet committed
Commit b59053d · 1 Parent(s): 81917a3

add llama-index agent (without tools for now)

Files changed (2)
  1. app.py +38 -10
  2. requirements.txt +3 -1
app.py CHANGED

@@ -1,9 +1,13 @@
 import os
 import gradio as gr
 import requests
-import inspect
 import pandas as pd
 
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_index.core.agent.workflow import ReActAgent
+from llama_index.core.workflow import Context
+from llama_index.core.agent.workflow import AgentStream, ToolCallResult
+
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -19,7 +23,29 @@ class BasicAgent:
         print(f"Agent returning fixed answer: {fixed_answer}")
         return fixed_answer
 
-def run_and_submit_all( profile: gr.OAuthProfile | None):
+class LLamaIndexAgent:
+    def __init__(self, model_name="Qwen/Qwen2.5-Coder-32B-Instruct"):
+        llm = HuggingFaceInferenceAPI(model_name=model_name)
+        self.agent = ReActAgent(tools=[], llm=llm)
+        self.ctx = Context(self.agent)
+        print(f"BasicAgent initialized with model \"{model_name}\"")
+
+    async def __call__(self, question: str) -> str:
+        print(f"Agent received question (first 50 chars): {question[:50]}...")
+
+        handler = self.agent.run(question, ctx=self.ctx)
+        async for ev in handler.stream_events():
+            if isinstance(ev, ToolCallResult):
+                print(f"\nCall {ev.tool_name} with {ev.tool_kwargs}\nReturned: {ev.tool_output}")
+            if isinstance(ev, AgentStream):
+                print(f"{ev.delta}", end="", flush=True)
+
+        response = await handler
+        print("\n\n"+"-"*50)
+        print(f"Agent returning with answer: {response}")
+        return response
+
+async def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
@@ -38,13 +64,14 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
 
-    # 1. Instantiate Agent ( modify this part to create your agent)
+    # 1. Instantiate Agent (modify this part to create your agent)
     try:
-        agent = BasicAgent()
+        # agent = BasicAgent()
+        agent = LLamaIndexAgent()
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
-    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
+    # In the case of an app running as a hugging Face space, this link points toward your codebase (useful for others so please keep it public)
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
     print(agent_code)
 
@@ -62,9 +89,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
@@ -80,7 +107,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             print(f"Skipping item with missing task_id or question: {item}")
             continue
         try:
-            submitted_answer = agent(question_text)
+            # submitted_answer = agent(question_text)
+            submitted_answer = await agent(question_text)
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
@@ -91,7 +119,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
+    # 4. Prepare Submission
     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
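
The new ReActAgent is constructed with an empty tools list, matching the "without tools for now" in the commit message. Below is a minimal sketch (not part of this commit) of how a tool could later be registered with the same workflow-style ReActAgent and exercised outside Gradio; the multiply helper, the sample question, and the availability of a Hugging Face inference token in the environment are all assumptions.

# Sketch only -- not part of commit b59053d. `multiply` and the question are
# hypothetical; a Hugging Face token is assumed to be configured for the
# Inference API client.
import asyncio

from llama_index.core.agent.workflow import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Context
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI


def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b


async def main() -> None:
    llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
    # Same construction as LLamaIndexAgent in app.py, with one tool registered.
    agent = ReActAgent(tools=[FunctionTool.from_defaults(fn=multiply)], llm=llm)
    ctx = Context(agent)  # keeps conversation state across agent.run() calls
    response = await agent.run("What is 6.5 times 7?", ctx=ctx)
    print(response)


if __name__ == "__main__":
    asyncio.run(main())

Passing ctx= reuses the same conversation state across calls, which is what the commit does by holding on to self.ctx; omitting it would give each run a fresh context.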
requirements.txt CHANGED

@@ -1,2 +1,4 @@
 gradio
-requests
+requests
+llama-index
+llama-index-llms-huggingface-api
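
Both llama-index and llama-index-llms-huggingface-api are left unpinned. As a small sketch (assuming only the package names above), the following post-install check confirms that the exact imports used in app.py resolve, without making any network calls:

# Run after `pip install -r requirements.txt`; an ImportError here means the
# installed llama-index packages do not provide what app.py expects.
from llama_index.core.agent.workflow import AgentStream, ReActAgent, ToolCallResult
from llama_index.core.workflow import Context
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

print("llama-index imports OK:",
      ReActAgent.__name__, AgentStream.__name__, ToolCallResult.__name__,
      Context.__name__, HuggingFaceInferenceAPI.__name__)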