MathFrenchToast committed on
Commit 5675d05 · 1 Parent(s): 7acb2e7

feat: reach 35% and pass the test with the xagent

set a stronger model for the manager
add an image vision tool (not yet tested)

Files changed (9)
  1. README.md +17 -1
  2. app.py +10 -4
  3. multiagents.py +49 -17
  4. requirements.txt +3 -1
  5. tools/fetch.py +7 -15
  6. tools/image.py +40 -0
  7. tools/mylogger.py +41 -0
  8. tools/stt.py +68 -0
  9. vllm_test.py +4 -0
README.md CHANGED
@@ -33,7 +33,23 @@ docker run --rm `
   -v "${PWD}/etcsearxng:/etc/searxng" `
   -e "BASE_URL=http://localhost:$PORT/" `
   -e "INSTANCE_NAME=my-instance" `
-  searxng/searxng
+  -d searxng/searxng

  be sure to allow the json format in /etc/searxng/settings.yml
+
+ For the same reason, my speech-to-text tool relies on a locally run docker image of whisper.cpp,
+ with ffmpeg installed for mp3 > wav conversion.
+
+ And if I really no longer have any available tokens on OpenAI or Gemini, I can run a vLLM instance.
+
+ ## Instrumentation
+
+ Instrumentation is enabled,
+ and done locally with [Arize-ai Phoenix](https://github.com/Arize-ai/phoenix).
+ A server is launched with:
+
+     python -m phoenix.server.main serve
+
+ and can be consulted at: http://127.0.0.1:6006
+
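The JSON toggle the README points at lives in searxng's settings.yml. A minimal sketch of the relevant block, assuming the stock searxng schema (verify against the settings shipped with your searxng version):

    # /etc/searxng/settings.yml: allow clients to request results as JSON
    search:
      formats:
        - html
        - json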
app.py CHANGED
@@ -8,6 +8,12 @@ from dotenv import load_dotenv
  from myagent import BasicAgent # Import your agent class from myagent.py
  from multiagents import MultiAgent

+ from phoenix.otel import register
+ from openinference.instrumentation.smolagents import SmolagentsInstrumentor
+
+ register()
+ SmolagentsInstrumentor().instrument()
+
  # (Keep Constants as is)
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -80,16 +86,16 @@ def run_and_submit_all(nb_questions: int, profile: gr.OAuthProfile | None):
          task_id = item.get("task_id")
          question_text = item.get("question")
          file_name = item.get("file_name")
-         file_url = None
+         file_question_url = None
          if file_name:
-             file_url = f"{file_url}/{task_id}"
+             file_question_url = f"{file_url}/{task_id}"
          if not task_id or question_text is None:
              print(f"Skipping item with missing task_id or question: {item}")
              continue
          try:
              agent_question = question_text
-             if file_url:
-                 agent_question += f"\n\nFile URL: {file_url}"
+             if file_question_url:
+                 agent_question += f"\n\nFile URL: {file_question_url}"

              submitted_answer = agent(agent_question)
              answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
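The bare register() call sends traces to a Phoenix collector on its default local endpoint. A more explicit wiring would look like the sketch below; the project_name value is an arbitrary label and the endpoint is Phoenix's documented local default, neither comes from this repo:

    from phoenix.otel import register
    from openinference.instrumentation.smolagents import SmolagentsInstrumentor

    # route traces to the server started with `python -m phoenix.server.main serve`
    tracer_provider = register(
        project_name="hf-agent-course",  # assumption: any project label works
        endpoint="http://127.0.0.1:6006/v1/traces",
    )
    SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider)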
multiagents.py CHANGED
@@ -1,11 +1,15 @@

+ # a multi-agent proposal to solve the HF agent course final assignment
  import os
  import dotenv
  from smolagents import CodeAgent, ToolCallingAgent
  from smolagents import OpenAIServerModel
  from tools.fetch import fetch_webpage, search_web
- from smolagents import VisitWebpageTool, GoogleSearchTool, SpeechToTextTool
+ from smolagents import PythonInterpreterTool
  from tools.yttranscript import get_youtube_transcript, get_youtube_title_description
+ from tools.stt import stt
+ from tools.image import analyze_image
+ from tools.mylogger import save_file_with_timestamp, mylog
  import myprompts

  dotenv.load_dotenv()
@@ -29,14 +33,23 @@ openai_41nano_model = OpenAIServerModel(
      api_key=os.environ["OPENAI_API_KEY"],
  )

+ openai_41mini_model = OpenAIServerModel(
+     model_id="gpt-4.1-mini",
+     api_base="https://api.openai.com/v1",
+     api_key=os.environ["OPENAI_API_KEY"],
+ )
+

- def chek_final_answer(final_answer, agent_memory) -> bool:
+ def check_final_answer(final_answer, agent_memory) -> bool:
      """
      Check if the final answer is correct.
      This is a placeholder function. You can implement your own logic here.
      """
-     # For demonstration, we assume the answer is always correct
-     return True
+     # if the returned answer is longer than 200 characters, we assume it is not correct
+     if len(str(final_answer)) > 200:
+         return False
+     else:
+         return True


@@ -46,8 +59,9 @@ web_agent = CodeAgent(
          fetch_webpage,
      ],
      name="web_agent",
-     description="Browses the web to find information",
-     verbosity_level=1,
+     description="Use search engine to find webpages related to a subject and get the page content",
+     additional_authorized_imports=["pandas", "numpy", "bs4"],
+     verbosity_level=1,
      max_steps=7,
  )

@@ -56,10 +70,12 @@ audiovideo_agent = CodeAgent(
      tools=[
          get_youtube_transcript,
          get_youtube_title_description,
-         SpeechToTextTool()
+         stt,
+         analyze_image
      ],
      name="audiovideo_agent",
-     description="Extracts information from video or audio files",
+     description="Extracts information from image, video or audio files from the web",
+     additional_authorized_imports=["pandas", "numpy", "bs4", "requests"],
      verbosity_level=1,
      max_steps=7,
  )
@@ -67,13 +83,13 @@


  manager_agent = CodeAgent(
-     model=openai_41nano_model,
-     tools=[],
+     model=openai_41mini_model,
+     tools=[PythonInterpreterTool()],
      managed_agents=[web_agent, audiovideo_agent],
      additional_authorized_imports=["pandas", "numpy", "bs4"],
      planning_interval=5,
      verbosity_level=2,
-     final_answer_checks=[chek_final_answer],
+     final_answer_checks=[check_final_answer],
      max_steps=15,
      name="manager_agent",
      description="A manager agent that coordinates the work of other agents to answer questions.",
@@ -84,13 +100,19 @@ class MultiAgent:
          print("BasicAgent initialized.")

      def __call__(self, question: str) -> str:
-         try:
-             # log agent call in file
-             with open("logs/agent_calls.log", "a") as log_file:
-                 log_file.write(f"Agent called with question: {question}\n")
+         mylog(self.__class__.__name__, question)

          try:
-             question = question + '\n' + myprompts.output_format
+             prefix = """You are the top agent of a multi-agent system that can answer questions by coordinating the work of other agents.
+             You will receive a question and you will decide which agent to use to answer it.
+             You can use the web_agent to search the web for information and to fetch the content of a web page, or the audiovideo_agent to extract information from video or audio files.
+             You can also use your own knowledge to answer the question.
+             You need to respect the output format that is given to you.
+             Finding the correct answer to the question needs reasoning and planning: read the question carefully, think step by step and do not skip any steps.
+             """
+
+             question = prefix + "\nTHE QUESTION:\n" + question + '\n' + myprompts.output_format
+
              fixed_answer = ""

              fixed_answer = manager_agent.run(question)
@@ -104,7 +126,17 @@

  if __name__ == "__main__":
      # Example usage
+     """
      question = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."
+     """
+     question = """
+     Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(
+
+     Could you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.
+
+     File URL: https://agents-course-unit4-scoring.hf.space/files/1f975693-876d-457b-a649-393859e79bf3
+     """
      agent = MultiAgent()
      answer = agent(question)
-     print(f"Answer: {answer}")
+     print(f"Answer: {answer}")
+
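Since check_final_answer is a pure length heuristic, it can be sanity-checked in isolation; a minimal sketch:

    # answers over 200 characters are rejected, short answers pass
    assert check_final_answer("42", agent_memory=None) is True
    assert check_final_answer("x" * 201, agent_memory=None) is False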
requirements.txt CHANGED
@@ -8,4 +8,6 @@ smolagents[openai]
  markdownify
  beautifulsoup4
  transformers
- smolagents[transformers]
+ smolagents[transformers]
+ smolagents[audio]
+ smolagents[telemetry,toolkit]
tools/fetch.py CHANGED
@@ -2,8 +2,8 @@
  from smolagents import tool
  import requests
  from markdownify import markdownify as md
- import time
  from bs4 import BeautifulSoup
+ from tools.mylogger import save_file_with_timestamp, mylog

  @tool
  def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
@@ -16,7 +16,8 @@ def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
      Returns:
          str: The HTML content of the URL.
      """
-     response = requests.get(url)
+     content = None
+     response = requests.get(url, timeout=30)
      if (convert_to_markdown):
          soup = BeautifulSoup(response.text, "html.parser")
          # remove script and style tags
@@ -27,23 +28,14 @@ def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
          if "wikipedia.org" in url:
              main_content = soup.find("main", {"id": "content"})
              if main_content:
-                 content = md(str(main_content), strip=['script', 'style']).strip()
+                 content = md(str(main_content), strip=['script', 'style'], heading_style="ATX").strip()
              else:
-                 content = md(response.text, strip=['script', 'style']).strip()
+                 content = md(response.text, strip=['script', 'style'], heading_style="ATX").strip()
      else:
          content = response.text

-     try:
-         # save content to a file in test folder before returning
-         # compute filepath with correct extension based on convert_to_markdown and add a timestamp for unicity
-         file_extension = ".md" if convert_to_markdown else ".html"
-         unicity_suffix = str(int(time.time()))
-         file_name = f"test/fetched_content_{unicity_suffix}{file_extension}"
-         with open(file_name, "w", encoding="utf-8") as f:
-             f.write(content)
-     except Exception as e:
-         print(f"Error saving content to file: {e}")
-
+     save_file_with_timestamp(content, "webpage", ".md" if convert_to_markdown else ".html")
+
      return content

  @tool
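Functions decorated with smolagents' @tool remain directly callable, so the fetcher can be exercised outside of any agent; a quick sketch (the URL is only an example):

    # fetch a page as markdown; scripts and styles are stripped and, for
    # wikipedia.org URLs, only the <main id="content"> block is kept
    text = fetch_webpage("https://en.wikipedia.org/wiki/Mercedes_Sosa")
    print(text[:500])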
tools/image.py ADDED
@@ -0,0 +1,40 @@
+ # image analyzer using an OpenAI vision model
+
+ from smolagents import tool
+ from openai import OpenAI
+ import dotenv
+ dotenv.load_dotenv()
+
+ @tool
+ def analyze_image(question: str, image_url: str) -> str:
+     """
+     Analyze an image using OpenAI's API.
+     Args:
+         question (str): The question to ask about the image, e.g. "What is in this image?"
+         image_url (str): The URL of the image to analyze.
+     Returns:
+         str: The model's answer about the image.
+     """
+     client = OpenAI()
+
+     response = client.responses.create(
+         model="gpt-4o-mini",
+         input=[
+             {
+                 "role": "user",
+                 "content": [
+                     {"type": "input_text", "text": f"{question}"},
+                     {
+                         "type": "input_image",
+                         "image_url": f"{image_url}",
+                     }
+                 ]
+             }
+         ]
+     )
+
+     # return only the generated text, not the whole Response object,
+     # so the declared -> str return type actually holds
+     return response.output_text
+
+ if __name__ == "__main__":
+     question = "What is the main subject of this image?"
+     image_url = "https://agents-course-unit4-scoring.hf.space/files/cca530fc-4052-43b2-b130-b30968d8aa44"
+     answer = analyze_image(question, image_url)
+     print(f"Answer: {answer}")
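Because the Responses API also accepts base64 data URLs in the image_url field, the same tool can cover local files; a hedged sketch (the file name is hypothetical):

    import base64

    # inline a local image as a data URL instead of passing an http(s) URL
    with open("chess_position.png", "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")
    print(analyze_image("What is in this image?", f"data:image/png;base64,{b64}"))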
tools/mylogger.py ADDED
@@ -0,0 +1,41 @@
+ import time
+
+ def save_file_with_timestamp(content: str, file_name: str, extension: str) -> str:
+     """
+     Save content to a file with a timestamp.
+     Args:
+         content (str): The content to save.
+         file_name (str): The base name of the file.
+         extension (str): The file extension, including the leading dot (e.g. ".md").
+     Returns:
+         str: The path to the saved file.
+     """
+     # save content to a file in the test folder,
+     # with a timestamp suffix for unicity
+     unicity_suffix = str(int(time.time()))
+     file_path = f"test/{file_name}_{unicity_suffix}{extension}"
+     try:
+         with open(file_path, "w", encoding="utf-8") as f:
+             f.write(content)
+     except Exception as e:
+         print(f"Error saving content to file: {e}")
+     return file_path
+
+
+ def mylog(agent_name: str, message: str, depth: int = 0) -> None:
+     """
+     Log a message with indentation based on the depth.
+     Args:
+         agent_name (str): The name of the agent.
+         message (str): The message to log.
+         depth (int): The depth of the log message.
+     """
+     indent = " " * (depth * 4)
+     try:
+         # log agent call in file
+         with open("logs/agent_calls.log", "a") as log_file:
+             log_file.write(f"{indent}{agent_name}: {message}\n")
+     except Exception as e:
+         print(f"Error logging agent call: {e}")
+
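A quick usage sketch tying the two helpers together (it assumes the test/ and logs/ directories already exist, as the code above does):

    # persist a scrap of text, then record the call in the shared agent log
    path = save_file_with_timestamp("hello world", "note", ".txt")
    mylog("demo_agent", f"saved scratch file to {path}", depth=1)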
tools/stt.py ADDED
@@ -0,0 +1,68 @@
+ import io
+ import os
+ import shutil
+ import subprocess
+ import requests
+ import uuid
+ from smolagents import tool
+
+ @tool
+ def stt(file_url: str, language: str = "en-US") -> str:
+     """
+     Convert speech to text using a local whisper model.
+     This function downloads an audio file from a given URL, converts it to WAV format if necessary,
+     then uses a whisper model to transcribe the audio to text.
+
+     Args:
+         file_url (str): The URL of the audio file to transcribe.
+         language (str): The language code for the transcription. Default is "en-US".
+
+     Returns:
+         str: The transcribed text.
+     """
+
+     file_name = uuid.uuid4().hex + ".mp3"
+
+     dest_folder = "c:\\Users\\mathi\\dev\\stt"
+     file_path = os.path.join(dest_folder + "\\tmp", file_name)
+     # 1. download the file from the url (in pure python, without wget or curl)
+     if not os.path.exists(file_path):
+         response = requests.get(file_url)
+         if response.status_code == 200:
+             with open(file_path, "wb") as f:
+                 f.write(response.content)
+         else:
+             raise Exception(f"Error downloading file: {response.status_code}")
+
+     # 2. if it is an mp3, convert to wav with the ffmpeg executable
+     # (mono, 16 kHz, 16-bit PCM: the format whisper.cpp expects)
+     if file_name.endswith(".mp3"):
+         cmd = f"ffmpeg -i {file_path} -ac 1 -ar 16000 -c:a pcm_s16le {file_path[:-4]}.wav"
+         subprocess.run(cmd.split(), cwd=dest_folder, check=True)
+         file_path = file_path[:-4] + ".wav"
+         file_name = file_name[:-4] + ".wav"
+
+     # 3. copy the file to the data folder mounted into the container
+     shutil.copy2(file_path, os.path.join(dest_folder, "testdata/"))
+
+     # 4. run the whisper.cpp container on the copied file
+     docker_command = f"""
+     docker run
+     -v {dest_folder}/models:/app/models
+     -v {dest_folder}/testdata:/app/testdata
+     ghcr.io/appleboy/go-whisper:latest
+     --model /app/models/ggml-small.bin
+     --audio-path /app/testdata/{file_name}
+     """
+
+     subprocess.run(docker_command.split(), cwd=dest_folder, check=True)
+     # 5. read the transcript the container wrote next to the audio file and return it
+     output_filepath = os.path.join(dest_folder, "testdata", f"{file_name[:-4]}.txt")
+     with open(output_filepath, "r") as f:
+         text = f.read()
+     return text
+
+ if __name__ == "__main__":
+     transcript = stt("https://agents-course-unit4-scoring.hf.space/files/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3")
+     print(transcript)
vllm_test.py CHANGED
@@ -1,4 +1,8 @@
  from openai import OpenAI
+
+ # test file in case I need to run entirely locally
+ # this tests compatibility with the OpenAI client
+
  client = OpenAI(
      base_url="http://192.168.1.39:18000/v1",
      api_key="token-abc123",
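The hunk only shows the client setup; a hedged sketch of the smoke test such a file would continue with (the model name is an assumption, it must match whatever model the vLLM server was launched with):

    # minimal round-trip against the local OpenAI-compatible vLLM endpoint
    completion = client.chat.completions.create(
        model="Qwen/Qwen2.5-7B-Instruct",  # assumption: the model vLLM serves
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(completion.choices[0].message.content)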