Commit 26e469b
Parent(s): 5675d05

secu: use non gaia test question
Files changed:
- multiagents.py +2 -8
- myagent.py +2 -2
- vllm_asopenai_test.py +2 -0
multiagents.py
CHANGED
@@ -126,15 +126,9 @@ class MultiAgent:
 
 if __name__ == "__main__":
     # Example usage
-
-    question = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."
-    """
+
     question = """
-
-
-Could you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.
-
-File URL: https://agents-course-unit4-scoring.hf.space/files/1f975693-876d-457b-a649-393859e79bf3
+What was the actual enrollment of the Malko competition in 2023?
     """
     agent = MultiAgent()
     answer = agent(question)
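For quick reference, the example-usage block of multiagents.py reads roughly as follows after this commit. This is a reconstruction from the hunk above: indentation is assumed, and the MultiAgent class itself (defined earlier in the file) is not shown.

# Reconstructed tail of multiagents.py after this commit; indentation is
# assumed and MultiAgent is the class defined earlier in the file.
if __name__ == "__main__":
    # Example usage

    question = """
What was the actual enrollment of the Malko competition in 2023?
    """
    agent = MultiAgent()
    answer = agent(question)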
myagent.py
CHANGED
@@ -29,7 +29,7 @@ class BasicAgent:
         elif reviewer_answer == "model":
             # If the reviewer agent suggests using the model, we can proceed with the model agent
             print("Using model agent to answer the question.")
-            fixed_answer = model_agent.run(model_prompt + "\nThe question is:\n" + question)
+            fixed_answer = model_agent.run(myprompts.model_prompt + "\nThe question is:\n" + question)
             print(f"Model agent answer: {fixed_answer}")
 
             return fixed_answer
@@ -53,7 +53,7 @@ gaia_agent = CodeAgent(tools=[fetch_webpage,get_youtube_title_description,get_yo
 
 if __name__ == "__main__":
     # Example usage
-    question = "
+    question = "What was the actual enrollment of the Malko competition in 2023?"
     agent = BasicAgent()
     answer = agent(question)
     print(f"Answer: {answer}")
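The first hunk switches the bare model_prompt name to myprompts.model_prompt, which suggests the prompt text now lives in a separate myprompts module rather than in a local variable. Below is a minimal, self-contained sketch of that wiring; only the attribute name myprompts.model_prompt and the "\nThe question is:\n" separator come from the diff, while the prompt text and the stub agent are illustrative assumptions.

# Self-contained sketch of the prompt wiring implied by the first hunk.
# Only myprompts.model_prompt and the "\nThe question is:\n" separator are
# taken from the diff; the prompt text and DummyAgent stub are assumptions.
from types import SimpleNamespace

# Stand-in for the real myprompts module imported by myagent.py.
myprompts = SimpleNamespace(
    model_prompt="You are a helpful assistant. Answer the question concisely."
)

class DummyAgent:
    """Stand-in for the smolagents model agent built in myagent.py."""
    def run(self, prompt: str) -> str:
        return f"(would be sent to the model)\n{prompt}"

model_agent = DummyAgent()
question = "What was the actual enrollment of the Malko competition in 2023?"
fixed_answer = model_agent.run(myprompts.model_prompt + "\nThe question is:\n" + question)
print(f"Model agent answer: {fixed_answer}")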
vllm_asopenai_test.py
CHANGED
@@ -1,6 +1,8 @@
 import os
 from smolagents import OpenAIServerModel, CodeAgent
 
+# test file in cas I need to run entirely locally
+
 model = OpenAIServerModel(
     model_id="Qwen/Qwen2.5-1.5B-Instruct",
     api_base="http://192.168.1.39:18000/v1",
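The hunk ends mid-call, so only the top of vllm_asopenai_test.py is visible. For context, here is a sketch of how such a local-only setup is typically completed with smolagents, assuming a vLLM server on the api_base shown above that ignores the API key; the closing arguments, the CodeAgent construction, and the run() call are not part of the diff and are assumptions.

# Sketch of a completed vllm_asopenai_test.py; everything after the api_base
# line is assumed (the diff stops there).
import os
from smolagents import OpenAIServerModel, CodeAgent

# test file in cas I need to run entirely locally

model = OpenAIServerModel(
    model_id="Qwen/Qwen2.5-1.5B-Instruct",
    api_base="http://192.168.1.39:18000/v1",
    # vLLM's OpenAI-compatible server usually accepts any key.
    api_key=os.environ.get("VLLM_API_KEY", "not-needed"),
)

agent = CodeAgent(tools=[], model=model)

if __name__ == "__main__":
    answer = agent.run("What was the actual enrollment of the Malko competition in 2023?")
    print(f"Answer: {answer}")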