Spaces:
Runtime error
Runtime error
Commit
·
7acb2e7
1
Parent(s):
81917a3
feat: my proposition
Browse filesneed some cleaning before publishing
- .gitignore +6 -0
- README.md +26 -2
- app.py +30 -16
- env.example +1 -0
- multiagents.py +110 -0
- myagent.py +59 -0
- myprompts.py +14 -0
- requirements.txt +10 -1
- tools/__init__.py +0 -0
- tools/fetch.py +108 -0
- tools/yttranscript.py +72 -0
- vllm_asopenai_test.py +15 -0
- vllm_test.py +14 -0
.gitignore
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.venv/
|
2 |
+
.env
|
3 |
+
|
4 |
+
__pycache__/
|
5 |
+
test/*
|
6 |
+
logs/*
|
README.md
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
---
|
2 |
-
title: Template Final Assignment
|
3 |
emoji: 🕵🏻♂️
|
4 |
colorFrom: indigo
|
5 |
colorTo: indigo
|
@@ -12,4 +12,28 @@ hf_oauth: true
|
|
12 |
hf_oauth_expiration_minutes: 480
|
13 |
---
|
14 |
|
15 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
title: Template Final Assignment - frenchtoast.dev
|
3 |
emoji: 🕵🏻♂️
|
4 |
colorFrom: indigo
|
5 |
colorTo: indigo
|
|
|
12 |
hf_oauth_expiration_minutes: 480
|
13 |
---
|
14 |
|
15 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
16 |
+
|
17 |
+
# HF agent course final assigment
|
18 |
+
|
19 |
+
## local running
|
20 |
+
|
21 |
+
Create a venv
|
22 |
+
Install requirements
|
23 |
+
create a .env file (see env.example)
|
24 |
+
start with `python app.py`
|
25 |
+
|
26 |
+
DuckDuckGo and GoogleSearch enforce rate limits that are too strict,
|
27 |
+
so this code assumes that a locally running instance of searxng is available on http://localhost:8888
|
28 |
+
|
29 |
+
On windows run it with:
|
30 |
+
$PORT=8888
|
31 |
+
docker run --rm `
|
32 |
+
-p ${PORT}:8080 `
|
33 |
+
-v "${PWD}/etcsearxng:/etc/searxng" `
|
34 |
+
-e "BASE_URL=http://localhost:$PORT/" `
|
35 |
+
-e "INSTANCE_NAME=my-instance" `
|
36 |
+
searxng/searxng
|
37 |
+
|
38 |
+
Be sure to allow the JSON format in /etc/searxng/settings.yml
|
39 |
+
|
app.py
CHANGED
@@ -3,25 +3,22 @@ import gradio as gr
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
|
|
|
|
|
|
|
|
6 |
|
7 |
# (Keep Constants as is)
|
8 |
# --- Constants ---
|
9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
|
|
|
|
|
|
|
10 |
|
11 |
-
|
12 |
-
# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
|
13 |
-
class BasicAgent:
|
14 |
-
def __init__(self):
|
15 |
-
print("BasicAgent initialized.")
|
16 |
-
def __call__(self, question: str) -> str:
|
17 |
-
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
18 |
-
fixed_answer = "This is a default answer."
|
19 |
-
print(f"Agent returning fixed answer: {fixed_answer}")
|
20 |
-
return fixed_answer
|
21 |
-
|
22 |
-
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
23 |
"""
|
24 |
-
Fetches all questions, runs
|
25 |
and displays the results.
|
26 |
"""
|
27 |
# --- Determine HF Space Runtime URL and Repo URL ---
|
@@ -36,11 +33,13 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
36 |
|
37 |
api_url = DEFAULT_API_URL
|
38 |
questions_url = f"{api_url}/questions"
|
|
|
39 |
submit_url = f"{api_url}/submit"
|
40 |
|
41 |
-
# 1. Instantiate Agent
|
42 |
try:
|
43 |
-
agent = BasicAgent()
|
|
|
44 |
except Exception as e:
|
45 |
print(f"Error instantiating agent: {e}")
|
46 |
return f"Error initializing agent: {e}", None
|
@@ -72,15 +71,27 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
72 |
# 3. Run your Agent
|
73 |
results_log = []
|
74 |
answers_payload = []
|
|
|
|
|
|
|
|
|
75 |
print(f"Running agent on {len(questions_data)} questions...")
|
76 |
for item in questions_data:
|
77 |
task_id = item.get("task_id")
|
78 |
question_text = item.get("question")
|
|
|
|
|
|
|
|
|
79 |
if not task_id or question_text is None:
|
80 |
print(f"Skipping item with missing task_id or question: {item}")
|
81 |
continue
|
82 |
try:
|
83 |
-
|
|
|
|
|
|
|
|
|
84 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
85 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
86 |
except Exception as e:
|
@@ -160,6 +171,8 @@ with gr.Blocks() as demo:
|
|
160 |
|
161 |
gr.LoginButton()
|
162 |
|
|
|
|
|
163 |
run_button = gr.Button("Run Evaluation & Submit All Answers")
|
164 |
|
165 |
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
|
@@ -168,6 +181,7 @@ with gr.Blocks() as demo:
|
|
168 |
|
169 |
run_button.click(
|
170 |
fn=run_and_submit_all,
|
|
|
171 |
outputs=[status_output, results_table]
|
172 |
)
|
173 |
|
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
6 |
+
from dotenv import load_dotenv
|
7 |
+
|
8 |
+
from myagent import BasicAgent # Import your agent class from myagent.py
|
9 |
+
from multiagents import MultiAgent
|
10 |
|
11 |
# (Keep Constants as is)
|
12 |
# --- Constants ---
|
13 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
14 |
+
load_dotenv()
|
15 |
+
|
16 |
+
max_questions = 20
|
17 |
+
|
18 |
|
19 |
+
def run_and_submit_all(nb_questions: int, profile: gr.OAuthProfile | None):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
"""
|
21 |
+
Fetches all questions, runs my Agent on them, submits all answers,
|
22 |
and displays the results.
|
23 |
"""
|
24 |
# --- Determine HF Space Runtime URL and Repo URL ---
|
|
|
33 |
|
34 |
api_url = DEFAULT_API_URL
|
35 |
questions_url = f"{api_url}/questions"
|
36 |
+
file_url = f"{api_url}/files"
|
37 |
submit_url = f"{api_url}/submit"
|
38 |
|
39 |
+
# 1. Instantiate Agent
|
40 |
try:
|
41 |
+
# agent = BasicAgent()
|
42 |
+
agent = MultiAgent()
|
43 |
except Exception as e:
|
44 |
print(f"Error instantiating agent: {e}")
|
45 |
return f"Error initializing agent: {e}", None
|
|
|
71 |
# 3. Run your Agent
|
72 |
results_log = []
|
73 |
answers_payload = []
|
74 |
+
|
75 |
+
# for testing keep only some questions
|
76 |
+
questions_data = questions_data[:nb_questions]
|
77 |
+
|
78 |
print(f"Running agent on {len(questions_data)} questions...")
|
79 |
for item in questions_data:
|
80 |
task_id = item.get("task_id")
|
81 |
question_text = item.get("question")
|
82 |
+
file_name = item.get("file_name")
|
83 |
+
file_url = None
|
84 |
+
if file_name:
|
85 |
+
file_url = f"{file_url}/{task_id}"
|
86 |
if not task_id or question_text is None:
|
87 |
print(f"Skipping item with missing task_id or question: {item}")
|
88 |
continue
|
89 |
try:
|
90 |
+
agent_question = question_text
|
91 |
+
if file_url:
|
92 |
+
agent_question += f"\n\nFile URL: {file_url}"
|
93 |
+
|
94 |
+
submitted_answer = agent(agent_question)
|
95 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
96 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
97 |
except Exception as e:
|
|
|
171 |
|
172 |
gr.LoginButton()
|
173 |
|
174 |
+
nb_questions = gr.Number(value=20)
|
175 |
+
|
176 |
run_button = gr.Button("Run Evaluation & Submit All Answers")
|
177 |
|
178 |
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
|
|
|
181 |
|
182 |
run_button.click(
|
183 |
fn=run_and_submit_all,
|
184 |
+
inputs=[nb_questions],
|
185 |
outputs=[status_output, results_table]
|
186 |
)
|
187 |
|
env.example
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
HF_TOKEN=<<your hf token>>
|
multiagents.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
# Model back-ends for the multi-agent setup.  Expects GEMINI_API_KEY and
# OPENAI_API_KEY in the environment (loaded from a local .env below).

import os
import dotenv
from smolagents import CodeAgent, ToolCallingAgent
from smolagents import OpenAIServerModel
from tools.fetch import fetch_webpage, search_web
from smolagents import VisitWebpageTool, GoogleSearchTool, SpeechToTextTool
from tools.yttranscript import get_youtube_transcript, get_youtube_title_description
import myprompts

# Pull API keys / config from a local .env file into os.environ.
dotenv.load_dotenv()

# Google Gemini exposed through its OpenAI-compatible endpoint.
# NOTE(review): raises KeyError at import time if GEMINI_API_KEY is unset.
gemini_model = OpenAIServerModel(
    model_id="gemini-2.0-flash",
    api_key=os.environ["GEMINI_API_KEY"],
    # Google Gemini OpenAI-compatible API base URL
    api_base="https://generativelanguage.googleapis.com/v1beta/openai/",
)

# Local vLLM server on the LAN; the api_key is vLLM's fixed token, not a secret.
vllm_model = OpenAIServerModel(
    model_id="Qwen/Qwen2.5-1.5B-Instruct",
    api_base="http://192.168.1.39:18000/v1",
    api_key="token-abc123",
)

# Hosted OpenAI model actually used by the agents below.
openai_41nano_model = OpenAIServerModel(
    model_id="gpt-4.1-nano",
    api_base="https://api.openai.com/v1",
    api_key=os.environ["OPENAI_API_KEY"],
)
31 |
+
|
32 |
+
|
33 |
+
def chek_final_answer(final_answer, agent_memory) -> bool:
    """Validation hook passed to ``final_answer_checks``.

    Placeholder implementation: accepts every answer unconditionally.
    Real validation logic (e.g. format or plausibility checks against
    ``agent_memory``) can be plugged in here later.
    """
    # No validation implemented yet — every answer passes.
    return True
|
40 |
+
|
41 |
+
|
42 |
+
# Worker agent: searches the web (local SearXNG) and fetches/reads pages.
web_agent = CodeAgent(
    model=openai_41nano_model,
    tools=[
        search_web,
        fetch_webpage,
    ],
    name="web_agent",
    description="Browses the web to find information",
    verbosity_level=1,
    max_steps=7,
)

# Worker agent: YouTube transcripts/titles and speech-to-text on audio.
audiovideo_agent = CodeAgent(
    model=openai_41nano_model,
    tools=[
        get_youtube_transcript,
        get_youtube_title_description,
        SpeechToTextTool()
    ],
    name="audiovideo_agent",
    description="Extracts information from video or audio files",
    verbosity_level=1,
    max_steps=7,
)


# Orchestrator: no tools of its own; delegates to the two worker agents,
# re-plans every 5 steps, and runs each answer through chek_final_answer.
manager_agent = CodeAgent(
    model=openai_41nano_model,
    tools=[],
    managed_agents=[web_agent, audiovideo_agent],
    additional_authorized_imports=["pandas", "numpy","bs4"],
    planning_interval=5,
    verbosity_level=2,
    final_answer_checks=[chek_final_answer],
    max_steps=15,
    name="manager_agent",
    description="A manager agent that coordinates the work of other agents to answer questions.",
)
|
81 |
+
|
82 |
+
class MultiAgent:
    """Callable front-end for the manager/worker agent team.

    Used like a function: ``answer = MultiAgent()(question)``.
    """

    def __init__(self):
        # Fixed: the message previously said "BasicAgent initialized."
        # (copy-paste from myagent.py), which was misleading in the logs.
        print("MultiAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the manager agent on *question* and return its answer.

        On failure the error message is returned as a string instead of
        raising, so one bad question does not abort a whole batch run.
        """
        # Best-effort call logging; never let logging break the run
        # (previously crashed when the logs/ directory did not exist).
        try:
            os.makedirs("logs", exist_ok=True)
            with open("logs/agent_calls.log", "a") as log_file:
                log_file.write(f"Agent called with question: {question}\n")
        except OSError as e:
            print(f"Could not write agent call log: {e}")

        # Fixed: the original had a dangling outer ``try:`` with no matching
        # ``except``/``finally`` around the logging block — a SyntaxError.
        # Logging and the agent run are now two properly closed try blocks.
        try:
            question = question + '\n' + myprompts.output_format
            fixed_answer = manager_agent.run(question)
            return fixed_answer
        except Exception as e:
            error = f"An error occurred while processing the question: {e}"
            print(error)
            return error
|
103 |
+
|
104 |
+
|
105 |
+
if __name__ == "__main__":
    # Smoke test: run the multi-agent team on one sample GAIA-style question.
    sample_question = (
        "How many studio albums were published by Mercedes Sosa between "
        "2000 and 2009 (included)? You can use the latest 2022 version of "
        "english wikipedia."
    )
    team = MultiAgent()
    print(f"Answer: {team(sample_question)}")
|
myagent.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from smolagents import CodeAgent, ToolCallingAgent
|
3 |
+
from smolagents import OpenAIServerModel
|
4 |
+
from tools.fetch import fetch_webpage
|
5 |
+
from tools.yttranscript import get_youtube_transcript, get_youtube_title_description
|
6 |
+
import myprompts
|
7 |
+
|
8 |
+
# --- Basic Agent Definition ---
|
9 |
+
class BasicAgent:
    """Single-question agent with a two-stage routing strategy.

    A reviewer LLM first decides whether the question needs a
    code-capable agent ("code" -> ``gaia_agent``) or a plain
    tool-calling model ("model" -> ``model_agent``); both are defined
    at module level below this class.
    """

    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Answer *question*; returns the error text instead of raising."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        try:
            # Use the reviewer agent to determine if the question can be
            # answered by a model alone or requires the coding agent.
            print("Calling reviewer agent...")
            reviewer_answer = reviewer_agent.run(
                myprompts.review_prompt + "\nThe question is:\n" + question
            )
            print(f"Reviewer agent answer: {reviewer_answer}")

            question = question + '\n' + myprompts.output_format
            fixed_answer = ""

            if reviewer_answer == "code":
                fixed_answer = gaia_agent.run(question)
                print(f"Code agent answer: {fixed_answer}")
            elif reviewer_answer == "model":
                # The reviewer judged a plain model (with web fetch) sufficient.
                print("Using model agent to answer the question.")
                # Fixed: was a bare ``model_prompt`` (NameError at runtime) —
                # the prompt lives in the imported ``myprompts`` module.
                fixed_answer = model_agent.run(
                    myprompts.model_prompt + "\nThe question is:\n" + question
                )
                print(f"Model agent answer: {fixed_answer}")
            else:
                # Unexpected reviewer output: keep the original empty-answer
                # behavior, but make the cause visible in the logs.
                print(f"Unrecognized reviewer answer {reviewer_answer!r}; returning empty answer.")

            return fixed_answer
        except Exception as e:
            error = f"An error occurred while processing the question: {e}"
            print(error)
            return error
|
40 |
+
|
41 |
+
|
42 |
+
|
43 |
+
|
44 |
+
# Shared hosted model; requires OPENAI_API_KEY in the environment.
model = OpenAIServerModel(
    model_id="gpt-4.1-nano",
    api_base="https://api.openai.com/v1",
    api_key=os.environ["OPENAI_API_KEY"],
)

# Router: decides "code" vs "model" for each question (needs no tools).
reviewer_agent= ToolCallingAgent(model=model, tools=[])
# Plain tool-calling agent for questions a model plus web fetch can answer.
model_agent = ToolCallingAgent(model=model,tools=[fetch_webpage])
# Code-writing agent with web and YouTube tools for the harder questions.
gaia_agent = CodeAgent(tools=[fetch_webpage,get_youtube_title_description,get_youtube_transcript ], model=model)

if __name__ == "__main__":
    # Example usage: one sample GAIA-style question.
    question = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."
    agent = BasicAgent()
    answer = agent(question)
    print(f"Answer: {answer}")
|
myprompts.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Prompt fragments shared by the agents in myagent.py / multiagents.py.

# Routing prompt: the reviewer must answer exactly "code" or "model".
# Fixed typo: "Your task it to assert" -> "Your task is to assess".
review_prompt = """You are a reviewer agent. You will be given a question.
Your task is to assess if a LLM agent with access to web content can answer the question or not, or if a coding agent and more tools is needed to answer the question.
If the question is too complex for a LLM agent, you should return "code" as the answer else you should return "model".
"""

# Prompt for the plain tool-calling model agent.
# Fixed typo: "Your task it to" -> "Your task is to".
model_prompt = """You are a LLM agent. You will be given a question.
Your task is to answer the question using the tools you have access to.
take time to analyse the steps to answer the question.
if a filename is given in the question you can infer the url
"""

# Appended to every question: GAIA-style strict answer formatting rules.
output_format = """OUTPUT FORMAT should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise, use digits only.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."""
|
requirements.txt
CHANGED
@@ -1,2 +1,11 @@
|
|
1 |
gradio
|
2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
gradio
|
2 |
+
gradio[oauth]
|
3 |
+
requests
|
4 |
+
python-dotenv
|
5 |
+
smolagents
|
6 |
+
duckduckgo-search
|
7 |
+
smolagents[openai]
|
8 |
+
markdownify
|
9 |
+
beautifulsoup4
|
10 |
+
transformers
|
11 |
+
smolagents[transformers]
|
tools/__init__.py
ADDED
File without changes
|
tools/fetch.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# this is asmolagent too to fetch html content from a url
|
2 |
+
from smolagents import tool
|
3 |
+
import requests
|
4 |
+
from markdownify import markdownify as md
|
5 |
+
import time
|
6 |
+
from bs4 import BeautifulSoup
|
7 |
+
|
8 |
+
@tool
def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
    """
    Fetches the content of a given URL.

    If markdown conversion is enabled, script/style tags are removed and the
    text content is returned as Markdown; otherwise the raw HTML is returned.
    For Wikipedia pages only the main article content is kept.

    Args:
        url (str): The URL to fetch.
        convert_to_markdown (bool): If True, convert the HTML content to Markdown format. else return the raw HTML.
    Returns:
        str: The (converted) content of the URL.
    """
    # Timeout added so a dead server cannot hang the agent run forever.
    response = requests.get(url, timeout=30)
    if convert_to_markdown:
        soup = BeautifulSoup(response.text, "html.parser")
        # remove script and style tags before conversion
        for script in soup(["script", "style"]):
            script.extract()

        # For wikipedia keep only the main article content when present.
        if "wikipedia.org" in url:
            main_content = soup.find("main", {"id": "content"})
            source_html = str(main_content) if main_content else response.text
        else:
            source_html = response.text
        # Fixed: the original branch structure could leave ``content``
        # unbound on some paths; every markdown path now converts.
        content = md(source_html, strip=['script', 'style']).strip()
    else:
        content = response.text

    try:
        # Save a copy of the fetched content under test/ for offline
        # debugging; a timestamp suffix keeps file names unique.
        file_extension = ".md" if convert_to_markdown else ".html"
        unicity_suffix = str(int(time.time()))
        file_name = f"test/fetched_content_{unicity_suffix}{file_extension}"
        with open(file_name, "w", encoding="utf-8") as f:
            f.write(content)
    except Exception as e:
        # Best-effort only — a missing test/ directory must not fail the fetch.
        print(f"Error saving content to file: {e}")

    return content
|
48 |
+
|
49 |
+
@tool
# this tool allows web search on a local SearXNG instance
def search_web(query: str, num_results: int = 5) -> list:
    """
    Perform a web search using local SearXNG instance.
    Args:
        query (str): The search query.
        num_results (int): The number of results to return.
    Returns:
        list: A list of search results sorted by score with {url, title, content, score} for each result.
    """
    # local metasearch engine searxng, assumed to run on localhost:8888
    searxng_url = "http://localhost:8888/search"
    params = {"q": query, "format": 'json'}
    # Timeout added so an unreachable SearXNG instance fails fast instead
    # of hanging the agent run.
    response = requests.get(searxng_url, params=params, timeout=30)
    if response.status_code == 200:
        ret = response.json()
        # Keep only the first num_results entries of the 'results' array,
        # trimmed to the fields the agents actually use.
        results = ret.get("results", [])[:num_results]
        return [
            {
                "url": result.get("url"),
                "title": result.get("title"),
                "content": result.get("content"),
                "score": result.get("score"),
            }
            for result in results
        ]
    else:
        # Non-200: report and return an empty result list (best-effort tool).
        print(f"Error: {response.status_code}")
        return []
|
86 |
+
|
87 |
+
if __name__ == "__main__":

    # Manual smoke test: requires a local SearXNG instance on :8888.
    try:
        # Test the search function
        query = "What is the capital of France?"
        results = search_web(query,3)
        print(results)
    except Exception as e:
        print(f"An error occurred: {e}")

    # Manual smoke test: fetch a Wikipedia page as markdown (needs network).
    try:
        # Test the fetch function
        video_id = "L1vXCYZAYYM" # Replace with your YouTube video ID
        video_url = "https://www.youtube.com/watch?v=" + video_id
        url = "https://en.wikipedia.org/wiki/Malko_Competition"
        # page_content = fetch_webpage(video_url)
        page_content = fetch_webpage(url, convert_to_markdown=True)
        print(page_content.encode("utf-8"))
    except Exception as e:
        print(f"An error occurred: {e}")
|
107 |
+
|
108 |
+
|
tools/yttranscript.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from smolagents import tool
|
3 |
+
from youtube_transcript_api import YouTubeTranscriptApi
|
4 |
+
from bs4 import BeautifulSoup
|
5 |
+
from tools.fetch import fetch_webpage
|
6 |
+
|
7 |
+
@tool
# smolagents tool: fetch the transcript of a YouTube video by id
def get_youtube_transcript(video_id: str) -> str:
    """
    Fetches the transcript of a YouTube video given its video ID.
    Args:
        video_id (str): The ID of the YouTube video. Pass in the video ID, NOT the video URL. For a video with the URL https://www.youtube.com/watch?v=12345 the ID is 12345.
    Returns:
        str: The transcript of the YouTube video. as a single string with each line separated by a newline character.
    """
    # Initialize the YouTubeTranscriptApi and fetch the transcript snippets.
    ytt_api = YouTubeTranscriptApi()
    fetched_transcript = ytt_api.fetch(video_id)
    raw_data = fetched_transcript.to_raw_data()
    # raw_data is [{'text': ..., 'start': ..., 'duration': ...}, ...];
    # keep only the text of each snippet, one snippet per line.
    transcript = "\n".join([item['text'] for item in raw_data])
    return transcript
|
24 |
+
|
25 |
+
|
26 |
+
@tool
# smolagents tool: get video title and description from a video url
def get_youtube_title_description(video_url: str) -> str:
    """
    Fetches the title and description of a YouTube video given its URL.
    Args:
        video_url (str): The url of the YouTube video.
    Returns:
        str: The title and description of the YouTube video.
    """
    # Fetch the raw watch-page HTML (no markdown conversion) and parse it.
    soup = BeautifulSoup(fetch_webpage(video_url, convert_to_markdown=False), "html.parser")
    # Extract the title from the <meta name="title"> tag content.
    metatitle = soup.find("meta", {"name": "title"})
    if metatitle is not None:
        title = metatitle["content"]
    else:
        title = "No title found"

    # Same for the description via <meta name="description">.
    metadescription = soup.find("meta", {"name": "description"})
    if metadescription is not None:
        description = metadescription["content"]
    else:
        description = "No description found"

    return f"Title: {title}\nDescription: {description}"
|
53 |
+
|
54 |
+
|
55 |
+
if __name__ == "__main__":
    from dotenv import load_dotenv
    # Fixed: was ``load_dotenv`` without parentheses, so the function was
    # never called and the .env file was never actually loaded.
    load_dotenv()

    # Manual smoke test (needs network access to YouTube).
    video_id = "1htKBjuUWec"  # Replace with your YouTube video ID
    video_url = "https://www.youtube.com/watch?v=" + video_id

    # Get the title and description
    try:
        title_description = get_youtube_title_description(video_url)
        print(title_description)
    except Exception as e:
        print(f"Error fetching title and description: {e}")

    try:
        transcript = get_youtube_transcript(video_id)
        # Fixed: print moved inside the try block — previously it ran after
        # the except, raising NameError on ``transcript`` if the fetch failed.
        print(transcript)
    except Exception as e:
        print(f"Error fetching transcript: {e}")
|
vllm_asopenai_test.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Manual check that smolagents can drive a local vLLM server through its
# OpenAI-compatible endpoint.
import os
from smolagents import OpenAIServerModel, CodeAgent

model = OpenAIServerModel(
    model_id="Qwen/Qwen2.5-1.5B-Instruct",
    api_base="http://192.168.1.39:18000/v1",
    api_key="token-abc123",  # vLLM fixed token, not a secret
)

# Minimal agent with no tools — just verifies the model round-trip.
myagent = CodeAgent(
    model=model,
    tools=[])

result = myagent.run("Hello who are you?")  # Replace with your question
print(result)
|
vllm_test.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Manual check that a local vLLM server answers plain OpenAI-client
# chat-completion calls.
from openai import OpenAI
client = OpenAI(
    base_url="http://192.168.1.39:18000/v1",
    api_key="token-abc123",  # vLLM fixed token, not a secret
)

completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-1.5B-Instruct",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)

print(completion.choices[0].message)
|