Changed to Qwen.
- app.py +24 -23
- requirements.txt +1 -1
app.py
CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
-import
+from huggingface_hub import InferenceClient  # Import Hugging Face InferenceClient
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -14,10 +14,11 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
-
-
-
-
+        self.hf_client = InferenceClient(
+            model="Qwen/Qwen1.5-7B-Chat",  # Using Qwen model as default
+            token=os.getenv("HF_TOKEN")  # Optional: Set HF_TOKEN if you have one
+        )
+        print("Using Hugging Face model: Qwen/Qwen1.5-7B-Chat")
 
     def break_down_question(self, question: str) -> list:
         """
@@ -40,21 +41,22 @@ class BasicAgent:
         Question: {question}
         """
 
-        # Call the
-        response = self.
-
-
-                {"role": "system", "content": "You are a helpful assistant that breaks down questions into key search terms."},
-                {"role": "user", "content": prompt}
-            ],
+        # Call the Hugging Face model to get the breakdown
+        response = self.hf_client.text_generation(
+            prompt=prompt,
+            max_new_tokens=150,
             temperature=0.3,
-
+            repetition_penalty=1.1,
+            do_sample=True
         )
 
         # Extract the search terms from the response
-        search_terms = response.
+        search_terms = response.strip().split('\n')
         search_terms = [term.strip() for term in search_terms if term.strip()]
 
+        # Limit to 3 search terms maximum
+        search_terms = search_terms[:3]
+
         print(f"Question broken down into {len(search_terms)} search terms: {search_terms}")
         return search_terms
 
@@ -149,7 +151,7 @@ class BasicAgent:
         # Join the results with clear separation
         combined_results = "\n\n--- Next Search Result ---\n\n".join(all_results)
 
-        # Use
+        # Use Hugging Face model to synthesize a coherent answer from the search results
         try:
             synthesis_prompt = f"""
             Based on the following search results, please provide a comprehensive answer to this question:
@@ -162,17 +164,16 @@ class BasicAgent:
             Answer:
             """
 
-
-
-
-
-                {"role": "user", "content": synthesis_prompt}
-            ],
+            # Call the Hugging Face model to synthesize an answer
+            response = self.hf_client.text_generation(
+                prompt=synthesis_prompt,
+                max_new_tokens=500,
                 temperature=0.5,
-
+                repetition_penalty=1.05,
+                do_sample=True
             )
 
-            answer = response.
+            answer = response.strip()
             print("Agent returning synthesized answer from search results.")
             return answer
 
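A quick way to smoke-test the new call path outside the Space is the standalone sketch below. The model name, generation parameters, and HF_TOKEN handling are taken from this diff; the sample question and the surrounding script are illustrative only and assume the Qwen/Qwen1.5-7B-Chat Inference API endpoint is reachable.

import os
from huggingface_hub import InferenceClient

# Same client setup as BasicAgent.__init__ in this commit (HF_TOKEN is optional,
# but avoids anonymous rate limits on the Inference API).
client = InferenceClient(model="Qwen/Qwen1.5-7B-Chat", token=os.getenv("HF_TOKEN"))

# Same raw-prompt completion call as break_down_question; the prompt text here
# is a stand-in for the real breakdown prompt built in app.py.
response = client.text_generation(
    prompt="List up to three short web search terms for: Who painted the Mona Lisa?",
    max_new_tokens=150,
    temperature=0.3,
    repetition_penalty=1.1,
    do_sample=True,
)

# Mirrors the parsing added in break_down_question: split lines, drop blanks, cap at 3.
search_terms = [t.strip() for t in response.strip().split("\n") if t.strip()][:3]
print(search_terms)

Note that text_generation sends the prompt string as-is, so Qwen's chat template is not applied; a chat-style variant is sketched after the requirements.txt diff.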
requirements.txt
CHANGED
@@ -1,3 +1,3 @@
 gradio
 requests
-
+huggingface_hub
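The huggingface_hub dependency added above also exposes InferenceClient.chat_completion in recent releases, which lets the server apply Qwen's chat template instead of receiving a raw prompt. A possible variant, not part of this commit, reusing the system prompt from the removed OpenAI-style code:

from huggingface_hub import InferenceClient

client = InferenceClient(model="Qwen/Qwen1.5-7B-Chat")  # token omitted for brevity

# chat_completion wraps the messages in the model's chat template, which
# chat-tuned models like Qwen1.5-7B-Chat generally expect.
result = client.chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant that breaks down questions into key search terms."},
        {"role": "user", "content": "Who painted the Mona Lisa?"},
    ],
    max_tokens=150,
    temperature=0.3,
)
print(result.choices[0].message.content)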