Commit da2d730
Parent(s): 43c174f
migrated to openai

Files changed:
- app.py (+70 -5)
- requirements.txt (+2 -1)

app.py
CHANGED
@@ -15,10 +15,13 @@ from typing import TypedDict, Annotated
 from langgraph.prebuilt import ToolNode
 from langgraph.graph import START, StateGraph, END, Graph
 # from langgraph.prebuilt import tools_condition
-from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
+# from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
 from langchain.agents import initialize_agent, AgentType
 # from langchain_community.llms import HuggingFaceHub
 # from langchain_community.chat_models import ChatHuggingFace
+import openai
+from openai import OpenAI
+# from langchain_openai import OpenAI

 ## # Load environment variables from .env file
 # --- Constants ---
@@ -28,6 +31,9 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 # load_dotenv()
 HF_ACCESS_KEY = os.getenv('HF_ACCESS_KEY')
 WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
+OPENAI_MODEL = os.getenv('OPENAI_MODEL') #'gpt-3.5-turbo-0613'
+OPENAI_KEY = os.getenv('OPENAI_KEY')
+client = OpenAI(api_key = OPENAI_KEY)

 ########## ----- DEFINING TOOLS -----##########

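The new module-level client is the plain openai SDK client; in the hunks shown it is only constructed, never called. A minimal sketch of how it could be smoke-tested (assuming openai>=1.0 and that OPENAI_MODEL names a chat-capable model; this snippet is not part of the commit):

# Sketch only (not in this commit): quick smoke test of the new client.
resp = client.chat.completions.create(
    model=OPENAI_MODEL,
    messages=[{"role": "user", "content": "Reply with the single word: ok"}],
    temperature=0,
)
print(resp.choices[0].message.content)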
@@ -163,6 +169,52 @@ def currency_convert(args: str) -> str:
     except Exception:
         return "error"

+# --- TOOL 9: Image Captioning Tool ---
+@tool
+def image_caption(image_url: str) -> str:
+    """Generate a descriptive caption for an image given its URL."""
+    api_url = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base"
+    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
+    payload = {"inputs": image_url}
+    try:
+        resp = requests.post(api_url, headers=headers, json=payload, timeout=30)
+        resp.raise_for_status()
+        data = resp.json()
+        return data[0]["generated_text"] if isinstance(data, list) else data.get("generated_text", "no_caption")
+    except Exception:
+        return "error"
+
+# --- TOOL 10: Optical Character Recognition (OCR) Tool ---
+@tool
+def ocr_image(image_url: str) -> str:
+    """Extract text from an image given its URL."""
+    api_url = "https://api-inference.huggingface.co/models/impira/layoutlm-document-qa"
+    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
+    payload = {"inputs": {"image": image_url, "question": "What text is in the image?"}}
+    try:
+        resp = requests.post(api_url, headers=headers, json=payload, timeout=30)
+        resp.raise_for_status()
+        data = resp.json()
+        return data.get("answer", "no_text_found")
+    except Exception:
+        return "error"
+
+# --- TOOL 11: Image Classification Tool ---
+@tool
+def classify_image(image_url: str) -> str:
+    """Classify the main object or scene in an image given its URL."""
+    api_url = "https://api-inference.huggingface.co/models/google/vit-base-patch16-224"
+    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
+    payload = {"inputs": image_url}
+    try:
+        resp = requests.post(api_url, headers=headers, json=payload, timeout=30)
+        resp.raise_for_status()
+        data = resp.json()
+        return data[0]["label"] if isinstance(data, list) else data.get("label", "no_label")
+    except Exception:
+        return "error"
+
+
 ##-- Tool Discovery ---
 # Use @tool for each function.
 # Use get_all_tools() to auto-discover all decorated tools.
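Since the three new vision helpers are ordinary @tool-decorated functions wrapping the hosted Inference API, they can be exercised outside the agent. A hedged usage sketch (assumes a LangChain version where tools expose .invoke; the image URL is only a placeholder, and the commented outputs are illustrative):

# Sketch only (not in this commit): call a vision tool directly.
test_url = "https://upload.wikimedia.org/wikipedia/commons/3/3a/Cat03.jpg"  # placeholder image URL
print(image_caption.invoke(test_url))   # e.g. "a cat sitting on a wooden floor"
print(classify_image.invoke(test_url))  # e.g. "tabby, tabby cat"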
@@ -177,6 +229,9 @@ tools_list = [
     wikipedia_summary,
     dictionary_lookup,
     currency_convert,
+    image_caption,
+    ocr_image,
+    classify_image
 ]

 tool_descriptions = "\n".join(f"- {tool.name}: {tool.description}" for tool in tools_list)
@@ -216,22 +271,31 @@ Instructions:

 ## --- Initialize Hugging Face Model ---
 # Generate the chat interface, including the tools
+'''
 llm = HuggingFaceEndpoint(
     repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
     huggingfacehub_api_token=HF_ACCESS_KEY,
     model_kwargs={'prompt': system_prompt}
     # system_prompt=system_prompt,
 )
-
 chat_llm = ChatHuggingFace(llm=llm)
+'''
+llm = OpenAI(
+    openai_api_key=OPENAI_KEY,
+    model_name=OPENAI_MODEL,
+    temperature=0.1
+)
+
 # chat = ChatHuggingFace(llm=llm, verbose=True)
 # tools = [search_tool, fetch_weather]
 # chat_with_tools = chat.bind_tools(tools)

 agent = initialize_agent(
     tools=tools_list,
-    llm=chat_llm,
+    llm=llm,
+    # llm=chat_llm,
     agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+    agent_kwargs={"system_message": system_prompt},
     verbose=True,
     handle_parsing_errors=True
 )
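One detail worth flagging in this hunk: openai_api_key and model_name are constructor arguments of LangChain's OpenAI wrappers (the commented-out langchain_openai import), while the openai.OpenAI client imported earlier takes api_key, and initialize_agent expects a LangChain LLM or chat model. A hedged sketch of the wrapper-based setup this appears to be aiming at (assumes langchain-openai is installed; ChatOpenAI is chosen because the commented default 'gpt-3.5-turbo-0613' is a chat model; not part of this commit):

# Sketch only (not in this commit): build the agent LLM through LangChain's
# OpenAI chat wrapper, which accepts openai_api_key / model_name / temperature.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    openai_api_key=OPENAI_KEY,
    model_name=OPENAI_MODEL,
    temperature=0.1,
)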
@@ -301,8 +365,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             print(f"Skipping item with missing task_id or question: {item}")
             continue
         try:
-            full_prompt = f"{system_prompt}\n Input Question: {question_text}"
-            submitted_answer = agent(full_prompt)
+            # full_prompt = f"{system_prompt}\n Input Question: {question_text}"
+            # submitted_answer = agent.run(full_prompt)
+            submitted_answer = agent.run(question_text)
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
requirements.txt
CHANGED
@@ -6,4 +6,5 @@ langchainhub
 huggingface-hub
 langchain-huggingface
 langchain-community
-transformers
+transformers
+openai