Commit 2948a14
Parent(s): 4877923
fall back to langchain
app.py CHANGED
@@ -17,11 +17,11 @@ from langchain.tools import tool
 # from langgraph.prebuilt import tools_condition
 # from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
 from langchain.agents import initialize_agent, AgentType
-
-
+from langchain_community.llms import HuggingFaceHub
+from langchain_community.chat_models import ChatHuggingFace
 # import openai
 # from openai import OpenAI
-from langchain_openai import ChatOpenAI
+# from langchain_openai import ChatOpenAI
 
 ## # Load environment variables from .env file
 # --- Constants ---
@@ -31,9 +31,9 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 # load_dotenv()
 HF_ACCESS_KEY = os.getenv('HF_ACCESS_KEY')
 WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
-OPENAI_MODEL = os.getenv('OPENAI_MODEL') #'gpt-3.5-turbo-0613'
-OPENAI_KEY = os.getenv('OPENAI_KEY')
-client = OpenAI(api_key = OPENAI_KEY)
+# OPENAI_MODEL = os.getenv('OPENAI_MODEL') #'gpt-3.5-turbo-0613'
+# OPENAI_KEY = os.getenv('OPENAI_KEY')
+# client = OpenAI(api_key = OPENAI_KEY)
 
 ########## ----- DEFINING TOOLS -----##########
 
@@ -271,11 +271,11 @@ Instructions:
 
 ## --- Initialize Hugging Face Model ---
 # Generate the chat interface, including the tools
-
-llm =
+
+llm = HuggingFaceHub(
     repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
     huggingfacehub_api_token=HF_ACCESS_KEY,
-    model_kwargs={'prompt': system_prompt}
+    # model_kwargs={'prompt': system_prompt}
     # system_prompt=system_prompt,
 )
 chat_llm = ChatHuggingFace(llm=llm)
@@ -285,7 +285,7 @@ llm = ChatOpenAI(
     model_name=OPENAI_MODEL,
     temperature=0.1
 )
-
+'''
 # chat = ChatHuggingFace(llm=llm, verbose=True)
 # tools = [search_tool, fetch_weather]
 # chat_with_tools = chat.bind_tools(tools)
@@ -295,7 +295,7 @@ agent = initialize_agent(
     llm=llm,
     # llm=chat_llm,
     agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
-    agent_kwargs={"system_message": system_prompt},
+    # agent_kwargs={"system_message": system_prompt},
     verbose=True,
     handle_parsing_errors=True
 )
@@ -365,9 +365,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
                 print(f"Skipping item with missing task_id or question: {item}")
                 continue
             try:
-
-
-                submitted_answer = agent.run(question_text)
+                full_prompt = f"{system_prompt}\n Input Question: {question_text}"
+                submitted_answer = agent.run(full_prompt)
+                # submitted_answer = agent.run(question_text)
                 answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
                 results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
             except Exception as e:
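
For orientation, below is a minimal, self-contained sketch of how the pieces touched by this commit fit together once it lands. It is reconstructed only from the hunks above; the placeholder echo tool, the system_prompt text, and the sample question are assumptions rather than values from app.py, and the real file contains more (the actual tools and the Gradio/submission wiring) than is shown here.

# Sketch only, reconstructed from the hunks in this commit.
# The echo tool, system_prompt text, and sample question are placeholders.
import os

from langchain.tools import tool
from langchain.agents import initialize_agent, AgentType
from langchain_community.llms import HuggingFaceHub
from langchain_community.chat_models import ChatHuggingFace

HF_ACCESS_KEY = os.getenv("HF_ACCESS_KEY")
system_prompt = "You are a general AI assistant. Answer concisely."  # placeholder text

@tool
def echo(text: str) -> str:
    """Return the input text unchanged (stand-in for the real tools in app.py)."""
    return text

tools = [echo]

# Plain text-generation LLM on the Hugging Face Inference API
llm = HuggingFaceHub(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    huggingfacehub_api_token=HF_ACCESS_KEY,
)

# Chat wrapper; kept in app.py even though the agent below uses the raw llm
chat_llm = ChatHuggingFace(llm=llm)

# Classic ReAct agent; agent_kwargs={"system_message": ...} is commented out in this commit
agent = initialize_agent(
    tools,
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,
)

# The system prompt now rides inside the user input, as in the new try: block
question_text = "What is the capital of France?"  # placeholder question
full_prompt = f"{system_prompt}\n Input Question: {question_text}"
submitted_answer = agent.run(full_prompt)
print(submitted_answer)

As in the new try: block, the system prompt is prepended to each question rather than passed through agent_kwargs, so a single agent instance can serve every question in the submission loop.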