Update app.py
agent.py CHANGED
@@ -47,7 +47,7 @@ from langchain.agents import initialize_agent, AgentType
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.tools import Tool
 import time
-from
+from huggingface_hub import InferenceClient
 
 load_dotenv()
 
@@ -450,7 +450,38 @@ tools = [wiki_tool, calc_tool, file_tool, web_tool, arvix_tool, youtube_tool, vi
 
 # Define the LLM before using it
 #llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # or "gpt-3.5-turbo" "gpt-4"
-llm = ChatMistralAI(model="mistral-7b-instruct-v0.1")
+#llm = ChatMistralAI(model="mistral-7b-instruct-v0.1")
+
+from transformers import pipeline
+
+# Get the Hugging Face API token from the environment variable
+# Get the Hugging Face API token from the environment variable
+hf_token = os.getenv("HF_TOKEN")
+
+# Load the Qwen2.5-Coder-32B-Instruct model using HuggingFaceHub
+qwen_model = HuggingFaceHub(
+    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",  # Specify the model from Hugging Face
+    api_key=hf_token,  # Pass the API token for access
+    model_kwargs={"temperature": 0.7}  # Adjust temperature as needed
+)
+
+# Example usage with LangChain
+tools = [
+    Tool(
+        name="code_tool",
+        func=qwen_model.run,  # Use the run method for inference
+        description="Use this tool for code generation or similar tasks"
+    )
+]
+
+# Initialize the LangChain agent with the tool(s) and the model
+agent = initialize_agent(
+    tools=tools,
+    llm=qwen_model,
+    agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+    verbose=True
+)
+
 
 # Create an agent using the planner, task classifier, and decision logic
 agent = initialize_agent(
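The first hunk replaces a dangling "from" statement with the InferenceClient import from huggingface_hub. As a point of reference, here is a minimal sketch, not part of the commit, of how that client can query the same hosted model directly; the model id and the HF_TOKEN environment variable come from the second hunk, while the prompt and token limit are illustrative.

import os
from huggingface_hub import InferenceClient

# Sketch only: direct call to the hosted Qwen model through the Hugging Face
# Inference API, assuming HF_TOKEN is set in the environment.
client = InferenceClient(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    token=os.getenv("HF_TOKEN"),
)

response = client.chat_completion(
    messages=[{"role": "user", "content": "Write a Python function that reverses a string."}],
    max_tokens=256,
)
print(response.choices[0].message.content)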
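In the second hunk, the committed snippet calls HuggingFaceHub without importing it, LangChain's Hugging Face wrappers take the token through huggingfacehub_api_token rather than api_key, and the transformers pipeline import goes unused. A hedged sketch of the same intent, assuming the langchain_community HuggingFaceEndpoint wrapper rather than the author's exact setup:

import os
from langchain_community.llms import HuggingFaceEndpoint

hf_token = os.getenv("HF_TOKEN")  # same environment variable as in the diff

# Sketch only: wrap the hosted Qwen model as a LangChain LLM.
qwen_model = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",  # model repo id from the diff
    huggingfacehub_api_token=hf_token,          # token kwarg expected by LangChain
    temperature=0.7,                            # temperature value from the diff
    max_new_tokens=512,                         # illustrative generation limit
)

print(qwen_model.invoke("Write a one-line docstring for a bubble sort."))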
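For the tool and agent wiring at the end of the hunk, LangChain LLM objects expose invoke and predict rather than a run method, and initialize_agent selects the agent style through the agent keyword rather than agent_type. A minimal sketch under those assumptions, reusing the qwen_model object from the sketch above:

from langchain.agents import initialize_agent, AgentType
from langchain_community.tools import Tool

# Sketch only: expose the model as a single tool and build a ReAct-style agent.
code_tool = Tool(
    name="code_tool",
    func=qwen_model.invoke,  # invoke is the runnable entry point on LangChain LLMs
    description="Use this tool for code generation or similar tasks",
)

agent = initialize_agent(
    tools=[code_tool],
    llm=qwen_model,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # keyword is agent=, not agent_type=
    verbose=True,
)

# Example invocation (illustrative prompt):
# agent.run("Generate a Python function that checks whether a string is a palindrome.")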