Update agent.py
agent.py CHANGED

@@ -48,6 +48,11 @@ from langchain_community.chat_models import ChatOpenAI
 from langchain_community.tools import Tool
 import time
 from huggingface_hub import InferenceClient
+from langchain.chat_models import ChatOpenAI
+from langchain.llms import HuggingFaceHub
+from langchain.prompts import PromptTemplate
+from langchain.chains import LLMChain
+from langchain.agents import initialize_agent, Tool, AgentType
 
 load_dotenv()
 
@@ -467,7 +472,7 @@ from langchain_huggingface import HuggingFaceEndpoint
 
 # Initialize the HuggingFaceEndpoint with the desired model and parameters
 llm = HuggingFaceEndpoint(
-    repo_id="
+    repo_id="meta-llama/Meta-Llama-3-70B-Instruct",
     task="text-generation",
     huggingfacehub_api_token=hf_token,
     model_kwargs={"temperature": 0.7, "max_length": 1024}
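For reference, here is a minimal, self-contained sketch of how the endpoint configured above and the agent helpers imported in the first hunk could be wired together. Only the HuggingFaceEndpoint arguments come from this diff; the HF_TOKEN environment variable name, the echo tool, and the agent setup are illustrative assumptions, since the rest of agent.py is not shown here. The sketch also passes temperature and max_new_tokens as explicit arguments, because some langchain_huggingface releases reject declared generation parameters supplied through model_kwargs.

# Minimal sketch, not the committed agent.py: only the endpoint configuration is
# taken from the diff above; the HF_TOKEN env var name, the echo tool, and the
# agent wiring are assumptions made for illustration.
import os

from dotenv import load_dotenv
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_huggingface import HuggingFaceEndpoint

load_dotenv()
hf_token = os.getenv("HF_TOKEN")  # assumed source of the token; the diff only uses hf_token

# Endpoint from the second hunk; temperature/max_new_tokens are passed explicitly
# here because some langchain_huggingface versions reject them inside model_kwargs.
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Meta-Llama-3-70B-Instruct",
    task="text-generation",
    huggingfacehub_api_token=hf_token,
    temperature=0.7,
    max_new_tokens=1024,
)


def echo(query: str) -> str:
    """Placeholder tool, used only to make the agent wiring concrete."""
    return query


tools = [Tool(name="echo", func=echo, description="Returns the input unchanged.")]

# initialize_agent, Tool, and AgentType are the names added by the first hunk.
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)

if __name__ == "__main__":
    print(agent.run("Say hello."))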