Commit: ca8728d
Parent(s): 0a7fec5
Message: clean

Files changed:
- langgraph_dir/agent.py   +2 -1
- langgraph_dir/config.py  +2 -2
- llamaindex_dir/agent.py  +4 -2
- llamaindex_dir/config.py +4 -4
- requirements.txt         +1 -1
langgraph_dir/agent.py CHANGED

@@ -1,3 +1,5 @@
+# env variable needed: HF_TOKEN, OPENAI_API_KEY, BRAVE_SEARCH_API_KEY
+
 import os
 import json
 
@@ -6,7 +8,6 @@ from langchain_openai import ChatOpenAI
 from langgraph.graph import MessagesState
 from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
 from langgraph.graph import StateGraph, START, END
-from langchain.agents import load_tools
 from langchain_community.tools import BraveSearch
 
 from .prompt import system_prompt
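
For context, a minimal sketch of how the environment variables named in the new header comment are typically consumed on the LangGraph side; the exact variable handling and tool setup below are assumptions for illustration, not code from this repository.

# Hypothetical sketch: OPENAI_API_KEY is picked up implicitly by ChatOpenAI,
# while BRAVE_SEARCH_API_KEY is read explicitly for the Brave search tool.
import os

from langchain_openai import ChatOpenAI
from langchain_community.tools import BraveSearch

llm = ChatOpenAI(model="gpt-4.1-mini")  # reads OPENAI_API_KEY from the environment
search_tool = BraveSearch.from_api_key(
    api_key=os.environ["BRAVE_SEARCH_API_KEY"],
    search_kwargs={"count": 3},
)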
langgraph_dir/config.py CHANGED

@@ -1,4 +1,4 @@
 # OPENAI_MODEL_NAME = "gpt-4.1-nano"
-
+OPENAI_MODEL_NAME = "gpt-4.1-mini"
 # OPENAI_MODEL_NAME = "gpt-4.1"
-OPENAI_MODEL_NAME = "o4-mini"
+# OPENAI_MODEL_NAME = "o4-mini"
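
The active model moves from "o4-mini" to "gpt-4.1-mini"; the switch takes effect wherever the constant is imported. A small sketch, assuming agent.py builds its LLM from this config module (the import path is an assumption):

# Assumed wiring: the agent constructs its LLM from the config constant.
from langchain_openai import ChatOpenAI

from .config import OPENAI_MODEL_NAME  # now "gpt-4.1-mini"

llm = ChatOpenAI(model=OPENAI_MODEL_NAME)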
llamaindex_dir/agent.py CHANGED

@@ -1,7 +1,9 @@
+# env variable needed: HF_TOKEN
+
 from llama_index.core import PromptTemplate
 from llama_index.core.workflow import Context
 from llama_index.core.agent.workflow import ReActAgent, AgentStream, ToolCallResult
-from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 from llama_index.tools.wikipedia import WikipediaToolSpec
 from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
 from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
@@ -17,7 +19,7 @@ class LLamaIndexAgent:
                  show_prompt=True):
 
         # LLM definition
-        llm = HuggingFaceInferenceAPI(model_name=model_name,
+        llm = HuggingFaceInferenceAPI(model_name=model_name,  # needs HF_TOKEN in env
                                       provider=provider)
         print(f"LLamaIndexAgent initialized with model \"{model_name}\"")
 
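
The annotated constructor call fits the usual LlamaIndex agent wiring; below is a sketch under the assumption that the imported tool specs are converted to tools and handed to ReActAgent together with the HuggingFaceInferenceAPI LLM (the build_agent helper and its arguments are hypothetical, not the repository's actual code).

# Hypothetical helper mirroring the changed lines; tool wiring is an assumption.
from llama_index.core.agent.workflow import ReActAgent
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
from llama_index.tools.wikipedia import WikipediaToolSpec

def build_agent(model_name: str, provider: str) -> ReActAgent:
    llm = HuggingFaceInferenceAPI(model_name=model_name,  # needs HF_TOKEN in env
                                  provider=provider)
    tools = (WikipediaToolSpec().to_tool_list()
             + DuckDuckGoSearchToolSpec().to_tool_list())
    return ReActAgent(tools=tools, llm=llm)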
llamaindex_dir/config.py CHANGED

@@ -1,8 +1,8 @@
-HF_MODEL_NAME = "google/gemma-3-27b-it"
-HF_PROVIDER = "nebius"
+# HF_MODEL_NAME = "google/gemma-3-27b-it"
+# HF_PROVIDER = "nebius"
 
-
-
+HF_MODEL_NAME = "Qwen/Qwen2.5-Coder-32B-Instruct"
+HF_PROVIDER = "hf-inference"
 
 # HF_MODEL_NAME = "Qwen/Qwen3-32B"
 # HF_PROVIDER = "hf-inference"
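
With these defaults the agent now targets Qwen/Qwen2.5-Coder-32B-Instruct on the hf-inference provider. A quick smoke test of the new configuration, assuming HF_TOKEN is exported and that the provider serves this model for your token (the prompt and script are illustrative only):

# Illustrative smoke test of the newly active config values.
import os

from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

assert os.getenv("HF_TOKEN"), "HF_TOKEN must be set for provider='hf-inference'"

llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
                              provider="hf-inference")
print(llm.complete("Reply with the single word: ready"))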
requirements.txt CHANGED

@@ -9,10 +9,10 @@ langchain
 langgraph
 langchain-openai
 langchain-community
+langchain_experimental
 duckduckgo-search
 markdownify
 beautifulsoup4
-langchain_experimental
 pypdf
 youtube-transcript-api
 pytube