import os

from dotenv import load_dotenv
from langgraph.graph import StateGraph, START, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool

# Load environment variables (API keys for Groq, Tavily, etc.)
load_dotenv()
# ---- Basic Arithmetic Utilities ---- #
@tool
def multiply(a: int, b: int) -> int:
    """Returns the product of two integers."""
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Returns the sum of two integers."""
    return a + b


@tool
def subtract(a: int, b: int) -> int:
    """Returns the difference between two integers."""
    return a - b


@tool
def divide(a: int, b: int) -> float:
    """Performs division, raising ValueError on division by zero."""
    if b == 0:
        raise ValueError("Division by zero is undefined.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Returns the remainder after division."""
    return a % b
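
# These @tool-decorated functions are LangChain tools and can be smoke-tested
# directly with a dict of arguments, e.g.:
#   multiply.invoke({"a": 6, "b": 7})   # -> 42
#   divide.invoke({"a": 1, "b": 0})     # -> raises ValueError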
# ---- Search Tools ---- #
@tool
def search_wikipedia(query: str) -> str:
    """Returns up to 2 documents related to a query from Wikipedia."""
    docs = WikipediaLoader(query=query, load_max_docs=2).load()
    return "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}'
        for doc in docs
    )
@tool
def search_web(query: str) -> str:
    """Fetches up to 3 web results using Tavily."""
    # TavilySearchResults returns a list of result dicts with "url" and
    # "content" keys, not Document objects.
    results = TavilySearchResults(max_results=3).invoke(query)
    return "\n\n---\n\n".join(
        f'<Document source="{result["url"]}"/>\n{result["content"]}'
        for result in results
    )
@tool
def search_arxiv(query: str) -> str:
    """Retrieves up to 3 papers related to the query from ArXiv."""
    docs = ArxivLoader(query=query, load_max_docs=3).load()
    # ArxivLoader metadata carries "Title"/"Published" rather than "source";
    # each paper is truncated to its first 1000 characters to keep context small.
    return "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("Title", "")}"/>\n{doc.page_content[:1000]}'
        for doc in docs
    )
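
# Each search tool takes a single "query" argument, e.g.:
#   search_wikipedia.invoke({"query": "Alan Turing"})
# Note these hit live services: search_web needs TAVILY_API_KEY set, and all
# three require network access.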
system_message = SystemMessage(content="""You are a helpful assistant tasked with answering questions using a set of tools. Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER]
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
- If you are asked for a number, don't use a comma in the number and avoid units like $ or % unless specified otherwise.
- If you are asked for a string, avoid using articles and abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
- If you are asked for a comma-separated list, apply the above rules depending on whether each item is a number or string.
Your answer should start only with "FINAL ANSWER: ", followed by your result.""")
toolset = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    search_wikipedia,
    search_web,
    search_arxiv,
]
# ---- Graph Construction ---- #
def create_agent_flow(provider: str = "groq"):
    """Constructs the LangGraph conversational flow with tool support."""
    if provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        # Read the key from the environment; never hard-code credentials.
        llm = ChatGroq(api_key=os.environ.get("GROQ_API_KEY"), model="qwen-qwq-32b", temperature=0)
    elif provider == "huggingface":
        llm = ChatHuggingFace(llm=HuggingFaceEndpoint(
            endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
            temperature=0,
        ))
    else:
        raise ValueError("Unsupported provider. Choose from: 'google', 'groq', 'huggingface'.")

    llm_toolchain = llm.bind_tools(toolset)

    # Assistant node: prepend the system prompt and call the tool-bound LLM.
    def assistant_node(state: MessagesState):
        response = llm_toolchain.invoke([system_message] + state["messages"])
        return {"messages": [response]}

    # Build the conversational graph: an assistant <-> tools loop.
    graph = StateGraph(MessagesState)
    graph.add_node("assistant", assistant_node)
    graph.add_node("tools", ToolNode(toolset))
    graph.add_edge(START, "assistant")
    # Route to "tools" when the LLM emits tool calls, otherwise finish.
    graph.add_conditional_edges("assistant", tools_condition)
    graph.add_edge("tools", "assistant")
    return graph.compile()
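
# Optional: the compiled graph's topology can be inspected, e.g.
#   print(create_agent_flow().get_graph().draw_mermaid())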
if __name__ == "__main__":
    question = "What is the capital of France?"
    # Build the graph
    graph = create_agent_flow(provider="groq")
    # Run the graph on a single user question and print the transcript
    messages = [HumanMessage(content=question)]
    result = graph.invoke({"messages": messages})
    for m in result["messages"]:
        m.pretty_print()