File size: 4,508 Bytes
fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df fc341bd f52e3df |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 |
import os
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage
from langchain.tools import tool
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from dotenv import load_dotenv
# Load environment variables from .env into os.environ
# (expects GROQ_API_KEY for the LLM and TAVILY_API_KEY for web search).
load_dotenv()
# Initialize LLM
def initialize_llm():
    """Construct the Groq-hosted chat model used throughout this module.

    Reads GROQ_API_KEY from the environment; temperature 0 keeps tool-calling
    behavior deterministic.
    """
    return ChatGroq(
        groq_api_key=os.getenv("GROQ_API_KEY"),
        model_name="qwen-qwq-32b",
        temperature=0,
    )
# Initialize Tavily Search Tool
def initialize_search_tool():
    """Create a Tavily web-search tool with its default configuration."""
    search = TavilySearchResults()
    return search
# Weather tool
def get_weather(location: str, search_tool: TavilySearchResults = None) -> str:
    """Fetch the current weather for *location* via a Tavily web search.

    A search tool may be injected for reuse/testing; otherwise a fresh one
    is created on demand.
    """
    searcher = search_tool if search_tool is not None else initialize_search_tool()
    return searcher.run(f"current weather in {location}")
# Recommendation chain
def initialize_recommendation_chain(llm: ChatGroq) -> Runnable:
    """Compose the prompt-template → LLM pipeline that turns a weather
    condition into clothing/activity advice plus a health tip."""
    template = """
You are a helpful assistant that gives weather-based advice.
Given the current weather condition: "{weather_condition}", provide:
1. Clothing or activity recommendations suited for this weather.
2. At least one health tip to stay safe or comfortable in this condition.
Be concise and clear.
"""
    prompt = ChatPromptTemplate.from_template(template)
    return prompt | llm
def get_recommendation(weather_condition: str, recommendation_chain: "Runnable" = None) -> str:
    """Return weather-based clothing/activity advice as plain text.

    Args:
        weather_condition: Free-text description of the current weather.
        recommendation_chain: Optional prompt|LLM pipeline; when omitted, a
            fresh LLM and chain are built on demand.

    Returns:
        The advice text produced by the chain.
    """
    if recommendation_chain is None:
        llm = initialize_llm()
        recommendation_chain = initialize_recommendation_chain(llm)
    result = recommendation_chain.invoke({"weather_condition": weather_condition})
    # BUG FIX: a `prompt | llm` chain returns an AIMessage, not a str, so the
    # original violated its declared -> str contract. Unwrap .content (falling
    # back to str() for chains that already yield plain text).
    return result.content if hasattr(result, "content") else str(result)
# Math tools exposed to the agent.
# NOTE: @tool derives the tool description from the docstring and raises a
# ValueError when it is missing, so every tool below must carry one.
@tool
def add(x: int, y: int) -> int:
    """Add two integers and return their sum."""
    return x + y
@tool
def subtract(x: int, y: int) -> int:
    """Subtract y from x and return the difference."""
    return x - y
@tool
def multiply(x: int, y: int) -> int:
    """Multiply two integers and return the product."""
    return x * y
@tool
def divide(x: int, y: int) -> float:
    """Divide x by y and return the quotient.

    Raises:
        ValueError: If y is zero.
    """
    if y == 0:
        raise ValueError("Cannot divide by zero.")
    return x / y
@tool
def square(x: int) -> int:
    """Return x squared."""
    return x * x
@tool
def cube(x: int) -> int:
    """Return x cubed."""
    return x * x * x
@tool
def power(x: int, y: int) -> int:
    """Return x raised to the power y."""
    return x ** y
@tool
def factorial(n: int) -> int:
    """Compute n! iteratively.

    Raises:
        ValueError: If n is negative.
    """
    if n < 0:
        raise ValueError("Factorial is not defined for negative numbers.")
    if n == 0 or n == 1:
        return 1
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
@tool
def mean(numbers: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    Raises:
        ValueError: If the list is empty.
    """
    if not numbers:
        raise ValueError("The list is empty.")
    return sum(numbers) / len(numbers)
@tool
def standard_deviation(numbers: list) -> float:
    """Return the population standard deviation of a list of numbers.

    Raises:
        ValueError: If the list is empty.
    """
    if not numbers:
        raise ValueError("The list is empty.")
    # BUG FIX: the original called mean(numbers) directly, but @tool wraps
    # `mean` in a StructuredTool, which is not a plain callable — invoking it
    # like a function fails at runtime. Compute the mean inline instead.
    mean_value = sum(numbers) / len(numbers)
    variance = sum((x - mean_value) ** 2 for x in numbers) / len(numbers)
    return variance ** 0.5
# Build the LangGraph
def build_graph():
    """Assemble and compile the agent graph.

    The graph alternates between an `assistant` node (tool-calling LLM) and a
    `tools` node (executes requested tools) until the LLM stops requesting
    tools, at which point the run ends.
    """
    llm = initialize_llm()
    search_tool = initialize_search_tool()
    recommendation_chain = initialize_recommendation_chain(llm)

    # Closures over the pre-built search tool / chain, so these expensive
    # resources are created once per graph rather than once per call.
    # @tool requires a docstring — it becomes the description the LLM sees.
    @tool
    def weather_tool(location: str) -> str:
        """Get the current weather for a location."""
        return get_weather(location, search_tool)

    @tool
    def recommendation_tool(weather_condition: str) -> str:
        """Get clothing/activity advice and a health tip for a weather condition."""
        return get_recommendation(weather_condition, recommendation_chain)

    tools = [weather_tool, recommendation_tool, add, subtract, multiply, divide, square, cube, power, factorial, mean, standard_deviation]
    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        # One LLM step over the accumulated conversation; the response may
        # contain tool calls, which tools_condition routes to the tools node.
        print("Entering assistant node...")
        response = llm_with_tools.invoke(state["messages"])
        print(f"Assistant says: {response.content}")
        return {"messages": [response]}

    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.set_entry_point("assistant")
    # tools_condition routes to "tools" when the last message has tool calls,
    # otherwise to END.
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")
    return builder.compile()
if __name__ == "__main__":
    # Smoke test: a single query that should trigger both a math tool call
    # (factorial) and the weather tool.
    agent = build_graph()
    question = "What is the factorial of 6 and can you also tell me the weather in Paris?"
    final_state = agent.invoke({"messages": [HumanMessage(content=question)]})
    for message in final_state["messages"]:
        message.pretty_print()
|