HarshitSundriyal commited on
Commit
f52e3df
·
1 Parent(s): 397d99c

Update agent code: add math/statistics tools, refresh graph setup, and remove checked-in chroma_db binaries

Browse files
agent.py CHANGED
@@ -13,10 +13,8 @@ from dotenv import load_dotenv
13
  # Load environment variables from .env
14
  load_dotenv()
15
 
16
-
17
  # Initialize LLM
18
  def initialize_llm():
19
- """Initializes the ChatGroq LLM."""
20
  llm = ChatGroq(
21
  temperature=0,
22
  model_name="qwen-qwq-32b",
@@ -26,27 +24,20 @@ def initialize_llm():
26
 
27
  # Initialize Tavily Search Tool
28
  def initialize_search_tool():
29
- """Initializes the TavilySearchResults tool."""
30
- search_tool = TavilySearchResults()
31
- return search_tool
32
-
33
 
34
-
35
- # Define Tools
36
  def get_weather(location: str, search_tool: TavilySearchResults = None) -> str:
37
- """Fetch the current weather information for a given location using Tavily search."""
38
  if search_tool is None:
39
  search_tool = initialize_search_tool()
40
  query = f"current weather in {location}"
41
- results = search_tool.run(query)
42
- return results
43
-
44
 
 
45
  def initialize_recommendation_chain(llm: ChatGroq) -> Runnable:
46
- """Initializes the recommendation chain."""
47
  recommendation_prompt = ChatPromptTemplate.from_template("""
48
  You are a helpful assistant that gives weather-based advice.
49
-
50
  Given the current weather condition: "{weather_condition}", provide:
51
  1. Clothing or activity recommendations suited for this weather.
52
  2. At least one health tip to stay safe or comfortable in this condition.
@@ -55,73 +46,104 @@ def initialize_recommendation_chain(llm: ChatGroq) -> Runnable:
55
  """)
56
  return recommendation_prompt | llm
57
 
58
-
59
-
60
  def get_recommendation(weather_condition: str, recommendation_chain: Runnable = None) -> str:
61
- """Give activity/clothing recommendations and health tips based on the weather condition using an LLM."""
62
  if recommendation_chain is None:
63
  llm = initialize_llm()
64
  recommendation_chain = initialize_recommendation_chain(llm)
65
  return recommendation_chain.invoke({"weather_condition": weather_condition})
66
 
67
-
68
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  def build_graph():
70
- """Build the graph using Groq and custom prompt/tools setup"""
71
-
72
- # Initialize the LLM
73
  llm = initialize_llm()
74
-
75
- # Initialize Tavily tool
76
  search_tool = initialize_search_tool()
77
-
78
-
79
- # Initialize the recommendation chain
80
  recommendation_chain = initialize_recommendation_chain(llm)
81
 
82
- # Define tools
83
  @tool
84
  def weather_tool(location: str) -> str:
85
- """Fetch the current weather information for a given location."""
86
- return get_weather(location, search_tool) # Pass the search tool
87
 
88
  @tool
89
  def recommendation_tool(weather_condition: str) -> str:
90
- """Get recommendations based on weather."""
91
  return get_recommendation(weather_condition, recommendation_chain)
92
 
93
- tools = [weather_tool, recommendation_tool]
94
 
95
- # Bind tools to LLM
96
  llm_with_tools = llm.bind_tools(tools)
97
 
98
- # Define assistant node
99
  def assistant(state: MessagesState):
100
- """Assistant node"""
101
  print("Entering assistant node...")
102
  response = llm_with_tools.invoke(state["messages"])
103
  print(f"Assistant says: {response.content}")
104
  return {"messages": [response]}
105
 
106
- # Create graph
107
  builder = StateGraph(MessagesState)
108
  builder.add_node("assistant", assistant)
109
  builder.add_node("tools", ToolNode(tools))
110
  builder.set_entry_point("assistant")
111
  builder.add_conditional_edges("assistant", tools_condition)
112
  builder.add_edge("tools", "assistant")
113
- graph = builder.compile()
114
-
115
- return graph
116
-
117
-
118
 
119
- # Main execution
120
  if __name__ == "__main__":
121
- # Build and run the graph
122
  graph = build_graph()
123
- question = "What are the Upanishads?"
124
  messages = [HumanMessage(content=question)]
125
- messages = graph.invoke({"messages": messages})
126
- for m in messages["messages"]:
127
- m.pretty_print()
 
13
  # Load environment variables from .env
14
  load_dotenv()
15
 
 
16
  # Initialize LLM
17
  def initialize_llm():
 
18
  llm = ChatGroq(
19
  temperature=0,
20
  model_name="qwen-qwq-32b",
 
24
 
25
# Initialize Tavily Search Tool
def initialize_search_tool():
    """Build and return a fresh TavilySearchResults search tool."""
    return TavilySearchResults()
 
 
 
28
 
29
# Weather tool
def get_weather(location: str, search_tool: TavilySearchResults = None) -> str:
    """Return current-weather search results for *location*.

    Falls back to a newly created Tavily search tool when none is supplied.
    """
    if search_tool is None:
        search_tool = initialize_search_tool()
    return search_tool.run(f"current weather in {location}")
 
 
35
 
36
+ # Recommendation chain
37
  def initialize_recommendation_chain(llm: ChatGroq) -> Runnable:
 
38
  recommendation_prompt = ChatPromptTemplate.from_template("""
39
  You are a helpful assistant that gives weather-based advice.
40
+
41
  Given the current weather condition: "{weather_condition}", provide:
42
  1. Clothing or activity recommendations suited for this weather.
43
  2. At least one health tip to stay safe or comfortable in this condition.
 
46
  """)
47
  return recommendation_prompt | llm
48
 
 
 
49
def get_recommendation(weather_condition: str, recommendation_chain: Runnable = None) -> str:
    """Invoke the recommendation chain for *weather_condition*.

    Builds a default LLM-backed chain on the fly when none is provided.
    """
    if recommendation_chain is None:
        recommendation_chain = initialize_recommendation_chain(initialize_llm())
    return recommendation_chain.invoke({"weather_condition": weather_condition})
54
 
55
# Math tools
# NOTE: LangChain's @tool decorator requires a docstring on each function —
# it becomes the tool description the LLM uses to pick a tool. The original
# versions had none, which makes @tool fail at decoration time.
@tool
def add(x: int, y: int) -> int:
    """Add two integers and return the sum."""
    return x + y


@tool
def subtract(x: int, y: int) -> int:
    """Subtract y from x and return the difference."""
    return x - y


@tool
def multiply(x: int, y: int) -> int:
    """Multiply two integers and return the product."""
    return x * y


@tool
def divide(x: int, y: int) -> float:
    """Divide x by y and return the quotient; raises ValueError when y is zero."""
    if y == 0:
        raise ValueError("Cannot divide by zero.")
    return x / y


@tool
def square(x: int) -> int:
    """Return x squared."""
    return x * x


@tool
def cube(x: int) -> int:
    """Return x cubed."""
    return x * x * x


@tool
def power(x: int, y: int) -> int:
    """Return x raised to the power y."""
    return x ** y


@tool
def factorial(n: int) -> int:
    """Return n! for a non-negative integer n; raises ValueError for n < 0."""
    if n < 0:
        raise ValueError("Factorial is not defined for negative numbers.")
    # range(2, n + 1) is empty for n in {0, 1}, so the result stays 1.
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result


@tool
def mean(numbers: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not numbers:
        raise ValueError("The list is empty.")
    return sum(numbers) / len(numbers)


@tool
def standard_deviation(numbers: list) -> float:
    """Return the population standard deviation of a non-empty list of numbers."""
    if not numbers:
        raise ValueError("The list is empty.")
    # Compute the mean inline: after decoration, `mean` is a LangChain tool
    # object, so calling it directly like `mean(numbers)` is broken/deprecated.
    mean_value = sum(numbers) / len(numbers)
    variance = sum((x - mean_value) ** 2 for x in numbers) / len(numbers)
    return variance ** 0.5
110
+
111
# Build the LangGraph
def build_graph():
    """Assemble and compile the agent graph: LLM node + ToolNode loop.

    Returns the compiled LangGraph; invoke it with {"messages": [...]}.
    """
    llm = initialize_llm()
    search_tool = initialize_search_tool()
    recommendation_chain = initialize_recommendation_chain(llm)

    # @tool requires a docstring — it becomes the tool description the LLM
    # sees; without one the decorator raises at definition time.
    @tool
    def weather_tool(location: str) -> str:
        """Fetch the current weather information for a given location."""
        return get_weather(location, search_tool)

    @tool
    def recommendation_tool(weather_condition: str) -> str:
        """Give clothing/activity recommendations and a health tip for a weather condition."""
        return get_recommendation(weather_condition, recommendation_chain)

    tools = [
        weather_tool,
        recommendation_tool,
        add,
        subtract,
        multiply,
        divide,
        square,
        cube,
        power,
        factorial,
        mean,
        standard_deviation,
    ]

    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        # Single LLM step; tools_condition routes to the ToolNode whenever
        # the response carries tool calls, otherwise the graph ends.
        print("Entering assistant node...")
        response = llm_with_tools.invoke(state["messages"])
        print(f"Assistant says: {response.content}")
        return {"messages": [response]}

    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.set_entry_point("assistant")
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")
    return builder.compile()
 
 
 
 
142
 
 
143
if __name__ == "__main__":
    # Build the agent graph and run a single question through it.
    graph = build_graph()
    question = "What is the factorial of 6 and can you also tell me the weather in Paris?"
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for message in result["messages"]:
        message.pretty_print()
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/data_level0.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:23add52afbe7588391f32d3deffb581b2663d2e2ad8851aba7de25e6b3f66761
3
- size 32120000
 
 
 
 
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/header.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f8c7f00b4415698ee6cb94332eff91aedc06ba8e066b1f200e78ca5df51abb57
3
- size 100
 
 
 
 
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/length.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e7e2dcff542de95352682dc186432e98f0188084896773f1973276b0577d5305
3
- size 40000
 
 
 
 
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/link_lists.bin DELETED
File without changes