Commit fc341bd · committed by HarshitSundriyal
1 Parent(s): 57e7e75

added assignment files

agent.py ADDED
@@ -0,0 +1,127 @@
+ import os
+ from langchain_groq import ChatGroq
+ from langchain.prompts import PromptTemplate
+ from langgraph.graph import START, StateGraph, MessagesState
+ from langgraph.prebuilt import ToolNode, tools_condition
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain_core.messages import HumanMessage
+ from langchain.tools import tool
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.runnables import Runnable
+ from dotenv import load_dotenv
+
+ # Load environment variables from .env
+ load_dotenv()
+
+
+ # Initialize LLM
+ def initialize_llm():
+     """Initializes the ChatGroq LLM."""
+     llm = ChatGroq(
+         temperature=0,
+         model_name="qwen-qwq-32b",
+         groq_api_key=os.getenv("GROQ_API_KEY")
+     )
+     return llm
+
+ # Initialize Tavily Search Tool
+ def initialize_search_tool():
+     """Initializes the TavilySearchResults tool."""
+     search_tool = TavilySearchResults()
+     return search_tool
+
+
+
+ # Define Tools
+ def get_weather(location: str, search_tool: TavilySearchResults = None) -> str:
+     """Fetch the current weather information for a given location using Tavily search."""
+     if search_tool is None:
+         search_tool = initialize_search_tool()
+     query = f"current weather in {location}"
+     results = search_tool.run(query)
+     return results
+
+
+ def initialize_recommendation_chain(llm: ChatGroq) -> Runnable:
+     """Initializes the recommendation chain."""
+     recommendation_prompt = ChatPromptTemplate.from_template("""
+     You are a helpful assistant that gives weather-based advice.
+
+     Given the current weather condition: "{weather_condition}", provide:
+     1. Clothing or activity recommendations suited for this weather.
+     2. At least one health tip to stay safe or comfortable in this condition.
+
+     Be concise and clear.
+     """)
+     return recommendation_prompt | llm
+
+
+
+ def get_recommendation(weather_condition: str, recommendation_chain: Runnable = None) -> str:
+     """Give activity/clothing recommendations and health tips based on the weather condition using an LLM."""
+     if recommendation_chain is None:
+         llm = initialize_llm()
+         recommendation_chain = initialize_recommendation_chain(llm)
+     return recommendation_chain.invoke({"weather_condition": weather_condition})
+
+
+
+ def build_graph():
+     """Build the graph using Groq and custom prompt/tools setup"""
+
+     # Initialize the LLM
+     llm = initialize_llm()
+
+     # Initialize Tavily tool
+     search_tool = initialize_search_tool()
+
+
+     # Initialize the recommendation chain
+     recommendation_chain = initialize_recommendation_chain(llm)
+
+     # Define tools
+     @tool
+     def weather_tool(location: str) -> str:
+         """Fetch the current weather information for a given location."""
+         return get_weather(location, search_tool)  # Pass the search tool
+
+     @tool
+     def recommendation_tool(weather_condition: str) -> str:
+         """Get recommendations based on weather."""
+         return get_recommendation(weather_condition, recommendation_chain)
+
+     tools = [weather_tool, recommendation_tool]
+
+     # Bind tools to LLM
+     llm_with_tools = llm.bind_tools(tools)
+
+     # Define assistant node
+     def assistant(state: MessagesState):
+         """Assistant node"""
+         print("Entering assistant node...")
+         response = llm_with_tools.invoke(state["messages"])
+         print(f"Assistant says: {response.content}")
+         return {"messages": [response]}
+
+     # Create graph
+     builder = StateGraph(MessagesState)
+     builder.add_node("assistant", assistant)
+     builder.add_node("tools", ToolNode(tools))
+     builder.set_entry_point("assistant")
+     builder.add_conditional_edges("assistant", tools_condition)
+     builder.add_edge("tools", "assistant")
+     graph = builder.compile()
+
+     return graph
+
+
+
+ # Main execution
+ if __name__ == "__main__":
+     # Build and run the graph
+     graph = build_graph()
+     question = "What are the Upanishads?"
+     messages = [HumanMessage(content=question)]
+     messages = graph.invoke({"messages": messages})
+     for m in messages["messages"]:
+         m.pretty_print()
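A quick way to exercise `agent.py` end to end is a small driver script. The sketch below is illustrative rather than part of the commit: it assumes `agent.py` is on the import path and that both `GROQ_API_KEY` and `TAVILY_API_KEY` are set in the local `.env` (Tavily reads its key from the environment even though only the Groq key is passed explicitly).

```python
# smoke_test.py - hypothetical local check for the graph built in agent.py
from langchain_core.messages import HumanMessage

from agent import build_graph

graph = build_graph()
state = graph.invoke({"messages": [HumanMessage(content="What should I wear in Delhi today?")]})

# Show which tools the assistant decided to call before answering.
for msg in state["messages"]:
    tool_calls = getattr(msg, "tool_calls", None)
    if tool_calls:
        print("tool calls:", [call["name"] for call in tool_calls])

print("final answer:", state["messages"][-1].content)
```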
agents.ipynb ADDED
@@ -0,0 +1,801 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "72ef7057",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stderr",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "/Users/harshitsundriyal/Library/Python/3.9/lib/python/site-packages/urllib3/__init__.py:35: NotOpenSSLWarning: urllib3 v2 only supports OpenSSL 1.1.1+, currently the 'ssl' module is compiled with 'LibreSSL 2.8.3'. See: https://github.com/urllib3/urllib3/issues/3020\n",
14
+ " warnings.warn(\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "import os\n",
20
+ "from dotenv import load_dotenv\n",
21
+ "from langchain_groq import ChatGroq\n",
22
+ "from langchain.prompts import PromptTemplate"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 2,
28
+ "id": "11b5a125",
29
+ "metadata": {},
30
+ "outputs": [
31
+ {
32
+ "data": {
33
+ "text/plain": [
34
+ "True"
35
+ ]
36
+ },
37
+ "execution_count": 2,
38
+ "metadata": {},
39
+ "output_type": "execute_result"
40
+ }
41
+ ],
42
+ "source": [
43
+ "from langgraph.graph import START, StateGraph, MessagesState\n",
44
+ "from langgraph.prebuilt import (tools_condition,\n",
45
+ " ToolNode)\n",
46
+ "from langchain_groq import ChatGroq\n",
47
+ "from langchain_huggingface import HuggingFaceEmbeddings, ChatHuggingFace, HuggingFaceEndpoint\n",
48
+ "from langchain_community.tools.tavily_search import TavilySearchResults\n",
49
+ "from langchain_core.messages import SystemMessage, HumanMessage\n",
50
+ "from langchain.tools import tool\n",
51
+ "from langchain_community.document_loaders import (\n",
52
+ " WikipediaLoader,\n",
53
+ " GoogleDriveLoader,\n",
54
+ " ArxivLoader)\n",
55
+ "\n",
56
+ "\n",
57
+ "load_dotenv()"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": 3,
63
+ "id": "f55a2a45",
64
+ "metadata": {},
65
+ "outputs": [],
66
+ "source": [
67
+ "llm = ChatGroq(\n",
68
+ " temperature=0,\n",
69
+ " model_name=\"qwen-qwq-32b\", # Updated to working model\n",
70
+ " groq_api_key=os.getenv(\"GROQ_API_KEY\")\n",
71
+ ")"
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "code",
76
+ "execution_count": 4,
77
+ "id": "5d9243b4",
78
+ "metadata": {},
79
+ "outputs": [],
80
+ "source": [
81
+ "# Initialize Tavily tool\n",
82
+ "search_tool = TavilySearchResults()"
83
+ ]
84
+ },
85
+ {
86
+ "cell_type": "code",
87
+ "execution_count": 5,
88
+ "id": "b4c2d3fb",
89
+ "metadata": {},
90
+ "outputs": [
91
+ {
92
+ "name": "stdout",
93
+ "output_type": "stream",
94
+ "text": [
95
+ "Tavily Search Results:\n",
96
+ "[{'title': 'What is the capital of India? States and union territories explained.', 'url': 'https://www.usatoday.com/story/news/world/2023/05/24/what-is-the-capital-of-india/70195720007/', 'content': 'Want to learn more about the soon-to-be most populous country? Here’s some interesting information about how India is organized.\\n\\nWhat is the capital of India?\\n\\nThe capital of India is New Delhi, located in the north-central part of the country to the west of the Yamuna River.\\n\\nCalcutta (now Kolkata, the capital of West Bengal) was the country’s capital until 1911 when King George V declared Delhi the new capital and construction of New Delhi began. [...] When the national government achieved independence in 1947, New Delhi became the capital.\\n\\nMumbai, the state capital of Maharashtra, is often considered the financial capital of India because of its role in the national and international economy.\\n\\nHow many states are in India?\\n\\nIndia is home to 28 states, each with its own capital and run by a Governor who represents the President:\\n\\nIndia also has eight union territories, governed by an Administrator appointed by the President: [...] What state is New Delhi in?\\n\\nNew Delhi is part of one of India’s union territories, the National Capital Territory of Dehli. Old Delhi, now a historic city, is north of New Delhi in the same union territory.', 'score': 0.942385}, {'title': 'List of capitals of India - Wikipedia', 'url': 'https://en.wikipedia.org/wiki/List_of_capitals_of_India', 'content': 'Find sources:\\xa0\"List of capitals of India\"\\xa0–\\xa0news\\xa0· newspapers\\xa0· books\\xa0· scholar\\xa0· JSTOR (September 2012) (Learn how and when to remove this message)\\nThis is a list of locations which have served as capital cities in India. The current capital city is New Delhi, which replaced Calcutta in 1911.\\nAncient period[edit]\\nRajgir: Initial capital of the Magadha Empire from 6th century BCE to 460 BCE, called Girivraj at the time. [1] [...] In 1858, Allahabad (now Prayagraj) became the capital of India for a day when it also served as the capital of North-Western Provinces.[3]\\nDuring the British Raj, until 1911, Calcutta was the capital of India.[4]\\nBy the latter half of the 19th century, Shimla had become the summer capital.[5] [...] King George V proclaimed the transfer of the capital from Calcutta to Delhi at the climax of the 1911 Delhi Durbar on 12 December 1911. The buildings housing the Viceroy, government, and parliament were inaugurated in early 1931.\\nReferences[edit]\\n^ \"Rajgir: On Hallowed Ground\". 3 September 2017. Retrieved 22 July 2020. between the 6th and 5th centuries BCE, all roads led to the great city of Rajgir', 'score': 0.90869105}, {'title': 'Which City became the capital of India for a day, Check here', 'url': 'https://www.jagranjosh.com/general-knowledge/which-city-became-the-capital-of-india-for-a-day-1731421925-1', 'content': 'one day capital of India\\nIf someone asks you what is the capital of India, you will answer that it is New Delhi, which the British established. In Indian history, the capital of the country changed many times and different cities got the honour of becoming the capital of the country. [...] Which City became the Capital of India for a Day, Check Here\\nIf you are asked what the capital of India is, your answer will be New Delhi, which was settled by the British. Do you know that once in Indian history, a district was made the capital of the country? 
However, this incident lasted only for a day, after which it was recorded in the pages of history forever. This article will introduce you to one such district in India.\\n\\nByKishan Kumar\\nNov 12, 2024, 20:03 IST [...] After making Kolkata its capital, on 12 December 1912, New Delhi was declared as the capital of the country in the presence of George V. Since then, the name of New Delhi has been associated with the capital of the country.\\nAbout the Author', 'score': 0.9047265}, {'title': 'Capital of India - Definition, Meaning & Synonyms - Vocabulary.com', 'url': 'https://www.vocabulary.com/dictionary/capital%20of%20India', 'content': 'Capital of India - Definition, Meaning & Synonyms | Vocabulary.com\\nSKIP TO CONTENT\\n\\n\\nDictionary\\nVocabulary Lists\\nVocabTrainer™\\n\\ncapital of India\\nOther forms: capitals of India\\nDefinitions of capital of India\\n\\n\\nnoun\\nthe capital of India is a division of the old city of Delhi\\nsynonyms: Indian capital, New Delhi\\nsee moresee less\\nexample of:\\nnational capital\\nthe capital city of a nation\\n\\n\\nCite this entry\\nStyle:\\nMLA\\n\\nMLA\\nAPA\\nChicago', 'score': 0.8533396}, {'title': 'Which is the Capital of India? #capitalofindia #delhi #newdelhi', 'url': 'https://www.youtube.com/watch?v=Gj9rOLe4CZU', 'content': 'Enroll Now https://chahalacademy.com/offers Follow Us For Regular Updates: Instagram: https://www.instagram.com/chahalacademy Telegram:', 'score': 0.8399576}]\n"
97
+ ]
98
+ }
99
+ ],
100
+ "source": [
101
+ "# Run a test query\n",
102
+ "query = \"What is the capital of India?\"\n",
103
+ "results = search_tool.run(query)\n",
104
+ "\n",
105
+ "# Print results\n",
106
+ "print(\"Tavily Search Results:\")\n",
107
+ "print(results)"
108
+ ]
109
+ },
110
+ {
111
+ "cell_type": "code",
112
+ "execution_count": 6,
113
+ "id": "26e31bb4",
114
+ "metadata": {},
115
+ "outputs": [],
116
+ "source": [
117
+ "@tool\n",
118
+ "def get_weather(location: str) -> str:\n",
119
+ " \"\"\"Fetch the current weather information for a given location using Tavily search.\"\"\"\n",
120
+ " query = f\"current weather in {location}\"\n",
121
+ " results = search_tool.run(query)\n",
122
+ " return results"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": 7,
128
+ "id": "b98961af",
129
+ "metadata": {},
130
+ "outputs": [
131
+ {
132
+ "data": {
133
+ "text/plain": [
134
+ "[{'title': 'Weather in Delhi',\n",
135
+ " 'url': 'https://www.weatherapi.com/',\n",
136
+ " 'content': \"{'location': {'name': 'Delhi', 'region': 'Ontario', 'country': 'Canada', 'lat': 42.85, 'lon': -80.5, 'tz_id': 'America/Toronto', 'localtime_epoch': 1746246286, 'localtime': '2025-05-03 00:24'}, 'current': {'last_updated_epoch': 1746245700, 'last_updated': '2025-05-03 00:15', 'temp_c': 10.4, 'temp_f': 50.7, 'is_day': 0, 'condition': {'text': 'Overcast', 'icon': '//cdn.weatherapi.com/weather/64x64/night/122.png', 'code': 1009}, 'wind_mph': 6.7, 'wind_kph': 10.8, 'wind_degree': 39, 'wind_dir': 'NE', 'pressure_mb': 1013.0, 'pressure_in': 29.91, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 74, 'cloud': 0, 'feelslike_c': 9.0, 'feelslike_f': 48.2, 'windchill_c': 5.9, 'windchill_f': 42.7, 'heatindex_c': 8.2, 'heatindex_f': 46.7, 'dewpoint_c': 4.4, 'dewpoint_f': 40.0, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 0.0, 'gust_mph': 10.5, 'gust_kph': 16.9}}\",\n",
137
+ " 'score': 0.9573658},\n",
138
+ " {'title': 'New Delhi 14 Day Extended Forecast - Weather - Time and Date',\n",
139
+ " 'url': 'https://www.timeanddate.com/weather/india/new-delhi/ext',\n",
140
+ " 'content': 'SatMay 17 | | 98 / 89\\xa0°F | Sunny. | 93\\xa0°F | 15 mph | ↑ | 15% | 4% | - | 10(Very high) | 5:29 am | 7:06 pm\\n* Updated Saturday, May 3, 2025 2:54:05 am New Delhi time - Weather by CustomWeather, © 2025\\nHour-by-hour weather for New Delhi next 7 days [...] TueMay 13 | | 97 / 86\\xa0°F | Showers late. Increasing cloudiness. | 94\\xa0°F | 14 mph | ↑ | 24% | 46% | 0.01\" | 5(Moderate) | 5:31 am | 7:03 pm\\nWedMay 14 | | 97 / 86\\xa0°F | Sunny. | 92\\xa0°F | 13 mph | ↑ | 14% | 5% | - | 10(Very high) | 5:30 am | 7:04 pm\\nThuMay 15 | | 97 / 88\\xa0°F | Sunny. | 93\\xa0°F | 14 mph | ↑ | 15% | 4% | - | 10(Very high) | 5:30 am | 7:04 pm\\nFriMay 16 | | 98 / 89\\xa0°F | Sunny. | 93\\xa0°F | 15 mph | ↑ | 16% | 5% | - | 10(Very high) | 5:29 am | 7:05 pm',\n",
141
+ " 'score': 0.7171114},\n",
142
+ " {'title': 'Real-Time New Delhi Weather Conditions & Forecast - India - AQI',\n",
143
+ " 'url': 'https://www.aqi.in/weather/in/india/delhi/new-delhi',\n",
144
+ " 'content': '8.\\n\\nBodhan, India\\n\\nSunny\\n\\n30%\\n\\n2.5\\n\\n20.5 kmph\\xa0/\\xa0 SW\\n\\n9.\\n\\nPrayagraj, India\\n\\nSunny\\n\\n24%\\n\\n2.5\\n\\n12.6 kmph\\xa0/\\xa0 SE\\n\\n10.\\n\\nAllahabad, India\\n\\nSunny\\n\\n24%\\n\\n2.5\\n\\n12.6 kmph\\xa0/\\xa0 SE\\n\\nLast Updated: 03 May 2025, 03:37 AM\\n\\nPrana sense\\n\\nExperience precise, real-time weather monitoring with our Automatic Weather Station, reliable source for accurate meteorological data.\\n\\nWeather Station [...] New Delhi, Delhi, IndiaReal-time Weather Conditions\\n\\nNew Delhi, Delhi, India\\n\\nReal-time Weather Conditions\\n\\n25\\xa0\\xa0°C\\n\\n173\\xa0AQI\\n\\nAir quality index is: \\xa0Unhealthy\\n\\nLast Updated: 2025-05-03 09:00 (Local Time)\\n\\nNew Delhi\\n\\nWeather ParametersNew Delhi\\n\\nWeather Parameters\\n\\nNew Delhi\\n\\n12\\xa0km/h\\n\\n4\\xa0m/s\\n\\n168° SSE\\n\\n75\\xa0%\\n\\n2.2\\xa0km\\n\\n0\\xa0mm\\n\\n1009\\xa0mb\\n\\nUV Index4\\n\\nSuggestions for\\n\\n3\\xa0May\\n\\nTrending in India\\n\\nTrending in India\\n\\nHow Recent Dust Storms Affected Air Quality Across the Middle East [...] Moderate or heavy rain with thunder\\n\\n10.8 kmph\\xa0/\\xa0 SSE\\n\\nMost Hottest Cities 2025India\\n\\nMost Hottest Cities 2025\\n\\nIndia\\n\\n1.\\n\\nAmravati, India\\n\\nSunny\\n\\n14%\\n\\n2.7\\n\\n10.1 kmph\\xa0/\\xa0 NW\\n\\n2.\\n\\nAdilabad, India\\n\\nSunny\\n\\n28%\\n\\n2.7\\n\\n24.8 kmph\\xa0/\\xa0 SW\\n\\n3.\\n\\nNanded, India\\n\\nSunny\\n\\n16%\\n\\n2.6\\n\\n11.9 kmph\\xa0/\\xa0 WSW\\n\\n4.\\n\\nAkola, India\\n\\nSunny\\n\\n15%\\n\\n2.6\\n\\n27 kmph\\xa0/\\xa0 NW\\n\\n5.\\n\\nChandrapur, India\\n\\nSunny\\n\\n32%\\n\\n2.7\\n\\n17.6 kmph\\xa0/\\xa0 SSW\\n\\n6.\\n\\nYavatmal, India\\n\\nSunny\\n\\n17%\\n\\n2.7\\n\\n17.6 kmph\\xa0/\\xa0 WSW\\n\\n7.\\n\\nKupti, India\\n\\nSunny\\n\\n24%\\n\\n2.7\\n\\n15.8 kmph\\xa0/\\xa0 WSW',\n",
145
+ " 'score': 0.7023877},\n",
146
+ " {'title': 'Current Weather - Delhi - AccuWeather',\n",
147
+ " 'url': 'https://www.accuweather.com/en/in/delhi/202396/current-weather/202396',\n",
148
+ " 'content': 'Current Weather. 6:23 AM. 78°F. Sunny. RealFeel® 76°. Pleasant. RealFeel Guide. Pleasant. 63° to 81°. Most consider this temperature range ideal.',\n",
149
+ " 'score': 0.6672566},\n",
150
+ " {'title': 'Delhi, Delhi, India Weather Forecast - AccuWeather',\n",
151
+ " 'url': 'https://www.accuweather.com/en/in/delhi/202396/weather-forecast/202396',\n",
152
+ " 'content': \"Today's Weather. Sat, May 3. Hazy sunshine with a thunderstorm in spots this afternoon Hi: 93°. Tonight: Partly cloudy Lo: 78° · Current Weather. 7:07 AM. 78°F.\",\n",
153
+ " 'score': 0.54535896}]"
154
+ ]
155
+ },
156
+ "execution_count": 7,
157
+ "metadata": {},
158
+ "output_type": "execute_result"
159
+ }
160
+ ],
161
+ "source": [
162
+ "get_weather.invoke(\"Delhi\")"
163
+ ]
164
+ },
165
+ {
166
+ "cell_type": "code",
167
+ "execution_count": 8,
168
+ "id": "9c2884c7",
169
+ "metadata": {},
170
+ "outputs": [],
171
+ "source": [
172
+ "from langchain_core.prompts import ChatPromptTemplate\n",
173
+ "from langchain_core.runnables import Runnable\n",
174
+ "\n",
175
+ "\n",
176
+ "# Define a prompt template\n",
177
+ "recommendation_prompt = ChatPromptTemplate.from_template(\"\"\"\n",
178
+ "You are a helpful assistant that gives weather-based advice.\n",
179
+ "\n",
180
+ "Given the current weather condition: \"{weather_condition}\", provide:\n",
181
+ "1. Clothing or activity recommendations suited for this weather.\n",
182
+ "2. At least one health tip to stay safe or comfortable in this condition.\n",
183
+ "\n",
184
+ "Be concise and clear.\n",
185
+ "\"\"\")\n",
186
+ "\n",
187
+ "# Combine prompt and LLM into a runnable\n",
188
+ "recommendation_chain: Runnable = recommendation_prompt | llm\n",
189
+ "\n",
190
+ "# Wrap it as a LangChain tool\n",
191
+ "@tool\n",
192
+ "def get_recommendation(weather_condition: str) -> str:\n",
193
+ " \"\"\"Give activity/clothing recommendations and health tips based on the weather condition using an LLM.\"\"\"\n",
194
+ " return recommendation_chain.invoke({\"weather_condition\": weather_condition})"
195
+ ]
196
+ },
197
+ {
198
+ "cell_type": "code",
199
+ "execution_count": 9,
200
+ "id": "76f5efa2",
201
+ "metadata": {},
202
+ "outputs": [
203
+ {
204
+ "data": {
205
+ "text/plain": [
206
+ "AIMessage(content=\"\\n<think>\\nOkay, the user wants advice for sunny 30°C weather. Let me start by thinking about clothing. It's warm but not super hot yet. Light fabrics like cotton or linen would be good because they're breathable. Shorts and a t-shirt are obvious, but maybe mention specific types like loose-fitting to allow airflow. A hat would provide shade, maybe a wide-brimmed one to protect the face and neck. Sunglasses are a must to shield the eyes from UV rays.\\n\\nActivities? Outdoor stuff obviously. Maybe suggest water activities like swimming since it's hot. Barbecues or picnics in the park could be nice. But also think about timing—like suggesting to do outdoor exercises in the morning or evening to avoid the midday sun. Maybe mention things like hiking or cycling but again with the time consideration.\\n\\nHealth tips. Hydration is key. Need to remind to drink water even if not thirsty. Sunscreen with high SPF, reapplying every few hours. Maybe mention avoiding the sun during peak hours, like 11 AM to 3 PM. Also, wearing protective clothing if staying out long. Oh, and maybe something about not leaving kids or pets in the car. Wait, but the user didn't specify, so maybe stick to general tips. \\n\\nWait, the user asked for at least one health tip. So pick the most important ones. Hydration and sunscreen are top. Maybe combine them. Also, taking breaks in the shade. \\n\\nLet me structure this. For clothing and activities, list a few bullet points. Then the health tip as a separate point. Need to be concise. Let me check if 30°C is considered hot. 30 is about 86°F, so yeah, warm but manageable. So maybe light clothing, staying hydrated, using sunscreen. \\n\\nHmm, maybe also mention staying in the shade when possible. Or using an umbrella? Not sure if that's common. Hats are better. \\n\\nOkay, putting it all together. Make sure it's clear and not too wordy. The user wants concise answers. Alright, I think that's covered.\\n</think>\\n\\n1. **Clothing/Activities**: Wear light, breathable fabrics (e.g., linen/cotton), a wide-brimmed hat, and sunglasses. Enjoy outdoor activities like swimming, hiking in shaded areas, or evening barbecues. Avoid intense midday sun; plan outdoor exercise for early morning or late afternoon. \\n\\n2. **Health Tip**: Stay hydrated by drinking water regularly, even if not thirsty. Apply broad-spectrum SPF 30+ sunscreen, reapplying every 2 hours, and seek shade during peak sun hours (10 AM–4 PM).\", additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 552, 'prompt_tokens': 71, 'total_tokens': 623, 'completion_time': 1.372413311, 'prompt_time': 0.005961551, 'queue_time': 0.24641989999999997, 'total_time': 1.378374862}, 'model_name': 'qwen-qwq-32b', 'system_fingerprint': 'fp_3796682456', 'finish_reason': 'stop', 'logprobs': None}, id='run-0b552e02-103f-486a-ab52-5e5b833a7a71-0', usage_metadata={'input_tokens': 71, 'output_tokens': 552, 'total_tokens': 623})"
207
+ ]
208
+ },
209
+ "execution_count": 9,
210
+ "metadata": {},
211
+ "output_type": "execute_result"
212
+ }
213
+ ],
214
+ "source": [
215
+ "get_recommendation.invoke(\"sunny and 30 degrees Celsius\")"
216
+ ]
217
+ },
218
+ {
219
+ "cell_type": "code",
220
+ "execution_count": 10,
221
+ "id": "1bf8d533",
222
+ "metadata": {},
223
+ "outputs": [],
224
+ "source": [
225
+ "from langchain_community.document_loaders import WikipediaLoader\n",
226
+ "@tool\n",
227
+ "def wiki_search(query : str) -> str:\n",
228
+ " \"\"\"Search Wikipedia for a given query and return the summary.\n",
229
+ " Args:\n",
230
+ " query (str): The search query.\n",
231
+ " \"\"\"\n",
232
+ " \n",
233
+ " search_docs = WikipediaLoader(query=query, load_max_docs=1).load()\n",
234
+ " formatted_search_docs = \"\\n\\n----\\n\\n\".join(\n",
235
+ " [\n",
236
+ " f'<Document Source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\">\\n{doc.page_content}\\n</Document>'\n",
237
+ " for doc in search_docs\n",
238
+ " ])\n",
239
+ " return formatted_search_docs"
240
+ ]
241
+ },
242
+ {
243
+ "cell_type": "code",
244
+ "execution_count": 11,
245
+ "id": "87617077",
246
+ "metadata": {},
247
+ "outputs": [],
248
+ "source": [
249
+ "# ! pip install wikipedia"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "execution_count": 12,
255
+ "id": "4f5b4398",
256
+ "metadata": {},
257
+ "outputs": [
258
+ {
259
+ "data": {
260
+ "text/plain": [
261
+ "'<Document Source=\"https://en.wikipedia.org/wiki/Agentic_AI\" page=\"\">\\nAgentic AI is a class of artificial intelligence that focuses on autonomous systems that can make decisions and perform tasks without human intervention. The independent systems automatically respond to conditions, to produce process results. The field is closely linked to agentic automation, also known as agent-based process management systems, when applied to process automation. Applications include software development, customer support, cybersecurity and business intelligence. \\n\\n\\n== Overview ==\\nThe core concept of agentic AI is the use of AI agents to perform automated tasks but without human intervention. While robotic process automation (RPA) and AI agents can be programmed to automate specific tasks or support rule-based decisions, the rules are usually fixed. Agentic AI operates independently, making decisions through continuous learning and analysis of external data and complex data sets. Functioning agents can require various AI techniques, such as natural language processing, machine learning (ML), and computer vision, depending on the environment.\\nParticularly, reinforcement learning (RL) is essential in assisting agentic AI in making self-directed choices by supporting agents in learning best actions through the trial-and-error method. Agents using RL continuously to explore their surroundings, will be given rewards or punishment for their actions, which refines their decision-making capability over time. While Deep learning, as opposed to rule-based methods, supports Agentic AI through multi-layered neural networks to learn features from extensive and complex sets of data. RL combined with deep learning thus supports the use of AI agents to adjust dynamically, optimize procedures, and engage in complex behaviors with limited control from humans.\\n\\n\\n== History ==\\nSome scholars trace the conceptual roots of agentic AI to Alan Turing\\'s mid-20th century work with machine intelligence and Norbert Wiener\\'s work on feedback systems. The term agent-based process management system was used as far back as 1998 to describe the concept of using autonomous agents for business process management. The psychological principle of agency was also discussed in the 2008 work of sociologist Albert Bandura, who studied how humans can shape their environments. This research would shape how humans modeled and developed artificial intelligence agents. \\nSome additional milestones of agentic AI include IBM\\'s Deep Blue, demonstrating how agency could work within a confined domain, advances in machine learning in the 2000s, AI being integrated into robotics, and the rise of generative AI such as OpenAI\\'s GPT models and Salesforce\\'s Agentforce platform.\\nIn the last decade, significant advances in AI have spurred the development of Agentic AI. Breakthroughs in deep learning, reinforcement learning, and neural networks allowed AI systems to learn on their own and make decision with minimal human guidance. Consilience of agentic AI across autonomous transportation, industrial automation, and tailored healthcare has also supported its viability. Self-driving cars use agentic AI to handle complex road scenarios, while\\nIn 2025, research firm Forrester named agentic AI a top emerging technology for 2025.\\n\\n\\n== Applications ==\\nApplications using agentic AI include:\\n\\nSoftware development - AI coding agents can write large pieces of code, and review it. 
Agents can even perform non-code related tasks such as reverse engineering specifications from code.\\nCustomer support automation - AI agents can improve customer service by improving the ability of chatbots to answer a wider variety of questions, rather than having a limited set of answers pre-programmed by humans.\\nEnterprise workflows - AI agents can automatically automate routine tasks by processing pooled data, as opposed to a company needing APIs preprogrammed for specific tasks.\\nCybersecurity and threat detection - AI agents deployed for cybersecurity can automatically detect and mitigate threats in\\n</Document>'"
262
+ ]
263
+ },
264
+ "execution_count": 12,
265
+ "metadata": {},
266
+ "output_type": "execute_result"
267
+ }
268
+ ],
269
+ "source": [
270
+ "import wikipedia\n",
271
+ "wiki_search.invoke(\"AI Agents\")"
272
+ ]
273
+ },
274
+ {
275
+ "cell_type": "code",
276
+ "execution_count": 13,
277
+ "id": "1f445fb8",
278
+ "metadata": {},
279
+ "outputs": [],
280
+ "source": [
281
+ "@tool\n",
282
+ "def web_search(query: str) -> str:\n",
283
+ " \"\"\"Search the web for a given query and return the summary.\n",
284
+ " Args:\n",
285
+ " query (str): The search query.\n",
286
+ " \"\"\"\n",
287
+ " \n",
288
+ " search_tool = TavilySearchResults()\n",
289
+ " result = search_tool.run(query)\n",
290
+ " return result[0]['content']"
291
+ ]
292
+ },
293
+ {
294
+ "cell_type": "code",
295
+ "execution_count": 14,
296
+ "id": "70e42662",
297
+ "metadata": {},
298
+ "outputs": [
299
+ {
300
+ "name": "stdout",
301
+ "output_type": "stream",
302
+ "text": [
303
+ "AI agents are a type of artificial intelligence (AI) system that can understand and respond to customer inquiries without human intervention. They are created using an agent builder, like Agentforce, and rely on machine learning and natural language processing (NLP) to handle a wide range of tasks. These intelligent agents can include anything from answering simple questions to resolving complex issues — even multi-tasking. Most importantly, AI agents can continuously improve their own [...] Want better, fully-optimized marketing campaigns? AI agents can help your marketing team build better campaigns — faster. With Agentforce Campaigns, AI agents generate a campaign brief and target audience segment, then create relevant content speaking to those audiences. AI can even build a customer journey in Flow. AI agents also continually analyze campaign performance against your key performance indicators and proactively recommend improvements.\n"
304
+ ]
305
+ }
306
+ ],
307
+ "source": [
308
+ "output = web_search.invoke(\"AI Agents\")\n",
309
+ "print(output)"
310
+ ]
311
+ },
312
+ {
313
+ "cell_type": "code",
314
+ "execution_count": 18,
315
+ "id": "bf1c120e",
316
+ "metadata": {},
317
+ "outputs": [],
318
+ "source": [
319
+ "@tool\n",
320
+ "def add(x: int, y: int) -> int:\n",
321
+ " \"\"\"Add two numbers.\n",
322
+ " Args:\n",
323
+ " x (int): First number.\n",
324
+ " y (int): Second number.\n",
325
+ " \"\"\"\n",
326
+ "\n",
327
+ " return x + y\n",
328
+ "\n",
329
+ "@tool\n",
330
+ "def subtract(x: int, y: int) -> int:\n",
331
+ " \"\"\"Subtract two numbers.\n",
332
+ " Args:\n",
333
+ " x (int): First number.\n",
334
+ " y (int): Second number.\n",
335
+ " \"\"\"\n",
336
+ "\n",
337
+ " return x - y\n",
338
+ "\n",
339
+ "@tool\n",
340
+ "def multiply(x: int, y: int) -> int:\n",
341
+ " \"\"\"Multiply two numbers.\n",
342
+ " Args:\n",
343
+ " x (int): First number.\n",
344
+ " y (int): Second number.\n",
345
+ " \"\"\"\n",
346
+ "\n",
347
+ " return x * y\n",
348
+ "\n",
349
+ "@tool\n",
350
+ "def divide(x: int, y: int) -> float:\n",
351
+ " \"\"\"Divide two numbers.\n",
352
+ " Args:\n",
353
+ " x (int): First number.\n",
354
+ " y (int): Second number.\n",
355
+ " \"\"\"\n",
356
+ "\n",
357
+ " if y == 0:\n",
358
+ " raise ValueError(\"Cannot divide by zero.\")\n",
359
+ " return x / y\n",
360
+ "\n",
361
+ "@tool\n",
362
+ "def square(x: int) -> int:\n",
363
+ " \"\"\"Square a number.\n",
364
+ " Args:\n",
365
+ " x (int): The number to be squared.\n",
366
+ " \"\"\"\n",
367
+ "\n",
368
+ " return x * x\n",
369
+ "\n",
370
+ "@tool\n",
371
+ "def cube(x: int) -> int:\n",
372
+ " \"\"\"Cube a number.\n",
373
+ " Args:\n",
374
+ " x (int): The number to be cubed.\n",
375
+ " \"\"\"\n",
376
+ "\n",
377
+ " return x * x * x\n",
378
+ "\n",
379
+ "@tool\n",
380
+ "def power(x: int, y: int) -> int:\n",
381
+ " \"\"\"Raise a number to the power of another number.\n",
382
+ " Args:\n",
383
+ " x (int): The base number.\n",
384
+ " y (int): The exponent.\n",
385
+ " \"\"\"\n",
386
+ "\n",
387
+ " return x ** y\n",
388
+ "\n",
389
+ "@tool\n",
390
+ "def factorial(n: int) -> int:\n",
391
+ " \"\"\"Calculate the factorial of a number.\n",
392
+ " Args:\n",
393
+ " n (int): The number to calculate the factorial for.\n",
394
+ " \"\"\"\n",
395
+ "\n",
396
+ " if n < 0:\n",
397
+ " raise ValueError(\"Factorial is not defined for negative numbers.\")\n",
398
+ " if n == 0 or n == 1:\n",
399
+ " return 1\n",
400
+ " result = 1\n",
401
+ " for i in range(2, n + 1):\n",
402
+ " result *= i\n",
403
+ " return result\n",
404
+ "\n",
405
+ "@tool\n",
406
+ "def mean(numbers: list) -> float:\n",
407
+ " \"\"\"Calculate the mean of a list of numbers.\n",
408
+ " Args:\n",
409
+ " numbers (list): A list of numbers.\n",
410
+ " \"\"\"\n",
411
+ "\n",
412
+ " if not numbers:\n",
413
+ " raise ValueError(\"The list is empty.\")\n",
414
+ " return sum(numbers) / len(numbers)\n",
415
+ "\n",
416
+ "@tool\n",
417
+ "def standard_deviation(numbers: list) -> float:\n",
418
+ " \"\"\"Calculate the standard deviation of a list of numbers.\n",
419
+ " Args:\n",
420
+ " numbers (list): A list of numbers.\n",
421
+ " \"\"\"\n",
422
+ "\n",
423
+ " if not numbers:\n",
424
+ " raise ValueError(\"The list is empty.\")\n",
425
+ " mean_value = mean(numbers)\n",
426
+ " variance = sum((x - mean_value) ** 2 for x in numbers) / len(numbers)\n",
427
+ " return variance ** 0.5"
428
+ ]
429
+ },
430
+ {
431
+ "cell_type": "code",
432
+ "execution_count": 29,
433
+ "id": "bf43300a",
434
+ "metadata": {},
435
+ "outputs": [],
436
+ "source": [
437
+ "# !pip install chromadb"
438
+ ]
439
+ },
440
+ {
441
+ "cell_type": "code",
442
+ "execution_count": 25,
443
+ "id": "e798f4ac",
444
+ "metadata": {},
445
+ "outputs": [
446
+ {
447
+ "name": "stderr",
448
+ "output_type": "stream",
449
+ "text": [
450
+ "/var/folders/r6/l4km4ywx28zb1dkl0yp8fyn80000gn/T/ipykernel_1001/4015992125.py:20: LangChainDeprecationWarning: Since Chroma 0.4.x the manual persistence method is no longer supported as docs are automatically persisted.\n",
451
+ " vector_store.persist()\n"
452
+ ]
453
+ }
454
+ ],
455
+ "source": [
456
+ "from langchain_community.vectorstores import Chroma\n",
457
+ "from langchain.embeddings import HuggingFaceEmbeddings\n",
458
+ "from langchain.tools.retriever import create_retriever_tool\n",
459
+ "from langchain_core.documents import Document\n",
460
+ "import os\n",
461
+ "\n",
462
+ "# Create embeddings\n",
463
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\")\n",
464
+ "\n",
465
+ "# Example documents to index\n",
466
+ "documents = [\n",
467
+ " Document(page_content=\"What is artificial intelligence?\"),\n",
468
+ " Document(page_content=\"Explain how AI agents operate.\"),\n",
469
+ "]\n",
470
+ "\n",
471
+ "# Create a Chroma vector store (in memory or persistent)\n",
472
+ "vector_store = Chroma.from_documents(documents, embeddings, persist_directory=\"./chroma_db\")\n",
473
+ "\n",
474
+ "# (Optional) Persist the DB to disk\n",
475
+ "vector_store.persist()\n",
476
+ "\n",
477
+ "# Create the retriever tool\n",
478
+ "retriever_tool = create_retriever_tool( # avoid shadowing the imported helper\n",
479
+ " retriever=vector_store.as_retriever(),\n",
480
+ " name=\"Question_Search\",\n",
481
+ " description=\"A tool to retrieve similar questions using ChromaDB vector store.\",\n",
482
+ ")"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": 26,
488
+ "id": "06c18b4c",
489
+ "metadata": {},
490
+ "outputs": [
491
+ {
492
+ "name": "stderr",
493
+ "output_type": "stream",
494
+ "text": [
495
+ "/var/folders/r6/l4km4ywx28zb1dkl0yp8fyn80000gn/T/ipykernel_1001/1571824264.py:1: LangChainDeprecationWarning: The class `Chroma` was deprecated in LangChain 0.2.9 and will be removed in 1.0. An updated version of the class exists in the :class:`~langchain-chroma package and should be used instead. To use it run `pip install -U :class:`~langchain-chroma` and import as `from :class:`~langchain_chroma import Chroma``.\n",
496
+ " vector_store = Chroma(persist_directory=\"./chroma_db\", embedding_function=embeddings)\n"
497
+ ]
498
+ }
499
+ ],
500
+ "source": [
501
+ "vector_store = Chroma(persist_directory=\"./chroma_db\", embedding_function=embeddings)"
502
+ ]
503
+ },
504
+ {
505
+ "cell_type": "code",
506
+ "execution_count": 27,
507
+ "id": "9eefbbe0",
508
+ "metadata": {},
509
+ "outputs": [],
510
+ "source": [
511
+ "tools = [\n",
512
+ " multiply,\n",
513
+ " add,\n",
514
+ " subtract,\n",
515
+ " divide,\n",
516
+ " square,\n",
517
+ " cube,\n",
518
+ " power,\n",
519
+ " factorial,\n",
520
+ " mean,\n",
521
+ " standard_deviation,\n",
522
+ " get_weather,\n",
523
+ " get_recommendation,\n",
524
+ " wiki_search,\n",
525
+ " web_search\n",
526
+ "]"
527
+ ]
528
+ },
529
+ {
530
+ "cell_type": "code",
531
+ "execution_count": 31,
532
+ "id": "10b24bed",
533
+ "metadata": {},
534
+ "outputs": [],
535
+ "source": [
536
+ "from langgraph.graph import StateGraph, START\n",
537
+ "from langgraph.prebuilt import ToolNode\n",
538
+ "from langchain_groq import ChatGroq\n",
539
+ "from langchain_core.messages import HumanMessage\n",
540
+ "from configs.prompt import Agent_prompt_template\n",
541
+ "\n",
542
+ "def build_graph():\n",
543
+ " \"\"\"Build the graph using Groq and custom prompt/tools setup\"\"\"\n",
544
+ "\n",
545
+ " # Initialize the LLM\n",
546
+ " llm = ChatGroq(\n",
547
+ " model=\"qwen-qwq-32b\",\n",
548
+ " temperature=0\n",
549
+ " )\n",
550
+ "\n",
551
+ " # Bind tools to LLM\n",
552
+ " llm_with_tools = llm.bind_tools(tools)\n",
553
+ "\n",
554
+ " # Define assistant node\n",
555
+ " def assistant(state: MessagesState):\n",
556
+ " \"\"\"Assistant node\"\"\"\n",
557
+ " return {\"messages\": [llm_with_tools.invoke(state[\"messages\"])]}\n",
558
+ "\n",
559
+ " # Define retriever node\n",
560
+ " def retriever(state: MessagesState):\n",
561
+ " \"\"\"Retriever node\"\"\"\n",
562
+ " similar_question = vector_store.similarity_search(state[\"messages\"][0].content)\n",
563
+ " example_msg = HumanMessage(\n",
564
+ " content=f\"Here I provide a similar question and answer for reference: \\n\\n{similar_question[0].page_content}\",\n",
565
+ " )\n",
566
+ " return {\"messages\": [Agent_prompt_template] + state[\"messages\"] + [example_msg]}\n",
567
+ "\n",
568
+ " # Create graph\n",
569
+ " builder = StateGraph(MessagesState)\n",
570
+ " builder.add_node(\"retriever\", retriever)\n",
571
+ " builder.add_node(\"assistant\", assistant)\n",
572
+ " builder.add_node(\"tools\", ToolNode(tools))\n",
573
+ "\n",
574
+ " builder.add_edge(START, \"retriever\")\n",
575
+ " builder.add_edge(\"retriever\", \"assistant\")\n",
576
+ " builder.add_conditional_edges(\"assistant\", tools_condition)\n",
577
+ " builder.add_edge(\"tools\", \"assistant\")\n",
578
+ "\n",
579
+ " # Compile and return the graph\n",
580
+ " return builder.compile()"
581
+ ]
582
+ },
583
+ {
584
+ "cell_type": "code",
585
+ "execution_count": 33,
586
+ "id": "2a74e687",
587
+ "metadata": {},
588
+ "outputs": [
589
+ {
590
+ "name": "stdout",
591
+ "output_type": "stream",
592
+ "text": [
593
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
594
+ "\n",
595
+ "When was attention is all you need paper printed and what is the main idea of it?\n",
596
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
597
+ "\n",
598
+ "You are a helpful assistant following the REACT methodology and tasked with answering questions using a set of tools. \n",
599
+ "Once a question is asked,you have to Report your thoughts, and finish your answer with the following template: \n",
600
+ "FINAL ANSWER: [YOUR FINAL ANSWER]. \n",
601
+ "\n",
602
+ "### **Instructions:** \n",
603
+ "- YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. \n",
604
+ "- If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. \n",
605
+ "- If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.\n",
606
+ "- Provide the answer in clear and professional language. \n",
607
+ "- **Limit** the FINAL ANSWER to 1000 words and keep it to the point.\n",
608
+ "- **Do not** include any additional information or explanations in your FINAL ANSWER.\n",
609
+ "- **Do not** include any information that is not relevant to the question.\n",
610
+ "- **Validate** your answer before providing it. \n",
611
+ "\n",
612
+ "\n",
613
+ "Your answer should only start with \"FINAL ANSWER: \", then follows with the answer. \n",
614
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
615
+ "\n",
616
+ "Here I provide a similar question and answer for reference: \n",
617
+ "\n",
618
+ "What is artificial intelligence?\n",
619
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
620
+ "\n",
621
+ "FINAL ANSWER: 2017, The main idea is the introduction of the Transformer architecture using self-attention mechanisms for efficient sequence processing, eliminating the need for recurrent layers.\n"
622
+ ]
623
+ }
624
+ ],
625
+ "source": [
626
+ "# test\n",
627
+ "if __name__ == \"__main__\":\n",
628
+ " question = \"When was attention is all you need paper printed and what is the main idea of it?\"\n",
629
+ " # Build the graph\n",
630
+ " graph = build_graph()\n",
631
+ " # Run the graph\n",
632
+ " messages = [HumanMessage(content=question)]\n",
633
+ " messages = graph.invoke({\"messages\": messages})\n",
634
+ " for m in messages[\"messages\"]:\n",
635
+ " m.pretty_print()\n"
636
+ ]
637
+ },
638
+ {
639
+ "cell_type": "code",
640
+ "execution_count": 43,
641
+ "id": "4c0862b1",
642
+ "metadata": {},
643
+ "outputs": [],
644
+ "source": [
645
+ "def build_graph():\n",
646
+ " \"\"\"Build the graph with traceable output using Groq + tools + vector search.\"\"\"\n",
647
+ " \n",
648
+ " from langgraph.graph import StateGraph, START\n",
649
+ " from langgraph.prebuilt import ToolNode\n",
650
+ " from langchain_core.messages import HumanMessage\n",
651
+ " from configs.prompt import Agent_prompt_template\n",
652
+ "\n",
653
+ " llm = ChatGroq(model=\"qwen-qwq-32b\", temperature=0)\n",
654
+ " llm_with_tools = llm.bind_tools(tools)\n",
655
+ "\n",
656
+ " def assistant(state: MessagesState):\n",
657
+ " llm_response = llm_with_tools.invoke(state[\"messages\"])\n",
658
+ " step_log = {\n",
659
+ " \"step\": \"assistant\",\n",
660
+ " \"tool_used\": \"LLM (Groq qwen-qwq-32b)\",\n",
661
+ " \"input\": state[\"messages\"],\n",
662
+ " \"output\": llm_response.content\n",
663
+ " }\n",
664
+ " return {\n",
665
+ " \"messages\": [llm_response],\n",
666
+ " \"steps\": state.get(\"steps\", []) + [step_log]\n",
667
+ " }\n",
668
+ "\n",
669
+ " def retriever(state: MessagesState):\n",
670
+ " query = state[\"messages\"][0].content\n",
671
+ " similar_docs = vector_store.similarity_search(query)\n",
672
+ " example_msg = HumanMessage(\n",
673
+ " content=f\"Here I provide a similar question and answer for reference:\\n\\n{similar_docs[0].page_content}\",\n",
674
+ " )\n",
675
+ " step_log = {\n",
676
+ " \"step\": \"retriever\",\n",
677
+ " \"tool_used\": \"Vector Store (Similarity Search)\",\n",
678
+ " \"input\": query,\n",
679
+ " \"output\": similar_docs[0].page_content\n",
680
+ " }\n",
681
+ " return {\n",
682
+ " \"messages\": [Agent_prompt_template] + state[\"messages\"] + [example_msg],\n",
683
+ " \"steps\": state.get(\"steps\", []) + [step_log]\n",
684
+ " }\n",
685
+ "\n",
686
+ " builder = StateGraph(MessagesState)\n",
687
+ " builder.add_node(\"retriever\", retriever)\n",
688
+ " builder.add_node(\"assistant\", assistant)\n",
689
+ " builder.add_node(\"tools\", ToolNode(tools))\n",
690
+ " builder.add_edge(START, \"retriever\")\n",
691
+ " builder.add_edge(\"retriever\", \"assistant\")\n",
692
+ " builder.add_conditional_edges(\"assistant\", tools_condition)\n",
693
+ " builder.add_edge(\"tools\", \"assistant\")\n",
694
+ "\n",
695
+ " return builder.compile()"
696
+ ]
697
+ },
698
+ {
699
+ "cell_type": "code",
700
+ "execution_count": 44,
701
+ "id": "f658bc7a",
702
+ "metadata": {},
703
+ "outputs": [
704
+ {
705
+ "name": "stdout",
706
+ "output_type": "stream",
707
+ "text": [
708
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
709
+ "\n",
710
+ "What is the average temperature in Delhi in last week in celcius and are there any health tips?\n",
711
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
712
+ "\n",
713
+ "You are a helpful assistant following the REACT methodology and tasked with answering questions using a set of tools. \n",
714
+ "Once a question is asked,you have to Report your thoughts, and finish your answer with the following template: \n",
715
+ "FINAL ANSWER: [YOUR FINAL ANSWER]. \n",
716
+ "\n",
717
+ "### **Instructions:** \n",
718
+ "- YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. \n",
719
+ "- If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. \n",
720
+ "- If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.\n",
721
+ "- Provide the answer in clear and professional language. \n",
722
+ "- **Limit** the FINAL ANSWER to 1000 words and keep it to the point.\n",
723
+ "- **Do not** include any additional information or explanations in your FINAL ANSWER.\n",
724
+ "- **Do not** include any information that is not relevant to the question.\n",
725
+ "- **Validate** your answer before providing it. \n",
726
+ "\n",
727
+ "\n",
728
+ "Your answer should only start with \"FINAL ANSWER: \", then follows with the answer. \n",
729
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
730
+ "\n",
731
+ "Here I provide a similar question and answer for reference:\n",
732
+ "\n",
733
+ "What is artificial intelligence?\n",
734
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
735
+ "Tool Calls:\n",
736
+ " web_search (call_74ab)\n",
737
+ " Call ID: call_74ab\n",
738
+ " Args:\n",
739
+ " query: average temperature in Delhi last week in celsius\n",
740
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
741
+ "Name: web_search\n",
742
+ "\n",
743
+ "Past Weather in New Delhi, Delhi, India — Yesterday and Last 2 Weeks\n",
744
+ "\n",
745
+ "Past Weather in New Delhi — Graph\n",
746
+ "\n",
747
+ "See weather overview\n",
748
+ "\n",
749
+ "New Delhi Temperature Yesterday\n",
750
+ "\n",
751
+ "Maximum temperature yesterday: 84 °F (at 2:30 am)Minimum temperature yesterday: 68 °F (at 5:30 am)Average temperature yesterday: 76 °F\n",
752
+ "\n",
753
+ "High & Low Weather Summary for the Past Weeks [...] | Temperature | Humidity | Pressure\n",
754
+ "High | 105 °F(Apr 21, 2:30 pm) | 100%(May 2, 5:30 am) | 29.89 \"Hg(May 2, 5:30 am)\n",
755
+ "Low | 68 °F(May 2, 5:30 am) | 13%(Apr 22, 2:30 pm) | 29.44 \"Hg(Apr 18, 5:30 pm)\n",
756
+ "Average | 88 °F | 45% | 29.66 \"Hg\n",
757
+ "* Reported Apr 18 11:30 am — May 3 11:30 am, New Delhi. Weather by CustomWeather, © 2025\n",
758
+ "Note: Actual official high and low records may vary slightly from our data, if they occured in-between our weather recording intervals... More about our weather records\n",
759
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
760
+ "\n",
761
+ "FINAL ANSWER: 31, Stay hydrated, use sunscreen, wear light clothing, avoid prolonged sun exposure, stay in shaded areas during peak heat.\n"
762
+ ]
763
+ }
764
+ ],
765
+ "source": [
766
+ "# test\n",
767
+ "if __name__ == \"__main__\":\n",
768
+ " question = \"What is the average temperature in Delhi in last week in celcius and are there any health tips?\"\n",
769
+ " # Build the graph\n",
770
+ " graph = build_graph()\n",
771
+ " # Run the graph\n",
772
+ " messages = [HumanMessage(content=question)]\n",
773
+ " messages = graph.invoke({\"messages\": messages})\n",
774
+ " for m in messages[\"messages\"]:\n",
775
+ " m.pretty_print()\n",
776
+ "\n"
777
+ ]
778
+ }
779
+ ],
780
+ "metadata": {
781
+ "kernelspec": {
782
+ "display_name": "Python 3",
783
+ "language": "python",
784
+ "name": "python3"
785
+ },
786
+ "language_info": {
787
+ "codemirror_mode": {
788
+ "name": "ipython",
789
+ "version": 3
790
+ },
791
+ "file_extension": ".py",
792
+ "mimetype": "text/x-python",
793
+ "name": "python",
794
+ "nbconvert_exporter": "python",
795
+ "pygments_lexer": "ipython3",
796
+ "version": "3.9.6"
797
+ }
798
+ },
799
+ "nbformat": 4,
800
+ "nbformat_minor": 5
801
+ }
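One detail worth noting about the final two notebook cells: the `assistant` and `retriever` nodes return a `steps` key alongside `messages`, but the graph is built on `MessagesState`, which only declares a `messages` channel, so those step logs are not guaranteed to be retained across langgraph versions. A minimal sketch of an extended state schema that would keep them (illustrative only, not part of the commit):

```python
# Hypothetical extension of MessagesState so the step_log dicts are kept in state.
import operator
from typing import Annotated, Any

from langgraph.graph import MessagesState


class TracedState(MessagesState):
    # Each node can return {"steps": [step_log]}; operator.add appends the
    # new entries to the accumulated list instead of overwriting it.
    steps: Annotated[list[dict[str, Any]], operator.add]
```

With a reducer in place, the nodes could simply return `{"messages": [...], "steps": [step_log]}` and the manual `state.get("steps", []) + [step_log]` accumulation would no longer be needed; the graph would then be built with `StateGraph(TracedState)`.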
app.py ADDED
@@ -0,0 +1,205 @@
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import inspect
5
+ import pandas as pd
6
+ from agent import build_graph
7
+ from langchain_core.messages import HumanMessage
8
+
9
+
10
+ # (Keep Constants as is)
11
+ # --- Constants ---
12
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
+
14
+ # --- Basic Agent Definition ---
15
+ # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
16
+ class BasicAgent:
17
+ def __init__(self):
18
+ print("BasicAgent initialized.")
19
+ self.graph = build_graph()
20
+
21
+ def __call__(self, question: str) -> str:
22
+ print(f"Agent received question (first 50 chars): {question[:50]}...")
23
+ # fixed_answer = "This is a default answer."
24
+ # print(f"Agent returning fixed answer: {fixed_answer}")
25
+ # return fixed_answer
26
+ messages = [HumanMessage(content=question)]
27
+ messages = self.graph.invoke({"messages": messages})
28
+ answer = messages['messages'][-1].content
29
+ return answer
30
+
31
+ def run_and_submit_all( profile: gr.OAuthProfile | None):
32
+ """
33
+ Fetches all questions, runs the BasicAgent on them, submits all answers,
34
+ and displays the results.
35
+ """
36
+ # --- Determine HF Space Runtime URL and Repo URL ---
37
+ space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
38
+
39
+ if profile:
40
+ username= f"{profile.username}"
41
+ print(f"User logged in: {username}")
42
+ else:
43
+ print("User not logged in.")
44
+ return "Please Login to Hugging Face with the button.", None
45
+
46
+ api_url = DEFAULT_API_URL
47
+ questions_url = f"{api_url}/questions"
48
+ submit_url = f"{api_url}/submit"
49
+
50
+ # 1. Instantiate Agent ( modify this part to create your agent)
51
+ try:
52
+ agent = BasicAgent()
53
+ except Exception as e:
54
+ print(f"Error instantiating agent: {e}")
55
+ return f"Error initializing agent: {e}", None
56
+ # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
57
+ agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
58
+ print(agent_code)
59
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent(question_text)
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+
+         1. Please clone this Space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch the questions, run your agent, submit the answers, and see the score.
+
+         ---
+         **Disclaimers:**
+         Once you click the submit button, it can take quite some time (this is the time the agent needs to go through all the questions).
+         This Space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long-running submit step, one solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+         """
+     )
+
+     gr.LoginButton()
+
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
+     )
+
+ if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup:  # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=False)
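
The disclaimer in app.py above suggests caching the answers and submitting them in a separate action (or answering the questions asynchronously) rather than doing everything inside one long button click. A minimal sketch of the caching idea, assuming the same task_id / submitted_answer payload shape used by run_and_submit_all; the helper functions and the cache file name are hypothetical and not part of this commit:

import json
import requests

CACHE_FILE = "answers_cache.json"  # hypothetical cache location

def cache_answers(agent, questions_data, cache_file=CACHE_FILE):
    """Run the agent once over the fetched questions and store the answers locally."""
    answers = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        answers.append({"task_id": task_id, "submitted_answer": agent(question_text)})
    with open(cache_file, "w") as f:
        json.dump(answers, f)
    return answers

def submit_cached_answers(username, agent_code, submit_url, cache_file=CACHE_FILE):
    """Submit previously cached answers without re-running the agent."""
    with open(cache_file) as f:
        answers = json.load(f)
    payload = {"username": username, "agent_code": agent_code, "answers": answers}
    response = requests.post(submit_url, json=payload, timeout=60)
    response.raise_for_status()
    return response.json()

Splitting the run from the submission this way means a failed or timed-out submission can be retried without paying for another full agent run.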
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23add52afbe7588391f32d3deffb581b2663d2e2ad8851aba7de25e6b3f66761
+ size 32120000
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8c7f00b4415698ee6cb94332eff91aedc06ba8e066b1f200e78ca5df51abb57
+ size 100
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7e2dcff542de95352682dc186432e98f0188084896773f1973276b0577d5305
+ size 40000
chroma_db/b4f29986-cfbe-4e28-871d-c988b39d1992/link_lists.bin ADDED
File without changes
configs/prompt.py ADDED
@@ -0,0 +1,16 @@
+ Agent_prompt_template = '''You are a helpful assistant following the ReAct methodology, tasked with answering questions using a set of tools.
+ Once a question is asked, report your thoughts and finish your answer with the following template:
+ FINAL ANSWER: [YOUR FINAL ANSWER].
+
+ ### **Instructions:**
+ - YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
+ - If you are asked for a number, don't use commas in the number, and don't use units such as $ or percent signs unless specified otherwise.
+ - If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise. If you are asked for a comma-separated list, apply the above rules depending on whether each element of the list is a number or a string.
+ - Provide the answer in clear and professional language.
+ - **Limit** the FINAL ANSWER to 1000 words and keep it to the point.
+ - **Do not** include any additional information or explanations in your FINAL ANSWER.
+ - **Do not** include any information that is not relevant to the question.
+ - **Validate** your answer before providing it.
+ - **Print** all the steps you take to arrive at your answer.
+
+ Your answer should only start with "FINAL ANSWER: ", then follow with the answer. '''
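
The template above asks the model to finish with a single "FINAL ANSWER: ..." line, so whatever agent consumes this prompt presumably needs to strip the reasoning that precedes it before submitting. A small sketch of such a parser; the helper name is hypothetical and not part of this commit:

def extract_final_answer(model_output: str) -> str:
    """Return the text after the last 'FINAL ANSWER:' marker, or the raw output if the marker is missing."""
    marker = "FINAL ANSWER:"
    if marker in model_output:
        return model_output.rsplit(marker, 1)[-1].strip()
    return model_output.strip()

# Example: extract_final_answer("Thought: ...\nFINAL ANSWER: Paris") -> "Paris"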
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ gradio
+ requests
+ langchain==0.3.24
+ langchain-community==0.3.23
+ langchain-groq==0.3.2
+ langgraph==0.4.1
+ tavily-python==0.7.1
+ python-dotenv
+ wikipedia
+ pandas
+ faiss-cpu
+ langchain_huggingface
test.py ADDED
@@ -0,0 +1,19 @@
+ import os
+ from dotenv import load_dotenv
+ from langchain_groq import ChatGroq
+ from langchain_core.messages import HumanMessage
+
+ # Load environment variables from .env
+ load_dotenv()
+
+ # Initialize the Groq chat model with Qwen
+ llm = ChatGroq(
+     temperature=0,
+     model_name="qwen-qwq-32b",  # Updated to a working model
+     groq_api_key=os.getenv("GROQ_API_KEY")
+ )
+
+ # Send a test message; use .invoke() since calling the chat model directly is deprecated
+ response = llm.invoke([HumanMessage(content="Hello Groq, how fast are you?")])
+
+ print(response.content)