File size: 8,584 Bytes
1dfef0f
26f5620
03aebad
836d49c
26f5620
cd8de6f
 
26f5620
 
 
 
cd8de6f
 
26f5620
 
 
b0eca46
26f5620
87c8549
16c9822
 
 
037cb93
b7e4e52
aaf11bc
9b810cb
b7e4e52
 
3fb3844
ff8595b
 
 
9b810cb
aaf11bc
03aebad
 
26f5620
 
 
 
 
 
 
 
037cb93
26f5620
 
 
 
 
 
 
 
 
037cb93
 
26f5620
 
 
 
 
 
 
 
7240bca
 
26f5620
 
 
 
 
 
 
 
 
 
7240bca
 
26f5620
 
 
 
 
 
 
 
7240bca
26f5620
 
 
1dfef0f
26f5620
 
 
 
 
 
 
 
 
 
 
 
 
9af2eae
26f5620
 
 
 
 
 
 
7240bca
26f5620
 
 
 
 
1dfef0f
26f5620
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70f8384
257f406
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b810cb
257f406
b7e4e52
46c3b43
 
 
 
257f406
46c3b43
 
 
 
 
 
 
 
 
 
 
 
b7e4e52
46c3b43
 
b7e4e52
 
257f406
b7e4e52
257f406
9b810cb
aaf11bc
9b810cb
aaf11bc
 
b81a0d7
 
9b810cb
aaf11bc
9b810cb
 
257f406
 
9b810cb
257f406
9b810cb
 
1466462
aaf11bc
9b810cb
 
 
aaf11bc
7ef81f0
45e2eff
94e7570
9b810cb
 
b7e4e52
26f5620
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfee30e
26f5620
 
 
 
 
 
 
7240bca
26f5620
 
 
 
 
 
 
 
 
a52ceb6
26f5620
a1fdb15
 
3fb3844
 
 
 
 
 
 
 
a1fdb15
26f5620
3fb3844
 
26f5620
e844a7f
a1fdb15
 
 
 
 
 
 
 
 
26f5620
 
cd8de6f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
# agent.py

import os
from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.document_loaders import ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
from sentence_transformers import SentenceTransformer
from langchain.embeddings.base import Embeddings
from typing import List
import numpy as np


import pandas as pd
import uuid
import requests
import json
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings



load_dotenv()

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.
    Args:
        a: first int
        b: second int
    """
    return a * b

@tool
def add(a: int, b: int) -> int:
    """Add two numbers.
    
    Args:
        a: first int
        b: second int
    """
    return a + b

@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.
    
    Args:
        a: first int
        b: second int
    """
    return a - b

@tool
def divide(a: int, b: int) -> int:
    """Divide two numbers.
    
    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b

@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.
    
    Args:
        a: first int
        b: second int
    """
    return a % b

@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return maximum 2 results.
    
    Args:
        query: The search query."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ])
    return {"wiki_results": formatted_search_docs}

@tool
def web_search(query: str) -> str:
    """Search Tavily for a query and return maximum 3 results.
    
    Args:
        query: The search query."""
    search_docs = TavilySearchResults(max_results=3).invoke(query=query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ])
    return {"web_results": formatted_search_docs}

@tool
def arvix_search(query: str) -> str:
    """Search Arxiv for a query and return maximum 3 result.
    
    Args:
        query: The search query."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return {"arvix_results": formatted_search_docs}



# load the system prompt from the file
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message
sys_msg = SystemMessage(content=system_prompt)



# -------------------------------
# Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
# -------------------------------
# Here we assume the tasks are already fetched from a URL or file.
# For now, using an example JSON array directly. Replace this with the actual loading logic.

tasks = [
    {
        "task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
        "question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of English Wikipedia.",
        "Level": "1",
        "file_name": ""
    },
    {
        "task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
        "question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
        "Level": "1",
        "file_name": ""
    }
]

# -------------------------------
# Step 3: Create Documents from Each JSON Object
# -------------------------------
docs = []
for task in tasks:
    # Debugging: Print the keys of each task to ensure 'question' exists
    print(f"Keys in task: {task.keys()}")

    # Ensure the required field 'question' exists
    if 'question' not in task:
        print(f"Skipping task with missing 'question' field: {task}")
        continue

    content = task.get('question', "").strip()
    if not content:
        print(f"Skipping task with empty 'question': {task}")
        continue

    # Add unique ID to each document
    task['id'] = str(uuid.uuid4())

    # Create a document from the task data
    docs.append(Document(page_content=content, metadata=task))



# -------------------------------
# Step 4: Set up HuggingFace Embeddings and FAISS VectorStore
# -------------------------------
# Initialize HuggingFace Embedding model
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

# Create FAISS VectorStore from documents
vector_store = FAISS.from_documents(docs, embedding_model)
#vector_store = FAISS.load_local("faiss_index", embedding_model)

# Save the FAISS index locally
vector_store.save_local("faiss_index")



# -------------------------------
# Step 5: Create Retriever Tool (for use in LangChain)
# -------------------------------
retriever = vector_store.as_retriever()

# Create the retriever tool
question_retriever_tool = create_retriever_tool(
    retriever=retriever,
    name="Question_Search",
    description="A tool to retrieve documents related to a user's question."
)





tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arvix_search,
]

# Build graph function
def build_graph(provider: str = "google"):
    """Build the graph"""
    # Load environment variables from .env file
    if provider == "google":
        # Google Gemini
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        # Groq https://console.groq.com/docs/models
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
    elif provider == "huggingface":
        # TODO: Add huggingface endpoint
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
    # Bind tools to LLM
    llm_with_tools = llm.bind_tools(tools)

    # Node
    def assistant(state: MessagesState):
        """Assistant node"""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}
    
    def retriever(state: MessagesState):
        """Retriever node"""
        similar_question = vector_store.similarity_search(state["messages"][0].content)
    
        if not similar_question:
            example_msg = HumanMessage(content="No similar question found.")
        else:
            example_msg = HumanMessage(
                content=f"Here I provide a similar question and answer for reference:\n\n{similar_question[0].page_content}",
            )
    
        return {"messages": [sys_msg] + state["messages"] + [example_msg]}


    
    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    # Compile graph
    return builder.compile()