import os

from dotenv import load_dotenv

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_community.document_loaders import ArxivLoader, WikipediaLoader
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_huggingface import (
    ChatHuggingFace,
    HuggingFaceEndpoint,
    HuggingFaceEmbeddings,
)

# from langchain_openai import ChatOpenAI  # alternative backend for the commented block below

from langgraph.graph import START, END, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode


load_dotenv()  # pull API keys (e.g. the Hugging Face token) from a local .env file

# Embedding model (currently unused here; kept for wiring up a retriever tool
# later).
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2",
)


# Initialize the DuckDuckGo search tool
search_tool = DuckDuckGoSearchResults()
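
# DuckDuckGoSearchResults is already a ready-made LangChain tool and needs no
# API key, so it can be passed to the agent as-is alongside the custom @tool
# functions below.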


@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    # Wrap each document in a pseudo-XML envelope so the model can tell the
    # sources apart.
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">'
        f"\n{doc.page_content}\n</Document>"
        for doc in search_docs
    )
    return formatted_search_docs


@tool
def web_search(query: str) -> str:
    """Search the web via Tavily and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of dicts with "url" and "content"
    # keys; it requires TAVILY_API_KEY to be set.
    search_results = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{result["url"]}">\n{result["content"]}\n</Document>'
        for result in search_results
    )
    return formatted_search_docs


@tool
def arxiv_search(query: str) -> str:
    """Search arXiv for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # ArxivLoader metadata has no "source" key, so fall back to the paper
    # title; each document is truncated to its first 1000 characters.
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("source", doc.metadata.get("Title", ""))}">'
        f"\n{doc.page_content[:1000]}\n</Document>"
        for doc in search_docs
    )
    return formatted_search_docs


# Initialize the LLM; the commented ChatOpenAI block below is a drop-in
# alternative backend.
# llm = ChatOpenAI(
#     model="gpt-4o",
#     base_url="https://models.inference.ai.azure.com",
#     api_key=os.environ["GITHUB_TOKEN"],
#     temperature=0.2,
#     max_tokens=4096,
# )
llm = ChatHuggingFace(
    llm=HuggingFaceEndpoint(
        repo_id="microsoft/Phi-3-mini-4k-instruct",
        temperature=0,
        # huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    ),
    verbose=True,
)
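
# HuggingFaceEndpoint reads the API token from the environment
# (HUGGINGFACEHUB_API_TOKEN / HF_TOKEN), which load_dotenv() populates above,
# so it does not need to be passed explicitly.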

tools = [
    arxiv_search,
    wiki_search,
    # web_search,  # enable once TAVILY_API_KEY is set
    search_tool,
]
# Bind the tools to the LLM
model_with_tools = llm.bind_tools(tools)
tool_node = ToolNode(tools)
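
# ToolNode reads the tool calls from the last AIMessage in the state, runs the
# matching tools, and appends the results as ToolMessages.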


def build_agent_workflow():
    """Compile and return the agent graph: an LLM node and a tool node wired
    in a loop."""

    def should_continue(state: MessagesState):
        """Route to the tool node if the last model message requested tool
        calls; otherwise end the run."""
        messages = state["messages"]
        last_message = messages[-1]
        if last_message.tool_calls:
            return "tools"
        return END

    def call_model(state: MessagesState):
        system_message = SystemMessage(
            content="""You are a helpful assistant tasked with answering questions using a set of tools.

Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:

FINAL ANSWER: [YOUR FINAL ANSWER]

YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings. If you are asked for a number, don't write it with commas and don't use units such as $ or percent signs unless specified otherwise. If you are asked for a string, don't use articles or abbreviations (e.g. for city names), and write digits in plain text unless specified otherwise. If you are asked for a comma-separated list, apply the above rules to each element depending on whether it is a number or a string.

Your answer must start with "FINAL ANSWER: ", followed by the answer."""
        )

        messages = [system_message] + state["messages"]
        print("Messages to LLM:", messages)

        response = model_with_tools.invoke(messages)
        return {"messages": [response]}

    # Define the state graph
    workflow = StateGraph(MessagesState)
    workflow.add_node("agent", call_model)
    workflow.add_node("tools", tool_node)

    workflow.add_edge(START, "agent")
    workflow.add_conditional_edges("agent", should_continue, ["tools", END])
    workflow.add_edge("tools", "agent")

    app = workflow.compile()

    return app


if __name__ == "__main__":
    question = "Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?"
    # Build the graph
    graph = build_agent_workflow()
    # Run the graph on a single question
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for m in result["messages"]:
        m.pretty_print()