# from langchain_community.chat_models import ChatOllama  # optional local alternative
from langgraph.graph import MessagesState, StateGraph, START, END
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_community.document_loaders import WikipediaLoader
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
import os
from dotenv import load_dotenv

# Assumes a .env file alongside this script that defines HF_TOKEN=<your token>.
load_dotenv()
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token

@tool
def use_search_tool(query: str) -> str:
    """Use DuckDuckGo to search the web for information.

    Args:
        query (str): The search query.
    Returns:
        str: The search result.
    """
    # Return the raw string so the declared return type matches.
    return DuckDuckGoSearchRun().run(query)

@tool
def use_wikipedia_tool(query: str) -> str:
    """Fetch a summary from Wikipedia.

    Args:
        query (str): The topic to search on Wikipedia.
    Returns:
        str: A summary of the topic from Wikipedia.
    """
    docs = WikipediaLoader(query=query, load_max_docs=2).load()
    if docs:
        # Join the page contents into a single string rather than
        # returning raw Document objects.
        return "\n\n".join(doc.page_content for doc in docs)
    return f"Sorry, I couldn't find any information on '{query}' on Wikipedia."

def build_agent():
    # llm = ChatOllama(model="llama3.1")  # optional local alternative
    # ChatHuggingFace wraps the endpoint so the model can emit native tool
    # calls via bind_tools (assumes the endpoint serves a chat-capable model).
    llm = ChatHuggingFace(llm=HuggingFaceEndpoint(
        endpoint_url="https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-Prover-V2-671B",
        huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
    ))
    tools = [use_wikipedia_tool, use_search_tool]
    llm_with_tools = llm.bind_tools(tools)

    system_template = (
        "You are a helpful, friendly, and respectful AI assistant. "
        "Always address the user politely and answer their questions in a positive manner.\n"
        "When reasoning, always use the following format:\n"
        "Thought: [your reasoning here]\n"
        "Action: [the action to take, should be one of [{tool_names}]]\n"
        "Action Input: [the input to the action]\n"
        "If you know the answer without using a tool, respond with:\n"
        "Thought: [your reasoning here]\n"
        "Final Answer: [your answer here]\n"
        "Always ensure your responses are polite, accurate, and helpful."
    )
    system_prompt = SystemMessage(content=system_template.format(
        tool_names=", ".join([tool.name for tool in tools])
    ))
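
    # Note: the Thought/Action format above is advisory for the model's
    # reasoning; actual tool routing relies on the native tool calls emitted
    # by llm_with_tools, which tools_condition inspects below.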

    def call_model(state: MessagesState):
        """Call the LLM with the system prompt prepended to the conversation."""
        messages = [system_prompt] + state["messages"]
        response = llm_with_tools.invoke(messages)
        return {"messages": [response]}
    
    workflow = StateGraph(MessagesState)
    workflow.add_node("assistant", call_model)
    workflow.add_node("tools", ToolNode(tools))
    workflow.add_edge(START, "assistant")
    # tools_condition routes to the "tools" node when the model requests a
    # tool call and to END otherwise, so no separate assistant -> END edge
    # is needed.
    workflow.add_conditional_edges("assistant", tools_condition)
    workflow.add_edge("tools", "assistant")
    return workflow.compile()
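
# For debugging, the compiled graph can be rendered as a Mermaid diagram;
# a sketch using LangGraph's built-in drawing helper:
#   print(build_agent().get_graph().draw_mermaid())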

if __name__ == "__main__":
    graph = build_agent()
    initial_state = {"messages": [HumanMessage(content="Hello, how are you?")]}
    response = graph.invoke(initial_state)
    print(response["messages"][-1].content)
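
    # A follow-up query like the one below should route through a tool.
    # A sketch, assuming network access and a valid HF_TOKEN (the model,
    # not this code, decides which tool to call):
    # result = graph.invoke(
    #     {"messages": [HumanMessage(content="Summarize the history of the Eiffel Tower.")]}
    # )
    # print(result["messages"][-1].content)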