errchh committed

Commit: 03310c4
Parent(s): e5513ad
change llm
Files changed:
- __pycache__/agent.cpython-312.pyc +0 -0
- agent.py +18 -13
- pyproject.toml +2 -0
- uv.lock +0 -0
    	
__pycache__/agent.cpython-312.pyc CHANGED

Binary files a/__pycache__/agent.cpython-312.pyc and b/__pycache__/agent.cpython-312.pyc differ
    	
agent.py CHANGED

@@ -1,8 +1,11 @@
+import getpass
 import os
 from dotenv import load_dotenv
 from typing import TypedDict, List, Dict, Any, Optional, Annotated
 
-from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
+from langchain_huggingface import HuggingFaceEmbeddings # Removed ChatHuggingFace, HuggingFaceEndpoint
+from langchain_google_genai import ChatGoogleGenerativeAI # Added ChatGoogleGenerativeAI
+
 
 from langgraph.graph import StateGraph, MessagesState, START, END
 from langgraph.graph.message import add_messages
@@ -13,7 +16,8 @@ from langchain.tools import Tool
 from langchain_core.tools import tool
 from langchain_community.tools import WikipediaQueryRun
 from langchain_community.utilities import WikipediaAPIWrapper
-from langchain_community. …
+from langchain_community.tools import DuckDuckGoSearchResults
+from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
 from langchain_community.utilities import ArxivAPIWrapper
 from langchain_community.retrievers import BM25Retriever
 
@@ -24,6 +28,8 @@ from langgraph.prebuilt import ToolNode, tools_condition
 load_dotenv()
 HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
 print(f"DEBUG: HUGGINGFACEHUB_API_TOKEN = {HUGGINGFACEHUB_API_TOKEN}")
+GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+print(f"DEBUG: GOOGLE_API_KEY = {GOOGLE_API_KEY}")
 
 
 # maths tool
@@ -103,8 +109,9 @@ def search_web(query: str) -> Dict[str, str]:
     args:
         query: a search query
     """
-    …
-    docs …
+    wrapper = DuckDuckGoSearchAPIWrapper(region="en-us", max_results=2)
+    docs = DuckDuckGoSearchResults(api_wrapper=wrapper)
+    docs.invoke(query)
     formatted_result = f'<Document source="{docs.metadata["source"]}" page="{docs.metadata.get("page", "")}"/>\n{docs.page_content}\n</Document>'
     return {"web_results": formatted_result}
 
@@ -150,25 +157,23 @@ tools = [
 # build graph function
 def build_graph():
     # llm
-    …
-    …
-    …
+    # Using ChatGoogleGenerativeAI as the LLM
+    llm = ChatGoogleGenerativeAI(
+        model="gemini-2.5-flash-preview-04-17",
+        google_api_key=GOOGLE_API_KEY
     )
     print(f"DEBUG: llm object = {llm}")
 
-    chat = ChatHuggingFace(llm=llm, verbose=False)
-    print(f"DEBUG: chat object = {chat}")
-
     # bind tools to llm
-    …
-    print(f"DEBUG: …
+    llm_with_tools = llm.bind_tools(tools)
+    print(f"DEBUG: llm_with_tools object = {llm_with_tools}")
 
     # generate AgentState and Agent graph
     class AgentState(TypedDict):
         messages: Annotated[list[AnyMessage], add_messages]
 
     def assistant(state: AgentState):
-        result = …
+        result = llm_with_tools.invoke(state["messages"])
         # Ensure the result is always wrapped in a list, even if invoke returns a single message
         # Add usage information if it's not already present
         if isinstance(result, AIMessage) and result.usage_metadata is None:
    	
pyproject.toml CHANGED

@@ -6,8 +6,10 @@ readme = "README.md"
 requires-python = ">=3.12"
 dependencies = [
     "dotenv>=0.9.9",
+    "duckduckgo-search>=8.0.1",
     "gradio>=5.29.0",
     "langchain-community>=0.3.23",
+    "langchain-google-genai>=2.1.4",
     "langchain-huggingface>=0.2.0",
     "langchain-openai>=0.3.16",
     "langchain-tools>=0.1.34",
    	
uv.lock CHANGED

The diff for this file is too large to render. See raw diff.