# Agent entry point: Gemini LLM + LangGraph tool-calling loop with Langfuse tracing.
import os
from typing import TypedDict, Annotated

# Load environment variables from .env *before* importing the local tool
# modules below, since some of them may read their API keys from the
# environment at import time.
from dotenv import load_dotenv

load_dotenv()

# Third-party imports.
from IPython.display import Image, display
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langchain_google_genai import ChatGoogleGenerativeAI
# Langfuse itself is initialized by CallbackHandler directly; no separate
# `from langfuse import Langfuse` is needed.
from langfuse.callback import CallbackHandler
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

# Project-local tool collections.
from math_tools import add_tool, subtract_tool, multiply_tool, divide_tool
from multimodal_tools import extract_text_tool, analyze_image_tool, analyze_audio_tool
from serpapi_tools import serpapi_search_tool
from youtube_tools import youtube_transcript_tool

# Read API keys from the environment (each is None if the variable is unset).
api_key = os.getenv("GEMINI_API_KEY")
langfuse_secret_key = os.getenv("LANGFUSE_SECRET_KEY")
langfuse_public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
# Langfuse callback handler for tracing LangGraph/LangChain runs.
# The host defaults to a local Langfuse instance but can be overridden via
# the LANGFUSE_HOST environment variable (backward-compatible default).
langfuse_handler = CallbackHandler(
    public_key=langfuse_public_key,
    secret_key=langfuse_secret_key,
    host=os.getenv("LANGFUSE_HOST", "http://localhost:3000"),
)
# The Gemini chat model used as the agent's reasoning engine.
# temperature=0 for deterministic answers; max_retries=2 for transient API
# failures. NOTE(review): thinking_budget=0 presumably disables the model's
# extended "thinking" phase — confirm against the langchain-google-genai docs.
chat = ChatGoogleGenerativeAI(
    model="gemini-2.5-pro-preview-05-06",
    temperature=0,
    max_retries=2,
    google_api_key=api_key,
    thinking_budget=0,
)
# Tavily-backed web search tool exposed to the agent. The `name` and
# `description` strings are what the LLM sees when deciding whether and how
# to call the tool, so they are kept exactly as authored.
search_tool = TavilySearchResults(
    # You can customize the name if you want.
    name="tavily_web_search",
    # Description shown to the LLM (kept verbatim, in Italian).
    description="Esegue una ricerca web avanzata utilizzando Tavily per informazioni aggiornate e complete. Utile per domande complesse o che richiedono dati recenti. Può essere utile fare più ricerche modificando la query per ottenere risultati migliori.",
    max_results=5,
)
# Tool groups, kept in the exact order they are advertised to the LLM.
multimodal_toolset = [extract_text_tool, analyze_image_tool, analyze_audio_tool]
math_toolset = [add_tool, subtract_tool, multiply_tool, divide_tool]

# Full toolset: multimodal analysis, YouTube transcripts, arithmetic, web search.
tools = (
    multimodal_toolset
    + [youtube_transcript_tool]
    + math_toolset
    + [search_tool]
)

# LLM wired to the toolset: the model may emit tool calls for any of the above.
chat_with_tools = chat.bind_tools(tools)
class AgentState(TypedDict):
    """Graph state: the running message history of the conversation."""
    # `add_messages` is a reducer: node updates are appended/merged into the
    # existing list rather than replacing it on each graph step.
    messages: Annotated[list[AnyMessage], add_messages]
def assistant(state: AgentState):
    """LLM node: run the tool-enabled chat model over the current history.

    Returns a partial state update; the `add_messages` reducer on
    ``AgentState.messages`` appends the new AI message to the history.
    """
    # Imported locally so this fix is self-contained. The previous code
    # prepended a bare `str` to the message list, which LangChain coerces
    # into a *Human* message — the instructions were never delivered with
    # system-message authority. Wrapping in SystemMessage fixes that.
    from langchain_core.messages import SystemMessage

    # Note: the original implicit concatenation was missing spaces at the
    # sentence joins ("constraints.Pay attention"); fixed here.
    sys_msg = SystemMessage(
        content=(
            "You are a helpful assistant with access to tools. Understand user "
            "requests accurately. Use your tools when needed to answer "
            "effectively. Strictly follow all user instructions and constraints. "
            "Pay attention: your output needs to contain only the final answer "
            "without any reasoning since it will be strictly evaluated against a "
            "dataset which contains only the specific response. "
            "Your final output needs to be just the string or integer containing "
            "the answer, not an array or technical stuff."
        )
    )
    return {
        "messages": [chat_with_tools.invoke([sys_msg] + state["messages"])]
    }
## The graph
# Agent loop: START -> assistant -> (tools -> assistant)* -> END.
builder = StateGraph(AgentState)

# Nodes: these do the work.
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Edges: these determine how control flows.
builder.add_edge(START, "assistant")
# After each assistant turn, `tools_condition` routes to "tools" when the
# latest AI message contains tool calls, otherwise ends the run.
builder.add_conditional_edges("assistant", tools_condition)
# Tool results always flow back to the assistant for another turn.
builder.add_edge("tools", "assistant")

alfred = builder.compile()
""" # Salva l'immagine del grafo su un file | |
graph_image_bytes = alfred.get_graph(xray=True).draw_mermaid_png() | |
with open("alfred_graph.png", "wb") as f: | |
f.write(graph_image_bytes) | |
print("L'immagine del grafo è stata salvata come alfred_graph.png") | |
messages = [HumanMessage(content="Who did the actor who played Ray in the Polish-language version of Everybody Loves Raymond play in Magda M.? Give only the first name.")] | |
response = alfred.invoke(input={"messages": messages}, config={"callbacks": [langfuse_handler]}) | |
print("🎩 Alfred's Response:") | |
print(response['messages'][-1].content) | |
""" |