|
from typing import TypedDict

import gradio as gr
import torch
from langchain_core.runnables import RunnableLambda
from langgraph.graph import END, StateGraph
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
|
# Hugging Face model id for the chat model that backs the travel agent.
model_id = "Qwen/Qwen1.5-32B-Chat"

# NOTE(review): the model and tokenizer load at import time; a 32B model in
# float16 still needs tens of GB of accelerator memory — confirm the
# deployment target can host it before shipping.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # shard/place layers across available devices automatically
    torch_dtype=torch.float16,  # half precision to halve the memory footprint
    trust_remote_code=True
)
# Inference only: disable dropout/batch-norm training behavior.
model.eval()
|
|
|
|
|
class QwenWrapper:
    """Thin synchronous wrapper around the module-level Qwen model/tokenizer."""

    def invoke(self, prompt: str) -> str:
        """Generate a single-turn chat completion for *prompt*.

        Args:
            prompt: the full user message to send to the model.

        Returns:
            The assistant's reply text, stripped of special tokens and
            surrounding whitespace.
        """
        messages = [{"role": "user", "content": prompt}]
        # add_generation_prompt=True appends the assistant header so the
        # model continues as the assistant rather than extending the user turn.
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt",
        ).to(model.device)
        with torch.no_grad():  # pure inference; skip autograd bookkeeping
            output = model.generate(
                inputs,
                max_new_tokens=256,
                do_sample=True,
                temperature=0.7,
            )
        # Decode only the newly generated tokens. This is robust, unlike the
        # previous split on the literal string "assistant", which broke
        # whenever the reply itself contained that word.
        new_tokens = output[0][inputs.shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()


# Module-level singleton used by the graph node below.
qwen = QwenWrapper()
|
|
|
|
|
class AgentState(TypedDict, total=False):
    """State schema passed between LangGraph nodes.

    Declaring the keys (instead of a bare ``dict`` subclass) follows the
    documented LangGraph pattern and lets the graph know which channels
    exist. ``total=False`` because the initial state carries only ``input``.
    """

    input: str  # raw user query
    output: str  # generated agent reply
|
|
|
|
|
def agent_step(state: AgentState) -> AgentState:
    """LangGraph node: answer the user's travel query with the Qwen wrapper.

    Reads ``state["input"]``, wraps it in a Spanish travel-agent system
    prompt, and returns a new state carrying the reply under ``"output"``.

    Args:
        state: graph state; expected to contain an ``"input"`` string.

    Returns:
        A state dict with the original ``input`` and the generated ``output``.
    """
    user_input = state.get("input")
    if not user_input:
        # Guard against a missing/empty query so the node never raises
        # KeyError inside the graph run.
        return {"input": "", "output": "No se recibió entrada."}

    travel_prompt = (
        "Eres un agente de viajes profesional y experimentado. "
        "Asesora, recomienda y planifica itinerarios, destinos y actividades de viaje según las preferencias del usuario. "
        f"Usuario: {user_input}"
    )
    response = qwen.invoke(travel_prompt)
    return {"input": user_input, "output": response}
|
|
|
|
|
# Wrap the node function so LangGraph can invoke it as a Runnable.
agent_node = RunnableLambda(agent_step)

# Single-node graph: entry point -> "agent" -> END.
graph_builder = StateGraph(AgentState)
graph_builder.add_node("agent", agent_node)
graph_builder.set_entry_point("agent")
graph_builder.add_edge("agent", END)
# Compiled, invokable graph used by the UI callback below.
graph = graph_builder.compile()
|
|
|
|
|
def chat_with_agent(user_input: str) -> str:
    """Gradio callback: run one user query through the compiled graph.

    The code previously here was a dead duplicate of ``agent_step`` — it
    rebound the name after the graph had already been compiled, so it was
    never called — while ``chat_with_agent``, referenced by the Gradio
    interface, was never defined (NameError at startup). This defines the
    missing callback, preserving the duplicate's empty-input guard.

    Args:
        user_input: the raw text typed into the Gradio textbox.

    Returns:
        The agent's reply text, or a fallback message for empty input.
    """
    if not user_input:
        return "No se recibió entrada."
    result = graph.invoke({"input": user_input})
    return result["output"]
|
|
|
|
|
|
|
# Minimal Gradio UI: one multiline textbox in, plain text out.
# NOTE(review): verify that ``chat_with_agent`` is defined earlier in this
# file — it must accept the textbox string and return the agent's reply.
iface = gr.Interface(
    fn=chat_with_agent,
    inputs=gr.Textbox(lines=2, placeholder="Haz una consulta sobre viajes..."),
    outputs="text",
    title="Agente de Viajes con LangGraph y Qwen",
    description="Agente de viajes que utiliza LangGraph y Qwen/Qwen1.5-32B-Chat para recomendar destinos, itinerarios y consejos de viaje."
)

# Start the web server (blocks the main thread until shutdown).
iface.launch()
|
|