import os
from dotenv import load_dotenv
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from typing import TypedDict, Annotated, Literal
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
import gradio as gr
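# Overview: a travel recommendation assistant built with LangGraph. A recommender
# node proposes a destination from a small in-memory database, a reviewer node
# scores the proposal, and a conditional edge loops back to the recommender until
# the score meets the quality threshold or the iteration limit is reached. The
# final answer is served through a Gradio interface.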

load_dotenv()

# Configuration
max_tokens = 2000        # token cap for each LLM call
num_iterations = 2       # maximum number of recommendation attempts
quality_threshold = 8    # minimum review score to accept a recommendation

# Simulated database of travel destinations
travel_database = {
    "paris": {"destination": "Paris", "price": 1500, "features": ["romantic", "cultural", "historic"]},
    "bali": {"destination": "Bali", "price": 1200, "features": ["beach", "relaxing", "adventurous"]},
    "new_york": {"destination": "New York", "price": 2000, "features": ["urban", "shopping", "nightlife"]},
    "tokyo": {"destination": "Tokyo", "price": 1800, "features": ["modern", "cultural", "tech-savvy"]},
}

# Structured output models for each node
class GenerateRecommendation(BaseModel):
    destination: str = Field(description="The recommended travel destination")
    explanation: str = Field(description="A brief explanation of the recommendation")

class RecommendationQualityScore(BaseModel):
    score: int = Field(description="Recommendation quality score between 1 and 10")
    comment: str = Field(description="Comment on the quality of the recommendation")

# Graph state shared by the nodes (initial values are supplied in run_graph;
# the second argument of Annotated is a reducer, not a default value)
class GraphState(TypedDict):
    messages: Annotated[list, add_messages]
    quality: int  # latest review score, written by recommendation_review
    iterations: int  # number of recommendation attempts made so far
    final_recommendation: str  # final answer written by final_recommendation

# Initialize the graph builder
builder = StateGraph(GraphState)

llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0,
    max_tokens=max_tokens,
    api_key=os.getenv("OPENAI_API_KEY")
)
recommender_structured_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
reviewer_structured_llm = llm.with_structured_output(RecommendationQualityScore, method="json_mode")
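# Note: method="json_mode" uses the model's native JSON mode; for OpenAI models the
# word "JSON" must appear somewhere in the messages, which both system prompts below
# satisfy by explicitly asking for a JSON response.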

def travel_recommender(state):
    # Use the most recent human message as the user's preferences; on later
    # iterations the last message is the reviewer's feedback, not the request.
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    user_requirements = human_messages[-1].content
    system_prompt = f"""
    You are an expert travel advisor.
    Based on the following user preferences: {user_requirements},
    select the best destination from this database: {travel_database}.
    Respond in JSON with the key `destination` for the recommended destination and `explanation` with a brief reason for the recommendation.
    """
    system_messages = [SystemMessage(content=system_prompt)]

    messages = system_messages + human_messages + ai_messages
    message = recommender_structured_llm.invoke(messages)

    recommendation_output = f"Destination: {message.destination}\nExplanation: {message.explanation}"
    # Append the recommendation so the reviewer node can evaluate it
    state["messages"].append(AIMessage(content=recommendation_output))
    state["iterations"] += 1
    return state

def recommendation_review(state):
    system_prompt = """
    You are a recommendation reviewer with high standards.
    Review the provided recommendation and assign a quality score between 1 and 10.
    Evaluate its relevance, accuracy, and alignment with the customer's needs.
    Respond in JSON with the keys `score` and `comment`.
    """
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    system_messages = [SystemMessage(content=system_prompt)]

    messages = system_messages + human_messages + ai_messages
    message = reviewer_structured_llm.invoke(messages)

    review_comment = f"Review Score: {message.score}\nComment: {message.comment}"
    # Record the review so the recommender can use the feedback on the next pass
    state["messages"].append(AIMessage(content=review_comment))
    state["quality"] = message.score
    return state

def final_recommendation(state):
    system_prompt = "Review the final recommendation and provide a final answer for the user."
    human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
    ai_messages = [msg for msg in state["messages"] if isinstance(msg, AIMessage)]
    system_messages = [SystemMessage(content=system_prompt)]

    messages = system_messages + human_messages + ai_messages
    final_message = llm.invoke(messages)

    # Store the final recommendation in the state so the UI can display it
    state["final_recommendation"] = final_message.content
    state["messages"].append(AIMessage(content=f"Final Recommendation: {final_message.content}"))
    return state

# Routing condition for the conditional edge: loop back or finish
def quality_gate_condition(state) -> Literal["travel_recommender", "final_recommendation"]:
    # Stop once the iteration budget is spent, otherwise loop while quality is low
    if state["iterations"] >= num_iterations:
        return "final_recommendation"
    if state["quality"] < quality_threshold:
        return "travel_recommender"
    return "final_recommendation"

# Add the nodes to the graph
builder.add_node("travel_recommender", travel_recommender)
builder.add_node("recommendation_review", recommendation_review)
builder.add_node("final_recommendation", final_recommendation)

# Connect the nodes
builder.add_edge(START, "travel_recommender")
builder.add_edge("travel_recommender", "recommendation_review")
builder.add_edge("final_recommendation", END)

builder.add_conditional_edges("recommendation_review", quality_gate_condition)
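# Without an explicit path map, the string returned by quality_gate_condition is
# used directly as the name of the next node (the Literal annotation documents the
# possible targets). An equivalent, purely illustrative explicit mapping would be:
# builder.add_conditional_edges(
#     "recommendation_review",
#     quality_gate_condition,
#     {"travel_recommender": "travel_recommender", "final_recommendation": "final_recommendation"},
# )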

graph = builder.compile()
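# Optional debugging sketch (assumes langgraph's built-in Mermaid export is
# available in the installed version); uncomment to print the diagram source:
# print(graph.get_graph().draw_mermaid())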

# Run the graph with the user's input and return the final recommendation
def run_graph(user_input: str) -> str:
    initial_state = {"messages": [HumanMessage(content=user_input)], "quality": 0, "iterations": 0}
    final_state = graph.invoke(initial_state)
    return final_state.get("final_recommendation", "No final recommendation was generated.")
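# Example invocation outside the UI (the preference text is illustrative):
# print(run_graph("I want a relaxing beach holiday for under $1,500"))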

# Gradio interface
iface = gr.Interface(
    fn=run_graph,
    inputs=gr.Textbox(label="Enter your travel preferences"),
    outputs=gr.Textbox(label="Final Recommendation"),
    title="Travel Recommendation System"
)

if __name__ == "__main__":
    iface.launch()