import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from rerankers import Reranker
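# Dependencies implied by the imports above (a representative, unpinned set):
# gradio, transformers, torch, langchain-community, langchain-huggingface,
# langchain-text-splitters, chromadb, pypdf, rerankers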
# Load the PDF and split it into chunks
loader = PyPDFLoader("80dias.pdf")
documents = loader.load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=20)
splits = splitter.split_documents(documents)
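# chunk_size/chunk_overlap are tuning knobs rather than fixed requirements; a
# larger overlap (e.g. 100-200 characters) is a common way to avoid splitting
# sentences right at chunk boundaries.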
# Create multilingual embeddings and index the chunks in Chroma
embedding_model = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
vectordb = Chroma.from_documents(splits, embedding=embeddings)
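# Chroma.from_documents builds an in-memory collection here, so the index is
# rebuilt on every restart. A sketch of persisting it instead (the directory
# name is an arbitrary example):
# vectordb = Chroma.from_documents(splits, embedding=embeddings, persist_directory="./chroma_db")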
# Initialize the ColBERT reranker
ranker = Reranker("answerdotai/answerai-colbert-small-v1", model_type="colbert")
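# The ColBERT model re-scores each candidate chunk against the query with
# token-level (late-interaction) matching, which is stronger than the single
# vector comparison used for retrieval. Minimal standalone sketch (query and
# docs are made-up examples):
# results = ranker.rank(query="capital de Francia", docs=["París es la capital.", "Berlín está en Alemania."])
# print(results.top_k(1)[0].text)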
# Load the Hugging Face language model
model_id = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
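# Note: falcon-7b-instruct needs roughly 14 GB of memory in half precision, so
# this realistically requires GPU hardware; on a CPU-only Space a smaller
# instruct model would be needed. Optional smoke test (arbitrary prompt):
# print(generator("Hola, ¿cómo estás?", max_new_tokens=20)[0]["generated_text"])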
# Main RAG function
def rag_chat(message, history):
    query = message
    results = vectordb.similarity_search_with_score(query)
    # Keep only relevant chunks (Chroma scores are distances: lower = more similar)
    context = []
    for doc, score in results:
        if score < 7:
            context.append(doc.page_content)
    if not context:
        return "No tengo información suficiente para responder a esa pregunta."
    # Rerank the candidates and keep the single best chunk
    ranking = ranker.rank(query=query, docs=context)
    best_context = ranking[0].text
    # Build the final prompt
    prompt = f"""Responde a la siguiente pregunta utilizando solo el contexto proporcionado:
Contexto:
{best_context}
Pregunta: {query}
Respuesta:"""
    # Generate the answer and strip everything up to the answer marker
    output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
    response = output.split("Respuesta:")[-1].strip()
    return response
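# Quick local sanity check (hypothetical question; the chat history argument
# is unused by rag_chat):
# print(rag_chat("¿Quién es Phileas Fogg?", []))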
# Gradio chat interface
iface = gr.ChatInterface(
    fn=rag_chat,
    title="Chat Julio Verne - RAG",
    description="Pregunta lo que quieras sobre *La vuelta al mundo en 80 días* de Julio Verne.",
    chatbot=gr.Chatbot(type="messages"),
    theme="default"
)
iface.launch()