Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import gradio as gr


# Load the tokenizer that was published alongside the fine-tuned adapter.
tokenizer = AutoTokenizer.from_pretrained("Juliofc/chaterapia_model")

# Add the special [PAD] token
#tokenizer.add_special_tokens({'pad_token': '[PAD]'})

# Load the base model and resize its token embeddings to match the
# tokenizer's vocabulary (a no-op unless the tokenizer added tokens).
model_base = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it")
model_base.resize_token_embeddings(len(tokenizer))

# Wrap the base model with the fine-tuned PEFT adapter weights
# (presumably a LoRA adapter — the repo only shows the PeftModel call).
model_with_adapter = PeftModel.from_pretrained(model_base, "Juliofc/chaterapia_model")
def chat_with_model(user_input, conversation_history):
    """Generate a model reply and append both turns to the transcript.

    Args:
        user_input: The message the user typed.
        conversation_history: Running transcript string. May be ``None`` on
            the first call because ``gr.State()`` defaults to ``None``.

    Returns:
        Tuple ``("", updated_history)`` — the empty string clears the input
        textbox, the updated transcript is fed back into the Gradio state.
    """
    # BUG FIX: gr.State() is created without an initial value, so the first
    # call receives None and `None += str` raised TypeError. Normalize here.
    if not conversation_history:
        conversation_history = ""

    # Record the user's turn.
    conversation_history += f"Usuario: {user_input}\n"

    # Tokenize only the latest message (prior turns are not fed to the model).
    input_tokens = tokenizer.encode(user_input, return_tensors='pt')

    # Generate the reply.
    output_tokens = model_with_adapter.generate(
        input_tokens,
        max_new_tokens=50,
        pad_token_id=tokenizer.pad_token_id,
    )

    # BUG FIX: generate() returns prompt + continuation for causal LMs;
    # slice off the prompt so the transcript does not echo the user's
    # message inside the model's turn.
    new_tokens = output_tokens[0][input_tokens.shape[-1]:]
    generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True)

    # Record the model's turn.
    conversation_history += f"Modelo: {generated_text}\n"

    # "" clears the input textbox in the UI.
    return "", conversation_history
# Gradio front-end: a minimal single-textbox chat around chat_with_model.
with gr.Blocks() as demo:
    gr.Markdown("### Chat con IA")
    input_text = gr.Textbox(label="Tu mensaje")
    submit_button = gr.Button("Enviar")
    output_text = gr.Textbox(label="Historial de la conversación", lines=10, interactive=False)
    # BUG FIX: gr.State() with no argument starts as None, which crashed the
    # string concatenation in chat_with_model. Start with an empty transcript.
    conversation_history = gr.State("")

    # On submit: clear the input box and update the stored history, then
    # mirror the history into the visible textbox — previously output_text
    # was never wired to any event, so the conversation was never shown.
    submit_button.click(
        fn=chat_with_model,
        inputs=[input_text, conversation_history],
        outputs=[input_text, conversation_history],
    ).then(
        fn=lambda history: history,
        inputs=conversation_history,
        outputs=output_text,
    )

demo.launch()