Spaces:
Runtime error
Create app.py
app.py
ADDED
@@ -0,0 +1,100 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import gradio as gr
import os
import torch

default_system = 'You are a helpful assistant.'

device = "cuda" if torch.cuda.is_available() else "cpu"
print(device)
# Make sure your Hugging Face token is loaded as an environment variable
hf_token = os.environ.get("token")
if hf_token is not None:
    from huggingface_hub import HfFolder
    HfFolder.save_token(hf_token)
else:
    print("Hugging Face token not found. Make sure the `token` environment variable is set.")

# Initial setup: load the tokenizer, the base model, and the fine-tuned adapter
tokenizer = AutoTokenizer.from_pretrained("Juliofc/chaterapia_model")
model_base = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it").to(device)
model_base.resize_token_embeddings(len(tokenizer))
model_with_adapter = PeftModel.from_pretrained(model_base, "Juliofc/chaterapia_model").to(device)

CHAT_TEMPLATE = """{% for message in messages %}
{% if message['role'] == 'user' %}
{{ '<user> ' + message['content'].strip() + ' </user>' }}
{% elif message['role'] == 'system' %}
{{ '<system>\\n' + message['content'].strip() + '\\n</system>\\n\\n' }}
{% elif message['role'] == 'assistant' %}
{{ message['content'].strip() + ' </assistant>' + eos_token }}
{% elif message['role'] == 'input' %}
{{ '<input> ' + message['content'] + ' </input>' }}
{% endif %}
{% endfor %}"""  # Make sure to use your own CHAT_TEMPLATE here
tokenizer.chat_template = CHAT_TEMPLATE

chat_history = []  # Global chat history
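
# Illustrative note (an assumption, not from the original file): apply_chat_template
# renders this template with Jinja block trimming enabled, so a history such as
# [{"role": "user", "content": "Hola"}] becomes roughly "<user> Hola </user>",
# and each assistant turn is closed by "</assistant>" followed by the eos_token.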

def generate_response(user_input):
    global chat_history
    # Add the user's message to the history
    chat_history.append({"content": user_input, "role": "user"})

    # Prepare the model input from the full history
    prompt = tokenizer.apply_chat_template(chat_history, tokenize=False)
    input_tokens = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=1024).to(device)

    # Generate the model's response
    output_tokens = model_with_adapter.generate(**input_tokens, max_length=1024, pad_token_id=tokenizer.eos_token_id, top_k=50, do_sample=True, top_p=0.95, temperature=0.7)
    generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)

    # Extract the reply generated after the last user turn
    last_us = generated_text.rfind("</user>") + len("</user>")
    last_as = generated_text.rfind("</assistant>")
    if last_as == -1:  # Guard: if the closing tag never appeared, keep everything after the user turn
        last_as = len(generated_text)
    generated_text = generated_text[last_us:last_as].strip()

    # Add the bot's reply to the history
    chat_history.append({"content": generated_text, "role": "assistant"})
    print(chat_history)
    return generated_text
+
def respond(message):
|
71 |
+
if message: # Verificar si el mensaje no est谩 vac铆o
|
72 |
+
bot_response = generate_response(message)
|
73 |
+
return [(message, bot_response)]
|
74 |
+
return [("", "")]
|


def clear_chat_and_history():
    global chat_history
    chat_history.clear()  # Empty the global chat history
    # Return values that visually clear the Gradio components: an empty
    # string for the Textbox and an empty list for the Chatbot
    return "", []

with gr.Blocks() as demo:
    with gr.Row():
        msg = gr.Textbox(label="Tu mensaje", placeholder="Escribe aquí...", lines=1)
        send_btn = gr.Button("Enviar")
    chatbot = gr.Chatbot()
    # Use a regular Button instead of ClearButton to control which function runs
    clear_btn = gr.Button("Limpiar Chat")

    # On Send, generate a reply and update the Chatbot
    send_btn.click(fn=respond, inputs=msg, outputs=chatbot)
    # On Clear, run clear_chat_and_history and reset both the Textbox and the Chatbot
    clear_btn.click(fn=clear_chat_and_history, inputs=None, outputs=[msg, chatbot])

demo.launch()
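
On Hugging Face Spaces, dependencies are typically declared in a requirements.txt next to app.py rather than installed at runtime. A minimal sketch, inferred from the imports above (versions unpinned, as an assumption):

transformers
peft
torch
gradio
huggingface_hub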