Juliofc committed on
Commit
fccd3a2
·
verified ·
1 Parent(s): e0b9104

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -55
app.py CHANGED
@@ -42,23 +42,17 @@ CHAT_TEMPLATE= """{% for message in messages %}
42
  {% endfor %}""" # Asegúrate de usar tu CHAT_TEMPLATE aquí
43
  tokenizer.chat_template = CHAT_TEMPLATE
44
 
45
- # Función para generar respuestas del modelo
46
- import gradio as gr
47
-
48
- # Asume que todas tus importaciones previas y configuraciones del modelo están aquí
49
-
50
- # Aquí deberías tener definida la función `generate_response` tal como la compartiste
51
  # Función para generar respuestas del modelo
52
  def generate_response(user_input, chat_history):
53
  # Preparar el input agregando el historial de chat
54
  chat_history.append({"content": user_input, "role": "user"})
55
- print(chat_history)
56
  user_input = tokenizer.apply_chat_template(chat_history, tokenize=False)
57
 
58
  input_tokens = tokenizer(user_input, return_tensors='pt', padding=True, truncation=True, max_length=1024).to(device)
59
 
60
  # Generar la respuesta
61
- output_tokens = model_with_adapter.generate(**input_tokens, max_length=1024, pad_token_id=tokenizer.eos_token_id, top_k=50, top_p=0.95, temperature=0.7)
62
  generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
63
 
64
  last_us = generated_text.rfind("</user>") + len("</user>")
@@ -67,52 +61,11 @@ def generate_response(user_input, chat_history):
67
  chat_history.append({"content": generated_text, "role": "assistant"})
68
  return generated_text, chat_history
69
 
70
- History = List[Tuple[str, str]]
71
-
72
- def clear_session() -> History:
73
- return '', []
74
-
75
- def modify_system_session(system: str) -> str:
76
- if system is None or len(system) == 0:
77
- system = default_system
78
- return system, system, []
79
-
80
- def model_chat(query: Optional[str], history: Optional[History], system: str
81
- ) -> Tuple[str, str, History]:
82
- if query is None:
83
- query = ''
84
- if history is None:
85
- history = []
86
- if not history or history[-1][0] != query: # Asegurar que no se repita la última pregunta
87
- history.append((query, ''))
88
- response, history = generate_response(query, history) # Tu función de modelo
89
- return '', response, history
90
-
91
- placeholder = "This is a simulated conversation with your model."
92
-
93
- with gr.Blocks() as demo:
94
- gr.Markdown("<center><font size=6>Chatbot with Your Model</center>")
95
-
96
- with gr.Row():
97
- system_input = gr.Textbox(value=default_system, lines=1, label='System')
98
- modify_system = gr.Button("🛠️ Set system prompt and clear the history")
99
- system_state = gr.Textbox(value=default_system, visible=False)
100
-
101
- chatbot = gr.Chatbot(label='Chat with Your Model', placeholder=placeholder)
102
- textbox = gr.Textbox(lines=2, label='Input')
103
-
104
- with gr.Row():
105
- clear_history = gr.Button("🧹 Clear history")
106
- submit = gr.Button("🚀 Send")
107
 
108
- submit.click(model_chat,
109
- inputs=[textbox, chatbot, system_state],
110
- outputs=[textbox, chatbot, system_input])
111
- clear_history.click(fn=clear_session,
112
- inputs=[],
113
- outputs=[textbox, chatbot])
114
- modify_system.click(fn=modify_system_session,
115
- inputs=[system_input],
116
- outputs=[system_state, system_input, chatbot])
117
 
118
- demo.launch()
 
42
  {% endfor %}""" # Asegúrate de usar tu CHAT_TEMPLATE aquí
43
  tokenizer.chat_template = CHAT_TEMPLATE
44
 
45
# Module-level conversation buffer.
# NOTE(review): gr.ChatInterface passes its own per-session history into the
# callback, so this global appears unused by the UI path — verify before relying on it.
chat_history = []
 
 
 
 
 
46
  # Función para generar respuestas del modelo
47
  def generate_response(user_input, chat_history):
48
  # Preparar el input agregando el historial de chat
49
  chat_history.append({"content": user_input, "role": "user"})
 
50
  user_input = tokenizer.apply_chat_template(chat_history, tokenize=False)
51
 
52
  input_tokens = tokenizer(user_input, return_tensors='pt', padding=True, truncation=True, max_length=1024).to(device)
53
 
54
  # Generar la respuesta
55
+ output_tokens = model_with_adapter.generate(**input_tokens, max_length=1024, pad_token_id=tokenizer.eos_token_id, top_k=50, do_sample=True, top_p=0.95, temperature=0.7)
56
  generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
57
 
58
  last_us = generated_text.rfind("</user>") + len("</user>")
 
61
  chat_history.append({"content": generated_text, "role": "assistant"})
62
  return generated_text, chat_history
63
 
64
# Adapter between gr.ChatInterface's callback contract and generate_response.
def response(user_input, chat_history):
    """Return the assistant's reply for one chat turn.

    Parameters
    ----------
    user_input : str
        The latest user message.
    chat_history : list
        Conversation history supplied by Gradio.
        NOTE(review): generate_response appends {"content", "role"} dicts;
        confirm Gradio delivers history in that same format.

    Returns
    -------
    str
        The generated assistant reply.
    """
    # Renamed local from `response` to `reply`: the original shadowed this
    # function's own name. Also dropped the leftover debug print of the
    # full history.
    reply, chat_history = generate_response(user_input, chat_history)
    return reply
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Build and launch the chat UI.
# Fix: gr.ChatInterface accepts only the callback (plus its own keyword
# options); `inputs=`/`outputs=` are gr.Interface parameters and raise
# TypeError when passed here.
iface = gr.ChatInterface(fn=response)

iface.launch()