|
import gradio as gr |
|
import os |
|
|
|
|
|
|
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer |
|
|
|
|
|
# Conversational model to load from the Hugging Face hub.
# DialoGPT is a GPT-2 architecture fine-tuned on Reddit dialogue, which is
# why the plain GPT2 model/tokenizer classes work here.
model_name = "microsoft/DialoGPT-small"

# Downloads (or reads from cache) the pretrained weights on import;
# this makes module import slow and network-dependent.
model = GPT2LMHeadModel.from_pretrained(model_name)

tokenizer = GPT2Tokenizer.from_pretrained(model_name)


# NOTE(review): DialoGPT was not trained with system prompts — seeding the
# transcript with one likely has little steering effect; verify empirically.
system_prompt = "You are a helpful assistant."

# Running transcript of the whole conversation. Mutated in place (via
# `global`) by generate_response() and the UI callback below.
chat_history = system_prompt
|
|
|
def generate_response(prompt, max_length=50, temperature=0.8):
    """Generate an assistant reply to *prompt*, conditioned on the chat history.

    As a side effect, appends the new ``User: ... Assistant: ...`` turn to the
    module-level ``chat_history``.

    Args:
        prompt: The user's message for this turn.
        max_length: Maximum number of NEW tokens to generate. (The original
            passed this as the TOTAL length, which fails as soon as the
            encoded history alone exceeds it.)
        temperature: Sampling temperature; only meaningful with sampling
            enabled, so ``do_sample=True`` is set below.

    Returns:
        The assistant's reply text (newly generated tokens only).
    """
    global chat_history
    input_text = chat_history + " User: " + prompt

    # Encode with an explicit attention mask. DialoGPT has no pad token,
    # so EOS is reused as pad_token_id to avoid generate() warnings.
    inputs = tokenizer(input_text, return_tensors="pt")
    input_ids = inputs["input_ids"]

    output_ids = model.generate(
        input_ids,
        attention_mask=inputs["attention_mask"],
        max_new_tokens=max_length,  # bug fix: don't count the prompt toward the budget
        temperature=temperature,
        do_sample=True,  # bug fix: without sampling, temperature was silently ignored
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=1,
    )

    # Bug fix: decode only the tokens generated AFTER the input — decoding
    # output_ids[0] in full echoed the entire history back as the "response".
    new_tokens = output_ids[0][input_ids.shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)

    chat_history += f" User: {prompt} Assistant: {response}"

    return response
|
|
|
def chat_room_ui():
    """Build the chat UI as a Gradio Blocks app and return it.

    The original created components outside any Blocks/Interface context and
    passed a nonexistent ``ui=`` kwarg to ``gr.Interface``; components must be
    instantiated inside a ``gr.Blocks()`` context and wired via events.

    Returns:
        A ``gr.Blocks`` application ready to ``launch()``.
    """
    with gr.Blocks() as demo:
        # `interactive=False` replaces the invalid `readonly=True`;
        # `value=` replaces the invalid `text=` kwarg.
        chat_display = gr.Textbox(value=chat_history, label="Chat Room", interactive=False)
        user_input = gr.Textbox(label="User Input", placeholder="Type your message...")
        generate_button = gr.Button("Generate Response")

        def generate_response_callback(prompt):
            # The user's text arrives as the event's input argument;
            # reading `user_input.value` (as the original did) only sees
            # the component's static default, never live input.
            generate_response(prompt)
            # Bug fix: generate_response() already appends this turn to
            # chat_history — the original appended it a second time here.
            # Return the updated transcript and clear the input box.
            return chat_history, ""

        generate_button.click(
            generate_response_callback,
            inputs=user_input,
            outputs=[chat_display, user_input],
        )

    return demo


# Bug fix: gr.Interface has no ``ui`` parameter; a Blocks app is built by
# chat_room_ui() and launched directly. The ``iface`` name is kept for any
# external code that references it.
iface = chat_room_ui()

iface.launch()
|
|