File size: 1,731 Bytes
b6126da e8cf90a db183cb 76353a0 db183cb b6126da e8cf90a b6126da e8cf90a b6126da e8cf90a b6126da 76353a0 b6126da 76353a0 b6126da |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
import gradio as gr
import os
#os.system('pip install transformers torch')
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the pre-trained model and tokenizer.
# DialoGPT-small is a GPT-2 architecture conversational model, so the
# GPT-2 model/tokenizer classes can load it directly from the Hub.
model_name = "microsoft/DialoGPT-small"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
# Initial system prompt and chat history.
# chat_history is a module-level mutable string: generate_response() and
# the UI callback both read it and append new turns to it.
system_prompt = "You are a helpful assistant."
chat_history = system_prompt
def generate_response(prompt, max_length=50, temperature=0.8):
    """Generate an assistant reply to *prompt*, conditioned on the chat history.

    Args:
        prompt: The user's latest message.
        max_length: Maximum number of NEW tokens to generate. (The original
            passed this as the TOTAL sequence length, which fails as soon as
            the accumulated history alone exceeds 50 tokens.)
        temperature: Sampling temperature; only has an effect with sampling
            enabled, so ``do_sample=True`` is set explicitly.

    Returns:
        The decoded assistant response — the newly generated tokens only.

    Side effects:
        Appends the " User: ... Assistant: ..." turn to the module-level
        ``chat_history`` string. This is the single place the history is
        recorded; callers must not append again.
    """
    global chat_history
    input_text = chat_history + " User: " + prompt
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_length,            # bound NEW tokens, not total length
        temperature=temperature,
        do_sample=True,                       # temperature is ignored without sampling
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; avoids a warning
    )
    # Decode only the newly generated tokens. The original decoded the whole
    # output sequence, so the "response" echoed the entire prompt/history back.
    response = tokenizer.decode(
        output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    # Record the completed turn in the shared history.
    chat_history += f" User: {prompt} Assistant: {response}"
    return response
def chat_room_ui():
    """Build and return the chat UI as a Gradio Blocks app.

    Fixes relative to the original, which could not run:
    - ``gr.Textbox`` has no ``prompt``/``text``/``readonly`` kwargs; the real
      ones are ``label``/``value``/``interactive``. ``gr.Button`` takes its
      label as ``value`` (or positionally), not ``text``.
    - Event callbacks receive component values via ``inputs=`` and publish
      via ``outputs=``; reading ``component.value`` at event time does not
      reflect what the user typed.
    - The original appended to ``chat_history`` in the callback even though
      ``generate_response`` already appends, double-logging every turn.

    Returns:
        A ``gr.Blocks`` application; call ``.launch()`` on it to serve.
    """
    with gr.Blocks() as demo:
        chat_display = gr.Textbox(value=chat_history, label="Chat Room", interactive=False)
        user_input = gr.Textbox(label="User Input", placeholder="Type your message...")
        generate_button = gr.Button("Generate Response")

        def generate_response_callback(user_prompt):
            # generate_response records the turn in chat_history itself;
            # do NOT append again here.
            generate_response(user_prompt)
            # Show the updated transcript and clear the input box.
            return chat_history, ""

        generate_button.click(
            generate_response_callback,
            inputs=user_input,
            outputs=[chat_display, user_input],
        )
    return demo
# Build and launch the app. The original passed ``ui=chat_room_ui`` to
# gr.Interface, but gr.Interface has no ``ui`` parameter and ``fn=None``
# is invalid — it crashed on construction. The UI builder must be CALLED
# to produce the app, which is then launched.
iface = chat_room_ui()

if __name__ == "__main__":
    iface.launch()
|