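# Minimal Gradio demo: text generation with DialoGPT-small.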
import gradio as gr
import os

# Install dependencies at runtime (on Hugging Face Spaces these are
# normally declared in requirements.txt instead).
os.system('pip install transformers torch')

from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the pre-trained model and tokenizer
model_name = "microsoft/DialoGPT-small"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
def generate_response(prompt, max_length=50, temperature=0.8):
    # Encode the prompt, generate a continuation, and decode it back to text.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # do_sample=True is required for temperature to have any effect;
    # pad_token_id silences the missing-pad-token warning from generate().
    output_ids = model.generate(input_ids, max_length=max_length, temperature=temperature,
                                do_sample=True, pad_token_id=tokenizer.eos_token_id,
                                num_return_sequences=1)
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return response
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Prompt"),
    outputs="text",
)

iface.launch()