# gpt4 / app.py
import torch
from transformers import AutoTokenizer, OpenAIGPTLMHeadModel
import gradio as gr
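
# Load the original GPT ("openai-gpt", i.e. GPT-1) tokenizer and language-model head from the Hugging Face Hub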
tokenizer = AutoTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

def generate_response(text):
    # Tokenize the prompt and generate a continuation with greedy decoding (up to 50 tokens);
    # the unused forward pass that computed a training loss has been dropped.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():  # inference only, no gradients needed
        generated_ids = model.generate(inputs["input_ids"],
                                       num_return_sequences=1,
                                       max_length=50)
    generated_text = tokenizer.decode(generated_ids.squeeze(), skip_special_tokens=True)
    return generated_text
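
# Simple text-in/text-out Gradio UI with a few example prompts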
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="OpenAI GPT Chatbot",
    description="Enter some text, and the OpenAI GPT model will generate a response.",
    examples=[
        ["Hello, how are you?"],
        ["What is the capital of France?"],
        ["Tell me a joke."],
    ],
)
iface.launch()