# gpt4 / app.py
# Source: Hugging Face Space file by Kvikontent (commit caa0347, "Create app.py", 839 bytes).
# NOTE(review): the original lines here were web-page chrome captured from the
# HF file viewer ("raw / history blame"), not Python — converted to comments so
# the module parses.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
# Load the tokenizer and model
# Module-level, so the (slow) download/initialization happens once at startup,
# not per request. Both names are read by generate_response() below.
# NOTE(review): despite the file's "gpt4"/"ChatGPT" labels, the checkpoint is
# plain GPT-2 (base, 124M params).
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
def generate_response(input_text):
    """Generate a GPT-2 continuation of *input_text*.

    Parameters
    ----------
    input_text : str
        The user prompt to complete.

    Returns
    -------
    str
        The decoded generation. Note it includes the prompt itself, since
        causal-LM generation returns prompt + continuation tokens.
    """
    # Tokenize via __call__ rather than encode() so we also get the attention
    # mask; without it, generate() has to infer the mask and emits a warning.
    encoded = tokenizer(input_text, return_tensors='pt')
    # Inference only — no_grad avoids building an autograd graph.
    with torch.no_grad():
        generated_output = model.generate(
            encoded['input_ids'],
            attention_mask=encoded['attention_mask'],
            # max_length counts prompt tokens too, so long prompts leave
            # little room for the continuation.
            max_length=100,
            num_return_sequences=1,
            # GPT-2 defines no pad token; padding with EOS is the documented
            # workaround and silences the per-call runtime warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    response = tokenizer.decode(generated_output[0], skip_special_tokens=True)
    return response
# Build the web UI. One text box in, one text box out.
# Fix: the `layout=` keyword was removed from gr.Interface in Gradio 3.x+,
# so passing it raises TypeError on any current install — dropped here.
# Fix: the description claimed "ChatGPT", but the loaded model is GPT-2.
iface = gr.Interface(
    fn=generate_response,
    inputs='text',
    outputs='text',
    title='ChatGPT',
    description='A simple chatbot powered by GPT-2',
    # Rendered below the interface; points users at other available models.
    article='https://huggingface.co/models',
    examples=[['Hello'], ['How are you?'], ['What is your name?']],
)

# Start the Gradio server (blocks until shut down).
iface.launch()