gpt4 / app.py
Kvikontent's picture
Update app.py
acaca81
raw
history blame
902 Bytes
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
# Load the tokenizer and model
# NOTE(review): "gpt-chatbot" looks like a local directory or a Hub repo id —
# it is not a canonical HuggingFace model name; confirm the path/repo exists
# before deploying. Both loads happen at import time (module-level side effect).
tokenizer = AutoTokenizer.from_pretrained("gpt-chatbot")
model = AutoModelForCausalLM.from_pretrained("gpt-chatbot")
def generate_response(input_text):
    """Generate a chatbot reply for *input_text* using the module-level model.

    Parameters
    ----------
    input_text : str
        The user's message, as received from the Gradio text input.

    Returns
    -------
    str
        The model's continuation, decoded with special tokens stripped and
        the prompt tokens removed.
    """
    # NOTE(review): the prompt appends " >> User: " AFTER the user's text,
    # which looks inverted for a chat template — confirm the intended format.
    input_ids = tokenizer.encode(input_text + " >> User: ", return_tensors='pt')
    # Inference only: disable autograd so no gradient graph is built.
    with torch.no_grad():
        generated_output = model.generate(
            input_ids,
            max_length=100,  # total length INCLUDING the prompt tokens
            num_return_sequences=1,
            # GPT-style models have no pad token; using EOS silences the
            # "Setting pad_token_id to eos_token_id" warning from generate().
            pad_token_id=tokenizer.eos_token_id,
        )
    # Slice off the prompt so only the newly generated text is returned.
    response = tokenizer.decode(
        generated_output[:, input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    return response
# Build the web UI.
# NOTE: the `layout` keyword was removed from gr.Interface in Gradio 3.x;
# passing it raises TypeError on current versions, so it is not used here.
iface = gr.Interface(
    fn=generate_response,
    inputs='text',
    outputs='text',
    title='ChatGPT',
    description='A simple chatbot powered by ChatGPT',
    article='https://huggingface.co/models',
    examples=[['Hello'], ['How are you?'], ['What is your name?']],
)

# share=True additionally exposes a temporary public *.gradio.live URL.
iface.launch(share=True)