import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = 'cpu'  # change to 'cuda' if a GPU is available

# SteamSHP is a preference model: given a Reddit post and two candidate
# responses, it predicts which response readers would find more helpful.
tokenizer = T5Tokenizer.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large')
model = T5ForConditionalGeneration.from_pretrained('stanfordnlp/SteamSHP-flan-t5-large').to(device)
def process():
    # SteamSHP input template: the post, both candidate responses, and the
    # question "Which response is better?"; the model completes with "A" or "B".
    input_text = "POST: Instacart gave me 50 pounds of limes instead of 5 pounds... what the hell do I do with 50 pounds of limes? I've already donated a bunch and gave a bunch away. I'm planning on making a bunch of lime-themed cocktails, but... jeez. Ceviche? \n\n RESPONSE A: Lime juice, and zest, then freeze in small quantities.\n\n RESPONSE B: Lime marmalade lol\n\n Which response is better? RESPONSE"
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    y = model.generate(x, max_new_tokens=1)  # generate a single token: "A" or "B"
    return tokenizer.batch_decode(y, skip_special_tokens=True)[0]
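
# A hedged sketch, not part of the original app: the same template can be built
# from user-supplied text instead of the hardcoded example above. The function
# name and parameters here are illustrative assumptions; wiring it up would
# also require matching gr.Textbox inputs in the interface below.
def process_custom(post, response_a, response_b):
    input_text = (
        f"POST: {post}\n\n"
        f" RESPONSE A: {response_a}\n\n"
        f" RESPONSE B: {response_b}\n\n"
        " Which response is better? RESPONSE"
    )
    x = tokenizer([input_text], return_tensors='pt').input_ids.to(device)
    y = model.generate(x, max_new_tokens=1)  # model emits "A" or "B"
    return tokenizer.batch_decode(y, skip_special_tokens=True)[0]
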
title = "Compare Instruction Models to see which one is more helpful"
interface = gr.Interface(fn=process,
                         inputs=[],
                         outputs=[
                             gr.Textbox(label="Preferred response")
                         ],
                         title=title,
                         )
interface.launch(debug=True)