import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
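
# Load the fine-tuned BLOOM-560m checkpoint and its tokenizer from the Hugging Face Hub.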
tokenizer = AutoTokenizer.from_pretrained("jslin09/bloom-560m-finetuned-fraud")
model = AutoModelForCausalLM.from_pretrained("jslin09/bloom-560m-finetuned-fraud")

def predict(input, history=[]):
    history = history or []  # empty token-id history on the first turn
    # tokenize the new input sentence and terminate it with the end-of-sequence token
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([torch.LongTensor(history).reshape(1, -1), new_user_input_ids], dim=-1)
    # generate a continuation; max_length and pad_token_id are illustrative defaults, not from the original file
    history = model.generate(bot_input_ids, max_length=500, pad_token_id=tokenizer.eos_token_id).tolist()
    # decode only the newly generated tokens for display and keep the full token ids as session state
    response = tokenizer.decode(history[0][bot_input_ids.shape[-1]:], skip_special_tokens=True)
    return response, history
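
# Minimal Gradio wiring (a sketch: the original snippet stops before this point).
# The component choices below -- a text box for the prompt plus "state" carrying the
# running token history between turns -- are assumptions, not taken from the source file.
demo = gr.Interface(
    fn=predict,
    inputs=[gr.Textbox(label="Prompt"), "state"],
    outputs=[gr.Textbox(label="Generated text"), "state"],
)

if __name__ == "__main__":
    demo.launch()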