import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

st.title("Uzmi GPT - Romantic Quote Generator")

# Cache the tokenizer and model so they are loaded only once per session
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("rajan3208/uzmi-gpt")
    model = AutoModelForCausalLM.from_pretrained("rajan3208/uzmi-gpt")
    return tokenizer, model

tokenizer, model = load_model()

prompt = st.text_area("Enter a prompt", "A romantic quote about forever")

if st.button("Generate"):
    # Tokenize the prompt and generate up to 50 new tokens without tracking gradients
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=50)
    generated = tokenizer.decode(output[0], skip_special_tokens=True)
    st.success(generated)