import streamlit as st
import torch

from transformers import AutoTokenizer, AutoModelForCausalLM

st.title("Uzmi GPT - Romantic Quote Generator")

# Cache the tokenizer and model so they are downloaded and loaded only once
# per session, not on every Streamlit rerun.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("rajan3208/uzmi-gpt")
    model = AutoModelForCausalLM.from_pretrained("rajan3208/uzmi-gpt")
    return tokenizer, model

tokenizer, model = load_model()

prompt = st.text_area("Enter a prompt", "A romantic quote about forever")

if st.button("Generate"):
    # Tokenize the prompt and generate up to 50 new tokens.
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # inference only; skip gradient tracking
        output = model.generate(**inputs, max_new_tokens=50)
    generated = tokenizer.decode(output[0], skip_special_tokens=True)
    st.success(generated)
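
# To try the app locally (a minimal sketch, assuming this script is saved as
# app.py and the dependencies are installed):
#
#     pip install streamlit transformers torch
#     streamlit run app.py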
|
|