# src/streamlit_app.py
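"""Minimal Streamlit demo that serves google/flan-t5-small for text-to-text generation.

Launch locally with:

    streamlit run src/streamlit_app.py
"""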
import os

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline


@st.cache_resource
def load_model():
    """Load the tokenizer and model once per process; Streamlit reuses them across reruns."""
    # Cache model weights in a local directory so they are not re-downloaded on restart
    cache_dir = "./model_cache"
    os.makedirs(cache_dir, exist_ok=True)
    tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small", cache_dir=cache_dir)
    model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small", cache_dir=cache_dir)
    return pipeline("text2text-generation", model=model, tokenizer=tokenizer)
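# Note: the returned "text2text-generation" pipeline is directly callable and
# yields a list of dicts, one per input, each with a "generated_text" key, e.g.
#   pipe("Answer: who wrote Hamlet?", max_new_tokens=20)
#   # -> [{"generated_text": "..."}]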
st.set_page_config(page_title="LLM Demo", layout="centered")
st.title("🚀 FLAN-T5 Small - HuggingFace Demo")

pipe = load_model()

user_input = st.text_area("Enter your instruction or question:", "")

if st.button("Generate Response"):
    if user_input.strip() == "":
        st.warning("Please enter some text.")
    else:
        with st.spinner("Generating..."):
            # The pipeline returns a list of dicts; take the first result's generated text
            output = pipe(user_input, max_new_tokens=100)[0]["generated_text"]
        st.success("### Response:")
        st.write(output)