File size: 1,040 Bytes
ab964e2
c129502
df2fe71
ab964e2
c129502
 
df2fe71
 
 
 
 
 
1c905f8
ab964e2
c129502
 
ab964e2
c129502
ab964e2
c129502
ab964e2
c129502
1c905f8
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import os

@st.cache_resource
def load_model():
    """Build (and cache across reruns) a text2text-generation pipeline.

    Weights are stored in a project-local directory so repeated runs
    reuse the download instead of fetching from the HuggingFace hub.
    """
    # Project-local cache instead of the global ~/.cache/huggingface dir.
    local_cache = "./model_cache"
    os.makedirs(local_cache, exist_ok=True)

    model_id = "google/flan-t5-small"
    tok = AutoTokenizer.from_pretrained(model_id, cache_dir=local_cache)
    seq2seq = AutoModelForSeq2SeqLM.from_pretrained(model_id, cache_dir=local_cache)
    return pipeline("text2text-generation", model=seq2seq, tokenizer=tok)

st.set_page_config(page_title="LLM Demo", layout="centered")
st.title("πŸš€ FLAN-T5 Small - HuggingFace Demo")

# Cached via @st.cache_resource, so reruns reuse the loaded pipeline.
pipe = load_model()

user_input = st.text_area("Enter your instruction or question:", "")

if st.button("Generate Response"):
    # Truthiness check (PEP 8) instead of comparing the stripped text to "".
    if not user_input.strip():
        st.warning("Please enter some text.")
    else:
        # Keep the spinner scoped to the slow model call only; render
        # the result after it completes.
        with st.spinner("Generating..."):
            # pipeline returns a list of dicts; take the first generation.
            output = pipe(user_input, max_new_tokens=100)[0]["generated_text"]
        st.success("### Response:")
        st.write(output)