arhamm40182 committed on
Commit
61c17d7
·
1 Parent(s): d3eeadf

Initial Implementation

Browse files
Files changed (2) hide show
  1. app.py +24 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
+
4
@st.cache_resource
def load_model(model_name: str = "google/flan-t5-small"):
    """Load a seq2seq model and wrap it in a text2text-generation pipeline.

    The result is cached across Streamlit reruns via ``st.cache_resource``,
    so the model weights are downloaded and loaded only once per process.

    Args:
        model_name: Hugging Face model id to load. Defaults to
            ``"google/flan-t5-small"``, preserving the original behavior.

    Returns:
        A ``transformers`` pipeline for the ``"text2text-generation"`` task.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return pipeline("text2text-generation", model=model, tokenizer=tokenizer)
9
+
10
# --- Page chrome -----------------------------------------------------------
st.set_page_config(page_title="LLM Demo", layout="centered")
st.title("🚀 FLAN-T5 Small - HuggingFace Demo")

# Cached pipeline: constructed once, reused on every Streamlit rerun.
pipe = load_model()

# --- Input -----------------------------------------------------------------
user_input = st.text_area("Enter your instruction or question:", "")

# --- Generation ------------------------------------------------------------
if st.button("Generate Response"):
    # Guard clause: reject blank / whitespace-only prompts up front.
    if not user_input.strip():
        st.warning("Please enter some text.")
    else:
        with st.spinner("Generating..."):
            result = pipe(user_input, max_new_tokens=100)
            generated = result[0]["generated_text"]
        st.success("### Response:")
        st.write(generated)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ transformers
2
+ torch
3
+ streamlit