# TI / app.py — Streamlit text-generation demo
# Source: Hugging Face Space by rajrakeshdr, commit 3d37119 (file size 1.39 kB)
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Turn off the safetensors fast-GPU loading path before any model weights load.
import os

os.environ.update({"SAFETENSORS_FAST_GPU": "0"})
# Cache the model and tokenizer so Streamlit reruns reuse a single copy.
@st.cache_resource
def load_model_and_tokenizer():
    """Load and cache the IntelliSoc causal-LM and its tokenizer.

    Returns:
        tuple: (model, tokenizer) — the `transformers` model in eval mode
        and its matching tokenizer.
    """
    model_name = "rajrakeshdr/IntelliSoc"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Fix: GPT-style causal-LM tokenizers often ship without a pad token,
    # and the call site tokenizes with padding=True, which would raise.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    # NOTE(review): use_safetensors=False forces pickle-based .bin weights;
    # acceptable only because this specific checkpoint is trusted.
    model = AutoModelForCausalLM.from_pretrained(model_name, use_safetensors=False)
    model.eval()  # inference only; disables dropout etc.
    return model, tokenizer
# Load the (cached) model and tokenizer once per session.
model, tokenizer = load_model_and_tokenizer()

# Streamlit app title
st.title("IntelliSoc Text Generation")

# Input prompt
prompt = st.text_area("Enter your prompt:", "Once upon a time")

# Generate text on button click
if st.button("Generate Text"):
    # Tokenize input; padding=True requires the tokenizer to have a pad token.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)

    # Generate text
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            # Fix: pass the attention mask so generate() can distinguish
            # padding from content instead of guessing.
            attention_mask=inputs.attention_mask,
            max_length=100,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            # Fix: without do_sample=True, generation is greedy and
            # top_k / top_p / temperature below are silently ignored.
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
        )

    # Decode the generated text
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Display the generated text
    st.write("Generated Text:")
    st.write(generated_text)