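"""WizNerd Insp — a Streamlit chat app that streams answers from the
amiguel/optimizedModelListing6.1 causal LM, optionally grounding each
question in text extracted from an uploaded PDF or XLSX file."""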
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
from huggingface_hub import login
from threading import Thread
import PyPDF2
import pandas as pd
import torch
# Set page configuration
st.set_page_config(
    page_title="WizNerd Insp",
    page_icon="🚀",
    layout="centered"
)
# Correct model name
MODEL_NAME = "amiguel/optimizedModelListing6.1"
# Title with rocket emojis
st.title("🚀 WizNerd Insp 🚀")
# Sidebar configuration
with st.sidebar:
    st.header("Authentication 🔒")
    hf_token = st.text_input("Hugging Face Token", type="password",
                             help="Get your token from https://huggingface.co/settings/tokens")

    st.header("Upload Documents 📂")
    uploaded_file = st.file_uploader(
        "Choose a PDF or XLSX file",
        type=["pdf", "xlsx"],
        label_visibility="collapsed"
    )
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Process uploaded files
@st.cache_data
def process_file(uploaded_file):
    if uploaded_file is None:
        return ""

    try:
        if uploaded_file.type == "application/pdf":
            pdf_reader = PyPDF2.PdfReader(uploaded_file)
            # extract_text() can return None for image-only pages
            return "\n".join([(page.extract_text() or "") for page in pdf_reader.pages])
        elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
            df = pd.read_excel(uploaded_file)
            return df.to_markdown()
        else:
            return ""
    except Exception as e:
        st.error(f"📄 Error processing file: {str(e)}")
        return ""
# Load model and tokenizer with authentication
@st.cache_resource
def load_model(hf_token):
    try:
        if hf_token:
            login(token=hf_token)
        else:
            st.error("🔐 Authentication required!")
            return None, None

        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            token=hf_token
        )
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            device_map="auto",
            torch_dtype=torch.float16,
            token=hf_token
        )
        return model, tokenizer
    except Exception as e:
        st.error(f"🤖 Model loading failed: {str(e)}")
        return None, None
# Generate responses with streaming
def generate_response(prompt, file_context, model, tokenizer):
    full_prompt = f"""Analyze this context:
{file_context}
Question: {prompt}
Answer:"""

    streamer = TextIteratorStreamer(
        tokenizer,
        skip_prompt=True,
        skip_special_tokens=True
    )

    inputs = tokenizer(
        full_prompt,
        return_tensors="pt",
        max_length=4096,
        truncation=True
    ).to(model.device)

    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=1024,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True,
        use_cache=True
    )

    # Run generation in a background thread so the streamer can be consumed as tokens arrive
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    return streamer
# Display chat messages
for message in st.session_state.messages:
    try:
        avatar = "👤" if message["role"] == "user" else "🤖"
        with st.chat_message(message["role"], avatar=avatar):
            st.markdown(message["content"])
    except Exception:
        # Fall back to the default avatar if this Streamlit version rejects the avatar argument
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
# Chat input handling
if prompt := st.chat_input("Ask your inspection question..."):
    if not hf_token:
        st.error("🔑 Authentication required!")
        st.stop()

    # Load model if not loaded
    if "model" not in st.session_state:
        st.session_state.model, st.session_state.tokenizer = load_model(hf_token)
    model = st.session_state.model
    tokenizer = st.session_state.tokenizer

    # Add user message
    with st.chat_message("user", avatar="👤"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Process file
    file_context = process_file(uploaded_file)

    # Generate response
    if model and tokenizer:
        try:
            with st.chat_message("assistant", avatar="🤖"):
                streamer = generate_response(prompt, file_context, model, tokenizer)
                response_container = st.empty()
                full_response = ""

                for chunk in streamer:
                    # Remove <think> tags and clean text
                    cleaned_chunk = chunk.replace("<think>", "").replace("</think>", "").strip()
                    full_response += cleaned_chunk + " "
                    # Update display with typing cursor
                    response_container.markdown(full_response + "▌", unsafe_allow_html=True)

                # Display final response
                response_container.markdown(full_response)
                st.session_state.messages.append({"role": "assistant", "content": full_response})

        except Exception as e:
            st.error(f"⚡ Generation error: {str(e)}")
    else:
        st.error("🤖 Model not loaded!")