amiguel committed on
Commit
a7ba67c
·
verified ·
1 Parent(s): ec4d167

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -6,6 +6,7 @@ import PyPDF2
6
  import pandas as pd
7
  import torch
8
  import time
 
9
 
10
  # Check if 'peft' is installed
11
  try:
@@ -23,8 +24,8 @@ st.set_page_config(
23
  layout="centered"
24
  )
25
 
26
- # Hardcoded Hugging Face token (replace with your actual token)
27
- HF_TOKEN = "your_hugging_face_token_here"
28
 
29
  # Model names
30
  BASE_MODEL_NAME = "google-bert/bert-base-uncased"
@@ -80,7 +81,7 @@ def process_file(uploaded_file):
80
  def load_model(hf_token, model_type, selected_model):
81
  try:
82
  if not hf_token:
83
- st.error("🔐 Authentication required! Please provide a valid Hugging Face token.")
84
  return None
85
 
86
  login(token=hf_token)
@@ -163,7 +164,7 @@ if prompt := st.chat_input("Ask your inspection question..."):
163
  if "model" not in st.session_state or st.session_state.get("model_type") != model_type:
164
  model_data = load_model(HF_TOKEN, model_type, selected_model)
165
  if model_data is None:
166
- st.error("Failed to load model. Please check your token and try again.")
167
  st.stop()
168
 
169
  st.session_state.model, st.session_state.tokenizer = model_data
@@ -177,7 +178,7 @@ if prompt := st.chat_input("Ask your inspection question..."):
177
  st.markdown(prompt)
178
  st.session_state.messages.append({"role": "user", "content": prompt})
179
 
180
- # Process file
181
  file_context = process_file(uploaded_file)
182
 
183
  # Generate response with KV caching
 
6
  import pandas as pd
7
  import torch
8
  import time
9
+ import os
10
 
11
  # Check if 'peft' is installed
12
  try:
 
24
  layout="centered"
25
  )
26
 
27
+ # Load Hugging Face token from environment variable
28
+ HF_TOKEN = os.getenv("HF_TOKEN") # Set this in your environment, e.g., via export HF_TOKEN="your_token"
29
 
30
  # Model names
31
  BASE_MODEL_NAME = "google-bert/bert-base-uncased"
 
81
  def load_model(hf_token, model_type, selected_model):
82
  try:
83
  if not hf_token:
84
+ st.error("🔐 Authentication required! Please set the HF_TOKEN environment variable.")
85
  return None
86
 
87
  login(token=hf_token)
 
164
  if "model" not in st.session_state or st.session_state.get("model_type") != model_type:
165
  model_data = load_model(HF_TOKEN, model_type, selected_model)
166
  if model_data is None:
167
+ st.error("Failed to load model. Please ensure HF_TOKEN is set correctly.")
168
  st.stop()
169
 
170
  st.session_state.model, st.session_state.tokenizer = model_data
 
178
  st.markdown(prompt)
179
  st.session_state.messages.append({"role": "user", "content": prompt})
180
 
181
+ # Process file
182
  file_context = process_file(uploaded_file)
183
 
184
  # Generate response with KV caching