Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -317,24 +317,32 @@ try:
     import os
     # Get HuggingFace token from environment/secrets
     hf_token = os.getenv('HF_TOKEN')
-    if hf_token:
-        os.environ['HF_TOKEN'] = hf_token
-
-    chatbot_model = Gemma3nForConditionalGeneration.from_pretrained(
-        "google/gemma-3n-e4b-it",
-        device_map="auto",
-        torch_dtype=torch.bfloat16,
-        use_auth_token=hf_token
-    ).eval()
-
-    chatbot_processor = AutoProcessor.from_pretrained(
-        "google/gemma-3n-e4b-it",
-        use_auth_token=hf_token
-    )
-
+    print(f"HF_TOKEN found: {'Yes' if hf_token else 'No'}")
+
+    if hf_token:
+        print("Loading chatbot model with token...")
+        chatbot_model = Gemma3nForConditionalGeneration.from_pretrained(
+            "google/gemma-3n-e4b-it",
+            device_map="auto",
+            torch_dtype=torch.bfloat16,
+            token=hf_token  # Use 'token' instead of 'use_auth_token'
+        ).eval()
+
+        chatbot_processor = AutoProcessor.from_pretrained(
+            "google/gemma-3n-e4b-it",
+            token=hf_token  # Use 'token' instead of 'use_auth_token'
+        )
+
+        print("✅ Chatbot model loaded successfully")
+    else:
+        print("❌ No HF_TOKEN found in environment")
+        chatbot_model = None
+        chatbot_processor = None
+
 except Exception as e:
-    print(f"Error loading chatbot model: {e}")
+    print(f"❌ Error loading chatbot model: {e}")
+    import traceback
+    traceback.print_exc()
     chatbot_model = None
     chatbot_processor = None

@@ -342,7 +350,7 @@ except Exception as e:
 # Global state for managing tabs
 processed_markdown = ""
 show_results_tab = False
-chatbot_model
+# chatbot_model is initialized above


 def process_uploaded_pdf(pdf_file, progress=gr.Progress()):
@@ -437,10 +445,12 @@ with gr.Blocks(
     with gr.Tabs() as main_tabs:
         # Home Tab
         with gr.TabItem("🏠 Home", id="home"):
+            chatbot_status = "✅ Chatbot ready" if chatbot_model else "❌ Chatbot not loaded"
             gr.Markdown(
                 "# Scholar Express\n"
                 "### Upload a research paper to get a web-friendly version, an AI chatbot, and a podcast summary. Because of our reliance on Generative AI, some errors are inevitable.\n"
-                f"**
+                f"**PDF Processing:** {model_status}\n"
+                f"**Chatbot:** {chatbot_status}"
             )

             with gr.Column(elem_classes="upload-container"):
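Side note on the auth fix in the first hunk: `use_auth_token=` is the deprecated spelling in recent `transformers` releases and `token=` is the current one, so the swap is the right direction. A minimal sketch for validating the secret before the slow model download starts (assumes `huggingface_hub` is available, which `transformers` already depends on; the env-var lookup mirrors app.py):

```python
import os

from huggingface_hub import whoami

# Same lookup app.py uses; in a Space this is populated from the
# repository's secrets, locally from the shell environment.
hf_token = os.getenv("HF_TOKEN")

if hf_token:
    # whoami() raises if the token is invalid or expired, surfacing
    # auth problems before the multi-GB gemma download begins.
    identity = whoami(token=hf_token)
    print(f"Token is valid for: {identity['name']}")
else:
    print("HF_TOKEN is not set; gated models such as gemma-3n will fail to load.")
```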
|