Spaces · Running on Zero
Commit · c625bd1
Parent: 7c1376e
Remove model preloading at startup to prevent Zero GPU initialization issues; models will now load and cache on first use.
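The hunk below drops the eager preload path and relies on load_model_cached to fetch and memoize each model the first time it is requested. The loader's body is not part of this hunk; a minimal sketch of such a lazy cache, assuming transformers-style checkpoints and placeholder repo IDs (the real app.py may use different repo names or Tranception's own model class), could look like:

    from functools import lru_cache
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Placeholder mapping from UI size names to checkpoint repos
    # (an assumption for illustration, not taken from app.py).
    MODEL_REPOS = {
        "Small": "PascalNotin/Tranception_Small",
        "Medium": "PascalNotin/Tranception_Medium",
        "Large": "PascalNotin/Tranception_Large",
    }

    @lru_cache(maxsize=None)
    def load_model_cached(model_size: str):
        """Download on first call, then reuse the in-process copy on later calls."""
        repo = MODEL_REPOS[model_size]
        tokenizer = AutoTokenizer.from_pretrained(repo)
        # Stand-in loading call; the actual app may instantiate Tranception's
        # custom model class instead of an Auto* class.
        model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
        return model, tokenizer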
app.py CHANGED
@@ -529,24 +529,12 @@ with tranception_design:
     gr.Markdown("<p><b>Tranception: Protein Fitness Prediction with Autoregressive Transformers and Inference-time Retrieval</b><br>Pascal Notin, Mafalda Dias, Jonathan Frazer, Javier Marchena-Hurtado, Aidan N. Gomez, Debora S. Marks<sup>*</sup>, Yarin Gal<sup>*</sup><br><sup>* equal senior authorship</sup></p>")
     gr.Markdown("Links: <a href='https://proceedings.mlr.press/v162/notin22a.html' target='_blank'>Paper</a> <a href='https://github.com/OATML-Markslab/Tranception' target='_blank'>Code</a> <a href='https://sites.google.com/view/proteingym/substitutions' target='_blank'>ProteinGym</a> <a href='https://igem.org/teams/5247' target='_blank'>BASIS-China iGEM Team</a>")
 
-# Preload models function
-def preload_models():
-    """Preload models at startup to avoid downloading during inference"""
-    print("Preloading models at startup...")
-    try:
-        # Try to load Small model (fastest)
-        load_model_cached("Small")
-        print("Small model preloaded successfully")
-    except Exception as e:
-        print(f"Could not preload Small model: {e}")
-
-    # Optionally preload other models
-    # load_model_cached("Medium")
-    # load_model_cached("Large")
-
 if __name__ == "__main__":
-    #
-
+    # Don't preload models at startup - this can cause Zero GPU initialization issues
+    # Models will be loaded and cached on first use
+    print("Starting Tranception app...")
+    print("Note: Models will be downloaded on first use")
+    print("Zero GPU spaces may sleep after ~15 minutes of inactivity")
 
     # Simple launch without queue to avoid Zero GPU conflicts
     tranception_design.launch(
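For context on the "Zero GPU initialization issues" named in the commit message: on ZeroGPU Spaces a GPU is attached only while a function decorated with @spaces.GPU is running, so anything done at import or startup executes in a CPU-only process, and touching CUDA there can fail. A generic sketch of deferring the load into the decorated inference path (not taken from this app.py; score_sequence is a hypothetical entry point reusing the cached loader sketched above):

    import spaces  # ZeroGPU helper package available on Hugging Face Spaces
    import torch

    @spaces.GPU  # a GPU is allocated only for the duration of this call
    def score_sequence(sequence: str, model_size: str = "Small") -> float:
        # First call triggers the lazy download/cache; later calls reuse it.
        model, tokenizer = load_model_cached(model_size)
        model = model.to("cuda").eval()
        inputs = tokenizer(sequence, return_tensors="pt").to("cuda")
        with torch.no_grad():
            out = model(**inputs, labels=inputs["input_ids"])
        # Negative loss as a simple log-likelihood-style fitness score.
        return -out.loss.item()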