oberbics committed on
Commit
bf785f5
·
verified ·
1 Parent(s): 98d16bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -31
app.py CHANGED
@@ -84,38 +84,7 @@ class SafeGeocoder:
84
 
85
 
86
  # Corrected model loading function based on official usage example
87
- def load_model():
88
- global tokenizer, model
89
- try:
90
- if model is None:
91
- # Only load the tokenizer first (no CUDA initialization)
92
- try:
93
- from modelscope import AutoTokenizer as MSAutoTokenizer
94
- tokenizer = MSAutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
95
- print("Loaded tokenizer using modelscope AutoTokenizer")
96
- except:
97
- # Fall back to regular tokenizer
98
- tokenizer = AutoTokenizer.from_pretrained(
99
- MODEL_NAME,
100
- trust_remote_code=True,
101
- revision="main"
102
- )
103
- print("Loaded tokenizer using standard AutoTokenizer")
104
-
105
- # For the model, we'll only create a loading configuration but not actually load it yet
106
- # This avoids CUDA initialization in the main process
107
- print(f"Tokenizer successfully loaded, model will be loaded when needed")
108
- return "✅ Tokenizer erfolgreich geladen. Model wird bei Bedarf geladen."
109
-
110
- except Exception as e:
111
- import traceback
112
- trace = traceback.format_exc()
113
- print(f"Error loading tokenizer: {e}\n{trace}")
114
- return f"❌ Fehler beim Laden des Tokenizers: {str(e)}"
115
 
116
- # Then, modify your extract_info function to load the model on first use
117
- @spaces.GPU
118
- @spaces.GPU
119
  def extract_info(template, text):
120
  global tokenizer, model
121
 
 
84
 
85
 
86
  # Corrected model loading function based on official usage example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
 
 
 
88
  def extract_info(template, text):
89
  global tokenizer, model
90