cella110n committed (verified)
Commit b922bca · 1 Parent(s): 9c27523

Upload app.py

Files changed (1)
  1. app.py +31 -2
app.py CHANGED
@@ -286,8 +286,8 @@ def visualize_predictions(image: Image.Image, predictions, threshold=0.45):
 
 # Constants
 REPO_ID = "cella110n/cl_tagger"
-MODEL_FILENAME = "cl_eva02_tagger_v1_250426/model_optimized.onnx"
-# MODEL_FILENAME = "cl_eva02_tagger_v1_250426/model.onnx" # Use non-optimized if needed
+# MODEL_FILENAME = "cl_eva02_tagger_v1_250426/model_optimized.onnx"
+MODEL_FILENAME = "cl_eva02_tagger_v1_250426/model.onnx" # Use non-optimized if needed
 TAG_MAPPING_FILENAME = "cl_eva02_tagger_v1_250426/tag_mapping.json"
 CACHE_DIR = "./model_cache"
 
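The constants above feed the `download_model_files()` call in the next hunk, whose body is not part of this diff. For reference, a minimal sketch of what such a helper usually looks like with `huggingface_hub` (the structure and return values here are assumptions, not code from app.py):

```python
# Sketch only: assumes download_model_files() wraps hf_hub_download using the
# constants from the hunk above; the real implementation is not in this diff.
from huggingface_hub import hf_hub_download

REPO_ID = "cella110n/cl_tagger"
MODEL_FILENAME = "cl_eva02_tagger_v1_250426/model.onnx"
TAG_MAPPING_FILENAME = "cl_eva02_tagger_v1_250426/tag_mapping.json"
CACHE_DIR = "./model_cache"

def download_model_files():
    # hf_hub_download returns the local path of the cached file,
    # fetching it from the Hub on first use.
    model_path = hf_hub_download(repo_id=REPO_ID, filename=MODEL_FILENAME, cache_dir=CACHE_DIR)
    tag_mapping_path = hf_hub_download(repo_id=REPO_ID, filename=TAG_MAPPING_FILENAME, cache_dir=CACHE_DIR)
    return model_path, tag_mapping_path
```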
@@ -323,6 +323,35 @@ def initialize_model():
     if onnx_session is None:
         model_path, tag_mapping_path = download_model_files()
         print("Loading model and labels...")
+
+        # --- Added Logging ---
+        print("--- Environment Check ---")
+        try:
+            import torch
+            print(f"PyTorch version: {torch.__version__}")
+            if torch.cuda.is_available():
+                print(f"PyTorch CUDA available: True")
+                print(f"PyTorch CUDA version: {torch.version.cuda}")
+                print(f"Detected GPU: {torch.cuda.get_device_name(0)}")
+                if torch.backends.cudnn.is_available():
+                    print(f"PyTorch cuDNN available: True")
+                    print(f"PyTorch cuDNN version: {torch.backends.cudnn.version()}")
+                else:
+                    print("PyTorch cuDNN available: False")
+            else:
+                print("PyTorch CUDA available: False")
+        except ImportError:
+            print("PyTorch not found.")
+        except Exception as e:
+            print(f"Error during PyTorch check: {e}")
+
+        try:
+            print(f"ONNX Runtime build info: {ort.get_buildinfo()}")
+        except Exception as e:
+            print(f"Error getting ONNX Runtime build info: {e}")
+        print("-------------------------")
+        # --- End Added Logging ---
+
         # Initialize ONNX session (prefer GPU)
         available_providers = ort.get_available_providers()
         print(f"Available ONNX Runtime providers: {available_providers}")
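The context lines closing this hunk lead into the GPU-preferred session setup, which is what the new logging supports: printing PyTorch's view of CUDA/cuDNN and the ONNX Runtime build info helps explain why CUDAExecutionProvider may be missing from `available_providers`. The session creation itself is outside this diff; a sketch of the usual pattern (the function name and provider list are illustrative assumptions, not the app.py code):

```python
# Sketch only: a typical GPU-first provider selection for ONNX Runtime.
# The actual session setup in app.py is not shown in this commit.
import onnxruntime as ort

def create_session(model_path: str) -> ort.InferenceSession:
    available = ort.get_available_providers()
    print(f"Available ONNX Runtime providers: {available}")
    # Use CUDA when the CUDA execution provider is present in this build,
    # otherwise fall back to CPU.
    if "CUDAExecutionProvider" in available:
        providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    else:
        providers = ["CPUExecutionProvider"]
    return ort.InferenceSession(model_path, providers=providers)
```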