LiamKhoaLe committed on
Commit
c7408dc
·
1 Parent(s): c93d8f0

2 layers for model loading

Browse files
Files changed (2)
  1. Dockerfile +4 -1
  2. app.py +9 -18
Dockerfile CHANGED
@@ -1,6 +1,6 @@
1
  FROM python:3.11
2
 
3
- # Create and use a non-root user
4
  RUN useradd -m -u 1000 user
5
  USER user
6
  ENV PATH="/home/user/.local/bin:$PATH"
@@ -26,6 +26,9 @@ RUN mkdir -p $SENTENCE_TRANSFORMERS_HOME && \
26
  RUN python -c "from huggingface_hub import snapshot_download; \
27
  snapshot_download(repo_id='sentence-transformers/all-MiniLM-L6-v2', cache_dir='/app/model_cache')"
28
 
 
 
 
29
  # Ensure ownership and permissions remain intact
30
  RUN chown -R user:user /app/model_cache
31
 
 
1
  FROM python:3.11
2
 
3
+ # Create and use a non-root user (optional)
4
  RUN useradd -m -u 1000 user
5
  USER user
6
  ENV PATH="/home/user/.local/bin:$PATH"
 
26
  RUN python -c "from huggingface_hub import snapshot_download; \
27
  snapshot_download(repo_id='sentence-transformers/all-MiniLM-L6-v2', cache_dir='/app/model_cache')"
28
 
29
+ # Ensure the model files are available at runtime (list out)
30
+ RUN ls -l /app/model_cache && cat /app/model_cache/config.json
31
+
32
  # Ensure ownership and permissions remain intact
33
  RUN chown -R user:user /app/model_cache
34
 
app.py CHANGED
@@ -60,10 +60,10 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
60
  # os.environ["HF_HOME"] = huggingface_cache_dir # Use this folder for HF cache
61
  # 2b) Setup Hugging Face Cloud project model cache
62
  hf_cache_dir = "/home/user/.cache/huggingface"
63
- os.environ["HF_HOME"] = hf_cache_dir
64
- os.environ["SENTENCE_TRANSFORMERS_HOME"] = hf_cache_dir
65
  # Model storage location
66
  model_cache_dir = "/app/model_cache"
 
 
67
  # 3. Download (or load from cache) the SentenceTransformer model
68
  from huggingface_hub import snapshot_download
69
  print("⏳ Checking or downloading the all-MiniLM-L6-v2 model from huggingface_hub...")
@@ -74,22 +74,13 @@ if os.path.exists(model_cache_dir) and os.path.exists(os.path.join(model_cache_d
74
  model_loc = model_cache_dir
75
  # Else, try loading backup from snapshot_download
76
  else:
77
- try:
78
- model_loc = snapshot_download(
79
- repo_id="sentence-transformers/all-MiniLM-L6-v2",
80
- cache_dir=hf_cache_dir,
81
- local_files_only=True # ✅ Ensure it's loaded from cache
82
- )
83
- print(f"✅ Model loaded from local cache: {model_loc}")
84
- except Exception as e:
85
- print(f"❌ Error loading model from cache: {e}")
86
- print("⚠️ Retrying with online download enabled...")
87
- model_loc = snapshot_download(
88
- repo_id="sentence-transformers/all-MiniLM-L6-v2",
89
- cache_dir=hf_cache_dir,
90
- local_files_only=False # ✅ Fallback to online download
91
- )
92
- print(f"✅ Model directory after retry: {model_loc}")
93
 
94
  from sentence_transformers import SentenceTransformer
95
  print("📥 **Loading Embedding Model...**")
 
60
  # os.environ["HF_HOME"] = huggingface_cache_dir # Use this folder for HF cache
61
  # 2b) Setup Hugging Face Cloud project model cache
62
  hf_cache_dir = "/home/user/.cache/huggingface"
 
 
63
  # Model storage location
64
  model_cache_dir = "/app/model_cache"
65
+ os.environ["HF_HOME"] = hf_cache_dir
66
+ os.environ["SENTENCE_TRANSFORMERS_HOME"] = hf_cache_dir
67
  # 3. Download (or load from cache) the SentenceTransformer model
68
  from huggingface_hub import snapshot_download
69
  print("⏳ Checking or downloading the all-MiniLM-L6-v2 model from huggingface_hub...")
 
74
  model_loc = model_cache_dir
75
  # Else, try loading backup from snapshot_download
76
  else:
77
+ print(f"❌ Model not found in {model_cache_dir}. This should not happen!")
78
+ print("⚠️ Retrying with snapshot_download...")
79
+ model_loc = snapshot_download(
80
+ repo_id="sentence-transformers/all-MiniLM-L6-v2",
81
+ cache_dir=hf_cache_dir,
82
+ local_files_only=True # Change to `False` for fallback to online download
83
+ )
 
 
 
 
 
 
 
 
 
84
 
85
  from sentence_transformers import SentenceTransformer
86
  print("📥 **Loading Embedding Model...**")