FROM python:3.10-slim

WORKDIR /code

# Install C++ build tools plus the system libraries OpenCV needs at runtime
RUN apt-get update && \
    apt-get install -y build-essential libgl1-mesa-glx libglib2.0-0 && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Copy requirements first so the pip layer is cached across code-only changes
COPY requirements.txt .

# Install all Python dependencies (including pillow)
RUN pip install --upgrade pip && pip install -r requirements.txt

COPY . .

# Point every model cache at the writable /data directory
# (fixes Hugging Face Space permission errors from libraries writing to / or $HOME)
ENV HF_HOME="/data"
ENV TRANSFORMERS_CACHE="/data"
ENV DEEPFACE_HOME="/data"
ENV INSIGHTFACE_HOME="/data/.insightface"
RUN mkdir -p /data/.insightface && \
    chmod -R 777 /data

# (Optional, but speeds up startup) Pre-download BLIP model weights
# RUN python -c "from transformers import BlipProcessor, BlipForConditionalGeneration; BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base'); BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base')"

# (Optional) Pre-download DeepFace models
# RUN python -c "from deepface import DeepFace; DeepFace.analyze('https://raw.githubusercontent.com/serengil/deepface/master/tests/dataset/img1.jpg', actions=['age', 'gender', 'race', 'emotion'], enforce_detection=False)"

# (Optional) Pre-download InsightFace models
# RUN python -c "import insightface; import numpy as np; app = insightface.app.FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider']); app.prepare(ctx_id=0); img = np.zeros((640, 640, 3), dtype=np.uint8); app.get(img)"

EXPOSE 7860

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
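
# For reference, a minimal local build-and-run check; the image tag "face-api"
# is illustrative (not part of this repo), and the app is assumed to live in
# app.py as the module "app:app" named in the CMD above:
#   docker build -t face-api .
#   docker run -p 7860:7860 face-api
# The service should then respond on http://localhost:7860.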