FROM python:3.10-slim
WORKDIR /code
# Copy only requirements first so the dependency layers are cached between builds
COPY requirements.txt .

# Install C++ build tools (needed to compile requirements without prebuilt
# wheels, e.g. insightface) and the shared libraries OpenCV needs at runtime
# (libgl1-mesa-glx for libGL.so.1, libglib2.0-0 for libgthread)
RUN apt-get update && \
    apt-get install -y build-essential libgl1-mesa-glx libglib2.0-0 && \
    apt-get clean


# Install all Python dependencies (including pillow)
RUN pip install --upgrade pip && pip install -r requirements.txt

# Copy the rest of the application code
COPY . .

# Point every model/cache home at /data so nothing tries to write to /
# (fixes Hugging Face Space permission errors)
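# HF_HOME/TRANSFORMERS_CACHE point the Hugging Face / transformers caches at
# /data, DEEPFACE_HOME relocates DeepFace's weight store, and INSIGHTFACE_HOME
# is set here as the intended location for InsightFace model files.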
ENV HF_HOME="/data"
ENV TRANSFORMERS_CACHE="/data"
ENV DEEPFACE_HOME="/data"
ENV INSIGHTFACE_HOME="/data/.insightface"
RUN mkdir -p /data/.insightface && \
    chmod -R 777 /data

# (Optional, but speeds up runtime) Pre-download BLIP model weights
# RUN python -c "from transformers import BlipProcessor, BlipForConditionalGeneration; BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base'); BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base')"

# Pre-download DeepFace models
# RUN python -c "from deepface import DeepFace; DeepFace.analyze('https://raw.githubusercontent.com/serengil/deepface/master/tests/dataset/img1.jpg', actions=['age', 'gender', 'race', 'emotion'], enforce_detection=False)"
# Pre-download InsightFace models
# RUN python -c "import insightface; import numpy as np; app = insightface.app.FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider']); app.prepare(ctx_id=0); img = np.zeros((640, 640, 3), dtype=np.uint8); app.get(img)"
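# Note: the pre-download commands above need network access at build time and
# bake the model weights into the image; leaving them commented out means the
# models are fetched into the /data caches on first use instead.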



EXPOSE 7860
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]