roll-ai committed on
Commit c94624b · verified · 1 Parent(s): abb7d8e

Update Dockerfile

Files changed (1)
  1. Dockerfile +37 -38
Dockerfile CHANGED
@@ -1,44 +1,43 @@
-FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04
-
-ENV DEBIAN_FRONTEND=noninteractive \
-    PYTHONDONTWRITEBYTECODE=1 \
-    PYTHONUNBUFFERED=1 \
-    PIP_NO_CACHE_DIR=1 \
-    HF_HOME=/root/.cache/huggingface \
-    TRANSFORMERS_CACHE=/root/.cache/huggingface/transformers \
-    GRADIO_SERVER_NAME=0.0.0.0 \
-    GRADIO_SERVER_PORT=8080 \
-    DISPLAY=:99
-
-# System deps
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    python3 python3-pip python3-venv \
-    git git-lfs \
-    ffmpeg libgl1 libgl1-mesa-dri libglib2.0-0 xvfb \
-    && git lfs install \
-    && rm -rf /var/lib/apt/lists/*
+FROM pytorch/pytorch:2.2.2-cuda12.1-cudnn8-runtime
 
+SHELL ["/bin/bash", "-c"]
+
+# Environment variables for Hugging Face cache
+ENV HF_HOME=/app/hf_cache
+ENV TRANSFORMERS_CACHE=/app/hf_cache
+ENV HF_TOKEN=${HF_TOKEN}
+ENV PATH=/opt/conda/bin:$PATH
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    git wget curl unzip ffmpeg libgl1-mesa-glx libglib2.0-0 && \
+    apt-get clean
+
+# Set up working directory as /app
 WORKDIR /app
+
+# Copy project into /app
+COPY . /app
+
+# Fix permissions for all subdirectories
+RUN mkdir -p /app/pretrained /app/hf_cache /.cache/gdown && \
+    chmod -R 777 /app && \
+    chmod -R 777 /.cache && \
+    chmod -R 777 /root
+
+# Create conda environment and install dependencies
 COPY requirements.txt /app/requirements.txt
+RUN conda create -n epic python=3.10 -y && \
+    conda run -n epic pip install --upgrade pip && \
+    conda run -n epic pip install -r /app/requirements.txt
 
-# Torch first, then everything except deepspeed / flash-attn
-RUN python3 -m pip install --upgrade pip \
-    && python3 -m pip install --extra-index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio \
-    && awk '!/^ *#/ && !/deepspeed/ && !/flash-attn/ && !/flash_attn/ {print $0}' requirements.txt > requirements.infer.txt \
-    && python3 -m pip install -r requirements.infer.txt \
-    && python3 -m pip install huggingface_hub imageio-ffmpeg
+RUN chmod -R 777 /app /workspace
 
-# App code
-COPY . /app
+# # List contents (for debug)
+RUN ls -la /app
+RUN pip install gradio
+
+# Expose Gradio default port
+EXPOSE 7860
 
-EXPOSE 8080
-ENV DEVICE=cuda \
-    SAVE_FPS=16 \
-    RESULT_DIR=/app/results \
-    MODEL_META_PATH=demo/models.json \
-    EXAMPLE_META_PATH=demo/examples.json \
-    CAMERA_POSE_META_PATH=demo/camera_poses.json \
-    DEPTH_MODEL_PATH=pretrained/Metric3D/metric_depth_vit_large_800k.pth \
-    CAPTION_MODEL_PATH=pretrained/Qwen2.5-VL-7B-Instruct
-
-CMD ["bash", "-lc", "xvfb-run -s '-screen 0 1280x720x24' python3 gradio_app.py"]
+# Start the Gradio app
+CMD ["conda", "run", "--no-capture-output", "-n", "epic", "python", "gradio_batch.py"]