# Start from a clean NVIDIA CUDA base image.
# This provides the necessary CUDA runtime and development tools.
# Using 12.4.0-devel-ubuntu22.04 to align with the CUDA version specified in your cosmos-predict1.yaml.
FROM nvidia/cuda:12.4.0-devel-ubuntu22.04
# Set environment variables for non-interactive installations to prevent prompts during apt-get.
ENV DEBIAN_FRONTEND=noninteractive
# Define the base directory for Conda installation.
ENV CONDA_DIR=/opt/conda
# Add Conda's binary directory to the system's PATH.
ENV PATH=$CONDA_DIR/bin:$PATH
# Set the working directory inside the container. All subsequent commands will run from here.
WORKDIR /app
# Install essential system dependencies required for Miniconda and general build processes.
# This includes wget for downloading, git for cloning (if needed), build-essential for compiling,
# and libgl1-mesa-glx for graphics-related libraries often used by ML frameworks.
RUN apt-get update && apt-get install -y --no-install-recommends \
wget \
git \
build-essential \
libgl1-mesa-glx \
# Clean up apt cache to reduce image size
&& rm -rf /var/lib/apt/lists/*
# Install Miniconda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh && \
/bin/bash miniconda.sh -b -p $CONDA_DIR && \
rm miniconda.sh && \
conda clean --all --yes && \
conda config --set auto_activate_base false && \
conda config --add channels conda-forge
# --- NEW: Accept Conda Terms of Service for the default channels ---
# This is required for non-interactive environments like Docker builds.
# The 'conda' binary is already on PATH (see the ENV PATH line above), so no shell
# activation is needed; note that 'source' would fail here anyway, because Docker's
# default RUN shell is /bin/sh, not bash.
RUN conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/main && \
    conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/r
# --- END NEW ---
# Copy all local project files into the container's working directory (/app).
# This includes your cosmos-predict1.yaml, gui/requirements.txt, start.sh, etc.
COPY . /app
# Create the Conda environment named 'cosmos-predict1' using the provided YAML file.
# This step will install all specified Python, PyTorch, CUDA, and pip dependencies.
RUN conda env create -f cosmos-predict1.yaml
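# For reference, cosmos-predict1.yaml is a standard Conda environment file. A minimal
# sketch of the expected shape is below (illustrative only; the actual file shipped in
# this repo is the source of truth for package names and versions):
#
#   name: cosmos-predict1
#   channels:
#     - conda-forge
#   dependencies:
#     - python=3.10
#     - pip
#     - pip:
#         - torch
#         - uvicorn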
# Set the default Conda environment to be activated.
ENV CONDA_DEFAULT_ENV=cosmos-predict1
# Add the newly created Conda environment's binary directory to the PATH.
# This ensures that executables (like python, pip, uvicorn) from this environment are found.
ENV PATH=$CONDA_DIR/envs/cosmos-predict1/bin:$PATH
# --- Verification Steps (Optional, but highly recommended for debugging) ---
# These commands help confirm that Python, Conda, and PyTorch are set up correctly.
RUN echo "Verifying Python and Conda installations..."
RUN python --version
RUN conda env list
RUN echo "Verifying PyTorch and CUDA availability..."
# Use 'conda run' to explicitly run the command within the 'cosmos-predict1' environment
RUN conda run -n cosmos-predict1 python -c "import torch; print(f'PyTorch Version: {torch.__version__}'); print(f'CUDA Available: {torch.cuda.is_available()}'); print(f'CUDA Device Name: {torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'N/A'}')" || echo "PyTorch verification failed. Check dependencies in cosmos-predict1.yaml."
# --- End Verification Steps ---
# Make the start.sh script executable.
RUN chmod +x /app/start.sh
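# For context, start.sh is expected to launch the GUI server from inside the Conda
# environment. A hypothetical sketch follows (the actual script shipped with this repo
# is authoritative; the module path and port below are placeholders):
#
#   #!/bin/bash
#   source /opt/conda/etc/profile.d/conda.sh
#   conda activate cosmos-predict1
#   exec uvicorn gui.app:app --host 0.0.0.0 --port 7860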
# Set the default command to run when the container starts.
# This will execute your start.sh script.
CMD ["/app/start.sh"]
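# Example usage (illustrative; the image tag and host port are arbitrary choices):
#   docker build -t gen3c .
#   docker run --gpus all -p 7860:7860 gen3c
# The '--gpus all' flag requires the NVIDIA Container Toolkit on the host so that
# torch.cuda.is_available() returns True inside the running container.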