# Dockerfile to support Translations API Build - works locally and on Hugging Face Spaces

# Frontend build stage
FROM node:18-alpine AS frontend-build
WORKDIR /app/frontend
COPY frontend/package.json frontend/package-lock.json* ./
RUN npm install
COPY frontend/ ./
RUN npm run build
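# The frontend build output lands in /app/frontend/dist and is copied into the
# API image further down (see the COPY --from=frontend-build step).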
# API runtime stage
FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04 AS base
ENV PYTHON_VERSION=3.10 \
    PYTHON_VERSION_SHORT=310

# Constants
ENV PYTHONUNBUFFERED=TRUE
ARG DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get upgrade -y
RUN apt-get install -y \
    build-essential \
    wget \
    python${PYTHON_VERSION} \
    python3-pip \
    libpq-dev
# Set up user with UID 1000 for HF Spaces compatibility
RUN useradd -m -u 1000 user

# Install base utilities and linux packages
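# The --mount=type=cache flags below (and elsewhere in this file) require BuildKit.
# Example build, assuming a hypothetical image tag:
#   DOCKER_BUILDKIT=1 docker build -t translations-api .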
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked --mount=type=cache,target=/var/lib/apt \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
    fakeroot \
    ca-certificates \
    curl \
    vim \
    ssh \
    wget \
    gcc \
    git && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Install miniconda
ENV CONDA_DIR=/opt/conda
# Put conda in PATH and install
ENV PATH=$CONDA_DIR/bin:$PATH
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh \
    && /bin/bash ~/miniconda.sh -b -p /opt/conda
RUN conda config --set auto_activate_base false && \
    conda config --set channel_priority flexible && \
    mkdir -p ~/.conda && \
    echo "channel_priority: flexible" > ~/.condarc && \
    conda config --add channels conda-forge && \
    conda config --set remote_max_retries 5 && \
    conda config --set remote_connect_timeout_secs 30 && \
    conda config --set remote_read_timeout_secs 30 && \
    conda config --set show_channel_urls True && \
    conda config --set auto_update_conda False && \
    conda config --set notify_outdated_conda False && \
    conda config --set report_errors False && \
    conda config --set always_yes True && \
    conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/main && \
    conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/r && \
    conda clean -afy
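# Note: the `conda tos accept` calls above keep newer conda releases, which
# otherwise prompt to accept the Anaconda channel Terms of Service, from
# blocking this non-interactive build.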
RUN --mount=type=cache,target=/opt/conda/pkgs \
    conda config --set channel_priority false && \
    conda create -n translations-api python=${PYTHON_VERSION} -y && \
    conda install -n translations-api -c conda-forge libsndfile=1.0.31 -y
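# The SHELL override below makes every following RUN execute inside the
# `translations-api` conda environment (via `conda run`), so `pip` and `python`
# resolve to that environment rather than the system interpreter.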
# Enable conda
SHELL ["conda", "run", "-n", "translations-api", "/bin/bash", "-c"]
# Set up working directory and environment for user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

# Copy requirements.txt before installing dependencies
COPY --chown=user requirements.txt ./

# Install Python dependencies
RUN --mount=type=cache,target=/opt/conda/pkgs --mount=type=cache,target=/root/.cache/pip \
    pip install -r requirements.txt

# Install fairseq2 with specific index URL - GPU version with CUDA 12.4
RUN --mount=type=cache,target=/opt/conda/pkgs --mount=type=cache,target=/root/.cache/pip \
    pip install torch==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124 --no-deps && \
    pip install torchaudio==2.6.0 --extra-index-url https://download.pytorch.org/whl/cu124 --no-deps && \
    pip install fairseq2 --extra-index-url https://fair.pkg.atmeta.com/fairseq2/whl/pt2.6.0/cu124
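# Note: the pinned torch/torchaudio 2.6.0 builds, the cu124 wheel index, and the
# fairseq2 index path (pt2.6.0/cu124) are meant to stay in sync with each other
# and with the CUDA 12.4 base image above; bump them together if upgrading.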
# Install debugpy for development debugging
RUN --mount=type=cache,target=/opt/conda/pkgs --mount=type=cache,target=/root/.cache/pip \
    pip install debugpy

# Copy server code into the image with proper ownership
COPY --chown=user ./server $HOME/app/server

# Copy frontend build from the frontend-build stage
COPY --from=frontend-build --chown=user /app/frontend/dist $HOME/app/frontend/dist

# Make scripts executable and create directories with proper ownership
RUN chmod +x $HOME/app/server/run.sh $HOME/app/server/download_models.sh && \
    mkdir -p $HOME/app/models && \
    chown -R user:user $HOME/app && \
    chmod -R 755 $HOME/app

# Switch to user for runtime
USER user

# Create /data/models if possible (for HF Spaces)
RUN mkdir -p /data/models 2>/dev/null || true
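# On HF Spaces, persistent storage (when enabled) is mounted at /data; in local
# builds this mkdir may fail for the non-root user, hence the ignored errors above.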
# Set working directory to server
WORKDIR $HOME/app/server

# Expose port 7860 for HF Spaces (also works locally)
EXPOSE 7860
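# Example local run, assuming the hypothetical tag from the build example above
# (GPU access requires the NVIDIA Container Toolkit):
#   docker run --gpus all -p 7860:7860 translations-api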
# For production: pre-download models into the image (optional)
# Uncomment the following lines if you want models baked into the production image
# RUN mkdir -p $HOME/app/models
# RUN cd $HOME/app/models && \
#     wget -O ctc_alignment_mling_uroman_model_dict.txt https://dl.fbaipublicfiles.com/mms/torchaudio/ctc_alignment_mling_uroman/dictionary.txt && \
#     wget -O ctc_alignment_mling_uroman_model.pt https://dl.fbaipublicfiles.com/mms/torchaudio/ctc_alignment_mling_uroman/model.pt && \
#     wget https://dl.fbaipublicfiles.com/mms/mms_1143_langs_tokenizer_spm.model && \
#     wget https://dl.fbaipublicfiles.com/mms/mms_XRI.pt

# Default command - works for both local and HF Spaces
CMD ["conda", "run", "--no-capture-output", "-n", "translations-api", "./run.sh"]