nanoVLM-inference / Dockerfile
# Use a slim Python base image. For GPU, you'd need a CUDA-enabled base.
FROM python:3.9-slim
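
# If GPU inference is needed, a CUDA-enabled base could be swapped in instead.
# Illustrative sketch only, assuming NVIDIA's official CUDA images (pick a tag
# matching the host driver); Python then has to be installed on top:
# FROM nvidia/cuda:12.1.1-runtime-ubuntu22.04
# RUN apt-get update && apt-get install -y python3 python3-pip && rm -rf /var/lib/apt/lists/*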
# Set the working directory in the container
WORKDIR /app

# Install git (some Hugging Face model/tokenizer downloads rely on it)
# along with common build tools needed to compile Python packages
RUN apt-get update && apt-get install -y \
git \
build-essential \
&& rm -rf /var/lib/apt/lists/*
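
# A leaner variant (optional): --no-install-recommends skips apt's suggested
# packages and keeps the layer smaller, e.g.:
# RUN apt-get update && apt-get install -y --no-install-recommends git build-essential \
#     && rm -rf /var/lib/apt/lists/*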
# Copy the requirements file first to leverage Docker layer caching
COPY requirements.txt requirements.txt
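
# For reference, requirements.txt for a Gradio-based nanoVLM app would likely
# list something like the following (illustrative guess, not the actual file;
# exact packages and version pins depend on app.py):
#   torch
#   transformers
#   gradio
#   pillow
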
# Install Python dependencies
# --no-cache-dir reduces image size
# --prefer-binary favors prebuilt wheels over source builds, which speeds up installation
RUN pip install --no-cache-dir --prefer-binary -r requirements.txt

# Copy the application code into the container
COPY app.py app.py

# Expose the port Gradio will run on (default is 7860)
EXPOSE 7860
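
# Note: for the published port to be reachable from outside the container, the
# app must bind to all interfaces, not just localhost. Illustrative Python
# sketch, assuming app.py builds a Gradio interface named `demo`:
#   demo.launch(server_name="0.0.0.0", server_port=7860)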

# Set the default command to run the Gradio application
# `python -u` keeps stdout/stderr unbuffered so log lines show up immediately in container logs
CMD ["python", "-u", "app.py"]