# syntax=docker/dockerfile:1
# Base image with GPU support: CUDA 11.8 runtime + cuDNN 8 on Ubuntu 22.04.
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

# Build-time only: suppress apt interactive prompts. Using ARG (not ENV) keeps
# it out of the final image's runtime environment.
ARG DEBIAN_FRONTEND=noninteractive

# Runtime env: skip writing .pyc files and keep stdout/stderr unbuffered so
# application logs stream immediately under `docker logs`.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
# Install Python and other system dependencies in a single layer:
# update+install are combined (a split `apt-get update` layer goes stale),
# recommends are skipped to keep the image small, and the apt lists are
# removed in the same layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        ffmpeg \
        git \
        python3 \
        python3-pip \
        sudo \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Ensure `python` resolves to python3, and upgrade pip without keeping its
# download cache in the layer.
RUN ln -sf /usr/bin/python3 /usr/bin/python \
    && pip install --no-cache-dir --upgrade pip
# Add an unprivileged user so the Ollama install does not run as root
# (best practice).
RUN useradd -ms /bin/bash ollama
USER ollama
WORKDIR /home/ollama

# Install Ollama as that user.
# NOTE(review): `curl | sh` executes an unpinned remote script — builds are
# not reproducible and the script is unverified. Consider pinning a release
# version and verifying a checksum.
RUN curl -fsSL https://ollama.com/install.sh | sh
# Switch back to root to install Python packages globally.
USER root

# Working directory for the application.
WORKDIR /app

# Copy the dependency manifest first so the pip layer stays cached until
# requirements.txt changes; only then copy the full source tree.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY . .

# Download the SpeechBrain custom model interface into the source tree
# (src/ must exist in the build context — it arrives via the COPY above).
# NOTE(review): the local target `custome_interface.py` (sic) differs from the
# upstream `custom_interface.py` — confirm the app imports the misspelled
# name before "fixing" it.
RUN wget -O src/custome_interface.py https://huggingface.co/Jzuluaga/accent-id-commonaccent_xlsr-en-english/resolve/main/custom_interface.py
# Streamlit default port (documentation only; publish with `-p` at run time).
EXPOSE 8501

# Cheap liveness probe against Streamlit's built-in health endpoint
# (curl is installed in the apt layer above).
HEALTHCHECK --interval=30s --timeout=5s --start-period=30s --retries=3 \
    CMD curl -fsS http://localhost:8501/_stcore/health || exit 1

# Exec-form CMD so `docker stop` signals reach the process; `exec` replaces
# the shell with Streamlit so it runs as PID 1 and receives SIGTERM directly.
# Sequence: start the Ollama server in the background, give it time to come
# up, pull the gemma3 model, then launch the Streamlit UI.
# NOTE(review): verify the ollama binary path — install.sh typically installs
# to /usr/local/bin; confirm /home/ollama/.ollama/bin/ollama exists.
CMD ["bash", "-c", "\
    /home/ollama/.ollama/bin/ollama serve & \
    sleep 5 && \
    /home/ollama/.ollama/bin/ollama pull gemma3 && \
    exec streamlit run streamlit_app.py --server.port=8501 --server.address=0.0.0.0"]