# syntax=docker/dockerfile:1
# (BuildKit syntax directive; needed for the RUN heredoc used further down)
FROM python:3.10-slim

# Install system dependencies and build tools
RUN apt-get update && apt-get install -y \
    ffmpeg \
    git \
    curl \
    build-essential \
    cmake \
    libgomp1 \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# Copy application files
COPY . /app
# Install Python dependencies, forcing CPU-only PyTorch
RUN pip install --upgrade pip \
    && pip install --index-url https://download.pytorch.org/whl/cpu \
        torch==2.1.0+cpu torchvision==0.16.0+cpu torchaudio==2.1.0+cpu \
    && pip install -r requirements.txt \
    && pip install huggingface_hub
# Download the GGUF model at build time into models/
# (the GGUF quantisation lives in TheBloke's -GGUF repo, not the GPTQ repo)
RUN mkdir -p models && python - <<EOF
from huggingface_hub import hf_hub_download

hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    filename="mistral-7b-instruct-v0.1.Q4_K_M.gguf",
    cache_dir="models",
    library_name="llama-cpp-python",
)
EOF
# Expose port 7860 for the Flask app
EXPOSE 7860

# Start the Flask application
CMD ["python", "app.py"]
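
app.py itself is not shown on the Space page. As a rough sketch of how the pieces of this Dockerfile could fit together, the snippet below re-resolves the GGUF file from the cache populated at build time (a cache hit, since the repo_id, filename, and cache_dir match the Dockerfile), loads it with llama-cpp-python on CPU, and serves a Flask endpoint on port 7860. Everything in it is an assumption: the route name, prompt template, and generation parameters are illustrative, not the Space's actual code.

# app.py -- hypothetical sketch, not the Space's actual code
from flask import Flask, jsonify, request
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # assumes llama-cpp-python is listed in requirements.txt

app = Flask(__name__)

# Resolve the GGUF file from the cache populated at build time;
# with the same repo_id/filename/cache_dir this is a cache hit, not a re-download.
MODEL_PATH = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    filename="mistral-7b-instruct-v0.1.Q4_K_M.gguf",
    cache_dir="models",
)

# CPU-only inference; context size and thread count are illustrative values
llm = Llama(model_path=MODEL_PATH, n_ctx=2048, n_threads=4)

@app.route("/generate", methods=["POST"])
def generate():
    prompt = request.json.get("prompt", "")
    out = llm(f"[INST] {prompt} [/INST]", max_tokens=256, temperature=0.7)
    return jsonify({"response": out["choices"][0]["text"]})

if __name__ == "__main__":
    # Bind to 0.0.0.0 on the port exposed in the Dockerfile
    app.run(host="0.0.0.0", port=7860)

With the container running, a request such as curl -X POST http://localhost:7860/generate -H "Content-Type: application/json" -d '{"prompt": "Hello"}' would then return the generated text.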