# Base image
FROM ghcr.io/ggerganov/llama.cpp:full-cuda

ENV DEBIAN_FRONTEND=noninteractive

# Update and install necessary dependencies
RUN apt update && \
    apt install --no-install-recommends -y \
    build-essential \
    python3 \
    python3-pip \
    wget \
    curl \
    git \
    cmake \
    zlib1g-dev \
    libblas-dev && \
    apt clean && \
    rm -rf /var/lib/apt/lists/*
# Set CUDA environment variables explicitly (the base image already ships CUDA,
# so these may be redundant, but being explicit does no harm)
ENV PATH="/usr/local/cuda/bin:$PATH" \
    LD_LIBRARY_PATH="/usr/local/cuda/lib64:$LD_LIBRARY_PATH" \
    CUDA_HOME="/usr/local/cuda"
WORKDIR /app
# Download the GGUF language model and the multimodal projector from Hugging Face
RUN wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mistral-7b-q_5_k.gguf && \
    wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-mistral7b-f16.gguf
# Build llama.cpp with cuBLAS (CUDA) support; the source tree already ships in
# the base image, so no clone is needed
# (debug) confirm the source tree and downloaded models are in place
RUN ls -al
RUN make LLAMA_CUBLAS=1
# Expose the server port
EXPOSE 8080
# Start the server: these arguments are passed to the base image's entrypoint,
# which launches the llama.cpp server with the LLaVA model and projector
CMD ["--server", "--model", "mistral-7b-q_5_k.gguf", "--mmproj", "mmproj-mistral7b-f16.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "31"]
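
# ---------------------------------------------------------------------------
# Usage (a minimal sketch; the tag "llava-server" is an arbitrary name chosen
# here for illustration, and GPU passthrough assumes the NVIDIA Container
# Toolkit is installed on the host):
#
#   docker build -t llava-server .
#   docker run --gpus all -p 8080:8080 llava-server
#
# Quick smoke test once the server is up, against the llama.cpp server's
# /completion endpoint:
#
#   curl http://localhost:8080/completion \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "Describe a llama in one sentence.", "n_predict": 64}'
#
# For multimodal (LLaVA) prompts, the llama.cpp server of this era accepted
# base64-encoded images via an "image_data" array, referenced in the prompt as
# [img-ID]; photo.jpg is a placeholder file, and the exact schema may differ
# by build, so check the server README that matches your checkout:
#
#   curl http://localhost:8080/completion \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "USER: [img-10] What is shown here? ASSISTANT:",
#          "n_predict": 128,
#          "image_data": [{"data": "'"$(base64 -w0 photo.jpg)"'", "id": 10}]}'
# ---------------------------------------------------------------------------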