ARG UBUNTU_VERSION=22.04
# This should generally match the CUDA version supported by the container host's driver.
ARG CUDA_VERSION=12.6.0
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# CUDA architecture to build for (defaults to all supported archs)
ARG CUDA_DOCKER_ARCH=default
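# To build for a single compute capability instead (assumed example: "86"
# targets Ampere GPUs such as the RTX 30-series and shortens compile time):
#   docker build --build-arg CUDA_DOCKER_ARCH=86 -t llama-server-cuda .
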
RUN apt-get update && \
    apt-get install -y build-essential git cmake libcurl4-openssl-dev

WORKDIR /app

RUN git clone https://github.com/ggerganov/llama.cpp --depth 1 .
# Use the default CUDA archs if not specified
RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
    fi && \
    cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} . && \
    cmake --build build --config Release --target llama-server -j$(nproc)
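
# The compiled binary lands in /app/build/bin/llama-server; the runtime
# stage below copies it out so the CUDA build toolchain is not shipped in
# the final image.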
FROM node:22 AS runtime

RUN npm i -g http-server

WORKDIR /app
COPY --from=build /app/build/bin/llama-server /app/llama-server

CMD ["http-server", "/app", "-p", "7860", "-c-1"]
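
# The binary is served as a static download: http-server exposes /app on
# port 7860 (the default app port for Hugging Face Spaces) with caching
# disabled via -c-1.
# Example host-side usage (assumed image tag from the build example above):
#   docker run -p 7860:7860 llama-server-cuda
#   curl -O http://localhost:7860/llama-server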