ngxson HF Staff committed on
Commit
6531fa2
·
verified ·
1 Parent(s): d44cf73

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +4 -30
Dockerfile CHANGED
@@ -1,37 +1,11 @@
1
- ARG UBUNTU_VERSION=22.04
2
- # This needs to generally match the container host's environment.
3
- ARG CUDA_VERSION=12.6.0
4
- # Target the CUDA build image
5
- ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6
- # Target the CUDA runtime image
7
- ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
8
-
9
- FROM ${BASE_CUDA_DEV_CONTAINER} AS build
10
-
11
- # CUDA architecture to build for (defaults to all supported archs)
12
- ARG CUDA_DOCKER_ARCH=default
13
-
14
- RUN apt-get update && \
15
- apt-get install -y build-essential git cmake libcurl4-openssl-dev
16
-
17
- WORKDIR /app
18
-
19
- RUN git clone https://github.com/ggerganov/llama.cpp --depth 1 .
20
-
21
- # Use the default CUDA archs if not specified
22
- RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
23
- export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
24
- fi && \
25
- cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} . && \
26
- cmake --build build --config Release --target llama-server -j$(nproc)
27
-
28
-
29
-
30
  FROM node:22 AS runtime
31
 
32
  RUN npm i -g http-server
33
 
34
  WORKDIR /app
35
- COPY --from=build /app/build/bin/llama-server /app/llama-server
 
 
36
 
37
  CMD ["http-server", "/app", "-p", "7860", "-c-1"]
 
# syntax=docker/dockerfile:1

# Stage 1: pull the prebuilt llama.cpp CUDA server image; we only harvest
# its binary and shared libraries, nothing from this stage runs here.
# NOTE(review): tag `server-cuda` is a moving tag — pin by digest for
# reproducible builds once a known-good digest is chosen.
FROM ghcr.io/ggerganov/llama.cpp:server-cuda AS build

# Stage 2: minimal Node runtime that serves /app over HTTP.
FROM node:22 AS runtime

# http-server is the only runtime dependency; installed globally so the
# CMD below can invoke it by name.
RUN npm i -g http-server

WORKDIR /app

# Bring over the llama.cpp server binary plus the shared libraries it is
# dynamically linked against.
COPY --from=build /libggml.so /app/libggml.so
COPY --from=build /libllama.so /app/libllama.so
COPY --from=build /llama-server /app/llama-server

# The copied .so files live in /app, which is not on the default linker
# search path — without this, /app/llama-server fails to start with
# "cannot open shared object file".
ENV LD_LIBRARY_PATH=/app

# Documentation-only: the port http-server listens on (see CMD).
EXPOSE 7860

# Serve /app on port 7860 with caching disabled (-c-1).
# NOTE(review): runs as root; consider `USER node` (provided by the
# node base image) if nothing here needs root at runtime.
CMD ["http-server", "/app", "-p", "7860", "-c-1"]