MVPilgrim committed
Commit 8d35f8b · 1 Parent(s): 7fbac2c
Files changed (1)
  1. Dockerfile +2 -1
Dockerfile CHANGED
@@ -12,6 +12,7 @@ RUN apt-get update && apt-get install -y \
   musl-dev \
   musl-tools \
   libffi-dev \
+  git \
   && rm -rf /var/lib/apt/lists/*
 
 # Install Python 3.11
@@ -49,7 +50,7 @@ RUN pip3 install --break-system-packages https://files.pythonhosted.org/packages
 #RUN FORCE_CMAKE=1 CMAKE_SYSTEM_PROCESSOR=AMD64 pip3 install --break-system-packages --verbose --no-cache-dir llama-cpp-python
 
 #RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on -DCUDA_PATH=/usr/local/cuda-12.2 -DCUDAToolkit_ROOT=/usr/local/cuda-12.2 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-12/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-12.2/lib64" FORCE_CMAKE=1 pip install llama-cpp-python - no-cache-dir
-RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --break-system-packages llama-cpp-python --no-cache-dir
+RUN CMAKE_ARGS="-GGML_CUDA=on" FORCE_CMAKE=1 pip install --break-system-packages llama-cpp-python --no-cache-dir
 
 RUN pip3 install --break-system-packages cffi
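Note on the CUDA flag: llama-cpp-python's build instructions pass GGML_CUDA as a CMake cache define, i.e. CMAKE_ARGS="-DGGML_CUDA=on" (GGML_CUDA is the upstream replacement for the deprecated LLAMA_CUBLAS option). Written as "-GGML_CUDA=on", CMake would likely read the argument as a -G generator name rather than an option, so the CUDA backend may not actually be enabled. A minimal sketch of the intended line, assuming the goal is a source build of llama-cpp-python with CUDA support:

RUN CMAKE_ARGS="-DGGML_CUDA=on" FORCE_CMAKE=1 pip install --break-system-packages llama-cpp-python --no-cache-dir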