MVPilgrim committed on
Commit
3db0d4d
·
verified ·
1 Parent(s): 2288815

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +2 -2
Dockerfile CHANGED
@@ -61,8 +61,8 @@ RUN pip3 install https://files.pythonhosted.org/packages/13/87/e0cb08c2d4bd7d38
61
  #RUN FORCE_CMAKE=1 CMAKE_SYSTEM_PROCESSOR=AMD64 pip3 install --break-system-packages --verbose --no-cache-dir llama-cpp-python
62
 
63
  #RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on -DCUDA_PATH=/usr/local/cuda-12.2 -DCUDAToolkit_ROOT=/usr/local/cuda-12.2 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-12/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-12.2/lib64" FORCE_CMAKE=1 pip install llama-cpp-python - no-cache-dir
64
- RUN CMAKE_ARGS="GGML_CUDA=on" FORCE_CMAKE=1 pip install --break-system-packages llama-cpp-python --no-cache-dir
65
-
66
  RUN pip3 install --break-system-packages cffi
67
 
68
  # Install text2vec-transformers
 
61
  #RUN FORCE_CMAKE=1 CMAKE_SYSTEM_PROCESSOR=AMD64 pip3 install --break-system-packages --verbose --no-cache-dir llama-cpp-python
62
 
63
  #RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on -DCUDA_PATH=/usr/local/cuda-12.2 -DCUDAToolkit_ROOT=/usr/local/cuda-12.2 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-12/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-12.2/lib64" FORCE_CMAKE=1 pip install llama-cpp-python - no-cache-dir
64
+ #RUN CMAKE_ARGS="GGML_CUDA=on" FORCE_CMAKE=1 pip install --break-system-packages llama-cpp-python --no-cache-dir
65
+ # Build llama-cpp-python from source with CUDA: CMake defines need the -D prefix
+ # (bare "GGML_CUDA=on" is ignored by CMake, yielding a CPU-only build);
+ # --break-system-packages matches the other pip invocations (PEP 668 base image).
+ RUN CMAKE_ARGS="-DGGML_CUDA=on" FORCE_CMAKE=1 pip install --break-system-packages llama-cpp-python --no-cache-dir
66
  RUN pip3 install --break-system-packages cffi
67
 
68
  # Install text2vec-transformers