Update start.sh
start.sh
CHANGED
@@ -5,13 +5,17 @@ if [ ! -d "llama.cpp" ]; then
     git clone https://github.com/ggerganov/llama.cpp
 fi
 
+export GGML_CUDA=OFF
 if [[ -z "${RUN_LOCALLY}" ]]; then
     # enable CUDA if NOT running locally
-    export
+    export GGML_CUDA=ON
 fi
 
 cd llama.cpp
-
+cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA} -DLLAMA_CURL=ON
+cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix
+cp ./build/bin/llama-* .
+rm -rf build
 
 cd ..
 python app.py
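The configure step reads the GGML_CUDA variable exported above, so the same script yields a CPU-only build when run locally and a CUDA build on the Space. A minimal usage sketch, assuming the built binaries were copied into llama.cpp/ as in the diff; the model filenames are placeholders, not part of this commit:

# RUN_LOCALLY is only tested for being set ([[ -z ... ]]), so any
# non-empty value keeps GGML_CUDA=OFF and cmake does a CPU-only build.
RUN_LOCALLY=1 ./start.sh

# One of the built tools, e.g. quantizing an f16 GGUF to Q4_K_M:
./llama.cpp/llama-quantize model-f16.gguf model-q4_k_m.gguf Q4_K_M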