#!/bin/bash
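# start.sh: launch script for gguf-my-repo.
# CUDA support for the llama.cpp build is toggled via the RUN_CUDA
# environment variable (assumed to be set by the host environment,
# e.g. a Space variable). Sketch of intended usage:
#   RUN_CUDA=1 ./start.sh   # build with CUDA (GGML_CUDA=ON)
#   ./start.sh              # CPU-only default (GGML_CUDA=OFF)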
# Default to a CPU-only llama.cpp build.
export GGML_CUDA=OFF
# Enable CUDA only when RUN_CUDA is set.
if [[ -n "${RUN_CUDA}" ]]; then
    export GGML_CUDA=ON
fi
python3 app.py