Update start.sh
start.sh
CHANGED
@@ -1,18 +1,35 @@
 #!/bin/bash
 
-
+# Set the working directory
 export WORK="/home/user/app"
-cd $WORK
-
-
+cd "$WORK" || exit 1
+
+# Create a Python virtual environment
+python3 -m venv venv
+
+# Activate the virtual environment
+source venv/bin/activate
+
+echo "Unzip and download model..."
+unzip llama_cpp_avx512.zip > /dev/null 2>&1
 wget -O model.gguf https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-Q4_K_M.gguf > /dev/null 2>&1
-
-
-
-
-
-
-
-
+
+echo "Start llama.cpp server..."
+./llama-server -m model.gguf \
+  --port 8000 \
+  --host 0.0.0.0 \
+  --threads 2 \
+  --ctx-size 4096 \
+  --mlock \
+  --jinja \
+  --temp 0.2 \
+  --top-p 0.85 &
+
+# Install Python packages into the virtual environment
+pip install --upgrade pip
+pip install gradio openai
+
+echo "Start app.py..."
+python app.py
 
 #python3 -m http.server 7860 --bind 0.0.0.0
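
The diff does not include app.py, but the script installs gradio and openai and starts llama-server on port 8000, which exposes an OpenAI-compatible API. A minimal sketch of what such an app.py could look like, assuming a simple chat UI on port 7860 (the port referenced by the commented-out http.server line); the model name passed to the client and the UI layout are assumptions, not taken from this commit:

# app.py - a hypothetical sketch; the actual app.py is not shown in this diff.
# Assumes the llama-server started by start.sh is reachable on localhost:8000.
import gradio as gr
from openai import OpenAI

# llama-server speaks the OpenAI API; the api_key value is unused but required by the client.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

def chat(message, history):
    # For brevity this ignores the chat history and sends only the latest message.
    response = client.chat.completions.create(
        model="model.gguf",  # llama-server serves the single model loaded at startup
        messages=[{"role": "user", "content": message}],
    )
    return response.choices[0].message.content

# Hugging Face Spaces route traffic to port 7860, matching the commented-out http.server line.
demo = gr.ChatInterface(chat)
demo.launch(server_name="0.0.0.0", server_port=7860)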
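
Because start.sh launches llama-server in the background with & and then runs pip install and app.py right away, the server may still be loading the 20B model when the app sends its first request. llama-server exposes a /health endpoint, so app.py could poll it before building the UI. A sketch, assuming a 10-minute ceiling; the helper name and timeout are made up for illustration:

# wait_for_server is a hypothetical helper, not part of the diff.
import time
import urllib.request
import urllib.error

def wait_for_server(url="http://localhost:8000/health", timeout=600):
    # Poll llama-server's /health endpoint until it reports ready or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return True
        except (urllib.error.URLError, OSError):
            pass
        time.sleep(2)  # the model may take a while to load into memory
    return False

Calling wait_for_server() before demo.launch(...) would let the first request from the UI succeed instead of failing while the model is still loading.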