File size: 949 Bytes
898d83e
264681b
d487566
264681b
9408e2e
3be7e28
3851b2c
05fb68f
e0c0fb6
aa7ea36
 
db4e8bf
05fb68f
db4e8bf
aa7ea36
 
 
 
db4e8bf
946e50b
80cc641
d4b7fb9
00e5fa1
 
80cc641
 
3851b2c
 
 
 
11f5c93
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
#!/bin/bash
#
# Container startup script: download the turn-detection model from
# Hugging Face, start the vLLM OpenAI-compatible server and the demo
# frontend in the background, then run the backend task in the foreground
# (which keeps the container alive).
#
# Required tools: huggingface-cli, vllm, npm, task.

# Fail fast: do not launch servers if the download (or any step) fails.
set -euo pipefail

mkdir -p data

# Alternative models kept for quick switching during development:
#HF_MODEL_PATH=Qwen/Qwen2.5-1.5B-Instruct
#HF_MODEL_PATH=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
#HF_MODEL_PATH=Qwen/Qwen2.5-VL-3B-Instruct
HF_MODEL_PATH=TEN-framework/TEN_Turn_Detection
HF_MODEL_NAME=$(basename "${HF_MODEL_PATH}")
LOCAL_MODEL_PATH=./data/${HF_MODEL_NAME}

# TODO: use your own key and put into secret
# Allow override from the environment; default preserves previous behavior.
VLLM_SERVE_API_KEY=${VLLM_SERVE_API_KEY:-TEN_Turn_Detection}

# Download model (HF_HUB_ENABLE_HF_TRANSFER=1 enables the fast transfer backend).
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download "${HF_MODEL_PATH}" --local-dir "${LOCAL_MODEL_PATH}"

# Start vllm server in the background.
vllm serve "${LOCAL_MODEL_PATH}" --served-model-name "${HF_MODEL_NAME}" --api-key "${VLLM_SERVE_API_KEY}" &

# Start frontend in the background.
# NOTE: the whole AND-list is backgrounded, so the 'cd' runs in a subshell
# and does not change this script's working directory.
export NEXT_PUBLIC_EDIT_GRAPH_MODE=false
#cd /app/playground && npm run dev &
cd /app/demo && npm run dev &

# Start backend (foreground).
#export OPENAI_API_BASE=http://127.0.0.1:8000/v1
#export OPENAI_PROXY_URL=
#export OPENAI_MODEL=${HF_MODEL_NAME}
#export OPENAI_API_KEY=${VLLM_SERVE_API_KEY}
cd /app && task run