Upload 4 files
- .gitattributes +1 -0
- Dockerfile +13 -0
- Llama-3.2-1B-Instruct-Q4_K_M.gguf +3 -0
- requirements.txt +8 -0
- server.py +43 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.11
+
+WORKDIR /app
+RUN mkdir -p /app/hf_cache
+RUN chmod -R 777 /app/hf_cache
+ENV HF_HOME=/app/hf_cache
+COPY ./requirements.txt .
+
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+EXPOSE 7860
+CMD ["python", "server.py"]
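The Dockerfile bundles the model file and server code into the image, exposes port 7860, and starts server.py directly. As a rough sanity check after starting a container built from it (assuming the container port 7860 is published to the same port on the host), a plain TCP probe in Python is enough to confirm the server is accepting connections; the host and port here are assumptions, not part of the commit:

import socket

# Minimal readiness probe (hypothetical): assumes the container was started
# with host port 7860 mapped to the container's exposed port 7860.
def is_listening(host: str = "127.0.0.1", port: int = 7860, timeout: float = 2.0) -> bool:
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

if __name__ == "__main__":
    print("server reachable:", is_listening())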
Llama-3.2-1B-Instruct-Q4_K_M.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ede42862ceca07ad1c88a97b67520019c4ac7e5ced250d2e696fa62ab189af
+size 807690688
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+litserve
+--extra-index-url https://download.pytorch.org/whl/cpu
+transformers==4.38.2
+accelerate
+bitsandbytes
+optimum
+llama-cpp-python
+uvloop
server.py
ADDED
@@ -0,0 +1,43 @@
+
+import litserve as ls
+from typing import List, Dict, Any
+from llama_cpp import Llama
+from fastapi import Depends, HTTPException
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+
+
+class SimpleLitAPI(ls.LitAPI):
+    def setup(self, device):
+        self.llm = Llama(
+            model_path="Llama-3.2-1B-Instruct-Q4_K_M.gguf",
+            verbose=False
+        )
+    def decode_request(self, request):
+        return request
+
+    def predict(self, input):
+        temperature = input.get("temperature", 1.0)
+        max_tokens = input.get("max_tokens", 100)
+        top_k = input.get("top_k", 50)
+        top_p = input.get("top_p", 0.9)
+        repeat_penalty = input.get("repeat_penalty", 1.0)
+        return self.llm.create_chat_completion(
+            messages=input["messages"],
+            temperature=temperature,
+            max_tokens=max_tokens,
+            top_k=top_k,
+            top_p=top_p,
+            repeat_penalty=repeat_penalty,
+        )
+
+    def encode_response(self, output):
+        return {"output": output}
+
+    def authorize(self, auth: HTTPAuthorizationCredentials = Depends(HTTPBearer())):
+        if auth.scheme != "Bearer" or auth.credentials != "1234":
+            raise HTTPException(status_code=401, detail="Bad token")
+
+if __name__ == "__main__":
+    api = SimpleLitAPI()
+    server = ls.LitServer(api, accelerator="cpu", devices=2, workers_per_device=2)
+    server.run(port=7860)
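server.py accepts a JSON body containing an OpenAI-style "messages" list plus optional sampling parameters, and the authorize hook requires the hard-coded bearer token "1234". A minimal client sketch against this server could look like the following; it assumes the server is reachable at http://localhost:7860 and that LitServe's default /predict route is in use, and the example prompt and response handling are illustrative only:

import requests

# Hypothetical client call; host, port, and payload values are assumptions.
payload = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
    "temperature": 0.7,
    "max_tokens": 64,
}

resp = requests.post(
    "http://localhost:7860/predict",
    json=payload,
    headers={"Authorization": "Bearer 1234"},  # must match the token checked in authorize()
    timeout=60,
)
resp.raise_for_status()

# encode_response wraps llama.cpp's chat-completion dict under "output".
print(resp.json()["output"]["choices"][0]["message"]["content"])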