fix: update model loading logic
Browse files
- app.py: +17 −1
- requirements.txt: +19 −4
app.py
CHANGED
@@ -1,5 +1,21 @@
|
|
1 |
import gradio as gr
|
2 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
# Initialize DeepSeek-VL model (CPU for free Spaces)
|
5 |
model = DeepSeekVL(model_path="deepseek-ai/deepseek-vl-7b", device="cpu")
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import AutoProcessor, AutoModelForVision2Seq
|
3 |
+
import torch
|
4 |
+
|
5 |
+
class DeepSeekVL:
    """Thin wrapper around a Hugging Face vision-to-text checkpoint.

    Loads the processor and model once at construction time, then answers
    free-form questions about an image via :meth:`generate`.
    """

    def __init__(self, model_path="deepseek-ai/deepseek-vl-7b", device="cpu"):
        # Remember the target device so inputs can be moved to it later.
        self.device = device
        self.processor = AutoProcessor.from_pretrained(model_path)
        # float32 weights — the safe dtype for CPU inference on free Spaces.
        loaded = AutoModelForVision2Seq.from_pretrained(
            model_path,
            torch_dtype=torch.float32,
        )
        self.model = loaded.to(device)

    def generate(self, image, question, max_new_tokens=128):
        """Return the decoded model answer for `question` about `image`.

        max_new_tokens caps the generated continuation length.
        """
        batch = self.processor(text=question, images=image, return_tensors="pt")
        batch = batch.to(self.device)
        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            output_ids = self.model.generate(**batch, max_new_tokens=max_new_tokens)
        decoded = self.processor.batch_decode(output_ids, skip_special_tokens=True)
        return decoded[0]
|
# Initialize DeepSeek-VL model (CPU for free Spaces)
# NOTE(review): this runs at import time, so the checkpoint is downloaded and
# loaded when the module is first imported — presumably intended for a Spaces
# app; confirm startup-time limits allow it.
model = DeepSeekVL(model_path="deepseek-ai/deepseek-vl-7b", device="cpu")
requirements.txt
CHANGED
@@ -1,4 +1,19 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
torch==2.0.1
|
2 |
+
transformers>=4.38.2
|
3 |
+
timm>=0.9.16
|
4 |
+
accelerate
|
5 |
+
sentencepiece
|
6 |
+
attrdict
|
7 |
+
einops
|
8 |
+
|
9 |
+
# for gradio demo
|
10 |
+
gradio==3.48.0
|
11 |
+
gradio-client==0.6.1
|
12 |
+
mdtex2html==1.3.0
|
13 |
+
pypinyin==0.50.0
|
14 |
+
tiktoken==0.5.2
|
15 |
+
tqdm==4.64.0
|
16 |
+
colorama==0.4.5
|
17 |
+
Pygments==2.12.0
|
+
# (duplicate entry removed: "SentencePiece==0.1.96" repeated the
# "sentencepiece" requirement above — pip treats project names
# case-insensitively and rejects double requirements)
|