Update app.py
app.py CHANGED
@@ -15,13 +15,13 @@ from PIL import Image
 from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer, Qwen2_5_VLForConditionalGeneration
 from qwen_vl_utils import process_vision_info
 
-import subprocess
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+#import subprocess
+#subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     "lingshu-medical-mllm/Lingshu-7B",
     torch_dtype=torch.bfloat16,
-    attn_implementation="flash_attention_2",
+    #attn_implementation="flash_attention_2",
     device_map="auto",
 )
 
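The net effect of the commit is to drop the runtime flash-attn install and let Transformers fall back to its default attention backend. If the speedup is still wanted on machines where flash-attn happens to be available, one option is to select the implementation at load time instead of hard-coding it. A minimal sketch, not part of this commit; the "sdpa" fallback is an assumption:

import importlib.util

import torch
from transformers import Qwen2_5_VLForConditionalGeneration

# Use flash_attention_2 only when the flash-attn package is importable;
# otherwise fall back to PyTorch's built-in SDPA kernels (assumption:
# "sdpa" is an acceptable default for this model).
attn_impl = (
    "flash_attention_2"
    if importlib.util.find_spec("flash_attn") is not None
    else "sdpa"
)

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "lingshu-medical-mllm/Lingshu-7B",
    torch_dtype=torch.bfloat16,
    attn_implementation=attn_impl,
    device_map="auto",
)

With a guard like this, the Space keeps loading on environments where flash-attn is not pre-built (such as ZeroGPU), while hosts that do have the wheel installed still get the faster kernels.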