reduce models
app.py CHANGED
@@ -10,16 +10,6 @@ from transformers.utils.processor_visualizer_utils import ImageVisualizer
 MODELS = [
     "openai/clip-vit-base-patch32",
     "HuggingFaceM4/Idefics3-8B-Llama3",
-    "llava-hf/llava-1.5-7b-hf",
-    "OpenGVLab/InternVL2-2B",
-    "OpenGVLab/InternVL3-8B-hf",
-    "Salesforce/blip-image-captioning-base",
-    "Salesforce/blip2-flan-t5-xl",
-    "Qwen/Qwen2-VL-2B-Instruct",
-    "Qwen/Qwen2.5-VL-3B-Instruct",
-    "meta-llama/Llama-3.2-11B-Vision",
-    "microsoft/Florence-2-base",
-    "laion/CLIP-ViT-B-32-laion2B-s34B-b79K",
 ]
 
 
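For context, a minimal sketch of how a Space like this might wire the reduced MODELS list into a Gradio UI. Only the ImageVisualizer import and the MODELS list come from the diff above; the page layout, the visualize helper, and the ImageVisualizer constructor and call signature are assumptions, not the app's actual code.

import gradio as gr
from transformers.utils.processor_visualizer_utils import ImageVisualizer

# The two checkpoints kept by this commit.
MODELS = [
    "openai/clip-vit-base-patch32",
    "HuggingFaceM4/Idefics3-8B-Llama3",
]

def visualize(model_id, image):
    # Assumption: ImageVisualizer is built from a model repo id and, when
    # called on an image, renders how that model's processor prepares it.
    # The actual signature is not shown in this diff.
    visualizer = ImageVisualizer(model_id)
    return visualizer(image)

with gr.Blocks() as demo:
    model = gr.Dropdown(choices=MODELS, value=MODELS[0], label="Model")
    image = gr.Image(type="pil", label="Input image")
    output = gr.Image(label="Processor visualization")
    gr.Button("Visualize").click(visualize, inputs=[model, image], outputs=output)

demo.launch()

Consistent with the commit message, keeping only two lightweight checkpoints in the dropdown presumably reduces the Space's download, memory, and startup costs compared with the ten larger vision-language models removed here.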