Update app.py
app.py CHANGED
@@ -55,12 +55,16 @@ model_o = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_O, trust_remote_code=True,
     torch_dtype=torch.float16).to(device).eval()
 
-# Load VLM-R1-Qwen2.5VL-3B-Math-0305
-MODEL_ID_W = "omlab/VLM-R1-Qwen2.5VL-3B-Math-0305"
-processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True)
+#-----------------------------subfolder-----------------------------#
+# Load MonkeyOCR-3B-0709
+MODEL_ID_W = "echo840/MonkeyOCR-3B-0709"
+SUBFOLDER = "Recognition"
+processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True, subfolder=SUBFOLDER)
 model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_W, trust_remote_code=True,
+    subfolder=SUBFOLDER,
     torch_dtype=torch.float16).to(device).eval()
+#-----------------------------subfolder-----------------------------#
 
 # Function to downsample video frames
 def downsample_video(video_path):
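The point of this hunk: MonkeyOCR-3B-0709 keeps its recognition model under a Recognition subfolder of the repo rather than at the root, so both the processor and the weights must be loaded with the subfolder argument that from_pretrained accepts. A minimal standalone sketch of the pattern, assuming a CUDA device ("cuda" stands in for the app's device variable) and a transformers version with Qwen2.5-VL support; repo and subfolder names are taken from the diff:

import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

MODEL_ID_W = "echo840/MonkeyOCR-3B-0709"
SUBFOLDER = "Recognition"  # processor and weight files live under this repo path

# Both calls need subfolder=..., otherwise from_pretrained looks for
# config.json at the repo root and fails to resolve the model.
processor_w = AutoProcessor.from_pretrained(
    MODEL_ID_W, trust_remote_code=True, subfolder=SUBFOLDER)
model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_W, trust_remote_code=True, subfolder=SUBFOLDER,
    torch_dtype=torch.float16).to("cuda").eval()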
@@ -109,7 +113,7 @@ def generate_image(model_name: str,
     elif model_name == "R1-Onevision-7B":
         processor = processor_t
         model = model_t
-    elif model_name == "VLM-R1-Qwen2.5VL-3B-Math-0305":
+    elif model_name == "MonkeyOCR-3B-0709":
         processor = processor_w
         model = model_w
     else:
@@ -176,7 +180,7 @@ def generate_video(model_name: str,
     elif model_name == "R1-Onevision-7B":
         processor = processor_t
         model = model_t
-    elif model_name == "VLM-R1-Qwen2.5VL-3B-Math-0305":
+    elif model_name == "MonkeyOCR-3B-0709":
         processor = processor_w
         model = model_w
     else:
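generate_image() and generate_video() resolve the selected radio label to a (processor, model) pair through parallel elif chains, so every new model has to be wired in twice, plus once more in the Radio choices below. A hypothetical refactor, not part of this commit, that keeps the mapping in one place; MODEL_REGISTRY and resolve_model are invented names, while the processor/model pairs are the ones app.py already loads:

# Hypothetical alternative: resolve the radio label through a single registry
# instead of twin elif chains in generate_image() and generate_video().
MODEL_REGISTRY = {
    "R1-Onevision-7B": (processor_t, model_t),
    "MonkeyOCR-3B-0709": (processor_w, model_w),
    # ...one entry per remaining radio choice...
}

def resolve_model(model_name: str):
    # Raises a clear error instead of silently falling through to the else branch.
    try:
        return MODEL_REGISTRY[model_name]
    except KeyError:
        raise ValueError(f"unknown model: {model_name}")

# inside generate_image() / generate_video():
# processor, model = resolve_model(model_name)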
@@ -326,8 +330,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             #download_btn = gr.Button("Download Result.md")
 
             model_choice = gr.Radio(choices=[
-                "Vision-Matters-7B-Math", "ViGaL-7B", "Visionary-R1",
-                "R1-Onevision-7B", "VLM-R1-Qwen2.5VL-3B-Math-0305"
+                "Vision-Matters-7B-Math", "MonkeyOCR-3B-0709", "ViGaL-7B", "Visionary-R1", "R1-Onevision-7B"
             ],
             label="Select Model",
             value="Vision-Matters-7B-Math")
@@ -337,7 +340,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         gr.Markdown("> [ViGaL 7B](https://huggingface.co/yunfeixie/ViGaL-7B): vigal-7b shows that training a 7b mllm on simple games like snake using reinforcement learning boosts performance on benchmarks like mathvista and mmmu without needing worked solutions or diagrams indicating transferable reasoning skills.")
         gr.Markdown("> [Visionary-R1](https://huggingface.co/maifoundations/Visionary-R1): visionary-r1 is a novel framework for training visual language models (vlms) to perform robust visual reasoning using reinforcement learning (rl). unlike traditional approaches that rely heavily on (sft) or (cot) annotations, visionary-r1 leverages only visual question-answer pairs and rl, making the process more scalable and accessible.")
         gr.Markdown("> [R1-Onevision-7B](https://huggingface.co/Fancy-MLLM/R1-Onevision-7B): r1-onevision model enhances vision-language understanding and reasoning capabilities, making it suitable for various tasks such as visual reasoning and image understanding. with its robust ability to perform multimodal reasoning, r1-onevision emerges as a powerful ai assistant capable of addressing different domains.")
-        gr.Markdown("> [VLM-R1-Qwen2.5VL-3B-Math-0305](https://huggingface.co/omlab/VLM-R1-Qwen2.5VL-3B-Math-0305): vlm-r1 is a framework designed to enhance the reasoning and generalization capabilities of vision-language models (vlms) using a reinforcement learning (rl) approach inspired by the r1 methodology originally developed for large language models.")
+        gr.Markdown("> [MonkeyOCR-3B-0709](https://huggingface.co/omlab/VLM-R1-Qwen2.5VL-3B-Math-0305): vlm-r1 is a framework designed to enhance the reasoning and generalization capabilities of vision-language models (vlms) using a reinforcement learning (rl) approach inspired by the r1 methodology originally developed for large language models.")
         gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
 
         # Define the submit button actions
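One apparent leftover in the last hunk: the new MonkeyOCR-3B-0709 blurb still links to the old VLM-R1 repo (huggingface.co/omlab/VLM-R1-Qwen2.5VL-3B-Math-0305) and reuses its description verbatim; presumably the link target and text were meant to follow the label over to echo840/MonkeyOCR-3B-0709.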