prithivMLmods committed
Commit 5c54dd0 · verified · 1 Parent(s): 7970ec1

Update app.py

Files changed (1): app.py (+7, -7)
app.py CHANGED

@@ -41,8 +41,8 @@ model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_X, trust_remote_code=True,
     torch_dtype=torch.float16).to(device).eval()
 
-# Load R1-Onevision-7B
-MODEL_ID_T = "FriendliAI/R1-Onevision-7B"
+# Load prithivMLmods/WR30a-Deep-7B-0711
+MODEL_ID_T = "prithivMLmods/WR30a-Deep-7B-0711"
 processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
 model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_T, trust_remote_code=True,
@@ -110,7 +110,7 @@ def generate_image(model_name: str,
     elif model_name == "Visionary-R1-3B":
         processor = processor_o
         model = model_o
-    elif model_name == "R1-Onevision-7B":
+    elif model_name == "WR30a-Deep-7B-0711":
         processor = processor_t
         model = model_t
     elif model_name == "MonkeyOCR-pro-1.2B":
@@ -177,7 +177,7 @@ def generate_video(model_name: str,
     elif model_name == "Visionary-R1-3B":
         processor = processor_o
         model = model_o
-    elif model_name == "R1-Onevision-7B":
+    elif model_name == "WR30a-Deep-7B-0711":
         processor = processor_t
         model = model_t
     elif model_name == "MonkeyOCR-pro-1.2B":
@@ -331,18 +331,18 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         #download_btn = gr.Button("Download Result.md")
 
         model_choice = gr.Radio(choices=[
-            "Vision-Matters-7B", "R1-Onevision-7B",
+            "Vision-Matters-7B", "WR30a-Deep-7B-0711",
             "ViGaL-7B", "MonkeyOCR-pro-1.2B", "Visionary-R1-3B"
         ],
             label="Select Model",
             value="Vision-Matters-7B")
 
-        gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLMs-5x/discussions)")
+        gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLMs-5x/discussions)")
+        gr.Markdown("> [WR30a-Deep-7B-0711](https://huggingface.co/prithivMLmods/WR30a-Deep-7B-0711): wr30a-deep-7b-0711 model is a fine-tuned version of qwen2.5-vl-7b-instruct, optimized for image captioning, visual analysis, and image reasoning. Built on top of the qwen2.5-vl architecture, this experimental model enhances visual comprehension capabilities with focused training on 1,500k image pairs for superior image understanding.")
         gr.Markdown("> [MonkeyOCR-pro-1.2B](https://huggingface.co/echo840/MonkeyOCR-pro-1.2B): MonkeyOCR adopts a structure-recognition-relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
         gr.Markdown("> [Vision Matters 7B](https://huggingface.co/Yuting6/Vision-Matters-7B): vision-matters is a simple visual perturbation framework that can be easily integrated into existing post-training pipelines including sft, dpo, and grpo. our findings highlight the critical role of visual perturbation: better reasoning begins with better seeing.")
         gr.Markdown("> [ViGaL 7B](https://huggingface.co/yunfeixie/ViGaL-7B): vigal-7b shows that training a 7b mllm on simple games like snake using reinforcement learning boosts performance on benchmarks like mathvista and mmmu without needing worked solutions or diagrams indicating transferable reasoning skills.")
         gr.Markdown("> [Visionary-R1](https://huggingface.co/maifoundations/Visionary-R1): visionary-r1 is a novel framework for training visual language models (vlms) to perform robust visual reasoning using reinforcement learning (rl). unlike traditional approaches that rely heavily on (sft) or (cot) annotations, visionary-r1 leverages only visual question-answer pairs and rl, making the process more scalable and accessible.")
-        gr.Markdown("> [R1-Onevision-7B](https://huggingface.co/Fancy-MLLM/R1-Onevision-7B): r1-onevision model enhances vision-language understanding and reasoning capabilities, making it suitable for various tasks such as visual reasoning and image understanding. with its robust ability to perform multimodal reasoning, r1-onevision emerges as a powerful ai assistant capable of addressing different domains.")
         gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
 
         # Define the submit button actions
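
For context, a minimal sketch of exercising the swapped-in checkpoint after this commit. The loading lines mirror the diff above; the chat-template inference call, the sample image URL, the prompt, and the generation settings are illustrative assumptions based on the standard Qwen2.5-VL usage pattern, not code from this Space.

# Minimal sketch (not part of this commit): run one image-understanding
# query against the newly wired model_t/processor_t pair.
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same loading pattern as app.py.
MODEL_ID_T = "prithivMLmods/WR30a-Deep-7B-0711"
processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_T, trust_remote_code=True,
    torch_dtype=torch.float16).to(device).eval()

# Hypothetical inputs: placeholder image URL and prompt.
image = Image.open(requests.get("https://example.com/sample.jpg", stream=True).raw)
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ],
}]

# Standard Qwen2.5-VL chat-template inference.
prompt = processor_t.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor_t(text=[prompt], images=[image], return_tensors="pt").to(device)
with torch.inference_mode():
    output_ids = model_t.generate(**inputs, max_new_tokens=256)

# Strip the prompt tokens before decoding, as in typical Qwen2.5-VL examples.
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
print(processor_t.batch_decode(trimmed, skip_special_tokens=True)[0])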