prithivMLmods committed
Commit e5db41a · verified · 1 Parent(s): 1741310

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -56,8 +56,8 @@ model_o = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16).to(device).eval()
 
 #-----------------------------subfolder-----------------------------#
-# Load MonkeyOCR-1.2B-0709
-MODEL_ID_W = "echo840/MonkeyOCR-1.2B-0709"
+# Load MonkeyOCR-pro-1.2B
+MODEL_ID_W = "echo840/MonkeyOCR-pro-1.2B"
 SUBFOLDER = "Recognition"
 processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True, subfolder=SUBFOLDER)
 model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
@@ -113,7 +113,7 @@ def generate_image(model_name: str,
     elif model_name == "R1-Onevision-7B":
         processor = processor_t
         model = model_t
-    elif model_name == "MonkeyOCR-1.2B-0709":
+    elif model_name == "MonkeyOCR-pro-1.2B":
         processor = processor_w
         model = model_w
     else:
@@ -180,7 +180,7 @@ def generate_video(model_name: str,
     elif model_name == "R1-Onevision-7B":
         processor = processor_t
         model = model_t
-    elif model_name == "MonkeyOCR-1.2B-0709":
+    elif model_name == "MonkeyOCR-pro-1.2B":
         processor = processor_w
         model = model_w
     else:
@@ -332,13 +332,13 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
 
         model_choice = gr.Radio(choices=[
             "Vision-Matters-7B", "R1-Onevision-7B",
-            "ViGaL-7B", "MonkeyOCR-1.2B-0709", "Visionary-R1-3B"
+            "ViGaL-7B", "MonkeyOCR-pro-1.2B", "Visionary-R1-3B"
         ],
         label="Select Model",
         value="Vision-Matters-7B")
 
         gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLMs-5x/discussions)")
-        gr.Markdown("> [MonkeyOCR-1.2B-0709](https://huggingface.co/echo840/MonkeyOCR-1.2B-0709): MonkeyOCR adopts a structure-recognition-relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
+        gr.Markdown("> [MonkeyOCR-pro-1.2B](https://huggingface.co/echo840/MonkeyOCR-pro-1.2B): MonkeyOCR adopts a structure-recognition-relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
         gr.Markdown("> [Vision Matters 7B](https://huggingface.co/Yuting6/Vision-Matters-7B): vision-matters is a simple visual perturbation framework that can be easily integrated into existing post-training pipelines including sft, dpo, and grpo. our findings highlight the critical role of visual perturbation: better reasoning begins with better seeing.")
         gr.Markdown("> [ViGaL 7B](https://huggingface.co/yunfeixie/ViGaL-7B): vigal-7b shows that training a 7b mllm on simple games like snake using reinforcement learning boosts performance on benchmarks like mathvista and mmmu without needing worked solutions or diagrams indicating transferable reasoning skills.")
         gr.Markdown("> [Visionary-R1](https://huggingface.co/maifoundations/Visionary-R1): visionary-r1 is a novel framework for training visual language models (vlms) to perform robust visual reasoning using reinforcement learning (rl). unlike traditional approaches that rely heavily on (sft) or (cot) annotations, visionary-r1 leverages only visual question-answer pairs and rl, making the process more scalable and accessible.")