prithivMLmods committed on
Commit
cb73066
·
verified ·
1 Parent(s): 7a60f0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -292,14 +292,12 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
292
  model_choice = gr.Radio(
293
  choices=["openbmb/MiniCPM-V-4", "Lumian-VLR-7B-Thinking", "Typhoon-OCR-3B", "DREX-062225-7B-exp", "olmOCR-7B-0225-preview"],
294
  label="Select Model",
295
- value="openbmb/MiniCPM-V-4"
296
  )
297
  gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")
298
- gr.Markdown("> **MiniCPM-V 4.0** is an efficient open-source multimodal model with strong performance in single/multi-image and video understanding, inheriting and improving upon the MiniCPM-V series.")
299
- gr.Markdown("> **Lumian-VLR-7B-Thinking** is a high-fidelity vision-language reasoning model for fine-grained multimodal understanding, video reasoning, and document comprehension.")
300
- gr.Markdown("> **olmOCR-7B-0225-preview** is a 7B parameter model designed for robust text extraction in complex OCR tasks.")
301
- gr.Markdown("> **Typhoon-OCR-3B** is a 3B parameter OCR model optimized for efficient and accurate character recognition.")
302
- gr.Markdown("> **DREX-062225-exp** is an experimental model emphasizing strong document reading, extraction, and vision-language understanding.")
303
  gr.Markdown("> ⚠️ Note: Video inference performance can vary significantly between models.")
304
 
305
  image_submit.click(
 
292
  model_choice = gr.Radio(
293
  choices=["openbmb/MiniCPM-V-4", "Lumian-VLR-7B-Thinking", "Typhoon-OCR-3B", "DREX-062225-7B-exp", "olmOCR-7B-0225-preview"],
294
  label="Select Model",
295
+ value="Lumian-VLR-7B-Thinking"
296
  )
297
  gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")
298
+ gr.Markdown("> MiniCPM-V 4.0 is the latest efficient model in the MiniCPM-V series. The model is built based on SigLIP2-400M and MiniCPM4-3B with a total of 4.1B parameters. It inherits the strong single-image, multi-image and video understanding performance of MiniCPM-V 2.6 with largely improved efficiency. Lumian-VLR-7B-Thinking is a high-fidelity vision-language reasoning model built on Qwen2.5-VL-7B-Instruct, designed for fine-grained multimodal understanding, video reasoning, and document comprehension through explicit grounded reasoning.")
299
+ gr.Markdown("> olmOCR-7B-0225-preview is a 7B parameter open large model designed for OCR tasks with robust text extraction, especially in complex document layouts. Typhoon-OCR-3B is a 3B parameter OCR model optimized for efficient and accurate optical character recognition in challenging conditions.")
300
+ gr.Markdown("> DREX-062225-exp is an experimental multimodal model emphasizing strong document reading and extraction capabilities combined with vision-language understanding to support detailed document parsing and reasoning tasks.")
 
 
301
  gr.Markdown("> ⚠️ Note: Video inference performance can vary significantly between models.")
302
 
303
  image_submit.click(