Update app.py
app.py (CHANGED)
@@ -282,23 +282,18 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     with gr.Column(elem_classes="canvas-output"):
         gr.Markdown("## Output")
         output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=2, show_copy_button=True)
-
         with gr.Accordion("(Result.md)", open=False):
             markdown_output = gr.Markdown(label="(Result.Md)")
-
-
+
     model_choice = gr.Radio(
         choices=["Lumian-VLR-7B-Thinking", "DREX-062225-7B-exp", "olmOCR-7B-0225-preview", "Typhoon-OCR-3B"],
         label="Select Model",
-        value="
+        value="Lumian-VLR-7B-Thinking"
     )
 
-    gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/
-    gr.Markdown(">
-    gr.Markdown(">
-    gr.Markdown("> [Typhoon-OCR-3B](https://huggingface.co/scb10x/typhoon-ocr-3b): a bilingual document parsing model built specifically for real-world documents in thai and english, inspired by models like olmocr, based on qwen2.5-vl-instruction. this model is intended to be used with a specific prompt only.")
-    gr.Markdown("> [olmOCR-7B-0225](https://huggingface.co/allenai/olmOCR-7B-0225-preview): the olmocr-7b-0225-preview model is based on qwen2-vl-7b, optimized for document-level optical character recognition (ocr), long-context vision-language understanding, and accurate image-to-text conversion with mathematical latex formatting. designed with a focus on high-fidelity visual-textual comprehension.")
-    gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
+    gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")
+    gr.Markdown("> Lumian-VLR-7B-Thinking is a high-fidelity vision-language reasoning model built on Qwen2.5-VL-7B-Instruct, excelling at fine-grained multimodal tasks such as image captioning, sampled video reasoning, and document comprehension through explicit grounded reasoning and advanced reinforcement learning. olmOCR-7B-0225-preview, developed by AllenAI, is a Qwen2-VL-7B-Instruct derivative optimized specifically for robust document OCR, efficiently processing large volumes of document images with specialized prompting and high scalability.")
+    gr.Markdown("> Typhoon-OCR-3B targets bilingual (Thai and English) document parsing, providing reliable OCR and text extraction for real-world documents, emphasizing usability in diverse and complex layouts. DREX-062225-exp is a document retrieval and extraction expert model, fine-tuned from docscopeOCR-7B, focusing on superior document analysis, structured data extraction, and maintaining advanced OCR capabilities including LaTeX and multilingual support. Together, these models represent the state-of-the-art in multimodal document understanding, OCR, and vision-language reasoning for a wide range of real-world and research applications.")
 
     image_submit.click(
         fn=generate_image,
@@ -310,11 +305,6 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         inputs=[model_choice, video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
         outputs=[output, markdown_output]
     )
-    # download_btn.click(
-    #     fn=save_to_md,
-    #     inputs=output,
-    #     outputs=None
-    # )
 
 if __name__ == "__main__":
     demo.queue(max_size=30).launch(share=True, mcp_server=True, ssr_mode=False, show_error=True)
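The functional core of the first hunk is the new explicit default for the model selector: a `gr.Radio` created without `value=` starts with no selection, so the first submit passes `None` for that input to the event handler. Below is a minimal, self-contained sketch of the pattern; the `run` function is a placeholder stand-in, not the Space's actual `generate_image`/`generate_video` handlers:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Preselecting a default means the handler never receives None for
    # model_choice, which is what value="Lumian-VLR-7B-Thinking" fixes.
    model_choice = gr.Radio(
        choices=["Lumian-VLR-7B-Thinking", "DREX-062225-7B-exp",
                 "olmOCR-7B-0225-preview", "Typhoon-OCR-3B"],
        label="Select Model",
        value="Lumian-VLR-7B-Thinking",
    )
    query = gr.Textbox(label="Query")
    output = gr.Textbox(label="Raw Output Stream", interactive=False)
    submit = gr.Button("Submit")

    def run(model: str, q: str) -> str:
        # Placeholder for the Space's real model dispatch.
        return f"[{model}] {q}"

    submit.click(fn=run, inputs=[model_choice, query], outputs=output)

if __name__ == "__main__":
    demo.launch()
```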
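The second hunk permanently drops the commented-out `download_btn` wiring. The `save_to_md` helper it referenced never appears in this diff, so the sketch below is a hypothetical reconstruction of what such a hook could look like, assuming the intent was to save the raw output stream as a downloadable Result.md; the helper body and the `gr.File` output are assumptions, not the Space's code:

```python
import gradio as gr

def save_to_md(text: str) -> str:
    # Hypothetical helper (never shown in the diff): persist the raw output
    # stream to Result.md and return the path so Gradio can serve it.
    path = "Result.md"
    with open(path, "w", encoding="utf-8") as f:
        f.write(text or "")
    return path

with gr.Blocks() as demo:
    output = gr.Textbox(label="Raw Output Stream")
    download_btn = gr.Button("Download Result.md")
    md_file = gr.File(label="Result.md")
    # The removed block wired outputs=None, which would discard the return
    # value; routing the returned path into a gr.File component is what
    # actually surfaces a download link in the UI.
    download_btn.click(fn=save_to_md, inputs=output, outputs=md_file)

if __name__ == "__main__":
    demo.launch()
```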