Spaces: Running on Zero
Update app.py
app.py CHANGED
```diff
@@ -27,8 +27,8 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-# Load
-MODEL_ID_M = "
+# Load Camel-Doc-OCR-080125
+MODEL_ID_M = "prithivMLmods/Camel-Doc-OCR-080125"
 processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
 model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_M, trust_remote_code=True,
```
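The tail of the `from_pretrained` call falls outside the hunk, and the removed lines are truncated in the diff view, so the previous model ID is not recoverable here. For orientation, a minimal sketch of how a Qwen2.5-VL checkpoint like Camel-Doc-OCR-080125 is typically loaded in these Spaces; the `torch_dtype`, `.to(device)`, and `.eval()` calls are assumptions about the elided lines, not the commit's verbatim code:

```python
import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load Camel-Doc-OCR-080125 (the new MODEL_ID_M introduced by this commit).
MODEL_ID_M = "prithivMLmods/Camel-Doc-OCR-080125"
processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_M,
    trust_remote_code=True,
    torch_dtype=torch.float16,  # assumed: half precision is the usual choice on ZeroGPU
).to(device).eval()
```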
```diff
@@ -101,7 +101,7 @@ def generate_image(model_name: str,
     """
     Generates responses using the selected model for image input.
     """
-    if model_name == "
+    if model_name == "Camel-Doc-OCR-080125(v2)":
         processor = processor_m
         model = model_m
     elif model_name == "ViGaL-7B":
```
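Only the dispatch line changes: the first branch now routes the renamed radio choice to `processor_m`/`model_m`. The generation code below the dispatch is outside the diff; here is a sketch of the standard Qwen2.5-VL image path under that assumption, with `image`, `text`, and `max_new_tokens` as hypothetical parameter names:

```python
from PIL import Image

def run_image_inference(processor, model, image: Image.Image, text: str,
                        max_new_tokens: int = 512) -> str:
    # Standard Qwen2.5-VL chat format: one image plus the user's text prompt.
    messages = [{
        "role": "user",
        "content": [{"type": "image"}, {"type": "text", "text": text}],
    }]
    prompt = processor.apply_chat_template(messages, tokenize=False,
                                           add_generation_prompt=True)
    inputs = processor(text=[prompt], images=[image],
                       return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Strip the prompt tokens so only the generated answer is decoded.
    trimmed = output_ids[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(trimmed, skip_special_tokens=True)[0]
```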
```diff
@@ -168,7 +168,7 @@ def generate_video(model_name: str,
     """
     Generates responses using the selected model for video input.
     """
-    if model_name == "
+    if model_name == "Camel-Doc-OCR-080125(v2)":
         processor = processor_m
         model = model_m
     elif model_name == "ViGaL-7B":
```
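`generate_video` receives the same one-line rename. How frames reach the model is not part of the hunk; a common approach in this family of Spaces is to sample a fixed number of frames and reuse the image pipeline on them. A sketch of such sampling, with OpenCV and the frame count as assumptions:

```python
import cv2
import numpy as np
from PIL import Image

def sample_frames(video_path: str, num_frames: int = 8) -> list[Image.Image]:
    """Evenly sample frames from a video as PIL images."""
    cap = cv2.VideoCapture(video_path)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    indices = np.linspace(0, max(total - 1, 0), num_frames, dtype=int)
    frames = []
    for idx in indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
        ok, frame = cap.read()
        if ok:
            # OpenCV returns BGR; convert to RGB before handing frames to the processor.
            frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    cap.release()
    return frames
```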
```diff
@@ -331,14 +331,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         #download_btn = gr.Button("Download Result.md")
 
         model_choice = gr.Radio(choices=[
-            "
+            "Camel-Doc-OCR-080125(v2)", "WR30a-Deep-7B-0711",
             "ViGaL-7B", "MonkeyOCR-pro-1.2B", "Visionary-R1-3B"
         ],
             label="Select Model",
             value="Vision-Matters-7B")
 
         gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLMs-5x/discussions)")
-        gr.Markdown("> [
+        gr.Markdown("> [Camel-Doc-OCR-080125(v2)](https://huggingface.co/prithivMLmods/WR30a-Deep-7B-0711): the camel-doc-ocr-080125 model is a fine-tuned version of qwen2.5-vl-7b-instruct, optimized for document retrieval, content extraction, and analysis recognition. built on top of the qwen2.5-vl architecture, this model enhances document comprehension capabilities.")
         gr.Markdown("> [MonkeyOCR-pro-1.2B](https://huggingface.co/echo840/MonkeyOCR-pro-1.2B): MonkeyOCR adopts a structure-recognition-relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
         gr.Markdown("> [Vision Matters 7B](https://huggingface.co/Yuting6/Vision-Matters-7B): vision-matters is a simple visual perturbation framework that can be easily integrated into existing post-training pipelines including sft, dpo, and grpo. our findings highlight the critical role of visual perturbation: better reasoning begins with better seeing.")
         gr.Markdown("> [ViGaL 7B](https://huggingface.co/yunfeixie/ViGaL-7B): vigal-7b shows that training a 7b mllm on simple games like snake using reinforcement learning boosts performance on benchmarks like mathvista and mmmu without needing worked solutions or diagrams indicating transferable reasoning skills.")
```
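The radio's current value is what arrives as `model_name` in `generate_image`/`generate_video`. Note that the hunk keeps `value="Vision-Matters-7B"` even though that label is no longer in `choices`, so the default no longer matches any option. A sketch of how such a radio is typically wired to a handler; the button, input, and output component names are assumptions, and `generate_image` is stubbed to keep the example self-contained:

```python
import gradio as gr

def generate_image(model_name: str, image, text: str) -> str:
    # Stub standing in for the app's dispatch-and-generate function above.
    return f"[{model_name}] {text}"

with gr.Blocks() as demo:
    model_choice = gr.Radio(
        choices=["Camel-Doc-OCR-080125(v2)", "WR30a-Deep-7B-0711",
                 "ViGaL-7B", "MonkeyOCR-pro-1.2B", "Visionary-R1-3B"],
        label="Select Model",
        value="Camel-Doc-OCR-080125(v2)",  # assumed default that exists in choices
    )
    image_input = gr.Image(type="pil", label="Image")  # assumed component
    prompt_box = gr.Textbox(label="Prompt")            # assumed component
    output_box = gr.Textbox(label="Output")
    run_btn = gr.Button("Run")
    # The selected radio value is passed positionally as model_name.
    run_btn.click(fn=generate_image,
                  inputs=[model_choice, image_input, prompt_box],
                  outputs=output_box)

if __name__ == "__main__":
    demo.launch()
```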