import os
import uuid
import time
from threading import Thread

import gradio as gr
import spaces
import torch
import numpy as np
from PIL import Image
import cv2
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
    TextIteratorStreamer,
)
# Constants for text generation
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
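# Each of the five checkpoints below is loaded once in fp16 and kept resident on the selected device;
# the model radio button in the UI only switches between these already-loaded model/processor pairs.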
# Load DREX-062225-exp
MODEL_ID_X = "prithivMLmods/DREX-062225-exp"
processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)
model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID_X,
trust_remote_code=True,
torch_dtype=torch.float16
).to(device).eval()
# Load typhoon-ocr-3b
MODEL_ID_T = "scb10x/typhoon-ocr-3b"
processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID_T,
trust_remote_code=True,
torch_dtype=torch.float16
).to(device).eval()
# Load olmOCR-7B-0225-preview
MODEL_ID_O = "allenai/olmOCR-7B-0225-preview"
processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
model_o = Qwen2VLForConditionalGeneration.from_pretrained(
MODEL_ID_O,
trust_remote_code=True,
torch_dtype=torch.float16
).to(device).eval()
# Load Lumian-VLR-7B-Thinking
MODEL_ID_J = "prithivMLmods/Lumian-VLR-7B-Thinking"
processor_j = AutoProcessor.from_pretrained(MODEL_ID_J, trust_remote_code=True)
model_j = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID_J,
trust_remote_code=True,
torch_dtype=torch.float16
).to(device).eval()
# Load LMM-R1-MGT-PerceReason
MODEL_ID_F = "VLM-Reasoner/LMM-R1-MGT-PerceReason"
processor_f = AutoProcessor.from_pretrained(MODEL_ID_F, trust_remote_code=True)
model_f = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID_F,
trust_remote_code=True,
torch_dtype=torch.float16
).to(device).eval()
def downsample_video(video_path):
"""
Downsamples the video to evenly spaced frames.
Each frame is returned as a PIL image along with its timestamp.
"""
vidcap = cv2.VideoCapture(video_path)
total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = vidcap.get(cv2.CAP_PROP_FPS)
frames = []
frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
for i in frame_indices:
vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
success, image = vidcap.read()
if success:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
pil_image = Image.fromarray(image)
            # Guard against a zero FPS reading from OpenCV.
            timestamp = round(i / fps, 2) if fps else 0.0
frames.append((pil_image, timestamp))
vidcap.release()
return frames
@spaces.GPU
def generate_image(model_name: str, text: str, image: Image.Image,
max_new_tokens: int = 1024,
temperature: float = 0.6,
top_p: float = 0.9,
top_k: int = 50,
repetition_penalty: float = 1.2):
"""
Generates responses using the selected model for image input.
"""
if model_name == "DREX-062225-7B-exp":
processor = processor_x
model = model_x
elif model_name == "olmOCR-7B-0225-preview":
processor = processor_o
model = model_o
elif model_name == "Typhoon-OCR-3B":
processor = processor_t
model = model_t
elif model_name == "Lumian-VLR-7B-Thinking":
processor = processor_j
model = model_j
elif model_name == "LMM-R1-MGT-PerceReason":
processor = processor_f
model = model_f
else:
yield "Invalid model selected.", "Invalid model selected."
return
if image is None:
yield "Please upload an image.", "Please upload an image."
return
messages = [{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": text},
]
}]
prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(
text=[prompt_full],
images=[image],
return_tensors="pt",
padding=True,
truncation=False,
max_length=MAX_INPUT_TOKEN_LENGTH
).to(device)
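    # Run generation on a background thread and stream decoded tokens back to the UI as they arrive.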
streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = {
        **inputs,
        "streamer": streamer,
        "max_new_tokens": max_new_tokens,
        "do_sample": True,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "repetition_penalty": repetition_penalty,
    }
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
buffer = ""
for new_text in streamer:
buffer += new_text
time.sleep(0.01)
yield buffer, buffer
@spaces.GPU
def generate_video(model_name: str, text: str, video_path: str,
max_new_tokens: int = 1024,
temperature: float = 0.6,
top_p: float = 0.9,
top_k: int = 50,
repetition_penalty: float = 1.2):
"""
Generates responses using the selected model for video input.
"""
if model_name == "DREX-062225-7B-exp":
processor = processor_x
model = model_x
elif model_name == "olmOCR-7B-0225-preview":
processor = processor_o
model = model_o
elif model_name == "Typhoon-OCR-3B":
processor = processor_t
model = model_t
elif model_name == "Lumian-VLR-7B-Thinking":
processor = processor_j
model = model_j
elif model_name == "LMM-R1-MGT-PerceReason":
processor = processor_f
model = model_f
else:
yield "Invalid model selected.", "Invalid model selected."
return
if video_path is None:
yield "Please upload a video.", "Please upload a video."
return
frames = downsample_video(video_path)
messages = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{"role": "user", "content": [{"type": "text", "text": text}]}
]
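    # Interleave each sampled frame with its timestamp so the model can reference specific moments in the video.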
for frame in frames:
image, timestamp = frame
messages[1]["content"].append({"type": "text", "text": f"Frame {timestamp}:"})
messages[1]["content"].append({"type": "image", "image": image})
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
truncation=False,
max_length=MAX_INPUT_TOKEN_LENGTH
).to(device)
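    # Generate on a worker thread and stream partial text to the UI, stripping the chat end-of-turn marker.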
streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = {
**inputs,
"streamer": streamer,
"max_new_tokens": max_new_tokens,
"do_sample": True,
"temperature": temperature,
"top_p": top_p,
"top_k": top_k,
"repetition_penalty": repetition_penalty,
}
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
buffer = ""
for new_text in streamer:
buffer += new_text
buffer = buffer.replace("<|im_end|>", "")
time.sleep(0.01)
yield buffer, buffer
def save_to_md(output_text):
"""
Saves the output text to a Markdown file and returns the file path for download.
"""
file_path = f"result_{uuid.uuid4()}.md"
with open(file_path, "w") as f:
f.write(output_text)
return file_path
# Define examples for image and video inference
image_examples = [
["Convert this page to doc [text] precisely.", "images/3.png"],
["Convert this page to doc [text] precisely.", "images/4.png"],
["Convert this page to doc [text] precisely.", "images/1.png"],
["Convert chart to OTSL.", "images/2.png"]
]
video_examples = [
["Explain the video in detail.", "videos/2.mp4"],
["Explain the ad in detail.", "videos/1.mp4"]
]
# CSS to style the submit button and present the output area as a "Canvas" panel
css = """
.submit-btn {
background-color: #2980b9 !important;
color: white !important;
}
.submit-btn:hover {
background-color: #3498db !important;
}
.canvas-output {
border: 2px solid #4682B4;
border-radius: 10px;
padding: 20px;
}
"""
# Create the Gradio Interface
with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
gr.Markdown("# **[Multimodal VLM Thinking](https://huggingface.co/collections/prithivMLmods/multimodal-implementations-67c9982ea04b39f0608badb0)**")
with gr.Row():
with gr.Column():
with gr.Tabs():
with gr.TabItem("Image Inference"):
image_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
image_upload = gr.Image(type="pil", label="Image")
image_submit = gr.Button("Submit", elem_classes="submit-btn")
gr.Examples(
examples=image_examples,
inputs=[image_query, image_upload]
)
with gr.TabItem("Video Inference"):
video_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
video_upload = gr.Video(label="Video")
video_submit = gr.Button("Submit", elem_classes="submit-btn")
gr.Examples(
examples=video_examples,
inputs=[video_query, video_upload]
)
with gr.Accordion("Advanced options", open=False):
max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
with gr.Column():
with gr.Column(elem_classes="canvas-output"):
gr.Markdown("## Output")
output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=2, show_copy_button=True)
with gr.Accordion("(Result.md)", open=False):
                    markdown_output = gr.Markdown(label="(Result.md)")
model_choice = gr.Radio(
choices=["Lumian-VLR-7B-Thinking", "DREX-062225-7B-exp", "olmOCR-7B-0225-preview", "LMM-R1-MGT-PerceReason", "Typhoon-OCR-3B"],
label="Select Model",
value="Lumian-VLR-7B-Thinking"
)
gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")
gr.Markdown("> Lumian-VLR-7B-Thinking is a high-fidelity vision-language reasoning model built on Qwen2.5-VL-7B-Instruct, excelling at fine-grained multimodal tasks such as image captioning, sampled video reasoning, and document comprehension through explicit grounded reasoning and advanced reinforcement learning. olmOCR-7B-0225-preview, developed by AllenAI, is a Qwen2-VL-7B-Instruct derivative optimized specifically for robust document OCR, efficiently processing large volumes of document images with specialized prompting and high scalability.")
gr.Markdown("> Typhoon-OCR-3B targets bilingual (Thai and English) document parsing, providing reliable OCR and text extraction for real-world documents, emphasizing usability in diverse and complex layouts. DREX-062225-exp is a document retrieval and extraction expert model, fine-tuned from docscopeOCR-7B, focusing on superior document analysis, structured data extraction, and maintaining advanced OCR capabilities including LaTeX and multilingual support. Together, these models represent the state-of-the-art in multimodal document understanding, OCR, and vision-language reasoning for a wide range of real-world and research applications.")
image_submit.click(
fn=generate_image,
inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
outputs=[output, markdown_output]
)
video_submit.click(
fn=generate_video,
inputs=[model_choice, video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
outputs=[output, markdown_output]
)
if __name__ == "__main__":
demo.queue(max_size=30).launch(share=True, mcp_server=True, ssr_mode=False, show_error=True)