import gradio as gr
from transformers.image_utils import load_image
from threading import Thread
import time
import torch
import spaces
import cv2
import numpy as np
from PIL import Image
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
    TextIteratorStreamer,
)
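# This Space routes each chat turn by prefix: a message starting with
# "@rolmocr" is handled by the RolmOCR (Qwen2.5-VL) branch, everything else
# by the default Qwen2VL OCR branch (see model_inference below).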
# ---------------------------
# Helper Functions
# ---------------------------
def progress_bar_html(label: str, primary_color: str = "#4B0082", secondary_color: str = "#9370DB") -> str:
    """
    Returns an HTML snippet for a thin animated progress bar with a label.
    Colors can be customized; the purple defaults are shared by both the
    Qwen2VL OCR and RolmOCR branches.
    """
    return f'''
<div style="display: flex; align-items: center;">
    <span style="margin-right: 10px; font-size: 14px;">{label}</span>
    <div style="width: 110px; height: 5px; background-color: {secondary_color}; border-radius: 2px; overflow: hidden;">
        <div style="width: 100%; height: 100%; background-color: {primary_color}; animation: loading 1.5s linear infinite;"></div>
    </div>
</div>
<style>
@keyframes loading {{
    0% {{ transform: translateX(-100%); }}
    100% {{ transform: translateX(100%); }}
}}
</style>
'''
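# Example (illustrative): a branch could pass its own theme explicitly, e.g.
#   progress_bar_html("Working...", primary_color="#006400", secondary_color="#90EE90")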
def downsample_video(video_path):
    """
    Downsamples a video file by extracting 10 evenly spaced frames.
    Returns a list of (PIL.Image, timestamp) tuples; empty if the video
    cannot be read.
    """
    vidcap = cv2.VideoCapture(video_path)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    frames = []
    if total_frames <= 0 or fps <= 0:
        vidcap.release()
        return frames
    # Determine 10 evenly spaced frame indices.
    frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
    for i in frame_indices:
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, int(i))
        success, image = vidcap.read()
        if success:
            # OpenCV decodes to BGR; convert to RGB before building the PIL image.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(image)
            timestamp = round(i / fps, 2)
            frames.append((pil_image, timestamp))
    vidcap.release()
    return frames
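# Example usage (illustrative; the path is a placeholder):
#   frames = downsample_video("examples/videoplayback.mp4")
#   for pil_image, ts in frames:
#       print(f"{ts}s -> {pil_image.size}")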
# ---------------------------
# Model and Processor Setup
# ---------------------------
# Qwen2VL OCR (default branch)
# QV_MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
QV_MODEL_ID = "prithivMLmods/coreOCR-7B-050325-preview"  # [or] prithivMLmods/Qwen2-VL-OCR2-2B-Instruct
qwen_processor = AutoProcessor.from_pretrained(QV_MODEL_ID, trust_remote_code=True)
qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
    QV_MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.float16,
).to("cuda").eval()

# RolmOCR branch (@RolmOCR)
ROLMOCR_MODEL_ID = "reducto/RolmOCR"
rolmocr_processor = AutoProcessor.from_pretrained(ROLMOCR_MODEL_ID, trust_remote_code=True)
rolmocr_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    ROLMOCR_MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to("cuda").eval()
# ---------------------------
# Main Inference Function
# ---------------------------
@spaces.GPU  # GPU allocation for ZeroGPU Spaces (the otherwise-unused `spaces` import suggests this decorator was intended)
def model_inference(input_dict, history):
    text = input_dict["text"].strip()
    files = input_dict.get("files", [])

    # RolmOCR Inference (@RolmOCR)
    if text.lower().startswith("@rolmocr"):
        # Remove the tag from the query.
        text_prompt = text[len("@rolmocr"):].strip()
        # Check if a video is provided for inference.
        if files and isinstance(files[0], str) and files[0].lower().endswith((".mp4", ".avi", ".mov")):
            video_path = files[0]
            frames = downsample_video(video_path)
            if not frames:
                yield "Error: Could not extract frames from the video."
                return
            # Build the message: prompt followed by each frame with its timestamp.
            content_list = [{"type": "text", "text": text_prompt}]
            for image, timestamp in frames:
                content_list.append({"type": "text", "text": f"Frame {timestamp}:"})
                content_list.append({"type": "image", "image": image})
            messages = [{"role": "user", "content": content_list}]
            # For video, extract images only.
            video_images = [image for image, _ in frames]
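            # apply_chat_template (tokenize=False) renders the prompt string
            # with image placeholder tokens; the PIL frames themselves are
            # passed separately through the processor's `images` argument.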
            prompt_full = rolmocr_processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            inputs = rolmocr_processor(
                text=[prompt_full],
                images=video_images,
                return_tensors="pt",
                padding=True,
            ).to("cuda")
        else:
            # Assume image(s) or a text-only query.
            if len(files) > 1:
                images = [load_image(image) for image in files]
            elif len(files) == 1:
                images = [load_image(files[0])]
            else:
                images = []
            if text_prompt == "" and not images:
                yield "Error: Please input a text query and/or provide an image for the @RolmOCR feature."
                return
            messages = [{
                "role": "user",
                "content": [
                    *[{"type": "image", "image": image} for image in images],
                    {"type": "text", "text": text_prompt},
                ],
            }]
            prompt_full = rolmocr_processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            inputs = rolmocr_processor(
                text=[prompt_full],
                images=images if images else None,
                return_tensors="pt",
                padding=True,
            ).to("cuda")
        streamer = TextIteratorStreamer(rolmocr_processor, skip_prompt=True, skip_special_tokens=True)
        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
        thread = Thread(target=rolmocr_model.generate, kwargs=generation_kwargs)
        thread.start()
        buffer = ""
        # The purple defaults of progress_bar_html serve as the RolmOCR theme.
        yield progress_bar_html("Processing with Qwen2.5VL (RolmOCR)")
        for new_text in streamer:
            buffer += new_text
            buffer = buffer.replace("<|im_end|>", "")
            time.sleep(0.01)
            yield buffer
        return
    # Default Inference: Qwen2VL OCR
    # Process files: support multiple images.
    if len(files) > 1:
        images = [load_image(image) for image in files]
    elif len(files) == 1:
        images = [load_image(files[0])]
    else:
        images = []
    # A text query is required; image(s) are optional.
    if text == "":
        yield "Error: Please input a text query (image(s) optional)."
        return
    messages = [{
        "role": "user",
        "content": [
            *[{"type": "image", "image": image} for image in images],
            {"type": "text", "text": text},
        ],
    }]
    prompt_full = qwen_processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = qwen_processor(
        text=[prompt_full],
        images=images if images else None,
        return_tensors="pt",
        padding=True,
    ).to("cuda")
    streamer = TextIteratorStreamer(qwen_processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
    thread = Thread(target=qwen_model.generate, kwargs=generation_kwargs)
    thread.start()
    buffer = ""
    yield progress_bar_html("Processing with Qwen2VL OCR")
    for new_text in streamer:
        buffer += new_text
        buffer = buffer.replace("<|im_end|>", "")
        time.sleep(0.01)
        yield buffer
# ---------------------------
# Gradio Interface
# ---------------------------
examples = [
    [{"text": "@RolmOCR OCR the Text in the Image", "files": ["rolm/1.jpeg"]}],
    [{"text": "@RolmOCR Explain the Ad in Detail", "files": ["examples/videoplayback.mp4"]}],
    [{"text": "@RolmOCR OCR the Image", "files": ["rolm/3.jpeg"]}],
    [{"text": "Extract as JSON table from the table", "files": ["examples/4.jpg"]}],
]
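# NOTE: the example assets (rolm/*.jpeg, examples/4.jpg, examples/videoplayback.mp4)
# are assumed to be present in the Space repository alongside this script.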
demo = gr.ChatInterface(
    fn=model_inference,
    description="# **Multimodal OCR `@RolmOCR and Default Qwen2VL OCR`**",
    examples=examples,
    textbox=gr.MultimodalTextbox(
        label="Query Input",
        file_types=["image", "video"],
        file_count="multiple",
        placeholder="Use tag @RolmOCR for RolmOCR, or leave blank for default Qwen2VL OCR",
    ),
    stop_btn="Stop Generation",
    multimodal=True,
    cache_examples=False,
)

demo.launch(debug=True)