# app.py
import gradio as gr
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

# Load the Qwen2.5-VL model and processor
model_id = "Qwen/Qwen2.5-VL-7B-Instruct"
processor = AutoProcessor.from_pretrained(model_id)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)


# Inference function
def ocr_with_qwen(image: Image.Image) -> str:
    # Build the prompt with the processor's chat template instead of hand-writing
    # <|im_start|> tokens, so the image placeholder is inserted where the model expects it.
    messages = [
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": "You are a helpful assistant. Extract all text from the image and output only the text.",
                }
            ],
        },
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": "Extract the text from this image."},
            ],
        },
    ]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=512)
    # Decode only the newly generated tokens, not the echoed prompt.
    generated = outputs[:, inputs["input_ids"].shape[1]:]
    result = processor.batch_decode(generated, skip_special_tokens=True)[0]
    return result.strip()


# Gradio UI
gr.Interface(
    fn=ocr_with_qwen,
    inputs=gr.Image(type="pil", label="Upload Image (e.g. test.jpg)"),
    outputs=gr.Textbox(label="Extracted Text"),
    title="OCR with Qwen2.5-VL",
    description="Upload an image to extract text using the Qwen2.5-VL model.",
).launch()
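
# Usage note (a suggested setup, not specified in the original file):
#   pip install gradio transformers accelerate torch pillow
#   python app.py
# Gradio prints a local URL; open it and upload an image such as test.jpg.
# accelerate is assumed here because device_map="auto" relies on it.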