import subprocess

# Install flash-attn at startup (a common pattern on Hugging Face Spaces);
# FLASH_ATTENTION_SKIP_CUDA_BUILD avoids compiling the CUDA kernels from source.
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)

from typing import Any, List

import gradio as gr
import requests
import spaces
import torch
from PIL import Image, ImageDraw
from transformers import AutoModelForImageTextToText, AutoProcessor
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize

import navigation  # local module providing get_navigation_prompt

# --- Configuration ---
MODEL_ID = "Hcompany/Holo1-7B"

# --- Model and Processor Loading (Load once) ---
print(f"Loading model and processor for {MODEL_ID}...")
model = None
processor = None
model_loaded = False
load_error_message = ""

try:
    model = AutoModelForImageTextToText.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
        trust_remote_code=True,
    ).to("cuda")
    processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
    model_loaded = True
    print("Model and processor loaded successfully.")
except Exception as e:
    load_error_message = (
        f"Error loading model/processor: {e}\n"
        "This might be due to network issues, an incorrect model ID, or missing "
        "dependencies (such as flash_attention_2 if it is enabled by default in the config).\n"
        "Ensure you have a stable internet connection and the necessary libraries installed."
    )
    print(load_error_message)


# --- Helper functions from the model card (or adapted) ---
@spaces.GPU(duration=120)
def run_inference_localization(
    messages_for_template: List[dict[str, Any]], pil_image_for_processing: Image.Image
) -> str:
    """
    Runs inference using the Holo1 model.

    - messages_for_template: The prompt structure, potentially including the PIL image object
      (which apply_chat_template converts to an image tag).
    - pil_image_for_processing: The actual PIL image to be processed into tensors.
    """
    model.to("cuda")
    torch.cuda.set_device(0)

    # 1. Apply the chat template to the messages. This creates the text part of the prompt,
    # including image tags if the image was part of `messages_for_template`.
    text_prompt = processor.apply_chat_template(messages_for_template, tokenize=False, add_generation_prompt=True)

    # 2. Process text and image together to get model inputs
    inputs = processor(
        text=[text_prompt],
        images=[pil_image_for_processing],  # Provide the actual image data here
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # 3. Generate the response.
    # do_sample=False gives more deterministic output, as in the model card's structured-output example.
    generated_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)

    # 4. Trim input_ids from generated_ids to keep only the newly generated part
    generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]

    # 5. Decode the generated tokens
    decoded_output = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return decoded_output[0] if decoded_output else ""


# --- Gradio processing function ---
def navigate(input_pil_image: Image.Image, task: str) -> tuple[str, Image.Image | None]:
    if not model_loaded or not processor or not model:
        return f"Model not loaded. Error: {load_error_message}", None
    if input_pil_image is None:
        return "No image provided. Please upload an image.", None
    if not task or task.strip() == "":
        return "No task provided. Please type a task.", input_pil_image.copy().convert("RGB")

    # 1. Prepare the image: resize according to the image processor's expected properties.
    # This ensures predicted coordinates match the (resized) image dimensions.
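    # Added explanation (values hedged, not read from this model's config): for
    # Qwen2-VL-style processors, smart_resize snaps height and width to multiples
    # of patch_size * merge_size (typically 14 * 2 = 28) while keeping
    # height * width within [min_pixels, max_pixels]. The practical consequence
    # for this demo is that the model's predicted Click(x, y) coordinates live in
    # the resized image's coordinate space, not the original upload's.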
    image_proc_config = processor.image_processor
    try:
        resized_height, resized_width = smart_resize(
            input_pil_image.height,
            input_pil_image.width,
            factor=image_proc_config.patch_size * image_proc_config.merge_size,
            min_pixels=image_proc_config.min_pixels,
            max_pixels=image_proc_config.max_pixels,
        )
        # Using LANCZOS for resampling, as it is generally good for downscaling.
        # The model card used `resample=None`, which may imply nearest or the default;
        # for visual quality in the demo, LANCZOS is a reasonable choice.
        resized_image = input_pil_image.resize(
            size=(resized_width, resized_height),
            resample=Image.Resampling.LANCZOS,  # type: ignore
        )
    except Exception as e:
        print(f"Error resizing image: {e}")
        return f"Error resizing image: {e}", input_pil_image.copy().convert("RGB")

    # 2. Create the prompt from the resized image (for correct image-tagging context) and the task
    prompt = navigation.get_navigation_prompt(task, resized_image, step=1)

    # 3. Run inference.
    # Pass `prompt` (which includes the image object for template processing)
    # and `resized_image` (for the actual tensor conversion).
    try:
        navigation_str = run_inference_localization(prompt, resized_image)
    except Exception as e:
        print(f"Error during model inference: {e}")
        return f"Error during model inference: {e}", resized_image.copy().convert("RGB")

    # Return both outputs expected by the interface: the raw action string and the
    # resized image the predicted coordinates refer to.
    return navigation_str, resized_image
    # return navigation.NavigationStep(**json.loads(navigation_str))


# --- Load Example Data ---
example_image = None
example_task = "Book a hotel in Paris on August 3rd for 3 nights"
try:
    example_image_url = "https://huggingface.co/Hcompany/Holo1-7B/resolve/main/calendar_example.jpg"
    example_image = Image.open(requests.get(example_image_url, stream=True).raw)
except Exception as e:
    print(f"Could not load example image from URL: {e}")
    # Create a placeholder image if loading fails, so the Gradio example still works
    try:
        example_image = Image.new("RGB", (200, 150), color="lightgray")
        draw = ImageDraw.Draw(example_image)
        draw.text((10, 10), "Example image\nfailed to load", fill="black")
    except Exception:
        # If PIL itself is an issue (unlikely here, but good for robustness)
        pass


# --- Gradio Interface Definition ---
title = "Holo1-7B: Action VLM Navigation Demo"
description = """
This demo showcases **Holo1-7B**, an Action Vision-Language Model developed by HCompany,
fine-tuned from Qwen/Qwen2.5-VL-7B-Instruct. It is designed to interact with web interfaces
like a human user. Here, we demonstrate its UI localization capability.

**How to use:**
1. Upload an image (e.g., a screenshot of a UI, like the calendar example).
2. Provide a textual task (e.g., "Book a hotel in Paris on August 3rd for 3 nights").
3. The model will predict the navigation step.

The model processes a resized version of your input image. Coordinates are relative to this resized image.
"""
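
# --- Optional helper (added sketch; not from the original model card code) ---
# The output textbox below is labeled "Predicted Coordinates (Format: Click(x,y))",
# so this sketch assumes the returned action string contains a "Click(x, y)"
# pattern. `draw_click_point` is a hypothetical name introduced here; if the
# assumption holds, `navigate` could return
# `draw_click_point(resized_image, navigation_str)` as its second value to
# populate the annotated-image output.
import re


def draw_click_point(image: Image.Image, action_str: str, radius: int = 8) -> Image.Image:
    """Return a copy of `image` marked at the first Click(x, y) found in `action_str`, if any."""
    annotated = image.copy().convert("RGB")
    match = re.search(r"Click\((\d+),\s*(\d+)\)", action_str)
    if match:
        x, y = int(match.group(1)), int(match.group(2))
        draw = ImageDraw.Draw(annotated)
        # Circle plus crosshair so the point stays visible on busy screenshots
        draw.ellipse((x - radius, y - radius, x + radius, y + radius), outline="red", width=3)
        draw.line((x - 2 * radius, y, x + 2 * radius, y), fill="red", width=2)
        draw.line((x, y - 2 * radius, x, y + 2 * radius), fill="red", width=2)
    return annotated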

article = f"""
Model: {MODEL_ID} by HCompany | Paper: HCompany Tech Report | Blog: Surfer-H Blog Post
"""

if not model_loaded:
    with gr.Blocks() as demo:
        gr.Markdown("# ⚠️ Error: Model Failed to Load ⚠️")
        gr.Markdown(f"{load_error_message}")
        gr.Markdown(
            "Please check the console output for more details. "
            "Reloading the space might help if it's a temporary issue."
        )
else:
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown(f"# {title}")
        # gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                input_image_component = gr.Image(type="pil", label="Input UI Image", height=400)
                task_component = gr.Textbox(
                    label="Task",
                    placeholder="e.g., Click the 'Login' button",
                    info="Type the action you want the model to localize on the image.",
                )
                submit_button = gr.Button("Localize Click", variant="primary")
            with gr.Column(scale=1):
                output_coords_component = gr.Textbox(
                    label="Predicted Coordinates (Format: Click(x,y))", interactive=False
                )
                output_image_component = gr.Image(
                    type="pil", label="Image with Predicted Click Point", height=400, interactive=False
                )

        if example_image:
            gr.Examples(
                examples=[[example_image, example_task]],
                inputs=[input_image_component, task_component],
                outputs=[output_coords_component, output_image_component],
                fn=navigate,
                cache_examples="lazy",
            )

        gr.Markdown(article)

        submit_button.click(
            fn=navigate,
            inputs=[input_image_component, task_component],
            outputs=[output_coords_component, output_image_component],
        )

if __name__ == "__main__":
    demo.launch(debug=True)