Spaces: Runtime error
import gradio as gr
import torch
from transformers import AutoProcessor, AutoModelForCausalLM
# Load the processor and model; trust_remote_code lets transformers run the
# custom modeling/processing code shipped in the model repository (these Auto*
# calls only succeed if the repo actually provides transformers-compatible code)
processor = AutoProcessor.from_pretrained(
    "lmms-lab/LLaVA-Video-7B-Qwen2",
    trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "lmms-lab/LLaVA-Video-7B-Qwen2",
    trust_remote_code=True
)

# Set the device (use GPU if available)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
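
# Added debugging aid (an assumption, not part of the original Space): logging
# the resolved device, and the GPU name when one is present, makes the Space
# logs much easier to read if the app later fails with a memory or dtype error.
print(f"Running on {device}")
if device == "cuda":
    print(f"GPU: {torch.cuda.get_device_name(0)}")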
def analyze_video(video_path):
    """
    Analyzes a concert/event video to determine the moment when the crowd is most engaged.
    """
    # Prompt instructing the model what to look for
    prompt = "Analyze this video of a concert and determine the moment when the crowd is most engaged."
    # Preprocess the video and prompt into model inputs
    inputs = processor(text=prompt, video=video_path, return_tensors="pt")
    # Move all tensor inputs to the selected device
    inputs = {key: value.to(device) for key, value in inputs.items()}
    # Generate the model's response
    outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode the generated tokens to a human-readable string (for a causal LM,
    # the decoded text may echo the prompt before the answer)
    answer = processor.decode(outputs[0], skip_special_tokens=True)
    return answer
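
# Hypothetical helper (not part of the original Space; the default file name is
# an assumption): runs a single analysis from a Python shell, which is handy for
# debugging the model call without going through the Gradio UI.
def smoke_test(sample_path: str = "sample_clip.mp4") -> None:
    """Run one analysis on a local video file and print the result."""
    print(analyze_video(sample_path))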
# Create the Gradio interface
iface = gr.Interface(
    fn=analyze_video,
    # gr.Video has no `type` argument; it passes the uploaded file to the
    # function as a filepath by default
    inputs=gr.Video(label="Upload Concert/Event Video"),
    outputs=gr.Textbox(label="Engagement Analysis"),
    title="Crowd Engagement Analyzer",
    description=(
        "Upload a video of a concert or event and the model will analyze "
        "the video to identify the moment when the crowd is most engaged."
    )
)

if __name__ == "__main__":
    iface.launch()
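
# Packaging note (contents are assumptions, not read from the Space's files):
# a Space running this app also needs a requirements.txt next to app.py,
# roughly:
#
#     gradio
#     torch
#     transformers
#
# plus whichever video-decoding library the model's remote code imports
# (for example decord or av). A missing dependency is a common cause of the
# "Runtime error" status shown on the Space page.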