Spaces — status: Runtime error
Commit: "ZeroGPU compatible update"
Browse files — 1 file changed: image_captioning.py (+1, −6)

image_captioning.py  CHANGED
@@ -321,9 +321,9 @@ def analyze_video_activities(video_path):
 321
 322      # Now process chunks
 323      chunk_generator = encode_video_in_chunks(video_path)
 324 +    model, tokenizer, processor = load_model_and_tokenizer()
 325
 326      for chunk_idx, video_frames, chunk_info in chunk_generator:
     -        model, tokenizer, processor = load_model_and_tokenizer()
 327          prompt = "Analyze this construction site video chunk and describe the activities happening. Focus on construction activities, machinery usage, and worker actions. Include any construction equipment or machinery you can identify."
 328          response = process_video_chunk(video_frames, model, tokenizer, processor, prompt)
 329          print(f"Chunk {chunk_idx}: {response}")

@@ -364,11 +364,6 @@ def analyze_video_activities(video_path):
 364              }
 365
 366              all_activities.append(activity)
     -
     -            # Cleanup
     -            del model, tokenizer, processor
     -            torch.cuda.empty_cache()
     -            gc.collect()
 367
 368      # Sort activities by timestamp
 369      all_activities.sort(key=lambda x: x['timestamp_seconds'])