Update app.py
app.py
CHANGED
@@ -8,7 +8,7 @@ from scenedetect import open_video, SceneManager, ContentDetector
 from transformers import BlipProcessor, BlipForConditionalGeneration
 from openai import OpenAI
 import base64
-
+import moviepy.editor as mp
 # Load AI models
 caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
@@ -128,9 +128,9 @@ if uploaded_file:
 
 def create_summary_video(image_folder, output_video):
     images = sorted([os.path.join(image_folder, img) for img in os.listdir(image_folder) if img.endswith(".jpg")])
-    clips = [
+    clips = [mp.ImageClip(img).set_duration(2) for img in images]  # 2 sec per frame
 
-    video =
+    video = mp.concatenate_videoclips(clips, method="compose")
     video.write_videofile(output_video, fps=24)
 
 # Example usage
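For reference, here is the patched function assembled as a self-contained snippet. This is a minimal sketch, assuming the moviepy 1.x API that the diff itself uses (the moviepy.editor namespace and set_duration were removed in moviepy 2.x); the folder and file names in the usage line are placeholders, not taken from the Space.

import os
import moviepy.editor as mp

def create_summary_video(image_folder, output_video):
    # Collect the extracted scene frames in name order
    images = sorted([os.path.join(image_folder, img) for img in os.listdir(image_folder) if img.endswith(".jpg")])
    # One still clip per frame, shown for 2 seconds each
    clips = [mp.ImageClip(img).set_duration(2) for img in images]
    # method="compose" pads the clips so frames of differing sizes can be concatenated
    video = mp.concatenate_videoclips(clips, method="compose")
    video.write_videofile(output_video, fps=24)

# Example usage (placeholder paths)
create_summary_video("scene_frames", "summary.mp4")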