hina19 committed on
Commit 398f54d · verified · 1 Parent(s): f8944c0

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -8,7 +8,7 @@ from scenedetect import open_video, SceneManager, ContentDetector
 from transformers import BlipProcessor, BlipForConditionalGeneration
 from openai import OpenAI
 import base64
-import moviepy.editor as mp
+import moviepy
 # Load AI models
 caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
@@ -128,9 +128,9 @@ if uploaded_file:
 
     def create_summary_video(image_folder, output_video):
         images = sorted([os.path.join(image_folder, img) for img in os.listdir(image_folder) if img.endswith(".jpg")])
-        clips = [mp.ImageClip(img).set_duration(2) for img in images] # 2 sec per frame
+        clips = [moviepy.editor.ImageClip(img).set_duration(2) for img in images] # 2 sec per frame
 
-        video = mp.concatenate_videoclips(clips, method="compose")
+        video = moviepy.editor.concatenate_videoclips(clips, method="compose")
         video.write_videofile(output_video, fps=24)
 
     # Example usage
@@ -141,7 +141,7 @@ if uploaded_file:
     # st.write("🎬 Merging audio with the video...")
 
     # def add_audio_to_video(video_path, audio_path, output_video="final_video.mp4"):
-    # video = mp.VideoFileClip(video_path)
+    # video = moviepy.editor.VideoFileClip(video_path)
     # audio = mp.AudioFileClip(audio_path)
     # if audio.duration > video.duration:
     # audio = audio.subclip(0, video.duration)
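
After this change, the helper in app.py references MoviePy through fully qualified moviepy.editor names instead of the old mp alias. Below is a minimal standalone sketch of that helper (not part of the commit), assuming MoviePy 1.x; it spells the import as import moviepy.editor, since import moviepy on its own may not load the editor submodule, and in MoviePy 2.x the editor module was removed with clips imported from moviepy directly.

import os
import moviepy.editor  # load the editor submodule explicitly so the qualified names resolve

def create_summary_video(image_folder, output_video):
    # Collect the extracted key frames in name order.
    images = sorted(
        os.path.join(image_folder, img)
        for img in os.listdir(image_folder)
        if img.endswith(".jpg")
    )
    # Show each frame for 2 seconds, then concatenate into a single clip.
    clips = [moviepy.editor.ImageClip(img).set_duration(2) for img in images]
    video = moviepy.editor.concatenate_videoclips(clips, method="compose")
    video.write_videofile(output_video, fps=24)

# Example usage (hypothetical paths):
# create_summary_video("scene_frames", "summary_video.mp4")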