Update app.py
app.py CHANGED
@@ -698,7 +698,6 @@ def fix_imagemagick_policy():
 
 
 
-
 def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
     """Create a video clip with synchronized subtitles and narration."""
     try:
@@ -711,6 +710,7 @@ def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
         audio_duration = audio_clip.duration
         target_duration = audio_duration + 0.2
 
+        # Handle media (video or image)
         if asset_type == "video":
             clip = VideoFileClip(media_path)
             clip = resize_to_fill(clip, TARGET_RESOLUTION)
@@ -731,7 +731,7 @@ def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
         else:
             return None
 
-        #
+        # Add subtitles if enabled
         if narration_text and CAPTION_COLOR != "transparent":
             try:
                 words = narration_text.split()
@@ -748,22 +748,23 @@ def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
                 chunk_duration = audio_duration / len(chunks)
                 subtitle_clips = []
                 subtitle_y_position = int(TARGET_RESOLUTION[1] * 0.70)
-
+
                 for i, chunk_text in enumerate(chunks):
                     start_time = i * chunk_duration
                     end_time = (i + 1) * chunk_duration
                     txt_clip = TextClip(
                         chunk_text,
                         fontsize=font_size,
-                        font='DejaVu-Sans',
+                        font='DejaVu-Sans',  # Hugging Face friendly
                         color=CAPTION_COLOR,
                         stroke_width=2,
-                        stroke_color='black'
+                        stroke_color='black'  # Outline for readability
                     ).set_start(start_time).set_end(end_time)
                     txt_clip = txt_clip.set_position(('center', subtitle_y_position))
                     subtitle_clips.append(txt_clip)
 
                 clip = CompositeVideoClip([clip] + subtitle_clips)
+
             except Exception as sub_error:
                 print(f"Subtitle error: {sub_error}")
                 txt_clip = TextClip(
@@ -776,10 +777,11 @@ def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
                 ).set_position(('center', int(TARGET_RESOLUTION[1] / 3))).set_duration(clip.duration)
                 clip = CompositeVideoClip([clip, txt_clip])
 
-
+        # Set audio
         clip = clip.set_audio(audio_clip)
         print(f"Clip created: {clip.duration:.1f}s")
        return clip
+
     except Exception as e:
         print(f"Error in create_clip: {str(e)}")
         return None
@@ -802,6 +804,7 @@ def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
 
 
 
+
 
 # ---------------- Main Video Generation Function ---------------- #
 def generate_video(user_input, resolution, caption_option):
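
The change above only adds explanatory comments and cosmetic whitespace around the existing subtitle logic. For readers skimming the diff, here is a minimal, self-contained sketch of that logic, assuming MoviePy 1.x with ImageMagick available for TextClip. The 5-words-per-chunk grouping, the 40 px font size, the black ColorClip background, and the make_subtitled_clip / narration.wav names are illustrative stand-ins for parts of app.py that this diff does not show.

from moviepy.editor import AudioFileClip, ColorClip, CompositeVideoClip, TextClip

TARGET_RESOLUTION = (1080, 1920)   # portrait output; stand-in for the Space's resolution setting
CAPTION_COLOR = "white"            # stand-in; app.py derives this from caption_option

def make_subtitled_clip(narration_text, tts_path, words_per_chunk=5, font_size=40):
    """Overlay evenly timed subtitle chunks on a base clip and attach the TTS audio."""
    audio_clip = AudioFileClip(tts_path)
    audio_duration = audio_clip.duration

    # Base clip stands in for the video/image branch of create_clip.
    base = ColorClip(TARGET_RESOLUTION, color=(0, 0, 0), duration=audio_duration + 0.2)

    # Split the narration into chunks and give each an equal share of the audio
    # (assumes narration_text is non-empty).
    words = narration_text.split()
    chunks = [" ".join(words[i:i + words_per_chunk])
              for i in range(0, len(words), words_per_chunk)]
    chunk_duration = audio_duration / len(chunks)
    subtitle_y_position = int(TARGET_RESOLUTION[1] * 0.70)

    subtitle_clips = []
    for i, chunk_text in enumerate(chunks):
        txt_clip = TextClip(
            chunk_text,
            fontsize=font_size,
            font='DejaVu-Sans',    # available on Hugging Face Spaces images
            color=CAPTION_COLOR,
            stroke_width=2,
            stroke_color='black'   # outline for readability
        ).set_start(i * chunk_duration).set_end((i + 1) * chunk_duration)
        subtitle_clips.append(txt_clip.set_position(('center', subtitle_y_position)))

    return CompositeVideoClip([base] + subtitle_clips).set_audio(audio_clip)

# Hypothetical usage: clip = make_subtitled_clip("Some narration text here", "narration.wav")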
|