Commit c6771b3 ("changes"), committed by malvin noel
Parent: 454765e

Files changed:
- app.py +37 -34
- scripts/edit_video.py +44 -34
- scripts/generate_scripts.py +1 -0
- scripts/generate_subtitles.py +11 -26
- scripts/get_footage.py +40 -86
app.py
CHANGED
@@ -1,3 +1,4 @@
+# app.py
 import gradio as gr
 import os
 import shutil

@@ -44,72 +45,74 @@ def process_video(
     user_music: Optional[str] = None,
     show_progress_bar: bool = True,
 ):
-    """Build the final video …
+    """Build the final video with a single encoding pass."""

     if not accumulated_videos:
         raise ValueError("❌ Please upload at least one background video (.mp4) before generating.")

     approx_words = int(target_duration * WORDS_PER_SECOND)

-    # …
+    # ── 1. Script (AI-generated or user-provided) ────────────
     if script_mode == "Use my script":
         if not custom_script or not custom_script.strip():
             raise ValueError("❌ You selected 'Use my script' but the script field is empty!")
         script = custom_script.strip()
-        title = generate_title(script)
-        description = generate_description(script)
     else:
         prompt = (
-            f"You are a video creation expert …
-            f"Instruction …
-            f"🔴 Strict target duration: {target_duration}s — ≈ {approx_words} words …
+            f"You are a video creation expert.\n\nContext:\n{context.strip()}\n\n"
+            f"Instruction:\n{instruction.strip()}\n\n"
+            f"🔴 Strict target duration: {target_duration}s — ≈ {approx_words} words."
         )
         script = generate_script(prompt)
-        title = generate_title(script)
-        description = generate_description(script)
+
+    title = generate_title(script)
+    description = generate_description(script)
+
+    # ── 2. Prepare directories ───────────────────────────────
+    for folder in ("./assets/audio", "./assets/backgrounds", "./assets/output", "./assets/video_music"):
-        …
         os.makedirs(folder, exist_ok=True)

-    voice_path …
-    final_no_subs …
-    final_with_subs = "./assets/output/final_video_subtitles.mp4"
+    voice_path = "./assets/audio/voice.mp3"
+    final_no_subs = "./assets/output/final_video.mp4"

-    # …
-    for f in os.listdir("./assets/backgrounds"):
-        if f.lower().endswith(".mp4"):
-            os.remove(os.path.join("./assets/backgrounds", f))
+    # ── 3. Copy the background videos once ───────────────────
     for idx, v in enumerate(accumulated_videos):
         if not os.path.isfile(v) or not v.lower().endswith(".mp4"):
             raise ValueError(f"❌ Invalid file: {v}")
         safe_copy(v, os.path.join("./assets/backgrounds", f"video_{idx:03d}.mp4"))

-    # …
+    # ── 4. AI voice (disk cache) ─────────────────────────────
+    if not os.path.isfile(voice_path):
+        generate_voice(script, voice_path)

-    # …
-    music_path = user_music if user_music and os.path.isfile(user_music) else None
+    # ── 5. Silent montage (no audio track) ───────────────────
     _, out_no_audio = get_video_montage_from_folder(
         folder_path="./assets/backgrounds",
-        audio_path=voice_path,
+        audio_path=voice_path,  # only used for the duration, no audio injected
         output_dir="./assets/video_music",
-        lum=lum,
-        contrast=contrast,
-        gamma=gamma,
+        lum=lum, contrast=contrast, gamma=gamma,
         show_progress_bar=show_progress_bar,
     )

-    # …
+    # ── 6. Subtitles (optional) ──────────────────────────────
+    subs = None
     if add_subs:
         segments = transcribe_audio_to_subs(voice_path)
-        subs …
-    else:
-        …
+        subs = chunk_text_by_words(segments, max_words=3)
+
+    # ── 7. Final mux in a single pass ────────────────────────
+    music_path = user_music if user_music and os.path.isfile(user_music) else None
+
+    edit_video(
+        video_path=out_no_audio,
+        audio_path=voice_path,
+        music_path=music_path,
+        output_path=final_no_subs,
+        music_volume=0.10,
+        subtitles=subs,  # ← subtitles are injected here
+    )
+
+    return script, title, description, final_no_subs

 # ──────────────────────────────────────────────────────────────────────────────
 # Upload helper
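Note on the new flow: process_video now hands the Whisper segments to chunk_text_by_words(segments, max_words=3) before passing them to edit_video. That helper is not part of this commit, so the sketch below is only an illustration of what such a function could look like (the real implementation may differ): it regroups each segment into captions of at most three words with evenly interpolated timings.

# Hypothetical sketch of chunk_text_by_words (not shown in this diff)
from typing import Dict, List

def chunk_text_by_words(segments: List[Dict], max_words: int = 3) -> List[Dict]:
    """Regroup Whisper segments into short caption chunks with interpolated timings."""
    chunks = []
    for seg in segments:
        words = seg["text"].split()
        if not words:
            continue
        step = (seg["end"] - seg["start"]) / len(words)  # naive even spacing per word
        for i in range(0, len(words), max_words):
            end_idx = min(i + max_words, len(words))
            chunks.append({
                "start": seg["start"] + i * step,
                "end": seg["start"] + end_idx * step,
                "text": " ".join(words[i:end_idx]),
            })
    return chunks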
scripts/edit_video.py
CHANGED
@@ -1,69 +1,79 @@
-# ============================
-# edit_video.py (revision => optional music, configurable volume)
-# ============================
-
-"""Assembles the AI voice and, if provided, the background music.
-
-Call:
-    edit_video(
-        video_path="./assets/video_music/video_silent.mp4",
-        audio_path="./assets/audio/voice.mp3",
-        music_path=None,              # or a .mp3 / .wav path
-        output_path="./assets/output/final_video.mp4",
-        music_volume=0.10,            # music volume (0-1)
-    )
-"""
-
-from moviepy import VideoFileClip, AudioFileClip, CompositeAudioClip
-import os
+import os
+from pathlib import Path
+from typing import Optional, List, Dict
+
+from moviepy import (
+    VideoFileClip, AudioFileClip, CompositeAudioClip,
+    CompositeVideoClip
+)
+
+from scripts.generate_subtitles import create_animated_subtitle_clip


 def edit_video(
     video_path: str,
     audio_path: str,
-    music_path: str …
+    music_path: Optional[str],
     output_path: str,
     *,
     music_volume: float = 0.10,
+    subtitles: Optional[List[Dict]] = None,  # ← new
 ):
-    …
+    """
+    Final encode: adds the voice, the (optional) music and the subtitles
+    in ONE single pass.
+    """
+    vid_clip = VideoFileClip(video_path)
     voice_clip = AudioFileClip(audio_path)

-    tracks = [voice_clip]
-    …
+    # ── composite audio track ─────────────────────────────────
+    tracks = [voice_clip]
+    if music_path and Path(music_path).is_file():
         try:
             music_clip = (
                 AudioFileClip(music_path)
                 .with_volume_scaled(music_volume)
-                .with_duration(…
+                .with_duration(vid_clip.duration)
             )
             tracks.insert(0, music_clip)
         except Exception as err:
-            print(f"⚠️ …
+            print(f"⚠️ Music ignored: {err}")

-    …
+    final_audio = CompositeAudioClip(tracks).with_duration(vid_clip.duration)

+    # ── video layer(s) / subtitles ────────────────────────────
+    layers = [vid_clip]
+    if subtitles:
+        w, h = vid_clip.size
+        for sub in subtitles:
+            layers.append(
+                create_animated_subtitle_clip(
+                    sub["text"], sub["start"], sub["end"], w, h
+                )
+            )
+
+    final_clip = (
+        CompositeVideoClip(layers, size=vid_clip.size)
+        .with_duration(vid_clip.duration)
+        .with_audio(final_audio)
+    )

+    # ── export ────────────────────────────────────────────────
+    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
     final_clip.write_videofile(
         output_path,
         codec="libx264",
         audio_codec="aac",
         fps=30,
-        threads=…
+        threads=os.cpu_count(),
         preset="medium",
         ffmpeg_params=["-pix_fmt", "yuv420p"]
     )
-    print(f"✅ …
+    print(f"✅ Video written → {output_path}")

+    # ── cleanup ───────────────────────────────────────────────
     voice_clip.close()
     if "music_clip" in locals():
         music_clip.close()
     final_audio.close()
     final_clip.close()
+    vid_clip.close()
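For reference, a minimal call of the new single-pass edit_video signature; the paths mirror those used elsewhere in the repository, and the subtitle dicts use the start/end/text shape produced by transcribe_audio_to_subs:

from scripts.edit_video import edit_video

subs = [{"start": 0.0, "end": 1.2, "text": "Hello world"}]
edit_video(
    video_path="./assets/video_music/video_silent.mp4",
    audio_path="./assets/audio/voice.mp3",
    music_path=None,                      # or a .mp3 / .wav path
    output_path="./assets/output/final_video.mp4",
    music_volume=0.10,
    subtitles=subs,                       # pass None to skip the subtitle layers
)

Because the subtitle clips and the audio are composited onto the silent montage in a single CompositeVideoClip, the output is encoded only once instead of once per stage.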
scripts/generate_scripts.py
CHANGED
@@ -1,3 +1,4 @@
+# generate_scripts.py
 import os
 import re
 import json
scripts/generate_subtitles.py
CHANGED
@@ -25,7 +25,8 @@ SUBTITLE_COLORS = [
     "white", "yellow", "cyan", "deeppink", "gold", "lightgreen", "magenta", "orange"
 ]

-
+WHISPER = whisper.load_model("medium",
+                             device="cuda" if torch.cuda.is_available() else "cpu")


 def color_for_word(word: str) -> str:

@@ -85,29 +86,14 @@ def save_subtitles_to_srt(subtitles, output_path):


 @spaces.GPU()
-def transcribe_audio_to_subs(audio_path):
-    """
-    …
-    """
-    …
-    subtitles = [{
-        "start": seg['start'],
-        "end": seg['end'],
-        "text": seg['text']
-    } for seg in result['segments']]
-
-    print(f"📝 {len(subtitles)} subtitles generated.")
-
-    # Save .srt
-    base_name = os.path.splitext(audio_path)[0]
-    srt_path = f"{base_name}.srt"
-    save_subtitles_to_srt(subtitles, srt_path)
-    print(f"💾 Subtitles saved to: {srt_path}")
-
+def transcribe_audio_to_subs(audio_path: str):
+    """Transcribe audio → subtitle segments (no model re-load)."""
+    print("🎙️ Whisper transcription (cached model)...")
+    result = WHISPER.transcribe(audio_path)
+    subtitles = [
+        {"start": s["start"], "end": s["end"], "text": s["text"]}
+        for s in result["segments"]
+    ]
     return subtitles

 def format_subtitle_text(text, max_chars=50):

@@ -231,5 +217,4 @@ def add_subtitles_to_video(video_path, subtitles, output_file="./assets/output/v…
         ffmpeg_params=["-pix_fmt", "yuv420p"]
     )

     print(f"✅ Shorts/TikTok video ready: {output_file}")
-
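The Whisper model is now loaded once at module import into WHISPER and reused by transcribe_audio_to_subs; this assumes whisper and torch are imported at the top of the module, which this hunk does not show. A minimal usage sketch, with the .srt export done explicitly since it was removed from the transcription helper:

from scripts.generate_subtitles import transcribe_audio_to_subs, save_subtitles_to_srt

segments = transcribe_audio_to_subs("./assets/audio/voice.mp3")   # reuses the cached WHISPER model
save_subtitles_to_srt(segments, "./assets/audio/voice.srt")       # optional, no longer automatic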
scripts/get_footage.py
CHANGED
@@ -23,7 +23,6 @@ from moviepy import (
     CompositeVideoClip
 )

-FONT_PATH = "C:/Windows/Fonts/arialbd.ttf"

 def add_pan_effect(clip):
     """

@@ -130,7 +129,7 @@ def apply_crossfade_effects(clips, duration=0.12):

 def get_video_montage_from_folder(
     folder_path: str = "./assets/videos",
-    audio_path: str = …
+    audio_path: str | None = None,          # ← now optional
     output_dir: str = "./assets/backgrounds",
     lum: float = 6.0,
     contrast: float = 1.0,

@@ -138,103 +137,61 @@ def get_video_montage_from_folder(
     show_progress_bar: bool = True,
 ):
     """
-    …
-    3) Total duration is capped at the audio duration (the surplus is cut).
-    4) Exports two versions: with and without audio.
+    Builds a vertical 1080×1920 montage WITHOUT an audio track.
+    If audio_path is provided, it is only used to bound the duration.
+    Returns (None, path_to_silent_video).
     """

-    # Prepare the output paths
     os.makedirs(output_dir, exist_ok=True)
-
-    output_no_audio = os.path.join(output_dir, "video_silent.mp4")
+    output_no_audio = os.path.join(output_dir, "video_silent.mp4")

-    # …
-    …
+    # Target duration = voice-over length (optional)
+    audio_duration = None
+    if audio_path and os.path.isfile(audio_path):
+        audio_duration = AudioFileClip(audio_path).duration
+        print(f"🎧 Target duration (voice-over): {audio_duration:.2f} s")

-    # …
+    # Collect every video in the folder
     all_videos = [
         f for f in os.listdir(folder_path)
         if f.lower().endswith((".mp4", ".mov", ".avi", ".mkv"))
     ]
     if not all_videos:
-        …
-    …
-            else:
-                # Fit the width
-                clip = clip.resized(width=target_w)
-                # Crop the height
-                clip = clip.cropped(height=target_h, y_center=clip.h / 2)
-
-            # Apply the dynamic effect
-            dynamic_clip = dynamic_effect(clip, lum, contrast, gamma)
-            clips.append(dynamic_clip)
-            total_duration += dynamic_clip.duration
-
-            # Stop once the total exceeds the audio duration
-            if total_duration >= audio_duration:
-                break
-
-        except Exception as e:
-            print(f"⚠️ Error with file {video_file}: {e}")
+        raise RuntimeError(f"❌ No videos found in {folder_path}")
+
+    clips, total_duration = [], 0.0
+
+    for video_file in all_videos:
+        clip = VideoFileClip(os.path.join(folder_path, video_file))
+
+        # Resize/crop to 1080×1920
+        target_w, target_h = 1080, 1920
+        clip_ar, target_ar = clip.w / clip.h, target_w / target_h
+        if clip_ar > target_ar:
+            clip = clip.resized(height=target_h).cropped(width=target_w, x_center=clip.w/2)
+        else:
+            clip = clip.resized(width=target_w).cropped(height=target_h, y_center=clip.h/2)
+
+        # Dynamic effects
+        dynamic_clip = dynamic_effect(clip, lum, contrast, gamma)
+        clips.append(dynamic_clip)
+        total_duration += dynamic_clip.duration
+
+        if audio_duration and total_duration >= audio_duration:
+            break

     if not clips:
-        …
-        return None, None
+        raise RuntimeError("❌ Montage impossible (no valid clips).")

-    # Crossfade between the clips
     clips = apply_crossfade_effects(clips, duration=0.15)
-
-    # Concatenate and cap the total duration at the audio duration
-    final_clip = concatenate_videoclips(clips, method="compose").subclipped(0, audio_duration)
-
-    # Overlay (e.g. progress bar)
+    final_clip = concatenate_videoclips(clips, method="compose")
+    if audio_duration:
+        final_clip = final_clip.subclipped(0, audio_duration)

     if show_progress_bar:
         final_clip = add_timer_overlay(final_clip)

-    # --------------------
-    # 1) Version WITH audio
-    # --------------------
-    final_clip_with_audio = final_clip.with_audio(voiceover)
-    final_clip_with_audio.write_videofile(
-        output_with_audio,
-        codec='libx264',
-        audio_codec='aac',
-        fps=30,
-        threads=4,
-        preset="medium",
-        ffmpeg_params=["-pix_fmt", "yuv420p"]
-    )
-    print(f"✅ Montage created (WITH audio): {output_with_audio}")
-
-    # --------------------
-    # 2) Version WITHOUT audio
-    # --------------------
+    # Silent export
     final_clip.write_videofile(
         output_no_audio,
         codec='libx264',

@@ -246,14 +203,11 @@ def get_video_montage_from_folder(
     )
     print(f"✅ Montage created (WITHOUT audio): {output_no_audio}")

-    # …
+    # Clean
     for c in clips:
         c.close()
-    voiceover.close()
     final_clip.close()
-    final_clip_with_audio.close()
-

-    return …
+    return None, output_no_audio
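A sketch of how the updated get_video_montage_from_folder is called; the argument values come from the function defaults and from app.py, and gamma is left at its default since its value is not shown in this diff:

from scripts.get_footage import get_video_montage_from_folder

_, silent_path = get_video_montage_from_folder(
    folder_path="./assets/backgrounds",
    audio_path="./assets/audio/voice.mp3",   # optional, only bounds the montage duration
    output_dir="./assets/video_music",
    lum=6.0,
    contrast=1.0,
    show_progress_bar=True,
)
# silent_path == "./assets/video_music/video_silent.mp4"; the first return value is always None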