wip
feat: Add dynamic image backgrounds and spectrogram opacity
This commit introduces a major feature allowing users to add image backgrounds to the generated spectrogram video. It also enhances the visualizer by making it semi-transparent when a background is present.
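In outline, the change builds a three-layer composition: uploaded images form the base layer, the spectrogram clip sits above them at reduced opacity, and any CUE-sheet title clips render on top. A minimal sketch of that layering, assuming the MoviePy 2.x naming already used in app.py; the file names, the 1280x720 size, and `spectrogram_frame` are placeholders for this sketch, not values from the repo:

```python
import numpy as np
from moviepy import AudioFileClip, CompositeVideoClip, ImageClip, VideoClip

WIDTH, HEIGHT = 1280, 720  # placeholder size; app.py defines its own WIDTH/HEIGHT

def spectrogram_frame(t):
    # Stand-in for the real frame_generator in app.py: a solid dark frame.
    return np.full((HEIGHT, WIDTH, 3), 20, dtype=np.uint8)

audio = AudioFileClip("input.flac")  # placeholder path
spectro = VideoClip(frame_function=spectrogram_frame, duration=audio.duration)

background = (ImageClip("cover.jpg")  # placeholder path
              .with_duration(audio.duration)
              .resized((WIDTH, HEIGHT)))

# Layer order matters: the first clip in the list is the bottom layer.
final = CompositeVideoClip([background, spectro.with_opacity(0.5)]).with_audio(audio)
final.write_videofile("preview.mp4", fps=24)
```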
app.py (CHANGED)
@@ -9,7 +9,7 @@ import subprocess
 import matplotlib.font_manager as fm
 from typing import Tuple, List, Dict
 from mutagen.flac import FLAC
-from moviepy import CompositeVideoClip, TextClip, VideoClip, AudioFileClip
+from moviepy import CompositeVideoClip, TextClip, VideoClip, AudioFileClip, ImageClip

 # --- Font Scanning and Management ---
 def get_font_display_name(font_path: str) -> Tuple[str, str]:
@@ -128,7 +128,7 @@ SYSTEM_FONTS_MAP, FONT_DISPLAY_NAMES = get_font_data()
 print(f"Scan complete. Found {len(FONT_DISPLAY_NAMES)} available fonts.")


-# --- CUE Sheet Parsing Logic
+# --- CUE Sheet Parsing Logic ---
 def cue_time_to_seconds(time_str: str) -> float:
     try:
         minutes, seconds, frames = map(int, time_str.split(':'))
@@ -160,7 +160,7 @@ def parse_cue_sheet_manually(cue_data: str) -> List[Dict[str, any]]:
     return tracks


-# ---
+# --- FFmpeg Framerate Conversion ---
 def increase_video_framerate(input_path: str, output_path: str, target_fps: int = 24):
     """
     Uses FFmpeg to increase the video's framerate without re-encoding.
@@ -203,7 +203,8 @@ def increase_video_framerate(input_path: str, output_path: str, target_fps: int

 # --- Main Processing Function ---
 def process_audio_to_video(
-    audio_path: str,
+    audio_path: str, image_paths: List[str],
+    spec_fg_color: str, spec_bg_color: str,
     font_name: str, font_size: int, font_color: str,
     font_bg_color: str, font_bg_alpha: float,
     pos_h: str, pos_v: str
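Since Gradio passes component values to the callback positionally, in the order of the `inputs=` list, the new `image_paths` and `spec_fg_color`/`spec_bg_color` parameters have to line up with `image_uploads` and the two color pickers added to that list further down. A stripped-down illustration of that wiring (not the app's actual UI; the `handler` body and component values are placeholders):

```python
import gradio as gr

def handler(audio_path, image_paths, spec_fg_color, spec_bg_color):
    # Parameter order mirrors the inputs list below: [audio_input, image_uploads, fg_color, bg_color].
    return f"{audio_path}, {len(image_paths or [])} image(s), {spec_fg_color}, {spec_bg_color}"

with gr.Blocks() as demo:
    audio_input = gr.Audio(type="filepath", label="Upload Audio File")
    image_uploads = gr.File(label="Upload Background Images", file_count="multiple", file_types=["image"])
    fg_color = gr.ColorPicker(value="#71808c")
    bg_color = gr.ColorPicker(value="#2C3E50")
    out = gr.Textbox()
    gr.Button("Run").click(fn=handler, inputs=[audio_input, image_uploads, fg_color, bg_color], outputs=out)
```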
@@ -252,17 +253,74 @@ def process_audio_to_video(
     y, sr = librosa.load(audio_path, sr=None, mono=True)
     duration = librosa.get_duration(y=y, sr=sr)

+    # --- Image Processing Logic ---
+    image_clips = []
+    # Check if any images were uploaded.
+    if image_paths and len(image_paths) > 0:
+        print(f"Found {len(image_paths)} images to process.")
+
+        # First, try to parse the CUE sheet from the audio file.
+        tracks = []
+        if audio_path.lower().endswith('.flac'):
+            try:
+                audio_meta = FLAC(audio_path)
+                if 'cuesheet' in audio_meta.tags:
+                    tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
+                    print(f"Successfully parsed {len(tracks)} tracks from CUE sheet.")
+            except Exception as e:
+                print(f"Warning: Could not read or parse CUE sheet: {e}")
+
+        # Mode 1: If CUE tracks match the number of images, align them.
+        if tracks and len(tracks) == len(image_paths):
+            print("Image count matches track count. Aligning images with tracks.")
+            for i, (track, img_path) in enumerate(zip(tracks, image_paths)):
+                start_time = track.get('start_time', 0)
+                # The end time of a track is the start time of the next, or the total duration for the last track.
+                end_time = tracks[i+1].get('start_time', duration) if i + 1 < len(tracks) else duration
+                img_duration = end_time - start_time
+                if img_duration <= 0: continue
+
+                # Create an ImageClip for the duration of the track.
+                clip = (ImageClip(img_path)
+                        .with_duration(img_duration)
+                        .with_start(start_time)
+                        .resized(width=WIDTH, height=HEIGHT))  # Resize to fit video dimensions
+                image_clips.append(clip)
+
+        # Mode 2: If no CUE or mismatch, distribute images evenly across the audio duration.
+        else:
+            if tracks: print("Image count does not match track count. Distributing images evenly.")
+            else: print("No CUE sheet found. Distributing images evenly.")
+
+            img_duration = duration / len(image_paths)
+            for i, img_path in enumerate(image_paths):
+                start_time = i * img_duration
+                # Create an ImageClip for a calculated segment of time.
+                clip = (ImageClip(img_path)
+                        .with_duration(img_duration)
+                        .with_start(start_time)
+                        .resized(width=WIDTH, height=HEIGHT))  # Resize to fit video dimensions
+                image_clips.append(clip)
+
     # Spectrogram calculation
     N_FFT, HOP_LENGTH, N_BANDS = 2048, 512, 32
     MIN_DB, MAX_DB = -80.0, 0.0
     S_mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=N_FFT, hop_length=HOP_LENGTH, n_mels=N_BANDS, fmax=sr/2)
     S_mel_db = librosa.power_to_db(S_mel, ref=np.max)

-    # Frame generation logic
+    # Frame generation logic for the spectrogram
     def frame_generator(t):
-        [3 removed lines not shown in the source]
+        # If images are used as background, the spectrogram's own background should be transparent.
+        # Otherwise, use the selected background color.
+        # Here, we use a simple opacity setting on the final clip, so we always generate the frame.
+        frame_bg = bg_rgb if not image_clips else (0, 0, 0)  # Use black if it will be made transparent later
+        frame = np.full((HEIGHT, WIDTH, 3), frame_bg, dtype=np.uint8)
+
+        # Draw the grid lines only if no images are being used.
+        if not image_clips:
+            for i in range(1, 9):
+                y_pos = int(i * (HEIGHT / 9)); frame[y_pos-1:y_pos, :] = grid_rgb
+
         time_idx = int((t / duration) * (S_mel_db.shape[1] - 1))
         bar_width = WIDTH / N_BANDS
         for i in range(N_BANDS):
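The scheduling logic above boils down to computing one (start, duration) pair per image. A MoviePy-free illustration of that arithmetic (the `image_schedule` helper is made up for this sketch; the track dicts mirror the 'start_time' keys produced by parse_cue_sheet_manually):

```python
from typing import Dict, List, Tuple

def image_schedule(tracks: List[Dict[str, float]], n_images: int,
                   total_duration: float) -> List[Tuple[float, float]]:
    """Return one (start_time, duration) pair per image."""
    # Mode 1: one image per CUE track; each image spans its track.
    if tracks and len(tracks) == n_images:
        schedule = []
        for i, track in enumerate(tracks):
            start = track.get('start_time', 0)
            end = tracks[i + 1].get('start_time', total_duration) if i + 1 < len(tracks) else total_duration
            schedule.append((start, end - start))
        return schedule
    # Mode 2: no CUE sheet (or a count mismatch); split the timeline evenly.
    slot = total_duration / n_images
    return [(i * slot, slot) for i in range(n_images)]

# Example: 3 images over a 300 s file with no CUE sheet -> [(0.0, 100.0), (100.0, 100.0), (200.0, 100.0)]
print(image_schedule([], 3, 300.0))
```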
@@ -279,17 +337,24 @@ def process_audio_to_video(
         return frame

     video_clip = VideoClip(frame_function=frame_generator, duration=duration)
+
+    # --- NEW: Set Spectrogram Opacity ---
+    # If image clips were created, make the spectrogram layer 50% transparent.
+    if image_clips:
+        print("Applying 50% opacity to spectrogram layer.")
+        video_clip = video_clip.with_opacity(0.5)
+
     audio_clip = AudioFileClip(audio_path)

     # CUE Sheet title overlay logic
     text_clips = []
-    tracks = []
     if audio_path.lower().endswith('.flac'):
         try:
-            [4 removed lines not shown in the source]
+            audio_meta = FLAC(audio_path)
+            if 'cuesheet' in audio_meta.tags:
+                tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
+        except Exception:
+            pass  # Already handled above

     if tracks:
         font_path = SYSTEM_FONTS_MAP.get(font_name)
@@ -310,13 +375,17 @@ def process_audio_to_video(
             if text_duration <= 0: continue

             # Note: TextClip's `color` argument can handle color names like 'white' directly
-            txt_clip = (TextClip(text=f"{i+1}. {title}", font_size=font_size, color=font_color, font=font_path, bg_color=font_bg_rgba)
+            txt_clip = (TextClip(text=f"{i+1}. {title}", font_size=font_size, color=font_color, font=font_path, bg_color=f'rgba({font_bg_rgba[0]}, {font_bg_rgba[1]}, {font_bg_rgba[2]}, {font_bg_alpha})')
                         .with_position(position)
                         .with_duration(text_duration)
                         .with_start(start_time))
             text_clips.append(txt_clip)

-    [1 removed line not shown in the source]
+    # --- Clip Composition ---
+    # The final composition order is important: images at the bottom, then spectrogram, then text.
+    # The base layer is now the list of image clips.
+    final_layers = image_clips + [video_clip] + text_clips
+    final_clip = CompositeVideoClip(final_layers).with_audio(audio_clip)

     # Step 1: Render the slow, 1 FPS intermediate file
     print(f"Step 1/2: Rendering base video at {RENDER_FPS} FPS...")
@@ -361,19 +430,37 @@ with gr.Blocks(title="Spectrogram Video Generator") as iface:
         with gr.Column(scale=1):
             audio_input = gr.Audio(type="filepath", label="Upload Audio File")

+            # --- Image Upload Component ---
+            gr.Markdown(
+                """
+                ### Background Image Options (Optional)
+
+                Upload one or more images to create a dynamic background for the video. The display behavior changes based on your audio file and the number of images provided.
+
+                * **Mode 1: CUE Sheet Synchronization**
+                    If your audio file contains an embedded CUE sheet AND the number of images you upload **exactly matches** the number of tracks, the images will be synchronized with the tracks. The first image will appear during the first track, the second during the second, and so on.
+
+                * **Mode 2: Even Time Distribution**
+                    In all other cases (e.g., the audio has no CUE sheet, or the number of images and tracks do not match), the images will be displayed sequentially. The total duration of the video will be divided equally among all uploaded images.
+
+                **Note:** When any image is used as a background, the spectrogram visualizer will automatically become **semi-transparent** to ensure the background is clearly visible.
+                """
+            )
+            image_uploads = gr.File(
+                label="Upload Background Images",
+                file_count="multiple",  # Allow multiple files
+                file_types=["image"]    # Accept only image formats
+            )
+
             with gr.Accordion("Visualizer Options", open=True):
                 fg_color = gr.ColorPicker(value="#71808c", label="Spectrogram Bar Top Color")
-                bg_color = gr.ColorPicker(value="#2C3E50", label="Background Color")
+                bg_color = gr.ColorPicker(value="#2C3E50", label="Background Color (if no images)")

             with gr.Accordion("Text Overlay Options", open=True):
-                [1 removed line not shown in the source]
-                # --- CORE CORRECTION: Add clarification text ---
                 gr.Markdown(
                     "**Note:** These options only take effect if the input audio file has an embedded CUE sheet."
                 )
-                gr.Markdown("---")
-                # --- CORRECTION END ---
-                [1 removed line not shown in the source]
+                gr.Markdown("---")
                 gr.Markdown("If your CUE sheet contains non-English characters, please select a compatible font.")
                 default_font = "Microsoft JhengHei" if "Microsoft JhengHei" in FONT_DISPLAY_NAMES else ("Arial" if "Arial" in FONT_DISPLAY_NAMES else (FONT_DISPLAY_NAMES[0] if FONT_DISPLAY_NAMES else None))
                 font_name_dd = gr.Dropdown(choices=FONT_DISPLAY_NAMES, value=default_font, label="Font Family")
@@ -396,10 +483,12 @@ with gr.Blocks(title="Spectrogram Video Generator") as iface:
         with gr.Column(scale=2):
             video_output = gr.Video(label="Generated Video")

+    # --- Add image_uploads to the inputs list ---
     submit_btn.click(
         fn=process_audio_to_video,
         inputs=[
-            audio_input,
+            audio_input, image_uploads,
+            fg_color, bg_color,
             font_name_dd, font_size_slider, font_color_picker,
             font_bg_color_picker, font_bg_alpha_slider,
             pos_h_radio, pos_v_radio
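One wiring caveat worth flagging: with `file_count="multiple"`, the value `gr.File` hands to the callback is a list, but whether its items are plain path strings or file wrappers exposing a `.name` path can depend on the Gradio version and the component's `type` setting. A defensive normalization helper (hypothetical, not part of this commit) would keep the `image_paths: List[str]` annotation honest either way:

```python
from typing import List, Optional

def to_image_paths(files: Optional[list]) -> List[str]:
    """Normalize a gr.File value (None, path strings, or file objects with .name) to plain paths."""
    if not files:
        return []
    return [f if isinstance(f, str) else getattr(f, "name", str(f)) for f in files]
```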