import gradio as gr
import librosa
import numpy as np
import re
import os
import time
import struct
import subprocess
import matplotlib.font_manager as fm
from typing import Tuple, List, Dict, Any, Optional
from mutagen.flac import FLAC
from moviepy import CompositeVideoClip, TextClip, VideoClip, AudioFileClip, ImageClip
# --- Font Scanning and Management ---
def get_font_display_name(font_path: str) -> Tuple[Optional[str], Optional[str]]:
    """
    A lightweight TTF/TTC parser for font display names.
    It reads the 'name' table to find the localized "Full Font Name" (nameID=4).
    Returns a tuple of (display_name, language_tag), where language_tag is one of
    'zh'/'ja'/'ko'/'en'/'other', or (None, None) if no name can be extracted.
    """
    def decode_name_string(name_bytes: bytes, platform_id: int, encoding_id: int) -> Optional[str]:
"""Decodes the name string based on platform and encoding IDs."""
try:
if platform_id == 3 and encoding_id in [1, 10]: # Windows, Unicode
return name_bytes.decode('utf_16_be').strip('\x00')
elif platform_id == 1 and encoding_id == 0: # Macintosh, Roman
return name_bytes.decode('mac_roman').strip('\x00')
elif platform_id == 0: # Unicode
return name_bytes.decode('utf_16_be').strip('\x00')
else: # Fallback
return name_bytes.decode('utf_8', errors='ignore').strip('\x00')
except Exception:
return None
try:
with open(font_path, 'rb') as f: data = f.read()
def read_ushort(offset): return struct.unpack('>H', data[offset:offset+2])[0]
def read_ulong(offset): return struct.unpack('>I', data[offset:offset+4])[0]
font_offsets = [0]
# Check for TTC (TrueType Collection) header
if data[:4] == b'ttcf':
num_fonts = read_ulong(8)
font_offsets = [read_ulong(12 + i * 4) for i in range(num_fonts)]
# For simplicity, we only parse the first font in a TTC
font_offset = font_offsets[0]
num_tables = read_ushort(font_offset + 4)
name_table_offset = -1
# Locate the 'name' table
for i in range(num_tables):
entry_offset = font_offset + 12 + i * 16
tag = data[entry_offset:entry_offset+4]
if tag == b'name':
name_table_offset = read_ulong(entry_offset + 8); break
if name_table_offset == -1: return None, None
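        # 'name' table layout, as read below (all values big-endian):
        #   uint16 format; uint16 count; uint16 stringOffset
        #   followed by `count` records of 12 bytes each:
        #   uint16 platformID, encodingID, languageID, nameID, length, offset
        # The string data itself begins at name_table_offset + stringOffset.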
count, string_offset = read_ushort(name_table_offset + 2), read_ushort(name_table_offset + 4)
name_candidates = {}
# Iterate through all name records
for i in range(count):
rec_offset = name_table_offset + 6 + i * 12
platform_id, encoding_id, language_id, name_id, length, offset = struct.unpack('>HHHHHH', data[rec_offset:rec_offset+12])
if name_id == 4: # We only care about the "Full Font Name"
string_pos = name_table_offset + string_offset + offset
value = decode_name_string(data[string_pos : string_pos + length], platform_id, encoding_id)
if value:
# Store candidates based on language ID
if language_id in [1028, 2052, 3076, 4100, 5124]: name_candidates["zh"] = value # Chinese
elif language_id == 1041: name_candidates["ja"] = value # Japanese
elif language_id == 1042: name_candidates["ko"] = value # Korean
elif language_id in [1033, 0]: name_candidates["en"] = value # English
else:
if "other" not in name_candidates: name_candidates["other"] = value
# Return the best candidate based on language priority
if name_candidates.get("zh"): return name_candidates.get("zh"), "zh"
if name_candidates.get("ja"): return name_candidates.get("ja"), "ja"
if name_candidates.get("ko"): return name_candidates.get("ko"), "ko"
if name_candidates.get("other"): return name_candidates.get("other"), "other"
if name_candidates.get("en"): return name_candidates.get("en"), "en"
return None, None
except Exception:
return None, None
def get_font_data() -> Tuple[Dict[str, str], List[str]]:
"""
Scans system fonts, parses their display names, and returns a sorted list
with a corresponding name-to-path map.
"""
font_map = {}
found_names = [] # Stores (display_name, is_fallback, lang_tag)
# Scan for both .ttf and .ttc files
ttf_files = fm.findSystemFonts(fontpaths=None, fontext='ttf')
ttc_files = fm.findSystemFonts(fontpaths=None, fontext='ttc')
all_font_files = list(set(ttf_files + ttc_files))
for path in all_font_files:
display_name, lang_tag = get_font_display_name(path)
is_fallback = display_name is None
if is_fallback:
# Create a fallback name from the filename
display_name = os.path.splitext(os.path.basename(path))[0].replace('-', ' ').replace('_', ' ').title()
lang_tag = 'fallback'
if display_name and display_name not in font_map:
font_map[display_name] = path
found_names.append((display_name, is_fallback, lang_tag))
# Define sort priority for languages
sort_order = {'zh': 0, 'ja': 1, 'ko': 2, 'en': 3, 'other': 4, 'fallback': 5}
# Sort by priority, then alphabetically
found_names.sort(key=lambda x: (sort_order.get(x[2], 99), x[0]))
sorted_display_names = [name for name, _, _ in found_names]
return font_map, sorted_display_names
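# For illustration (hypothetical paths), SYSTEM_FONTS_MAP below might look like
# {'Microsoft JhengHei': 'C:/Windows/Fonts/msjh.ttc', 'Arial': 'C:/Windows/Fonts/arial.ttf'},
# with FONT_DISPLAY_NAMES holding the same names sorted by language priority.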
print("Scanning system fonts and parsing names...")
SYSTEM_FONTS_MAP, FONT_DISPLAY_NAMES = get_font_data()
print(f"Scan complete. Found {len(FONT_DISPLAY_NAMES)} available fonts.")
# --- CUE Sheet Parsing Logic ---
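# CUE INDEX timestamps use the MM:SS:FF format, where FF counts frames at
# 75 frames per second (the CD-DA convention).
# For example, "03:21:45" is 3*60 + 21 + 45/75 = 201.6 seconds.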
def cue_time_to_seconds(time_str: str) -> float:
try:
minutes, seconds, frames = map(int, time_str.split(':'))
return minutes * 60 + seconds + frames / 75.0
except ValueError:
return 0.0
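# A minimal sketch of the embedded CUE data the parser below expects
# (titles and timestamps are illustrative):
#   TRACK 01 AUDIO
#     TITLE "First Song"
#     INDEX 01 00:00:00
#   TRACK 02 AUDIO
#     TITLE "Second Song"
#     INDEX 01 04:33:20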
def parse_cue_sheet_manually(cue_data: str) -> List[Dict[str, Any]]:
tracks = []
current_track_info = None
for line in cue_data.splitlines():
line = line.strip()
if line.upper().startswith('TRACK'):
if current_track_info and 'title' in current_track_info and 'start_time' in current_track_info:
tracks.append(current_track_info)
current_track_info = {}
continue
if current_track_info is not None:
title_match = re.search(r'TITLE\s+"(.*?)"', line, re.IGNORECASE)
if title_match:
current_track_info['title'] = title_match.group(1)
continue
index_match = re.search(r'INDEX\s+01\s+(\d{2}:\d{2}:\d{2})', line, re.IGNORECASE)
if index_match:
current_track_info['start_time'] = cue_time_to_seconds(index_match.group(1))
continue
if current_track_info and 'title' in current_track_info and 'start_time' in current_track_info:
tracks.append(current_track_info)
return tracks
# --- FFmpeg Framerate Conversion ---
def increase_video_framerate(input_path: str, output_path: str, target_fps: int = 24):
    """
    Uses FFmpeg to raise the video's framerate by duplicating frames.
    The video stream is re-encoded with libx264 while the audio stream is
    copied, so this pass is fast compared to rendering every frame in MoviePy.
    Args:
        input_path (str): Path to the low-framerate video file.
        output_path (str): Path for the final, high-framerate video file.
        target_fps (int): The desired output framerate.
    """
print(f"Increasing framerate of '{input_path}' to {target_fps} FPS...")
    # Construct the FFmpeg command
command = [
'ffmpeg',
'-y', # Overwrite output file if exists
'-i', input_path,
'-map', '0', # Map all streams (video, audio, subtitles)
        '-vf', f'fps={target_fps}', # fps filter duplicates frames up to the target framerate
'-c:v', 'libx264', # Re-encode video with H.264 codec
'-preset', 'fast', # Encoding speed/quality tradeoff
'-crf', '18', # Quality (lower is better)
'-c:a', 'copy', # Copy audio without re-encoding
output_path
]
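    # For reference, with the default arguments this is equivalent to running:
    #   ffmpeg -y -i <input> -map 0 -vf fps=24 -c:v libx264 -preset fast -crf 18 -c:a copy <output>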
try:
# Execute the command
# Using capture_output to hide ffmpeg logs from the main console unless an error occurs
        subprocess.run(command, check=True, capture_output=True, text=True)
print("Framerate increase successful.")
except FileNotFoundError:
# This error occurs if FFmpeg is not installed or not in the system's PATH
raise gr.Error("FFmpeg not found. Please ensure FFmpeg is installed and accessible in your system's PATH.")
except subprocess.CalledProcessError as e:
# This error occurs if FFmpeg returns a non-zero exit code
print("FFmpeg error output:\n", e.stderr)
raise gr.Error(f"FFmpeg failed to increase the framerate. See console for details. Error: {e.stderr}")
# --- Main Processing Function ---
def process_audio_to_video(
audio_path: str, image_paths: List[str],
spec_fg_color: str, spec_bg_color: str,
font_name: str, font_size: int, font_color: str,
font_bg_color: str, font_bg_alpha: float,
pos_h: str, pos_v: str
) -> str:
if not audio_path: raise gr.Error("Please upload an audio file first.")
if not font_name: raise gr.Error("Please select a font from the list.")
# Define paths for temporary and final files
timestamp = int(time.time())
temp_fps1_path = f"temp_{timestamp}_fps1.mp4"
final_output_path = f"final_video_{timestamp}_fps24.mp4"
WIDTH, HEIGHT, RENDER_FPS = 1280, 720, 1 # Render at 1 FPS
PLAYBACK_FPS = 24 # Final playback framerate
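    # Two-pass design: the slow MoviePy render only has to produce one frame per
    # second; FFmpeg then duplicates frames up to 24 FPS in a much faster second
    # pass (see increase_video_framerate).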
# --- A robust color parser for hex and rgb() strings ---
def parse_color_to_rgb(color_str: str) -> Tuple[int, int, int]:
"""
Parses a color string which can be in hex format (#RRGGBB) or
rgb format (e.g., "rgb(255, 128, 0)").
Returns a tuple of (R, G, B).
"""
color_str = color_str.strip()
if color_str.startswith('#'):
# Handle hex format
hex_val = color_str.lstrip('#')
if len(hex_val) == 3: # Handle shorthand hex like #FFF
hex_val = "".join([c*2 for c in hex_val])
return tuple(int(hex_val[i:i+2], 16) for i in (0, 2, 4))
elif color_str.startswith('rgb'):
# Handle rgb format
try:
numbers = re.findall(r'\d+', color_str)
return tuple(int(n) for n in numbers[:3])
except (ValueError, IndexError):
raise ValueError(f"Could not parse rgb color string: {color_str}")
else:
raise ValueError(f"Unknown color format: {color_str}")
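    # e.g. parse_color_to_rgb("#2C3E50") -> (44, 62, 80)
    #      parse_color_to_rgb("rgb(255, 128, 0)") -> (255, 128, 0)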
    # Use the robust color parser for all color inputs
fg_rgb, bg_rgb = parse_color_to_rgb(spec_fg_color), parse_color_to_rgb(spec_bg_color)
grid_rgb = tuple(min(c + 40, 255) for c in bg_rgb)
# Wrap the entire process in a try...finally block to ensure cleanup
try:
y, sr = librosa.load(audio_path, sr=None, mono=True)
duration = librosa.get_duration(y=y, sr=sr)
# --- Image Processing Logic ---
        image_clips = []
        tracks = []  # Parsed CUE tracks; also read by the text-overlay section below
# Check if any images were uploaded.
        if image_paths:
print(f"Found {len(image_paths)} images to process.")
# First, try to parse the CUE sheet from the audio file.
tracks = []
if audio_path.lower().endswith('.flac'):
try:
audio_meta = FLAC(audio_path)
if 'cuesheet' in audio_meta.tags:
tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
print(f"Successfully parsed {len(tracks)} tracks from CUE sheet.")
except Exception as e:
print(f"Warning: Could not read or parse CUE sheet: {e}")
# Mode 1: If CUE tracks match the number of images, align them.
if tracks and len(tracks) == len(image_paths):
print("Image count matches track count. Aligning images with tracks.")
for i, (track, img_path) in enumerate(zip(tracks, image_paths)):
start_time = track.get('start_time', 0)
# The end time of a track is the start time of the next, or the total duration for the last track.
end_time = tracks[i+1].get('start_time', duration) if i + 1 < len(tracks) else duration
img_duration = end_time - start_time
if img_duration <= 0: continue
# Create an ImageClip for the duration of the track.
                    clip = (ImageClip(img_path)
                            .with_duration(img_duration)
                            .with_start(start_time)
                            .resized((WIDTH, HEIGHT))) # Resize to fit video dimensions
image_clips.append(clip)
# Mode 2: If no CUE or mismatch, distribute images evenly across the audio duration.
else:
if tracks: print("Image count does not match track count. Distributing images evenly.")
else: print("No CUE sheet found. Distributing images evenly.")
img_duration = duration / len(image_paths)
for i, img_path in enumerate(image_paths):
start_time = i * img_duration
# Create an ImageClip for a calculated segment of time.
                    clip = (ImageClip(img_path)
                            .with_duration(img_duration)
                            .with_start(start_time)
                            .resized((WIDTH, HEIGHT))) # Resize to fit video dimensions
image_clips.append(clip)
# Spectrogram calculation
N_FFT, HOP_LENGTH, N_BANDS = 2048, 512, 32
MIN_DB, MAX_DB = -80.0, 0.0
S_mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=N_FFT, hop_length=HOP_LENGTH, n_mels=N_BANDS, fmax=sr/2)
S_mel_db = librosa.power_to_db(S_mel, ref=np.max)
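        # S_mel_db has shape (N_BANDS, n_frames); power_to_db with ref=np.max
        # expresses energy relative to the loudest point, so the peak sits at 0 dB.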
# Frame generation logic for the spectrogram
def frame_generator(t):
# If images are used as background, the spectrogram's own background should be transparent.
# Otherwise, use the selected background color.
# Here, we will use a simple opacity setting on the final clip, so we always generate the frame.
frame_bg = bg_rgb if not image_clips else (0,0,0) # Use black if it will be made transparent later
frame = np.full((HEIGHT, WIDTH, 3), frame_bg, dtype=np.uint8)
# Draw the grid lines only if no images are being used.
if not image_clips:
for i in range(1, 9):
y_pos = int(i * (HEIGHT / 9)); frame[y_pos-1:y_pos, :] = grid_rgb
time_idx = int((t / duration) * (S_mel_db.shape[1] - 1))
bar_width = WIDTH / N_BANDS
for i in range(N_BANDS):
energy_db = S_mel_db[i, time_idx]
norm_height = np.clip((energy_db - MIN_DB) / (MAX_DB - MIN_DB), 0, 1)
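                # e.g. an energy of -40 dB maps to (-40 - (-80)) / (0 - (-80)) = 0.5,
                # i.e. a bar half the frame height.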
bar_height = int(norm_height * HEIGHT)
if bar_height < 1: continue
x_start, x_end = int(i * bar_width), int((i + 1) * bar_width - 2)
y_start = HEIGHT - bar_height
for k in range(bar_height):
y_pos, ratio = y_start + k, k / bar_height
r, g, b = (int(c1 * (1-ratio) + c2 * ratio) for c1, c2 in zip(fg_rgb, bg_rgb))
frame[y_pos, x_start:x_end] = (r, g, b)
return frame
video_clip = VideoClip(frame_function=frame_generator, duration=duration)
        # --- Set Spectrogram Opacity ---
# If image clips were created, make the spectrogram layer 50% transparent.
if image_clips:
print("Applying 50% opacity to spectrogram layer.")
            video_clip = video_clip.with_opacity(0.5)
audio_clip = AudioFileClip(audio_path)
# CUE Sheet title overlay logic
text_clips = []
if audio_path.lower().endswith('.flac'):
try:
audio_meta = FLAC(audio_path)
if 'cuesheet' in audio_meta.tags:
tracks = parse_cue_sheet_manually(audio_meta.tags['cuesheet'][0])
except Exception:
                pass # Non-fatal: the text overlay is simply skipped if the CUE sheet cannot be parsed
if tracks:
font_path = SYSTEM_FONTS_MAP.get(font_name)
if not font_path: raise gr.Error(f"Font path for '{font_name}' not found!")
# Use the robust parser for text colors as well
font_bg_rgb = parse_color_to_rgb(font_bg_color)
font_bg_rgba = (*font_bg_rgb, int(font_bg_alpha * 255))
position = (pos_h.lower(), pos_v.lower())
print(f"Using font: {font_name}, Size: {font_size}, Position: {position}")
for i, track in enumerate(tracks):
start_time = track.get('start_time', 0)
                title = track.get('title', 'Unknown Track')
                end_time = tracks[i+1].get('start_time', duration) if i + 1 < len(tracks) else duration
text_duration = end_time - start_time
if text_duration <= 0: continue
                # Note: TextClip's `color` accepts names like 'white'; bg_color takes the RGBA tuple computed above
                txt_clip = (TextClip(text=f"{i+1}. {title}", font_size=font_size, color=font_color, font=font_path, bg_color=font_bg_rgba)
.with_position(position)
.with_duration(text_duration)
.with_start(start_time))
text_clips.append(txt_clip)
# --- Clip Composition ---
# The final composition order is important: images at the bottom, then spectrogram, then text.
# The base layer is now the list of image clips.
final_layers = image_clips + [video_clip] + text_clips
final_clip = CompositeVideoClip(final_layers).with_audio(audio_clip)
# Step 1: Render the slow, 1 FPS intermediate file
print(f"Step 1/2: Rendering base video at {RENDER_FPS} FPS...")
try:
# Attempt to copy audio stream directly
print("Attempting to copy audio stream directly...")
final_clip.write_videofile(
temp_fps1_path, codec="libx264", audio_codec="copy", fps=RENDER_FPS,
logger='bar', threads=os.cpu_count(), preset='ultrafast'
)
print("Audio stream successfully copied!")
except Exception:
# Fallback to AAC encoding if copy fails
print("Direct audio copy failed, falling back to AAC encoding...")
final_clip.write_videofile(
temp_fps1_path, codec="libx264", audio_codec="aac", fps=RENDER_FPS,
logger='bar', threads=os.cpu_count(), preset='ultrafast'
)
print("AAC audio encoding complete.")
final_clip.close()
# Step 2: Use FFmpeg to quickly increase the framerate to 24 FPS
        print(f"\nStep 2/2: Re-encoding video at {PLAYBACK_FPS} FPS...")
increase_video_framerate(temp_fps1_path, final_output_path, target_fps=PLAYBACK_FPS)
return final_output_path
    except Exception:
        # Re-raise so Gradio can catch and display the error
        raise
finally:
# Step 3: Clean up the temporary file regardless of success or failure
if os.path.exists(temp_fps1_path):
print(f"Cleaning up temporary file: {temp_fps1_path}")
os.remove(temp_fps1_path)
# --- Gradio UI ---
with gr.Blocks(title="Spectrogram Video Generator") as iface:
gr.Markdown("# Spectrogram Video Generator")
with gr.Row():
with gr.Column(scale=1):
audio_input = gr.Audio(type="filepath", label="Upload Audio File")
# --- Image Upload Component ---
gr.Markdown(
"""
### Background Image Options (Optional)
Upload one or more images to create a dynamic background for the video. The display behavior changes based on your audio file and the number of images provided.
* **Mode 1: CUE Sheet Synchronization**
If your audio file contains an embedded CUE sheet AND the number of images you upload **exactly matches** the number of tracks, the images will be synchronized with the tracks. The first image will appear during the first track, the second during the second, and so on.
* **Mode 2: Even Time Distribution**
In all other cases (e.g., the audio has no CUE sheet, or the number of images and tracks do not match), the images will be displayed sequentially. The total duration of the video will be divided equally among all uploaded images.
**Note:** When any image is used as a background, the spectrogram visualizer will automatically become **semi-transparent** to ensure the background is clearly visible.
"""
)
image_uploads = gr.File(
label="Upload Background Images",
file_count="multiple", # Allow multiple files
file_types=["image"] # Accept only image formats
)
with gr.Accordion("Visualizer Options", open=True):
fg_color = gr.ColorPicker(value="#71808c", label="Spectrogram Bar Top Color")
bg_color = gr.ColorPicker(value="#2C3E50", label="Background Color (if no images)")
with gr.Accordion("Text Overlay Options", open=True):
gr.Markdown(
"**Note:** These options only take effect if the input audio file has an embedded CUE sheet."
)
gr.Markdown("---")
gr.Markdown("If your CUE sheet contains non-English characters, please select a compatible font.")
default_font = "Microsoft JhengHei" if "Microsoft JhengHei" in FONT_DISPLAY_NAMES else ("Arial" if "Arial" in FONT_DISPLAY_NAMES else (FONT_DISPLAY_NAMES[0] if FONT_DISPLAY_NAMES else None))
font_name_dd = gr.Dropdown(choices=FONT_DISPLAY_NAMES, value=default_font, label="Font Family")
with gr.Row():
font_size_slider = gr.Slider(minimum=12, maximum=128, value=40, step=1, label="Font Size")
font_color_picker = gr.ColorPicker(value="#FFFFFF", label="Font Color")
with gr.Row():
font_bg_color_picker = gr.ColorPicker(value="#000000", label="Text BG Color")
font_bg_alpha_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="Text BG Opacity")
gr.Markdown("Text Position")
with gr.Row():
pos_h_radio = gr.Radio(["left", "center", "right"], value="center", label="Horizontal Align")
pos_v_radio = gr.Radio(["top", "center", "bottom"], value="bottom", label="Vertical Align")
submit_btn = gr.Button("Generate Video", variant="primary")
with gr.Column(scale=2):
video_output = gr.Video(label="Generated Video")
# --- Add image_uploads to the inputs list ---
submit_btn.click(
fn=process_audio_to_video,
inputs=[
audio_input, image_uploads,
fg_color, bg_color,
font_name_dd, font_size_slider, font_color_picker,
font_bg_color_picker, font_bg_alpha_slider,
pos_h_radio, pos_v_radio
],
outputs=video_output
)
if __name__ == "__main__":
iface.launch(inbrowser=True)