# Source: Hugging Face Space by Ivan000 ("Update app.py", commit d48dfdf, 5.22 kB)
# app.py
# =============
# This is a complete app.py file for a Gradio application that allows users to upload an audio file and generate a video with frequency visualization.
import os
import subprocess

import cv2
import gradio as gr
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
# Function to generate frequency visualization frames from audio
def generate_frequency_visualization(audio_path, fps, num_bars):
    """Render one PNG bar-chart frame per STFT column of an audio file.

    Args:
        audio_path: Path to an audio file readable by librosa.
        fps: Target video frames per second; determines the STFT hop length.
        num_bars: Number of frequency bars drawn in each frame.

    Returns:
        ('frames', duration_in_seconds) on success, (None, None) on failure.
    """
    try:
        # Load at the file's native sampling rate.
        y, sr = librosa.load(audio_path, sr=None)
        duration = librosa.get_duration(y=y, sr=sr)
        print(f"Loaded audio file with sampling rate: {sr}, and duration: {duration} seconds.")
        if sr == 0 or len(y) == 0:
            raise ValueError("Invalid audio file: sampling rate or audio data is zero.")

        # Hop length matches the desired fps so each STFT column maps to one
        # video frame; clamp to >= 1 so librosa never sees a zero hop.
        hop_length = max(1, int(sr / fps))
        S = np.abs(librosa.stft(y, n_fft=2048, hop_length=hop_length))
        frequencies = librosa.fft_frequencies(sr=sr)

        # Partition the frequency axis into num_bars contiguous bins and
        # aggregate the mean magnitude of each bin per frame.
        bins = np.linspace(0, len(frequencies), num_bars + 1, dtype=int)
        bar_heights = []
        for i in range(S.shape[1]):
            frame = S[:, i]
            bar_frame = [np.mean(frame[bins[j]:bins[j + 1]]) for j in range(num_bars)]
            bar_heights.append(bar_frame)

        # Start from a clean frame directory: stale PNGs left over from a
        # previous (longer) run would otherwise leak into the next video.
        os.makedirs('frames', exist_ok=True)
        for stale in os.listdir('frames'):
            if stale.endswith('.png'):
                os.remove(os.path.join('frames', stale))

        for i, heights in enumerate(bar_heights):
            # Black 720p canvas.
            img = np.zeros((720, 1280, 3), dtype=np.uint8)
            heights = np.array(heights)
            # Normalize bar heights to at most 600 px. Guard against a
            # silent frame (peak == 0), which previously divided by zero
            # and produced NaN -> garbage when cast to int.
            peak = np.max(heights)
            if peak > 0:
                heights = (heights / peak * 600).astype(int)
            else:
                heights = np.zeros(num_bars, dtype=int)
            bar_width = 80
            spacing = (1280 - num_bars * bar_width) // (num_bars + 1)
            for j, height in enumerate(heights):
                x = spacing + j * (bar_width + spacing)
                # 'y_top' (not 'y') so the loop does not shadow the audio array.
                y_top = 720 - int(height)
                # Viridis colormap sample scaled to a 0-255 color tuple.
                color = tuple(int(c * 255) for c in plt.cm.viridis(j / num_bars)[:3])
                cv2.rectangle(img, (x, 720), (x + bar_width, y_top), color, -1)
            cv2.imwrite(f'frames/frame_{i:04d}.png', img)

        print(f"Generated {len(bar_heights)} frames for visualization.")
        return 'frames', duration
    except Exception as e:
        # Best-effort pipeline stage: report and signal failure to the caller.
        print(f"Error generating frequency visualization: {e}")
        return None, None
# Function to create a video from the generated frames
def create_video_from_frames(frames_directory, audio_path, fps, duration):
    """Assemble the rendered PNG frames into an MP4 and mux in the audio.

    Args:
        frames_directory: Directory containing frame_*.png images.
        audio_path: Source audio file to mux into the video.
        fps: Frame rate of the output video.
        duration: Audio duration in seconds (unused; kept for API compatibility).

    Returns:
        Path of the muxed video file on success, None on failure.
    """
    try:
        frame_files = sorted(
            os.path.join(frames_directory, f)
            for f in os.listdir(frames_directory)
            if f.endswith('.png')
        )
        if not frame_files:
            raise ValueError("No frames found to create the video.")

        # Video dimensions come from the first frame.
        first_frame = cv2.imread(frame_files[0])
        height, width, _ = first_frame.shape

        video_path = 'output_video.mp4'
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
        try:
            for frame_file in frame_files:
                video_writer.write(cv2.imread(frame_file))
        finally:
            # Always close the container, even if a write fails.
            video_writer.release()

        # Mux audio with ffmpeg. Argument-list form (shell=False) avoids
        # breakage and shell injection when paths contain spaces or
        # metacharacters, which the previous os.system() call did not.
        # '-shortest' trims the output to the shorter of the two streams.
        output_path = 'output_with_audio.mp4'
        result = subprocess.run(
            ['ffmpeg', '-y', '-i', video_path, '-i', audio_path,
             '-c:v', 'copy', '-c:a', 'aac', '-shortest', output_path],
            capture_output=True,
        )
        if result.returncode != 0:
            # Previously the exit status was ignored and a missing/stale
            # output path could be returned as "success".
            raise RuntimeError(
                f"ffmpeg failed: {result.stderr.decode(errors='replace')[-500:]}"
            )

        print(f"Video created with {len(frame_files)} frames.")
        return output_path
    except Exception as e:
        print(f"Error creating video from frames: {e}")
        return None
# Gradio interface function
def process_audio(audio):
    """Gradio callback: turn an uploaded audio file into a visualization video.

    Args:
        audio: Filesystem path of the uploaded audio file.

    Returns:
        Path of the rendered video, or None if any pipeline stage failed.
    """
    fps, num_bars = 60, 12  # fixed render settings for this demo UI
    frames_dir, duration = generate_frequency_visualization(audio, fps, num_bars)
    if not frames_dir:
        return None
    return create_video_from_frames(frames_dir, audio, fps, duration)
# Build the Gradio UI: one audio-file input, one video output.
_DESCRIPTION = (
    "Upload an audio file to generate a video with frequency visualization. "
    "Supported file types: WAV, MP3, FLAC. "
    "Recommended file duration: 10 seconds to 5 minutes. "
    "The visualization will consist of 12 bars representing frequency ranges."
)

iface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
    outputs=gr.Video(label="Generated Video"),
    title="Audio Frequency Visualization",
    description=_DESCRIPTION,
)
# Launch the Gradio interface only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()
# Dependencies
# =============
# The following dependencies are required to run this app:
# - gradio
# - librosa
# - numpy
# - matplotlib
# - opencv-python
# - ffmpeg (installed separately, must be on PATH)