Spaces:
Sleeping
Sleeping
feat: Enhance CUE parsing, editing, and metadata preservation
This commit introduces a comprehensive overhaul of the CUE sheet handling logic to significantly improve robustness, preserve all original metadata, and fix several critical bugs.
The primary architectural change is the move from a simple list of timestamps to a list of structured dictionaries (`track_data`). This allows each track to maintain its own metadata, such as its title. Furthermore, track selection for editing has been refactored to use stable numerical indices instead of fragile string matching, preventing data loss during operations.
app.py
CHANGED
@@ -2,6 +2,7 @@ import os
|
|
2 |
import re
|
3 |
import librosa
|
4 |
import gradio as gr
|
|
|
5 |
|
6 |
# --- Helper Functions ---
|
7 |
|
@@ -23,34 +24,48 @@ def parse_cue_time_to_seconds(time_str):
|
|
23 |
return m * 60 + s + f / 75.0
|
24 |
return None
|
25 |
|
26 |
-
def format_cue_text(
|
27 |
-
"""
|
28 |
-
|
|
|
|
|
|
|
29 |
return ""
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
cue_text += f' TRACK {idx+1:02d} AUDIO\n'
|
40 |
-
cue_text += f' TITLE "
|
41 |
cue_text += f' INDEX 01 {cue_time_str}\n'
|
42 |
return cue_text
|
43 |
|
44 |
-
def
|
45 |
-
"""Creates
|
46 |
-
if not
|
47 |
return []
|
48 |
-
|
|
|
49 |
track_choices = []
|
50 |
-
for i,
|
51 |
-
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
54 |
return track_choices
|
55 |
|
56 |
# --- Core Gradio Functions ---
|
@@ -99,172 +114,216 @@ def analyze_audio_to_cue(audio_file, top_db, min_segment_len, merge_threshold, m
|
|
99 |
|
100 |
# --- 4. Prepare Outputs for Gradio ---
|
101 |
times = sorted(list(set(times)))
|
|
|
|
|
|
|
|
|
102 |
audio_filename = os.path.basename(audio_file)
|
103 |
-
|
104 |
-
|
|
|
|
|
105 |
|
106 |
# This function now returns everything needed to update the entire UI in one step.
|
107 |
return (
|
108 |
-
initial_cue_text,
|
109 |
-
gr.update(choices=
|
110 |
)
|
111 |
|
112 |
def parse_cue_and_update_ui(cue_text):
|
113 |
-
"""Workflow 2: Parses pasted CUE text
|
114 |
if not cue_text or "INDEX 01" not in cue_text:
|
115 |
-
return cue_text,
|
116 |
|
117 |
-
|
118 |
-
|
|
|
119 |
|
120 |
-
|
121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
122 |
|
123 |
-
if not
|
124 |
-
return cue_text,
|
125 |
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
|
132 |
-
return
|
133 |
|
134 |
-
def update_editing_tools(
|
135 |
"""Dynamically shows/hides editing tools based on selection count."""
|
136 |
-
num_selected = len(
|
137 |
|
|
|
|
|
|
|
|
|
|
|
|
|
138 |
if num_selected == 1:
|
139 |
-
|
140 |
-
#
|
141 |
-
|
142 |
-
start_time =
|
143 |
-
end_time = audio_duration if (track_idx + 1) >= len(
|
144 |
|
145 |
-
# --- 2.
|
146 |
# A CUE sheet frame is 1/75s (~0.013s). We use a slightly larger padding.
|
147 |
padding = 0.02
|
|
|
148 |
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
return (
|
159 |
-
gr.update(visible=False), # Hide Merge button
|
160 |
-
gr.update(visible=split_possible), # Show/Hide Split Group
|
161 |
-
gr.update(visible=True), # Show Edit Time Group
|
162 |
-
gr.update(minimum=new_min_time, maximum=new_max_time, value=mid_point), # Configure Slider
|
163 |
-
gr.update(value=f"Split at: {seconds_to_cue_time(mid_point)}"), # Update slider label
|
164 |
-
gr.update(value=current_start_time_str) # Set current start time in edit box
|
165 |
-
)
|
166 |
|
167 |
elif num_selected > 1:
|
168 |
-
|
169 |
-
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), None, None, None
|
170 |
-
else:
|
171 |
-
# Hide everything
|
172 |
-
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), None, None, None
|
173 |
-
|
174 |
-
def perform_manual_merge(selected_tracks, original_times, audio_duration, audio_filename):
|
175 |
-
"""Merges selected tracks. The internal logic is robust and unchanged."""
|
176 |
|
177 |
-
|
178 |
-
indices_to_merge = {int(label.split(' ')[1]) - 1 for label in selected_tracks}
|
179 |
|
180 |
-
|
|
|
|
|
|
|
|
|
181 |
# --- This logic correctly handles all merge cases. ---
|
182 |
-
|
183 |
-
|
184 |
-
for i, time in enumerate(original_times):
|
185 |
-
is_selected = i in indices_to_merge
|
186 |
-
|
187 |
# Condition to KEEP a track's start time:
|
188 |
# 1. It was NOT selected.
|
189 |
# OR
|
190 |
# 2. It WAS selected, BUT it's the start of a merge block.
|
191 |
# (This means it's the very first track, OR the track before it was NOT selected).
|
192 |
-
if not
|
193 |
-
|
194 |
|
195 |
-
# ---
|
196 |
# The new CUE text for the textbox
|
197 |
-
final_cue_text = format_cue_text(
|
198 |
-
|
199 |
|
200 |
# Return a tuple that will update the textbox, the state, and the checklist
|
201 |
-
return final_cue_text,
|
202 |
-
|
203 |
|
204 |
-
def perform_manual_split(split_time_sec,
|
205 |
"""Splits a track at the time specified by the slider."""
|
206 |
-
if split_time_sec in
|
207 |
raise gr.Error("This exact timestamp already exists.")
|
208 |
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
|
|
|
|
213 |
|
214 |
# --- Timeline Shift ---
|
215 |
-
def shift_timeline(shift_amount_sec,
|
216 |
"""Shifts all track start times by a specified amount."""
|
217 |
-
if not
|
218 |
raise gr.Error("No track times to shift.")
|
219 |
|
220 |
-
#
|
221 |
-
|
222 |
-
|
223 |
-
# Clean up by sorting and removing duplicates (e.g., if multiple tracks are clamped to 0)
|
224 |
-
new_times = sorted(list(set(new_times)))
|
225 |
|
226 |
-
|
227 |
-
|
228 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
229 |
|
230 |
|
231 |
# --- Edit Track Start Time ---
|
232 |
-
def edit_track_start_time(
|
233 |
-
"""Edits the start time of a single selected track."""
|
234 |
-
if not
|
235 |
raise gr.Error("No track selected for editing.")
|
236 |
-
|
237 |
new_time_sec = parse_cue_time_to_seconds(new_time_str)
|
238 |
if new_time_sec is None:
|
239 |
raise gr.Error("Invalid time format. Please use MM:SS:FF.")
|
240 |
|
241 |
-
track_idx =
|
242 |
-
|
243 |
# Boundary checks
|
244 |
-
|
245 |
-
|
246 |
|
247 |
-
if new_time_sec <=
|
248 |
-
raise gr.Error(f"New time cannot be earlier than the previous track's start time
|
249 |
-
if new_time_sec >=
|
250 |
-
raise gr.Error(f"New time cannot be later than the next track's start time
|
251 |
|
252 |
-
|
253 |
-
|
254 |
|
255 |
-
final_cue_text = format_cue_text(
|
256 |
-
|
257 |
-
return final_cue_text,
|
258 |
-
|
259 |
|
260 |
# --- Gradio User Interface Definition ---
|
261 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
262 |
gr.Markdown("# 🎵 Advanced CUE Sheet Generator")
|
263 |
|
264 |
# --- Hidden State Variables ---
|
265 |
-
|
|
|
266 |
audio_duration_state = gr.State(0)
|
267 |
-
audio_filename_state = gr.State("CDImage.wav")
|
268 |
|
269 |
with gr.Tabs():
|
270 |
with gr.TabItem("Start with Audio File"):
|
@@ -279,7 +338,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
279 |
|
280 |
with gr.TabItem("Start with CUE Text"):
|
281 |
gr.Markdown("Or paste CUE text below and click outside the box. The editing tools will appear automatically.")
|
282 |
-
cue_text_input_for_paste = gr.Textbox(label="Paste CUE Text Here", lines=8)
|
283 |
|
284 |
# The main output textbox is now outside the tabs, serving as a central display.
|
285 |
output_text = gr.Textbox(label="CUE Sheet Output", lines=15, show_copy_button=True, interactive=True)
|
@@ -312,18 +371,18 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
312 |
# --- Event Wiring ---
|
313 |
|
314 |
# Combined update for enabling editing groups
|
315 |
-
def show_editing_groups(
|
316 |
-
is_visible = bool(
|
317 |
return gr.update(visible=is_visible), gr.update(visible=is_visible)
|
318 |
|
319 |
# Workflow 1: Audio analysis button now updates everything, including the editing tools.
|
320 |
generate_button.click(
|
321 |
fn=analyze_audio_to_cue,
|
322 |
inputs=[audio_input, threshold_slider, min_length_slider, merge_length_slider, min_silence_length_slider],
|
323 |
-
outputs=[output_text,
|
324 |
).then(
|
325 |
fn=show_editing_groups,
|
326 |
-
inputs=[
|
327 |
outputs=[manual_editing_group, global_editing_group]
|
328 |
)
|
329 |
|
@@ -332,51 +391,51 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
332 |
cue_text_input_for_paste.change(
|
333 |
fn=parse_cue_and_update_ui,
|
334 |
inputs=[cue_text_input_for_paste],
|
335 |
-
outputs=[output_text,
|
336 |
).then(
|
337 |
fn=show_editing_groups,
|
338 |
-
inputs=[
|
339 |
outputs=[manual_editing_group, global_editing_group]
|
340 |
)
|
341 |
|
342 |
# Dynamic UI controller for showing/hiding Merge/Split tools
|
343 |
track_checkboxes.change(
|
344 |
fn=update_editing_tools,
|
345 |
-
inputs=[track_checkboxes,
|
346 |
-
outputs=[merge_tools, single_track_tools,
|
347 |
)
|
348 |
-
|
349 |
# Live update for the split slider's time display
|
350 |
split_slider.input(
|
351 |
fn=lambda t: f"Split at: {seconds_to_cue_time(t)}",
|
352 |
inputs=[split_slider],
|
353 |
outputs=[split_slider_label]
|
354 |
)
|
355 |
-
|
356 |
# Action buttons
|
357 |
merge_button.click(
|
358 |
fn=perform_manual_merge,
|
359 |
-
inputs=[track_checkboxes,
|
360 |
-
outputs=[output_text,
|
361 |
)
|
362 |
-
|
363 |
split_button.click(
|
364 |
fn=perform_manual_split,
|
365 |
-
inputs=[split_slider,
|
366 |
-
outputs=[output_text,
|
367 |
)
|
368 |
|
369 |
# --- Action Buttons for New Features ---
|
370 |
shift_button.click(
|
371 |
fn=shift_timeline,
|
372 |
-
inputs=[shift_amount_input,
|
373 |
-
outputs=[output_text,
|
374 |
)
|
375 |
|
376 |
edit_time_button.click(
|
377 |
fn=edit_track_start_time,
|
378 |
-
inputs=[track_checkboxes, edit_time_input,
|
379 |
-
outputs=[output_text,
|
380 |
)
|
381 |
|
382 |
if __name__ == "__main__":
|
|
|
2 |
import re
|
3 |
import librosa
|
4 |
import gradio as gr
|
5 |
+
from copy import deepcopy
|
6 |
|
7 |
# --- Helper Functions ---
|
8 |
|
|
|
24 |
return m * 60 + s + f / 75.0
|
25 |
return None
|
26 |
|
27 |
+
def format_cue_text(track_data, cue_globals):
    """Generate the final CUE sheet string from track data and global info.

    Args:
        track_data: list of dicts, each with 'time' (seconds, float) and an
            optional 'title' (may be None for auto-generated tracks).
        cue_globals: sheet-level metadata dict; 'filename' is expected,
            'performer', 'title' and 'filetype' are optional.

    Returns:
        The CUE sheet as a single string, or "" when there are no tracks.
    """
    if not track_data:
        return ""

    # Sort tracks by start time so TRACK numbers follow the timeline.
    sorted_tracks = sorted(track_data, key=lambda x: x['time'])

    # BUG FIX: the previous version evaluated the album-title fallback
    # (os.path.splitext(cue_globals["filename"])[0]) eagerly inside
    # dict.get(), raising KeyError when 'filename' was absent even if a
    # 'title' was present.  Resolve the filename once, lazily fall back.
    filename = cue_globals.get("filename", "CDImage.wav")
    album_title = cue_globals.get("title") or os.path.splitext(filename)[0]
    # Use parsed file type or default to WAVE.
    file_type = cue_globals.get("filetype", "WAVE")

    cue_text = f'PERFORMER "{cue_globals.get("performer", "Unknown Artist")}"\n'
    cue_text += f'TITLE "{album_title}"\n'
    cue_text += f'FILE "{filename}" {file_type}\n'

    for idx, track in enumerate(sorted_tracks):
        cue_time_str = seconds_to_cue_time(track['time'])
        # Use the track's own title when present; 'or' (not a dict.get
        # default) so an explicit None also falls back to a default name.
        title = track.get('title') or f"Track {idx+1:02d}"

        cue_text += f'  TRACK {idx+1:02d} AUDIO\n'
        cue_text += f'    TITLE "{title}"\n'
        cue_text += f'    INDEX 01 {cue_time_str}\n'
    return cue_text
|
53 |
|
54 |
+
def generate_track_choices(track_data, audio_duration):
    """Create choices for the CheckboxGroup as (label, index) tuples.

    Args:
        track_data: list of track dicts ({'time': float, 'title': str|None}).
        audio_duration: total audio length in seconds, used as the end time
            of the last track.

    Returns:
        List of (label, index) tuples for gr.CheckboxGroup choices; the
        stable numeric index is the selection value.
    """
    if not track_data:
        return []
    # Data is normally already sorted, but re-sort defensively.
    sorted_tracks = sorted(track_data, key=lambda x: x['time'])
    track_choices = []
    for i, track in enumerate(sorted_tracks):
        start_time = track['time']
        end_time = sorted_tracks[i + 1]['time'] if i < len(sorted_tracks) - 1 else audio_duration
        track_length = end_time - start_time
        # BUG FIX: tracks created by analysis/split carry an explicit
        # 'title': None, so dict.get(key, default) returned None and the
        # label rendered as '"None" (Starts: ...)'.  Use 'or' so None (and
        # empty) titles fall back to the default name.
        title = track.get('title') or f"Track {i+1:02d}"

        label = f'"{title}" (Starts: {seconds_to_cue_time(start_time)}) [Length: {seconds_to_cue_time(track_length)}]'
        track_choices.append((label, i))
    return track_choices
|
70 |
|
71 |
# --- Core Gradio Functions ---
|
|
|
114 |
|
115 |
# --- 4. Prepare Outputs for Gradio ---
|
116 |
times = sorted(list(set(times)))
|
117 |
+
|
118 |
+
# Convert times list to the new track_data structure
|
119 |
+
track_data = [{'time': t, 'title': None} for t in times]
|
120 |
+
|
121 |
audio_filename = os.path.basename(audio_file)
|
122 |
+
cue_globals = {"filename": audio_filename}
|
123 |
+
|
124 |
+
initial_cue_text = format_cue_text(track_data, cue_globals)
|
125 |
+
track_choices = generate_track_choices(track_data, audio_duration)
|
126 |
|
127 |
# This function now returns everything needed to update the entire UI in one step.
|
128 |
return (
|
129 |
+
initial_cue_text, cue_globals, track_data, audio_duration,
|
130 |
+
gr.update(choices=track_choices, value=[]), gr.update(visible=True)
|
131 |
)
|
132 |
|
133 |
def parse_cue_and_update_ui(cue_text):
    """Workflow 2: Parse pasted CUE text, preserving per-track titles.

    Returns a tuple of (cue text, cue_globals, track_data, audio_duration,
    checkbox update, visibility update).  On any parse failure the original
    text is echoed back and the editing UI stays hidden.
    """
    failure = (cue_text, {}, [], 0, gr.update(choices=[], value=[]), gr.update(visible=False))
    if not cue_text or "INDEX 01" not in cue_text:
        return failure

    cue_globals = {}
    track_data = []
    current_track = None  # None while still in the global (pre-TRACK) header

    for line in cue_text.split('\n'):
        line = line.strip()
        if not line:
            continue

        if re.search(r'TRACK\s+\d+\s+AUDIO', line, re.IGNORECASE):
            if current_track is not None:
                track_data.append(current_track)
            current_track = {}
            continue

        if current_track is None:
            # Global header: capture FILE (with its type), PERFORMER, and the
            # album-level TITLE (anchored at line start so track titles,
            # handled below, cannot match here).
            if match := re.search(r'FILE\s+"([^"]+)"\s+([A-Z0-9]+)', line, re.IGNORECASE):
                cue_globals['filename'] = match.group(1)
                cue_globals['filetype'] = match.group(2)
            elif match := re.search(r'PERFORMER\s+"([^"]+)"', line, re.IGNORECASE):
                cue_globals['performer'] = match.group(1)
            elif match := re.search(r'^TITLE\s+"([^"]+)"', line, re.IGNORECASE):
                cue_globals['title'] = match.group(1)
        else:
            if match := re.search(r'TITLE\s+"([^"]+)"', line, re.IGNORECASE):
                current_track['title'] = match.group(1)
            # BUG FIX: match only INDEX 01 (the track start).  The previous
            # pattern accepted any index number, so a pregap "INDEX 00"
            # appearing after "INDEX 01" would overwrite the start time.
            elif match := re.search(r'INDEX\s+01\s+([\d:]{7,8})', line, re.IGNORECASE):
                time_sec = parse_cue_time_to_seconds(match.group(1))
                # Compare against None (not truthiness) so 00:00:00 -> 0.0 is kept.
                if time_sec is not None:
                    current_track['time'] = time_sec

    # Flush the last open track ({} stays out: falsy means nothing parsed).
    if current_track:
        track_data.append(current_track)

    if not track_data or not cue_globals.get('filename'):
        return failure

    # Drop tracks that never got an INDEX 01, then sort by start time.
    track_data = sorted([t for t in track_data if 'time' in t], key=lambda x: x['time'])
    if not track_data:  # every parsed track was incomplete
        return failure

    # NOTE(review): without the audio file the real length is unknown; the
    # last track's start is used, so its displayed length is 0 — confirm
    # this is the intended heuristic.
    audio_duration = track_data[-1]['time']
    track_choices = generate_track_choices(track_data, audio_duration)

    # Re-generate the CUE text so the textbox shows consistent formatting.
    formatted_text = format_cue_text(track_data, cue_globals)

    return formatted_text, cue_globals, track_data, audio_duration, gr.update(choices=track_choices, value=[]), gr.update(visible=True)
|
192 |
|
193 |
+
def update_editing_tools(selected_indices, track_data, audio_duration):
    """Dynamically show/hide editing tools based on the selection count.

    Returns updates for: merge group, single-track group, split slider,
    split-slider label, and the start-time edit box (in that order).
    """
    merge_update = gr.update(visible=False)
    single_update = gr.update(visible=False)
    slider_update = gr.update()
    slider_label_update = gr.update()
    edit_box_update = gr.update()

    num_selected = len(selected_indices)

    # ROBUSTNESS: a selection can go stale after a merge/split rebuilds the
    # checklist; ignore an out-of-range index instead of raising IndexError.
    if num_selected == 1 and selected_indices[0] < len(track_data):
        track_idx = selected_indices[0]
        # gr.update() returns a plain dict; mutate it in place so the other
        # keys of the update object are not overwritten.
        single_update['visible'] = True

        start_time = track_data[track_idx]['time']
        end_time = audio_duration if (track_idx + 1) >= len(track_data) else track_data[track_idx + 1]['time']

        # Pad so a split cannot land exactly on a track boundary.
        # A CUE sheet frame is 1/75s (~0.013s); use slightly more.
        padding = 0.02
        split_possible = (start_time + padding) < (end_time - padding)

        if split_possible:
            mid_point = start_time + (end_time - start_time) / 2
            slider_update = gr.update(minimum=start_time + padding, maximum=end_time - padding, value=mid_point)
            slider_label_update = gr.update(value=f"Split at: {seconds_to_cue_time(mid_point)}")
        else:
            slider_label_update = gr.update(value="Track is too short to be split")

        edit_box_update = gr.update(value=seconds_to_cue_time(start_time))

    elif num_selected > 1:
        merge_update['visible'] = True

    return merge_update, single_update, slider_update, slider_label_update, edit_box_update
|
|
|
228 |
|
229 |
+
def perform_manual_merge(indices_to_merge, original_track_data, audio_duration, cue_globals):
    """Merge the selected tracks into their preceding neighbours.

    The first track of each contiguous selected run keeps its start time
    and absorbs the rest of the run.  Returns (cue text, new track data,
    checkbox update).
    """
    selected = set(indices_to_merge)

    def keeps_start(idx):
        # A track keeps its start time when it was NOT selected, or when it
        # opens a merge block (it is the very first track, or the track
        # just before it was not selected).
        return idx not in selected or idx == 0 or (idx - 1) not in selected

    new_track_data = [trk for idx, trk in enumerate(original_track_data) if keeps_start(idx)]

    # Rebuild everything the UI shows from the merged track list.
    final_cue_text = format_cue_text(new_track_data, cue_globals)
    refreshed_choices = generate_track_choices(new_track_data, audio_duration)

    return final_cue_text, new_track_data, gr.update(choices=refreshed_choices, value=[])
|
|
|
252 |
|
253 |
+
def perform_manual_split(split_time_sec, original_track_data, audio_duration, cue_globals):
    """Split a track by inserting a new boundary at the slider position.

    Raises gr.Error when the timestamp collides (within 1 ms) with an
    existing track start.  Returns (cue text, new track data, checkbox
    update).
    """
    collides = any(abs(entry['time'] - split_time_sec) < 1e-3 for entry in original_track_data)
    if collides:
        raise gr.Error("This exact timestamp already exists.")

    # The new half-track has no title yet; formatting assigns a default.
    extended = original_track_data + [{'time': split_time_sec, 'title': None}]
    new_track_data = sorted(extended, key=lambda entry: entry['time'])

    refreshed_choices = generate_track_choices(new_track_data, audio_duration)
    return (
        format_cue_text(new_track_data, cue_globals),
        new_track_data,
        gr.update(choices=refreshed_choices, value=[]),
    )
|
264 |
|
265 |
# --- Timeline Shift ---
|
266 |
+
def shift_timeline(shift_amount_sec, original_track_data, audio_duration, cue_globals):
    """Shift every track start by shift_amount_sec, clamping at zero.

    Raises gr.Error when there are no tracks.  Returns (cue text, new
    track data, checkbox update).
    """
    if not original_track_data:
        raise gr.Error("No track times to shift.")

    # Deep-copy so the Gradio state object is never mutated in place.
    shifted = deepcopy(original_track_data)
    # No upper bound: the last track is allowed to move forward freely.
    for entry in shifted:
        entry['time'] = max(0, entry['time'] + shift_amount_sec)

    # A negative shift can clamp several tracks onto 0; keep only the
    # first track seen at each resulting timestamp (sorted() is stable).
    unique_tracks = []
    seen_times = set()
    for entry in sorted(shifted, key=lambda item: item['time']):
        if entry['time'] in seen_times:
            continue
        seen_times.add(entry['time'])
        unique_tracks.append(entry)

    refreshed_choices = generate_track_choices(unique_tracks, audio_duration)
    return (
        format_cue_text(unique_tracks, cue_globals),
        unique_tracks,
        gr.update(choices=refreshed_choices, value=[]),
    )
|
289 |
|
290 |
|
291 |
# --- Edit Track Start Time ---
|
292 |
+
def edit_track_start_time(selected_indices, new_time_str, original_track_data, audio_duration, cue_globals):
    """Edit the start time of the (single) selected track by index.

    Raises gr.Error for: no selection, a stale selection, an unparsable
    time string, or a time that collides with a neighbouring track.
    Returns (cue text, new track data, checkbox update).
    """
    if not selected_indices:
        raise gr.Error("No track selected for editing.")

    new_time_sec = parse_cue_time_to_seconds(new_time_str)
    if new_time_sec is None:
        raise gr.Error("Invalid time format. Please use MM:SS:FF.")

    track_idx = selected_indices[0]
    # ROBUSTNESS: the selection may be stale after the track list changed;
    # fail with a clear message instead of an IndexError.
    if track_idx >= len(original_track_data):
        raise gr.Error("Selected track no longer exists. Please re-select.")

    # Boundary checks: the new start must stay strictly between its
    # neighbours' start times (-1 / +inf act as open-ended sentinels).
    prev_time = original_track_data[track_idx - 1]['time'] if track_idx > 0 else -1
    next_time = original_track_data[track_idx + 1]['time'] if track_idx < len(original_track_data) - 1 else float('inf')

    # CLEANUP: dropped pointless f-prefixes on these constant messages.
    if new_time_sec <= prev_time:
        raise gr.Error("New time cannot be earlier than the previous track's start time.")
    if new_time_sec >= next_time:
        raise gr.Error("New time cannot be later than or equal to the next track's start time.")

    # Deep-copy so the stored state is replaced, never mutated in place.
    new_track_data = deepcopy(original_track_data)
    new_track_data[track_idx]['time'] = new_time_sec

    final_cue_text = format_cue_text(new_track_data, cue_globals)
    new_track_choices = generate_track_choices(new_track_data, audio_duration)
    return final_cue_text, new_track_data, gr.update(choices=new_track_choices, value=[])
|
|
|
318 |
|
319 |
# --- Gradio User Interface Definition ---
|
320 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
321 |
gr.Markdown("# 🎵 Advanced CUE Sheet Generator")
|
322 |
|
323 |
# --- Hidden State Variables ---
|
324 |
+
cue_globals_state = gr.State({})
|
325 |
+
track_data_state = gr.State([])
|
326 |
audio_duration_state = gr.State(0)
|
|
|
327 |
|
328 |
with gr.Tabs():
|
329 |
with gr.TabItem("Start with Audio File"):
|
|
|
338 |
|
339 |
with gr.TabItem("Start with CUE Text"):
|
340 |
gr.Markdown("Or paste CUE text below and click outside the box. The editing tools will appear automatically.")
|
341 |
+
cue_text_input_for_paste = gr.Textbox(label="Paste CUE Text Here", lines=8, placeholder="Paste your CUE sheet content here and click outside the box. The editing tools will appear automatically.")
|
342 |
|
343 |
# The main output textbox is now outside the tabs, serving as a central display.
|
344 |
output_text = gr.Textbox(label="CUE Sheet Output", lines=15, show_copy_button=True, interactive=True)
|
|
|
371 |
# --- Event Wiring ---
|
372 |
|
373 |
# Combined update for enabling editing groups
|
374 |
+
def show_editing_groups(track_data):
|
375 |
+
is_visible = bool(track_data)
|
376 |
return gr.update(visible=is_visible), gr.update(visible=is_visible)
|
377 |
|
378 |
# Workflow 1: Audio analysis button now updates everything, including the editing tools.
|
379 |
generate_button.click(
|
380 |
fn=analyze_audio_to_cue,
|
381 |
inputs=[audio_input, threshold_slider, min_length_slider, merge_length_slider, min_silence_length_slider],
|
382 |
+
outputs=[output_text, cue_globals_state, track_data_state, audio_duration_state, track_checkboxes]
|
383 |
).then(
|
384 |
fn=show_editing_groups,
|
385 |
+
inputs=[track_data_state],
|
386 |
outputs=[manual_editing_group, global_editing_group]
|
387 |
)
|
388 |
|
|
|
391 |
cue_text_input_for_paste.change(
|
392 |
fn=parse_cue_and_update_ui,
|
393 |
inputs=[cue_text_input_for_paste],
|
394 |
+
outputs=[output_text, cue_globals_state, track_data_state, audio_duration_state, track_checkboxes]
|
395 |
).then(
|
396 |
fn=show_editing_groups,
|
397 |
+
inputs=[track_data_state],
|
398 |
outputs=[manual_editing_group, global_editing_group]
|
399 |
)
|
400 |
|
401 |
# Dynamic UI controller for showing/hiding Merge/Split tools
|
402 |
track_checkboxes.change(
|
403 |
fn=update_editing_tools,
|
404 |
+
inputs=[track_checkboxes, track_data_state, audio_duration_state],
|
405 |
+
outputs=[merge_tools, single_track_tools, split_slider, split_slider_label, edit_time_input]
|
406 |
)
|
407 |
+
|
408 |
# Live update for the split slider's time display
|
409 |
split_slider.input(
|
410 |
fn=lambda t: f"Split at: {seconds_to_cue_time(t)}",
|
411 |
inputs=[split_slider],
|
412 |
outputs=[split_slider_label]
|
413 |
)
|
414 |
+
|
415 |
# Action buttons
|
416 |
merge_button.click(
|
417 |
fn=perform_manual_merge,
|
418 |
+
inputs=[track_checkboxes, track_data_state, audio_duration_state, cue_globals_state],
|
419 |
+
outputs=[output_text, track_data_state, track_checkboxes]
|
420 |
)
|
421 |
+
|
422 |
split_button.click(
|
423 |
fn=perform_manual_split,
|
424 |
+
inputs=[split_slider, track_data_state, audio_duration_state, cue_globals_state],
|
425 |
+
outputs=[output_text, track_data_state, track_checkboxes]
|
426 |
)
|
427 |
|
428 |
# --- Action Buttons for New Features ---
|
429 |
shift_button.click(
|
430 |
fn=shift_timeline,
|
431 |
+
inputs=[shift_amount_input, track_data_state, audio_duration_state, cue_globals_state],
|
432 |
+
outputs=[output_text, track_data_state, track_checkboxes]
|
433 |
)
|
434 |
|
435 |
edit_time_button.click(
|
436 |
fn=edit_track_start_time,
|
437 |
+
inputs=[track_checkboxes, edit_time_input, track_data_state, audio_duration_state, cue_globals_state],
|
438 |
+
outputs=[output_text, track_data_state, track_checkboxes]
|
439 |
)
|
440 |
|
441 |
if __name__ == "__main__":
|