update slicer in finetune_gradio, legacy min_length 2s changed to 20s
src/f5_tts/train/finetune_gradio.py
CHANGED
@@ -178,45 +178,12 @@ def get_audio_duration(audio_path):
     return audio.shape[1] / sample_rate
 
 
-def get_rms(
-    y,
-    frame_length=2048,
-    hop_length=512,
-    pad_mode="constant",
-):  # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py
-    padding = (int(frame_length // 2), int(frame_length // 2))
-    y = np.pad(y, padding, mode=pad_mode)
-
-    axis = -1
-    # put our new within-frame axis at the end for now
-    out_strides = y.strides + tuple([y.strides[axis]])
-    # Reduce the shape on the framing axis
-    x_shape_trimmed = list(y.shape)
-    x_shape_trimmed[axis] -= frame_length - 1
-    out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
-    xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
-    if axis < 0:
-        target_axis = axis - 1
-    else:
-        target_axis = axis + 1
-    xw = np.moveaxis(xw, -1, target_axis)
-    # Downsample along the target axis
-    slices = [slice(None)] * xw.ndim
-    slices[axis] = slice(0, None, hop_length)
-    x = xw[tuple(slices)]
-
-    # Calculate power
-    power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
-
-    return np.sqrt(power)
-
-
 class Slicer:  # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py
     def __init__(
         self,
         sr: int,
         threshold: float = -40.0,
-        min_length: int = 2000,
+        min_length: int = 20000,  # 20 seconds
         min_interval: int = 300,
         hop_size: int = 20,
         max_sil_kept: int = 2000,
@@ -247,7 +214,7 @@ class Slicer:  # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py
         samples = waveform
         if samples.shape[0] <= self.min_length:
             return [waveform]
-        rms_list = get_rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
+        rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
         sil_tags = []
         silence_start = None
         clip_start = 0
@@ -301,8 +268,7 @@ class Slicer:  # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py
             silence_end = min(total_frames, silence_start + self.max_sil_kept)
             pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
             sil_tags.append((pos, total_frames + 1))
-        # Apply and return slices
-        ####音频+起始时间+终止时间
+        # Apply and return slices: [chunk, start, end]
         if len(sil_tags) == 0:
             return [[waveform, 0, int(total_frames * self.hop_size)]]
         else:
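Note on the replaced helper: the deleted get_rms computed frame-wise RMS with NumPy stride tricks, and librosa.feature.rms should return the same frame-wise RMS for a 1-D signal with matching frame_length/hop_length, which is why the call site can swap one for the other. A minimal sketch of the replacement call in isolation (not part of this commit; the synthetic signal and parameter values are illustrative only):

    import numpy as np
    import librosa

    # Illustrative only: frame-wise RMS of a 1-D signal, as the updated slicer computes it.
    y = np.random.default_rng(0).standard_normal(16000).astype(np.float32)
    rms = librosa.feature.rms(y=y, frame_length=2048, hop_length=512, pad_mode="constant").squeeze(0)
    print(rms.shape)  # one RMS value per hop, matching what get_rms produced

On the default change: clips whose length does not exceed self.min_length are still returned unsliced via the early return [waveform] shown above; per the commit message and the inline comment, the default now corresponds to 20 seconds (20000) instead of the legacy 2 seconds (2000).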