shukdevdatta123 committed
Commit f74edeb · verified · 1 Parent(s): ade1d83

Update app.py

Files changed (1)
  1. app.py +34 -283
app.py CHANGED
@@ -1,295 +1,46 @@
 import gradio as gr
-import torch
-import torchaudio
 import numpy as np
-import tempfile
-import os
-from pathlib import Path
-import librosa
-import soundfile as sf
-from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
-from transformers import Wav2Vec2Processor, Wav2Vec2Model
-from datasets import load_dataset
-import warnings
-import gc
-
-warnings.filterwarnings("ignore")
-
-class VoiceCloningTTS:
-    def __init__(self):
-        self.device = torch.device("cpu")
-        print(f"Using device: {self.device}")
-
-        try:
-            print("Loading SpeechT5 processor...")
-            self.processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
-
-            print("Loading SpeechT5 TTS model...")
-            self.model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
-            self.model.to(self.device)
-            self.model.eval()
-
-            print("Loading SpeechT5 vocoder...")
-            self.vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
-            self.vocoder.to(self.device)
-            self.vocoder.eval()
-
-            print("Loading Wav2Vec2 for speaker embedding...")
-            self.wav2vec2_processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
-            self.wav2vec2_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
-            self.wav2vec2_model.to(self.device)
-            self.wav2vec2_model.eval()
-
-            print("Loading speaker embeddings dataset...")
-            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-            self.speaker_embeddings_dataset = embeddings_dataset
-            self.default_speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(self.device)
-
-            self.user_speaker_embeddings = None
-            self.sample_rate = 16000
-
-            print("✅ TTS system initialized successfully!")
-        except Exception as e:
-            print(f"❌ Error initializing TTS system: {str(e)}")
-            raise e
-
-    def preprocess_audio(self, audio_path):
-        try:
-            waveform, sample_rate = torchaudio.load(audio_path)
-            if waveform.shape[0] > 1:
-                waveform = torch.mean(waveform, dim=0, keepdim=True)
-            if sample_rate != self.sample_rate:
-                resampler = torchaudio.transforms.Resample(sample_rate, self.sample_rate)
-                waveform = resampler(waveform)
-            waveform = waveform / (torch.max(torch.abs(waveform)) + 1e-8)
-            min_length = 3 * self.sample_rate
-            if waveform.shape[1] < min_length:
-                repeat_times = int(np.ceil(min_length / waveform.shape[1]))
-                waveform = waveform.repeat(1, repeat_times)[:, :min_length]
-            max_length = 20 * self.sample_rate
-            if waveform.shape[1] > max_length:
-                waveform = waveform[:, :max_length]
-            return waveform.squeeze()
-        except Exception as e:
-            print(f"Error in audio preprocessing: {e}")
-            raise e
-
-    def extract_speaker_embedding_advanced(self, audio_path):
-        try:
-            print(f"Processing audio file: {audio_path}")
-            audio_tensor = self.preprocess_audio(audio_path)
-            audio_numpy = audio_tensor.numpy()
-
-            print("Extracting deep audio features with Wav2Vec2...")
-            with torch.no_grad():
-                inputs = self.wav2vec2_processor(audio_numpy, sampling_rate=self.sample_rate, return_tensors="pt", padding=True)
-                outputs = self.wav2vec2_model(inputs.input_values.to(self.device))
-                speaker_features = torch.mean(outputs.last_hidden_state, dim=1)
-
-            print(f"Extracted Wav2Vec2 features: {speaker_features.shape}")
-            best_embedding = self.find_best_matching_speaker(speaker_features, audio_numpy)
-
-            print("✅ Advanced speaker embedding created successfully!")
-            return best_embedding, "✅ Voice profile extracted using advanced neural analysis!"
-        except Exception as e:
-            print(f"Error in advanced embedding extraction: {e}")
-            return self.extract_speaker_embedding_improved(audio_path)
-
-    def find_best_matching_speaker(self, target_features, audio_numpy):
-        try:
-            mfccs = librosa.feature.mfcc(y=audio_numpy, sr=self.sample_rate, n_mfcc=13)
-            pitch, _ = librosa.piptrack(y=audio_numpy, sr=self.sample_rate)
-            spectral_centroids = librosa.feature.spectral_centroid(y=audio_numpy, sr=self.sample_rate)
-
-            acoustic_signature = np.concatenate([
-                np.mean(mfccs, axis=1),
-                np.std(mfccs, axis=1),
-                [np.mean(pitch[pitch > 0]) if np.any(pitch > 0) else 200],
-                [np.mean(spectral_centroids)]
-            ])
-
-            best_embedding = self.default_speaker_embeddings
-            modification_factor = 0.3  # Increased for more distinct voice
-            feature_mod = torch.tensor(acoustic_signature[:best_embedding.shape[1]], dtype=torch.float32).to(self.device)
-            feature_mod = (feature_mod - torch.mean(feature_mod)) / (torch.std(feature_mod) + 1e-8)
-            modified_embedding = best_embedding + modification_factor * feature_mod.unsqueeze(0)
-            modified_embedding = torch.nn.functional.normalize(modified_embedding, p=2, dim=1)
-
-            return modified_embedding
-        except Exception as e:
-            print(f"Error in speaker matching: {e}")
-            return self.default_speaker_embeddings
-
-    def extract_speaker_embedding_improved(self, audio_path):
-        try:
-            print("Using improved speaker embedding extraction...")
-            audio_tensor = self.preprocess_audio(audio_path)
-            audio_numpy = audio_tensor.numpy()
-
-            print("Extracting comprehensive acoustic features...")
-            mfccs = librosa.feature.mfcc(y=audio_numpy, sr=self.sample_rate, n_mfcc=20)
-            delta_mfccs = librosa.feature.delta(mfccs)
-            delta2_mfccs = librosa.feature.delta(mfccs, order=2)
-            f0, _, _ = librosa.pyin(audio_numpy, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'))
-            f0_clean = f0[~np.isnan(f0)]
-            spectral_centroids = librosa.feature.spectral_centroid(y=audio_numpy, sr=self.sample_rate)
-            spectral_bandwidth = librosa.feature.spectral_bandwidth(y=audio_numpy, sr=self.sample_rate)
-            spectral_rolloff = librosa.feature.spectral_rolloff(y=audio_numpy, sr=self.sample_rate)
-            spectral_contrast = librosa.feature.spectral_contrast(y=audio_numpy, sr=self.sample_rate)
-            lpc_coeffs = librosa.lpc(audio_numpy, order=16)
-
-            features = np.concatenate([
-                np.mean(mfccs, axis=1),
-                np.std(mfccs, axis=1),
-                np.mean(delta_mfccs, axis=1),
-                np.mean(delta2_mfccs, axis=1),
-                [np.mean(f0_clean) if len(f0_clean) > 0 else 200],
-                [np.std(f0_clean) if len(f0_clean) > 0 else 50],
-                [np.mean(spectral_centroids)],
-                [np.mean(spectral_bandwidth)],
-                [np.mean(spectral_rolloff)],
-                np.mean(spectral_contrast, axis=1),
-                lpc_coeffs[1:]
-            ])
-
-            print(f"Extracted {len(features)} advanced acoustic features")
-            base_embedding = self.default_speaker_embeddings
-            embedding_size = base_embedding.shape[1]
-            features_normalized = (features - np.mean(features)) / (np.std(features) + 1e-8)
-
-            if len(features_normalized) > embedding_size:
-                modification_vector = features_normalized[:embedding_size]
-            else:
-                modification_vector = np.pad(features_normalized, (0, embedding_size - len(features_normalized)), 'reflect')
-
-            modification_tensor = torch.tensor(modification_vector, dtype=torch.float32).to(self.device)
-            modification_strength = 0.3  # Increased for more distinct voice
-            speaker_embedding = base_embedding + modification_strength * modification_tensor.unsqueeze(0)
-
-            if len(f0_clean) > 0:
-                pitch_factor = np.mean(f0_clean) / 200.0
-                pitch_modification = 0.05 * (pitch_factor - 1.0)
-                speaker_embedding = speaker_embedding * (1.0 + pitch_modification)
-
-            speaker_embedding = torch.nn.functional.normalize(speaker_embedding, p=2, dim=1)
-            return speaker_embedding, "✅ Voice profile extracted with enhanced acoustic analysis!"
-        except Exception as e:
-            print(f"❌ Error in improved embedding extraction: {str(e)}")
-            return None, f"❌ Error processing audio: {str(e)}"
-
-    def extract_speaker_embedding(self, audio_path):
-        try:
-            return self.extract_speaker_embedding_advanced(audio_path)
-        except Exception as e:
-            print(f"Advanced method failed: {e}")
-            return self.extract_speaker_embedding_improved(audio_path)
-
-    def synthesize_speech(self, text, use_cloned_voice=True):
-        try:
-            if not text.strip():
-                return None, "❌ Please enter some text to convert."
-            if len(text) > 500:
-                text = text[:500]
-                print("Text truncated to 500 characters")
-
-            print(f"Synthesizing speech for: '{text[:50]}...'")
-            if use_cloned_voice and self.user_speaker_embeddings is not None:
-                speaker_embeddings = self.user_speaker_embeddings
-                voice_type = "your cloned voice"
-                print("Using cloned voice embeddings")
-            else:
-                speaker_embeddings = self.default_speaker_embeddings
-                voice_type = "default voice"
-                print("Using default voice embeddings")
-
-            print(f"Speaker embedding shape: {speaker_embeddings.shape}")
-            inputs = self.processor(text=text, return_tensors="pt")
-            input_ids = inputs["input_ids"].to(self.device)
-
-            print("Generating speech...")
-            with torch.no_grad():
-                speaker_embeddings = speaker_embeddings.to(self.device)
-                if speaker_embeddings.dim() == 1:
-                    speaker_embeddings = speaker_embeddings.unsqueeze(0)
-                speech = self.model.generate_speech(input_ids, speaker_embeddings, vocoder=self.vocoder)
-
-            speech_numpy = speech.cpu().numpy()
-            print(f"Generated audio shape: {speech_numpy.shape}")
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-                sf.write(tmp_file.name, speech_numpy, self.sample_rate)
-                print(f"Audio saved to: {tmp_file.name}")
-            del speech, input_ids
-            gc.collect()
-            return tmp_file.name, f"✅ Speech generated successfully using {voice_type}!"
-        except Exception as e:
-            print(f"❌ Error in synthesize_speech: {str(e)}")
-            return None, f"❌ Error generating speech: {str(e)}"
-
-print("🚀 Initializing Voice Cloning TTS System...")
-tts_system = VoiceCloningTTS()
-
-def process_voice_upload(audio_file):
-    if audio_file is None:
-        return "❌ Please upload an audio file first.", gr.update(interactive=False), gr.update(interactive=False)
-    try:
-        print(f"Processing uploaded file: {audio_file}")
-        speaker_embedding, message = tts_system.extract_speaker_embedding(audio_file)
-        if speaker_embedding is not None:
-            tts_system.user_speaker_embeddings = speaker_embedding
-            print("✅ Speaker embeddings saved successfully")
-            return message, gr.update(interactive=True), gr.update(interactive=True)
-        else:
-            return message, gr.update(interactive=False), gr.update(interactive=False)
-    except Exception as e:
-        error_msg = f"❌ Error processing audio: {str(e)}"
-        print(error_msg)
-        return error_msg, gr.update(interactive=False), gr.update(interactive=False)
-
-def generate_speech(text, use_cloned_voice):
-    if not text.strip():
-        return None, "❌ Please enter some text to convert."
-    try:
-        print(f"Generating speech - Use cloned voice: {use_cloned_voice}")
-        audio_file, message = tts_system.synthesize_speech(text, use_cloned_voice)
-        return audio_file, message
-    except Exception as e:
-        error_msg = f"❌ Error generating speech: {str(e)}"
-        print(error_msg)
-        return None, error_msg
-
-def clear_voice_profile():
-    tts_system.user_speaker_embeddings = None
-    return "🔄 Voice profile cleared.", gr.update(interactive=False), gr.update(interactive=False)
-
-def update_generate_button(text, use_cloned):
-    text_ready = bool(text.strip())
-    voice_ready = (not use_cloned) or (tts_system.user_speaker_embeddings is not None)
-    return gr.update(interactive=text_ready and voice_ready)
-
-with gr.Blocks(title="Voice Cloning TTS System") as demo:
-    gr.Markdown("# Voice Cloning TTS System")
-    gr.Markdown("Upload an audio file to clone your voice and generate speech.")
-
     with gr.Row():
-        with gr.Column():
-            voice_upload = gr.Audio(label="Upload Voice Sample", type="filepath", sources=["upload", "microphone"])
-            upload_status = gr.Textbox(label="Status", interactive=False)
-            clear_btn = gr.Button("Clear Voice Profile")
-
-        with gr.Column():
-            text_input = gr.Textbox(label="Text to Convert", lines=5)
-            use_cloned_voice = gr.Checkbox(label="Use Cloned Voice", value=True, interactive=False)
-            generate_btn = gr.Button("Generate Speech", interactive=False)
-
-    output_audio = gr.Audio(label="Generated Speech", type="filepath")
-    generation_status = gr.Textbox(label="Generation Status", interactive=False)
-
-    voice_upload.change(fn=process_voice_upload, inputs=[voice_upload], outputs=[upload_status, use_cloned_voice, generate_btn])
-    text_input.change(fn=update_generate_button, inputs=[text_input, use_cloned_voice], outputs=[generate_btn])
-    use_cloned_voice.change(fn=update_generate_button, inputs=[text_input, use_cloned_voice], outputs=[generate_btn])
-    generate_btn.click(fn=generate_speech, inputs=[text_input, use_cloned_voice], outputs=[output_audio, generation_status])
-    clear_btn.click(fn=clear_voice_profile, outputs=[upload_status, use_cloned_voice, generate_btn])
-
-if __name__ == "__main__":
-    print("🌟 Starting Voice Cloning TTS System...")
-    demo.launch()
 
 import gradio as gr
+from TTS.api import TTS
 import numpy as np
+
+# Load the YourTTS model once at startup
+tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False)
+sample_rate = tts.synthesizer.output_sample_rate
+
+def generate_speech(reference_audio, text):
+    """
+    Generate speech audio mimicking the voice from the reference audio.
+
+    Parameters:
+        reference_audio (str): Filepath to the uploaded voice sample.
+        text (str): Text to convert to speech.
+
+    Returns:
+        tuple: (sample rate, audio waveform as a numpy array)
+    """
+    # Generate speech using the reference audio and text
+    wav = tts.tts(text=text, speaker_wav=reference_audio, language="en")
+    # Convert the list to a numpy array; Gradio's Audio component expects (sample_rate, data)
+    wav_np = np.array(wav)
+    return (sample_rate, wav_np)
+
+# Build the Gradio interface
+with gr.Blocks(title="Voice Cloning TTS") as app:
+    gr.Markdown("## Voice Cloning Text-to-Speech")
+    gr.Markdown("Upload a short voice sample in English, then enter text to hear it in your voice!")
+
     with gr.Row():
+        audio_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Your Voice Sample (English)")
+        text_input = gr.Textbox(label="Enter Text to Convert to Speech", placeholder="e.g., I love chocolate")
+
+    generate_btn = gr.Button("Generate Speech")
+    audio_output = gr.Audio(label="Generated Speech", interactive=False)
+
+    # Connect the button to the generation function
+    generate_btn.click(
+        fn=generate_speech,
+        inputs=[audio_input, text_input],
+        outputs=audio_output
+    )
+
+# Launch the application
+app.launch()
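
For reference, the YourTTS call used above can be smoke-tested outside Gradio. This is a minimal sketch, assuming the Coqui TTS package is installed (pip install TTS); the audio file names are placeholders, not files from this repository:

    from TTS.api import TTS

    # Same YourTTS checkpoint the Space loads at startup
    tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False)

    # speaker_wav is a hypothetical path to a short English voice sample
    tts.tts_to_file(
        text="Hello, this is a quick voice cloning test.",
        speaker_wav="voice_sample.wav",
        language="en",
        file_path="cloned_output.wav",
    )

If the output sounds right locally, the same speaker_wav and language arguments behave identically inside the Space's generate_speech handler.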