# app.py — TTS voice generator (Gradio app)
# NOTE(review): the following lines were Hugging Face web-view residue
# accidentally captured with the file; preserved here as a comment so the
# module is valid Python:
#   SohomToom's picture / Update app.py / b3721bb verified / raw / history blame / 9.32 kB
# --- Environment setup and imports ---
import os
# Disable numba's on-disk cache before any numba-backed dependency (pulled in
# by the TTS stack) is imported; setting it after import would be too late.
os.environ["NUMBA_DISABLE_CACHE"] = "1"
import os  # NOTE(review): duplicate of the import above — harmless but redundant
import gradio as gr
from docx import Document  # NOTE(review): unused here — presumably for planned DOCX input; verify
from TTS.api import TTS
import tempfile
import zipfile  # NOTE(review): unused in the visible code
from io import BytesIO  # NOTE(review): unused in the visible code
import re
from pydub import AudioSegment
# NOTE(review): module-level accumulator that is never referenced again below.
final_audio = AudioSegment.empty()
from pydub import AudioSegment  # NOTE(review): duplicate import — redundant
# NOTE(review): this imported name is shadowed by the local `def generate_audio`
# further down, so the bark import is effectively dead code here.
from bark import generate_audio  # Importing Bark
# Voice model
VOICE_MODEL = "tts_models/en/vctk/vits"
# Embedded metadata (from your file)
# Maps VCTK speaker IDs (the numeric part of "pNNN") to demographic info used
# to build human-readable dropdown labels. Ages/accents are static data.
SPEAKER_METADATA = {
    300: {"age": 23, "gender": "F", "accent": "American"},
    271: {"age": 19, "gender": "M", "accent": "Scottish"},
    287: {"age": 23, "gender": "M", "accent": "English"},
    262: {"age": 23, "gender": "F", "accent": "Scottish"},
    284: {"age": 20, "gender": "M", "accent": "Scottish"},
    297: {"age": 20, "gender": "F", "accent": "American"},
    227: {"age": 38, "gender": "M", "accent": "English"},
    246: {"age": 22, "gender": "M", "accent": "Scottish"},
    225: {"age": 23, "gender": "F", "accent": "English"},
    259: {"age": 23, "gender": "M", "accent": "English"},
    252: {"age": 22, "gender": "M", "accent": "Scottish"},
    231: {"age": 23, "gender": "F", "accent": "English"},
    266: {"age": 22, "gender": "F", "accent": "Irish"},
    241: {"age": 21, "gender": "M", "accent": "Scottish"},
    312: {"age": 19, "gender": "F", "accent": "Canadian"},
    329: {"age": 23, "gender": "F", "accent": "American"},
    232: {"age": 23, "gender": "M", "accent": "English"},
    305: {"age": 19, "gender": "F", "accent": "American"},
    311: {"age": 21, "gender": "M", "accent": "American"},
    301: {"age": 23, "gender": "F", "accent": "American"},
    304: {"age": 22, "gender": "M", "accent": "NorthernIrish"},
    310: {"age": 21, "gender": "F", "accent": "American"},
    260: {"age": 21, "gender": "M", "accent": "Scottish"},
    315: {"age": 18, "gender": "M", "accent": "American"},
    374: {"age": 28, "gender": "M", "accent": "Australian"},
    364: {"age": 23, "gender": "M", "accent": "Irish"},
    269: {"age": 20, "gender": "F", "accent": "English"},
    345: {"age": 22, "gender": "M", "accent": "American"},
    326: {"age": 26, "gender": "M", "accent": "Australian"},
    343: {"age": 27, "gender": "F", "accent": "Canadian"},
    230: {"age": 22, "gender": "F", "accent": "English"},
    376: {"age": 22, "gender": "M", "accent": "Indian"},
    240: {"age": 21, "gender": "F", "accent": "English"},
    298: {"age": 19, "gender": "M", "accent": "Irish"},
    272: {"age": 23, "gender": "M", "accent": "Scottish"},
    248: {"age": 23, "gender": "F", "accent": "Indian"},
    264: {"age": 23, "gender": "F", "accent": "Scottish"},
    250: {"age": 22, "gender": "F", "accent": "English"},
    292: {"age": 23, "gender": "M", "accent": "NorthernIrish"},
    237: {"age": 22, "gender": "M", "accent": "Scottish"},
    363: {"age": 22, "gender": "M", "accent": "Canadian"},
    313: {"age": 24, "gender": "F", "accent": "Irish"},
    285: {"age": 21, "gender": "M", "accent": "Scottish"},
    268: {"age": 23, "gender": "F", "accent": "English"},
    302: {"age": 20, "gender": "M", "accent": "Canadian"},
    261: {"age": 26, "gender": "F", "accent": "NorthernIrish"},
    336: {"age": 18, "gender": "F", "accent": "SouthAfrican"},
    288: {"age": 22, "gender": "F", "accent": "Irish"},
    226: {"age": 22, "gender": "M", "accent": "English"},
    277: {"age": 23, "gender": "F", "accent": "English"},
    360: {"age": 19, "gender": "M", "accent": "American"},
    257: {"age": 24, "gender": "F", "accent": "English"},
    254: {"age": 21, "gender": "M", "accent": "English"},
    339: {"age": 21, "gender": "F", "accent": "American"},
    323: {"age": 19, "gender": "F", "accent": "SouthAfrican"},
    255: {"age": 19, "gender": "M", "accent": "Scottish"},
    249: {"age": 22, "gender": "F", "accent": "Scottish"},
    293: {"age": 22, "gender": "F", "accent": "NorthernIrish"},
    244: {"age": 22, "gender": "F", "accent": "English"},
    245: {"age": 25, "gender": "M", "accent": "Irish"},
    361: {"age": 19, "gender": "F", "accent": "American"},
    314: {"age": 26, "gender": "F", "accent": "SouthAfrican"},
    308: {"age": 18, "gender": "F", "accent": "American"},
    229: {"age": 23, "gender": "F", "accent": "English"},
    341: {"age": 26, "gender": "F", "accent": "American"},
    275: {"age": 23, "gender": "M", "accent": "Scottish"},
    263: {"age": 22, "gender": "M", "accent": "Scottish"},
    253: {"age": 22, "gender": "F", "accent": "Welsh"},
    299: {"age": 25, "gender": "F", "accent": "American"},
    316: {"age": 20, "gender": "M", "accent": "Canadian"},
    282: {"age": 23, "gender": "F", "accent": "English"},
    362: {"age": 29, "gender": "F", "accent": "American"},
    294: {"age": 33, "gender": "F", "accent": "American"},
    274: {"age": 22, "gender": "M", "accent": "English"},
    279: {"age": 23, "gender": "M", "accent": "English"},
    281: {"age": 29, "gender": "M", "accent": "Scottish"},
    286: {"age": 23, "gender": "M", "accent": "English"},
    258: {"age": 22, "gender": "M", "accent": "English"},
    247: {"age": 22, "gender": "M", "accent": "Scottish"},
    351: {"age": 21, "gender": "F", "accent": "NorthernIrish"},
    283: {"age": 24, "gender": "F", "accent": "Irish"},
    334: {"age": 18, "gender": "M", "accent": "American"},
    333: {"age": 19, "gender": "F", "accent": "American"},
    295: {"age": 23, "gender": "F", "accent": "Irish"},
    330: {"age": 26, "gender": "F", "accent": "American"},
    335: {"age": 25, "gender": "F", "accent": "NewZealand"},
    228: {"age": 22, "gender": "F", "accent": "English"},
    267: {"age": 23, "gender": "F", "accent": "English"},
    273: {"age": 18, "gender": "F", "accent": "English"}
}
# Bark prompts (example)
# Voice-style prompt labels shown when the "bark" engine is selected.
BARK_PROMPTS = [
    "Shy girl",
    "Old man",
    "Excited child",
    "Angry woman"
]
def list_speaker_choices(metadata):
    """Build dropdown label strings from a speaker-metadata mapping.

    Args:
        metadata: dict mapping speaker ID -> {"age", "gender", "accent"}.

    Returns:
        list[str]: labels of the form "Speaker <id> | <gender> | <accent>".
    """
    # BUG FIX: the original ignored the `metadata` parameter and always read
    # the global SPEAKER_METADATA; use the argument the caller passes in.
    return [
        f"Speaker {sid} | {meta['gender']} | {meta['accent']}"
        for sid, meta in metadata.items()
    ]
def get_speaker_id_from_label(label):
    """Extract the numeric speaker ID from a dropdown label.

    Labels look like "Speaker 300 | F | American". The original implementation
    returned "Speaker 300", which made the downstream "p" + speaker_id
    concatenation produce the invalid VCTK tag "pSpeaker 300".

    Args:
        label: the label string selected in the speaker dropdown.

    Returns:
        str: the digits of the speaker ID (e.g. "300"); falls back to the
        stripped first segment if no digits are present.
    """
    head = label.split("|")[0]
    match = re.search(r"\d+", head)
    return match.group(0) if match else head.strip()
def generate_audio(sample_text, speaker_label, engine):
    """Synthesize `sample_text` to a WAV file with the chosen TTS engine.

    Args:
        sample_text: text to speak.
        speaker_label: dropdown label (e.g. "Speaker 300 | F | American").
        engine: one of "bark", "coqui", "vctk".

    Returns:
        str: path to the generated temporary .wav file (not auto-deleted).

    Raises:
        ValueError: if `engine` is not a recognized engine name.
    """
    # Keep only the digits of the label's first segment so the VCTK speaker
    # tag becomes e.g. "p300" (the raw label segment is "Speaker 300").
    speaker_id = re.sub(r"\D", "", speaker_label.split("|")[0])

    # Engine selection logic
    if engine == "bark":
        # NOTE(review): placeholder path — replace with the actual Bark model.
        # Also note the UI wires the speaker dropdown (not the Bark prompt
        # dropdown) into this function; confirm intended Bark inputs.
        model = TTS("bark_model_path")
    elif engine == "coqui":
        model = TTS("tts_models/multilingual/multi-dataset/xtts_v2")
    elif engine == "vctk":
        model = TTS(VOICE_MODEL)
    else:
        # BUG FIX: the original left `model` as None here and crashed with an
        # opaque AttributeError on `model.tts_to_file`.
        raise ValueError(f"Unknown TTS engine: {engine!r}")

    # delete=False keeps the file alive after the context closes so Gradio
    # can serve it back to the browser.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_wav:
        model.tts_to_file(text=sample_text, speaker="p" + speaker_id, file_path=tmp_wav.name)
        return tmp_wav.name
# --- UI Components ---
with gr.Blocks() as demo:
    gr.Markdown("## 📄 TTS Voice Generator with Multiple Engines")

    # Engine dropdown
    engine_dropdown = gr.Dropdown(
        label="Select TTS Engine",
        choices=["bark", "coqui", "vctk"],
        value="vctk"
    )

    # BUG FIX: choices are passed at construction time. Assigning `.choices`
    # after the component is created (as the original did) is not reliably
    # reflected in the rendered UI. Also, the default engine is "vctk", so the
    # speaker dropdown starts visible (the original hid it until the engine
    # selection was changed).
    speaker_dropdown = gr.Dropdown(
        label="Select Speaker",
        choices=list_speaker_choices(SPEAKER_METADATA),
        visible=True
    )
    prompt_dropdown = gr.Dropdown(
        label="Select Prompt",
        choices=BARK_PROMPTS,
        visible=False
    )

    # Sample text box and audio output
    sample_textbox = gr.Textbox(label="Enter Sample Text (Max 500 characters)", max_lines=5)
    sample_audio = gr.Audio(label="Sample Output Audio", type="filepath")

    def update_dropdowns(engine):
        """Show the Bark prompt list for "bark", the speaker list otherwise.

        BUG FIX: the original also mutated `component.visible` server-side,
        which has no effect on the rendered page — only the gr.update return
        values drive visibility.
        """
        show_prompts = engine == "bark"
        return gr.update(visible=not show_prompts), gr.update(visible=show_prompts)

    # Trigger dropdown visibility changes
    engine_dropdown.change(update_dropdowns, inputs=engine_dropdown, outputs=[speaker_dropdown, prompt_dropdown])

    # Button to generate audio from sample text
    generate_button = gr.Button("Generate Audio")
    generate_button.click(
        fn=generate_audio,
        inputs=[sample_textbox, speaker_dropdown, engine_dropdown],
        outputs=[sample_audio]
    )

    def clear_sample():
        """Reset the sample text box and clear the audio output."""
        return "", None

    # Button to clear the sample text and audio
    clear_button = gr.Button("Clear")
    clear_button.click(fn=clear_sample, inputs=[], outputs=[sample_textbox, sample_audio])

if __name__ == "__main__":
    demo.launch()