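"""Gradio demo: convert an uploaded .docx document to speech with Coqui TTS.

Offers a single-speaker LJSpeech voice and the multi-speaker VCTK voice; for
VCTK, a speaker can be picked from the embedded metadata table.

Dependencies: gradio, TTS (Coqui), python-docx.
"""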
import gradio as gr
from TTS.api import TTS
import tempfile
import docx

# Voice models dictionary with metadata on whether they support multi-speaker
VOICE_MODELS = {
    "LJSpeech (Standard Female)": {
        "model_name": "tts_models/en/ljspeech/vits",
        "multi_speaker": False
    },
    "VCTK (Multi-speaker English)": {
        "model_name": "tts_models/en/vctk/vits",
        "multi_speaker": True
    }
}

# Embedded speaker metadata for a subset of VCTK speakers
SPEAKER_METADATA = {
    "225": {"age": 23, "gender": "F", "accent": "English"},
    "226": {"age": 22, "gender": "M", "accent": "English"},
    "227": {"age": 38, "gender": "M", "accent": "English"},
    "228": {"age": 22, "gender": "F", "accent": "English"},
    "229": {"age": 23, "gender": "F", "accent": "English"},
    "230": {"age": 22, "gender": "F", "accent": "English"},
    "231": {"age": 23, "gender": "F", "accent": "English"},
    "232": {"age": 23, "gender": "M", "accent": "English"},
    "233": {"age": 23, "gender": "F", "accent": "English"},
    "234": {"age": 22, "gender": "F", "accent": "Scottish"}
    # Add more as needed
}

# Pre-format speaker dropdown choices as (display label, value) pairs;
# Coqui's VCTK model expects speaker IDs of the form "p225".
SPEAKER_CHOICES = [
    (f"p{sid} ({data['gender']}, {data['accent']}, {data['age']} yrs)", f"p{sid}")
    for sid, data in SPEAKER_METADATA.items()
]

# Model cache
MODEL_CACHE = {}

def load_tts_model(model_key):
    if model_key in MODEL_CACHE:
        return MODEL_CACHE[model_key]
    model_info = VOICE_MODELS[model_key]
    tts = TTS(model_name=model_info["model_name"], gpu=False)
    MODEL_CACHE[model_key] = tts
    return tts

def extract_text_from_docx(file):
    # gr.File may return a plain path string or a file wrapper depending on the Gradio version
    path = file.name if hasattr(file, "name") else file
    doc = docx.Document(path)
    return "\n".join(para.text for para in doc.paragraphs if para.text.strip())

def generate_audio(voice_key, speaker_id, docx_file):
    if docx_file is None:
        raise gr.Error("Please upload a .docx file first.")

    text = extract_text_from_docx(docx_file)
    tts = load_tts_model(voice_key)
    kwargs = {}

    if VOICE_MODELS[voice_key]["multi_speaker"]:
        kwargs["speaker"] = speaker_id

    # tempfile.mktemp is deprecated; create the file explicitly and keep it so Gradio can serve it
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        output_path = f.name
    tts.tts_to_file(text=text, file_path=output_path, **kwargs)
    return output_path

def update_speaker_visibility(voice_key):
    visible = VOICE_MODELS[voice_key]["multi_speaker"]
    return gr.update(visible=visible)

with gr.Blocks() as demo:
    gr.Markdown("## DOCX to Speech with Speaker Selection")

    with gr.Row():
        voice_dropdown = gr.Dropdown(
            choices=list(VOICE_MODELS.keys()),
            value="LJSpeech (Standard Female)",
            label="Select Voice"
        )

        speaker_dropdown = gr.Dropdown(
            choices=SPEAKER_CHOICES,
            value=SPEAKER_CHOICES[0][1],
            label="Select Speaker",
            visible=False
        )

    docx_input = gr.File(label="Upload .docx File", file_types=[".docx"])
    generate_btn = gr.Button("Generate Audio")
    audio_output = gr.Audio(label="Output Audio")

    voice_dropdown.change(fn=update_speaker_visibility, inputs=voice_dropdown, outputs=speaker_dropdown)

    generate_btn.click(
        fn=generate_audio,
        inputs=[voice_dropdown, speaker_dropdown, docx_input],
        outputs=audio_output
    )

demo.launch()