import gradio as gr
from TTS.api import TTS

# Initialize the TTS models: a multilingual YourTTS model for voice cloning,
# plus dedicated single-language models for Chinese, English, French, and German.
tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=False)
zh_tts = TTS(model_name="tts_models/zh-CN/baker/tacotron2-DDC-GST", progress_bar=False, gpu=False)
en_tts = TTS(model_name="tts_models/en/ljspeech/vits", gpu=False)
fr_tts = TTS(model_name="tts_models/fr/css10/vits", gpu=False)
de_tts = TTS(model_name="tts_models/de/thorsten/vits", gpu=False)

def text_to_speech(text: str, speaker_wav, language: str):
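    """Synthesize `text` in the selected language; the recorded `speaker_wav` sample is passed on for voice cloning when provided."""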
    file_path = "output.wav"
    if language == "zh-CN":
        if speaker_wav is not None:
            zh_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path)
        else:
            zh_tts.tts_to_file(text, file_path=file_path)
    elif language == "de":
        if speaker_wav is not None:
            de_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path)
        else:
            de_tts.tts_to_file(text, file_path=file_path)
    elif language == "fr":
        if speaker_wav is not None:
            fr_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path)
        else:
            fr_tts.tts_to_file(text, file_path=file_path)
    elif language == "en":
        if speaker_wav is not None:
            en_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path)
        else:
            en_tts.tts_to_file(text, file_path=file_path)
    else:
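        # Fall back to the multilingual YourTTS model for any other language code.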
        if speaker_wav is not None:
            tts.tts_to_file(text, speaker_wav=speaker_wav, language=language, file_path=file_path)
        else:
            tts.tts_to_file(text, speaker=tts.speakers[0], language=language, file_path=file_path)
    return file_path

# UI: text input, optional microphone sample for voice cloning, and language selector.
inputs = [gr.Textbox(label="Input the text", value="", max_lines=3),
          gr.Audio(label="Input your voice here", source="microphone", type="filepath"),
          gr.Radio(label="Language", choices=["en", "zh-CN", "fr", "de"], value="en")]
outputs = gr.Audio(label="Output")

demo = gr.Interface(fn=text_to_speech, inputs=inputs, outputs=outputs)

demo.launch()