import io
import logging
import os
import re
import subprocess
import tempfile
import traceback

import numpy as np
import requests
import scipy.io.wavfile
import torch
from flask import Flask, jsonify, request
from TTS.api import TTS

from qili import upload_bytes

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
app = Flask(__name__)

# TTS pipeline and underlying XTTS model, loaded lazily on the first request.
tts = None
model = None

# Directory where downloaded speaker samples and their conditioning latents are cached.
sample_root = os.environ.get('XTTS_SAMPLE_DIR', f'{os.getcwd()}/samples')
os.makedirs(sample_root, exist_ok=True)

# Fallback (speaker wav, cached latents file) pair, used when no sample is
# given or a custom sample cannot be downloaded.
default_sample = f'{os.path.dirname(os.path.abspath(__file__))}/sample.wav', f'{sample_root}/sample.pt'
ffmpeg = f'{os.path.dirname(os.path.abspath(__file__))}/ffmpeg'

def predict(text, sample=None, language="zh"):
    global tts
    global model
    try:
        if tts is None:
            model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
            logging.info(f"loading model {model_name} ...")
            tts = TTS(model_name, progress_bar=False)
            model = tts.synthesizer.tts_model
            # Hack: wrap get_conditioning_latents so the computed latents are
            # cached on disk (see get_conditioning_latents below).
            model.__get_conditioning_latents = model.get_conditioning_latents
            model.get_conditioning_latents = get_conditioning_latents
            logging.info("model is ready")
        # After a word or non-ASCII character, double the sentence-final
        # punctuation and insert a space; this nudges XTTS toward cleaner
        # sentence-end prosody.
        text = re.sub(r"([^\x00-\x7F]|\w)([.。?])", r"\1 \2\2", text)
        wav = tts.tts(
            text,
            language=language if language is not None else "zh",
            speaker_wav=sample if sample is not None else default_sample[0],
        )

        with io.BytesIO() as wav_buffer:
            if torch.is_tensor(wav):
                wav = wav.cpu().numpy()
            if isinstance(wav, list):
                wav = np.array(wav)
            # Normalize to the 16-bit PCM range, guarding against near-silent output.
            wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav))))
            wav_norm = wav_norm.astype(np.int16)
            scipy.io.wavfile.write(wav_buffer, tts.synthesizer.output_sample_rate, wav_norm)
            wav_bytes = wav_buffer.getvalue()
            url = upload_bytes(wav_bytes, ext=".wav")
            logging.debug(f'wav is at {url}')
            return url
    except Exception as e:
        traceback.print_exc()
        return str(e)

@app.route("/")
def convert():
    text = request.args.get('text')
    if text is None:
        return jsonify({'error': 'text is missing'}), 400
        
    sample = request.args.get('sample')
    language = request.args.get('language')
    
    return predict(text, sample, language)
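
# Example requests, assuming the dev server sketched at the bottom of this file
# on localhost:5000 (the sample URL is hypothetical):
#   curl 'http://localhost:5000/?text=Hello%20world&language=en'
#   curl 'http://localhost:5000/?text=%E4%BD%A0%E5%A5%BD&sample=https://example.com/voice.wav'
# The response body is the URL of the uploaded wav (or an error message).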

@app.route("/play")
def play():
    # Synthesize like convert(), then return a small page that plays the result.
    text = request.args.get('text')
    if text is None:
        return jsonify({'error': 'text is missing'}), 400
    url = predict(text, request.args.get('sample'), request.args.get('language'))
    return f'''
        <html>
            <body>
                <audio controls autoplay>
                    <source src="{url}" type="audio/wav">
                    Your browser does not support the audio element.
                </audio>
            </body>
        </html>
    '''
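
# Example: opening http://localhost:5000/play?text=Hello%20world in a browser
# (host/port again assume the dev server below) returns a page that autoplays
# the synthesized wav.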

def get_conditioning_latents(audio_path, **others):
    # Drop-in replacement for the model's get_conditioning_latents that caches
    # the (gpt_cond_latent, speaker_embedding) pair on disk, keyed by the
    # sample's ETag, so repeated requests with the same voice skip recomputation.
    global model
    speaker_wav, pt_file = download(audio_path)
    try:
        (
            gpt_cond_latent,
            speaker_embedding,
        ) = torch.load(pt_file)
        logging.debug(f'sample wav info loaded from {pt_file}')
    except Exception:
        # Cache miss: compute with the original method, then persist the result.
        (
            gpt_cond_latent,
            speaker_embedding,
        ) = model.__get_conditioning_latents(audio_path=speaker_wav, **others)
        torch.save((gpt_cond_latent, speaker_embedding), pt_file)
        logging.debug(f'sample wav info saved to {pt_file}')
    return gpt_cond_latent, speaker_embedding

def download(url):
    # Fetch the speaker sample and derive its latents cache path from the
    # response's ETag. Returns (speaker_wav_path, pt_file_path); the wav path is
    # empty when the latents are already cached and the audio is not needed.
    try:
        response = requests.get(url)
        if response.status_code == 200:
            etag = response.headers["etag"].replace('"', '')
            pt_file = f'{sample_root}/{etag}.pt'
            if os.path.exists(pt_file):
                return "", pt_file
            # delete=False keeps the file alive after the with-block so ffmpeg and
            # the model can read it; the .wav suffix lets trimming derive its name.
            with tempfile.NamedTemporaryFile(mode="wb", suffix=".wav", delete=False) as temp_file:
                temp_file.write(response.content)
            return trim_sample_audio(os.path.abspath(temp_file.name)), pt_file
        return default_sample
    except Exception:
        return default_sample

def trim_sample_audio(speaker_wav):
    # Band-pass the sample, then strip trailing and leading silence
    # (areverse + silenceremove, applied twice). Falls back to the
    # untrimmed file if ffmpeg fails.
    global ffmpeg
    try:
        lowpass_highpass = "lowpass=8000,highpass=75"
        trim_silence = "areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02"
        out_filename = speaker_wav.replace(".wav", "_trimmed.wav")
        subprocess.run(
            [ffmpeg, "-y", "-i", speaker_wav, "-af", f"{lowpass_highpass},{trim_silence}", out_filename],
            capture_output=False,
            text=True,
            check=True,
        )
        return out_filename
    except Exception:
        traceback.print_exc()
        return speaker_wav

logging.info("xtts is ready")

# import gradio as gr
# gr.Interface(predict, inputs=["text", "text"], outputs=gr.Audio()).launch()
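
# A minimal dev entry point, a sketch assuming standalone use on port 5000
# (in deployment this module may instead be served by a WSGI container):
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)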