File size: 3,522 Bytes
ee04bc2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import argparse
import os

import torch
import torchaudio

from api import TextToSpeech, MODELS_DIR
from utils.audio import load_voices

if __name__ == "__main__":
    # Command-line driver: synthesize speech for --text with each requested
    # voice and write the resulting 24 kHz wav files to --output_path.

    def _str2bool(value):
        """Parse a CLI boolean string.

        argparse's ``type=bool`` is a trap: ``bool("False")`` is True, so any
        non-empty argument enables the flag. Treat common "false" spellings as
        False and everything else as True.
        """
        return str(value).strip().lower() not in ("false", "0", "no", "n", "f")

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--text",
        type=str,
        help="Text to speak.",
        default="The expressiveness of autoregressive transformers is literally nuts! I absolutely adore them.",
    )
    parser.add_argument(
        "--voice",
        type=str,
        help="Selects the voice to use for generation. See options in voices/ directory (and add your own!) "
        "Use the & character to join two voices together. Use a comma to perform inference on multiple voices.",
        default="random",
    )
    parser.add_argument(
        "--preset", type=str, help="Which voice preset to use.", default="fast"
    )
    parser.add_argument(
        "--output_path", type=str, help="Where to store outputs.", default="results/"
    )
    parser.add_argument(
        "--model_dir",
        type=str,
        # Fixed missing space at the implicit string-concatenation seam
        # (previously rendered as "...so thisshould only be...").
        help="Where to find pretrained model checkpoints. Tortoise automatically downloads these to .models, so this "
        "should only be specified if you have custom checkpoints.",
        default=MODELS_DIR,
    )
    parser.add_argument(
        "--candidates",
        type=int,
        help="How many output candidates to produce per-voice.",
        default=3,
    )
    parser.add_argument(
        "--seed",
        type=int,
        help="Random seed which can be used to reproduce results.",
        default=None,
    )
    parser.add_argument(
        "--produce_debug_state",
        # BUG FIX: was ``type=bool``, which made "--produce_debug_state False"
        # evaluate truthy; parse the string explicitly instead.
        type=_str2bool,
        help="Whether or not to produce debug_state.pth, which can aid in reproducing problems. Defaults to true.",
        default=True,
    )
    parser.add_argument(
        "--cvvp_amount",
        type=float,
        # Fixed missing space at the concatenation seam and the
        # "likelyhood" typo in the user-visible help text.
        help="How much the CVVP model should influence the output. "
        "Increasing this can in some cases reduce the likelihood of multiple speakers. Defaults to 0 (disabled)",
        default=0.0,
    )
    args = parser.parse_args()
    os.makedirs(args.output_path, exist_ok=True)

    tts = TextToSpeech(models_dir=args.model_dir)

    # A comma separates independent voices; '&' blends voices within one run.
    selected_voices = args.voice.split(",")
    for k, selected_voice in enumerate(selected_voices):
        if "&" in selected_voice:
            voice_sel = selected_voice.split("&")
        else:
            voice_sel = [selected_voice]
        voice_samples, conditioning_latents = load_voices(voice_sel)

        gen, dbg_state = tts.tts_with_preset(
            args.text,
            k=args.candidates,
            voice_samples=voice_samples,
            conditioning_latents=conditioning_latents,
            preset=args.preset,
            use_deterministic_seed=args.seed,
            return_deterministic_state=True,
            cvvp_amount=args.cvvp_amount,
        )
        # tts_with_preset returns a list of clips when multiple candidates are
        # requested; otherwise a single tensor. Save each at 24 kHz.
        if isinstance(gen, list):
            for j, g in enumerate(gen):
                torchaudio.save(
                    os.path.join(args.output_path, f"{selected_voice}_{k}_{j}.wav"),
                    g.squeeze(0).cpu(),
                    24000,
                )
        else:
            torchaudio.save(
                os.path.join(args.output_path, f"{selected_voice}_{k}.wav"),
                gen.squeeze(0).cpu(),
                24000,
            )

        if args.produce_debug_state:
            os.makedirs("debug_states", exist_ok=True)
            torch.save(dbg_state, f"debug_states/do_tts_debug_{selected_voice}.pth")