Staticaliza committed on
Commit c8264c2 · verified · 1 Parent(s): 3dcd4a8

Update app.py

Files changed (1)
  1. app.py +44 -71
app.py CHANGED
@@ -1,79 +1,52 @@
- import gradio as gr
- import os
-
- os.environ["TORCH_COMPILE_DISABLE"] = "1"     # disable torch.compile globally
- os.environ["TORCHINDUCTOR_DISABLE"] = "1"     # belt-and-suspenders for inductor
-
- import torch, torch._dynamo                   # must come *after* the env vars
- torch._dynamo.config.suppress_errors = True   # swallow any stray compile calls
- torch._dynamo.disable()

- import spaces
- import torch
- import numpy as np

- from zonos.model import Zonos
- from zonos.conditioning import make_cond_dict

- cpu = "cuda"
- model = Zonos.from_pretrained("Zyphra/Zonos-v0.1-transformer", device=cpu)

- def _speaker_embed(audio):
      if audio is None: return None
-     sr, wav = audio
-     if wav.dtype.kind in "iu":
-         wav = wav.astype(np.float32) / np.iinfo(wav.dtype).max
-     else:
-         wav = wav.astype(np.float32)
-     wav = torch.from_numpy(wav).unsqueeze(0)
-     return model.make_speaker_embedding(wav, sr)

  @spaces.GPU
- def tts(
-     text, language, speaker_audio,
-     happy, sad, disgust, fear, surprise, anger, other, neutral,
-     speaking_rate, pitch_std
- ):
-     speaker = _speaker_embed(speaker_audio)
-     emotion = [happy, sad, disgust, fear, surprise, anger, other, neutral]
-     model.to("cuda")
      with torch.no_grad():
-         cond = make_cond_dict(
-             text=text,
-             language=language,
-             speaker=speaker,
-             emotion=emotion,
-             speaking_rate=float(speaking_rate),
-             pitch_std=float(pitch_std),
-         )
-         conditioning = model.prepare_conditioning(cond)
-         codes = model.generate(conditioning)
-         wav = model.autoencoder.decode(codes)[0].cpu()
-     model.to(cpu)
-     torch.cuda.empty_cache()
-     return (model.autoencoder.sampling_rate, wav.numpy())
-
- langs = ["en-us", "fr-fr", "ja", "de-de", "zh"]
- demo = gr.Interface(
-     fn=tts,
-     inputs=[
-         gr.Textbox(label="text"),
-         gr.Dropdown(langs, value="en-us", label="language"),
-         gr.Audio(type="numpy", label="speaker reference (optional)"),
-         gr.Slider(0, 1, 0.3, 0.05, label="happiness"),
-         gr.Slider(0, 1, 0.0, 0.05, label="sadness"),
-         gr.Slider(0, 1, 0.0, 0.05, label="disgust"),
-         gr.Slider(0, 1, 0.0, 0.05, label="fear"),
-         gr.Slider(0, 1, 0.0, 0.05, label="surprise"),
-         gr.Slider(0, 1, 0.0, 0.05, label="anger"),
-         gr.Slider(0, 1, 0.2, 0.05, label="other"),
-         gr.Slider(0, 1, 0.5, 0.05, label="neutral"),
-         gr.Slider(0, 40, 15, 1, label="speaking_rate"),
-         gr.Slider(0, 400, 20, 1, label="pitch_std"),
-     ],
-     outputs=gr.Audio(label="generated speech"),
-     title="zonos-v0.1 zerogpu tts",
- )
-
- if __name__ == "__main__":
-     demo.launch(share=True)
+ import os, shlex, subprocess, torch, numpy as np, spaces, gradio as gr, torchaudio
+ from zonos.model import Zonos
+ from zonos.conditioning import make_cond_dict, supported_language_codes

+ subprocess.run(shlex.split("pip install flash-attn --no-build-isolation"), env=os.environ | {"FLASH_ATTENTION_SKIP_CUDA_BUILD":"TRUE"}, check=True)
+ subprocess.run(shlex.split("pip install https://github.com/state-spaces/mamba/releases/download/v2.2.4/mamba_ssm-2.2.4+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"), check=True)
+ subprocess.run(shlex.split("pip install https://github.com/Dao-AILab/causal-conv1d/releases/download/v1.5.0.post8/causal_conv1d-1.5.0.post8+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"), check=True)

+ os.environ["TORCH_COMPILE_DISABLE"]="1"
+ os.environ["TORCHINDUCTOR_DISABLE"]="1"
+ import torch._dynamo; torch._dynamo.disable()

+ device="cuda"
+ MODEL_NAMES=["Zyphra/Zonos-v0.1-transformer","Zyphra/Zonos-v0.1-hybrid"]
+ MODELS={n:Zonos.from_pretrained(n,device=device).eval() for n in MODEL_NAMES}

+ def _speaker_embed(model,audio):
      if audio is None: return None
+     sr,wav=audio
+     if wav.dtype.kind in "iu": wav=wav.astype(np.float32)/np.iinfo(wav.dtype).max
+     wav=torch.from_numpy(wav).unsqueeze(0)
+     return model.make_speaker_embedding(wav,sr)

  @spaces.GPU
+ def tts(model_choice,text,language,speaker_audio,
+         e1,e2,e3,e4,e5,e6,e7,e8,
+         speaking_rate,pitch_std):
+     m=MODELS[model_choice]
+     speaker=_speaker_embed(m,speaker_audio)
+     emotion=[e1,e2,e3,e4,e5,e6,e7,e8]
+     cond=make_cond_dict(text=text,language=language,speaker=speaker,
+                         emotion=emotion,speaking_rate=float(speaking_rate),
+                         pitch_std=float(pitch_std),device=device)
      with torch.no_grad():
+         wav=m.autoencoder.decode(m.generate(m.prepare_conditioning(cond)))[0].cpu()
+     return (m.autoencoder.sampling_rate,wav.numpy())
+
+ langs=supported_language_codes
+ with gr.Blocks() as demo:
+     mc=gr.Dropdown(MODEL_NAMES,value=MODEL_NAMES[0],label="model")
+     txt=gr.Textbox(label="text")
+     lang=gr.Dropdown(langs,value="en-us",label="language")
+     spk=gr.Audio(type="numpy",label="speaker ref")
+     emos=[gr.Slider(0,1,0.3 if i==0 else 0.0,0.05,label=l) for i,l in enumerate(
+         ["happiness","sadness","disgust","fear","surprise","anger","other","neutral"])]
+     rate=gr.Slider(0,40,15,1,label="speaking_rate")
+     pitch=gr.Slider(0,400,20,1,label="pitch_std")
+     out=gr.Audio(label="output")
+     gr.Button("generate").click(fn=tts,
+                                 inputs=[mc,txt,lang,spk,*emos,rate,pitch],
+                                 outputs=out)
+ if __name__=="__main__": demo.launch()
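
Note: the new app.py drives Zonos in three steps — build a conditioning dict with make_cond_dict, turn it into codes via prepare_conditioning and generate, then decode to a waveform with autoencoder.decode. Below is a minimal sketch of that same pipeline invoked outside the Gradio UI; it is not part of the commit, it assumes the Zonos calls behave exactly as they are used above, and the output filename, emotion weights, and use of torchaudio.save are illustrative only.

# Sketch only: mirrors the tts() pipeline from app.py without the Gradio layer.
# Assumes a CUDA device and the Zyphra/Zonos-v0.1-transformer checkpoint.
import torch
import torchaudio
from zonos.model import Zonos
from zonos.conditioning import make_cond_dict

device = "cuda"
model = Zonos.from_pretrained("Zyphra/Zonos-v0.1-transformer", device=device).eval()

# Emotion weights follow the slider order in the UI:
# happiness, sadness, disgust, fear, surprise, anger, other, neutral.
cond = make_cond_dict(
    text="Hello from Zonos.",
    language="en-us",
    speaker=None,                      # or a speaker embedding from _speaker_embed()
    emotion=[0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.5],
    speaking_rate=15.0,
    pitch_std=20.0,
    device=device,
)

with torch.no_grad():
    codes = model.generate(model.prepare_conditioning(cond))
    wav = model.autoencoder.decode(codes)[0].cpu()   # (channels, samples) tensor

# Illustrative output path; the Space itself returns (sampling_rate, numpy array) to Gradio.
torchaudio.save("zonos_sample.wav", wav, model.autoencoder.sampling_rate)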