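"""Gradio demo for AudioLDM text-to-audio generation.

Generates audio from a text prompt with the diffusers AudioLDMPipeline and,
when several candidate waveforms are requested, re-ranks them with a CLAP
model so that the waveform best matching the prompt is returned.
"""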
import gradio as gr
import torch
from diffusers import AudioLDMPipeline
from share_btn import community_icon_html, loading_icon_html, share_js

from transformers import AutoProcessor, ClapModel


# make the Space compatible with CPU duplicates: fp16 on GPU, fp32 on CPU
if torch.cuda.is_available():
    device = "cuda"
    torch_dtype = torch.float16
else:
    device = "cpu"
    torch_dtype = torch.float32

# load the diffusers pipeline
repo_id = "cvssp/audioldm-m-full"
pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype).to(device)
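# compile the UNet for faster repeated inference (the first generation pays a one-time compilation cost)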
pipe.unet = torch.compile(pipe.unet)

# CLAP model (only required for automatic scoring)
clap_model = ClapModel.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-full").to(device)
processor = AutoProcessor.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-full")

# a single generator is reused across calls; each call re-seeds it via manual_seed for reproducibility
generator = torch.Generator(device)


def text2audio(text, negative_prompt, duration, guidance_scale, random_seed, n_candidates):
    if not text:
        raise gr.Error("Please provide a text input.")

    waveforms = pipe(
        text,
        audio_length_in_s=duration,
        guidance_scale=guidance_scale,
        num_inference_steps=100,
        negative_prompt=negative_prompt,
        num_waveforms_per_prompt=n_candidates if n_candidates else 1,
        generator=generator.manual_seed(int(random_seed)),
    )["audios"]

    if waveforms.shape[0] > 1:
        waveform = score_waveforms(text, waveforms)
    else:
        waveform = waveforms[0]

    return gr.make_waveform((16000, waveform), bg_image="bg.png")
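
# A minimal sketch of calling the generation function directly, outside the Gradio UI;
# the argument values below are illustrative, not the Space's configured defaults:
#
#     video_path = text2audio(
#         "a dog barking in the distance",
#         negative_prompt="low quality, average quality",
#         duration=5.0,
#         guidance_scale=2.5,
#         random_seed=42,
#         n_candidates=3,
#     )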


def score_waveforms(text, waveforms):
    inputs = processor(text=text, audios=list(waveforms), return_tensors="pt", padding=True)
    inputs = {key: inputs[key].to(device) for key in inputs}
    with torch.no_grad():
        logits_per_text = clap_model(**inputs).logits_per_text  # audio-text similarity score for each candidate
        probs = logits_per_text.softmax(dim=-1)  # softmax over the candidate waveforms
        most_probable = torch.argmax(probs)  # index of the waveform that best matches the prompt
    waveform = waveforms[most_probable]
    return waveform


css = """
        a {
            color: inherit; text-decoration: underline;
        } .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        } .gr-button {
            color: white; border-color: #000000; background: #000000;
        } input[type='range'] {
            accent-color: #000000;
        } .dark input[type='range'] {
            accent-color: #dfdfdf;
        } .container {
            max-width: 730px; margin: auto; padding-top: 1.5rem;
        } #gallery {
            min-height: 22rem; margin-bottom: 15px; margin-left: auto; margin-right: auto; border-bottom-right-radius:
            .5rem !important; border-bottom-left-radius: .5rem !important;
        } #gallery>div>.h-full {
            min-height: 20rem;
        } .details:hover {
            text-decoration: underline;
        } .gr-button {
            white-space: nowrap;
        } .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity)); outline: none; box-shadow:
            var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width)
            var(--tw-ring-offset-color); --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px
            var(--tw-ring-offset-width)) var(--tw-ring-color); --tw-ring-color: rgb(191 219 254 /
            var(--tw-ring-opacity)); --tw-ring-opacity: .5;
        } #advanced-btn {
            font-size: .7rem !important; line-height: 19px; margin-top: 12px; margin-bottom: 12px; padding: 2px 8px;
            border-radius: 14px !important;
        } #advanced-options {
            margin-bottom: 20px;
        } .footer {
            margin-bottom: 45px; margin-top: 35px; text-align: center; border-bottom: 1px solid #e5e5e5;
        } .footer>p {
            font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white;
        } .dark .footer {
            border-color: #303030;
        } .dark .footer>p {
            background: #0b0f19;
        } .acknowledgments h4{
            margin: 1.25em 0 .25em 0; font-weight: bold; font-size: 115%;
        } #container-advanced-btns{
            display: flex; flex-wrap: wrap; justify-content: space-between; align-items: center;
        } .animate-spin {
            animation: spin 1s linear infinite;
        } @keyframes spin {
            from {
                transform: rotate(0deg);
            } to {
                transform: rotate(360deg);
            }
        } #share-btn-container {
            display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color:
            #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
            margin-top: 10px; margin-left: auto;
        } #share-btn {
            all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif;
            margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem
            !important;right:0;
        } #share-btn * {
            all: unset;
        } #share-btn-container div:nth-child(-n+2){
            width: auto !important; min-height: 0px !important;
        } #share-btn-container .wrap {
            display: none !important;
        } .gr-form{
            flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
        } #prompt-container{
            gap: 0;
        } #generated_id{
            min-height: 700px
        } #setting_id{
          margin-bottom: 12px; text-align: center; font-weight: 900;
        }
"""
iface = gr.Blocks(css=css)

with iface:
    gr.HTML(
        """
            <div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                  AudioLDM: Text-to-Audio Generation with Latent Diffusion Models
                </h1>
              </div> <p style="margin-bottom: 10px; font-size: 94%">
                <a href="https://arxiv.org/abs/2301.12503">[Paper]</a> <a href="https://audioldm.github.io/">[Project
                page]</a> <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm">[🧨
                Diffusers]</a>
              </p>
            </div>
        """
    )
iface.queue(max_size=10).launch(debug=True)