Commit
·
acae072
1
Parent(s):
0c4fe90
Refactor
Browse files
app.py
CHANGED
|
@@ -60,7 +60,7 @@ eigvecs = np.flip(eigvecs, axis=1)[:, :75]
|
|
| 60 |
U = eigvecs * np.sqrt(eigvals)
|
| 61 |
U = torch.from_numpy(U).float()
|
| 62 |
mean = torch.from_numpy(mean).float()
|
| 63 |
-
|
| 64 |
|
| 65 |
with open(INFO_PATH) as f:
|
| 66 |
info = json.load(f)
|
|
@@ -93,7 +93,7 @@ meter = pyln.Meter(44100)
|
|
| 93 |
|
| 94 |
|
| 95 |
@torch.no_grad()
|
| 96 |
-
def inference(audio, randomise_rest, *pcs):
|
| 97 |
sr, y = audio
|
| 98 |
if sr != 44100:
|
| 99 |
y = resample(y, sr, 44100)
|
|
@@ -109,18 +109,19 @@ def inference(audio, randomise_rest, *pcs):
|
|
| 109 |
if y.shape[1] != 1:
|
| 110 |
y = y.mean(dim=1, keepdim=True)
|
| 111 |
|
| 112 |
-
M = eigvals.shape[0]
|
| 113 |
-
z = torch.cat(
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
)
|
| 123 |
x = U @ z + mean
|
|
|
|
| 124 |
|
| 125 |
fx.load_state_dict(vec2dict(x), strict=False)
|
| 126 |
fx.apply(partial(clip_delay_eq_Q, Q=0.707))
|
|
@@ -179,7 +180,7 @@ with gr.Blocks() as demo:
|
|
| 179 |
audio_input = gr.Audio(type="numpy", sources="upload", label="Input Audio")
|
| 180 |
with gr.Row():
|
| 181 |
random_button = gr.Button(
|
| 182 |
-
f"Randomise
|
| 183 |
elem_id="randomise-button",
|
| 184 |
)
|
| 185 |
reset_button = gr.Button(
|
|
@@ -189,12 +190,26 @@ with gr.Blocks() as demo:
|
|
| 189 |
render_button = gr.Button(
|
| 190 |
"Run", elem_id="render-button", variant="primary"
|
| 191 |
)
|
| 192 |
-
random_rest_checkbox = gr.Checkbox(
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
)
|
| 197 |
sliders = get_important_pcs(NUMBER_OF_PCS, value=0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
with gr.Column():
|
| 199 |
audio_output = gr.Audio(
|
| 200 |
type="numpy", label="Output Audio", interactive=False
|
|
@@ -205,27 +220,47 @@ with gr.Blocks() as demo:
|
|
| 205 |
lambda *args: (lambda x: (x, model2json()))(inference(*args)),
|
| 206 |
inputs=[
|
| 207 |
audio_input,
|
| 208 |
-
random_rest_checkbox,
|
| 209 |
]
|
| 210 |
-
+ sliders,
|
|
|
|
| 211 |
outputs=[audio_output, json_output],
|
| 212 |
)
|
| 213 |
|
| 214 |
random_button.click(
|
| 215 |
-
lambda *xs: [
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
],
|
| 222 |
-
|
| 223 |
-
|
|
|
|
|
|
|
|
|
|
| 224 |
)
|
| 225 |
reset_button.click(
|
| 226 |
-
lambda *xs: [0 for _ in range(len(xs))],
|
| 227 |
-
inputs=sliders,
|
| 228 |
-
outputs=sliders,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 229 |
)
|
| 230 |
|
| 231 |
demo.launch()
|
|
|
|
| 60 |
U = eigvecs * np.sqrt(eigvals)
|
| 61 |
U = torch.from_numpy(U).float()
|
| 62 |
mean = torch.from_numpy(mean).float()
|
| 63 |
+
z = torch.zeros(75)
|
| 64 |
|
| 65 |
with open(INFO_PATH) as f:
|
| 66 |
info = json.load(f)
|
|
|
|
| 93 |
|
| 94 |
|
| 95 |
@torch.no_grad()
|
| 96 |
+
def inference(audio):
|
| 97 |
sr, y = audio
|
| 98 |
if sr != 44100:
|
| 99 |
y = resample(y, sr, 44100)
|
|
|
|
| 109 |
if y.shape[1] != 1:
|
| 110 |
y = y.mean(dim=1, keepdim=True)
|
| 111 |
|
| 112 |
+
# M = eigvals.shape[0]
|
| 113 |
+
# z = torch.cat(
|
| 114 |
+
# [
|
| 115 |
+
# torch.tensor([float(x) for x in pcs]),
|
| 116 |
+
# (
|
| 117 |
+
# torch.randn(M - len(pcs)) * TEMPERATURE
|
| 118 |
+
# if randomise_rest
|
| 119 |
+
# else torch.zeros(M - len(pcs))
|
| 120 |
+
# ),
|
| 121 |
+
# ]
|
| 122 |
+
# )
|
| 123 |
x = U @ z + mean
|
| 124 |
+
# print(z)
|
| 125 |
|
| 126 |
fx.load_state_dict(vec2dict(x), strict=False)
|
| 127 |
fx.apply(partial(clip_delay_eq_Q, Q=0.707))
|
|
|
|
| 180 |
audio_input = gr.Audio(type="numpy", sources="upload", label="Input Audio")
|
| 181 |
with gr.Row():
|
| 182 |
random_button = gr.Button(
|
| 183 |
+
f"Randomise PCs",
|
| 184 |
elem_id="randomise-button",
|
| 185 |
)
|
| 186 |
reset_button = gr.Button(
|
|
|
|
| 190 |
render_button = gr.Button(
|
| 191 |
"Run", elem_id="render-button", variant="primary"
|
| 192 |
)
|
| 193 |
+
# random_rest_checkbox = gr.Checkbox(
|
| 194 |
+
# label=f"Randomise PCs > {NUMBER_OF_PCS} (default to zeros)",
|
| 195 |
+
# value=False,
|
| 196 |
+
# elem_id="randomise-checkbox",
|
| 197 |
+
# )
|
| 198 |
sliders = get_important_pcs(NUMBER_OF_PCS, value=0)
|
| 199 |
+
|
| 200 |
+
extra_pc_dropdown = gr.Dropdown(
|
| 201 |
+
list(range(NUMBER_OF_PCS + 1, 76)),
|
| 202 |
+
label=f"PC > {NUMBER_OF_PCS}",
|
| 203 |
+
info="Select which extra PC to adjust",
|
| 204 |
+
interactive=True,
|
| 205 |
+
)
|
| 206 |
+
extra_slider = gr.Slider(
|
| 207 |
+
minimum=SLIDER_MIN,
|
| 208 |
+
maximum=SLIDER_MAX,
|
| 209 |
+
label="Extra PC",
|
| 210 |
+
value=0,
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
with gr.Column():
|
| 214 |
audio_output = gr.Audio(
|
| 215 |
type="numpy", label="Output Audio", interactive=False
|
|
|
|
| 220 |
lambda *args: (lambda x: (x, model2json()))(inference(*args)),
|
| 221 |
inputs=[
|
| 222 |
audio_input,
|
| 223 |
+
# random_rest_checkbox,
|
| 224 |
]
|
| 225 |
+
# + sliders,
|
| 226 |
+
,
|
| 227 |
outputs=[audio_output, json_output],
|
| 228 |
)
|
| 229 |
|
| 230 |
random_button.click(
|
| 231 |
+
# lambda *xs: [
|
| 232 |
+
# chain_functions(
|
| 233 |
+
# partial(max, SLIDER_MIN),
|
| 234 |
+
# partial(min, SLIDER_MAX),
|
| 235 |
+
# )(normalvariate(0, 1))
|
| 236 |
+
# for _ in range(len(xs))
|
| 237 |
+
# ],
|
| 238 |
+
lambda i: (lambda x: x[:NUMBER_OF_PCS].tolist() + [x[i - 1].item()])(
|
| 239 |
+
z.normal_(0, 1).clip_(SLIDER_MIN, SLIDER_MAX)
|
| 240 |
+
),
|
| 241 |
+
inputs=extra_pc_dropdown,
|
| 242 |
+
outputs=sliders + [extra_slider],
|
| 243 |
)
|
| 244 |
reset_button.click(
|
| 245 |
+
lambda *xs: (lambda _: [0 for _ in range(len(xs))])(z.zero_()),
|
| 246 |
+
inputs=sliders + [extra_slider],
|
| 247 |
+
outputs=sliders + [extra_slider],
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
def update_z(s, i):
|
| 251 |
+
z[i] = s
|
| 252 |
+
return
|
| 253 |
+
|
| 254 |
+
for i, slider in enumerate(sliders):
|
| 255 |
+
slider.change(partial(update_z, i=i), inputs=slider)
|
| 256 |
+
extra_slider.change(
|
| 257 |
+
lambda _, i: update_z(_, i - 1), inputs=[extra_slider, extra_pc_dropdown]
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
extra_pc_dropdown.change(
|
| 261 |
+
lambda i: z[i - 1].item(),
|
| 262 |
+
inputs=extra_pc_dropdown,
|
| 263 |
+
outputs=extra_slider,
|
| 264 |
)
|
| 265 |
|
| 266 |
demo.launch()
|