Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,452 +1,314 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
import
|
|
|
13 |
from pydub import AudioSegment
|
14 |
from torch.cuda.amp import autocast
|
15 |
from audiocraft.models import MusicGen
|
16 |
from huggingface_hub import login
|
17 |
|
18 |
-
#
|
19 |
-
#
|
20 |
-
#
|
21 |
if not hasattr(torch, "get_default_device"):
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
|
26 |
-
# ----------------------------------------------------------------------
|
27 |
-
# WARNING SUPPRESSION & ENV TUNING
|
28 |
-
# ----------------------------------------------------------------------
|
29 |
warnings.filterwarnings("ignore")
|
30 |
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
|
31 |
|
32 |
-
#
|
33 |
-
#
|
34 |
-
#
|
35 |
-
|
36 |
-
if not
|
37 |
-
print("ERROR: HF_TOKEN
|
38 |
-
sys.exit(1)
|
39 |
-
|
40 |
-
try:
|
41 |
-
login(hf_token)
|
42 |
-
except Exception as e:
|
43 |
-
print(f"ERROR: Failed to authenticate with Hugging Face: {e}")
|
44 |
sys.exit(1)
|
|
|
45 |
|
46 |
-
#
|
47 |
-
#
|
48 |
-
#
|
49 |
-
if np.__version__ != "1.23.5":
|
50 |
-
print(f"WARNING: NumPy version {np.__version__} detected (expected 1.23.5).")
|
51 |
-
if not torch.__version__.startswith("2.3.1"):
|
52 |
-
print(f"WARNING: PyTorch version {torch.__version__} detected (expected 2.3.1).")
|
53 |
-
|
54 |
-
# ----------------------------------------------------------------------
|
55 |
-
# DEVICE SETUP
|
56 |
-
# ----------------------------------------------------------------------
|
57 |
-
print("Debugging GPU and CUDA setupβ¦")
|
58 |
-
print(f"PyTorch CUDA available: {torch.cuda.is_available()}")
|
59 |
-
if torch.cuda.is_available():
|
60 |
-
print(f"CUDA device count : {torch.cuda.device_count()}")
|
61 |
-
print(f"CUDA device name : {torch.cuda.get_device_name(0)}")
|
62 |
-
print(f"CUDA version : {torch.version.cuda}")
|
63 |
-
else:
|
64 |
-
print("CUDA unavailable; falling back to CPU (performance will suffer).")
|
65 |
-
|
66 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
67 |
-
|
68 |
-
# Pre-run memory cleanup (GPU)
|
69 |
if device == "cuda":
|
70 |
-
torch.cuda.
|
71 |
-
gc.collect()
|
72 |
-
torch.cuda.ipc_collect()
|
73 |
-
torch.cuda.synchronize()
|
74 |
-
|
75 |
-
# ----------------------------------------------------------------------
|
76 |
-
# LOAD MUSICGEN
|
77 |
-
# ----------------------------------------------------------------------
|
78 |
-
try:
|
79 |
-
print("Loading MusicGen medium modelβ¦")
|
80 |
-
musicgen_model = MusicGen.get_pretrained("facebook/musicgen-medium", device=device)
|
81 |
-
musicgen_model.set_generation_params(duration=10, two_step_cfg=False)
|
82 |
-
except Exception as e:
|
83 |
-
print(f"ERROR: Failed to load MusicGen model: {e}")
|
84 |
-
print("Check HF access and PyTorch version compatibility.")
|
85 |
-
sys.exit(1)
|
86 |
|
87 |
-
|
88 |
-
# HELPER: RESOURCE MONITOR
|
89 |
-
# ----------------------------------------------------------------------
|
90 |
-
def print_resource_usage(stage: str):
|
91 |
-
print(f"--- {stage} ---")
|
92 |
if device == "cuda":
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
|
98 |
-
def
|
99 |
if device != "cuda":
|
100 |
return True
|
101 |
-
total = torch.cuda.get_device_properties(0).total_memory /
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
#
|
109 |
-
#
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
def
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
def
|
172 |
-
return
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
def set_post_punk_prompt(bpm, drum, synth, steps, bass, guitar):
|
196 |
-
return _prompt(
|
197 |
-
"Instrumental post-punk",
|
198 |
-
bpm, drum, synth, steps, bass, guitar,
|
199 |
-
", driving basslines", ", jangly guitars",
|
200 |
-
"{bpm} BPM sharp steps" if bpm > 120 else "moody rhythmic pulse"
|
201 |
-
)
|
202 |
-
|
203 |
-
def set_indie_rock_prompt(bpm, drum, synth, steps, bass, guitar):
|
204 |
-
return _prompt(
|
205 |
-
"Instrumental indie rock",
|
206 |
-
bpm, drum, synth, steps, bass, guitar,
|
207 |
-
"", ", jangly guitars",
|
208 |
-
"{bpm} BPM catchy steps" if bpm > 120 else "jangly rhythmic flow"
|
209 |
-
)
|
210 |
-
|
211 |
-
def set_funk_rock_prompt(bpm, drum, synth, steps, bass, guitar):
|
212 |
-
return _prompt(
|
213 |
-
"Instrumental funk rock",
|
214 |
-
bpm, drum, synth, steps, bass, guitar,
|
215 |
-
", slap bass", ", funky guitar chords",
|
216 |
-
"{bpm} BPM aggressive steps" if bpm > 120 else "funky rhythmic groove"
|
217 |
-
)
|
218 |
-
|
219 |
-
def set_detroit_techno_prompt(bpm, drum, synth, steps, bass, guitar):
|
220 |
-
return _prompt(
|
221 |
-
"Instrumental Detroit techno",
|
222 |
-
bpm, drum, synth, steps, bass, guitar,
|
223 |
-
", driving basslines", "",
|
224 |
-
"{bpm} BPM pulsing steps" if bpm > 120 else "deep rhythmic groove"
|
225 |
-
)
|
226 |
-
|
227 |
-
def set_deep_house_prompt(bpm, drum, synth, steps, bass, guitar):
|
228 |
-
return _prompt(
|
229 |
-
"Instrumental deep house",
|
230 |
-
bpm, drum, synth, steps, bass, guitar,
|
231 |
-
", deep basslines", "",
|
232 |
-
"{bpm} BPM soulful steps" if bpm > 120 else "laid-back rhythmic flow"
|
233 |
-
)
|
234 |
-
|
235 |
-
# ----------------------------------------------------------------------
|
236 |
-
# AUDIO POST-PROCESSING
|
237 |
-
# ----------------------------------------------------------------------
|
238 |
def apply_eq(seg: AudioSegment):
|
239 |
return seg.low_pass_filter(8000).high_pass_filter(80)
|
240 |
|
241 |
def apply_fade(seg: AudioSegment, fin=1000, fout=1000):
|
242 |
return seg.fade_in(fin).fade_out(fout)
|
243 |
|
244 |
-
#
|
245 |
-
#
|
246 |
-
#
|
247 |
-
def generate_music(prompt,
|
248 |
-
total_dur,
|
249 |
-
bpm,
|
250 |
|
251 |
if not prompt.strip():
|
252 |
-
return None,
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
segments = []
|
265 |
|
266 |
torch.manual_seed(42)
|
267 |
np.random.seed(42)
|
268 |
|
269 |
-
|
270 |
-
for i in range(
|
271 |
-
|
272 |
-
|
273 |
-
duration=render_len,
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
)
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
chunk = torch.cat([chunk, chunk], dim=0)
|
290 |
-
|
291 |
-
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
|
292 |
-
torchaudio.save(tmp.name, chunk, sr, bits_per_sample=24)
|
293 |
-
seg = AudioSegment.from_wav(tmp.name)
|
294 |
os.unlink(tmp.name)
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
|
300 |
-
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
final
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
final.export(out_path, format="mp3", bitrate="128k",
|
313 |
-
tags={"title": "GhostAI Instrumental", "artist": "GhostAI"})
|
314 |
-
print_resource_usage("After final")
|
315 |
-
print(f"Total time: {time.time() - start:.2f}s")
|
316 |
-
return out_path, "β
Done!"
|
317 |
-
|
318 |
-
# ----------------------------------------------------------------------
|
319 |
-
# CLEAR INPUTS
|
320 |
-
# ----------------------------------------------------------------------
|
321 |
def clear_inputs():
|
322 |
-
return ("",
|
323 |
-
120,
|
324 |
-
|
325 |
-
#
|
326 |
-
#
|
327 |
-
#
|
328 |
-
css
|
329 |
-
|
330 |
-
.header-container{text-align:center;padding:10px 20px;background:rgba(0,0,0,.9);border-bottom:1px solid #00FF9F}
|
331 |
-
#ghost-logo{font-size:40px;animation:glitch-ghost 1.5s infinite}
|
332 |
-
h1{color:#A100FF;font-size:24px;animation:glitch-text 2s infinite}
|
333 |
-
p{color:#E0E0E0;font-size:12px}
|
334 |
-
.input-container,.settings-container,.output-container{max-width:1200px;margin:20px auto;padding:20px;background:rgba(28,37,38,.8);border-radius:10px}
|
335 |
-
.textbox{background:#1A1A1A;border:1px solid #A100FF;color:#E0E0E0}
|
336 |
-
.genre-buttons{display:flex;justify-content:center;flex-wrap:wrap;gap:15px}
|
337 |
-
.genre-btn,button{background:linear-gradient(45deg,#A100FF,#00FF9F);border:none;color:#0A0A0A;padding:10px 20px;border-radius:5px}
|
338 |
-
.gradio-container{padding:20px}
|
339 |
-
.group-container{margin-bottom:20px;padding:15px;border:1px solid #00FF9F;border-radius:8px}
|
340 |
-
@keyframes glitch-ghost{0%{transform:translate(0,0);opacity:1}20%{transform:translate(-5px,2px);opacity:.8}100%{transform:translate(0,0);opacity:1}}
|
341 |
-
@keyframes glitch-text{0%{transform:translate(0,0)}20%{transform:translate(-2px,1px)}100%{transform:translate(0,0)}}
|
342 |
-
@font-face{font-family:'Orbitron';src:url('https://fonts.gstatic.com/s/orbitron/v29/yMJRMIlzdpvBhQQL_Qq7dy0.woff2') format('woff2')}
|
343 |
"""
|
344 |
|
345 |
-
# ----------------------------------------------------------------------
|
346 |
-
# GRADIO UI
|
347 |
-
# ----------------------------------------------------------------------
|
348 |
with gr.Blocks(css=css) as demo:
|
349 |
-
gr.
|
350 |
-
|
351 |
-
|
352 |
-
|
353 |
-
|
354 |
-
|
355 |
-
""
|
356 |
-
|
357 |
-
|
358 |
-
|
359 |
-
|
360 |
-
|
361 |
-
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
|
399 |
-
|
400 |
-
|
401 |
-
|
402 |
-
|
403 |
-
|
404 |
-
|
405 |
-
|
406 |
-
|
407 |
-
|
408 |
-
|
409 |
-
bass_style = gr.Dropdown(label="Bass Style πΈ",
|
410 |
-
choices=["none","slap bass","deep bass","melodic bass"], value="none")
|
411 |
-
guitar_style=gr.Dropdown(label="Guitar Style πΈ",
|
412 |
-
choices=["none","distorted","clean","jangle"], value="none")
|
413 |
-
|
414 |
-
with gr.Row(elem_classes="action-buttons"):
|
415 |
-
gen_btn = gr.Button("Generate Music π")
|
416 |
-
clr_btn = gr.Button("Clear Inputs π§Ή")
|
417 |
-
|
418 |
-
# OUTPUT
|
419 |
-
with gr.Column(elem_classes="output-container"):
|
420 |
-
gr.Markdown("### π§ Output")
|
421 |
-
out_audio = gr.Audio(label="Generated Track π΅", type="filepath")
|
422 |
-
status = gr.Textbox(label="Status π’", interactive=False)
|
423 |
-
|
424 |
-
# ACTIONS
|
425 |
-
gen_btn.click(
|
426 |
-
generate_music,
|
427 |
-
inputs=[prompt_box, cfg_scale, top_k, top_p, temperature,
|
428 |
-
total_duration, chunk_duration, crossfade_duration,
|
429 |
-
bpm, drum_beat, synthesizer, rhythmic_steps, bass_style, guitar_style],
|
430 |
-
outputs=[out_audio, status]
|
431 |
-
)
|
432 |
-
clr_btn.click(
|
433 |
-
clear_inputs,
|
434 |
-
inputs=None,
|
435 |
-
outputs=[prompt_box, cfg_scale, top_k, top_p, temperature,
|
436 |
-
total_duration, chunk_duration, crossfade_duration,
|
437 |
-
bpm, drum_beat, synthesizer, rhythmic_steps, bass_style, guitar_style]
|
438 |
-
)
|
439 |
-
|
440 |
-
# ----------------------------------------------------------------------
|
441 |
-
# LAUNCH
|
442 |
-
# ----------------------------------------------------------------------
|
443 |
-
app = demo.launch(share=False, inbrowser=False, show_error=True)
|
444 |
-
|
445 |
-
# Disable OpenAPI docs (HF Spaces hardening)
|
446 |
try:
|
447 |
-
|
448 |
-
fastapi_app.docs_url = None
|
449 |
-
fastapi_app.redoc_url = None
|
450 |
-
fastapi_app.openapi_url = None
|
451 |
except Exception:
|
452 |
pass
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
GhostAI Music Generator β Zero-GPU Friendly
|
5 |
+
Full Gradio application with conflict-free dependencies.
|
6 |
+
|
7 |
+
Python : 3.10
|
8 |
+
Torch : 2.1 CPU wheels
|
9 |
+
Updated : 2025-05-29
|
10 |
+
"""
|
11 |
+
|
12 |
+
import os, sys, gc, time, random, warnings, tempfile, psutil, numpy as np
|
13 |
+
import torch, torchaudio, gradio as gr
|
14 |
from pydub import AudioSegment
|
15 |
from torch.cuda.amp import autocast
|
16 |
from audiocraft.models import MusicGen
|
17 |
from huggingface_hub import login
|
18 |
|
19 |
+
# ------------------------------------------------------------------ #
#  Compatibility shim (torch < 2.3)                                  #
# ------------------------------------------------------------------ #
# Older torch builds do not expose torch.get_default_device(); define it
# so downstream code can call it unconditionally.
if not hasattr(torch, "get_default_device"):
    torch.get_default_device = lambda: torch.device(
        "cuda" if torch.cuda.is_available() else "cpu"
    )

# Silence noisy third-party warnings and cap the CUDA allocator's split
# size to reduce fragmentation on small GPUs.
warnings.filterwarnings("ignore")
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
|
29 |
|
30 |
+
# ------------------------------------------------------------------ #
#  Hugging Face authentication                                       #
# ------------------------------------------------------------------ #
# The gated MusicGen checkpoint needs an authenticated download, so a
# missing or invalid token is fatal: fail fast with a clear message
# instead of a raw traceback later.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    print("ERROR: set HF_TOKEN in the Space secrets.")
    sys.exit(1)
try:
    login(HF_TOKEN)
except Exception as e:  # network / invalid-token errors raise varied types
    print(f"ERROR: Failed to authenticate with Hugging Face: {e}")
    sys.exit(1)
|
38 |
|
39 |
+
# ------------------------------------------------------------------ #
#  Device setup                                                      #
# ------------------------------------------------------------------ #
# Module-level device string; every later stage (model load, generation,
# memory cleanup) keys off this value.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Running on {device.upper()}")
if device == "cuda":
    print(f"GPU: {torch.cuda.get_device_name(0)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
46 |
|
47 |
+
def gpu_clean():
    """Aggressively release cached GPU memory; no-op when running on CPU."""
    if device != "cuda":
        return
    torch.cuda.empty_cache()
    gc.collect()
    torch.cuda.ipc_collect()
    torch.cuda.synchronize()

# Start from a clean slate before the model is loaded.
gpu_clean()
|
55 |
+
|
56 |
+
# ------------------------------------------------------------------ #
#  Load MusicGen                                                     #
# ------------------------------------------------------------------ #
# Guard the download/load: gated-repo, network, and CUDA errors should
# produce a readable message and a clean exit, not a raw traceback.
print("Loading facebook/musicgen-medium …")
try:
    musicgen = MusicGen.get_pretrained("facebook/musicgen-medium", device=device)
except Exception as e:
    print(f"ERROR: Failed to load MusicGen model: {e}")
    sys.exit(1)
# Defaults; generate_music() overrides these per request.
musicgen.set_generation_params(duration=10, two_step_cfg=False)
|
62 |
+
|
63 |
+
# ------------------------------------------------------------------ #
#  Resource monitor                                                  #
# ------------------------------------------------------------------ #
def log_resources(tag=""):
    """Print current GPU and CPU memory usage, optionally labelled *tag*."""
    if tag:
        print(f"-- {tag} --")
    if device == "cuda":
        gib = 1024 ** 3
        alloc = torch.cuda.memory_allocated() / gib
        res = torch.cuda.memory_reserved() / gib
        print(f"GPU mem | alloc {alloc:.2f} GB reserved {res:.2f} GB")
    print(f"CPU mem | {psutil.virtual_memory().percent}% used")
|
74 |
|
75 |
+
def vram_ok(th=3.5):
    """Return True when at least *th* GiB of VRAM is free (always True on CPU)."""
    if device != "cuda":
        return True
    gib = 1024 ** 3
    total = torch.cuda.get_device_properties(0).total_memory / gib
    free = total - torch.cuda.memory_allocated() / gib
    if free < th:
        print(f"Only {free:.2f} GB VRAM free (<{th} GB)")
    return free >= th
|
83 |
+
|
84 |
+
# ------------------------------------------------------------------ #
|
85 |
+
# Prompt builders #
|
86 |
+
# ------------------------------------------------------------------ #
|
87 |
+
def _p(base,bpm,drum,synth,step,bass,gtr,def_bass,def_gtr,flow):
|
88 |
+
step_txt = f" with {step}" if step!="none" else flow.format(bpm=bpm)
|
89 |
+
drum_txt = f", {drum} drums" if drum != "none" else ""
|
90 |
+
syn_txt = f", {synth} accents" if synth != "none" else ""
|
91 |
+
bass_txt = f", {bass}" if bass != "none" else def_bass
|
92 |
+
gtr_txt = f", {gtr} guitar riffs" if gtr != "none" else def_gtr
|
93 |
+
return f"{base}{bass_txt}{gtr_txt}{drum_txt}{syn_txt}{step_txt} at {bpm} BPM."
|
94 |
+
|
95 |
+
def set_red_hot_chili_peppers_prompt(bpm, dr, syn, st, bass, gtr):
    """Build an RHCP-style funk-rock prompt."""
    feel = "{bpm} BPM funky flow" if bpm > 120 else "groovy rhythmic flow"
    return _p("Instrumental funk rock", bpm, dr, syn, st, bass, gtr,
              ", groovy basslines", ", syncopated guitar riffs", feel)
|
99 |
+
|
100 |
+
def set_nirvana_grunge_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Nirvana-style grunge prompt."""
    feel = "{bpm} BPM grungy pulse" if bpm > 120 else "grungy rhythmic pulse"
    return _p("Instrumental grunge", bpm, dr, syn, st, bass, gtr,
              ", melodic basslines", ", raw distorted guitar riffs", feel)
|
104 |
+
|
105 |
+
def set_pearl_jam_grunge_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Pearl Jam-style grunge prompt."""
    feel = "{bpm} BPM driving flow" if bpm > 120 else "driving rhythmic flow"
    return _p("Instrumental grunge", bpm, dr, syn, st, bass, gtr,
              ", deep bass", ", soulful guitar leads", feel)
|
109 |
+
|
110 |
+
def set_soundgarden_grunge_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Soundgarden-style grunge prompt."""
    feel = "{bpm} BPM heavy groove" if bpm > 120 else "sludgy rhythmic groove"
    return _p("Instrumental grunge", bpm, dr, syn, st, bass, gtr,
              "", ", heavy sludgy guitar riffs", feel)
|
114 |
+
|
115 |
+
def set_foo_fighters_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Foo Fighters-flavoured alt-rock prompt with random styling."""
    styles = ["anthemic", "gritty", "melodic", "fast-paced", "driving"]
    moods = ["energetic", "introspective", "rebellious", "uplifting"]
    feel = "{bpm} BPM powerful groove" if bpm > 120 else "catchy rhythmic groove"
    # random.choice(styles) must be drawn before random.choice(moods) to
    # keep the RNG stream identical to the original implementation.
    core = _p("Instrumental alternative rock", bpm, dr, syn, st, bass, gtr,
              "", f", {random.choice(styles)} guitar riffs", feel)
    return core + f", Foo Fighters-inspired {random.choice(moods)} vibe"
|
122 |
+
|
123 |
+
def set_smashing_pumpkins_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Smashing Pumpkins-style alt-rock prompt."""
    feel = "{bpm} BPM dynamic flow" if bpm > 120 else "dreamy rhythmic flow"
    return _p("Instrumental alternative rock", bpm, dr, syn, st, bass, gtr,
              "", ", dreamy guitar textures", feel)
|
127 |
+
|
128 |
+
def set_radiohead_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Radiohead-style experimental-rock prompt."""
    feel = "{bpm} BPM intricate pulse" if bpm > 120 else "intricate rhythmic pulse"
    return _p("Instrumental experimental rock", bpm, dr, syn, st, bass, gtr,
              "", ", intricate guitar layers", feel)
|
132 |
+
|
133 |
+
def set_classic_rock_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a classic-rock prompt."""
    feel = "{bpm} BPM bluesy steps" if bpm > 120 else "steady rhythmic groove"
    return _p("Instrumental classic rock", bpm, dr, syn, st, bass, gtr,
              ", groovy bass", ", bluesy electric guitars", feel)
|
137 |
+
|
138 |
+
def set_alternative_rock_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a generic alternative-rock prompt."""
    feel = "{bpm} BPM quirky steps" if bpm > 120 else "energetic rhythmic flow"
    return _p("Instrumental alternative rock", bpm, dr, syn, st, bass, gtr,
              ", melodic basslines", ", distorted guitar riffs", feel)
|
142 |
+
|
143 |
+
def set_post_punk_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a post-punk prompt."""
    feel = "{bpm} BPM sharp steps" if bpm > 120 else "moody rhythmic pulse"
    return _p("Instrumental post-punk", bpm, dr, syn, st, bass, gtr,
              ", driving basslines", ", jangly guitars", feel)
|
147 |
+
|
148 |
+
def set_indie_rock_prompt(bpm, dr, syn, st, bass, gtr):
    """Build an indie-rock prompt."""
    feel = "{bpm} BPM catchy steps" if bpm > 120 else "jangly rhythmic flow"
    return _p("Instrumental indie rock", bpm, dr, syn, st, bass, gtr,
              "", ", jangly guitars", feel)
|
152 |
+
|
153 |
+
def set_funk_rock_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a funk-rock prompt."""
    feel = "{bpm} BPM aggressive steps" if bpm > 120 else "funky rhythmic groove"
    return _p("Instrumental funk rock", bpm, dr, syn, st, bass, gtr,
              ", slap bass", ", funky guitar chords", feel)
|
157 |
+
|
158 |
+
def set_detroit_techno_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a Detroit-techno prompt."""
    feel = "{bpm} BPM pulsing steps" if bpm > 120 else "deep rhythmic groove"
    return _p("Instrumental Detroit techno", bpm, dr, syn, st, bass, gtr,
              ", driving basslines", "", feel)
|
162 |
+
|
163 |
+
def set_deep_house_prompt(bpm, dr, syn, st, bass, gtr):
    """Build a deep-house prompt."""
    feel = "{bpm} BPM soulful steps" if bpm > 120 else "laid-back rhythmic flow"
    return _p("Instrumental deep house", bpm, dr, syn, st, bass, gtr,
              ", deep basslines", "", feel)
|
167 |
+
|
168 |
+
# ------------------------------------------------------------------ #
#  Audio post-processing                                             #
# ------------------------------------------------------------------ #
def apply_eq(seg: AudioSegment):
    """Band-limit the track: roll off above 8 kHz and below 80 Hz."""
    trimmed = seg.low_pass_filter(8000)
    return trimmed.high_pass_filter(80)
|
173 |
|
174 |
def apply_fade(seg: AudioSegment, fin=1000, fout=1000):
    """Fade the segment in over *fin* ms and out over *fout* ms."""
    shaped = seg.fade_in(fin)
    return shaped.fade_out(fout)
|
176 |
|
177 |
+
# ------------------------------------------------------------------ #
#  Core generation                                                   #
# ------------------------------------------------------------------ #
def generate_music(prompt,cfg,k,p,temp,
                   total_dur,chunk_dur,xfade,
                   bpm,drum,synth,step,bass,gtr):
    """Render *prompt* to an MP3 in overlapping chunks.

    cfg/k/p/temp : MusicGen sampling controls (cfg_coef, top_k, top_p,
                   temperature).
    total_dur    : target track length in seconds.
    chunk_dur    : requested per-chunk render length, clamped to 5-15 s.
    xfade        : crossfade between chunks, in milliseconds.
    bpm..gtr     : accepted to mirror the UI input list; not read in this
                   body (they shape the prompt upstream).
    Returns (output_path, status_message), or (None, warning) on bad input.
    """
    if not prompt.strip():
        return None,"⚠️ Prompt cannot be empty."
    if not vram_ok():
        return None,"⚠️ Not enough VRAM."

    # Chunk plan: split total_dur evenly; render each chunk slightly long
    # (by `overlap` seconds) so the crossfade has material to blend.
    total_dur = int(total_dur)
    chunk_dur = int(max(5,min(chunk_dur,15)))
    chunks = max(1,total_dur//chunk_dur)
    chunk_dur = total_dur/chunks
    overlap = min(1.0,xfade/1000.0)
    render_len = chunk_dur+overlap
    sr = musicgen.sample_rate
    parts = []

    # Fixed seeds -> reproducible output for identical settings.
    torch.manual_seed(42)
    np.random.seed(42)

    t0=time.time()
    for i in range(chunks):
        log_resources(f"before chunk {i+1}")
        musicgen.set_generation_params(
            duration=render_len,use_sampling=True,
            top_k=k,top_p=p,temperature=temp,cfg_coef=cfg)
        # Pure inference: no_grad saves memory; autocast trims VRAM on CUDA.
        with torch.no_grad(),autocast():
            audio=musicgen.generate([prompt],progress=False)[0]

        # Coerce whatever channel layout came back into a stereo (2, n) tensor.
        audio=audio.cpu().float()
        if audio.dim()==1:
            audio=torch.stack([audio,audio])
        elif audio.shape[0]==1:
            audio=torch.cat([audio,audio])
        elif audio.shape[0]!=2:
            audio=torch.cat([audio[:1],audio[:1]])

        # Round-trip through a temp WAV to hand the tensor to pydub.
        with tempfile.NamedTemporaryFile(suffix=".wav",delete=False) as tmp:
            torchaudio.save(tmp.name,audio,sr,bits_per_sample=24)
            seg=AudioSegment.from_wav(tmp.name)
        os.unlink(tmp.name)
        parts.append(seg)
        gpu_clean()
        log_resources(f"after chunk {i+1}")

    # Stitch chunks with a crossfade, then trim to the exact length.
    track=parts[0]
    for seg in parts[1:]:
        # NOTE(review): `seg+1` boosts each appended chunk by +1 dB before
        # the crossfade — confirm the gain bump is intentional.
        track=track.append(seg+1,crossfade=xfade)
    track=track[:total_dur*1000]
    # NOTE(review): pydub's normalize() takes a positive headroom (dB below
    # full scale); -9.0 looks suspicious — verify against pydub docs.
    track=apply_fade(apply_eq(track).normalize(headroom=-9.0))

    out_path="output_cleaned.mp3"
    track.export(out_path,format="mp3",bitrate="128k",
                 tags={"title":"GhostAI Instrumental","artist":"GhostAI"})
    log_resources("final")
    print(f"Time: {time.time()-t0:.1f}s")
    return out_path,"✅ Done!"
|
238 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
239 |
def clear_inputs():
    """Reset every UI control to its default value (same order as the
    outputs list wired to the Clear button)."""
    defaults = ("", 3.0, 250, 0.9, 1.0, 30, 10, 1000,
                120, "none", "none", "none", "none", "none")
    return defaults
|
242 |
+
|
243 |
+
# ------------------------------------------------------------------ #
#  UI                                                                #
# ------------------------------------------------------------------ #
# Dark neon theme injected into the Gradio app (gr.Blocks(css=css)).
css="""body{background:linear-gradient(135deg,#0A0A0A 0%,#1C2526 100%);color:#E0E0E0;font-family:'Arial',sans-serif}
.header{text-align:center;padding:10px 20px;background:rgba(0,0,0,.9);border-bottom:1px solid #00FF9F}
"""
|
249 |
|
|
|
|
|
|
|
250 |
with gr.Blocks(css=css) as demo:
    # NOTE(review): emoji in the labels below were reconstructed from a
    # mojibake-damaged source — verify against the deployed app.
    gr.HTML('<div class="header"><span style="font-size:32px">👻</span> <b>GhostAI Music Generator</b></div>')

    # Prompt + sampling controls (fed straight into generate_music).
    prompt_box=gr.Textbox(label="Prompt",lines=4)
    cfg_scale =gr.Slider(1,10,3,label="CFG")
    top_k     =gr.Slider(10,500,250,step=10,label="Top-K")
    top_p     =gr.Slider(0,1,0.9,step=0.05,label="Top-P")
    temp      =gr.Slider(0.1,2,1,step=0.1,label="Temperature")
    total_dur =gr.Radio([30,60,90,120],value=30,label="Length (s)")
    chunk_dur =gr.Slider(5,15,10,step=1,label="Chunk Length (s)")
    xfade     =gr.Slider(100,2000,1000,step=100,label="Cross-fade (ms)")

    # Rhythm-section dropdowns consumed by the prompt builders.
    bpm   =gr.Slider(60,180,120,label="BPM")
    drum  =gr.Dropdown(["none","standard rock","funk groove","techno kick","jazz swing"],value="none",label="Drum")
    synth =gr.Dropdown(["none","analog synth","digital pad","arpeggiated synth"],value="none",label="Synth")
    step  =gr.Dropdown(["none","syncopated steps","steady steps","complex steps"],value="none",label="Steps")
    bass  =gr.Dropdown(["none","slap bass","deep bass","melodic bass"],value="none",label="Bass")
    gtr   =gr.Dropdown(["none","distorted","clean","jangle"],value="none",label="Guitar")

    # Genre buttons
    btns={
        "RHCP 🌶️":set_red_hot_chili_peppers_prompt,
        "Nirvana 🎸":set_nirvana_grunge_prompt,
        "Pearl Jam 🦪":set_pearl_jam_grunge_prompt,
        "Soundgarden 🌑":set_soundgarden_grunge_prompt,
        "Foo Fighters 🎤":set_foo_fighters_prompt,
        "Smashing Pumpkins 🎃":set_smashing_pumpkins_prompt,
        "Radiohead 🧠":set_radiohead_prompt,
        "Classic Rock 🎸":set_classic_rock_prompt,
        "Alt Rock 🎵":set_alternative_rock_prompt,
        "Post-Punk 🖤":set_post_punk_prompt,
        "Indie Rock 🎤":set_indie_rock_prompt,
        "Funk Rock 🎺":set_funk_rock_prompt,
        "Detroit Techno 🏙️":set_detroit_techno_prompt,
        "Deep House 🏠":set_deep_house_prompt
    }
    # One button per genre; each rewrites the prompt box from the current
    # rhythm-section dropdown values.
    with gr.Row():
        for label,fn in btns.items():
            gr.Button(label).click(
                fn,
                inputs=[bpm,drum,synth,step,bass,gtr],
                outputs=prompt_box
            )

    gen=gr.Button("Generate 🎼")
    clr=gr.Button("Clear")

    audio_out=gr.Audio(type="filepath")
    status   =gr.Textbox(interactive=False,label="Status")

    # Wiring: generation consumes every control; clear resets them all
    # (clear_inputs must return defaults in this exact order).
    gen.click(generate_music,
              inputs=[prompt_box,cfg_scale,top_k,top_p,temp,
                      total_dur,chunk_dur,xfade,
                      bpm,drum,synth,step,bass,gtr],
              outputs=[audio_out,status])
    clr.click(clear_inputs,None,
              [prompt_box,cfg_scale,top_k,top_p,temp,
               total_dur,chunk_dur,xfade,
               bpm,drum,synth,step,bass,gtr])
|
309 |
+
|
310 |
+
app=demo.launch(share=False,show_error=True)
# HF Spaces hardening: hide the FastAPI schema/docs endpoints of the
# server Gradio started above.
try:
    # NOTE(review): `demo._server` is a private Gradio attribute and
    # changes between Gradio versions — hence the broad except.
    demo._server.app.docs_url=demo._server.app.redoc_url=demo._server.app.openapi_url=None
except Exception:
    pass
|