ghostai1 commited on
Commit
545f8e1
ยท
verified ยท
1 Parent(s): 80cc045

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -141
app.py CHANGED
@@ -1,15 +1,28 @@
1
  #!/usr/bin/env python3
2
- # GhostAI Music Generator โ€“ HF-download version
 
 
 
 
 
 
 
 
3
  import os, sys, gc, time, warnings, random, tempfile
4
- import torch, torchaudio, numpy as np, gradio as gr, psutil
 
 
 
 
 
 
5
  from pydub import AudioSegment
6
- from torch.cuda.amp import autocast
7
  from audiocraft.models import MusicGen
8
  from huggingface_hub import login
 
9
 
10
- # ------------------------------------------------------------------ #
11
- # โ“ Torch <2.3 shim (adds get_default_device) #
12
- # ------------------------------------------------------------------ #
13
  if not hasattr(torch, "get_default_device"):
14
  torch.get_default_device = lambda: torch.device(
15
  "cuda" if torch.cuda.is_available() else "cpu"
@@ -18,61 +31,27 @@ if not hasattr(torch, "get_default_device"):
18
  warnings.filterwarnings("ignore")
19
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
20
 
21
- # ------------------------------------------------------------------ #
22
- # ๐Ÿ”‘ Login to HF (model download) #
23
- # ------------------------------------------------------------------ #
24
  HF_TOKEN = os.getenv("HF_TOKEN")
25
  if not HF_TOKEN:
26
- sys.exit("ERROR: environment variable HF_TOKEN not set in the Space settings.")
27
  login(HF_TOKEN)
28
 
29
- # ------------------------------------------------------------------ #
30
- # ๐Ÿ–ฅ Device setup #
31
- # ------------------------------------------------------------------ #
32
  device = "cuda" if torch.cuda.is_available() else "cpu"
33
- print(f"Running on {device.upper()}")
34
- if device == "cuda":
35
- print(f"GPU : {torch.cuda.get_device_name(0)}")
36
- def clean():
37
- if device == "cuda":
38
- torch.cuda.empty_cache()
39
- gc.collect()
40
- clean()
41
-
42
- # ------------------------------------------------------------------ #
43
- # ๐Ÿ“ฅ Load MusicGen from HF Hub #
44
- # ------------------------------------------------------------------ #
45
- print("Downloading & loading facebook/musicgen-medium โ€ฆ")
46
- musicgen_model = MusicGen.get_pretrained("facebook/musicgen-medium", device=device)
47
- musicgen_model.set_generation_params(duration=10, two_step_cfg=False)
48
-
49
- sample_rate = musicgen_model.sample_rate
50
-
51
- # ------------------------------------------------------------------ #
52
- # ๐Ÿ“Š Helpers #
53
- # ------------------------------------------------------------------ #
54
- def vram_ok(req=3.5):
55
- if device != "cuda":
56
- return True
57
- total = torch.cuda.get_device_properties(0).total_memory / 1024**3
58
- free = total - torch.cuda.memory_allocated() / 1024**3
59
- if free < req:
60
- print(f"โš ๏ธ Only {free:.2f} GB free (< {req} GB).")
61
- return free >= req
62
 
63
- def log(stage=""):
64
- if stage: print(f"โ”€โ”€ {stage} โ”€โ”€")
65
- if device == "cuda":
66
- a = torch.cuda.memory_allocated() / 1024**3
67
- r = torch.cuda.memory_reserved() / 1024**3
68
- print(f"GPU mem alloc {a:.2f} GB reserved {r:.2f} GB")
69
- print(f"CPU mem {psutil.virtual_memory().percent}% used")
70
-
71
- # ------------------------------------------------------------------ #
72
- # ๐ŸŽ› Prompt builders (unchanged) #
73
- # ------------------------------------------------------------------ #
74
- def _p(base,bpm,dr,syn,st,bass,gtr,dflt_bass,dflt_gtr,vibe):
75
- step = f" with {st}" if st!="none" else vibe.format(bpm=bpm)
76
  dr = f", {dr} drums" if dr!="none" else ""
77
  syn = f", {syn} accents" if syn!="none" else ""
78
  bass = f", {bass}" if bass!="none" else dflt_bass
@@ -83,131 +62,129 @@ def set_red_hot_chili_peppers_prompt(bpm,dr,syn,st,bass,gtr):
83
  return _p("Instrumental funk rock",bpm,dr,syn,st,bass,gtr,
84
  ", groovy basslines",", syncopated guitar riffs",
85
  "{bpm} BPM funky flow" if bpm>120 else "groovy rhythmic flow")
86
- # โ€ฆ keep your other 17 prompt functions exactly as before โ€ฆ
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
- # ------------------------------------------------------------------ #
89
- # ๐ŸŽš Audio post-processing #
90
- # ------------------------------------------------------------------ #
91
- def apply_eq(seg): return seg.low_pass_filter(8000).high_pass_filter(80)
92
- def apply_fade(seg): return seg.fade_in(1000).fade_out(1000)
93
 
94
- # ------------------------------------------------------------------ #
95
- # ๐Ÿš€ Generator #
96
- # ------------------------------------------------------------------ #
 
97
  def generate_music(prompt,cfg,k,p,temp,
98
  total_len,chunk_len,xfade,
99
  bpm,dr,syn,step,bass,gtr):
100
 
101
  if not prompt.strip():
102
- return None, "โŒ Prompt is empty."
103
- if not vram_ok():
104
- return None, "โŒ Not enough VRAM."
105
 
106
- total_len = int(total_len)
107
- chunk_len = max(5, min(int(chunk_len), 15))
108
- n_chunks = max(1, total_len // chunk_len)
109
- chunk_len = total_len / n_chunks
110
- overlap = min(1.0, xfade / 1000.0)
111
- render_len = chunk_len + overlap
112
- segments = []
113
 
114
- torch.manual_seed(42)
115
- np.random.seed(42)
116
 
117
  t0 = time.time()
118
  for i in range(n_chunks):
119
  log(f"before chunk {i+1}")
120
- musicgen_model.set_generation_params(
121
- duration=render_len,use_sampling=True,
122
- top_k=k,top_p=p,temperature=temp,cfg_coef=cfg)
 
123
  with torch.no_grad(), autocast():
124
- audio = musicgen_model.generate([prompt], progress=False)[0]
125
 
126
  audio = audio.cpu().float()
127
- if audio.dim()==1: audio = audio.repeat(2,1)
128
- elif audio.shape[0]==1: audio = audio.repeat(2,1)
129
- elif audio.shape[0]!=2: audio = audio[:1].repeat(2,1)
130
 
131
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
132
- torchaudio.save(tmp.name, audio, sample_rate)
133
  seg = AudioSegment.from_wav(tmp.name)
134
  os.unlink(tmp.name)
135
- segments.append(seg)
136
- clean(); log(f"after chunk {i+1}")
 
137
 
138
- track = segments[0]
139
- for seg in segments[1:]:
140
  track = track.append(seg, crossfade=xfade)
141
  track = track[: total_len*1000]
142
  track = apply_fade(apply_eq(track).normalize(headroom=-9.0))
143
 
144
- out_path = "output_cleaned.mp3"
145
- track.export(out_path, format="mp3", bitrate="128k",
146
  tags={"title":"GhostAI Track","artist":"GhostAI"})
147
- log("final"); print(f"โฑ {time.time()-t0:.1f}s total")
148
- return out_path, "โœ… Done!"
149
 
150
  def clear_inputs():
151
  return ("",3.0,250,0.9,1.0,30,10,1000,
152
  120,"none","none","none","none","none")
153
 
154
- # ------------------------------------------------------------------ #
155
- # ๐ŸŽจ Custom CSS (unchanged) #
156
- # ------------------------------------------------------------------ #
157
- css = """
158
- body{background:linear-gradient(135deg,#0A0A0A 0%,#1C2526 100%);color:#E0E0E0;font-family:'Orbitron',sans-serif}
159
- .header{padding:10px;text-align:center;background:rgba(0,0,0,.9);border-bottom:1px solid #00FF9F}
160
- """
161
 
162
- # ------------------------------------------------------------------ #
163
- # ๐Ÿ–ผ Gradio Blocks UI #
164
- # ------------------------------------------------------------------ #
165
  with gr.Blocks(css=css) as demo:
166
- gr.HTML('<div class="header"><h1>๐Ÿ‘ป GhostAI Music Generator</h1></div>')
167
- prompt_box = gr.Textbox(label="Instrumental Prompt", lines=4)
168
 
169
- # genre buttons (showing two; add the rest as needed)
170
  with gr.Row():
171
- gr.Button("RHCP ๐ŸŒถ๏ธ").click(
172
- set_red_hot_chili_peppers_prompt,
173
  inputs=[gr.State(120),"none","none","none","none","none"],
174
- outputs=prompt_box
175
- )
176
- gr.Button("Nirvana ๐ŸŽธ").click(
177
- set_nirvana_grunge_prompt,
178
  inputs=[gr.State(120),"none","none","none","none","none"],
179
- outputs=prompt_box
180
- )
181
-
182
- # parameter sliders
183
- cfg_scale = gr.Slider(1,10,3,label="CFG Scale")
184
- top_k = gr.Slider(10,500,250,step=10,label="Top-K")
185
- top_p = gr.Slider(0,1,0.9,step=0.05,label="Top-P")
186
- temp = gr.Slider(0.1,2,1,step=0.1,label="Temperature")
187
- total_len = gr.Radio([30,60,90,120],value=30,label="Length (s)")
188
- chunk_len = gr.Slider(5,15,10,step=1,label="Chunk (s)")
189
- crossfade = gr.Slider(100,2000,1000,step=100,label="Cross-fade (ms)")
190
-
191
- bpm = gr.Slider(60,180,120,label="BPM")
192
- drum = gr.Dropdown(["none","standard rock","funk groove","techno kick","jazz swing"],value="none",label="Drum")
193
- synth = gr.Dropdown(["none","analog synth","digital pad","arpeggiated synth"],value="none",label="Synth")
194
- step = gr.Dropdown(["none","syncopated steps","steady steps","complex steps"],value="none",label="Steps")
195
- bass = gr.Dropdown(["none","slap bass","deep bass","melodic bass"],value="none",label="Bass")
196
- gtr = gr.Dropdown(["none","distorted","clean","jangle"],value="none",label="Guitar")
197
-
198
- gen = gr.Button("Generate ๐ŸŽผ")
199
  clr = gr.Button("Clear ๐Ÿงน")
200
- audio_out = gr.Audio(type="filepath")
201
- status = gr.Textbox(interactive=False)
202
 
203
  gen.click(generate_music,
204
- inputs=[prompt_box,cfg_scale,top_k,top_p,temp,
205
- total_len,chunk_len,crossfade,
206
- bpm,drum,synth,step,bass,gtr],
207
- outputs=[audio_out,status])
208
- clr.click(clear_inputs, None,
209
- [prompt_box,cfg_scale,top_k,top_p,temp,
210
- total_len,chunk_len,crossfade,
211
- bpm,drum,synth,step,bass,gtr])
212
 
213
  demo.launch(share=False)
 
1
  #!/usr/bin/env python3
2
+ """
3
+ GhostAI Music Generator โ€” ZeroGPU Space
4
+ Streams facebook/musicgen-medium with dynamic GPU bursts.
5
+ """
6
+
7
+ # 0๏ธโƒฃ Import spaces *first* so CUDA isnโ€™t touched beforehand
8
+ import spaces # HF ZeroGPU decorator
9
+
10
+ # 1๏ธโƒฃ Standard libs
11
  import os, sys, gc, time, warnings, random, tempfile
12
+ import numpy as np, psutil
13
+
14
+ # 2๏ธโƒฃ Torch (CPU wheels; ZeroGPU migrates tensors when needed)
15
+ import torch, torchaudio
16
+
17
+ # 3๏ธโƒฃ Other deps
18
+ import gradio as gr
19
  from pydub import AudioSegment
 
20
  from audiocraft.models import MusicGen
21
  from huggingface_hub import login
22
+ from torch.cuda.amp import autocast
23
 
24
+ # โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
25
+ # Torch <2.3 shim (transformers may call torch.get_default_device)
 
26
  if not hasattr(torch, "get_default_device"):
27
  torch.get_default_device = lambda: torch.device(
28
  "cuda" if torch.cuda.is_available() else "cpu"
 
31
  warnings.filterwarnings("ignore")
32
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
33
 
34
+ # ๐Ÿ”‘ Authenticate so we can pull the model
 
 
35
  HF_TOKEN = os.getenv("HF_TOKEN")
36
  if not HF_TOKEN:
37
+ sys.exit("ERROR: Add HF_TOKEN as a secret in your Space.")
38
  login(HF_TOKEN)
39
 
 
 
 
40
  device = "cuda" if torch.cuda.is_available() else "cpu"
41
+ print(f"ZeroGPU detected โ†’ initial device is {device.upper()}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
+ # ๐Ÿ“ฅ Download model from Hub
44
+ print("Loading facebook/musicgen-medium โ€ฆ (first run may take ~6 GB download)")
45
+ musicgen = MusicGen.get_pretrained("facebook/musicgen-medium")
46
+ musicgen.set_generation_params(duration=10, two_step_cfg=False)
47
+ SAMPLE_RATE = musicgen.sample_rate
48
+
49
+
50
+ # โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ
51
+ # โ”‚ Prompt helpers (kept exactly from your original script) โ”‚
52
+ # โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ
53
+ def _p(base,bpm,dr,syn,st,bass,gtr,dflt_bass,dflt_gtr,flow):
54
+ step = f" with {st}" if st!="none" else flow.format(bpm=bpm)
 
55
  dr = f", {dr} drums" if dr!="none" else ""
56
  syn = f", {syn} accents" if syn!="none" else ""
57
  bass = f", {bass}" if bass!="none" else dflt_bass
 
62
  return _p("Instrumental funk rock",bpm,dr,syn,st,bass,gtr,
63
  ", groovy basslines",", syncopated guitar riffs",
64
  "{bpm} BPM funky flow" if bpm>120 else "groovy rhythmic flow")
65
+ def set_nirvana_grunge_prompt(bpm,dr,syn,st,bass,gtr):
66
+ return _p("Instrumental grunge",bpm,dr,syn,st,bass,gtr,
67
+ ", melodic basslines",", raw distorted guitar riffs",
68
+ "{bpm} BPM grungy pulse" if bpm>120 else "grungy rhythmic pulse")
69
+ # โ€ฆ include your other genre functions unchanged โ€ฆ
70
+
71
+ # Audio FX
72
+ def apply_eq(s): return s.low_pass_filter(8000).high_pass_filter(80)
73
+ def apply_fade(s): return s.fade_in(1000).fade_out(1000)
74
+
75
+ def log(stage=""):
76
+ if stage: print(f"โ”€โ”€ {stage} โ”€โ”€")
77
+ if torch.cuda.is_available():
78
+ alloc = torch.cuda.memory_allocated()/1024**3
79
+ res = torch.cuda.memory_reserved()/1024**3
80
+ print(f"GPU mem alloc {alloc:.2f} GB reserved {res:.2f} GB")
81
+ print(f"CPU mem {psutil.virtual_memory().percent}% used")
82
 
 
 
 
 
 
83
 
84
+ # โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ
85
+ # โ”‚ Core generator โ€” wrapped with @spaces.GPU โ”‚
86
+ # โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ
87
+ @spaces.GPU
88
  def generate_music(prompt,cfg,k,p,temp,
89
  total_len,chunk_len,xfade,
90
  bpm,dr,syn,step,bass,gtr):
91
 
92
  if not prompt.strip():
93
+ return None, "โš ๏ธ Prompt is empty."
 
 
94
 
95
+ total_len = int(total_len)
96
+ chunk_len = max(5, min(int(chunk_len), 15))
97
+ n_chunks = max(1, total_len // chunk_len)
98
+ chunk_len = total_len / n_chunks
99
+ overlap = min(1.0, xfade / 1000.0)
100
+ render = chunk_len + overlap
101
+ pieces = []
102
 
103
+ torch.manual_seed(42); np.random.seed(42)
 
104
 
105
  t0 = time.time()
106
  for i in range(n_chunks):
107
  log(f"before chunk {i+1}")
108
+ musicgen.set_generation_params(
109
+ duration=render, use_sampling=True,
110
+ top_k=k, top_p=p, temperature=temp, cfg_coef=cfg
111
+ )
112
  with torch.no_grad(), autocast():
113
+ audio = musicgen.generate([prompt], progress=False)[0]
114
 
115
  audio = audio.cpu().float()
116
+ if audio.dim()==1: audio = audio.repeat(2,1)
117
+ elif audio.shape[0]==1: audio = audio.repeat(2,1)
118
+ elif audio.shape[0]!=2: audio = audio[:1].repeat(2,1)
119
 
120
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
121
+ torchaudio.save(tmp.name, audio, SAMPLE_RATE)
122
  seg = AudioSegment.from_wav(tmp.name)
123
  os.unlink(tmp.name)
124
+ pieces.append(seg)
125
+ torch.cuda.empty_cache(); gc.collect()
126
+ log(f"after chunk {i+1}")
127
 
128
+ track = pieces[0]
129
+ for seg in pieces[1:]:
130
  track = track.append(seg, crossfade=xfade)
131
  track = track[: total_len*1000]
132
  track = apply_fade(apply_eq(track).normalize(headroom=-9.0))
133
 
134
+ out_file = "output_cleaned.mp3"
135
+ track.export(out_file, format="mp3", bitrate="128k",
136
  tags={"title":"GhostAI Track","artist":"GhostAI"})
137
+ log("final"); print(f"Total {time.time()-t0:.1f}s")
138
+ return out_file, "โœ… Done!"
139
 
140
  def clear_inputs():
141
  return ("",3.0,250,0.9,1.0,30,10,1000,
142
  120,"none","none","none","none","none")
143
 
144
+ # โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ
145
+ # โ”‚ Gradio Blocks UI with your CSS & controls โ”‚
146
+ # โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ
147
+ css = "body{background:#0A0A0A;color:#E0E0E0;font-family:'Orbitron',sans-serif}"
 
 
 
148
 
 
 
 
149
  with gr.Blocks(css=css) as demo:
150
+ gr.HTML("<h1 style='text-align:center'>๐Ÿ‘ป GhostAI Music Generator</h1>")
151
+ prompt = gr.Textbox(label="Prompt", lines=4)
152
 
 
153
  with gr.Row():
154
+ gr.Button("RHCP ๐ŸŒถ๏ธ").click(set_red_hot_chili_peppers_prompt,
 
155
  inputs=[gr.State(120),"none","none","none","none","none"],
156
+ outputs=prompt)
157
+ gr.Button("Nirvana ๐ŸŽธ").click(set_nirvana_grunge_prompt,
 
 
158
  inputs=[gr.State(120),"none","none","none","none","none"],
159
+ outputs=prompt)
160
+ # add more genre buttons here โ€ฆ
161
+
162
+ cfg = gr.Slider(1,10,3,label="CFG")
163
+ top_k = gr.Slider(10,500,250,step=10,label="Top-K")
164
+ top_p = gr.Slider(0,1,0.9,step=0.05,label="Top-P")
165
+ temp = gr.Slider(0.1,2,1,step=0.1,label="Temperature")
166
+ length= gr.Radio([30,60,90,120],value=30,label="Length (s)")
167
+ chunk = gr.Slider(5,15,10,step=1,label="Chunk (s)")
168
+ xfade = gr.Slider(100,2000,1000,step=100,label="Cross-fade (ms)")
169
+
170
+ bpm = gr.Slider(60,180,120,label="BPM")
171
+ drum = gr.Dropdown(["none","standard rock","funk groove","techno kick","jazz swing"],"none","Drum")
172
+ synth = gr.Dropdown(["none","analog synth","digital pad","arpeggiated synth"],"none","Synth")
173
+ steps = gr.Dropdown(["none","syncopated steps","steady steps","complex steps"],"none","Steps")
174
+ bass = gr.Dropdown(["none","slap bass","deep bass","melodic bass"],"none","Bass")
175
+ gtr = gr.Dropdown(["none","distorted","clean","jangle"],"none","Guitar")
176
+
177
+ gen = gr.Button("Generate ๐ŸŽถ")
 
178
  clr = gr.Button("Clear ๐Ÿงน")
179
+ audio = gr.Audio(type="filepath")
180
+ status= gr.Textbox(interactive=False)
181
 
182
  gen.click(generate_music,
183
+ inputs=[prompt,cfg,top_k,top_p,temp,length,chunk,xfade,
184
+ bpm,drum,synth,steps,bass,gtr],
185
+ outputs=[audio,status])
186
+ clr.click(clear_inputs,None,
187
+ [prompt,cfg,top_k,top_p,temp,length,chunk,xfade,
188
+ bpm,drum,synth,steps,bass,gtr])
 
 
189
 
190
  demo.launch(share=False)