ginipick committed on
Commit
81eb577
Β·
verified Β·
1 Parent(s): 9187b50

Update ui/components.py

Browse files
Files changed (1) hide show
  1. ui/components.py +474 -83
ui/components.py CHANGED
@@ -9,7 +9,11 @@ Apache 2.0 License
9
  import gradio as gr
10
  import librosa
11
  import os
12
-
 
 
 
 
13
 
14
  TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic"
15
  LYRIC_DEFAULT = """[verse]
@@ -34,7 +38,7 @@ Hear the night sing out our song
34
  Guitar strings they start to weep
35
  Wake the soul from silent sleep
36
  Every note a story told
37
- In this night we’re bold and gold
38
 
39
  [bridge]
40
  Voices blend in harmony
@@ -49,52 +53,317 @@ Catch the tune and hold it tight
49
  In this moment we take flight
50
  """
51
 
52
- # First, let's define the presets at the top of the file, after the imports
53
  GENRE_PRESETS = {
54
- "Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, female vocals, polished vocals",
55
- "Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, male vocals, raw vocals",
56
- "Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, male vocals, rhythmic vocals",
57
- "Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, male vocals, twangy vocals",
58
- "EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental",
59
- "Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, male vocals, smooth vocals",
60
- "Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental",
61
- "Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, male vocals, crooning vocals",
62
- "Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, male vocals, screamed vocals",
63
- "R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, female vocals, silky vocals"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  }
65
 
66
- # Add this function to handle preset selection
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  def update_tags_from_preset(preset_name):
68
  if preset_name == "Custom":
69
  return ""
70
  return GENRE_PRESETS.get(preset_name, "")
71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  def create_output_ui(task_name="Text2Music"):
74
  # For many consumer-grade GPU devices, only one batch can be run
75
  output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
76
- # output_audio2 = gr.Audio(type="filepath", label="Generated Audio 2")
77
- with gr.Accordion(f"{task_name} Parameters", open=False):
78
  input_params_json = gr.JSON(label=f"{task_name} Parameters")
79
- # outputs = [output_audio1, output_audio2]
 
 
 
 
 
 
 
 
 
 
80
  outputs = [output_audio1]
81
  return outputs, input_params_json
82
 
83
-
84
  def dump_func(*args):
85
  print(args)
86
  return []
87
 
88
-
89
  def create_text2music_ui(
90
  gr,
91
  text2music_process_func,
92
  sample_data_func=None,
93
  load_data_func=None,
94
  ):
 
 
95
 
96
  with gr.Row():
97
  with gr.Column():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  with gr.Row(equal_height=True):
99
  # add markdown, tags and lyrics examples are from ai music generation community
100
  audio_duration = gr.Slider(
@@ -105,13 +374,19 @@ def create_text2music_ui(
105
  label="Audio Duration",
106
  interactive=True,
107
  info="-1 means random duration (30 ~ 240).",
108
- scale=9,
109
  )
110
  sample_bnt = gr.Button("Sample", variant="secondary", scale=1)
 
111
 
112
  # audio2audio
113
  with gr.Row(equal_height=True):
114
- audio2audio_enable = gr.Checkbox(label="Enable Audio2Audio", value=False, info="Check to enable Audio-to-Audio generation using a reference audio.", elem_id="audio2audio_checkbox")
 
 
 
 
 
115
  lora_name_or_path = gr.Dropdown(
116
  label="Lora Name or Path",
117
  choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
@@ -119,7 +394,13 @@ def create_text2music_ui(
119
  allow_custom_value=True,
120
  )
121
 
122
- ref_audio_input = gr.Audio(type="filepath", label="Reference Audio (for Audio2Audio)", visible=False, elem_id="ref_audio_input", show_download_button=True)
 
 
 
 
 
 
123
  ref_audio_strength = gr.Slider(
124
  label="Refer audio strength",
125
  minimum=0.0,
@@ -145,43 +426,61 @@ def create_text2music_ui(
145
 
146
  with gr.Column(scale=2):
147
  with gr.Group():
148
- gr.Markdown("""<center>Support tags, descriptions, and scene. Use commas to separate different tags.<br>Tags and lyrics examples are from AI music generation community.</center>""")
 
 
149
  with gr.Row():
150
  genre_preset = gr.Dropdown(
151
  choices=["Custom"] + list(GENRE_PRESETS.keys()),
152
  value="Custom",
153
- label="Preset",
154
  scale=1,
155
  )
156
- prompt = gr.Textbox(
157
- lines=1,
158
- label="Tags",
159
- max_lines=4,
160
- value=TAG_DEFAULT,
161
- scale=9,
162
  )
 
 
 
 
 
 
 
 
163
 
164
- # Add the change event for the preset dropdown
165
  genre_preset.change(
166
  fn=update_tags_from_preset,
167
  inputs=[genre_preset],
168
  outputs=[prompt]
169
  )
 
 
 
 
 
 
 
170
  with gr.Group():
171
- gr.Markdown("""<center>Support lyric structure tags like [verse], [chorus], and [bridge] to separate different parts of the lyrics.<br>Use [instrumental] or [inst] to generate instrumental music. Not support genre structure tag in lyrics</center>""")
 
172
  lyrics = gr.Textbox(
173
  lines=9,
174
  label="Lyrics",
175
  max_lines=13,
176
  value=LYRIC_DEFAULT,
 
177
  )
178
 
179
  with gr.Accordion("Basic Settings", open=False):
180
  infer_step = gr.Slider(
181
  minimum=1,
182
- maximum=200,
183
  step=1,
184
- value=200,
185
  label="Infer Steps",
186
  interactive=True,
187
  )
@@ -219,6 +518,13 @@ def create_text2music_ui(
219
  info="Seed for the generation",
220
  )
221
 
 
 
 
 
 
 
 
222
  with gr.Accordion("Advanced Settings", open=False):
223
  scheduler_type = gr.Radio(
224
  ["euler", "heun"],
@@ -294,10 +600,58 @@ def create_text2music_ui(
294
  info="Optimal Steps for the generation. But not test well",
295
  )
296
 
297
- text2music_bnt = gr.Button("Generate", variant="primary")
298
 
299
  with gr.Column():
300
  outputs, input_params_json = create_output_ui()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
301
  with gr.Tab("retake"):
302
  retake_variance = gr.Slider(
303
  minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
@@ -309,37 +663,32 @@ def create_text2music_ui(
309
  retake_outputs, retake_input_params_json = create_output_ui("Retake")
310
 
311
  def retake_process_func(json_data, retake_variance, retake_seeds):
312
- return text2music_process_func(
313
- json_data["audio_duration"],
314
- json_data["prompt"],
315
- json_data["lyrics"],
316
- json_data["infer_step"],
317
- json_data["guidance_scale"],
318
- json_data["scheduler_type"],
319
- json_data["cfg_type"],
320
- json_data["omega_scale"],
321
- ", ".join(map(str, json_data["actual_seeds"])),
322
- json_data["guidance_interval"],
323
- json_data["guidance_interval_decay"],
324
- json_data["min_guidance_scale"],
325
- json_data["use_erg_tag"],
326
- json_data["use_erg_lyric"],
327
- json_data["use_erg_diffusion"],
328
- ", ".join(map(str, json_data["oss_steps"])),
329
- (
330
- json_data["guidance_scale_text"]
331
- if "guidance_scale_text" in json_data
332
- else 0.0
333
- ),
334
- (
335
- json_data["guidance_scale_lyric"]
336
- if "guidance_scale_lyric" in json_data
337
- else 0.0
338
- ),
339
- retake_seeds=retake_seeds,
340
  retake_variance=retake_variance,
341
- task="retake",
342
- lora_name_or_path="none" if "lora_name_or_path" not in json_data else json_data["lora_name_or_path"]
343
  )
344
 
345
  retake_bnt.click(
@@ -351,6 +700,7 @@ def create_text2music_ui(
351
  ],
352
  outputs=retake_outputs + [retake_input_params_json],
353
  )
 
354
  with gr.Tab("repainting"):
355
  retake_variance = gr.Slider(
356
  minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
@@ -437,7 +787,7 @@ def create_text2music_ui(
437
  json_data = repaint_json_data
438
  src_audio_path = json_data["audio_path"]
439
 
440
- return text2music_process_func(
441
  json_data["audio_duration"],
442
  prompt,
443
  lyrics,
@@ -462,7 +812,7 @@ def create_text2music_ui(
462
  repaint_start=repaint_start,
463
  repaint_end=repaint_end,
464
  src_audio_path=src_audio_path,
465
- lora_name_or_path="none" if "lora_name_or_path" not in json_data else json_data["lora_name_or_path"]
466
  )
467
 
468
  repaint_bnt.click(
@@ -496,6 +846,7 @@ def create_text2music_ui(
496
  ],
497
  outputs=repaint_outputs + [repaint_input_params_json],
498
  )
 
499
  with gr.Tab("edit"):
500
  edit_prompt = gr.Textbox(lines=2, label="Edit Tags", max_lines=4)
501
  edit_lyrics = gr.Textbox(lines=9, label="Edit Lyrics", max_lines=13)
@@ -610,7 +961,7 @@ def create_text2music_ui(
610
  if not edit_lyrics:
611
  edit_lyrics = lyrics
612
 
613
- return text2music_process_func(
614
  json_data["audio_duration"],
615
  prompt,
616
  lyrics,
@@ -636,7 +987,7 @@ def create_text2music_ui(
636
  edit_n_min=edit_n_min,
637
  edit_n_max=edit_n_max,
638
  retake_seeds=retake_seeds,
639
- lora_name_or_path="none" if "lora_name_or_path" not in json_data else json_data["lora_name_or_path"]
640
  )
641
 
642
  edit_bnt.click(
@@ -671,6 +1022,7 @@ def create_text2music_ui(
671
  ],
672
  outputs=edit_outputs + [edit_input_params_json],
673
  )
 
674
  with gr.Tab("extend"):
675
  extend_seeds = gr.Textbox(
676
  label="extend seeds (default None)", placeholder="", value=None
@@ -756,7 +1108,7 @@ def create_text2music_ui(
756
 
757
  repaint_start = -left_extend_length
758
  repaint_end = json_data["audio_duration"] + right_extend_length
759
- return text2music_process_func(
760
  json_data["audio_duration"],
761
  prompt,
762
  lyrics,
@@ -781,7 +1133,7 @@ def create_text2music_ui(
781
  repaint_start=repaint_start,
782
  repaint_end=repaint_end,
783
  src_audio_path=src_audio_path,
784
- lora_name_or_path="none" if "lora_name_or_path" not in json_data else json_data["lora_name_or_path"]
785
  )
786
 
787
  extend_bnt.click(
@@ -861,8 +1213,10 @@ def create_text2music_ui(
861
  )
862
 
863
  def sample_data(lora_name_or_path_):
864
- json_data = sample_data_func(lora_name_or_path_)
865
- return json2output(json_data)
 
 
866
 
867
  sample_bnt.click(
868
  sample_data,
@@ -892,8 +1246,9 @@ def create_text2music_ui(
892
  ],
893
  )
894
 
 
895
  text2music_bnt.click(
896
- fn=text2music_process_func,
897
  inputs=[
898
  audio_duration,
899
  prompt,
@@ -917,6 +1272,9 @@ def create_text2music_ui(
917
  ref_audio_strength,
918
  ref_audio_input,
919
  lora_name_or_path,
 
 
 
920
  ],
921
  outputs=outputs + [input_params_json],
922
  )
@@ -928,19 +1286,51 @@ def create_main_demo_ui(
928
  load_data_func=dump_func,
929
  ):
930
  with gr.Blocks(
931
- title="ACE-Step Model 1.0 DEMO",
 
 
 
 
 
 
 
 
 
 
 
 
932
  ) as demo:
933
  gr.Markdown(
934
  """
935
- <h1 style="text-align: center;">ACE-Step: A Step Towards Music Generation Foundation Model</h1>
936
- <p>
937
- <a href="https://ace-step.github.io/" target='_blank'>Project</a> |
938
- <a href="https://huggingface.co/ACE-Step/ACE-Step-v1-3.5B">Checkpoints</a> |
939
- <a href="https://discord.gg/rjAZz2xBdG" target='_blank'>Discord</a>
940
- </p>
 
 
 
941
  """
942
  )
943
- with gr.Tab("text2music"):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
944
  create_text2music_ui(
945
  gr=gr,
946
  text2music_process_func=text2music_process_func,
@@ -955,4 +1345,5 @@ if __name__ == "__main__":
955
  demo.launch(
956
  server_name="0.0.0.0",
957
  server_port=7860,
958
- )
 
 
9
  import gradio as gr
10
  import librosa
11
  import os
12
+ import random
13
+ import hashlib
14
+ import numpy as np
15
+ import json
16
+ from typing import Dict, List, Tuple, Optional
17
 
18
  TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic"
19
  LYRIC_DEFAULT = """[verse]
 
38
  Guitar strings they start to weep
39
  Wake the soul from silent sleep
40
  Every note a story told
41
+ In this night we're bold and gold
42
 
43
  [bridge]
44
  Voices blend in harmony
 
53
  In this moment we take flight
54
  """
55
 
56
+ # ν™•μž₯된 μž₯λ₯΄ 프리셋 (κΈ°μ‘΄ + κ°œμ„ λœ νƒœκ·Έ)
57
  GENRE_PRESETS = {
58
+ "Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, female vocals, polished vocals, radio-ready, commercial, layered vocals",
59
+ "Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, male vocals, raw vocals, power chords, driving rhythm",
60
+ "Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, male vocals, rhythmic vocals, trap beats, punchy drums",
61
+ "Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, male vocals, twangy vocals, storytelling, americana",
62
+ "EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic",
63
+ "Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, male vocals, smooth vocals, offbeat rhythm, island vibes",
64
+ "Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony",
65
+ "Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, male vocals, crooning vocals, swing feel, sophisticated",
66
+ "Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, male vocals, screamed vocals, distorted, powerful",
67
+ "R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, female vocals, silky vocals, smooth production, neo-soul"
68
+ }
69
+
70
+ # ν’ˆμ§ˆ 프리셋 μ‹œμŠ€ν…œ μΆ”κ°€
71
+ QUALITY_PRESETS = {
72
+ "Draft (Fast)": {
73
+ "infer_step": 50,
74
+ "guidance_scale": 10.0,
75
+ "scheduler_type": "euler",
76
+ "omega_scale": 5.0,
77
+ "use_erg_diffusion": False,
78
+ "use_erg_tag": True,
79
+ "description": "λΉ λ₯Έ μ΄ˆμ•ˆ 생성 (1-2λΆ„)"
80
+ },
81
+ "Standard": {
82
+ "infer_step": 100,
83
+ "guidance_scale": 15.0,
84
+ "scheduler_type": "euler",
85
+ "omega_scale": 10.0,
86
+ "use_erg_diffusion": True,
87
+ "use_erg_tag": True,
88
+ "description": "ν‘œμ€€ ν’ˆμ§ˆ (3-5λΆ„)"
89
+ },
90
+ "High Quality": {
91
+ "infer_step": 200,
92
+ "guidance_scale": 18.0,
93
+ "scheduler_type": "heun",
94
+ "omega_scale": 15.0,
95
+ "use_erg_diffusion": True,
96
+ "use_erg_tag": True,
97
+ "description": "κ³ ν’ˆμ§ˆ 생성 (8-12λΆ„)"
98
+ },
99
+ "Ultra (Best)": {
100
+ "infer_step": 300,
101
+ "guidance_scale": 20.0,
102
+ "scheduler_type": "heun",
103
+ "omega_scale": 20.0,
104
+ "use_erg_diffusion": True,
105
+ "use_erg_tag": True,
106
+ "description": "졜고 ν’ˆμ§ˆ (15-20λΆ„)"
107
+ }
108
  }
109
 
110
+ # 닀쀑 μ‹œλ“œ 생성 μ„€μ •
111
+ MULTI_SEED_OPTIONS = {
112
+ "Single": 1,
113
+ "Best of 3": 3,
114
+ "Best of 5": 5,
115
+ "Best of 10": 10
116
+ }
117
+
118
class MusicGenerationCache:
    """In-memory cache for generation results.

    Keys are derived from the handful of parameters that materially shape
    the output; when the cache is full, the oldest entry (dict insertion
    order) is evicted first.
    """

    def __init__(self):
        self.cache = {}
        self.max_cache_size = 50

    def get_cache_key(self, params):
        # Only hash the parameters that actually affect the generated audio.
        relevant = {
            name: value
            for name, value in params.items()
            if name in ('prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration')
        }
        digest = hashlib.md5(str(sorted(relevant.items())).encode())
        return digest.hexdigest()[:16]

    def get_cached_result(self, params):
        # Returns None on a cache miss.
        return self.cache.get(self.get_cache_key(params))

    def cache_result(self, params, result):
        # Evict the oldest entry when at capacity (FIFO, not true LRU).
        if len(self.cache) >= self.max_cache_size:
            del self.cache[next(iter(self.cache))]
        self.cache[self.get_cache_key(params)] = result
141
+
142
+ # μ „μ—­ μΊμ‹œ μΈμŠ€ν„΄μŠ€
143
+ generation_cache = MusicGenerationCache()
144
+
145
def enhance_prompt_with_genre(base_prompt: str, genre: str) -> str:
    """Append genre-specific polish tags to *base_prompt*.

    The prompt is returned unchanged for "Custom", an empty genre, or a
    genre that has no enhancement entry.
    """
    if not genre or genre == "Custom":
        return base_prompt

    # Extra refinement tags per supported genre.
    genre_enhancements = {
        "Modern Pop": ["polished production", "mainstream appeal", "hook-driven"],
        "Rock": ["guitar-driven", "powerful drums", "energetic performance"],
        "Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"],
        "Country": ["acoustic warmth", "storytelling melody", "authentic feel"],
        "EDM": ["electronic atmosphere", "build-ups", "dance-friendly"],
        "Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"],
        "Classical": ["orchestral depth", "musical sophistication", "timeless beauty"],
        "Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"],
        "Metal": ["aggressive energy", "powerful sound", "intense atmosphere"],
        "R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"]
    }

    extra = genre_enhancements.get(genre)
    if extra is None:
        return base_prompt
    return f"{base_prompt}, {', '.join(extra)}"
169
+
170
def calculate_quality_score(audio_path: str) -> float:
    """Compute a rough 0-100 quality score for an audio file.

    Combines three cheap heuristics: RMS energy (0-40 points), mean
    spectral centroid (0-40 points) and a clarity term derived from the
    zero-crossing rate (0-20 points).  Returns a neutral 50.0 when the
    file cannot be analyzed (missing file, decode failure, etc.).
    """
    try:
        y, sr = librosa.load(audio_path)

        # Basic signal metrics.
        rms_energy = np.sqrt(np.mean(y**2))
        spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
        zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y))

        # Normalized sub-scores (capped so the total stays in 0-100).
        energy_score = min(rms_energy * 1000, 40)  # 0-40 points
        spectral_score = min(spectral_centroid / 100, 40)  # 0-40 points
        clarity_score = min((1 - zero_crossing_rate) * 20, 20)  # 0-20 points

        total_score = energy_score + spectral_score + clarity_score
        return round(total_score, 1)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any analysis failure yields the default.
        return 50.0
189
+
190
def update_tags_from_preset(preset_name):
    """Return the tag string for *preset_name* ("" for Custom or unknown presets)."""
    return "" if preset_name == "Custom" else GENRE_PRESETS.get(preset_name, "")
194
 
195
def update_quality_preset(preset_name):
    """Map a quality-preset name to component values.

    Returns a tuple of (infer_step, guidance_scale, scheduler_type,
    omega_scale, use_erg_diffusion, use_erg_tag).  Standard-equivalent
    defaults are used when the preset name or any field is missing.
    """
    defaults = (100, 15.0, "euler", 10.0, True, True)
    if preset_name not in QUALITY_PRESETS:
        return defaults

    preset = QUALITY_PRESETS[preset_name]
    fields = ("infer_step", "guidance_scale", "scheduler_type",
              "omega_scale", "use_erg_diffusion", "use_erg_tag")
    return tuple(preset.get(field, fallback) for field, fallback in zip(fields, defaults))
209
+
210
def create_enhanced_process_func(original_func):
    """Wrap *original_func* with smart prompt enhancement, result caching
    and multi-seed best-of-N selection.

    The returned wrapper keeps the original positional signature and
    forwards any extra keyword arguments untouched.
    """

    def enhanced_func(
        audio_duration, prompt, lyrics, infer_step, guidance_scale,
        scheduler_type, cfg_type, omega_scale, manual_seeds,
        guidance_interval, guidance_interval_decay, min_guidance_scale,
        use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
        guidance_scale_text, guidance_scale_lyric,
        audio2audio_enable=False, ref_audio_strength=0.5, ref_audio_input=None,
        lora_name_or_path="none", multi_seed_mode="Single",
        enable_smart_enhancement=True, genre_preset="Custom", **kwargs
    ):
        # Smart prompt expansion: append genre-specific tags.
        if enable_smart_enhancement and genre_preset != "Custom":
            prompt = enhance_prompt_with_genre(prompt, genre_preset)

        # Cache lookup keyed on the parameters that shape the output.
        # NOTE(review): seeds are NOT part of the cache key, so a cached
        # result is returned even when the caller requests different
        # seeds — confirm this is intended.
        cache_params = {
            'prompt': prompt, 'lyrics': lyrics, 'audio_duration': audio_duration,
            'infer_step': infer_step, 'guidance_scale': guidance_scale
        }

        cached_result = generation_cache.get_cached_result(cache_params)
        if cached_result:
            return cached_result

        # Number of candidate generations (1 = plain passthrough).
        num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1)

        if num_candidates == 1:
            # Single generation: forward everything to the wrapped function.
            result = original_func(
                audio_duration, prompt, lyrics, infer_step, guidance_scale,
                scheduler_type, cfg_type, omega_scale, manual_seeds,
                guidance_interval, guidance_interval_decay, min_guidance_scale,
                use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
                guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
                ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
            )
        else:
            # Multi-seed mode: generate N candidates with random seeds and
            # keep the one with the best heuristic quality score.
            candidates = []

            for i in range(num_candidates):
                seed = random.randint(1, 10000)

                try:
                    result = original_func(
                        audio_duration, prompt, lyrics, infer_step, guidance_scale,
                        scheduler_type, cfg_type, omega_scale, str(seed),
                        guidance_interval, guidance_interval_decay, min_guidance_scale,
                        use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
                        guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
                        ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
                    )

                    if result and len(result) > 0:
                        # First element of the result is the audio file path.
                        audio_path = result[0]
                        if audio_path and os.path.exists(audio_path):
                            quality_score = calculate_quality_score(audio_path)
                            candidates.append({
                                "result": result,
                                "quality_score": quality_score,
                                "seed": seed
                            })
                except Exception as e:
                    # A failed candidate is logged and skipped, not fatal.
                    print(f"Generation {i+1} failed: {e}")
                    continue

            if candidates:
                # Select the highest-scoring candidate.
                best_candidate = max(candidates, key=lambda x: x["quality_score"])
                result = best_candidate["result"]

                # Attach quality metadata to the parameter JSON, if present.
                if len(result) > 1 and isinstance(result[1], dict):
                    result[1]["quality_score"] = best_candidate["quality_score"]
                    result[1]["selected_seed"] = best_candidate["seed"]
                    result[1]["candidates_count"] = len(candidates)
            else:
                # Every candidate failed: fall back to a single plain run.
                result = original_func(
                    audio_duration, prompt, lyrics, infer_step, guidance_scale,
                    scheduler_type, cfg_type, omega_scale, manual_seeds,
                    guidance_interval, guidance_interval_decay, min_guidance_scale,
                    use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
                    guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
                    ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
                )

        # Cache and return the final result.
        generation_cache.cache_result(cache_params, result)
        return result

    return enhanced_func
306
 
307
def create_output_ui(task_name="Text2Music"):
    """Build the output panel: one audio player plus a collapsed accordion
    containing the generation-parameter JSON and quality-info widgets.

    Returns (outputs, input_params_json) where *outputs* is a one-element
    list holding the audio component.
    """
    # For many consumer-grade GPU devices, only one batch can be run
    output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")

    with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False):
        input_params_json = gr.JSON(label=f"{task_name} Parameters")

        # Quality information display.
        # NOTE(review): these two components are created but never returned,
        # so callers cannot wire event outputs to them — confirm intended.
        with gr.Row():
            quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False)
            generation_info = gr.Textbox(
                label="Generation Info",
                value="",
                interactive=False,
                max_lines=2
            )

    outputs = [output_audio1]
    return outputs, input_params_json
326
 
 
327
def dump_func(*args):
    """Debug placeholder handler: print whatever it receives, emit no outputs."""
    print(args)
    return []
330
 
 
331
  def create_text2music_ui(
332
  gr,
333
  text2music_process_func,
334
  sample_data_func=None,
335
  load_data_func=None,
336
  ):
337
+ # ν–₯μƒλœ ν”„λ‘œμ„ΈμŠ€ ν•¨μˆ˜ 생성
338
+ enhanced_process_func = create_enhanced_process_func(text2music_process_func)
339
 
340
  with gr.Row():
341
  with gr.Column():
342
+ # ν’ˆμ§ˆ 및 μ„±λŠ₯ μ„€μ • μ„Ήμ…˜ μΆ”κ°€
343
+ with gr.Group():
344
+ gr.Markdown("### ⚑ ν’ˆμ§ˆ & μ„±λŠ₯ μ„€μ •")
345
+ with gr.Row():
346
+ quality_preset = gr.Dropdown(
347
+ choices=list(QUALITY_PRESETS.keys()),
348
+ value="Standard",
349
+ label="ν’ˆμ§ˆ 프리셋",
350
+ scale=2
351
+ )
352
+ multi_seed_mode = gr.Dropdown(
353
+ choices=list(MULTI_SEED_OPTIONS.keys()),
354
+ value="Single",
355
+ label="닀쀑 생성 λͺ¨λ“œ",
356
+ scale=2,
357
+ info="μ—¬λŸ¬ 번 μƒμ„±ν•˜μ—¬ 졜고 ν’ˆμ§ˆ 선택"
358
+ )
359
+
360
+ preset_description = gr.Textbox(
361
+ value=QUALITY_PRESETS["Standard"]["description"],
362
+ label="μ„€λͺ…",
363
+ interactive=False,
364
+ max_lines=1
365
+ )
366
+
367
  with gr.Row(equal_height=True):
368
  # add markdown, tags and lyrics examples are from ai music generation community
369
  audio_duration = gr.Slider(
 
374
  label="Audio Duration",
375
  interactive=True,
376
  info="-1 means random duration (30 ~ 240).",
377
+ scale=7,
378
  )
379
  sample_bnt = gr.Button("Sample", variant="secondary", scale=1)
380
+ preview_bnt = gr.Button("🎡 Preview", variant="secondary", scale=2)
381
 
382
  # audio2audio
383
  with gr.Row(equal_height=True):
384
+ audio2audio_enable = gr.Checkbox(
385
+ label="Enable Audio2Audio",
386
+ value=False,
387
+ info="Check to enable Audio-to-Audio generation using a reference audio.",
388
+ elem_id="audio2audio_checkbox"
389
+ )
390
  lora_name_or_path = gr.Dropdown(
391
  label="Lora Name or Path",
392
  choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
 
394
  allow_custom_value=True,
395
  )
396
 
397
+ ref_audio_input = gr.Audio(
398
+ type="filepath",
399
+ label="Reference Audio (for Audio2Audio)",
400
+ visible=False,
401
+ elem_id="ref_audio_input",
402
+ show_download_button=True
403
+ )
404
  ref_audio_strength = gr.Slider(
405
  label="Refer audio strength",
406
  minimum=0.0,
 
426
 
427
  with gr.Column(scale=2):
428
  with gr.Group():
429
+ gr.Markdown("""### 🎼 슀마트 ν”„λ‘¬ν”„νŠΈ μ‹œμŠ€ν…œ
430
+ <center>μž₯λ₯΄ 선택 μ‹œ μžλ™μœΌλ‘œ μ΅œμ ν™”λœ νƒœκ·Έκ°€ μΆ”κ°€λ©λ‹ˆλ‹€. 콀마둜 κ΅¬λΆ„ν•˜μ—¬ νƒœκ·Έλ₯Ό μž…λ ₯ν•˜μ„Έμš”.</center>""")
431
+
432
  with gr.Row():
433
  genre_preset = gr.Dropdown(
434
  choices=["Custom"] + list(GENRE_PRESETS.keys()),
435
  value="Custom",
436
+ label="μž₯λ₯΄ 프리셋",
437
  scale=1,
438
  )
439
+ enable_smart_enhancement = gr.Checkbox(
440
+ label="슀마트 ν–₯상",
441
+ value=True,
442
+ info="μžλ™ νƒœκ·Έ μ΅œμ ν™”",
443
+ scale=1
 
444
  )
445
+
446
+ prompt = gr.Textbox(
447
+ lines=2,
448
+ label="Tags",
449
+ max_lines=4,
450
+ value=TAG_DEFAULT,
451
+ placeholder="콀마둜 κ΅¬λΆ„λœ νƒœκ·Έλ“€...",
452
+ )
453
 
454
+ # μž₯λ₯΄ 프리셋 λ³€κ²½ 이벀트
455
  genre_preset.change(
456
  fn=update_tags_from_preset,
457
  inputs=[genre_preset],
458
  outputs=[prompt]
459
  )
460
+
461
+ quality_preset.change(
462
+ fn=lambda x: QUALITY_PRESETS.get(x, {}).get("description", ""),
463
+ inputs=[quality_preset],
464
+ outputs=[preset_description]
465
+ )
466
+
467
  with gr.Group():
468
+ gr.Markdown("""### πŸ“ 가사 μž…λ ₯
469
+ <center>ꡬ쑰 νƒœκ·Έ [verse], [chorus], [bridge] μ‚¬μš©μ„ ꢌμž₯ν•©λ‹ˆλ‹€.<br>[instrumental] λ˜λŠ” [inst]λ₯Ό μ‚¬μš©ν•˜λ©΄ 연주곑을 μƒμ„±ν•©λ‹ˆλ‹€.</center>""")
470
  lyrics = gr.Textbox(
471
  lines=9,
472
  label="Lyrics",
473
  max_lines=13,
474
  value=LYRIC_DEFAULT,
475
+ placeholder="가사λ₯Ό μž…λ ₯ν•˜μ„Έμš”. [verse], [chorus] λ“±μ˜ ꡬ쑰 νƒœκ·Έ μ‚¬μš©μ„ ꢌμž₯ν•©λ‹ˆλ‹€."
476
  )
477
 
478
  with gr.Accordion("Basic Settings", open=False):
479
  infer_step = gr.Slider(
480
  minimum=1,
481
+ maximum=300,
482
  step=1,
483
+ value=100,
484
  label="Infer Steps",
485
  interactive=True,
486
  )
 
518
  info="Seed for the generation",
519
  )
520
 
521
+ # ν’ˆμ§ˆ 프리셋 λ³€κ²½ 이벀트
522
+ quality_preset.change(
523
+ fn=update_quality_preset,
524
+ inputs=[quality_preset],
525
+ outputs=[infer_step, guidance_scale, scheduler_type, omega_scale, use_erg_diffusion, use_erg_tag]
526
+ )
527
+
528
  with gr.Accordion("Advanced Settings", open=False):
529
  scheduler_type = gr.Radio(
530
  ["euler", "heun"],
 
600
  info="Optimal Steps for the generation. But not test well",
601
  )
602
 
603
+ text2music_bnt = gr.Button("🎡 Generate Music", variant="primary", size="lg")
604
 
605
  with gr.Column():
606
  outputs, input_params_json = create_output_ui()
607
+
608
+ # μ‹€μ‹œκ°„ 프리뷰 κΈ°λŠ₯
609
def generate_preview(prompt, lyrics, genre_preset):
    """Generate a quick 10-second preview using fast, low-quality settings."""
    # Deliberately cheap settings so the preview returns quickly.
    preview_params = {
        "audio_duration": 10,
        "infer_step": 50,
        "guidance_scale": 12.0,
        "scheduler_type": "euler",
        "cfg_type": "apg",
        "omega_scale": 5.0,
    }

    enhanced_prompt = enhance_prompt_with_genre(prompt, genre_preset) if genre_preset != "Custom" else prompt

    try:
        # Positional arguments mirror enhanced_process_func's signature.
        result = enhanced_process_func(
            preview_params["audio_duration"],
            enhanced_prompt,
            lyrics[:200],  # only a slice of the lyrics, for speed
            preview_params["infer_step"],
            preview_params["guidance_scale"],
            preview_params["scheduler_type"],
            preview_params["cfg_type"],
            preview_params["omega_scale"],
            None,  # manual_seeds
            0.5,  # guidance_interval
            0.0,  # guidance_interval_decay
            3.0,  # min_guidance_scale
            True,  # use_erg_tag
            False,  # use_erg_lyric
            True,  # use_erg_diffusion
            None,  # oss_steps
            0.0,  # guidance_scale_text
            0.0,  # guidance_scale_lyric
            multi_seed_mode="Single"
        )
        return result[0] if result else None
    except Exception as e:
        # NOTE(review): returns an error string where the caller wires this
        # into an Audio output expecting a filepath — confirm the UI copes.
        return f"프리뷰 생성 실패: {str(e)}"
648
+
649
+ preview_bnt.click(
650
+ fn=generate_preview,
651
+ inputs=[prompt, lyrics, genre_preset],
652
+ outputs=[outputs[0]]
653
+ )
654
+
655
  with gr.Tab("retake"):
656
  retake_variance = gr.Slider(
657
  minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
 
663
  retake_outputs, retake_input_params_json = create_output_ui("Retake")
664
 
665
def retake_process_func(json_data, retake_variance, retake_seeds):
    """Re-run generation from a previous run's parameter JSON with new
    seeds and variance; missing keys fall back to standard defaults."""
    return enhanced_process_func(
        json_data.get("audio_duration", 30),
        json_data.get("prompt", ""),
        json_data.get("lyrics", ""),
        json_data.get("infer_step", 100),
        json_data.get("guidance_scale", 15.0),
        json_data.get("scheduler_type", "euler"),
        json_data.get("cfg_type", "apg"),
        json_data.get("omega_scale", 10.0),
        retake_seeds,
        json_data.get("guidance_interval", 0.5),
        json_data.get("guidance_interval_decay", 0.0),
        json_data.get("min_guidance_scale", 3.0),
        json_data.get("use_erg_tag", True),
        json_data.get("use_erg_lyric", False),
        json_data.get("use_erg_diffusion", True),
        json_data.get("oss_steps", None),
        json_data.get("guidance_scale_text", 0.0),
        json_data.get("guidance_scale_lyric", 0.0),
        audio2audio_enable=json_data.get("audio2audio_enable", False),
        ref_audio_strength=json_data.get("ref_audio_strength", 0.5),
        ref_audio_input=json_data.get("ref_audio_input", None),
        lora_name_or_path=json_data.get("lora_name_or_path", "none"),
        multi_seed_mode="Best of 3",  # retake always generates multiple candidates
        retake_variance=retake_variance,
        task="retake"
    )
693
 
694
  retake_bnt.click(
 
700
  ],
701
  outputs=retake_outputs + [retake_input_params_json],
702
  )
703
+
704
  with gr.Tab("repainting"):
705
  retake_variance = gr.Slider(
706
  minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
 
787
  json_data = repaint_json_data
788
  src_audio_path = json_data["audio_path"]
789
 
790
+ return enhanced_process_func(
791
  json_data["audio_duration"],
792
  prompt,
793
  lyrics,
 
812
  repaint_start=repaint_start,
813
  repaint_end=repaint_end,
814
  src_audio_path=src_audio_path,
815
+ lora_name_or_path="none"
816
  )
817
 
818
  repaint_bnt.click(
 
846
  ],
847
  outputs=repaint_outputs + [repaint_input_params_json],
848
  )
849
+
850
  with gr.Tab("edit"):
851
  edit_prompt = gr.Textbox(lines=2, label="Edit Tags", max_lines=4)
852
  edit_lyrics = gr.Textbox(lines=9, label="Edit Lyrics", max_lines=13)
 
961
  if not edit_lyrics:
962
  edit_lyrics = lyrics
963
 
964
+ return enhanced_process_func(
965
  json_data["audio_duration"],
966
  prompt,
967
  lyrics,
 
987
  edit_n_min=edit_n_min,
988
  edit_n_max=edit_n_max,
989
  retake_seeds=retake_seeds,
990
+ lora_name_or_path="none"
991
  )
992
 
993
  edit_bnt.click(
 
1022
  ],
1023
  outputs=edit_outputs + [edit_input_params_json],
1024
  )
1025
+
1026
  with gr.Tab("extend"):
1027
  extend_seeds = gr.Textbox(
1028
  label="extend seeds (default None)", placeholder="", value=None
 
1108
 
1109
  repaint_start = -left_extend_length
1110
  repaint_end = json_data["audio_duration"] + right_extend_length
1111
+ return enhanced_process_func(
1112
  json_data["audio_duration"],
1113
  prompt,
1114
  lyrics,
 
1133
  repaint_start=repaint_start,
1134
  repaint_end=repaint_end,
1135
  src_audio_path=src_audio_path,
1136
+ lora_name_or_path="none"
1137
  )
1138
 
1139
  extend_bnt.click(
 
1213
  )
1214
 
1215
def sample_data(lora_name_or_path_):
    """Fetch example UI values via sample_data_func; no-op when it is unset."""
    if not sample_data_func:
        return {}
    return json2output(sample_data_func(lora_name_or_path_))
1220
 
1221
  sample_bnt.click(
1222
  sample_data,
 
1246
  ],
1247
  )
1248
 
1249
+ # 메인 생성 λ²„νŠΌ 이벀트 (ν–₯μƒλœ ν•¨μˆ˜ μ‚¬μš©)
1250
  text2music_bnt.click(
1251
+ fn=enhanced_process_func,
1252
  inputs=[
1253
  audio_duration,
1254
  prompt,
 
1272
  ref_audio_strength,
1273
  ref_audio_input,
1274
  lora_name_or_path,
1275
+ multi_seed_mode,
1276
+ enable_smart_enhancement,
1277
+ genre_preset
1278
  ],
1279
  outputs=outputs + [input_params_json],
1280
  )
 
1286
  load_data_func=dump_func,
1287
  ):
1288
  with gr.Blocks(
1289
+ title="ACE-Step Model 1.0 DEMO - Enhanced",
1290
+ theme=gr.themes.Soft(),
1291
+ css="""
1292
+ .gradio-container {
1293
+ max-width: 1200px !important;
1294
+ }
1295
+ .quality-info {
1296
+ background: linear-gradient(45deg, #f0f8ff, #e6f3ff);
1297
+ padding: 10px;
1298
+ border-radius: 8px;
1299
+ margin: 5px 0;
1300
+ }
1301
+ """
1302
  ) as demo:
1303
  gr.Markdown(
1304
  """
1305
+ <h1 style="text-align: center;">🎡 ACE-Step: Enhanced Music Generation Foundation Model</h1>
1306
+ <div style="text-align: center; margin: 20px;">
1307
+ <p><strong>πŸš€ μƒˆλ‘œμš΄ κΈ°λŠ₯:</strong> ν’ˆμ§ˆ 프리셋 | 닀쀑 생성 | 슀마트 ν”„λ‘¬ν”„νŠΈ | μ‹€μ‹œκ°„ 프리뷰 | ν’ˆμ§ˆ 점수</p>
1308
+ <p>
1309
+ <a href="https://ace-step.github.io/" target='_blank'>Project</a> |
1310
+ <a href="https://huggingface.co/ACE-Step/ACE-Step-v1-3.5B">Checkpoints</a> |
1311
+ <a href="https://discord.gg/rjAZz2xBdG" target='_blank'>Discord</a>
1312
+ </p>
1313
+ </div>
1314
  """
1315
  )
1316
+
1317
+ # μ‚¬μš©λ²• κ°€μ΄λ“œ μΆ”κ°€
1318
+ with gr.Accordion("πŸ“– μ‚¬μš©λ²• κ°€μ΄λ“œ", open=False):
1319
+ gr.Markdown("""
1320
+ ### 🎯 λΉ λ₯Έ μ‹œμž‘
1321
+ 1. **μž₯λ₯΄ 선택**: μ›ν•˜λŠ” μŒμ•… μž₯λ₯΄λ₯Ό μ„ νƒν•˜λ©΄ μžλ™μœΌλ‘œ μ΅œμ ν™”λœ νƒœκ·Έκ°€ μ μš©λ©λ‹ˆλ‹€
1322
+ 2. **ν’ˆμ§ˆ μ„€μ •**: Draft(빠름) β†’ Standard(ꢌμž₯) β†’ High Quality β†’ Ultra 쀑 선택
1323
+ 3. **닀쀑 생성**: "Best of 3/5/10" μ„ νƒν•˜λ©΄ μ—¬λŸ¬ 번 μƒμ„±ν•˜μ—¬ 졜고 ν’ˆμ§ˆμ„ μžλ™ μ„ νƒν•©λ‹ˆλ‹€
1324
+ 4. **프리뷰**: 전체 생성 μ „ 10초 ν”„λ¦¬λ·°λ‘œ λΉ λ₯΄κ²Œ 확인할 수 μžˆμŠ΅λ‹ˆλ‹€
1325
+
1326
+ ### πŸ’‘ ν’ˆμ§ˆ ν–₯상 팁
1327
+ - **κ³ ν’ˆμ§ˆ 생성**: "High Quality" + "Best of 5" μ‘°ν•© μΆ”μ²œ
1328
+ - **λΉ λ₯Έ ν…ŒμŠ€νŠΈ**: "Draft" + "프리뷰" κΈ°λŠ₯ ν™œμš©
1329
+ - **μž₯λ₯΄ νŠΉν™”**: μž₯λ₯΄ 프리셋 선택 ν›„ "슀마트 ν–₯상" 체크
1330
+ - **가사 ꡬ쑰**: [verse], [chorus], [bridge] νƒœκ·Έ 적극 ν™œμš©
1331
+ """)
1332
+
1333
+ with gr.Tab("🎡 Enhanced Text2Music"):
1334
  create_text2music_ui(
1335
  gr=gr,
1336
  text2music_process_func=text2music_process_func,
 
1345
  demo.launch(
1346
  server_name="0.0.0.0",
1347
  server_port=7860,
1348
+ share=True # 곡유 링크 생성
1349
+ )