ACE-Singer / ui /components.py
ginipick's picture
Create components.py
8973713 verified
raw
history blame
64.7 kB
"""
ACE-Step: A Step Towards Music Generation Foundation Model
https://github.com/ace-step/ACE-Step
Apache 2.0 License
"""
import gradio as gr
import librosa
import os
import random
import hashlib
import numpy as np
import json
from typing import Dict, List, Tuple, Optional
from openai import OpenAI
# OpenAI ν΄λΌμ΄μ–ΈνŠΈ μ΄ˆκΈ°ν™”
try:
client = OpenAI(api_key=os.getenv("LLM_API"))
except:
client = None
TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic, duet, male and female vocals"
LYRIC_DEFAULT = """[verse - male]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains
[verse - female]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere
[chorus - duet]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song
[verse - male]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold
[bridge - female]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies
[verse - duet]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
"""
# ν™•μž₯된 μž₯λ₯΄ 프리셋 (κΈ°μ‘΄ + κ°œμ„ λœ νƒœκ·Έ)
GENRE_PRESETS = {
"Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, duet vocals, polished vocals, radio-ready, commercial, layered vocals",
"Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, powerful vocals, raw vocals, power chords, driving rhythm",
"Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, rhythmic vocals, trap beats, punchy drums",
"Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, twangy vocals, storytelling, americana",
"EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic",
"Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, smooth vocals, offbeat rhythm, island vibes",
"Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony",
"Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, crooning vocals, swing feel, sophisticated",
"Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, powerful vocals, distorted, powerful",
"R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, silky vocals, smooth production, neo-soul",
"K-Pop": "k-pop, synth, bass, drums, 128 bpm, catchy, energetic, polished, mixed vocals, electronic elements, danceable",
"Ballad": "ballad, piano, strings, acoustic guitar, 70 bpm, emotional, heartfelt, romantic, expressive vocals, orchestral arrangement"
}
# 곑 μŠ€νƒ€μΌ μ˜΅μ…˜
SONG_STYLES = {
"λ“€μ—£ (남녀 ν˜Όμ„±)": "duet, male and female vocals, harmonious, call and response",
"μ†”λ‘œ (남성)": "solo, male vocals, powerful voice",
"μ†”λ‘œ (μ—¬μ„±)": "solo, female vocals, emotional voice",
"κ·Έλ£Ή (ν˜Όμ„±)": "group vocals, mixed gender, layered harmonies",
"ν•©μ°½": "choir, multiple voices, choral arrangement",
"랩/νž™ν•©": "rap vocals, rhythmic flow, urban style",
"μΈμŠ€νŠΈλ£¨λ©˜νƒˆ": "instrumental, no vocals"
}
# AI μž‘μ‚¬ μ‹œμŠ€ν…œ ν”„λ‘¬ν”„νŠΈ
LYRIC_SYSTEM_PROMPT = """λ„ˆλŠ” λ…Έλž˜ 가사λ₯Ό μž‘μ‚¬ν•˜λŠ” μ „λ¬Έκ°€ 역할이닀. μ΄μš©μžκ°€ μž…λ ₯ν•˜λŠ” μ£Όμ œμ™€ μŠ€νƒ€μΌμ— 따라 κ΄€λ ¨λœ λ…Έλž˜ 가사λ₯Ό μž‘μ„±ν•˜λΌ.
가사 μž‘μ„± κ·œμΉ™:
1. ꡬ쑰 νƒœκ·ΈλŠ” λ°˜λ“œμ‹œ "[ ]"둜 κ΅¬λΆ„ν•œλ‹€
2. μ‚¬μš© κ°€λŠ₯ν•œ ꡬ쑰 νƒœκ·Έ: [verse], [chorus], [bridge], [intro], [outro], [pre-chorus]
3. 듀엣인 경우 [verse - male], [verse - female], [chorus - duet] ν˜•μ‹μœΌλ‘œ 파트λ₯Ό λͺ…μ‹œν•œλ‹€
4. μž…λ ₯ 언어와 λ™μΌν•œ μ–Έμ–΄λ‘œ 가사λ₯Ό μž‘μ„±ν•œλ‹€
5. 각 κ΅¬μ‘°λŠ” 4-8쀄 μ •λ„λ‘œ μž‘μ„±ν•œλ‹€
6. μŒμ•… μž₯λ₯΄μ™€ λΆ„μœ„κΈ°μ— λ§žλŠ” 가사λ₯Ό μž‘μ„±ν•œλ‹€
μ˜ˆμ‹œ ν˜•μ‹:
[verse - male]
첫 번째 ꡬ절 가사
두 번째 ꡬ절 가사
...
[chorus - duet]
후렴ꡬ 가사
...
"""
def generate_lyrics_with_ai(prompt: str, genre: str, song_style: str, language: str = "auto") -> str:
"""AIλ₯Ό μ‚¬μš©ν•˜μ—¬ 가사 생성"""
if not client:
return LYRIC_DEFAULT
try:
# μ–Έμ–΄ 감지 및 μŠ€νƒ€μΌ 정보 μΆ”κ°€
style_info = ""
if "λ“€μ—£" in song_style:
style_info = "남녀 λ“€μ—£ ν˜•μ‹μœΌλ‘œ 파트λ₯Ό λ‚˜λˆ„μ–΄ μž‘μ„±ν•΄μ£Όμ„Έμš”."
elif "μ†”λ‘œ (남성)" in song_style:
style_info = "남성 μ†”λ‘œ κ°€μˆ˜λ₯Ό μœ„ν•œ 가사λ₯Ό μž‘μ„±ν•΄μ£Όμ„Έμš”."
elif "μ†”λ‘œ (μ—¬μ„±)" in song_style:
style_info = "μ—¬μ„± μ†”λ‘œ κ°€μˆ˜λ₯Ό μœ„ν•œ 가사λ₯Ό μž‘μ„±ν•΄μ£Όμ„Έμš”."
elif "κ·Έλ£Ή" in song_style:
style_info = "그룹이 λΆ€λ₯΄λŠ” ν˜•μ‹μœΌλ‘œ 파트λ₯Ό λ‚˜λˆ„μ–΄ μž‘μ„±ν•΄μ£Όμ„Έμš”."
user_prompt = f"""
주제: {prompt}
μž₯λ₯΄: {genre}
μŠ€νƒ€μΌ: {style_info}
μœ„ 정보λ₯Ό λ°”νƒ•μœΌλ‘œ λ…Έλž˜ 가사λ₯Ό μž‘μ„±ν•΄μ£Όμ„Έμš”. μž…λ ₯된 언어와 λ™μΌν•œ μ–Έμ–΄λ‘œ μž‘μ„±ν•˜κ³ , ꡬ쑰 νƒœκ·Έλ₯Ό λ°˜λ“œμ‹œ ν¬ν•¨ν•΄μ£Όμ„Έμš”.
"""
response = client.chat.completions.create(
model="gpt-4-mini",
messages=[
{"role": "system", "content": LYRIC_SYSTEM_PROMPT},
{"role": "user", "content": user_prompt}
],
temperature=0.8,
max_tokens=1000
)
return response.choices[0].message.content
except Exception as e:
print(f"AI 가사 생성 였λ₯˜: {e}")
return LYRIC_DEFAULT
# ν’ˆμ§ˆ 프리셋 μ‹œμŠ€ν…œ μΆ”κ°€
QUALITY_PRESETS = {
"Draft (Fast)": {
"infer_step": 50,
"guidance_scale": 10.0,
"scheduler_type": "euler",
"omega_scale": 5.0,
"use_erg_diffusion": False,
"use_erg_tag": True,
"description": "λΉ λ₯Έ μ΄ˆμ•ˆ 생성 (1-2λΆ„)"
},
"Standard": {
"infer_step": 150,
"guidance_scale": 15.0,
"scheduler_type": "euler",
"omega_scale": 10.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "ν‘œμ€€ ν’ˆμ§ˆ (3-5λΆ„)"
},
"High Quality": {
"infer_step": 200,
"guidance_scale": 18.0,
"scheduler_type": "heun",
"omega_scale": 15.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "κ³ ν’ˆμ§ˆ 생성 (8-12λΆ„)"
},
"Ultra (Best)": {
"infer_step": 299,
"guidance_scale": 20.0,
"scheduler_type": "heun",
"omega_scale": 20.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "졜고 ν’ˆμ§ˆ (15-20λΆ„)"
}
}
# 닀쀑 μ‹œλ“œ 생성 μ„€μ •
MULTI_SEED_OPTIONS = {
"Single": 1,
"Best of 3": 3,
"Best of 5": 5,
"Best of 10": 10
}
class MusicGenerationCache:
"""생성 κ²°κ³Ό 캐싱 μ‹œμŠ€ν…œ"""
def __init__(self):
self.cache = {}
self.max_cache_size = 50
def get_cache_key(self, params):
# μ€‘μš”ν•œ νŒŒλΌλ―Έν„°λ§ŒμœΌλ‘œ ν•΄μ‹œ 생성
key_params = {k: v for k, v in params.items()
if k in ['prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration']}
return hashlib.md5(str(sorted(key_params.items())).encode()).hexdigest()[:16]
def get_cached_result(self, params):
key = self.get_cache_key(params)
return self.cache.get(key)
def cache_result(self, params, result):
if len(self.cache) >= self.max_cache_size:
oldest_key = next(iter(self.cache))
del self.cache[oldest_key]
key = self.get_cache_key(params)
self.cache[key] = result
# μ „μ—­ μΊμ‹œ μΈμŠ€ν„΄μŠ€
generation_cache = MusicGenerationCache()
def enhance_prompt_with_genre(base_prompt: str, genre: str, song_style: str) -> str:
"""μž₯λ₯΄μ™€ μŠ€νƒ€μΌμ— λ”°λ₯Έ 슀마트 ν”„λ‘¬ν”„νŠΈ ν™•μž₯"""
if genre == "Custom" or not genre:
enhanced_prompt = base_prompt
else:
# μž₯λ₯΄λ³„ μΆ”κ°€ κ°œμ„  νƒœκ·Έ
genre_enhancements = {
"Modern Pop": ["polished production", "mainstream appeal", "hook-driven"],
"Rock": ["guitar-driven", "powerful drums", "energetic performance"],
"Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"],
"Country": ["acoustic warmth", "storytelling melody", "authentic feel"],
"EDM": ["electronic atmosphere", "build-ups", "dance-friendly"],
"Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"],
"Classical": ["orchestral depth", "musical sophistication", "timeless beauty"],
"Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"],
"Metal": ["aggressive energy", "powerful sound", "intense atmosphere"],
"R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"],
"K-Pop": ["catchy hooks", "dynamic arrangement", "polished production"],
"Ballad": ["emotional depth", "slow tempo", "heartfelt delivery"]
}
if genre in genre_enhancements:
additional_tags = ", ".join(genre_enhancements[genre])
enhanced_prompt = f"{base_prompt}, {additional_tags}"
else:
enhanced_prompt = base_prompt
# μŠ€νƒ€μΌ νƒœκ·Έ μΆ”κ°€
if song_style in SONG_STYLES:
style_tags = SONG_STYLES[song_style]
enhanced_prompt = f"{enhanced_prompt}, {style_tags}"
return enhanced_prompt
def calculate_quality_score(audio_path: str) -> float:
"""κ°„λ‹¨ν•œ ν’ˆμ§ˆ 점수 계산 (μ‹€μ œ κ΅¬ν˜„μ—μ„œλŠ” 더 λ³΅μž‘ν•œ λ©”νŠΈλ¦­ μ‚¬μš©)"""
try:
y, sr = librosa.load(audio_path)
# κΈ°λ³Έ ν’ˆμ§ˆ λ©”νŠΈλ¦­
rms_energy = np.sqrt(np.mean(y**2))
spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y))
# μ •κ·œν™”λœ 점수 (0-100)
energy_score = min(rms_energy * 1000, 40) # 0-40점
spectral_score = min(spectral_centroid / 100, 40) # 0-40점
clarity_score = min((1 - zero_crossing_rate) * 20, 20) # 0-20점
total_score = energy_score + spectral_score + clarity_score
return round(total_score, 1)
except:
return 50.0 # κΈ°λ³Έκ°’
def update_tags_from_preset(preset_name):
if preset_name == "Custom":
return ""
return GENRE_PRESETS.get(preset_name, "")
def update_quality_preset(preset_name):
"""ν’ˆμ§ˆ 프리셋 적용"""
if preset_name not in QUALITY_PRESETS:
return (100, 15.0, "euler", 10.0, True, True)
preset = QUALITY_PRESETS[preset_name]
return (
preset.get("infer_step", 100),
preset.get("guidance_scale", 15.0),
preset.get("scheduler_type", "euler"),
preset.get("omega_scale", 10.0),
preset.get("use_erg_diffusion", True),
preset.get("use_erg_tag", True)
)
def create_enhanced_process_func(original_func):
"""κΈ°μ‘΄ ν•¨μˆ˜λ₯Ό ν–₯μƒλœ κΈ°λŠ₯으둜 λž˜ν•‘"""
def enhanced_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric,
audio2audio_enable=False, ref_audio_strength=0.5, ref_audio_input=None,
lora_name_or_path="none", multi_seed_mode="Single",
enable_smart_enhancement=True, genre_preset="Custom", song_style="λ“€μ—£ (남녀 ν˜Όμ„±)", **kwargs
):
# 슀마트 ν”„λ‘¬ν”„νŠΈ ν™•μž₯
if enable_smart_enhancement:
prompt = enhance_prompt_with_genre(prompt, genre_preset, song_style)
# μΊμ‹œ 확인
cache_params = {
'prompt': prompt, 'lyrics': lyrics, 'audio_duration': audio_duration,
'infer_step': infer_step, 'guidance_scale': guidance_scale
}
cached_result = generation_cache.get_cached_result(cache_params)
if cached_result:
return cached_result
# 닀쀑 μ‹œλ“œ 생성
num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1)
if num_candidates == 1:
# κΈ°μ‘΄ ν•¨μˆ˜ 호좜
result = original_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
else:
# 닀쀑 μ‹œλ“œ 생성 및 졜적 선택
candidates = []
for i in range(num_candidates):
seed = random.randint(1, 10000)
try:
result = original_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, str(seed),
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
if result and len(result) > 0:
audio_path = result[0] # 첫 번째 κ²°κ³Όκ°€ μ˜€λ””μ˜€ 파일 경둜
if audio_path and os.path.exists(audio_path):
quality_score = calculate_quality_score(audio_path)
candidates.append({
"result": result,
"quality_score": quality_score,
"seed": seed
})
except Exception as e:
print(f"Generation {i+1} failed: {e}")
continue
if candidates:
# 졜고 ν’ˆμ§ˆ 선택
best_candidate = max(candidates, key=lambda x: x["quality_score"])
result = best_candidate["result"]
# ν’ˆμ§ˆ 정보 μΆ”κ°€
if len(result) > 1 and isinstance(result[1], dict):
result[1]["quality_score"] = best_candidate["quality_score"]
result[1]["selected_seed"] = best_candidate["seed"]
result[1]["candidates_count"] = len(candidates)
else:
# λͺ¨λ“  생성 μ‹€νŒ¨μ‹œ κΈ°λ³Έ 생성
result = original_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
# κ²°κ³Ό μΊμ‹œ
generation_cache.cache_result(cache_params, result)
return result
return enhanced_func
def create_output_ui(task_name="Text2Music"):
# For many consumer-grade GPU devices, only one batch can be run
output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False):
input_params_json = gr.JSON(label=f"{task_name} Parameters")
# ν’ˆμ§ˆ 정보 ν‘œμ‹œ μΆ”κ°€
with gr.Row():
quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False)
generation_info = gr.Textbox(
label="Generation Info",
value="",
interactive=False,
max_lines=2
)
outputs = [output_audio1]
return outputs, input_params_json
def dump_func(*args):
print(args)
return []
def create_text2music_ui(
gr,
text2music_process_func,
sample_data_func=None,
load_data_func=None,
):
# ν–₯μƒλœ ν”„λ‘œμ„ΈμŠ€ ν•¨μˆ˜ 생성
enhanced_process_func = create_enhanced_process_func(text2music_process_func)
with gr.Row():
with gr.Column():
# ν’ˆμ§ˆ 및 μ„±λŠ₯ μ„€μ • μ„Ήμ…˜ μΆ”κ°€
with gr.Group():
gr.Markdown("### ⚑ ν’ˆμ§ˆ & μ„±λŠ₯ μ„€μ •")
with gr.Row():
quality_preset = gr.Dropdown(
choices=list(QUALITY_PRESETS.keys()),
value="Standard",
label="ν’ˆμ§ˆ 프리셋",
scale=2
)
multi_seed_mode = gr.Dropdown(
choices=list(MULTI_SEED_OPTIONS.keys()),
value="Single",
label="닀쀑 생성 λͺ¨λ“œ",
scale=2,
info="μ—¬λŸ¬ 번 μƒμ„±ν•˜μ—¬ 졜고 ν’ˆμ§ˆ 선택"
)
preset_description = gr.Textbox(
value=QUALITY_PRESETS["Standard"]["description"],
label="μ„€λͺ…",
interactive=False,
max_lines=1
)
with gr.Row(equal_height=True):
# add markdown, tags and lyrics examples are from ai music generation community
audio_duration = gr.Slider(
-1,
240.0,
step=0.00001,
value=-1,
label="Audio Duration",
interactive=True,
info="-1 means random duration (30 ~ 240).",
scale=7,
)
random_bnt = gr.Button("🎲 Random", variant="secondary", scale=1)
preview_bnt = gr.Button("🎡 Preview", variant="secondary", scale=2)
# audio2audio
with gr.Row(equal_height=True):
audio2audio_enable = gr.Checkbox(
label="Enable Audio2Audio",
value=False,
info="Check to enable Audio-to-Audio generation using a reference audio.",
elem_id="audio2audio_checkbox"
)
lora_name_or_path = gr.Dropdown(
label="Lora Name or Path",
choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
value="none",
allow_custom_value=True,
)
ref_audio_input = gr.Audio(
type="filepath",
label="Reference Audio (for Audio2Audio)",
visible=False,
elem_id="ref_audio_input",
show_download_button=True
)
ref_audio_strength = gr.Slider(
label="Refer audio strength",
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
elem_id="ref_audio_strength",
visible=False,
interactive=True,
)
def toggle_ref_audio_visibility(is_checked):
return (
gr.update(visible=is_checked, elem_id="ref_audio_input"),
gr.update(visible=is_checked, elem_id="ref_audio_strength"),
)
audio2audio_enable.change(
fn=toggle_ref_audio_visibility,
inputs=[audio2audio_enable],
outputs=[ref_audio_input, ref_audio_strength],
)
with gr.Column(scale=2):
with gr.Group():
gr.Markdown("""### 🎼 슀마트 ν”„λ‘¬ν”„νŠΈ μ‹œμŠ€ν…œ
<center>μž₯λ₯΄μ™€ μŠ€νƒ€μΌμ„ μ„ νƒν•˜λ©΄ μžλ™μœΌλ‘œ μ΅œμ ν™”λœ νƒœκ·Έκ°€ μΆ”κ°€λ©λ‹ˆλ‹€.</center>""")
with gr.Row():
genre_preset = gr.Dropdown(
choices=["Custom"] + list(GENRE_PRESETS.keys()),
value="Custom",
label="μž₯λ₯΄ 프리셋",
scale=1,
)
song_style = gr.Dropdown(
choices=list(SONG_STYLES.keys()),
value="λ“€μ—£ (남녀 ν˜Όμ„±)",
label="곑 μŠ€νƒ€μΌ",
scale=1,
)
enable_smart_enhancement = gr.Checkbox(
label="슀마트 ν–₯상",
value=True,
info="μžλ™ νƒœκ·Έ μ΅œμ ν™”",
scale=1
)
prompt = gr.Textbox(
lines=2,
label="Tags",
max_lines=4,
value=TAG_DEFAULT,
placeholder="콀마둜 κ΅¬λΆ„λœ νƒœκ·Έλ“€...",
)
with gr.Group():
gr.Markdown("""### πŸ“ AI μž‘μ‚¬ μ‹œμŠ€ν…œ
<center>주제λ₯Ό μž…λ ₯ν•˜κ³  'AI μž‘μ‚¬' λ²„νŠΌμ„ ν΄λ¦­ν•˜λ©΄ μžλ™μœΌλ‘œ 가사가 μƒμ„±λ©λ‹ˆλ‹€.</center>""")
with gr.Row():
lyric_prompt = gr.Textbox(
label="μž‘μ‚¬ 주제",
placeholder="예: μ²«μ‚¬λž‘μ˜ μ„€λ ˜, μ΄λ³„μ˜ μ•„ν””, 희망찬 내일...",
scale=3
)
generate_lyrics_btn = gr.Button("πŸ€– AI μž‘μ‚¬", variant="secondary", scale=1)
lyrics = gr.Textbox(
lines=9,
label="Lyrics",
max_lines=13,
value=LYRIC_DEFAULT,
placeholder="가사λ₯Ό μž…λ ₯ν•˜μ„Έμš”. [verse], [chorus] λ“±μ˜ ꡬ쑰 νƒœκ·Έ μ‚¬μš©μ„ ꢌμž₯ν•©λ‹ˆλ‹€."
)
with gr.Accordion("Basic Settings", open=False):
infer_step = gr.Slider(
minimum=1,
maximum=300,
step=1,
value=150,
label="Infer Steps",
interactive=True,
)
guidance_scale = gr.Slider(
minimum=0.0,
maximum=30.0,
step=0.1,
value=15.0,
label="Guidance Scale",
interactive=True,
info="When guidance_scale_lyric > 1 and guidance_scale_text > 1, the guidance scale will not be applied.",
)
guidance_scale_text = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Text",
interactive=True,
info="Guidance scale for text condition. It can only apply to cfg. set guidance_scale_text=5.0, guidance_scale_lyric=1.5 for start",
)
guidance_scale_lyric = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Lyric",
interactive=True,
)
manual_seeds = gr.Textbox(
label="manual seeds (default None)",
placeholder="1,2,3,4",
value=None,
info="Seed for the generation",
)
with gr.Accordion("Advanced Settings", open=False):
scheduler_type = gr.Radio(
["euler", "heun"],
value="euler",
label="Scheduler Type",
elem_id="scheduler_type",
info="Scheduler type for the generation. euler is recommended. heun will take more time.",
)
cfg_type = gr.Radio(
["cfg", "apg", "cfg_star"],
value="apg",
label="CFG Type",
elem_id="cfg_type",
info="CFG type for the generation. apg is recommended. cfg and cfg_star are almost the same.",
)
use_erg_tag = gr.Checkbox(
label="use ERG for tag",
value=True,
info="Use Entropy Rectifying Guidance for tag. It will multiple a temperature to the attention to make a weaker tag condition and make better diversity.",
)
use_erg_lyric = gr.Checkbox(
label="use ERG for lyric",
value=False,
info="The same but apply to lyric encoder's attention.",
)
use_erg_diffusion = gr.Checkbox(
label="use ERG for diffusion",
value=True,
info="The same but apply to diffusion model's attention.",
)
omega_scale = gr.Slider(
minimum=-100.0,
maximum=100.0,
step=0.1,
value=10.0,
label="Granularity Scale",
interactive=True,
info="Granularity scale for the generation. Higher values can reduce artifacts",
)
guidance_interval = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
label="Guidance Interval",
interactive=True,
info="Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)",
)
guidance_interval_decay = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.0,
label="Guidance Interval Decay",
interactive=True,
info="Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.",
)
min_guidance_scale = gr.Slider(
minimum=0.0,
maximum=200.0,
step=0.1,
value=3.0,
label="Min Guidance Scale",
interactive=True,
info="Min guidance scale for guidance interval decay's end scale",
)
oss_steps = gr.Textbox(
label="OSS Steps",
placeholder="16, 29, 52, 96, 129, 158, 172, 183, 189, 200",
value=None,
info="Optimal Steps for the generation. But not test well",
)
text2music_bnt = gr.Button("🎡 Generate Music", variant="primary", size="lg")
# AI μž‘μ‚¬ λ²„νŠΌ 이벀트
def generate_ai_lyrics(lyric_prompt, genre_preset, song_style):
if not lyric_prompt:
return "μž‘μ‚¬ 주제λ₯Ό μž…λ ₯ν•΄μ£Όμ„Έμš”."
return generate_lyrics_with_ai(lyric_prompt, genre_preset, song_style)
generate_lyrics_btn.click(
fn=generate_ai_lyrics,
inputs=[lyric_prompt, genre_preset, song_style],
outputs=[lyrics]
)
# 랜덀 데이터 생성 ν•¨μˆ˜
def generate_random_music_data(genre_preset, song_style):
# 랜덀 μž₯λ₯΄ 선택
if genre_preset == "Custom":
genre = random.choice(list(GENRE_PRESETS.keys()))
else:
genre = genre_preset
# 랜덀 주제 리슀트
themes = [
"λ„μ‹œμ˜ λ°€", "μ²«μ‚¬λž‘μ˜ μΆ”μ–΅", "μ—¬λ¦„λ‚ μ˜ ν•΄λ³€", "κ°€μ„μ˜ μ •μ·¨",
"희망찬 내일", "자유둜운 영혼", "별빛 μ•„λž˜ μΆ€", "청좘의 μ—΄μ •",
"λΉ„ μ˜€λŠ” λ‚ μ˜ 감성", "κΏˆμ„ ν–₯ν•œ 도전", "이별 ν›„μ˜ μ„±μž₯", "μƒˆλ‘œμš΄ μ‹œμž‘"
]
# 랜덀 μ„€μ •
duration = random.choice([30, 60, 90, 120, 180])
theme = random.choice(themes)
# AI둜 가사 생성
lyrics = generate_lyrics_with_ai(theme, genre, song_style)
# νƒœκ·Έ 생성
tags = GENRE_PRESETS.get(genre, "")
if song_style in SONG_STYLES:
tags = f"{tags}, {SONG_STYLES[song_style]}"
# 랜덀 νŒŒλΌλ―Έν„° μ„€μ •
return (
duration, # audio_duration
tags, # prompt
lyrics, # lyrics
150, # infer_step
15.0, # guidance_scale
"euler", # scheduler_type
"apg", # cfg_type
10.0, # omega_scale
str(random.randint(1, 10000)), # manual_seeds
0.5, # guidance_interval
0.0, # guidance_interval_decay
3.0, # min_guidance_scale
True, # use_erg_tag
False, # use_erg_lyric
True, # use_erg_diffusion
None, # oss_steps
0.0, # guidance_scale_text
0.0, # guidance_scale_lyric
False, # audio2audio_enable
0.5, # ref_audio_strength
None, # ref_audio_input
)
# λͺ¨λ“  UI μš”μ†Œκ°€ μ •μ˜λœ ν›„ 이벀트 ν•Έλ“€λŸ¬ μ„€μ •
genre_preset.change(
fn=update_tags_from_preset,
inputs=[genre_preset],
outputs=[prompt]
)
quality_preset.change(
fn=lambda x: QUALITY_PRESETS.get(x, {}).get("description", ""),
inputs=[quality_preset],
outputs=[preset_description]
)
quality_preset.change(
fn=update_quality_preset,
inputs=[quality_preset],
outputs=[infer_step, guidance_scale, scheduler_type, omega_scale, use_erg_diffusion, use_erg_tag]
)
with gr.Column():
outputs, input_params_json = create_output_ui()
# μ‹€μ‹œκ°„ 프리뷰 κΈ°λŠ₯
def generate_preview(prompt, lyrics, genre_preset, song_style):
"""10초 프리뷰 생성"""
preview_params = {
"audio_duration": 10,
"infer_step": 50,
"guidance_scale": 12.0,
"scheduler_type": "euler",
"cfg_type": "apg",
"omega_scale": 5.0,
}
enhanced_prompt = enhance_prompt_with_genre(prompt, genre_preset, song_style)
try:
# μ‹€μ œ κ΅¬ν˜„μ—μ„œλŠ” λΉ λ₯Έ 생성 λͺ¨λ“œ μ‚¬μš©
result = enhanced_process_func(
preview_params["audio_duration"],
enhanced_prompt,
lyrics[:200], # 가사 μΌλΆ€λ§Œ μ‚¬μš©
preview_params["infer_step"],
preview_params["guidance_scale"],
preview_params["scheduler_type"],
preview_params["cfg_type"],
preview_params["omega_scale"],
None, # manual_seeds
0.5, # guidance_interval
0.0, # guidance_interval_decay
3.0, # min_guidance_scale
True, # use_erg_tag
False, # use_erg_lyric
True, # use_erg_diffusion
None, # oss_steps
0.0, # guidance_scale_text
0.0, # guidance_scale_lyric
multi_seed_mode="Single",
song_style=song_style
)
return result[0] if result else None
except Exception as e:
return f"프리뷰 생성 μ‹€νŒ¨: {str(e)}"
preview_bnt.click(
fn=generate_preview,
inputs=[prompt, lyrics, genre_preset, song_style],
outputs=[outputs[0]]
)
with gr.Tab("retake"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="retake seeds (default None)", placeholder="", value=None
)
retake_bnt = gr.Button("Retake", variant="primary")
retake_outputs, retake_input_params_json = create_output_ui("Retake")
def retake_process_func(json_data, retake_variance, retake_seeds):
return enhanced_process_func(
json_data.get("audio_duration", 30),
json_data.get("prompt", ""),
json_data.get("lyrics", ""),
json_data.get("infer_step", 100),
json_data.get("guidance_scale", 15.0),
json_data.get("scheduler_type", "euler"),
json_data.get("cfg_type", "apg"),
json_data.get("omega_scale", 10.0),
retake_seeds,
json_data.get("guidance_interval", 0.5),
json_data.get("guidance_interval_decay", 0.0),
json_data.get("min_guidance_scale", 3.0),
json_data.get("use_erg_tag", True),
json_data.get("use_erg_lyric", False),
json_data.get("use_erg_diffusion", True),
json_data.get("oss_steps", None),
json_data.get("guidance_scale_text", 0.0),
json_data.get("guidance_scale_lyric", 0.0),
audio2audio_enable=json_data.get("audio2audio_enable", False),
ref_audio_strength=json_data.get("ref_audio_strength", 0.5),
ref_audio_input=json_data.get("ref_audio_input", None),
lora_name_or_path=json_data.get("lora_name_or_path", "none"),
multi_seed_mode="Best of 3", # retakeλŠ” μžλ™μœΌλ‘œ 닀쀑 생성
retake_variance=retake_variance,
task="retake"
)
retake_bnt.click(
fn=retake_process_func,
inputs=[
input_params_json,
retake_variance,
retake_seeds,
],
outputs=retake_outputs + [retake_input_params_json],
)
with gr.Tab("repainting"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="repaint seeds (default None)", placeholder="", value=None
)
repaint_start = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Repaint Start Time",
interactive=True,
)
repaint_end = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Repaint End Time",
interactive=True,
)
repaint_source = gr.Radio(
["text2music", "last_repaint", "upload"],
value="text2music",
label="Repaint Source",
elem_id="repaint_source",
)
repaint_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="repaint_source_audio_upload",
show_download_button=True,
)
repaint_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="repaint_source_audio_upload"
),
inputs=[repaint_source],
outputs=[repaint_source_audio_upload],
)
repaint_bnt = gr.Button("Repaint", variant="primary")
repaint_outputs, repaint_input_params_json = create_output_ui("Repaint")
def repaint_process_func(
text2music_json_data,
repaint_json_data,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if repaint_source == "upload":
src_audio_path = repaint_source_audio_upload
audio_duration = librosa.get_duration(filename=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif repaint_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif repaint_source == "last_repaint":
json_data = repaint_json_data
src_audio_path = json_data["audio_path"]
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=retake_seeds,
retake_variance=retake_variance,
task="repaint",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
repaint_bnt.click(
fn=repaint_process_func,
inputs=[
input_params_json,
repaint_input_params_json,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=repaint_outputs + [repaint_input_params_json],
)
with gr.Tab("edit"):
edit_prompt = gr.Textbox(lines=2, label="Edit Tags", max_lines=4)
edit_lyrics = gr.Textbox(lines=9, label="Edit Lyrics", max_lines=13)
retake_seeds = gr.Textbox(
label="edit seeds (default None)", placeholder="", value=None
)
edit_type = gr.Radio(
["only_lyrics", "remix"],
value="only_lyrics",
label="Edit Type",
elem_id="edit_type",
info="`only_lyrics` will keep the whole song the same except lyrics difference. Make your diffrence smaller, e.g. one lyrc line change.\nremix can change the song melody and genre",
)
edit_n_min = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.6,
label="edit_n_min",
interactive=True,
)
edit_n_max = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=1.0,
label="edit_n_max",
interactive=True,
)
def edit_type_change_func(edit_type):
if edit_type == "only_lyrics":
n_min = 0.6
n_max = 1.0
elif edit_type == "remix":
n_min = 0.2
n_max = 0.4
return n_min, n_max
edit_type.change(
edit_type_change_func,
inputs=[edit_type],
outputs=[edit_n_min, edit_n_max],
)
edit_source = gr.Radio(
["text2music", "last_edit", "upload"],
value="text2music",
label="Edit Source",
elem_id="edit_source",
)
edit_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="edit_source_audio_upload",
show_download_button=True,
)
edit_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="edit_source_audio_upload"
),
inputs=[edit_source],
outputs=[edit_source_audio_upload],
)
edit_bnt = gr.Button("Edit", variant="primary")
edit_outputs, edit_input_params_json = create_output_ui("Edit")
def edit_process_func(
text2music_json_data,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
):
if edit_source == "upload":
src_audio_path = edit_source_audio_upload
audio_duration = librosa.get_duration(filename=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif edit_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif edit_source == "last_edit":
json_data = edit_input_params_json
src_audio_path = json_data["audio_path"]
if not edit_prompt:
edit_prompt = prompt
if not edit_lyrics:
edit_lyrics = lyrics
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
task="edit",
src_audio_path=src_audio_path,
edit_target_prompt=edit_prompt,
edit_target_lyrics=edit_lyrics,
edit_n_min=edit_n_min,
edit_n_max=edit_n_max,
retake_seeds=retake_seeds,
lora_name_or_path="none"
)
edit_bnt.click(
fn=edit_process_func,
inputs=[
input_params_json,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
],
outputs=edit_outputs + [edit_input_params_json],
)
with gr.Tab("extend"):
extend_seeds = gr.Textbox(
label="extend seeds (default None)", placeholder="", value=None
)
left_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Left Extend Length",
interactive=True,
)
right_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Right Extend Length",
interactive=True,
)
extend_source = gr.Radio(
["text2music", "last_extend", "upload"],
value="text2music",
label="Extend Source",
elem_id="extend_source",
)
extend_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="extend_source_audio_upload",
show_download_button=True,
)
extend_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="extend_source_audio_upload"
),
inputs=[extend_source],
outputs=[extend_source_audio_upload],
)
extend_bnt = gr.Button("Extend", variant="primary")
extend_outputs, extend_input_params_json = create_output_ui("Extend")
def extend_process_func(
text2music_json_data,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if extend_source == "upload":
src_audio_path = extend_source_audio_upload
# get audio duration
audio_duration = librosa.get_duration(filename=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif extend_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif extend_source == "last_extend":
json_data = extend_input_params_json
src_audio_path = json_data["audio_path"]
repaint_start = -left_extend_length
repaint_end = json_data["audio_duration"] + right_extend_length
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=extend_seeds,
retake_variance=1.0,
task="extend",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
extend_bnt.click(
fn=extend_process_func,
inputs=[
input_params_json,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=extend_outputs + [extend_input_params_json],
)
# Random λ²„νŠΌ 이벀트
random_bnt.click(
fn=generate_random_music_data,
inputs=[genre_preset, song_style],
outputs=[
audio_duration,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
audio2audio_enable,
ref_audio_strength,
ref_audio_input,
],
)
# 메인 생성 λ²„νŠΌ 이벀트 (ν–₯μƒλœ ν•¨μˆ˜ μ‚¬μš©)
text2music_bnt.click(
fn=enhanced_process_func,
inputs=[
audio_duration,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
audio2audio_enable,
ref_audio_strength,
ref_audio_input,
lora_name_or_path,
multi_seed_mode,
enable_smart_enhancement,
genre_preset,
song_style
],
outputs=outputs + [input_params_json],
)
def create_main_demo_ui(
text2music_process_func=dump_func,
sample_data_func=dump_func,
load_data_func=dump_func,
):
with gr.Blocks(
title="ACE-Step Model 1.0 DEMO - Enhanced",
theme=gr.themes.Soft(),
css="""
/* κ·ΈλΌλ””μ–ΈνŠΈ λ°°κ²½ */
.gradio-container {
max-width: 1200px !important;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
}
/* 메인 μ»¨ν…Œμ΄λ„ˆ μŠ€νƒ€μΌ */
.main-container {
background: rgba(255, 255, 255, 0.95);
border-radius: 20px;
padding: 30px;
margin: 20px auto;
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
}
/* 헀더 μŠ€νƒ€μΌ */
.header-title {
background: linear-gradient(45deg, #667eea, #764ba2);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
font-size: 3em;
font-weight: bold;
text-align: center;
margin-bottom: 10px;
}
/* λ²„νŠΌ μŠ€νƒ€μΌ */
.gr-button-primary {
background: linear-gradient(45deg, #667eea, #764ba2) !important;
border: none !important;
color: white !important;
font-weight: bold !important;
transition: all 0.3s ease !important;
}
.gr-button-primary:hover {
transform: translateY(-2px);
box-shadow: 0 10px 20px rgba(102, 126, 234, 0.3);
}
.gr-button-secondary {
background: linear-gradient(45deg, #f093fb, #f5576c) !important;
border: none !important;
color: white !important;
transition: all 0.3s ease !important;
}
/* κ·Έλ£Ή μŠ€νƒ€μΌ */
.gr-group {
background: rgba(255, 255, 255, 0.8) !important;
border: 1px solid rgba(102, 126, 234, 0.2) !important;
border-radius: 15px !important;
padding: 20px !important;
margin: 10px 0 !important;
backdrop-filter: blur(10px) !important;
}
/* νƒ­ μŠ€νƒ€μΌ */
.gr-tab {
background: rgba(255, 255, 255, 0.9) !important;
border-radius: 10px !important;
padding: 15px !important;
}
/* μž…λ ₯ ν•„λ“œ μŠ€νƒ€μΌ */
.gr-textbox, .gr-dropdown, .gr-slider {
border: 2px solid rgba(102, 126, 234, 0.3) !important;
border-radius: 10px !important;
transition: all 0.3s ease !important;
}
.gr-textbox:focus, .gr-dropdown:focus {
border-color: #667eea !important;
box-shadow: 0 0 10px rgba(102, 126, 234, 0.2) !important;
}
/* ν’ˆμ§ˆ 정보 μŠ€νƒ€μΌ */
.quality-info {
background: linear-gradient(135deg, #f093fb20, #f5576c20);
padding: 15px;
border-radius: 10px;
margin: 10px 0;
border: 1px solid rgba(240, 147, 251, 0.3);
}
/* μ• λ‹ˆλ©”μ΄μ…˜ */
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(20px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
.gr-row, .gr-column {
animation: fadeIn 0.5s ease-out;
}
/* μŠ€ν¬λ‘€λ°” μŠ€νƒ€μΌ */
::-webkit-scrollbar {
width: 10px;
}
::-webkit-scrollbar-track {
background: rgba(255, 255, 255, 0.1);
border-radius: 10px;
}
::-webkit-scrollbar-thumb {
background: linear-gradient(45deg, #667eea, #764ba2);
border-radius: 10px;
}
/* λ§ˆν¬λ‹€μš΄ μŠ€νƒ€μΌ */
.gr-markdown {
color: #4a5568 !important;
}
.gr-markdown h3 {
color: #667eea !important;
font-weight: 600 !important;
margin: 15px 0 !important;
}
"""
) as demo:
with gr.Column(elem_classes="main-container"):
gr.HTML(
"""
<h1 class="header-title">🎡 ACE-Step PRO</h1>
<div style="text-align: center; margin: 20px;">
<p style="font-size: 1.2em; color: #4a5568;"><strong>πŸš€ μƒˆλ‘œμš΄ κΈ°λŠ₯:</strong> AI μž‘μ‚¬ | ν’ˆμ§ˆ 프리셋 | 닀쀑 생성 | 슀마트 ν”„λ‘¬ν”„νŠΈ | μ‹€μ‹œκ°„ 프리뷰</p>
<p style="margin-top: 10px;">
<a href="https://ace-step.github.io/" target='_blank' style="color: #667eea; text-decoration: none; margin: 0 10px;">πŸ“„ Project</a> |
<a href="https://huggingface.co/ACE-Step/ACE-Step-v1-3.5B" style="color: #667eea; text-decoration: none; margin: 0 10px;">πŸ€— Checkpoints</a> |
<a href="https://discord.gg/rjAZz2xBdG" target='_blank' style="color: #667eea; text-decoration: none; margin: 0 10px;">πŸ’¬ Discord</a>
</p>
</div>
"""
)
# μ‚¬μš©λ²• κ°€μ΄λ“œ μΆ”κ°€
with gr.Accordion("πŸ“– μ‚¬μš©λ²• κ°€μ΄λ“œ", open=False):
gr.Markdown("""
### 🎯 λΉ λ₯Έ μ‹œμž‘
1. **μž₯λ₯΄ & μŠ€νƒ€μΌ 선택**: μ›ν•˜λŠ” μŒμ•… μž₯λ₯΄μ™€ 곑 μŠ€νƒ€μΌ(λ“€μ—£, μ†”λ‘œ λ“±)을 μ„ νƒν•©λ‹ˆλ‹€
2. **AI μž‘μ‚¬**: 주제λ₯Ό μž…λ ₯ν•˜κ³  'AI μž‘μ‚¬' λ²„νŠΌμœΌλ‘œ μžλ™ 가사λ₯Ό μƒμ„±ν•©λ‹ˆλ‹€
3. **ν’ˆμ§ˆ μ„€μ •**: Draft(빠름) β†’ Standard(ꢌμž₯) β†’ High Quality β†’ Ultra 쀑 선택
4. **닀쀑 생성**: "Best of 3/5/10" μ„ νƒν•˜λ©΄ μ—¬λŸ¬ 번 μƒμ„±ν•˜μ—¬ 졜고 ν’ˆμ§ˆμ„ μžλ™ μ„ νƒν•©λ‹ˆλ‹€
5. **프리뷰**: 전체 생성 μ „ 10초 ν”„λ¦¬λ·°λ‘œ λΉ λ₯΄κ²Œ 확인할 수 μžˆμŠ΅λ‹ˆλ‹€
### πŸ’‘ ν’ˆμ§ˆ ν–₯상 팁
- **κ³ ν’ˆμ§ˆ 생성**: "High Quality" + "Best of 5" μ‘°ν•© μΆ”μ²œ
- **λΉ λ₯Έ ν…ŒμŠ€νŠΈ**: "Draft" + "프리뷰" κΈ°λŠ₯ ν™œμš©
- **μž₯λ₯΄ νŠΉν™”**: μž₯λ₯΄ 프리셋 선택 ν›„ "슀마트 ν–₯상" 체크
- **가사 ꡬ쑰**: [verse], [chorus], [bridge] νƒœκ·Έ 적극 ν™œμš©
- **λ‹€κ΅­μ–΄ 지원**: ν•œκ΅­μ–΄λ‘œ 주제λ₯Ό μž…λ ₯ν•˜λ©΄ ν•œκ΅­μ–΄ 가사가 μƒμ„±λ©λ‹ˆλ‹€
""")
with gr.Tab("🎡 Enhanced Text2Music", elem_classes="gr-tab"):
create_text2music_ui(
gr=gr,
text2music_process_func=text2music_process_func,
sample_data_func=sample_data_func,
load_data_func=load_data_func,
)
return demo
if __name__ == "__main__":
demo = create_main_demo_ui()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=True # 곡유 링크 생성
)