"""
ACE-Step: A Step Towards Music Generation Foundation Model
https://github.com/ace-step/ACE-Step
Apache 2.0 License
"""
import gradio as gr
import librosa
import os
import random
import hashlib
import numpy as np
# Initialize the OpenAI client
try:
api_key = os.getenv("LLM_API") or os.getenv("OPENAI_API_KEY")
if api_key:
from openai import OpenAI
client = OpenAI(api_key=api_key)
print("✅ OpenAI API client initialized successfully")
else:
client = None
print("⚠️ Warning: No OpenAI API key found. AI lyrics generation will be disabled.")
except Exception as e:
client = None
print(f"❌ Warning: Failed to initialize OpenAI client: {e}")
TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic, duet, male and female vocals"
LYRIC_DEFAULT = """[verse - male]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains
[verse - female]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere
[chorus - duet]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song
[verse - male]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold
[bridge - female]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies
[verse - duet]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
"""
# Expanded genre presets (base tags plus enhancement tags)
GENRE_PRESETS = {
"Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, polished vocals, radio-ready, commercial, layered vocals",
"Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, powerful vocals, raw vocals, power chords, driving rhythm",
"Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, rhythmic vocals, trap beats, punchy drums",
"Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, twangy vocals, storytelling, americana",
"EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic",
"Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, smooth vocals, offbeat rhythm, island vibes",
"Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony",
"Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, crooning vocals, swing feel, sophisticated",
"Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, powerful vocals, distorted, powerful",
"R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, silky vocals, smooth production, neo-soul",
"K-Pop": "k-pop, synth, bass, drums, 128 bpm, catchy, energetic, polished, mixed vocals, electronic elements, danceable",
"Ballad": "ballad, piano, strings, acoustic guitar, 70 bpm, emotional, heartfelt, romantic, expressive vocals, orchestral arrangement"
}
# Song style options
SONG_STYLES = {
"듀엣 (남녀 혼성)": "duet, male and female vocals, harmonious, call and response",
"솔로 (남성)": "solo, male vocals, powerful voice",
"솔로 (여성)": "solo, female vocals, emotional voice",
"그룹 (혼성)": "group vocals, mixed gender, layered harmonies",
"합창": "choir, multiple voices, choral arrangement",
"랩/힙합": "rap vocals, rhythmic flow, urban style",
"인스트루멘탈": "instrumental, no vocals"
}
# System prompt for AI lyric writing
LYRIC_SYSTEM_PROMPT = """너는 노래 가사를 작사하는 전문가 역할이다. 이용자가 입력하는 주제와 스타일에 따라 관련된 노래 가사를 작성하라.
가사 작성 규칙:
1. 구조 태그는 반드시 "[ ]"로 구분한다
2. 사용 가능한 구조 태그: [verse], [chorus], [bridge], [intro], [outro], [pre-chorus]
3. 듀엣인 경우 [verse - male], [verse - female], [chorus - duet] 형식으로 파트를 명시한다
4. 입력 언어와 동일한 언어로 가사를 작성한다
5. 각 구조는 4-8줄 정도로 작성한다
6. 음악 장르와 분위기에 맞는 가사를 작성한다
예시 형식:
[verse - male]
첫 번째 구절 가사
두 번째 구절 가사
...
[chorus - duet]
후렴구 가사
...
"""
def generate_lyrics_with_ai(prompt: str, genre: str, song_style: str) -> str:
"""AI를 사용하여 가사 생성"""
print(f"🎵 generate_lyrics_with_ai called with: prompt='{prompt}', genre='{genre}', style='{song_style}'")
if not client:
print("❌ OpenAI client not available, returning default lyrics")
return LYRIC_DEFAULT
if not prompt or prompt.strip() == "":
print("⚠️ Empty prompt, returning default lyrics")
return LYRIC_DEFAULT
try:
# Build style-specific instructions (the system prompt keeps the lyric language matched to the input)
style_info = ""
if "듀엣" in song_style:
style_info = "남녀 듀엣 형식으로 파트를 나누어 작성해주세요. [verse - male], [verse - female], [chorus - duet] 형식을 사용하세요."
elif "솔로 (남성)" in song_style:
style_info = "남성 솔로 가수를 위한 가사를 작성해주세요."
elif "솔로 (여성)" in song_style:
style_info = "여성 솔로 가수를 위한 가사를 작성해주세요."
elif "그룹" in song_style:
style_info = "그룹이 부르는 형식으로 파트를 나누어 작성해주세요."
elif "인스트루멘탈" in song_style:
return "[instrumental]\n\n[inst]\n\n[instrumental break]\n\n[inst]"
user_prompt = f"""
주제: {prompt}
장르: {genre}
스타일: {style_info}
위 정보를 바탕으로 노래 가사를 작성해주세요. 입력된 언어와 동일한 언어로 작성하고, 구조 태그를 반드시 포함해주세요.
"""
print(f"📝 Sending request to OpenAI...")
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": LYRIC_SYSTEM_PROMPT},
{"role": "user", "content": user_prompt}
],
temperature=0.8,
max_tokens=1000
)
generated_lyrics = response.choices[0].message.content
print(f"✅ Generated lyrics successfully")
return generated_lyrics
except Exception as e:
print(f"❌ AI 가사 생성 오류: {e}")
return LYRIC_DEFAULT
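# A minimal usage sketch for the generator above; wrapped in a function so that
# importing this module stays side-effect free. The topic string and the wrapper
# name are illustrative only; a valid OpenAI key must be configured, otherwise
# the call falls back to LYRIC_DEFAULT.
def _demo_generate_lyrics() -> str:
    # A Korean topic yields Korean lyrics, since rule 4 of LYRIC_SYSTEM_PROMPT
    # mirrors the input language.
    return generate_lyrics_with_ai("첫사랑", "Modern Pop", "듀엣 (남녀 혼성)")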
# Quality preset system
QUALITY_PRESETS = {
"Draft (Fast)": {
"infer_step": 50,
"guidance_scale": 10.0,
"scheduler_type": "euler",
"omega_scale": 5.0,
"use_erg_diffusion": False,
"use_erg_tag": True,
"description": "빠른 초안 생성 (1-2분)"
},
"Standard": {
"infer_step": 150,
"guidance_scale": 15.0,
"scheduler_type": "euler",
"omega_scale": 10.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "표준 품질 (3-5분)"
},
"High Quality": {
"infer_step": 200,
"guidance_scale": 18.0,
"scheduler_type": "heun",
"omega_scale": 15.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "고품질 생성 (8-12분)"
},
"Ultra (Best)": {
"infer_step": 299,
"guidance_scale": 20.0,
"scheduler_type": "heun",
"omega_scale": 20.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "최고 품질 (15-20분)"
}
}
# Multi-seed generation settings
MULTI_SEED_OPTIONS = {
"Single": 1,
"Best of 3": 3,
"Best of 5": 5,
"Best of 10": 10
}
class MusicGenerationCache:
"""생성 결과 캐싱 시스템"""
def __init__(self):
self.cache = {}
self.max_cache_size = 50
def get_cache_key(self, params):
# Hash only the parameters that affect the output
key_params = {k: v for k, v in params.items()
if k in ['prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration']}
return hashlib.md5(str(sorted(key_params.items())).encode()).hexdigest()[:16]
def get_cached_result(self, params):
key = self.get_cache_key(params)
return self.cache.get(key)
def cache_result(self, params, result):
if len(self.cache) >= self.max_cache_size:
# Evict the oldest entry (dicts preserve insertion order)
oldest_key = next(iter(self.cache))
del self.cache[oldest_key]
key = self.get_cache_key(params)
self.cache[key] = result
# Global cache instance
generation_cache = MusicGenerationCache()
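# Usage sketch for the cache (illustrative values): two calls with identical
# key parameters (prompt, lyrics, infer_step, guidance_scale, audio_duration)
# map to the same hash, so a repeated generation request can be served from
# the cache without re-running the pipeline.
def _demo_cache_roundtrip() -> bool:
    params = {"prompt": "pop", "lyrics": "[verse]...", "infer_step": 150,
              "guidance_scale": 15.0, "audio_duration": 60}
    generation_cache.cache_result(params, ["/tmp/out.wav", {}])  # hypothetical result
    return generation_cache.get_cached_result(params) is not None  # True: cache hit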
def enhance_prompt_with_genre(base_prompt: str, genre: str, song_style: str) -> str:
"""장르와 스타일에 따른 스마트 프롬프트 확장"""
enhanced_prompt = base_prompt
if genre != "Custom" and genre:
# Additional enhancement tags per genre
genre_enhancements = {
"Modern Pop": ["polished production", "mainstream appeal", "hook-driven"],
"Rock": ["guitar-driven", "powerful drums", "energetic performance"],
"Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"],
"Country": ["acoustic warmth", "storytelling melody", "authentic feel"],
"EDM": ["electronic atmosphere", "build-ups", "dance-friendly"],
"Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"],
"Classical": ["orchestral depth", "musical sophistication", "timeless beauty"],
"Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"],
"Metal": ["aggressive energy", "powerful sound", "intense atmosphere"],
"R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"],
"K-Pop": ["catchy hooks", "dynamic arrangement", "polished production"],
"Ballad": ["emotional depth", "slow tempo", "heartfelt delivery"]
}
if genre in genre_enhancements:
additional_tags = ", ".join(genre_enhancements[genre])
enhanced_prompt = f"{base_prompt}, {additional_tags}"
# Append style tags
if song_style in SONG_STYLES:
style_tags = SONG_STYLES[song_style]
enhanced_prompt = f"{enhanced_prompt}, {style_tags}"
return enhanced_prompt
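# Worked example of the expansion above (deterministic, no model calls):
#
#     enhance_prompt_with_genre("pop, synth", "Rock", "솔로 (남성)")
#     -> "pop, synth, guitar-driven, powerful drums, energetic performance,
#         solo, male vocals, powerful voice"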
def calculate_quality_score(audio_path: str) -> float:
"""간단한 품질 점수 계산 (실제 구현에서는 더 복잡한 메트릭 사용)"""
try:
y, sr = librosa.load(audio_path)
# Basic quality metrics
rms_energy = np.sqrt(np.mean(y**2))
spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y))
# Normalized score (0-100)
energy_score = min(rms_energy * 1000, 40)  # up to 40 points
spectral_score = min(spectral_centroid / 100, 40)  # up to 40 points
clarity_score = min((1 - zero_crossing_rate) * 20, 20)  # up to 20 points
total_score = energy_score + spectral_score + clarity_score
return round(total_score, 1)
except Exception:
    return 50.0  # fallback score
def update_quality_preset(preset_name):
"""품질 프리셋 적용"""
if preset_name not in QUALITY_PRESETS:
return (100, 15.0, "euler", 10.0, True, True)
preset = QUALITY_PRESETS[preset_name]
return (
preset.get("infer_step", 100),
preset.get("guidance_scale", 15.0),
preset.get("scheduler_type", "euler"),
preset.get("omega_scale", 10.0),
preset.get("use_erg_diffusion", True),
preset.get("use_erg_tag", True)
)
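# Example, taken from the preset table above:
#
#     update_quality_preset("Draft (Fast)")
#     -> (50, 10.0, "euler", 5.0, False, True)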
def create_enhanced_process_func(original_func):
"""기존 함수를 향상된 기능으로 래핑"""
def enhanced_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric,
audio2audio_enable=False, ref_audio_strength=0.5, ref_audio_input=None,
lora_name_or_path="none", multi_seed_mode="Single",
enable_smart_enhancement=True, genre_preset="Custom", song_style="듀엣 (남녀 혼성)", **kwargs
):
# Smart prompt expansion
if enable_smart_enhancement:
prompt = enhance_prompt_with_genre(prompt, genre_preset, song_style)
# Check the cache first
cache_params = {
'prompt': prompt, 'lyrics': lyrics, 'audio_duration': audio_duration,
'infer_step': infer_step, 'guidance_scale': guidance_scale
}
cached_result = generation_cache.get_cached_result(cache_params)
if cached_result:
return cached_result
# Multi-seed generation
num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1)
if num_candidates == 1:
# Single candidate: call the original function directly
result = original_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
else:
# Generate multiple candidates and pick the best one
candidates = []
for i in range(num_candidates):
seed = random.randint(1, 10000)
try:
result = original_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, str(seed),
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
if result and len(result) > 0:
audio_path = result[0]  # the first element is the audio file path
if audio_path and os.path.exists(audio_path):
quality_score = calculate_quality_score(audio_path)
candidates.append({
"result": result,
"quality_score": quality_score,
"seed": seed
})
except Exception as e:
print(f"Generation {i+1} failed: {e}")
continue
if candidates:
# Select the highest-quality candidate
best_candidate = max(candidates, key=lambda x: x["quality_score"])
result = best_candidate["result"]
# Attach quality metadata
if len(result) > 1 and isinstance(result[1], dict):
result[1]["quality_score"] = best_candidate["quality_score"]
result[1]["selected_seed"] = best_candidate["seed"]
result[1]["candidates_count"] = len(candidates)
else:
# All candidates failed; fall back to a single default generation
result = original_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
# Cache the result
generation_cache.cache_result(cache_params, result)
return result
return enhanced_func
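# Usage sketch: wrap the real pipeline entry point once and reuse the wrapper
# for every tab (text2music, retake, repaint, edit, extend). The argument
# values below are illustrative; `dump_func` (defined later in this file) can
# stand in for the real ACE-Step pipeline:
#
#     enhanced = create_enhanced_process_func(dump_func)
#     result = enhanced(30, "pop, synth", "[verse]...", 150, 15.0,
#                       "euler", "apg", 10.0, None, 0.5, 0.0, 3.0,
#                       True, False, True, None, 0.0, 0.0)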
def create_output_ui(task_name="Text2Music"):
# For many consumer-grade GPU devices, only one batch can be run
output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False):
input_params_json = gr.JSON(label=f"{task_name} Parameters")
# Quality info display
with gr.Row():
quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False)
generation_info = gr.Textbox(
label="Generation Info",
value="",
interactive=False,
max_lines=2
)
outputs = [output_audio1]
return outputs, input_params_json
def dump_func(*args):
    """Debug stub that logs its arguments; used when no real pipeline function is wired in."""
    print(args)
    return []
def create_text2music_ui(
gr,
text2music_process_func,
sample_data_func=None,
load_data_func=None,
):
# Build the enhanced process function
enhanced_process_func = create_enhanced_process_func(text2music_process_func)
with gr.Row():
with gr.Column():
# Quality & performance settings section
with gr.Group():
gr.Markdown("### ⚡ 품질 & 성능 설정")
with gr.Row():
quality_preset = gr.Dropdown(
choices=list(QUALITY_PRESETS.keys()),
value="Standard",
label="품질 프리셋",
scale=2,
interactive=True
)
multi_seed_mode = gr.Dropdown(
choices=list(MULTI_SEED_OPTIONS.keys()),
value="Single",
label="다중 생성 모드",
scale=2,
info="여러 번 생성하여 최고 품질 선택",
interactive=True
)
preset_description = gr.Textbox(
value=QUALITY_PRESETS["Standard"]["description"],
label="설명",
interactive=False,
max_lines=1
)
with gr.Row(equal_height=True):
# Tag and lyric examples come from the AI music generation community
audio_duration = gr.Slider(
-1,
240.0,
step=0.00001,
value=-1,
label="Audio Duration",
interactive=True,
info="-1 means random duration (30 ~ 240).",
scale=7,
)
random_bnt = gr.Button("🎲 Random", variant="secondary", scale=1)
preview_bnt = gr.Button("🎵 Preview", variant="secondary", scale=2)
# audio2audio
with gr.Row(equal_height=True):
audio2audio_enable = gr.Checkbox(
label="Enable Audio2Audio",
value=False,
info="Check to enable Audio-to-Audio generation using a reference audio.",
elem_id="audio2audio_checkbox"
)
lora_name_or_path = gr.Dropdown(
label="Lora Name or Path",
choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
value="none",
allow_custom_value=True,
)
ref_audio_input = gr.Audio(
type="filepath",
label="Reference Audio (for Audio2Audio)",
visible=False,
elem_id="ref_audio_input",
show_download_button=True
)
ref_audio_strength = gr.Slider(
label="Refer audio strength",
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
elem_id="ref_audio_strength",
visible=False,
interactive=True,
)
def toggle_ref_audio_visibility(is_checked):
return (
gr.update(visible=is_checked),
gr.update(visible=is_checked),
)
audio2audio_enable.change(
fn=toggle_ref_audio_visibility,
inputs=[audio2audio_enable],
outputs=[ref_audio_input, ref_audio_strength],
)
with gr.Column(scale=2):
with gr.Group():
gr.Markdown("""### 🎼 스마트 프롬프트 시스템
장르와 스타일을 선택하면 자동으로 최적화된 태그가 추가됩니다.""")
with gr.Row():
genre_preset = gr.Dropdown(
choices=["Custom"] + list(GENRE_PRESETS.keys()),
value="Custom",
label="장르 프리셋",
scale=1,
interactive=True
)
song_style = gr.Dropdown(
choices=list(SONG_STYLES.keys()),
value="듀엣 (남녀 혼성)",
label="곡 스타일",
scale=1,
interactive=True
)
enable_smart_enhancement = gr.Checkbox(
label="스마트 향상",
value=True,
info="자동 태그 최적화",
scale=1
)
prompt = gr.Textbox(
lines=2,
label="Tags",
max_lines=4,
value=TAG_DEFAULT,
placeholder="콤마로 구분된 태그들...",
interactive=True
)
with gr.Group():
gr.Markdown("""### 📝 AI 작사 시스템
주제를 입력하고 'AI 작사' 버튼을 클릭하면 자동으로 가사가 생성됩니다.""")
with gr.Row():
lyric_prompt = gr.Textbox(
label="작사 주제",
placeholder="예: 첫사랑의 설렘, 이별의 아픔, 희망찬 내일...",
scale=3,
interactive=True
)
generate_lyrics_btn = gr.Button("🤖 AI 작사", variant="secondary", scale=1)
lyrics = gr.Textbox(
lines=9,
label="Lyrics",
max_lines=13,
value=LYRIC_DEFAULT,
placeholder="가사를 입력하세요. [verse], [chorus] 등의 구조 태그 사용을 권장합니다.",
interactive=True
)
with gr.Accordion("Basic Settings", open=False):
infer_step = gr.Slider(
minimum=1,
maximum=300,
step=1,
value=150,
label="Infer Steps",
interactive=True,
)
guidance_scale = gr.Slider(
minimum=0.0,
maximum=30.0,
step=0.1,
value=15.0,
label="Guidance Scale",
interactive=True,
info="When guidance_scale_lyric > 1 and guidance_scale_text > 1, the guidance scale will not be applied.",
)
guidance_scale_text = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Text",
interactive=True,
info="Guidance scale for text condition. It can only apply to cfg. set guidance_scale_text=5.0, guidance_scale_lyric=1.5 for start",
)
guidance_scale_lyric = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Lyric",
interactive=True,
)
manual_seeds = gr.Textbox(
label="manual seeds (default None)",
placeholder="1,2,3,4",
value=None,
info="Seed for the generation",
)
with gr.Accordion("Advanced Settings", open=False):
scheduler_type = gr.Radio(
["euler", "heun"],
value="euler",
label="Scheduler Type",
elem_id="scheduler_type",
info="Scheduler type for the generation. euler is recommended. heun will take more time.",
)
cfg_type = gr.Radio(
["cfg", "apg", "cfg_star"],
value="apg",
label="CFG Type",
elem_id="cfg_type",
info="CFG type for the generation. apg is recommended. cfg and cfg_star are almost the same.",
)
use_erg_tag = gr.Checkbox(
label="use ERG for tag",
value=True,
info="Use Entropy Rectifying Guidance for tag. It will multiple a temperature to the attention to make a weaker tag condition and make better diversity.",
)
use_erg_lyric = gr.Checkbox(
label="use ERG for lyric",
value=False,
info="The same but apply to lyric encoder's attention.",
)
use_erg_diffusion = gr.Checkbox(
label="use ERG for diffusion",
value=True,
info="The same but apply to diffusion model's attention.",
)
omega_scale = gr.Slider(
minimum=-100.0,
maximum=100.0,
step=0.1,
value=10.0,
label="Granularity Scale",
interactive=True,
info="Granularity scale for the generation. Higher values can reduce artifacts",
)
guidance_interval = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
label="Guidance Interval",
interactive=True,
info="Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)",
)
guidance_interval_decay = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.0,
label="Guidance Interval Decay",
interactive=True,
info="Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.",
)
min_guidance_scale = gr.Slider(
minimum=0.0,
maximum=200.0,
step=0.1,
value=3.0,
label="Min Guidance Scale",
interactive=True,
info="Min guidance scale for guidance interval decay's end scale",
)
oss_steps = gr.Textbox(
label="OSS Steps",
placeholder="16, 29, 52, 96, 129, 158, 172, 183, 189, 200",
value=None,
info="Optimal Steps for the generation. But not test well",
)
text2music_bnt = gr.Button("🎵 Generate Music", variant="primary", size="lg")
with gr.Column():
outputs, input_params_json = create_output_ui()
with gr.Tab("retake"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="retake seeds (default None)", placeholder="", value=None
)
retake_bnt = gr.Button("Retake", variant="primary")
retake_outputs, retake_input_params_json = create_output_ui("Retake")
def retake_process_func(json_data, retake_variance, retake_seeds):
return enhanced_process_func(
json_data.get("audio_duration", 30),
json_data.get("prompt", ""),
json_data.get("lyrics", ""),
json_data.get("infer_step", 100),
json_data.get("guidance_scale", 15.0),
json_data.get("scheduler_type", "euler"),
json_data.get("cfg_type", "apg"),
json_data.get("omega_scale", 10.0),
retake_seeds,
json_data.get("guidance_interval", 0.5),
json_data.get("guidance_interval_decay", 0.0),
json_data.get("min_guidance_scale", 3.0),
json_data.get("use_erg_tag", True),
json_data.get("use_erg_lyric", False),
json_data.get("use_erg_diffusion", True),
json_data.get("oss_steps", None),
json_data.get("guidance_scale_text", 0.0),
json_data.get("guidance_scale_lyric", 0.0),
audio2audio_enable=json_data.get("audio2audio_enable", False),
ref_audio_strength=json_data.get("ref_audio_strength", 0.5),
ref_audio_input=json_data.get("ref_audio_input", None),
lora_name_or_path=json_data.get("lora_name_or_path", "none"),
multi_seed_mode="Best of 3", # retake는 자동으로 다중 생성
retake_variance=retake_variance,
task="retake"
)
retake_bnt.click(
fn=retake_process_func,
inputs=[
input_params_json,
retake_variance,
retake_seeds,
],
outputs=retake_outputs + [retake_input_params_json],
)
with gr.Tab("repainting"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="repaint seeds (default None)", placeholder="", value=None
)
repaint_start = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Repaint Start Time",
interactive=True,
)
repaint_end = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Repaint End Time",
interactive=True,
)
repaint_source = gr.Radio(
["text2music", "last_repaint", "upload"],
value="text2music",
label="Repaint Source",
elem_id="repaint_source",
)
repaint_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="repaint_source_audio_upload",
show_download_button=True,
)
repaint_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="repaint_source_audio_upload"
),
inputs=[repaint_source],
outputs=[repaint_source_audio_upload],
)
repaint_bnt = gr.Button("Repaint", variant="primary")
repaint_outputs, repaint_input_params_json = create_output_ui("Repaint")
def repaint_process_func(
text2music_json_data,
repaint_json_data,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if repaint_source == "upload":
src_audio_path = repaint_source_audio_upload
audio_duration = librosa.get_duration(path=src_audio_path)  # librosa >= 0.10 renamed filename= to path=
json_data = {"audio_duration": audio_duration}
elif repaint_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif repaint_source == "last_repaint":
json_data = repaint_json_data
src_audio_path = json_data["audio_path"]
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=retake_seeds,
retake_variance=retake_variance,
task="repaint",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
repaint_bnt.click(
fn=repaint_process_func,
inputs=[
input_params_json,
repaint_input_params_json,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=repaint_outputs + [repaint_input_params_json],
)
with gr.Tab("edit"):
edit_prompt = gr.Textbox(lines=2, label="Edit Tags", max_lines=4)
edit_lyrics = gr.Textbox(lines=9, label="Edit Lyrics", max_lines=13)
retake_seeds = gr.Textbox(
label="edit seeds (default None)", placeholder="", value=None
)
edit_type = gr.Radio(
["only_lyrics", "remix"],
value="only_lyrics",
label="Edit Type",
elem_id="edit_type",
info="`only_lyrics` will keep the whole song the same except lyrics difference. Make your diffrence smaller, e.g. one lyrc line change.\nremix can change the song melody and genre",
)
edit_n_min = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.6,
label="edit_n_min",
interactive=True,
)
edit_n_max = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=1.0,
label="edit_n_max",
interactive=True,
)
def edit_type_change_func(edit_type):
if edit_type == "only_lyrics":
n_min = 0.6
n_max = 1.0
elif edit_type == "remix":
n_min = 0.2
n_max = 0.4
return n_min, n_max
edit_type.change(
edit_type_change_func,
inputs=[edit_type],
outputs=[edit_n_min, edit_n_max],
)
edit_source = gr.Radio(
["text2music", "last_edit", "upload"],
value="text2music",
label="Edit Source",
elem_id="edit_source",
)
edit_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="edit_source_audio_upload",
show_download_button=True,
)
edit_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="edit_source_audio_upload"
),
inputs=[edit_source],
outputs=[edit_source_audio_upload],
)
edit_bnt = gr.Button("Edit", variant="primary")
edit_outputs, edit_input_params_json = create_output_ui("Edit")
def edit_process_func(
text2music_json_data,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
):
if edit_source == "upload":
src_audio_path = edit_source_audio_upload
audio_duration = librosa.get_duration(path=src_audio_path)  # librosa >= 0.10 renamed filename= to path=
json_data = {"audio_duration": audio_duration}
elif edit_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif edit_source == "last_edit":
json_data = edit_input_params_json
src_audio_path = json_data["audio_path"]
if not edit_prompt:
edit_prompt = prompt
if not edit_lyrics:
edit_lyrics = lyrics
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
task="edit",
src_audio_path=src_audio_path,
edit_target_prompt=edit_prompt,
edit_target_lyrics=edit_lyrics,
edit_n_min=edit_n_min,
edit_n_max=edit_n_max,
retake_seeds=retake_seeds,
lora_name_or_path="none"
)
edit_bnt.click(
fn=edit_process_func,
inputs=[
input_params_json,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
],
outputs=edit_outputs + [edit_input_params_json],
)
with gr.Tab("extend"):
extend_seeds = gr.Textbox(
label="extend seeds (default None)", placeholder="", value=None
)
left_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Left Extend Length",
interactive=True,
)
right_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Right Extend Length",
interactive=True,
)
extend_source = gr.Radio(
["text2music", "last_extend", "upload"],
value="text2music",
label="Extend Source",
elem_id="extend_source",
)
extend_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="extend_source_audio_upload",
show_download_button=True,
)
extend_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="extend_source_audio_upload"
),
inputs=[extend_source],
outputs=[extend_source_audio_upload],
)
extend_bnt = gr.Button("Extend", variant="primary")
extend_outputs, extend_input_params_json = create_output_ui("Extend")
def extend_process_func(
text2music_json_data,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if extend_source == "upload":
src_audio_path = extend_source_audio_upload
# get audio duration (librosa >= 0.10 renamed filename= to path=)
audio_duration = librosa.get_duration(path=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif extend_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif extend_source == "last_extend":
json_data = extend_input_params_json
src_audio_path = json_data["audio_path"]
repaint_start = -left_extend_length
repaint_end = json_data["audio_duration"] + right_extend_length
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=extend_seeds,
retake_variance=1.0,
task="extend",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
extend_bnt.click(
fn=extend_process_func,
inputs=[
input_params_json,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=extend_outputs + [extend_input_params_json],
)
# ===== Simple, direct event handlers =====
print("🔗 이벤트 핸들러 연결 중...")
# 1. Genre preset change
def on_genre_change(genre, style):
print(f"🎵 Genre changed to: {genre}")
if genre == "Custom":
return TAG_DEFAULT
tags = GENRE_PRESETS.get(genre, TAG_DEFAULT)
if style and style in SONG_STYLES:
tags = f"{tags}, {SONG_STYLES[style]}"
return tags
genre_preset.change(
fn=on_genre_change,
inputs=[genre_preset, song_style],
outputs=prompt
)
# 2. Style change
def on_style_change(genre, style):
print(f"🎤 Style changed to: {style}")
if genre == "Custom":
base_tags = TAG_DEFAULT
else:
base_tags = GENRE_PRESETS.get(genre, TAG_DEFAULT)
if style and style in SONG_STYLES:
return f"{base_tags}, {SONG_STYLES[style]}"
return base_tags
song_style.change(
fn=on_style_change,
inputs=[genre_preset, song_style],
outputs=prompt
)
# 3. Quality preset
def on_quality_change(preset):
print(f"⚡ Quality preset changed to: {preset}")
if preset in QUALITY_PRESETS:
p = QUALITY_PRESETS[preset]
return (
p["description"],
p["infer_step"],
p["guidance_scale"],
p["scheduler_type"],
p["omega_scale"],
p["use_erg_diffusion"],
p["use_erg_tag"]
)
return ("", 150, 15.0, "euler", 10.0, True, True)
quality_preset.change(
fn=on_quality_change,
inputs=quality_preset,
outputs=[preset_description, infer_step, guidance_scale, scheduler_type, omega_scale, use_erg_diffusion, use_erg_tag]
)
# 4. AI lyric writing
def generate_lyrics_click(prompt_text, genre, style):
print(f"🤖 AI 작사 버튼 클릭! Prompt: '{prompt_text}'")
if not prompt_text:
return "작사 주제를 입력해주세요!"
return generate_lyrics_with_ai(prompt_text, genre, style)
generate_lyrics_btn.click(
fn=generate_lyrics_click,
inputs=[lyric_prompt, genre_preset, song_style],
outputs=lyrics
)
# 5. Random button
def random_click(genre, style):
print("🎲 Random 버튼 클릭!")
if genre == "Custom":
genre = random.choice(list(GENRE_PRESETS.keys()))
themes = ["도시의 밤", "첫사랑", "여름 해변", "가을 정취", "희망", "자유", "별빛", "청춘"]
theme = random.choice(themes)
duration = random.choice([30, 60, 90, 120])
# Tags
tags = GENRE_PRESETS.get(genre, TAG_DEFAULT)
if style in SONG_STYLES:
tags = f"{tags}, {SONG_STYLES[style]}"
# Generate lyrics
new_lyrics = generate_lyrics_with_ai(theme, genre, style)
return [
duration, tags, new_lyrics, 150, 15.0, "euler", "apg", 10.0,
str(random.randint(1, 10000)), 0.5, 0.0, 3.0, True, False, True,
None, 0.0, 0.0, False, 0.5, None
]
random_bnt.click(
fn=random_click,
inputs=[genre_preset, song_style],
outputs=[
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input
]
)
# 6. Preview button (stub: currently only logs; no audio preview is rendered)
preview_bnt.click(
fn=lambda p, l, g, s: print(f"🎵 Preview clicked! Genre: {g}, Style: {s}"),
inputs=[prompt, lyrics, genre_preset, song_style],
outputs=None
)
# 7. Main generate button
text2music_bnt.click(
fn=enhanced_process_func,
inputs=[
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path,
multi_seed_mode, enable_smart_enhancement, genre_preset, song_style
],
outputs=outputs + [input_params_json]
)
print("✅ 모든 이벤트 핸들러 연결 완료!")
def create_main_demo_ui(
text2music_process_func=dump_func,
sample_data_func=dump_func,
load_data_func=dump_func,
):
with gr.Blocks(
title="ACE-Step Model 1.0 DEMO - Enhanced",
theme=gr.themes.Soft(),
css="""
/* Gradient background */
.gradio-container {
max-width: 1200px !important;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
}
/* Main container */
.main-container {
background: rgba(255, 255, 255, 0.95);
border-radius: 20px;
padding: 30px;
margin: 20px auto;
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
}
/* Header */
.header-title {
background: linear-gradient(45deg, #667eea, #764ba2);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
font-size: 3em;
font-weight: bold;
text-align: center;
margin-bottom: 10px;
}
/* Buttons */
.gr-button-primary {
background: linear-gradient(45deg, #667eea, #764ba2) !important;
border: none !important;
color: white !important;
font-weight: bold !important;
transition: all 0.3s ease !important;
}
.gr-button-primary:hover {
transform: translateY(-2px);
box-shadow: 0 10px 20px rgba(102, 126, 234, 0.3);
}
.gr-button-secondary {
background: linear-gradient(45deg, #f093fb, #f5576c) !important;
border: none !important;
color: white !important;
transition: all 0.3s ease !important;
}
/* Groups */
.gr-group {
background: rgba(255, 255, 255, 0.8) !important;
border: 1px solid rgba(102, 126, 234, 0.2) !important;
border-radius: 15px !important;
padding: 20px !important;
margin: 10px 0 !important;
backdrop-filter: blur(10px) !important;
}
/* Tabs */
.gr-tab {
background: rgba(255, 255, 255, 0.9) !important;
border-radius: 10px !important;
padding: 15px !important;
}
/* Input fields */
.gr-textbox, .gr-dropdown, .gr-slider {
border: 2px solid rgba(102, 126, 234, 0.3) !important;
border-radius: 10px !important;
transition: all 0.3s ease !important;
}
.gr-textbox:focus, .gr-dropdown:focus {
border-color: #667eea !important;
box-shadow: 0 0 10px rgba(102, 126, 234, 0.2) !important;
}
/* Quality info */
.quality-info {
background: linear-gradient(135deg, #f093fb20, #f5576c20);
padding: 15px;
border-radius: 10px;
margin: 10px 0;
border: 1px solid rgba(240, 147, 251, 0.3);
}
/* Animations */
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(20px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
.gr-row, .gr-column {
animation: fadeIn 0.5s ease-out;
}
/* Scrollbar */
::-webkit-scrollbar {
width: 10px;
}
::-webkit-scrollbar-track {
background: rgba(255, 255, 255, 0.1);
border-radius: 10px;
}
::-webkit-scrollbar-thumb {
background: linear-gradient(45deg, #667eea, #764ba2);
border-radius: 10px;
}
/* Markdown */
.gr-markdown {
color: #4a5568 !important;
}
.gr-markdown h3 {
color: #667eea !important;
font-weight: 600 !important;
margin: 15px 0 !important;
}
"""
) as demo:
with gr.Column(elem_classes="main-container"):
# Usage guide
with gr.Accordion("📖 사용법 가이드", open=False):
gr.Markdown("""
### 🎯 빠른 시작
1. **장르 & 스타일 선택**: 원하는 음악 장르와 곡 스타일(듀엣, 솔로 등)을 선택합니다
2. **AI 작사**: 주제를 입력하고 'AI 작사' 버튼으로 자동 가사를 생성합니다
3. **품질 설정**: Draft(빠름) → Standard(권장) → High Quality → Ultra 중 선택
4. **다중 생성**: "Best of 3/5/10" 선택하면 여러 번 생성하여 최고 품질을 자동 선택합니다
5. **프리뷰**: 전체 생성 전 10초 프리뷰로 빠르게 확인할 수 있습니다
### 💡 품질 향상 팁
- **고품질 생성**: "High Quality" + "Best of 5" 조합 추천
- **빠른 테스트**: "Draft" + "프리뷰" 기능 활용
- **장르 특화**: 장르 프리셋 선택 후 "스마트 향상" 체크
- **가사 구조**: [verse], [chorus], [bridge] 태그 적극 활용
- **다국어 지원**: 한국어로 주제를 입력하면 한국어 가사가 생성됩니다
### ⚠️ OpenAI API 설정
AI 작사 기능을 사용하려면 환경변수에 OpenAI API 키를 설정해야 합니다:
```bash
export LLM_API="your-openai-api-key"
# 또는
export OPENAI_API_KEY="your-openai-api-key"
```
""")
with gr.Tab("🎵 Enhanced Text2Music", elem_classes="gr-tab"):
create_text2music_ui(
gr=gr,
text2music_process_func=text2music_process_func,
sample_data_func=sample_data_func,
load_data_func=load_data_func,
)
return demo
if __name__ == "__main__":
print("🚀 ACE-Step PRO 시작 중...")
demo = create_main_demo_ui()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=True  # create a public share link
)