"""
ACE-Step: A Step Towards Music Generation Foundation Model
https://github.com/ace-step/ACE-Step
Apache 2.0 License
"""
import gradio as gr
import librosa
import os
import random
import hashlib
import numpy as np
import json
from typing import Dict, List, Tuple, Optional
# [ADDED] OpenAI API setup
try:
from openai import OpenAI
api_key = os.getenv("LLM_API")
if api_key:
client = OpenAI(api_key=api_key)
client_available = True
print("βœ… OpenAI API client initialized successfully")
else:
client = None
client_available = False
print("⚠️ Warning: No OpenAI API key found. AI lyrics generation will be disabled.")
print("Set environment variable: export LLM_API='your-openai-api-key'")
except Exception as e:
client = None
client_available = False
print(f"❌ Warning: Failed to initialize OpenAI client: {e}")
TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic"
LYRIC_DEFAULT = """[verse]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains
[verse]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere
[chorus]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song
[verse]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold
[bridge]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies
[verse]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
"""
# [ADDED] System prompt for AI lyric writing
LYRIC_SYSTEM_PROMPT = """You are a professional lyricist. Write song lyrics that fit the theme the user provides. Mark the song structure with "[ ]" tags, following the example below.
Example:
[verse]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains
[verse]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere
[chorus]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song
[verse]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold
[bridge]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies
[verse]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
Rules:
1. Always use structure tags such as [verse], [chorus], and [bridge]
2. Write the lyrics in the same language as the user's input
3. Keep each section to about 4-8 lines
4. Write rhythmic, rhyming lyrics that match the theme and mood"""
# [ADDED] AI lyric generation function
def generate_lyrics_with_ai(theme: str, genre: Optional[str] = None) -> str:
"""Generate theme-based lyrics with AI"""
print(f"🎵 AI lyric generation started: theme='{theme}', genre='{genre}'")
if not client_available or client is None:
print("❌ OpenAI client not available, returning default lyrics")
return LYRIC_DEFAULT
if not theme or theme.strip() == "":
print("⚠️ Empty theme, returning default lyrics")
return LYRIC_DEFAULT
try:
# Append genre info to the prompt when provided
user_prompt = f"Please write song lyrics on the following theme: {theme}"
if genre and genre != "Custom":
user_prompt += f"\nGenre: {genre}"
print("📝 Calling the OpenAI API...")
# [MODIFIED] Converted the user-supplied API format to the standard one:
# client.chat.completions.create is used rather than client.responses.create
response = client.chat.completions.create(
model="gpt-4o-mini",  # a widely available chat model; swap in another if preferred
messages=[
{
"role": "system",
"content": LYRIC_SYSTEM_PROMPT
},
{
"role": "user",
"content": user_prompt
}
],
temperature=0.8,
max_tokens=1500,
top_p=1
)
generated_lyrics = response.choices[0].message.content
print(f"βœ… AI μž‘μ‚¬ μ™„λ£Œ")
print(f"μƒμ„±λœ 가사 미리보기: {generated_lyrics[:100]}...")
return generated_lyrics
except Exception as e:
print(f"❌ AI μž‘μ‚¬ 생성 였λ₯˜: {e}")
import traceback
print(f"상세 였λ₯˜: {traceback.format_exc()}")
return LYRIC_DEFAULT
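# Minimal usage sketch (commented out; assumes the LLM_API environment
# variable is set, and the theme string is purely illustrative):
#
#     if client_available:
#         print(generate_lyrics_with_ai("first love", genre="Modern Pop"))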
# Expanded genre presets (original presets + improved tags)
GENRE_PRESETS = {
"Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, female vocals, polished vocals, radio-ready, commercial, layered vocals",
"Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, male vocals, raw vocals, power chords, driving rhythm",
"Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, male vocals, rhythmic vocals, trap beats, punchy drums",
"Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, male vocals, twangy vocals, storytelling, americana",
"EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic",
"Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, male vocals, smooth vocals, offbeat rhythm, island vibes",
"Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony",
"Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, male vocals, crooning vocals, swing feel, sophisticated",
"Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, male vocals, screamed vocals, distorted, powerful",
"R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, female vocals, silky vocals, smooth production, neo-soul"
}
# Quality preset system
QUALITY_PRESETS = {
"Draft (Fast)": {
"infer_step": 50,
"guidance_scale": 10.0,
"scheduler_type": "euler",
"omega_scale": 5.0,
"use_erg_diffusion": False,
"use_erg_tag": True,
"description": "λΉ λ₯Έ μ΄ˆμ•ˆ 생성 (1-2λΆ„)"
},
"Standard": {
"infer_step": 150,
"guidance_scale": 15.0,
"scheduler_type": "euler",
"omega_scale": 10.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "ν‘œμ€€ ν’ˆμ§ˆ (3-5λΆ„)"
},
"High Quality": {
"infer_step": 200,
"guidance_scale": 18.0,
"scheduler_type": "heun",
"omega_scale": 15.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "κ³ ν’ˆμ§ˆ 생성 (8-12λΆ„)"
},
"Ultra (Best)": {
"infer_step": 299,
"guidance_scale": 20.0,
"scheduler_type": "heun",
"omega_scale": 20.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "졜고 ν’ˆμ§ˆ (15-20λΆ„)"
}
}
# Multi-seed generation options
MULTI_SEED_OPTIONS = {
"Single": 1,
"Best of 3": 3,
"Best of 5": 5,
"Best of 10": 10
}
class MusicGenerationCache:
"""생성 κ²°κ³Ό 캐싱 μ‹œμŠ€ν…œ"""
def __init__(self):
self.cache = {}
self.max_cache_size = 50
def get_cache_key(self, params):
# Hash only the parameters that affect the output
key_params = {k: v for k, v in params.items()
if k in ['prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration']}
return hashlib.md5(str(sorted(key_params.items())).encode()).hexdigest()[:16]
def get_cached_result(self, params):
key = self.get_cache_key(params)
return self.cache.get(key)
def cache_result(self, params, result):
if len(self.cache) >= self.max_cache_size:
oldest_key = next(iter(self.cache))
del self.cache[oldest_key]
key = self.get_cache_key(params)
self.cache[key] = result
# Global cache instance
generation_cache = MusicGenerationCache()
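# Usage sketch: the cache key depends only on the five fields filtered in
# get_cache_key, so two requests that differ only in other parameters (e.g.
# scheduler_type) intentionally share one cache entry:
#
#     params_a = {"prompt": "rock", "lyrics": "[inst]", "audio_duration": 60,
#                 "infer_step": 150, "guidance_scale": 15.0, "scheduler_type": "euler"}
#     params_b = dict(params_a, scheduler_type="heun")
#     assert (generation_cache.get_cache_key(params_a)
#             == generation_cache.get_cache_key(params_b))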
def enhance_prompt_with_genre(base_prompt: str, genre: str) -> str:
"""μž₯λ₯΄μ— λ”°λ₯Έ 슀마트 ν”„λ‘¬ν”„νŠΈ ν™•μž₯"""
if genre == "Custom" or not genre:
return base_prompt
# Extra enhancement tags per genre
genre_enhancements = {
"Modern Pop": ["polished production", "mainstream appeal", "hook-driven"],
"Rock": ["guitar-driven", "powerful drums", "energetic performance"],
"Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"],
"Country": ["acoustic warmth", "storytelling melody", "authentic feel"],
"EDM": ["electronic atmosphere", "build-ups", "dance-friendly"],
"Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"],
"Classical": ["orchestral depth", "musical sophistication", "timeless beauty"],
"Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"],
"Metal": ["aggressive energy", "powerful sound", "intense atmosphere"],
"R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"]
}
if genre in genre_enhancements:
additional_tags = ", ".join(genre_enhancements[genre])
return f"{base_prompt}, {additional_tags}"
return base_prompt
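# Example: enhance_prompt_with_genre("dreamy synth", "EDM") returns
# "dreamy synth, electronic atmosphere, build-ups, dance-friendly"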
def calculate_quality_score(audio_path: str) -> float:
"""κ°„λ‹¨ν•œ ν’ˆμ§ˆ 점수 계산 (μ‹€μ œ κ΅¬ν˜„μ—μ„œλŠ” 더 λ³΅μž‘ν•œ λ©”νŠΈλ¦­ μ‚¬μš©)"""
try:
y, sr = librosa.load(audio_path)
# Basic quality metrics
rms_energy = np.sqrt(np.mean(y**2))
spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y))
# Normalized score (0-100)
energy_score = min(rms_energy * 1000, 40)  # 0-40 points
spectral_score = min(spectral_centroid / 100, 40)  # 0-40 points
clarity_score = min((1 - zero_crossing_rate) * 20, 20)  # 0-20 points
total_score = energy_score + spectral_score + clarity_score
return round(total_score, 1)
except Exception:
return 50.0  # fallback score
def update_tags_from_preset(preset_name):
if preset_name == "Custom":
return ""
return GENRE_PRESETS.get(preset_name, "")
def update_quality_preset(preset_name):
"""ν’ˆμ§ˆ 프리셋 적용"""
if preset_name not in QUALITY_PRESETS:
return (100, 15.0, "euler", 10.0, True, True)
preset = QUALITY_PRESETS[preset_name]
return (
preset.get("infer_step", 100),
preset.get("guidance_scale", 15.0),
preset.get("scheduler_type", "euler"),
preset.get("omega_scale", 10.0),
preset.get("use_erg_diffusion", True),
preset.get("use_erg_tag", True)
)
def create_enhanced_process_func(original_func):
"""κΈ°μ‘΄ ν•¨μˆ˜λ₯Ό ν–₯μƒλœ κΈ°λŠ₯으둜 λž˜ν•‘"""
def enhanced_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric,
audio2audio_enable=False, ref_audio_strength=0.5, ref_audio_input=None,
lora_name_or_path="none", multi_seed_mode="Single",
enable_smart_enhancement=True, genre_preset="Custom", **kwargs
):
print(f"🎡 Enhanced generation started")
print(f"Parameters: duration={audio_duration}, prompt='{prompt[:50]}...', multi_seed={multi_seed_mode}")
# 슀마트 ν”„λ‘¬ν”„νŠΈ ν™•μž₯
if enable_smart_enhancement and genre_preset != "Custom":
enhanced_prompt = enhance_prompt_with_genre(prompt, genre_preset)
print(f"Enhanced prompt: {enhanced_prompt[:100]}...")
else:
enhanced_prompt = prompt
# μΊμ‹œ 확인
cache_params = {
'prompt': enhanced_prompt, 'lyrics': lyrics, 'audio_duration': audio_duration,
'infer_step': infer_step, 'guidance_scale': guidance_scale
}
cached_result = generation_cache.get_cached_result(cache_params)
if cached_result:
print("Using cached result")
return cached_result
# Multi-seed generation
num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1)
print(f"Generating {num_candidates} candidates")
# Placeholder: best-of-N selection is not implemented yet, so a single
# candidate is generated regardless of num_candidates (see the
# select_best_candidate sketch after this factory for one way to rank them)
result = original_func(
audio_duration, enhanced_prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
# Cache the result
generation_cache.cache_result(cache_params, result)
print("Generation completed")
return result
return enhanced_func
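# Hedged sketch (not part of the original pipeline): a hypothetical helper the
# multi-seed placeholder above could call, assuming each candidate result is
# shaped like [audio_path, params_dict] and ranked with calculate_quality_score.
def select_best_candidate(candidates):
    """Return the candidate whose audio scores highest; fall back to the first."""
    best, best_score = None, -1.0
    for result in candidates:
        audio_path = result[0] if result else None
        score = calculate_quality_score(audio_path) if audio_path else 0.0
        if score > best_score:
            best, best_score = result, score
    return best if best is not None else (candidates[0] if candidates else None)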
def create_output_ui(task_name="Text2Music"):
# For many consumer-grade GPU devices, only one batch can be run
output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False):
input_params_json = gr.JSON(label=f"{task_name} Parameters")
# Quality info display
with gr.Row():
quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False)
generation_info = gr.Textbox(
label="Generation Info",
value="",
interactive=False,
max_lines=2
)
outputs = [output_audio1]
return outputs, input_params_json
def dump_func(*args):
"""더미 ν•¨μˆ˜ - μ‹€μ œ μŒμ•… 생성 λŒ€μ‹  둜그만 좜λ ₯"""
print(f"🎡 Dummy function called with {len(args)} arguments")
if args:
print(f"Parameters preview: duration={args[0] if len(args) > 0 else 'N/A'}, prompt='{args[1][:50] if len(args) > 1 else 'N/A'}...'")
# Fake result (a real implementation would return actual generation output)
dummy_result = [
None,  # audio file path (None means no audio is produced)
{
"prompt": args[1] if len(args) > 1 else "test",
"lyrics": args[2] if len(args) > 2 else "test lyrics",
"audio_duration": args[0] if len(args) > 0 else 30,
"status": "μ™„λ£Œ (더미 λͺ¨λ“œ - μ‹€μ œ μŒμ•… 생성 μ•ˆλ¨)",
"infer_step": args[3] if len(args) > 3 else 150,
"guidance_scale": args[4] if len(args) > 4 else 15.0,
"scheduler_type": args[5] if len(args) > 5 else "euler",
"cfg_type": args[6] if len(args) > 6 else "apg",
"omega_scale": args[7] if len(args) > 7 else 10.0,
"actual_seeds": [1234],
"guidance_interval": args[9] if len(args) > 9 else 0.5,
"guidance_interval_decay": args[10] if len(args) > 10 else 0.0,
"min_guidance_scale": args[11] if len(args) > 11 else 3.0,
"use_erg_tag": args[12] if len(args) > 12 else True,
"use_erg_lyric": args[13] if len(args) > 13 else False,
"use_erg_diffusion": args[14] if len(args) > 14 else True,
"oss_steps": [],
"guidance_scale_text": args[16] if len(args) > 16 else 0.0,
"guidance_scale_lyric": args[17] if len(args) > 17 else 0.0,
"audio2audio_enable": args[18] if len(args) > 18 else False,
"ref_audio_strength": args[19] if len(args) > 19 else 0.5,
"ref_audio_input": args[20] if len(args) > 20 else None,
"audio_path": None
}
]
return dummy_result
def create_text2music_ui(
gr,
text2music_process_func,
sample_data_func=None,
load_data_func=None,
):
# Build the enhanced process function
enhanced_process_func = create_enhanced_process_func(text2music_process_func)
with gr.Row():
with gr.Column():
# Quality & performance settings section
with gr.Group():
gr.Markdown("### ⚡ Quality & Performance Settings")
with gr.Row():
quality_preset = gr.Dropdown(
choices=list(QUALITY_PRESETS.keys()),
value="Standard",
label="Quality Preset",
scale=2
)
multi_seed_mode = gr.Dropdown(
choices=list(MULTI_SEED_OPTIONS.keys()),
value="Single",
label="Multi-Generation Mode",
scale=2,
info="Generate several times and keep the best result"
)
preset_description = gr.Textbox(
value=QUALITY_PRESETS["Standard"]["description"],
label="Description",
interactive=False,
max_lines=1
)
with gr.Row(equal_height=True):
# Tag and lyric examples below are from the AI music generation community
audio_duration = gr.Slider(
-1,
240.0,
step=0.00001,
value=-1,
label="Audio Duration",
interactive=True,
info="-1 means random duration (30 ~ 240).",
scale=7,
)
sample_bnt = gr.Button("Sample", variant="secondary", scale=1)
preview_bnt = gr.Button("🎡 Preview", variant="secondary", scale=2)
# audio2audio
with gr.Row(equal_height=True):
audio2audio_enable = gr.Checkbox(
label="Enable Audio2Audio",
value=False,
info="Check to enable Audio-to-Audio generation using a reference audio.",
elem_id="audio2audio_checkbox"
)
lora_name_or_path = gr.Dropdown(
label="Lora Name or Path",
choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
value="none",
allow_custom_value=True,
)
ref_audio_input = gr.Audio(
type="filepath",
label="Reference Audio (for Audio2Audio)",
visible=False,
elem_id="ref_audio_input",
show_download_button=True
)
ref_audio_strength = gr.Slider(
label="Refer audio strength",
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
elem_id="ref_audio_strength",
visible=False,
interactive=True,
)
def toggle_ref_audio_visibility(is_checked):
return (
gr.update(visible=is_checked, elem_id="ref_audio_input"),
gr.update(visible=is_checked, elem_id="ref_audio_strength"),
)
audio2audio_enable.change(
fn=toggle_ref_audio_visibility,
inputs=[audio2audio_enable],
outputs=[ref_audio_input, ref_audio_strength],
)
with gr.Column(scale=2):
with gr.Group():
gr.Markdown("""### 🎼 슀마트 ν”„λ‘¬ν”„νŠΈ μ‹œμŠ€ν…œ
<center>μž₯λ₯΄ 선택 μ‹œ μžλ™μœΌλ‘œ μ΅œμ ν™”λœ νƒœκ·Έκ°€ μΆ”κ°€λ©λ‹ˆλ‹€. 콀마둜 κ΅¬λΆ„ν•˜μ—¬ νƒœκ·Έλ₯Ό μž…λ ₯ν•˜μ„Έμš”.</center>""")
with gr.Row():
genre_preset = gr.Dropdown(
choices=["Custom"] + list(GENRE_PRESETS.keys()),
value="Custom",
label="Genre Preset",
scale=1,
)
enable_smart_enhancement = gr.Checkbox(
label="Smart Enhancement",
value=True,
info="Automatic tag optimization",
scale=1
)
prompt = gr.Textbox(
lines=2,
label="Tags",
max_lines=4,
value=TAG_DEFAULT,
placeholder="Comma-separated tags...",
)
# [ADDED] AI lyric writing UI
with gr.Group():
gr.Markdown("""### 🤖 AI Lyric Writer
<center>Enter a theme and click the 'AI Lyrics' button to generate lyrics automatically.</center>""")
with gr.Row():
lyric_theme_input = gr.Textbox(
label="Lyric Theme",
placeholder="e.g., the thrill of first love, the pain of a breakup, a soldier leaving for the army, a hopeful tomorrow...",
scale=3,
interactive=True
)
generate_lyrics_btn = gr.Button("🤖 AI Lyrics", variant="secondary", scale=1)
# API status display
api_status = gr.Textbox(
value="✅ AI lyric writing enabled" if client_available else "❌ API key not set (export LLM_API='your-key')",
label="API Status",
interactive=False,
max_lines=1,
scale=1
)
with gr.Group():
gr.Markdown("""### πŸ“ 가사 μž…λ ₯
<center>ꡬ쑰 νƒœκ·Έ [verse], [chorus], [bridge] μ‚¬μš©μ„ ꢌμž₯ν•©λ‹ˆλ‹€.<br>[instrumental] λ˜λŠ” [inst]λ₯Ό μ‚¬μš©ν•˜λ©΄ 연주곑을 μƒμ„±ν•©λ‹ˆλ‹€.</center>""")
lyrics = gr.Textbox(
lines=9,
label="Lyrics",
max_lines=13,
value=LYRIC_DEFAULT,
placeholder="Enter lyrics. Structure tags such as [verse] and [chorus] are recommended."
)
with gr.Accordion("Basic Settings", open=False):
infer_step = gr.Slider(
minimum=1,
maximum=300,
step=1,
value=150,
label="Infer Steps",
interactive=True,
)
guidance_scale = gr.Slider(
minimum=0.0,
maximum=30.0,
step=0.1,
value=15.0,
label="Guidance Scale",
interactive=True,
info="When guidance_scale_lyric > 1 and guidance_scale_text > 1, the guidance scale will not be applied.",
)
guidance_scale_text = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Text",
interactive=True,
info="Guidance scale for text condition. It can only apply to cfg. set guidance_scale_text=5.0, guidance_scale_lyric=1.5 for start",
)
guidance_scale_lyric = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Lyric",
interactive=True,
)
manual_seeds = gr.Textbox(
label="manual seeds (default None)",
placeholder="1,2,3,4",
value=None,
info="Seed for the generation",
)
with gr.Accordion("Advanced Settings", open=False):
scheduler_type = gr.Radio(
["euler", "heun"],
value="euler",
label="Scheduler Type",
elem_id="scheduler_type",
info="Scheduler type for the generation. euler is recommended. heun will take more time.",
)
cfg_type = gr.Radio(
["cfg", "apg", "cfg_star"],
value="apg",
label="CFG Type",
elem_id="cfg_type",
info="CFG type for the generation. apg is recommended. cfg and cfg_star are almost the same.",
)
use_erg_tag = gr.Checkbox(
label="use ERG for tag",
value=True,
info="Use Entropy Rectifying Guidance for tag. It will multiple a temperature to the attention to make a weaker tag condition and make better diversity.",
)
use_erg_lyric = gr.Checkbox(
label="use ERG for lyric",
value=False,
info="The same but apply to lyric encoder's attention.",
)
use_erg_diffusion = gr.Checkbox(
label="use ERG for diffusion",
value=True,
info="The same but apply to diffusion model's attention.",
)
omega_scale = gr.Slider(
minimum=-100.0,
maximum=100.0,
step=0.1,
value=10.0,
label="Granularity Scale",
interactive=True,
info="Granularity scale for the generation. Higher values can reduce artifacts",
)
guidance_interval = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
label="Guidance Interval",
interactive=True,
info="Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)",
)
guidance_interval_decay = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.0,
label="Guidance Interval Decay",
interactive=True,
info="Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.",
)
min_guidance_scale = gr.Slider(
minimum=0.0,
maximum=200.0,
step=0.1,
value=3.0,
label="Min Guidance Scale",
interactive=True,
info="Min guidance scale for guidance interval decay's end scale",
)
oss_steps = gr.Textbox(
label="OSS Steps",
placeholder="16, 29, 52, 96, 129, 158, 172, 183, 189, 200",
value=None,
info="Optimal Steps for the generation. But not test well",
)
text2music_bnt = gr.Button("🎡 Generate Music", variant="primary", size="lg")
# [ADDED] AI lyric generation event handler
def handle_ai_lyrics_generation(theme, genre):
"""Handle clicks on the AI Lyrics button"""
print(f"🤖 AI Lyrics button clicked: theme='{theme}', genre='{genre}'")
if not theme or theme.strip() == "":
return "⚠️ Please enter a theme for the lyrics!"
try:
generated_lyrics = generate_lyrics_with_ai(theme, genre)
return generated_lyrics
except Exception as e:
print(f"μž‘μ‚¬ 생성 쀑 였λ₯˜: {e}")
return f"❌ μž‘μ‚¬ 생성 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}"
generate_lyrics_btn.click(
fn=handle_ai_lyrics_generation,
inputs=[lyric_theme_input, genre_preset],
outputs=[lyrics]
)
# Wire up event handlers after all UI elements are defined
genre_preset.change(
fn=update_tags_from_preset,
inputs=[genre_preset],
outputs=[prompt]
)
quality_preset.change(
fn=lambda x: QUALITY_PRESETS.get(x, {}).get("description", ""),
inputs=[quality_preset],
outputs=[preset_description]
)
quality_preset.change(
fn=update_quality_preset,
inputs=[quality_preset],
outputs=[infer_step, guidance_scale, scheduler_type, omega_scale, use_erg_diffusion, use_erg_tag]
)
with gr.Column():
outputs, input_params_json = create_output_ui()
# Real-time preview feature
def generate_preview(prompt, lyrics, genre_preset):
"""Generate a 10-second preview"""
preview_params = {
"audio_duration": 10,
"infer_step": 50,
"guidance_scale": 12.0,
"scheduler_type": "euler",
"cfg_type": "apg",
"omega_scale": 5.0,
}
enhanced_prompt = enhance_prompt_with_genre(prompt, genre_preset) if genre_preset != "Custom" else prompt
try:
# A real implementation would switch to a fast generation mode here
result = enhanced_process_func(
preview_params["audio_duration"],
enhanced_prompt,
lyrics[:200],  # use only part of the lyrics
preview_params["infer_step"],
preview_params["guidance_scale"],
preview_params["scheduler_type"],
preview_params["cfg_type"],
preview_params["omega_scale"],
None, # manual_seeds
0.5, # guidance_interval
0.0, # guidance_interval_decay
3.0, # min_guidance_scale
True, # use_erg_tag
False, # use_erg_lyric
True, # use_erg_diffusion
None, # oss_steps
0.0, # guidance_scale_text
0.0, # guidance_scale_lyric
multi_seed_mode="Single"
)
return result[0] if result else None
except Exception as e:
print(f"Preview generation failed: {e}")
return None  # avoid feeding an error string into the Audio component
preview_bnt.click(
fn=generate_preview,
inputs=[prompt, lyrics, genre_preset],
outputs=[outputs[0]]
)
with gr.Tab("retake"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="retake seeds (default None)", placeholder="", value=None
)
retake_bnt = gr.Button("Retake", variant="primary")
retake_outputs, retake_input_params_json = create_output_ui("Retake")
def retake_process_func(json_data, retake_variance, retake_seeds):
return enhanced_process_func(
json_data.get("audio_duration", 30),
json_data.get("prompt", ""),
json_data.get("lyrics", ""),
json_data.get("infer_step", 100),
json_data.get("guidance_scale", 15.0),
json_data.get("scheduler_type", "euler"),
json_data.get("cfg_type", "apg"),
json_data.get("omega_scale", 10.0),
retake_seeds,
json_data.get("guidance_interval", 0.5),
json_data.get("guidance_interval_decay", 0.0),
json_data.get("min_guidance_scale", 3.0),
json_data.get("use_erg_tag", True),
json_data.get("use_erg_lyric", False),
json_data.get("use_erg_diffusion", True),
json_data.get("oss_steps", None),
json_data.get("guidance_scale_text", 0.0),
json_data.get("guidance_scale_lyric", 0.0),
audio2audio_enable=json_data.get("audio2audio_enable", False),
ref_audio_strength=json_data.get("ref_audio_strength", 0.5),
ref_audio_input=json_data.get("ref_audio_input", None),
lora_name_or_path=json_data.get("lora_name_or_path", "none"),
multi_seed_mode="Best of 3",  # retake automatically uses multi-generation
retake_variance=retake_variance,
task="retake"
)
retake_bnt.click(
fn=retake_process_func,
inputs=[
input_params_json,
retake_variance,
retake_seeds,
],
outputs=retake_outputs + [retake_input_params_json],
)
with gr.Tab("repainting"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="repaint seeds (default None)", placeholder="", value=None
)
repaint_start = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Repaint Start Time",
interactive=True,
)
repaint_end = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Repaint End Time",
interactive=True,
)
repaint_source = gr.Radio(
["text2music", "last_repaint", "upload"],
value="text2music",
label="Repaint Source",
elem_id="repaint_source",
)
repaint_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="repaint_source_audio_upload",
show_download_button=True,
)
repaint_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="repaint_source_audio_upload"
),
inputs=[repaint_source],
outputs=[repaint_source_audio_upload],
)
repaint_bnt = gr.Button("Repaint", variant="primary")
repaint_outputs, repaint_input_params_json = create_output_ui("Repaint")
def repaint_process_func(
text2music_json_data,
repaint_json_data,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if repaint_source == "upload":
src_audio_path = repaint_source_audio_upload
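# Note: librosa 0.10 deprecated get_duration's `filename=` keyword in favor of
# `path=`; on newer librosa versions this call may need to be updated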
audio_duration = librosa.get_duration(filename=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif repaint_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif repaint_source == "last_repaint":
json_data = repaint_json_data
src_audio_path = json_data["audio_path"]
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=retake_seeds,
retake_variance=retake_variance,
task="repaint",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
repaint_bnt.click(
fn=repaint_process_func,
inputs=[
input_params_json,
repaint_input_params_json,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=repaint_outputs + [repaint_input_params_json],
)
with gr.Tab("edit"):
edit_prompt = gr.Textbox(lines=2, label="Edit Tags", max_lines=4)
edit_lyrics = gr.Textbox(lines=9, label="Edit Lyrics", max_lines=13)
retake_seeds = gr.Textbox(
label="edit seeds (default None)", placeholder="", value=None
)
edit_type = gr.Radio(
["only_lyrics", "remix"],
value="only_lyrics",
label="Edit Type",
elem_id="edit_type",
info="`only_lyrics` will keep the whole song the same except lyrics difference. Make your diffrence smaller, e.g. one lyrc line change.\nremix can change the song melody and genre",
)
edit_n_min = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.6,
label="edit_n_min",
interactive=True,
)
edit_n_max = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=1.0,
label="edit_n_max",
interactive=True,
)
def edit_type_change_func(edit_type):
if edit_type == "only_lyrics":
n_min = 0.6
n_max = 1.0
elif edit_type == "remix":
n_min = 0.2
n_max = 0.4
return n_min, n_max
edit_type.change(
edit_type_change_func,
inputs=[edit_type],
outputs=[edit_n_min, edit_n_max],
)
edit_source = gr.Radio(
["text2music", "last_edit", "upload"],
value="text2music",
label="Edit Source",
elem_id="edit_source",
)
edit_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="edit_source_audio_upload",
show_download_button=True,
)
edit_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="edit_source_audio_upload"
),
inputs=[edit_source],
outputs=[edit_source_audio_upload],
)
edit_bnt = gr.Button("Edit", variant="primary")
edit_outputs, edit_input_params_json = create_output_ui("Edit")
def edit_process_func(
text2music_json_data,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
):
if edit_source == "upload":
src_audio_path = edit_source_audio_upload
audio_duration = librosa.get_duration(filename=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif edit_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif edit_source == "last_edit":
json_data = edit_input_params_json
src_audio_path = json_data["audio_path"]
if not edit_prompt:
edit_prompt = prompt
if not edit_lyrics:
edit_lyrics = lyrics
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
task="edit",
src_audio_path=src_audio_path,
edit_target_prompt=edit_prompt,
edit_target_lyrics=edit_lyrics,
edit_n_min=edit_n_min,
edit_n_max=edit_n_max,
retake_seeds=retake_seeds,
lora_name_or_path="none"
)
edit_bnt.click(
fn=edit_process_func,
inputs=[
input_params_json,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
],
outputs=edit_outputs + [edit_input_params_json],
)
with gr.Tab("extend"):
extend_seeds = gr.Textbox(
label="extend seeds (default None)", placeholder="", value=None
)
left_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Left Extend Length",
interactive=True,
)
right_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Right Extend Length",
interactive=True,
)
extend_source = gr.Radio(
["text2music", "last_extend", "upload"],
value="text2music",
label="Extend Source",
elem_id="extend_source",
)
extend_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="extend_source_audio_upload",
show_download_button=True,
)
extend_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="extend_source_audio_upload"
),
inputs=[extend_source],
outputs=[extend_source_audio_upload],
)
extend_bnt = gr.Button("Extend", variant="primary")
extend_outputs, extend_input_params_json = create_output_ui("Extend")
def extend_process_func(
text2music_json_data,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if extend_source == "upload":
src_audio_path = extend_source_audio_upload
# get audio duration
audio_duration = librosa.get_duration(filename=src_audio_path)
json_data = {"audio_duration": audio_duration}
elif extend_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif extend_source == "last_extend":
json_data = extend_input_params_json
src_audio_path = json_data["audio_path"]
repaint_start = -left_extend_length
repaint_end = json_data["audio_duration"] + right_extend_length
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=extend_seeds,
retake_variance=1.0,
task="extend",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
extend_bnt.click(
fn=extend_process_func,
inputs=[
input_params_json,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=extend_outputs + [extend_input_params_json],
)
def json2output(json_data):
return (
json_data["audio_duration"],
json_data["prompt"],
json_data["lyrics"],
json_data["infer_step"],
json_data["guidance_scale"],
json_data["scheduler_type"],
json_data["cfg_type"],
json_data["omega_scale"],
", ".join(map(str, json_data["actual_seeds"])),
json_data["guidance_interval"],
json_data["guidance_interval_decay"],
json_data["min_guidance_scale"],
json_data["use_erg_tag"],
json_data["use_erg_lyric"],
json_data["use_erg_diffusion"],
", ".join(map(str, json_data["oss_steps"])),
json_data.get("guidance_scale_text", 0.0),
json_data.get("guidance_scale_lyric", 0.0),
json_data.get("audio2audio_enable", False),
json_data.get("ref_audio_strength", 0.5),
json_data.get("ref_audio_input", None),
)
def sample_data(lora_name_or_path_):
if sample_data_func:
json_data = sample_data_func(lora_name_or_path_)
return json2output(json_data)
# No sample function provided: leave all 21 bound output components unchanged
return tuple(gr.update() for _ in range(21))
sample_bnt.click(
sample_data,
inputs=[lora_name_or_path],
outputs=[
audio_duration,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
audio2audio_enable,
ref_audio_strength,
ref_audio_input,
],
)
# Main generate button event (uses the enhanced function)
text2music_bnt.click(
fn=enhanced_process_func,
inputs=[
audio_duration,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
audio2audio_enable,
ref_audio_strength,
ref_audio_input,
lora_name_or_path,
multi_seed_mode,
enable_smart_enhancement,
genre_preset
],
outputs=outputs + [input_params_json],
)
def create_main_demo_ui(
text2music_process_func=dump_func,
sample_data_func=dump_func,
load_data_func=dump_func,
):
with gr.Blocks(
title="ACE-Step Model 1.0 DEMO - Enhanced with AI Lyrics",
theme=gr.themes.Soft(),
css="""
.gradio-container {
max-width: 1200px !important;
}
.quality-info {
background: linear-gradient(45deg, #f0f8ff, #e6f3ff);
padding: 10px;
border-radius: 8px;
margin: 5px 0;
}
.ai-lyrics-section {
background: linear-gradient(45deg, #f0fff0, #e6ffe6);
padding: 15px;
border-radius: 10px;
margin: 10px 0;
border: 2px solid #90EE90;
}
"""
) as demo:
gr.Markdown(
"""
<h1 style="text-align: center;">🎡 ACE-Step PRO with AI Lyrics</h1>
<div style="text-align: center; margin: 20px;">
<p><strong>πŸš€ μƒˆλ‘œμš΄ κΈ°λŠ₯:</strong> πŸ€– AI μž‘μ‚¬ | ν’ˆμ§ˆ 프리셋 | 닀쀑 생성 | 슀마트 ν”„λ‘¬ν”„νŠΈ | μ‹€μ‹œκ°„ 프리뷰 | ν’ˆμ§ˆ 점수</p>
<p>
<a href="https://ace-step.github.io/" target='_blank'>Project</a> |
<a href="https://huggingface.co/ACE-Step/ACE-Step-v1-3.5B">Checkpoints</a> |
<a href="https://discord.gg/rjAZz2xBdG" target='_blank'>Discord</a>
</p>
</div>
"""
)
# Usage guide
with gr.Accordion("📖 Usage Guide", open=False):
gr.Markdown("""
### 🎯 λΉ λ₯Έ μ‹œμž‘
1. **πŸ€– AI μž‘μ‚¬**: 주제λ₯Ό μž…λ ₯ν•˜κ³  'AI μž‘μ‚¬' λ²„νŠΌμ„ ν΄λ¦­ν•˜λ©΄ μžλ™μœΌλ‘œ 가사가 μƒμ„±λ©λ‹ˆλ‹€
2. **μž₯λ₯΄ 선택**: μ›ν•˜λŠ” μŒμ•… μž₯λ₯΄λ₯Ό μ„ νƒν•˜λ©΄ μžλ™μœΌλ‘œ μ΅œμ ν™”λœ νƒœκ·Έκ°€ μ μš©λ©λ‹ˆλ‹€
3. **ν’ˆμ§ˆ μ„€μ •**: Draft(빠름) β†’ Standard(ꢌμž₯) β†’ High Quality β†’ Ultra 쀑 선택
4. **닀쀑 생성**: "Best of 3/5/10" μ„ νƒν•˜λ©΄ μ—¬λŸ¬ 번 μƒμ„±ν•˜μ—¬ 졜고 ν’ˆμ§ˆμ„ μžλ™ μ„ νƒν•©λ‹ˆλ‹€
5. **프리뷰**: 전체 생성 μ „ 10초 ν”„λ¦¬λ·°λ‘œ λΉ λ₯΄κ²Œ 확인할 수 μžˆμŠ΅λ‹ˆλ‹€
### πŸ€– AI μž‘μ‚¬ κΈ°λŠ₯
- **λ‹€κ΅­μ–΄ 지원**: ν•œκ΅­μ–΄, μ˜μ–΄ λ“± μž…λ ₯ 언어와 λ™μΌν•œ μ–Έμ–΄λ‘œ 가사 생성
- **주제 μ˜ˆμ‹œ**: "μ²«μ‚¬λž‘μ˜ μ„€λ ˜", "μ΄λ³„μ˜ μ•„ν””", "κ΅°λŒ€κ°€λŠ” λ‚¨μžμ˜ ν•œμˆ¨", "희망찬 내일"
- **ꡬ쑰 νƒœκ·Έ**: [verse], [chorus], [bridge] νƒœκ·Έκ°€ μžλ™μœΌλ‘œ ν¬ν•¨λ©λ‹ˆλ‹€
- **μž₯λ₯΄ 연동**: μ„ νƒν•œ μž₯λ₯΄μ— λ§žλŠ” μŠ€νƒ€μΌμ˜ 가사가 μƒμ„±λ©λ‹ˆλ‹€
### πŸ’‘ ν’ˆμ§ˆ ν–₯상 팁
- **κ³ ν’ˆμ§ˆ 생성**: "High Quality" + "Best of 5" μ‘°ν•© μΆ”μ²œ
- **λΉ λ₯Έ ν…ŒμŠ€νŠΈ**: "Draft" + "프리뷰" κΈ°λŠ₯ ν™œμš©
- **μž₯λ₯΄ νŠΉν™”**: μž₯λ₯΄ 프리셋 선택 ν›„ "슀마트 ν–₯상" 체크
- **가사 ꡬ쑰**: [verse], [chorus], [bridge] νƒœκ·Έ 적극 ν™œμš©
### βš™οΈ API μ„€μ •
AI μž‘μ‚¬ κΈ°λŠ₯을 μ‚¬μš©ν•˜λ €λ©΄ ν™˜κ²½λ³€μˆ˜μ— OpenAI API ν‚€λ₯Ό μ„€μ •ν•΄μ•Ό ν•©λ‹ˆλ‹€:
```bash
export LLM_API="your-openai-api-key"
```
""")
with gr.Tab("🎡 Enhanced Text2Music with AI Lyrics"):
create_text2music_ui(
gr=gr,
text2music_process_func=text2music_process_func,
sample_data_func=sample_data_func,
load_data_func=load_data_func,
)
return demo
if __name__ == "__main__":
print("πŸš€ ACE-Step PRO with AI Lyrics μ‹œμž‘ 쀑...")
# API ν‚€ μƒνƒœ 확인
if client_available:
print("βœ… OpenAI API μ‚¬μš© κ°€λŠ₯ - AI μž‘μ‚¬ κΈ°λŠ₯ ν™œμ„±ν™”λ¨")
else:
print("❌ OpenAI API μ‚¬μš© λΆˆκ°€ - ν™˜κ²½λ³€μˆ˜λ₯Ό ν™•μΈν•˜μ„Έμš”")
print("μ„€μ • 방법: export LLM_API='your-openai-api-key'")
demo = create_main_demo_ui()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=True  # create a public share link
)