"""
ACE-Step: A Step Towards Music Generation Foundation Model
https://github.com/ace-step/ACE-Step
Apache 2.0 License
"""
import gradio as gr
import librosa
import os
import random
import hashlib
import numpy as np
import json
from typing import Dict, List, Tuple, Optional
# [ADDED] OpenAI API setup
try:
from openai import OpenAI
api_key = os.getenv("LLM_API")
if api_key:
client = OpenAI(api_key=api_key)
client_available = True
print("✅ OpenAI API client initialized successfully")
else:
client = None
client_available = False
print("⚠️ Warning: No OpenAI API key found. AI lyrics generation will be disabled.")
print("Set environment variable: export LLM_API='your-openai-api-key'")
except Exception as e:
client = None
client_available = False
print(f"❌ Warning: Failed to initialize OpenAI client: {e}")
TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic"
LYRIC_DEFAULT = """[verse]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains
[verse]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere
[chorus]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song
[verse]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold
[bridge]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies
[verse]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
"""
# [ADDED] AI 작사 시스템 프롬프트
LYRIC_SYSTEM_PROMPT = """You are an expert song lyricist. Write song lyrics that match the theme the user provides. Mark the song structure with "[ ]" tags, following the example below.
Example:
[verse]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains
[verse]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere
[chorus]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song
[verse]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold
[bridge]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies
[verse]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
Rules:
1. Always use structure tags such as [verse], [chorus], and [bridge]
2. Write the lyrics in the same language as the input
3. Keep each section to roughly 4-8 lines
4. Write rhythmic, rhyming lyrics that fit the theme and mood"""
# [ADDED] AI lyric generation function
def generate_lyrics_with_ai(theme: str, genre: Optional[str] = None) -> str:
"""Generate theme-based lyrics with the AI model."""
print(f"🎵 AI lyric generation started: theme='{theme}', genre='{genre}'")
if not client_available or client is None:
print("❌ OpenAI client not available, returning default lyrics")
return LYRIC_DEFAULT
if not theme or theme.strip() == "":
print("⚠️ Empty theme, returning default lyrics")
return LYRIC_DEFAULT
try:
# Append genre information to the prompt when available
user_prompt = f"Write song lyrics on the following theme: {theme}"
if genre and genre != "Custom":
user_prompt += f"\nGenre: {genre}"
print("📝 Calling the OpenAI API...")
# [MODIFIED] The suggested API call was converted to the standard form:
# client.chat.completions.create is used rather than client.responses.create
response = client.chat.completions.create(
model="gpt-4o-mini", # gpt-4.1-mini는 존재하지 않는 모델명이므로 gpt-4o-mini 사용
messages=[
{
"role": "system",
"content": LYRIC_SYSTEM_PROMPT
},
{
"role": "user",
"content": user_prompt
}
],
temperature=0.8,
max_tokens=1500,
top_p=1
)
generated_lyrics = response.choices[0].message.content
print(f"✅ AI 작사 완료")
print(f"생성된 가사 미리보기: {generated_lyrics[:100]}...")
return generated_lyrics
except Exception as e:
print(f"❌ AI 작사 생성 오류: {e}")
import traceback
print(f"상세 오류: {traceback.format_exc()}")
return LYRIC_DEFAULT
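# [ADDED] Usage sketch (illustrative values): generate_lyrics_with_ai("first love", "Modern Pop")
# returns structure-tagged lyrics, or LYRIC_DEFAULT if the API is unavailable or the call fails.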
# Extended genre presets (original tags plus refinements)
GENRE_PRESETS = {
"Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, female vocals, polished vocals, radio-ready, commercial, layered vocals",
"Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, male vocals, raw vocals, power chords, driving rhythm",
"Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, male vocals, rhythmic vocals, trap beats, punchy drums",
"Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, male vocals, twangy vocals, storytelling, americana",
"EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic",
"Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, male vocals, smooth vocals, offbeat rhythm, island vibes",
"Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony",
"Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, male vocals, crooning vocals, swing feel, sophisticated",
"Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, male vocals, screamed vocals, distorted, powerful",
"R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, female vocals, silky vocals, smooth production, neo-soul"
}
# Quality preset system
QUALITY_PRESETS = {
"Draft (Fast)": {
"infer_step": 50,
"guidance_scale": 10.0,
"scheduler_type": "euler",
"omega_scale": 5.0,
"use_erg_diffusion": False,
"use_erg_tag": True,
"description": "빠른 초안 생성 (1-2분)"
},
"Standard": {
"infer_step": 150,
"guidance_scale": 15.0,
"scheduler_type": "euler",
"omega_scale": 10.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "표준 품질 (3-5분)"
},
"High Quality": {
"infer_step": 200,
"guidance_scale": 18.0,
"scheduler_type": "heun",
"omega_scale": 15.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "고품질 생성 (8-12분)"
},
"Ultra (Best)": {
"infer_step": 299,
"guidance_scale": 20.0,
"scheduler_type": "heun",
"omega_scale": 20.0,
"use_erg_diffusion": True,
"use_erg_tag": True,
"description": "최고 품질 (15-20분)"
}
}
# Multi-seed generation settings
MULTI_SEED_OPTIONS = {
"Single": 1,
"Best of 3": 3,
"Best of 5": 5,
"Best of 10": 10
}
class MusicGenerationCache:
"""생성 결과 캐싱 시스템"""
def __init__(self):
self.cache = {}
self.max_cache_size = 50
def get_cache_key(self, params):
# Hash only the parameters that matter for the output
key_params = {k: v for k, v in params.items()
if k in ['prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration']}
return hashlib.md5(str(sorted(key_params.items())).encode()).hexdigest()[:16]
def get_cached_result(self, params):
key = self.get_cache_key(params)
return self.cache.get(key)
def cache_result(self, params, result):
if len(self.cache) >= self.max_cache_size:
oldest_key = next(iter(self.cache))
del self.cache[oldest_key]
key = self.get_cache_key(params)
self.cache[key] = result
# Global cache instance
generation_cache = MusicGenerationCache()
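# [ADDED] Hedged usage sketch for the cache above. The parameter values are
# made up for illustration; only the five keys hashed by get_cache_key matter.
def _cache_usage_example():
    params = {"prompt": "lo-fi, 80 bpm", "lyrics": "[inst]",
              "infer_step": 150, "guidance_scale": 15.0, "audio_duration": 30}
    if generation_cache.get_cached_result(params) is None:  # miss on first call
        generation_cache.cache_result(params, ["/tmp/out.wav", {"status": "done"}])
    return generation_cache.get_cached_result(params)  # hit: returns the stored result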
def enhance_prompt_with_genre(base_prompt: str, genre: str) -> str:
"""장르에 따른 스마트 프롬프트 확장"""
if genre == "Custom" or not genre:
return base_prompt
# Extra enhancement tags per genre
genre_enhancements = {
"Modern Pop": ["polished production", "mainstream appeal", "hook-driven"],
"Rock": ["guitar-driven", "powerful drums", "energetic performance"],
"Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"],
"Country": ["acoustic warmth", "storytelling melody", "authentic feel"],
"EDM": ["electronic atmosphere", "build-ups", "dance-friendly"],
"Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"],
"Classical": ["orchestral depth", "musical sophistication", "timeless beauty"],
"Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"],
"Metal": ["aggressive energy", "powerful sound", "intense atmosphere"],
"R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"]
}
if genre in genre_enhancements:
additional_tags = ", ".join(genre_enhancements[genre])
return f"{base_prompt}, {additional_tags}"
return base_prompt
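# [ADDED] Example (values taken directly from the tables above):
#   enhance_prompt_with_genre("dreamy, 90 bpm", "Jazz")
#   -> "dreamy, 90 bpm, musical complexity, improvisational spirit, sophisticated harmony"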
def calculate_quality_score(audio_path: str) -> float:
"""간단한 품질 점수 계산 (실제 구현에서는 더 복잡한 메트릭 사용)"""
try:
y, sr = librosa.load(audio_path)
# Basic quality metrics
rms_energy = np.sqrt(np.mean(y**2))
spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y))
# Normalized score (0-100)
energy_score = min(rms_energy * 1000, 40)  # 0-40 points
spectral_score = min(spectral_centroid / 100, 40)  # 0-40 points
clarity_score = min((1 - zero_crossing_rate) * 20, 20)  # 0-20 points
total_score = energy_score + spectral_score + clarity_score
return round(total_score, 1)
except Exception:
return 50.0  # default score
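# [ADDED] Hedged sketch: rank candidates from a multi-seed run by the heuristic
# score above and keep the best one. The function name and list-of-paths input
# are assumptions; nothing in this file calls it yet (see the multi-seed
# placeholder in create_enhanced_process_func below).
def select_best_candidate(audio_paths: List[str]) -> Tuple[str, float]:
    """Return the (path, score) pair with the highest quality score."""
    scored = [(path, calculate_quality_score(path)) for path in audio_paths]
    return max(scored, key=lambda item: item[1])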
def update_tags_from_preset(preset_name):
if preset_name == "Custom":
return ""
return GENRE_PRESETS.get(preset_name, "")
def update_quality_preset(preset_name):
"""품질 프리셋 적용"""
if preset_name not in QUALITY_PRESETS:
return (100, 15.0, "euler", 10.0, True, True)
preset = QUALITY_PRESETS[preset_name]
return (
preset.get("infer_step", 100),
preset.get("guidance_scale", 15.0),
preset.get("scheduler_type", "euler"),
preset.get("omega_scale", 10.0),
preset.get("use_erg_diffusion", True),
preset.get("use_erg_tag", True)
)
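# [ADDED] Example (values from QUALITY_PRESETS above):
#   update_quality_preset("Draft (Fast)") -> (50, 10.0, "euler", 5.0, False, True)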
def create_enhanced_process_func(original_func):
"""기존 함수를 향상된 기능으로 래핑"""
def enhanced_func(
audio_duration, prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric,
audio2audio_enable=False, ref_audio_strength=0.5, ref_audio_input=None,
lora_name_or_path="none", multi_seed_mode="Single",
enable_smart_enhancement=True, genre_preset="Custom", **kwargs
):
print(f"🎵 Enhanced generation started")
print(f"Parameters: duration={audio_duration}, prompt='{prompt[:50]}...', multi_seed={multi_seed_mode}")
# Smart prompt expansion
if enable_smart_enhancement and genre_preset != "Custom":
enhanced_prompt = enhance_prompt_with_genre(prompt, genre_preset)
print(f"Enhanced prompt: {enhanced_prompt[:100]}...")
else:
enhanced_prompt = prompt
# Check the cache first
cache_params = {
'prompt': enhanced_prompt, 'lyrics': lyrics, 'audio_duration': audio_duration,
'infer_step': infer_step, 'guidance_scale': guidance_scale
}
cached_result = generation_cache.get_cached_result(cache_params)
if cached_result:
print("Using cached result")
return cached_result
# Multi-seed generation
num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1)
print(f"Generating {num_candidates} candidates")
if num_candidates == 1:
# Call the original function
result = original_func(
audio_duration, enhanced_prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
else:
# Temporary stand-in for multi-seed generation (see the note below)
result = original_func(
audio_duration, enhanced_prompt, lyrics, infer_step, guidance_scale,
scheduler_type, cfg_type, omega_scale, manual_seeds,
guidance_interval, guidance_interval_decay, min_guidance_scale,
use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
)
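# [ADDED] A full best-of-N implementation would loop num_candidates
# times with different seeds, score each output with
# calculate_quality_score(), and keep the winner (e.g. via the
# select_best_candidate sketch above). The single call here is a
# placeholder until that loop is wired up.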
# Cache the result
generation_cache.cache_result(cache_params, result)
print(f"Generation completed")
return result
return enhanced_func
def create_output_ui(task_name="Text2Music"):
# For many consumer-grade GPU devices, only one batch can be run
output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False):
input_params_json = gr.JSON(label=f"{task_name} Parameters")
# Quality info display (display-only; not returned to callers)
with gr.Row():
quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False)
generation_info = gr.Textbox(
label="Generation Info",
value="",
interactive=False,
max_lines=2
)
outputs = [output_audio1]
return outputs, input_params_json
def dump_func(*args):
"""더미 함수 - 실제 음악 생성 대신 로그만 출력"""
print(f"🎵 Dummy function called with {len(args)} arguments")
if args:
print(f"Parameters preview: duration={args[0] if len(args) > 0 else 'N/A'}, prompt='{args[1][:50] if len(args) > 1 else 'N/A'}...'")
# Return a fake result (a real implementation returns actual generated audio)
dummy_result = [
None,  # audio file path (None means no audio is produced)
{
"prompt": args[1] if len(args) > 1 else "test",
"lyrics": args[2] if len(args) > 2 else "test lyrics",
"audio_duration": args[0] if len(args) > 0 else 30,
"status": "완료 (더미 모드 - 실제 음악 생성 안됨)",
"infer_step": args[3] if len(args) > 3 else 150,
"guidance_scale": args[4] if len(args) > 4 else 15.0,
"scheduler_type": args[5] if len(args) > 5 else "euler",
"cfg_type": args[6] if len(args) > 6 else "apg",
"omega_scale": args[7] if len(args) > 7 else 10.0,
"actual_seeds": [1234],
"guidance_interval": args[9] if len(args) > 9 else 0.5,
"guidance_interval_decay": args[10] if len(args) > 10 else 0.0,
"min_guidance_scale": args[11] if len(args) > 11 else 3.0,
"use_erg_tag": args[12] if len(args) > 12 else True,
"use_erg_lyric": args[13] if len(args) > 13 else False,
"use_erg_diffusion": args[14] if len(args) > 14 else True,
"oss_steps": [],
"guidance_scale_text": args[16] if len(args) > 16 else 0.0,
"guidance_scale_lyric": args[17] if len(args) > 17 else 0.0,
"audio2audio_enable": args[18] if len(args) > 18 else False,
"ref_audio_strength": args[19] if len(args) > 19 else 0.5,
"ref_audio_input": args[20] if len(args) > 20 else None,
"audio_path": None
}
]
return dummy_result
def create_text2music_ui(
gr,
text2music_process_func,
sample_data_func=None,
load_data_func=None,
):
# Build the enhanced process function
enhanced_process_func = create_enhanced_process_func(text2music_process_func)
with gr.Row():
with gr.Column():
# Quality & performance settings section
with gr.Group():
gr.Markdown("### ⚡ Quality & Performance Settings")
with gr.Row():
quality_preset = gr.Dropdown(
choices=list(QUALITY_PRESETS.keys()),
value="Standard",
label="품질 프리셋",
scale=2
)
multi_seed_mode = gr.Dropdown(
choices=list(MULTI_SEED_OPTIONS.keys()),
value="Single",
label="다중 생성 모드",
scale=2,
info="여러 번 생성하여 최고 품질 선택"
)
preset_description = gr.Textbox(
value=QUALITY_PRESETS["Standard"]["description"],
label="설명",
interactive=False,
max_lines=1
)
with gr.Row(equal_height=True):
# Tag and lyric examples below are from the AI music generation community
audio_duration = gr.Slider(
-1,
240.0,
step=0.00001,
value=-1,
label="Audio Duration",
interactive=True,
info="-1 means random duration (30 ~ 240).",
scale=7,
)
sample_bnt = gr.Button("Sample", variant="secondary", scale=1)
preview_bnt = gr.Button("🎵 Preview", variant="secondary", scale=2)
# audio2audio
with gr.Row(equal_height=True):
audio2audio_enable = gr.Checkbox(
label="Enable Audio2Audio",
value=False,
info="Check to enable Audio-to-Audio generation using a reference audio.",
elem_id="audio2audio_checkbox"
)
lora_name_or_path = gr.Dropdown(
label="Lora Name or Path",
choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
value="none",
allow_custom_value=True,
)
ref_audio_input = gr.Audio(
type="filepath",
label="Reference Audio (for Audio2Audio)",
visible=False,
elem_id="ref_audio_input",
show_download_button=True
)
ref_audio_strength = gr.Slider(
label="Refer audio strength",
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
elem_id="ref_audio_strength",
visible=False,
interactive=True,
)
def toggle_ref_audio_visibility(is_checked):
return (
gr.update(visible=is_checked, elem_id="ref_audio_input"),
gr.update(visible=is_checked, elem_id="ref_audio_strength"),
)
audio2audio_enable.change(
fn=toggle_ref_audio_visibility,
inputs=[audio2audio_enable],
outputs=[ref_audio_input, ref_audio_strength],
)
with gr.Column(scale=2):
with gr.Group():
gr.Markdown("""### 🎼 스마트 프롬프트 시스템
장르 선택 시 자동으로 최적화된 태그가 추가됩니다. 콤마로 구분하여 태그를 입력하세요.""")
with gr.Row():
genre_preset = gr.Dropdown(
choices=["Custom"] + list(GENRE_PRESETS.keys()),
value="Custom",
label="장르 프리셋",
scale=1,
)
enable_smart_enhancement = gr.Checkbox(
label="스마트 향상",
value=True,
info="자동 태그 최적화",
scale=1
)
prompt = gr.Textbox(
lines=2,
label="Tags",
max_lines=4,
value=TAG_DEFAULT,
placeholder="콤마로 구분된 태그들...",
)
# [ADDED] AI lyric writing UI
with gr.Group():
gr.Markdown("""### 🤖 AI Lyric Writer
Enter a theme and click the 'AI Lyrics' button to generate lyrics automatically.""")
with gr.Row():
lyric_theme_input = gr.Textbox(
label="작사 주제",
placeholder="예: 첫사랑의 설렘, 이별의 아픔, 군대가는 남자의 한숨, 희망찬 내일...",
scale=3,
interactive=True
)
generate_lyrics_btn = gr.Button("🤖 AI Lyrics", variant="secondary", scale=1)
# API status display
api_status = gr.Textbox(
value="✅ AI lyric writing enabled" if client_available else "❌ API key not set (export LLM_API='your-key')",
label="API Status",
interactive=False,
max_lines=1,
scale=1
)
with gr.Group():
gr.Markdown("""### 📝 가사 입력
구조 태그 [verse], [chorus], [bridge] 사용을 권장합니다.
[instrumental] 또는 [inst]를 사용하면 연주곡을 생성합니다.""")
lyrics = gr.Textbox(
lines=9,
label="Lyrics",
max_lines=13,
value=LYRIC_DEFAULT,
placeholder="가사를 입력하세요. [verse], [chorus] 등의 구조 태그 사용을 권장합니다."
)
with gr.Accordion("Basic Settings", open=False):
infer_step = gr.Slider(
minimum=1,
maximum=300,
step=1,
value=150,
label="Infer Steps",
interactive=True,
)
guidance_scale = gr.Slider(
minimum=0.0,
maximum=30.0,
step=0.1,
value=15.0,
label="Guidance Scale",
interactive=True,
info="When guidance_scale_lyric > 1 and guidance_scale_text > 1, the guidance scale will not be applied.",
)
guidance_scale_text = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Text",
interactive=True,
info="Guidance scale for text condition. It can only apply to cfg. set guidance_scale_text=5.0, guidance_scale_lyric=1.5 for start",
)
guidance_scale_lyric = gr.Slider(
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
label="Guidance Scale Lyric",
interactive=True,
)
manual_seeds = gr.Textbox(
label="manual seeds (default None)",
placeholder="1,2,3,4",
value=None,
info="Seed for the generation",
)
with gr.Accordion("Advanced Settings", open=False):
scheduler_type = gr.Radio(
["euler", "heun"],
value="euler",
label="Scheduler Type",
elem_id="scheduler_type",
info="Scheduler type for the generation. euler is recommended. heun will take more time.",
)
cfg_type = gr.Radio(
["cfg", "apg", "cfg_star"],
value="apg",
label="CFG Type",
elem_id="cfg_type",
info="CFG type for the generation. apg is recommended. cfg and cfg_star are almost the same.",
)
use_erg_tag = gr.Checkbox(
label="use ERG for tag",
value=True,
info="Use Entropy Rectifying Guidance for tag. It will multiple a temperature to the attention to make a weaker tag condition and make better diversity.",
)
use_erg_lyric = gr.Checkbox(
label="use ERG for lyric",
value=False,
info="The same but apply to lyric encoder's attention.",
)
use_erg_diffusion = gr.Checkbox(
label="use ERG for diffusion",
value=True,
info="The same but apply to diffusion model's attention.",
)
omega_scale = gr.Slider(
minimum=-100.0,
maximum=100.0,
step=0.1,
value=10.0,
label="Granularity Scale",
interactive=True,
info="Granularity scale for the generation. Higher values can reduce artifacts",
)
guidance_interval = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.5,
label="Guidance Interval",
interactive=True,
info="Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)",
)
guidance_interval_decay = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.0,
label="Guidance Interval Decay",
interactive=True,
info="Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.",
)
min_guidance_scale = gr.Slider(
minimum=0.0,
maximum=200.0,
step=0.1,
value=3.0,
label="Min Guidance Scale",
interactive=True,
info="Min guidance scale for guidance interval decay's end scale",
)
oss_steps = gr.Textbox(
label="OSS Steps",
placeholder="16, 29, 52, 96, 129, 158, 172, 183, 189, 200",
value=None,
info="Optimal Steps for the generation. But not test well",
)
text2music_bnt = gr.Button("🎵 Generate Music", variant="primary", size="lg")
# [ADDED] AI lyric event handler
def handle_ai_lyrics_generation(theme, genre):
"""Handle a click on the AI lyrics button."""
print(f"🤖 AI lyrics button clicked: theme='{theme}', genre='{genre}'")
if not theme or theme.strip() == "":
return "⚠️ Please enter a lyric theme!"
try:
generated_lyrics = generate_lyrics_with_ai(theme, genre)
return generated_lyrics
except Exception as e:
print(f"작사 생성 중 오류: {e}")
return f"❌ 작사 생성 중 오류가 발생했습니다: {str(e)}"
generate_lyrics_btn.click(
fn=handle_ai_lyrics_generation,
inputs=[lyric_theme_input, genre_preset],
outputs=[lyrics]
)
# Wire up event handlers after all UI elements are defined
genre_preset.change(
fn=update_tags_from_preset,
inputs=[genre_preset],
outputs=[prompt]
)
quality_preset.change(
fn=lambda x: QUALITY_PRESETS.get(x, {}).get("description", ""),
inputs=[quality_preset],
outputs=[preset_description]
)
quality_preset.change(
fn=update_quality_preset,
inputs=[quality_preset],
outputs=[infer_step, guidance_scale, scheduler_type, omega_scale, use_erg_diffusion, use_erg_tag]
)
with gr.Column():
outputs, input_params_json = create_output_ui()
# Real-time preview feature
def generate_preview(prompt, lyrics, genre_preset):
"""Generate a 10-second preview."""
preview_params = {
"audio_duration": 10,
"infer_step": 50,
"guidance_scale": 12.0,
"scheduler_type": "euler",
"cfg_type": "apg",
"omega_scale": 5.0,
}
enhanced_prompt = enhance_prompt_with_genre(prompt, genre_preset) if genre_preset != "Custom" else prompt
try:
# A real implementation would use a fast generation mode
result = enhanced_process_func(
preview_params["audio_duration"],
enhanced_prompt,
lyrics[:200],  # use only part of the lyrics
preview_params["infer_step"],
preview_params["guidance_scale"],
preview_params["scheduler_type"],
preview_params["cfg_type"],
preview_params["omega_scale"],
None, # manual_seeds
0.5, # guidance_interval
0.0, # guidance_interval_decay
3.0, # min_guidance_scale
True, # use_erg_tag
False, # use_erg_lyric
True, # use_erg_diffusion
None, # oss_steps
0.0, # guidance_scale_text
0.0, # guidance_scale_lyric
multi_seed_mode="Single"
)
return result[0] if result else None
except Exception as e:
print(f"Preview generation failed: {e}")
return None  # gr.Audio expects a file path, not an error string
preview_bnt.click(
fn=generate_preview,
inputs=[prompt, lyrics, genre_preset],
outputs=[outputs[0]]
)
with gr.Tab("retake"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="retake seeds (default None)", placeholder="", value=None
)
retake_bnt = gr.Button("Retake", variant="primary")
retake_outputs, retake_input_params_json = create_output_ui("Retake")
def retake_process_func(json_data, retake_variance, retake_seeds):
return enhanced_process_func(
json_data.get("audio_duration", 30),
json_data.get("prompt", ""),
json_data.get("lyrics", ""),
json_data.get("infer_step", 100),
json_data.get("guidance_scale", 15.0),
json_data.get("scheduler_type", "euler"),
json_data.get("cfg_type", "apg"),
json_data.get("omega_scale", 10.0),
retake_seeds,
json_data.get("guidance_interval", 0.5),
json_data.get("guidance_interval_decay", 0.0),
json_data.get("min_guidance_scale", 3.0),
json_data.get("use_erg_tag", True),
json_data.get("use_erg_lyric", False),
json_data.get("use_erg_diffusion", True),
json_data.get("oss_steps", None),
json_data.get("guidance_scale_text", 0.0),
json_data.get("guidance_scale_lyric", 0.0),
audio2audio_enable=json_data.get("audio2audio_enable", False),
ref_audio_strength=json_data.get("ref_audio_strength", 0.5),
ref_audio_input=json_data.get("ref_audio_input", None),
lora_name_or_path=json_data.get("lora_name_or_path", "none"),
multi_seed_mode="Best of 3", # retake는 자동으로 다중 생성
retake_variance=retake_variance,
task="retake"
)
retake_bnt.click(
fn=retake_process_func,
inputs=[
input_params_json,
retake_variance,
retake_seeds,
],
outputs=retake_outputs + [retake_input_params_json],
)
with gr.Tab("repainting"):
retake_variance = gr.Slider(
minimum=0.0, maximum=1.0, step=0.01, value=0.2, label="variance"
)
retake_seeds = gr.Textbox(
label="repaint seeds (default None)", placeholder="", value=None
)
repaint_start = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Repaint Start Time",
interactive=True,
)
repaint_end = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Repaint End Time",
interactive=True,
)
repaint_source = gr.Radio(
["text2music", "last_repaint", "upload"],
value="text2music",
label="Repaint Source",
elem_id="repaint_source",
)
repaint_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="repaint_source_audio_upload",
show_download_button=True,
)
repaint_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="repaint_source_audio_upload"
),
inputs=[repaint_source],
outputs=[repaint_source_audio_upload],
)
repaint_bnt = gr.Button("Repaint", variant="primary")
repaint_outputs, repaint_input_params_json = create_output_ui("Repaint")
def repaint_process_func(
text2music_json_data,
repaint_json_data,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if repaint_source == "upload":
src_audio_path = repaint_source_audio_upload
audio_duration = librosa.get_duration(path=src_audio_path)  # librosa >= 0.10 uses 'path', not 'filename'
json_data = {"audio_duration": audio_duration}
elif repaint_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif repaint_source == "last_repaint":
json_data = repaint_json_data
src_audio_path = json_data["audio_path"]
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=retake_seeds,
retake_variance=retake_variance,
task="repaint",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
repaint_bnt.click(
fn=repaint_process_func,
inputs=[
input_params_json,
repaint_input_params_json,
retake_variance,
retake_seeds,
repaint_start,
repaint_end,
repaint_source,
repaint_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=repaint_outputs + [repaint_input_params_json],
)
with gr.Tab("edit"):
edit_prompt = gr.Textbox(lines=2, label="Edit Tags", max_lines=4)
edit_lyrics = gr.Textbox(lines=9, label="Edit Lyrics", max_lines=13)
retake_seeds = gr.Textbox(
label="edit seeds (default None)", placeholder="", value=None
)
edit_type = gr.Radio(
["only_lyrics", "remix"],
value="only_lyrics",
label="Edit Type",
elem_id="edit_type",
info="`only_lyrics` will keep the whole song the same except lyrics difference. Make your diffrence smaller, e.g. one lyrc line change.\nremix can change the song melody and genre",
)
edit_n_min = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.6,
label="edit_n_min",
interactive=True,
)
edit_n_max = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=1.0,
label="edit_n_max",
interactive=True,
)
def edit_type_change_func(edit_type):
if edit_type == "only_lyrics":
n_min = 0.6
n_max = 1.0
elif edit_type == "remix":
n_min = 0.2
n_max = 0.4
return n_min, n_max
edit_type.change(
edit_type_change_func,
inputs=[edit_type],
outputs=[edit_n_min, edit_n_max],
)
edit_source = gr.Radio(
["text2music", "last_edit", "upload"],
value="text2music",
label="Edit Source",
elem_id="edit_source",
)
edit_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="edit_source_audio_upload",
show_download_button=True,
)
edit_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="edit_source_audio_upload"
),
inputs=[edit_source],
outputs=[edit_source_audio_upload],
)
edit_bnt = gr.Button("Edit", variant="primary")
edit_outputs, edit_input_params_json = create_output_ui("Edit")
def edit_process_func(
text2music_json_data,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
):
if edit_source == "upload":
src_audio_path = edit_source_audio_upload
audio_duration = librosa.get_duration(path=src_audio_path)  # librosa >= 0.10 uses 'path', not 'filename'
json_data = {"audio_duration": audio_duration}
elif edit_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif edit_source == "last_edit":
json_data = edit_input_params_json
src_audio_path = json_data["audio_path"]
if not edit_prompt:
edit_prompt = prompt
if not edit_lyrics:
edit_lyrics = lyrics
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
task="edit",
src_audio_path=src_audio_path,
edit_target_prompt=edit_prompt,
edit_target_lyrics=edit_lyrics,
edit_n_min=edit_n_min,
edit_n_max=edit_n_max,
retake_seeds=retake_seeds,
lora_name_or_path="none"
)
edit_bnt.click(
fn=edit_process_func,
inputs=[
input_params_json,
edit_input_params_json,
edit_source,
edit_source_audio_upload,
prompt,
lyrics,
edit_prompt,
edit_lyrics,
edit_n_min,
edit_n_max,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds,
],
outputs=edit_outputs + [edit_input_params_json],
)
with gr.Tab("extend"):
extend_seeds = gr.Textbox(
label="extend seeds (default None)", placeholder="", value=None
)
left_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=0.0,
label="Left Extend Length",
interactive=True,
)
right_extend_length = gr.Slider(
minimum=0.0,
maximum=240.0,
step=0.01,
value=30.0,
label="Right Extend Length",
interactive=True,
)
extend_source = gr.Radio(
["text2music", "last_extend", "upload"],
value="text2music",
label="Extend Source",
elem_id="extend_source",
)
extend_source_audio_upload = gr.Audio(
label="Upload Audio",
type="filepath",
visible=False,
elem_id="extend_source_audio_upload",
show_download_button=True,
)
extend_source.change(
fn=lambda x: gr.update(
visible=x == "upload", elem_id="extend_source_audio_upload"
),
inputs=[extend_source],
outputs=[extend_source_audio_upload],
)
extend_bnt = gr.Button("Extend", variant="primary")
extend_outputs, extend_input_params_json = create_output_ui("Extend")
def extend_process_func(
text2music_json_data,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
):
if extend_source == "upload":
src_audio_path = extend_source_audio_upload
# get audio duration
audio_duration = librosa.get_duration(path=src_audio_path)  # librosa >= 0.10 uses 'path', not 'filename'
json_data = {"audio_duration": audio_duration}
elif extend_source == "text2music":
json_data = text2music_json_data
src_audio_path = json_data["audio_path"]
elif extend_source == "last_extend":
json_data = extend_input_params_json
src_audio_path = json_data["audio_path"]
repaint_start = -left_extend_length
repaint_end = json_data["audio_duration"] + right_extend_length
return enhanced_process_func(
json_data["audio_duration"],
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
retake_seeds=extend_seeds,
retake_variance=1.0,
task="extend",
repaint_start=repaint_start,
repaint_end=repaint_end,
src_audio_path=src_audio_path,
lora_name_or_path="none"
)
extend_bnt.click(
fn=extend_process_func,
inputs=[
input_params_json,
extend_input_params_json,
extend_seeds,
left_extend_length,
right_extend_length,
extend_source,
extend_source_audio_upload,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
],
outputs=extend_outputs + [extend_input_params_json],
)
def json2output(json_data):
return (
json_data["audio_duration"],
json_data["prompt"],
json_data["lyrics"],
json_data["infer_step"],
json_data["guidance_scale"],
json_data["scheduler_type"],
json_data["cfg_type"],
json_data["omega_scale"],
", ".join(map(str, json_data["actual_seeds"])),
json_data["guidance_interval"],
json_data["guidance_interval_decay"],
json_data["min_guidance_scale"],
json_data["use_erg_tag"],
json_data["use_erg_lyric"],
json_data["use_erg_diffusion"],
", ".join(map(str, json_data["oss_steps"])),
json_data.get("guidance_scale_text", 0.0),
json_data.get("guidance_scale_lyric", 0.0),
json_data.get("audio2audio_enable", False),
json_data.get("ref_audio_strength", 0.5),
json_data.get("ref_audio_input", None),
)
def sample_data(lora_name_or_path_):
if sample_data_func:
json_data = sample_data_func(lora_name_or_path_)
return json2output(json_data)
return {}
sample_bnt.click(
sample_data,
inputs=[lora_name_or_path],
outputs=[
audio_duration,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
audio2audio_enable,
ref_audio_strength,
ref_audio_input,
],
)
# Main generate button event (uses the enhanced function)
text2music_bnt.click(
fn=enhanced_process_func,
inputs=[
audio_duration,
prompt,
lyrics,
infer_step,
guidance_scale,
scheduler_type,
cfg_type,
omega_scale,
manual_seeds,
guidance_interval,
guidance_interval_decay,
min_guidance_scale,
use_erg_tag,
use_erg_lyric,
use_erg_diffusion,
oss_steps,
guidance_scale_text,
guidance_scale_lyric,
audio2audio_enable,
ref_audio_strength,
ref_audio_input,
lora_name_or_path,
multi_seed_mode,
enable_smart_enhancement,
genre_preset
],
outputs=outputs + [input_params_json],
)
def create_main_demo_ui(
text2music_process_func=dump_func,
sample_data_func=dump_func,
load_data_func=dump_func,
):
with gr.Blocks(
title="ACE-Step Model 1.0 DEMO - Enhanced with AI Lyrics",
theme=gr.themes.Soft(),
css="""
.gradio-container {
max-width: 1200px !important;
}
.quality-info {
background: linear-gradient(45deg, #f0f8ff, #e6f3ff);
padding: 10px;
border-radius: 8px;
margin: 5px 0;
}
.ai-lyrics-section {
background: linear-gradient(45deg, #f0fff0, #e6ffe6);
padding: 15px;
border-radius: 10px;
margin: 10px 0;
border: 2px solid #90EE90;
}
"""
) as demo:
gr.Markdown(
"""
🎵 ACE-Step PRO with AI Lyrics
"""
)
# Usage guide
with gr.Accordion("📖 Usage Guide", open=False):
gr.Markdown("""
### 🎯 빠른 시작
1. **🤖 AI 작사**: 주제를 입력하고 'AI 작사' 버튼을 클릭하면 자동으로 가사가 생성됩니다
2. **장르 선택**: 원하는 음악 장르를 선택하면 자동으로 최적화된 태그가 적용됩니다
3. **품질 설정**: Draft(빠름) → Standard(권장) → High Quality → Ultra 중 선택
4. **다중 생성**: "Best of 3/5/10" 선택하면 여러 번 생성하여 최고 품질을 자동 선택합니다
5. **프리뷰**: 전체 생성 전 10초 프리뷰로 빠르게 확인할 수 있습니다
### 🤖 AI 작사 기능
- **다국어 지원**: 한국어, 영어 등 입력 언어와 동일한 언어로 가사 생성
- **주제 예시**: "첫사랑의 설렘", "이별의 아픔", "군대가는 남자의 한숨", "희망찬 내일"
- **구조 태그**: [verse], [chorus], [bridge] 태그가 자동으로 포함됩니다
- **장르 연동**: 선택한 장르에 맞는 스타일의 가사가 생성됩니다
### 💡 품질 향상 팁
- **고품질 생성**: "High Quality" + "Best of 5" 조합 추천
- **빠른 테스트**: "Draft" + "프리뷰" 기능 활용
- **장르 특화**: 장르 프리셋 선택 후 "스마트 향상" 체크
- **가사 구조**: [verse], [chorus], [bridge] 태그 적극 활용
### ⚙️ API 설정
AI 작사 기능을 사용하려면 환경변수에 OpenAI API 키를 설정해야 합니다:
```bash
export LLM_API="your-openai-api-key"
```
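You can sanity-check the configuration from Python (a minimal sketch; it only confirms that the LLM_API variable is set, not that the key itself is valid):
```python
import os
print("API key set" if os.getenv("LLM_API") else "API key missing")
```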
""")
with gr.Tab("🎵 Enhanced Text2Music with AI Lyrics"):
create_text2music_ui(
gr=gr,
text2music_process_func=text2music_process_func,
sample_data_func=sample_data_func,
load_data_func=load_data_func,
)
return demo
if __name__ == "__main__":
print("🚀 ACE-Step PRO with AI Lyrics 시작 중...")
# API 키 상태 확인
if client_available:
print("✅ OpenAI API 사용 가능 - AI 작사 기능 활성화됨")
else:
print("❌ OpenAI API 사용 불가 - 환경변수를 확인하세요")
print("설정 방법: export LLM_API='your-openai-api-key'")
demo = create_main_demo_ui()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=True  # create a public share link
)