import gradio as gr
import librosa
import os
import random
import hashlib
import numpy as np
import json
from typing import Dict, List, Tuple, Optional

# [MODIFIED] Adjusted how the OpenAI library is initialized: read the key from
# either LLM_API or OPENAI_API_KEY and remember whether the client is usable.
try:
    import openai

    api_key = os.getenv("LLM_API") or os.getenv("OPENAI_API_KEY")
    if api_key:
        openai.api_key = api_key
        client_available = True
        print("✅ OpenAI API client initialized successfully")
    else:
        client_available = False
        print("⚠️ Warning: No OpenAI API key found. AI lyrics generation will be disabled.")
except Exception as e:
    client_available = False
    print(f"❌ Warning: Failed to initialize OpenAI client: {e}")

# ─── Added right below the openai initialization block ───
from packaging import version


def _chat_completion(**kwargs):
    """Abstract the ChatCompletion call over old/new OpenAI SDK versions.

    The v1 SDK moved chat completions to ``openai.chat.completions.create``;
    earlier releases used ``openai.ChatCompletion.create``.  Dispatch on the
    installed version so either works.
    """
    if version.parse(openai.__version__) >= version.parse("1.0.0"):
        # v1-style API
        return openai.chat.completions.create(**kwargs)
    else:
        # legacy (<1.0) style API
        return openai.ChatCompletion.create(**kwargs)


TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic, duet, male and female vocals"

LYRIC_DEFAULT = """[verse - male]
Neon lights they flicker bright
City hums in dead of night
Rhythms pulse through concrete veins
Lost in echoes of refrains

[verse - female]
Bassline groovin' in my chest
Heartbeats match the city's zest
Electric whispers fill the air
Synthesized dreams everywhere

[chorus - duet]
Turn it up and let it flow
Feel the fire let it grow
In this rhythm we belong
Hear the night sing out our song

[verse - male]
Guitar strings they start to weep
Wake the soul from silent sleep
Every note a story told
In this night we're bold and gold

[bridge - female]
Voices blend in harmony
Lost in pure cacophony
Timeless echoes timeless cries
Soulful shouts beneath the skies

[verse - duet]
Keyboard dances on the keys
Melodies on evening breeze
Catch the tune and hold it tight
In this moment we take flight
"""

# Extended genre presets (original tags plus improved ones).
GENRE_PRESETS = {
    "Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, polished vocals, radio-ready, commercial, layered vocals",
    "Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, powerful vocals, raw vocals, power chords, driving rhythm",
    "Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, rhythmic vocals, trap beats, punchy drums",
    "Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, twangy vocals, storytelling, americana",
    "EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic",
    "Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, smooth vocals, offbeat rhythm, island vibes",
    "Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony",
    "Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, crooning vocals, swing feel, sophisticated",
    "Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, powerful vocals, distorted, powerful",
    "R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, silky vocals, smooth production, neo-soul",
    "K-Pop": "k-pop, synth, bass, drums, 128 bpm, catchy, energetic, polished, mixed vocals, electronic elements, danceable",
    "Ballad": "ballad, piano, strings, acoustic guitar, 70 bpm, emotional, heartfelt, romantic, expressive vocals, orchestral arrangement",
}

# Song style options (Korean labels are user-facing UI values).
SONG_STYLES = {
    "듀엣 (남녀 혼성)": "duet, male and female vocals, harmonious, call and response",
    "솔로 (남성)": "solo, male vocals, powerful voice",
    "솔로 (여성)": "solo, female vocals, emotional voice",
    "그룹 (혼성)": "group vocals, mixed gender, layered harmonies",
    "합창": "choir, multiple voices, choral arrangement",
    "랩/힙합": "rap vocals, rhythmic flow, urban style",
    "인스트루멘탈": "instrumental, no vocals",
}

# System prompt for the AI lyric writer (Korean text is part of the prompt contract).
LYRIC_SYSTEM_PROMPT = """너는 노래 가사를 작사하는 전문가 역할이다. 이용자가 입력하는 주제와 스타일에 따라 관련된 노래 가사를 작성하라.

가사 작성 규칙:
1. 구조 태그는 반드시 "[ ]"로 구분한다
2. 사용 가능한 구조 태그: [verse], [chorus], [bridge], [intro], [outro], [pre-chorus]
3. 듀엣인 경우 [verse - male], [verse - female], [chorus - duet] 형식으로 파트를 명시한다
4. 입력 언어와 동일한 언어로 가사를 작성한다
5. 각 구조는 4-8줄 정도로 작성한다
6. 음악 장르와 분위기에 맞는 가사를 작성한다

예시 형식:
[verse - male]
첫 번째 구절 가사
두 번째 구절 가사
...

[chorus - duet]
후렴구 가사
...
"""


def generate_lyrics_with_ai(prompt: str, genre: str, song_style: str) -> str:
    """Generate lyrics with the OpenAI API.

    Falls back to ``LYRIC_DEFAULT`` when the client is unavailable, the prompt
    is empty, or the API call fails.  For the instrumental style a fixed
    structure-tag skeleton is returned without calling the API.
    """
    print(f"🎵 generate_lyrics_with_ai called with: prompt='{prompt}', genre='{genre}', style='{song_style}'")

    # [MODIFIED] Check client_available and route through the SDK-agnostic helper.
    if not client_available:
        print("❌ OpenAI client not available, returning default lyrics")
        return LYRIC_DEFAULT

    if not prompt or prompt.strip() == "":
        print("⚠️ Empty prompt, returning default lyrics")
        return LYRIC_DEFAULT

    try:
        # Detect the requested vocal style and add matching instructions.
        style_info = ""
        if "듀엣" in song_style:
            style_info = "남녀 듀엣 형식으로 파트를 나누어 작성해주세요. [verse - male], [verse - female], [chorus - duet] 형식을 사용하세요."
        elif "솔로 (남성)" in song_style:
            style_info = "남성 솔로 가수를 위한 가사를 작성해주세요."
        elif "솔로 (여성)" in song_style:
            style_info = "여성 솔로 가수를 위한 가사를 작성해주세요."
        elif "그룹" in song_style:
            style_info = "그룹이 부르는 형식으로 파트를 나누어 작성해주세요."
        elif "인스트루멘탈" in song_style:
            # Instrumental: no lyrics needed, return structure tags only.
            return "[instrumental]\n\n[inst]\n\n[instrumental break]\n\n[inst]"

        user_prompt = f"""
주제: {prompt}
장르: {genre}
스타일: {style_info}

위 정보를 바탕으로 노래 가사를 작성해주세요. 입력된 언어와 동일한 언어로 작성하고, 구조 태그를 반드시 포함해주세요.
"""

        print(f"📝 Sending request to OpenAI...")

        # [MODIFIED] Use the version-agnostic chat-completion wrapper.
        response = _chat_completion(
            model="gpt-4.1-mini",
            messages=[
                {"role": "system", "content": LYRIC_SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
            temperature=0.8,
            max_tokens=1000,
        )

        generated_lyrics = response.choices[0].message.content
        print(f"✅ Generated lyrics successfully")
        return generated_lyrics

    except Exception as e:
        print(f"❌ AI 가사 생성 오류: {e}")
        return LYRIC_DEFAULT


# Quality preset system: maps a preset name to generation parameters.
QUALITY_PRESETS = {
    "Draft (Fast)": {
        "infer_step": 50,
        "guidance_scale": 10.0,
        "scheduler_type": "euler",
        "omega_scale": 5.0,
        "use_erg_diffusion": False,
        "use_erg_tag": True,
        "description": "빠른 초안 생성 (1-2분)",
    },
    "Standard": {
        "infer_step": 150,
        "guidance_scale": 15.0,
        "scheduler_type": "euler",
        "omega_scale": 10.0,
        "use_erg_diffusion": True,
        "use_erg_tag": True,
        "description": "표준 품질 (3-5분)",
    },
    "High Quality": {
        "infer_step": 200,
        "guidance_scale": 18.0,
        "scheduler_type": "heun",
        "omega_scale": 15.0,
        "use_erg_diffusion": True,
        "use_erg_tag": True,
        "description": "고품질 생성 (8-12분)",
    },
    "Ultra (Best)": {
        "infer_step": 299,
        "guidance_scale": 20.0,
        "scheduler_type": "heun",
        "omega_scale": 20.0,
        "use_erg_diffusion": True,
        "use_erg_tag": True,
        "description": "최고 품질 (15-20분)",
    },
}

# Multi-seed generation settings: label -> number of candidate generations.
MULTI_SEED_OPTIONS = {
    "Single": 1,
    "Best of 3": 3,
    "Best of 5": 5,
    "Best of 10": 10,
}


class MusicGenerationCache:
    """Small in-memory cache for generation results.

    Keys are derived from the parameters that materially affect the output;
    the cache is bounded and evicts the oldest entry (insertion order) when
    full.
    """

    def __init__(self):
        self.cache = {}
        self.max_cache_size = 50

    def get_cache_key(self, params):
        """Hash only the parameters that matter for output identity."""
        key_params = {
            k: v
            for k, v in params.items()
            if k in ['prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration']
        }
        return hashlib.md5(str(sorted(key_params.items())).encode()).hexdigest()[:16]

    def get_cached_result(self, params):
        """Return a cached result for *params* or None."""
        key = self.get_cache_key(params)
        return self.cache.get(key)

    def cache_result(self, params, result):
        """Store *result*; evict the oldest entry when at capacity."""
        if len(self.cache) >= self.max_cache_size:
            oldest_key = next(iter(self.cache))
            del self.cache[oldest_key]
        key = self.get_cache_key(params)
        self.cache[key] = result


# Global cache instance shared by all generations.
generation_cache = MusicGenerationCache()


def enhance_prompt_with_genre(base_prompt: str, genre: str, song_style: str) -> str:
    """Smart prompt expansion based on the selected genre and vocal style.

    Appends genre-specific enhancement tags (when a known, non-"Custom" genre
    is selected) and the style tags from ``SONG_STYLES``.
    """
    enhanced_prompt = base_prompt

    if genre != "Custom" and genre:
        # Per-genre extra enhancement tags.
        genre_enhancements = {
            "Modern Pop": ["polished production", "mainstream appeal", "hook-driven"],
            "Rock": ["guitar-driven", "powerful drums", "energetic performance"],
            "Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"],
            "Country": ["acoustic warmth", "storytelling melody", "authentic feel"],
            "EDM": ["electronic atmosphere", "build-ups", "dance-friendly"],
            "Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"],
            "Classical": ["orchestral depth", "musical sophistication", "timeless beauty"],
            "Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"],
            "Metal": ["aggressive energy", "powerful sound", "intense atmosphere"],
            "R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"],
            "K-Pop": ["catchy hooks", "dynamic arrangement", "polished production"],
            "Ballad": ["emotional depth", "slow tempo", "heartfelt delivery"],
        }
        if genre in genre_enhancements:
            additional_tags = ", ".join(genre_enhancements[genre])
            enhanced_prompt = f"{base_prompt}, {additional_tags}"

    # Append vocal-style tags.
    if song_style in SONG_STYLES:
        style_tags = SONG_STYLES[song_style]
        enhanced_prompt = f"{enhanced_prompt}, {style_tags}"

    return enhanced_prompt


def calculate_quality_score(audio_path: str) -> float:
    """Simple audio quality score in [0, 100].

    Combines RMS energy, spectral centroid, and zero-crossing rate into a
    rough heuristic (a real implementation would use richer metrics).
    Returns 50.0 when the file cannot be analyzed.
    """
    try:
        y, sr = librosa.load(audio_path)

        # Basic quality metrics.
        rms_energy = np.sqrt(np.mean(y**2))
        spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
        zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y))

        # Normalized sub-scores (sum caps at 100).
        energy_score = min(rms_energy * 1000, 40)          # 0-40 points
        spectral_score = min(spectral_centroid / 100, 40)  # 0-40 points
        clarity_score = min((1 - zero_crossing_rate) * 20, 20)  # 0-20 points

        total_score = energy_score + spectral_score + clarity_score
        return round(total_score, 1)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        return 50.0  # default


def update_quality_preset(preset_name):
    """Return the parameter tuple for a quality preset (with safe defaults)."""
    if preset_name not in QUALITY_PRESETS:
        return (100, 15.0, "euler", 10.0, True, True)

    preset = QUALITY_PRESETS[preset_name]
    return (
        preset.get("infer_step", 100),
        preset.get("guidance_scale", 15.0),
        preset.get("scheduler_type", "euler"),
        preset.get("omega_scale", 10.0),
        preset.get("use_erg_diffusion", True),
        preset.get("use_erg_tag", True),
    )


def create_enhanced_process_func(original_func):
    """Wrap the base generation function with smart-enhancement, caching and
    multi-seed best-of-N selection."""

    def enhanced_func(
        audio_duration, prompt, lyrics, infer_step, guidance_scale,
        scheduler_type, cfg_type, omega_scale, manual_seeds,
        guidance_interval, guidance_interval_decay, min_guidance_scale,
        use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
        guidance_scale_text, guidance_scale_lyric, audio2audio_enable=False,
        ref_audio_strength=0.5, ref_audio_input=None,
        lora_name_or_path="none", multi_seed_mode="Single",
        enable_smart_enhancement=True, genre_preset="Custom",
        song_style="듀엣 (남녀 혼성)", **kwargs
    ):
        # Smart prompt expansion.
        if enable_smart_enhancement:
            prompt = enhance_prompt_with_genre(prompt, genre_preset, song_style)

        # Cache lookup on the parameters that define the output.
        cache_params = {
            'prompt': prompt, 'lyrics': lyrics, 'audio_duration': audio_duration,
            'infer_step': infer_step, 'guidance_scale': guidance_scale
        }
        cached_result = generation_cache.get_cached_result(cache_params)
        if cached_result:
            return cached_result

        # Multi-seed generation.
        num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1)

        if num_candidates == 1:
            # Single generation: call the wrapped function directly.
            result = original_func(
                audio_duration, prompt, lyrics, infer_step, guidance_scale,
                scheduler_type, cfg_type, omega_scale, manual_seeds,
                guidance_interval, guidance_interval_decay, min_guidance_scale,
                use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
                guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
                ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
            )
        else:
            # Multi-seed generation: run N candidates and keep the best.
            candidates = []

            for i in range(num_candidates):
                seed = random.randint(1, 10000)

                try:
                    result = original_func(
                        audio_duration, prompt, lyrics, infer_step, guidance_scale,
                        scheduler_type, cfg_type, omega_scale, str(seed),
                        guidance_interval, guidance_interval_decay, min_guidance_scale,
                        use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
                        guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
                        ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
                    )

                    if result and len(result) > 0:
                        audio_path = result[0]  # first element is the audio file path
                        if audio_path and os.path.exists(audio_path):
                            quality_score = calculate_quality_score(audio_path)
                            candidates.append({
                                "result": result,
                                "quality_score": quality_score,
                                "seed": seed
                            })
                except Exception as e:
                    print(f"Generation {i+1} failed: {e}")
                    continue

            if candidates:
                # Pick the highest-quality candidate.
                best_candidate = max(candidates, key=lambda x: x["quality_score"])
                result = best_candidate["result"]

                # Attach quality metadata when the second element is a dict.
                if len(result) > 1 and isinstance(result[1], dict):
                    result[1]["quality_score"] = best_candidate["quality_score"]
                    result[1]["selected_seed"] = best_candidate["seed"]
                    result[1]["candidates_count"] = len(candidates)
            else:
                # All candidates failed: fall back to a single default generation.
                result = original_func(
                    audio_duration, prompt, lyrics, infer_step, guidance_scale,
                    scheduler_type, cfg_type, omega_scale, manual_seeds,
                    guidance_interval, guidance_interval_decay, min_guidance_scale,
                    use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps,
                    guidance_scale_text, guidance_scale_lyric, audio2audio_enable,
                    ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs
                )

        # Cache the final result.
        generation_cache.cache_result(cache_params, result)
        return result

    return enhanced_func


def create_output_ui(task_name="Text2Music"):
    """Build the output widgets for one task tab.

    Returns ``(outputs, input_params_json)`` where *outputs* is the list of
    audio components and *input_params_json* the JSON parameters display.
    """
    # For many consumer-grade GPU devices, only one batch can be run.
    output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")

    with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False):
        input_params_json = gr.JSON(label=f"{task_name} Parameters")

        # Quality information display.
        with gr.Row():
            quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False)
            generation_info = gr.Textbox(
                label="Generation Info",
                value="",
                interactive=False,
                max_lines=2
            )

    outputs = [output_audio1]
    return outputs, input_params_json


def dump_func(*args):
    """Debug stand-in for process functions: print args, return nothing."""
    print(args)
    return []


def create_text2music_ui(
    gr,
    text2music_process_func,
    sample_data_func=None,
    load_data_func=None,
):
    """Build the enhanced Text2Music tab UI and wire all event handlers."""
    # Wrap the raw process function with caching / multi-seed / enhancement.
    enhanced_process_func = create_enhanced_process_func(text2music_process_func)

    # Dictionary holding every UI element by name.
    ui = {}

    with gr.Row():
        with gr.Column():
            # Quality & performance settings section.
            with gr.Group():
                gr.Markdown("### ⚡ 품질 & 성능 설정")
                with gr.Row():
                    ui['quality_preset'] = gr.Dropdown(
                        choices=list(QUALITY_PRESETS.keys()),
                        value="Standard",
                        label="품질 프리셋",
                        scale=2,
                        interactive=True
                    )
                    ui['multi_seed_mode'] = gr.Dropdown(
                        choices=list(MULTI_SEED_OPTIONS.keys()),
                        value="Single",
                        label="다중 생성 모드",
                        scale=2,
                        info="여러 번 생성하여 최고 품질 선택",
                        interactive=True
                    )
                ui['preset_description'] = gr.Textbox(
                    value=QUALITY_PRESETS["Standard"]["description"],
                    label="설명",
                    interactive=False,
                    max_lines=1
                )

            with gr.Row(equal_height=True):
                ui['audio_duration'] = gr.Slider(
                    -1,
                    240.0,
                    step=0.00001,
                    value=-1,
                    label="Audio Duration",
                    interactive=True,
                    info="-1 means random duration (30 ~ 240).",
                    scale=7,
                )
                ui['random_bnt'] = gr.Button("🎲 Random", variant="secondary", scale=1)
                ui['preview_bnt'] = gr.Button("🎵 Preview", variant="secondary", scale=2)

            # audio2audio
            with gr.Row(equal_height=True):
                ui['audio2audio_enable'] = gr.Checkbox(
                    label="Enable Audio2Audio",
                    value=False,
                    info="Check to enable Audio-to-Audio generation using a reference audio.",
                    elem_id="audio2audio_checkbox"
                )
                ui['lora_name_or_path'] = gr.Dropdown(
                    label="Lora Name or Path",
                    choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"],
                    value="none",
                    allow_custom_value=True,
                )

            ui['ref_audio_input'] = gr.Audio(
                type="filepath",
                label="Reference Audio (for Audio2Audio)",
                visible=False,
                elem_id="ref_audio_input",
                show_download_button=True
            )
            ui['ref_audio_strength'] = gr.Slider(
                label="Refer audio strength",
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.5,
                elem_id="ref_audio_strength",
                visible=False,
                interactive=True,
            )

        with gr.Column(scale=2):
            with gr.Group():
                gr.Markdown("""### 🎼 스마트 프롬프트 시스템
장르와 스타일을 선택하면 자동으로 최적화된 태그가 추가됩니다.
""")
                with gr.Row():
                    ui['genre_preset'] = gr.Dropdown(
                        choices=["Custom"] + list(GENRE_PRESETS.keys()),
                        value="Custom",
                        label="장르 프리셋",
                        scale=1,
                        interactive=True
                    )
                    ui['song_style'] = gr.Dropdown(
                        choices=list(SONG_STYLES.keys()),
                        value="듀엣 (남녀 혼성)",
                        label="곡 스타일",
                        scale=1,
                        interactive=True
                    )
                    ui['enable_smart_enhancement'] = gr.Checkbox(
                        label="스마트 향상",
                        value=True,
                        info="자동 태그 최적화",
                        scale=1
                    )
                ui['prompt'] = gr.Textbox(
                    lines=2,
                    label="Tags",
                    max_lines=4,
                    value=TAG_DEFAULT,
                    placeholder="콤마로 구분된 태그들...",
                    interactive=True
                )

            with gr.Group():
                gr.Markdown("""### 📝 AI 작사 시스템
주제를 입력하고 'AI 작사' 버튼을 클릭하면 자동으로 가사가 생성됩니다.
""")
                with gr.Row():
                    ui['lyric_prompt'] = gr.Textbox(
                        label="작사 주제",
                        placeholder="예: 첫사랑의 설렘, 이별의 아픔, 희망찬 내일...",
                        scale=3,
                        interactive=True
                    )
                    ui['generate_lyrics_btn'] = gr.Button("🤖 AI 작사", variant="secondary", scale=1)
                ui['lyrics'] = gr.Textbox(
                    lines=9,
                    label="Lyrics",
                    max_lines=13,
                    value=LYRIC_DEFAULT,
                    placeholder="가사를 입력하세요. [verse], [chorus] 등의 구조 태그 사용을 권장합니다.",
                    interactive=True
                )

            with gr.Accordion("Basic Settings", open=False):
                ui['infer_step'] = gr.Slider(
                    minimum=1,
                    maximum=300,
                    step=1,
                    value=150,
                    label="Infer Steps",
                    interactive=True,
                )
                ui['guidance_scale'] = gr.Slider(
                    minimum=0.0,
                    maximum=30.0,
                    step=0.1,
                    value=15.0,
                    label="Guidance Scale",
                    interactive=True,
                    info="When guidance_scale_lyric > 1 and guidance_scale_text > 1, the guidance scale will not be applied.",
                )
                ui['guidance_scale_text'] = gr.Slider(
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,
                    label="Guidance Scale Text",
                    interactive=True,
                    info="Guidance scale for text condition. It can only apply to cfg. set guidance_scale_text=5.0, guidance_scale_lyric=1.5 for start",
                )
                ui['guidance_scale_lyric'] = gr.Slider(
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,
                    label="Guidance Scale Lyric",
                    interactive=True,
                )
                ui['manual_seeds'] = gr.Textbox(
                    label="manual seeds (default None)",
                    placeholder="1,2,3,4",
                    value=None,
                    info="Seed for the generation",
                )

            with gr.Accordion("Advanced Settings", open=False):
                ui['scheduler_type'] = gr.Radio(
                    ["euler", "heun"],
                    value="euler",
                    label="Scheduler Type",
                    elem_id="scheduler_type",
                    info="Scheduler type for the generation. euler is recommended. heun will take more time.",
                )
                ui['cfg_type'] = gr.Radio(
                    ["cfg", "apg", "cfg_star"],
                    value="apg",
                    label="CFG Type",
                    elem_id="cfg_type",
                    info="CFG type for the generation. apg is recommended. cfg and cfg_star are almost the same.",
                )
                ui['use_erg_tag'] = gr.Checkbox(
                    label="use ERG for tag",
                    value=True,
                    info="Use Entropy Rectifying Guidance for tag. It will multiple a temperature to the attention to make a weaker tag condition and make better diversity.",
                )
                ui['use_erg_lyric'] = gr.Checkbox(
                    label="use ERG for lyric",
                    value=False,
                    info="The same but apply to lyric encoder's attention.",
                )
                ui['use_erg_diffusion'] = gr.Checkbox(
                    label="use ERG for diffusion",
                    value=True,
                    info="The same but apply to diffusion model's attention.",
                )
                ui['omega_scale'] = gr.Slider(
                    minimum=-100.0,
                    maximum=100.0,
                    step=0.1,
                    value=10.0,
                    label="Granularity Scale",
                    interactive=True,
                    info="Granularity scale for the generation. Higher values can reduce artifacts",
                )
                ui['guidance_interval'] = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    step=0.01,
                    value=0.5,
                    label="Guidance Interval",
                    interactive=True,
                    info="Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)",
                )
                ui['guidance_interval_decay'] = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    step=0.01,
                    value=0.0,
                    label="Guidance Interval Decay",
                    interactive=True,
                    info="Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.",
                )
                ui['min_guidance_scale'] = gr.Slider(
                    minimum=0.0,
                    maximum=200.0,
                    step=0.1,
                    value=3.0,
                    label="Min Guidance Scale",
                    interactive=True,
                    info="Min guidance scale for guidance interval decay's end scale",
                )
                ui['oss_steps'] = gr.Textbox(
                    label="OSS Steps",
                    placeholder="16, 29, 52, 96, 129, 158, 172, 183, 189, 200",
                    value=None,
                    info="Optimal Steps for the generation. But not test well",
                )

            ui['text2music_bnt'] = gr.Button("🎵 Generate Music", variant="primary", size="lg")

        with gr.Column():
            outputs, input_params_json = create_output_ui()

    # (retake, repainting, edit, extend tabs omitted)

    # [MODIFIED] From here on, events are wired with the modern binding style
    # instead of @gr.on(...).

    # 1) Audio2Audio toggle: show/hide the reference-audio widgets.
    def _toggle_audio2audio(x):
        return (gr.update(visible=x), gr.update(visible=x))

    ui['audio2audio_enable'].change(
        fn=_toggle_audio2audio,
        inputs=[ui['audio2audio_enable']],
        outputs=[ui['ref_audio_input'], ui['ref_audio_strength']]
    )

    # 2) Genre change handler: rebuild the tag string.
    def update_tags_for_genre(genre, style):
        print(f"🎵 Genre changed: {genre}, Style: {style}")
        if genre == "Custom":
            return TAG_DEFAULT
        tags = GENRE_PRESETS.get(genre, TAG_DEFAULT)
        if style in SONG_STYLES:
            tags = f"{tags}, {SONG_STYLES[style]}"
        return tags

    ui['genre_preset'].change(
        fn=update_tags_for_genre,
        inputs=[ui['genre_preset'], ui['song_style']],
        outputs=[ui['prompt']]
    )

    # 3) Song-style change handler: rebuild the tag string.
    def update_tags_for_style(genre, style):
        print(f"🎤 Style changed: {style}, Genre: {genre}")
        if genre == "Custom":
            base_tags = TAG_DEFAULT
        else:
            base_tags = GENRE_PRESETS.get(genre, TAG_DEFAULT)
        if style in SONG_STYLES:
            return f"{base_tags}, {SONG_STYLES[style]}"
        return base_tags

    ui['song_style'].change(
        fn=update_tags_for_style,
        inputs=[ui['genre_preset'], ui['song_style']],
        outputs=[ui['prompt']]
    )

    # 4) Quality-preset change: push preset values into the sliders.
    def update_quality_settings(preset):
        print(f"⚡ Quality preset: {preset}")
        if preset not in QUALITY_PRESETS:
            return ("", 150, 15.0, "euler", 10.0, True, True)
        p = QUALITY_PRESETS[preset]
        return (
            p["description"],
            p["infer_step"],
            p["guidance_scale"],
            p["scheduler_type"],
            p["omega_scale"],
            p["use_erg_diffusion"],
            p["use_erg_tag"]
        )

    ui['quality_preset'].change(
        fn=update_quality_settings,
        inputs=[ui['quality_preset']],
        outputs=[
            ui['preset_description'], ui['infer_step'], ui['guidance_scale'],
            ui['scheduler_type'], ui['omega_scale'],
            ui['use_erg_diffusion'], ui['use_erg_tag']
        ]
    )

    # 5) AI lyric writing.
    def generate_lyrics_handler(prompt, genre, style):
        print(f"🤖 Generate lyrics: {prompt}")
        if not prompt or prompt.strip() == "":
            # In recent Gradio versions, returning a message (instead of
            # gr.Warning) after event binding is the default pattern.
            return "⚠️ 작사 주제를 입력해주세요!"
        return generate_lyrics_with_ai(prompt, genre, style)

    ui['generate_lyrics_btn'].click(
        fn=generate_lyrics_handler,
        inputs=[ui['lyric_prompt'], ui['genre_preset'], ui['song_style']],
        outputs=[ui['lyrics']]
    )

    # 6) Random button: randomize genre/theme/duration and regenerate lyrics.
    def random_generation(genre, style):
        print("🎲 Random generation")
        if genre == "Custom":
            genre = random.choice(list(GENRE_PRESETS.keys()))
        themes = ["도시의 밤", "첫사랑", "여름 해변", "가을 정취"]
        theme = random.choice(themes)
        duration = random.choice([30, 60, 90, 120])
        tags = GENRE_PRESETS.get(genre, TAG_DEFAULT)
        if style in SONG_STYLES:
            tags = f"{tags}, {SONG_STYLES[style]}"
        new_lyrics = generate_lyrics_with_ai(theme, genre, style)
        return [
            duration, tags, new_lyrics, 150, 15.0, "euler", "apg",
            10.0, str(random.randint(1, 10000)), 0.5, 0.0, 3.0,
            True, False, True, None, 0.0, 0.0,
            False, 0.5, None
        ]

    ui['random_bnt'].click(
        fn=random_generation,
        inputs=[ui['genre_preset'], ui['song_style']],
        outputs=[
            ui['audio_duration'], ui['prompt'], ui['lyrics'], ui['infer_step'],
            ui['guidance_scale'], ui['scheduler_type'], ui['cfg_type'],
            ui['omega_scale'], ui['manual_seeds'], ui['guidance_interval'],
            ui['guidance_interval_decay'], ui['min_guidance_scale'],
            ui['use_erg_tag'], ui['use_erg_lyric'], ui['use_erg_diffusion'],
            ui['oss_steps'], ui['guidance_scale_text'], ui['guidance_scale_lyric'],
            ui['audio2audio_enable'], ui['ref_audio_strength'], ui['ref_audio_input']
        ]
    )

    # 7) Main generate button.
    ui['text2music_bnt'].click(
        fn=enhanced_process_func,
        inputs=[
            ui['audio_duration'], ui['prompt'], ui['lyrics'], ui['infer_step'],
            ui['guidance_scale'], ui['scheduler_type'], ui['cfg_type'],
            ui['omega_scale'], ui['manual_seeds'], ui['guidance_interval'],
            ui['guidance_interval_decay'], ui['min_guidance_scale'],
            ui['use_erg_tag'], ui['use_erg_lyric'], ui['use_erg_diffusion'],
            ui['oss_steps'], ui['guidance_scale_text'], ui['guidance_scale_lyric'],
            ui['audio2audio_enable'], ui['ref_audio_strength'], ui['ref_audio_input'],
            ui['lora_name_or_path'], ui['multi_seed_mode'],
            ui['enable_smart_enhancement'], ui['genre_preset'], ui['song_style']
        ],
        outputs=outputs + [input_params_json]
    )

    print("✅ 이벤트 핸들러 연결 완료!")


def create_main_demo_ui(
    text2music_process_func=dump_func,
    sample_data_func=dump_func,
    load_data_func=dump_func,
):
    """Build the top-level Blocks demo (theme, CSS, guide, Text2Music tab)."""
    with gr.Blocks(
        title="ACE-Step Model 1.0 DEMO - Enhanced",
        theme=gr.themes.Soft(),
        css="""
        /* 그라디언트 배경 */
        .gradio-container {
            max-width: 1200px !important;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            min-height: 100vh;
        }
        /* 메인 컨테이너 스타일 */
        .main-container {
            background: rgba(255, 255, 255, 0.95);
            border-radius: 20px;
            padding: 30px;
            margin: 20px auto;
            box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
        }
        /* 헤더 스타일 */
        .header-title {
            background: linear-gradient(45deg, #667eea, #764ba2);
            -webkit-background-clip: text;
            -webkit-text-fill-color: transparent;
            font-size: 3em;
            font-weight: bold;
            text-align: center;
            margin-bottom: 10px;
        }
        /* 버튼 스타일 */
        .gr-button-primary {
            background: linear-gradient(45deg, #667eea, #764ba2) !important;
            border: none !important;
            color: white !important;
            font-weight: bold !important;
            transition: all 0.3s ease !important;
        }
        .gr-button-primary:hover {
            transform: translateY(-2px);
            box-shadow: 0 10px 20px rgba(102, 126, 234, 0.3);
        }
        .gr-button-secondary {
            background: linear-gradient(45deg, #f093fb, #f5576c) !important;
            border: none !important;
            color: white !important;
            transition: all 0.3s ease !important;
        }
        /* 그룹 스타일 */
        .gr-group {
            background: rgba(255, 255, 255, 0.8) !important;
            border: 1px solid rgba(102, 126, 234, 0.2) !important;
            border-radius: 15px !important;
            padding: 20px !important;
            margin: 10px 0 !important;
            backdrop-filter: blur(10px) !important;
        }
        /* 탭 스타일 */
        .gr-tab {
            background: rgba(255, 255, 255, 0.9) !important;
            border-radius: 10px !important;
            padding: 15px !important;
        }
        /* 입력 필드 스타일 */
        .gr-textbox, .gr-dropdown, .gr-slider {
            border: 2px solid rgba(102, 126, 234, 0.3) !important;
            border-radius: 10px !important;
            transition: all 0.3s ease !important;
        }
        .gr-textbox:focus, .gr-dropdown:focus {
            border-color: #667eea !important;
            box-shadow: 0 0 10px rgba(102, 126, 234, 0.2) !important;
        }
        /* 품질 정보 스타일 */
        .quality-info {
            background: linear-gradient(135deg, #f093fb20, #f5576c20);
            padding: 15px;
            border-radius: 10px;
            margin: 10px 0;
            border: 1px solid rgba(240, 147, 251, 0.3);
        }
        /* 애니메이션 */
        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(20px); }
            to { opacity: 1; transform: translateY(0); }
        }
        .gr-row, .gr-column {
            animation: fadeIn 0.5s ease-out;
        }
        /* 스크롤바 스타일 */
        ::-webkit-scrollbar {
            width: 10px;
        }
        ::-webkit-scrollbar-track {
            background: rgba(255, 255, 255, 0.1);
            border-radius: 10px;
        }
        ::-webkit-scrollbar-thumb {
            background: linear-gradient(45deg, #667eea, #764ba2);
            border-radius: 10px;
        }
        /* 마크다운 스타일 */
        .gr-markdown {
            color: #4a5568 !important;
        }
        .gr-markdown h3 {
            color: #667eea !important;
            font-weight: 600 !important;
            margin: 15px 0 !important;
        }
        """
    ) as demo:
        with gr.Column(elem_classes="main-container"):
            gr.HTML(
                """

🎵 ACE-Step PRO

🚀 새로운 기능: AI 작사 | 품질 프리셋 | 다중 생성 | 스마트 프롬프트 | 실시간 프리뷰

📄 Project | 🤗 Checkpoints | 💬 Discord

"""
            )

            # Usage guide accordion.
            with gr.Accordion("📖 사용법 가이드", open=False):
                gr.Markdown("""
### 🎯 빠른 시작
1. **장르 & 스타일 선택**: 원하는 음악 장르와 곡 스타일(듀엣, 솔로 등)을 선택합니다
2. **AI 작사**: 주제를 입력하고 'AI 작사' 버튼으로 자동 가사를 생성합니다
3. **품질 설정**: Draft(빠름) → Standard(권장) → High Quality → Ultra 중 선택
4. **다중 생성**: "Best of 3/5/10" 선택하면 여러 번 생성하여 최고 품질을 자동 선택합니다
5. **프리뷰**: 전체 생성 전 10초 프리뷰로 빠르게 확인할 수 있습니다

### 💡 품질 향상 팁
- **고품질 생성**: "High Quality" + "Best of 5" 조합 추천
- **빠른 테스트**: "Draft" + "프리뷰" 기능 활용
- **장르 특화**: 장르 프리셋 선택 후 "스마트 향상" 체크
- **가사 구조**: [verse], [chorus], [bridge] 태그 적극 활용
- **다국어 지원**: 한국어로 주제를 입력하면 한국어 가사가 생성됩니다

### ⚠️ OpenAI API 설정
AI 작사 기능을 사용하려면 환경변수에 OpenAI API 키를 설정해야 합니다:
```bash
export LLM_API="your-openai-api-key"
# 또는
export OPENAI_API_KEY="your-openai-api-key"
```
""")

            with gr.Tab("🎵 Enhanced Text2Music", elem_classes="gr-tab"):
                create_text2music_ui(
                    gr=gr,
                    text2music_process_func=text2music_process_func,
                    sample_data_func=sample_data_func,
                    load_data_func=load_data_func,
                )
    return demo


if __name__ == "__main__":
    print("🚀 ACE-Step PRO 시작 중...")
    demo = create_main_demo_ui()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,     # share link
        ssr_mode=False  # ← disable SSR
    )