import gradio as gr import librosa import os import random import hashlib import numpy as np import json from typing import Dict, List, Tuple, Optional # [MODIFIED] OpenAI 라이브러리 사용 방식 수정 try: import openai api_key = os.getenv("LLM_API") or os.getenv("OPENAI_API_KEY") if api_key: openai.api_key = api_key client_available = True print("✅ OpenAI API client initialized successfully") else: client_available = False print("⚠️ Warning: No OpenAI API key found. AI lyrics generation will be disabled.") except Exception as e: client_available = False print(f"❌ Warning: Failed to initialize OpenAI client: {e}") # ─── openai 초기화 부분 바로 아래에 추가 ─── from packaging import version def _chat_completion(**kwargs): """SDK 버전에 맞춰 ChatCompletion 호출을 추상화""" if version.parse(openai.__version__) >= version.parse("1.0.0"): # v1 스타일 return openai.chat.completions.create(**kwargs) else: # 구버전 스타일 return openai.ChatCompletion.create(**kwargs) TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic, duet, male and female vocals" LYRIC_DEFAULT = """[verse - male] Neon lights they flicker bright City hums in dead of night Rhythms pulse through concrete veins Lost in echoes of refrains [verse - female] Bassline groovin' in my chest Heartbeats match the city's zest Electric whispers fill the air Synthesized dreams everywhere [chorus - duet] Turn it up and let it flow Feel the fire let it grow In this rhythm we belong Hear the night sing out our song [verse - male] Guitar strings they start to weep Wake the soul from silent sleep Every note a story told In this night we're bold and gold [bridge - female] Voices blend in harmony Lost in pure cacophony Timeless echoes timeless cries Soulful shouts beneath the skies [verse - duet] Keyboard dances on the keys Melodies on evening breeze Catch the tune and hold it tight In this moment we take flight """ # 확장된 장르 프리셋 (기존 + 개선된 태그) GENRE_PRESETS = { "Modern Pop": "pop, synth, drums, guitar, 120 bpm, upbeat, catchy, vibrant, polished vocals, radio-ready, commercial, layered vocals", "Rock": "rock, electric guitar, drums, bass, 130 bpm, energetic, rebellious, gritty, powerful vocals, raw vocals, power chords, driving rhythm", "Hip Hop": "hip hop, 808 bass, hi-hats, synth, 90 bpm, bold, urban, intense, rhythmic vocals, trap beats, punchy drums", "Country": "country, acoustic guitar, steel guitar, fiddle, 100 bpm, heartfelt, rustic, warm, twangy vocals, storytelling, americana", "EDM": "edm, synth, bass, kick drum, 128 bpm, euphoric, pulsating, energetic, instrumental, progressive build, festival anthem, electronic", "Reggae": "reggae, guitar, bass, drums, 80 bpm, chill, soulful, positive, smooth vocals, offbeat rhythm, island vibes", "Classical": "classical, orchestral, strings, piano, 60 bpm, elegant, emotive, timeless, instrumental, dynamic range, sophisticated harmony", "Jazz": "jazz, saxophone, piano, double bass, 110 bpm, smooth, improvisational, soulful, crooning vocals, swing feel, sophisticated", "Metal": "metal, electric guitar, double kick drum, bass, 160 bpm, aggressive, intense, heavy, powerful vocals, distorted, powerful", "R&B": "r&b, synth, bass, drums, 85 bpm, sultry, groovy, romantic, silky vocals, smooth production, neo-soul", "K-Pop": "k-pop, synth, bass, drums, 128 bpm, catchy, energetic, polished, mixed vocals, electronic elements, danceable", "Ballad": "ballad, piano, strings, acoustic guitar, 70 bpm, emotional, heartfelt, romantic, expressive vocals, orchestral arrangement" } # 곡 스타일 옵션 SONG_STYLES = { "듀엣 (남녀 혼성)": "duet, male and female vocals, harmonious, call and response", "솔로 (남성)": "solo, male vocals, powerful voice", "솔로 (여성)": "solo, female vocals, emotional voice", "그룹 (혼성)": "group vocals, mixed gender, layered harmonies", "합창": "choir, multiple voices, choral arrangement", "랩/힙합": "rap vocals, rhythmic flow, urban style", "인스트루멘탈": "instrumental, no vocals" } # AI 작사 시스템 프롬프트 LYRIC_SYSTEM_PROMPT = """너는 노래 가사를 작사하는 전문가 역할이다. 이용자가 입력하는 주제와 스타일에 따라 관련된 노래 가사를 작성하라. 가사 작성 규칙: 1. 구조 태그는 반드시 "[ ]"로 구분한다 2. 사용 가능한 구조 태그: [verse], [chorus], [bridge], [intro], [outro], [pre-chorus] 3. 듀엣인 경우 [verse - male], [verse - female], [chorus - duet] 형식으로 파트를 명시한다 4. 입력 언어와 동일한 언어로 가사를 작성한다 5. 각 구조는 4-8줄 정도로 작성한다 6. 음악 장르와 분위기에 맞는 가사를 작성한다 예시 형식: [verse - male] 첫 번째 구절 가사 두 번째 구절 가사 ... [chorus - duet] 후렴구 가사 ... """ def generate_lyrics_with_ai(prompt: str, genre: str, song_style: str) -> str: """AI를 사용하여 가사 생성""" print(f"🎵 generate_lyrics_with_ai called with: prompt='{prompt}', genre='{genre}', style='{song_style}'") # [MODIFIED] client_available 체크 + openai API 호출로 변경 if not client_available: print("❌ OpenAI client not available, returning default lyrics") return LYRIC_DEFAULT if not prompt or prompt.strip() == "": print("⚠️ Empty prompt, returning default lyrics") return LYRIC_DEFAULT try: # 언어 감지 및 스타일 정보 추가 style_info = "" if "듀엣" in song_style: style_info = "남녀 듀엣 형식으로 파트를 나누어 작성해주세요. [verse - male], [verse - female], [chorus - duet] 형식을 사용하세요." elif "솔로 (남성)" in song_style: style_info = "남성 솔로 가수를 위한 가사를 작성해주세요." elif "솔로 (여성)" in song_style: style_info = "여성 솔로 가수를 위한 가사를 작성해주세요." elif "그룹" in song_style: style_info = "그룹이 부르는 형식으로 파트를 나누어 작성해주세요." elif "인스트루멘탈" in song_style: return "[instrumental]\n\n[inst]\n\n[instrumental break]\n\n[inst]" user_prompt = f""" 주제: {prompt} 장르: {genre} 스타일: {style_info} 위 정보를 바탕으로 노래 가사를 작성해주세요. 입력된 언어와 동일한 언어로 작성하고, 구조 태그를 반드시 포함해주세요. """ print(f"📝 Sending request to OpenAI...") # [MODIFIED] openai.ChatCompletion 사용 response = _chat_completion( model="gpt-4.1-mini", messages=[ {"role": "system", "content": LYRIC_SYSTEM_PROMPT}, {"role": "user", "content": user_prompt} ], temperature=0.8, max_tokens=1000, ) generated_lyrics = response.choices[0].message.content print(f"✅ Generated lyrics successfully") return generated_lyrics except Exception as e: print(f"❌ AI 가사 생성 오류: {e}") return LYRIC_DEFAULT # 품질 프리셋 시스템 추가 QUALITY_PRESETS = { "Draft (Fast)": { "infer_step": 50, "guidance_scale": 10.0, "scheduler_type": "euler", "omega_scale": 5.0, "use_erg_diffusion": False, "use_erg_tag": True, "description": "빠른 초안 생성 (1-2분)" }, "Standard": { "infer_step": 150, "guidance_scale": 15.0, "scheduler_type": "euler", "omega_scale": 10.0, "use_erg_diffusion": True, "use_erg_tag": True, "description": "표준 품질 (3-5분)" }, "High Quality": { "infer_step": 200, "guidance_scale": 18.0, "scheduler_type": "heun", "omega_scale": 15.0, "use_erg_diffusion": True, "use_erg_tag": True, "description": "고품질 생성 (8-12분)" }, "Ultra (Best)": { "infer_step": 299, "guidance_scale": 20.0, "scheduler_type": "heun", "omega_scale": 20.0, "use_erg_diffusion": True, "use_erg_tag": True, "description": "최고 품질 (15-20분)" } } # 다중 시드 생성 설정 MULTI_SEED_OPTIONS = { "Single": 1, "Best of 3": 3, "Best of 5": 5, "Best of 10": 10 } class MusicGenerationCache: """생성 결과 캐싱 시스템""" def __init__(self): self.cache = {} self.max_cache_size = 50 def get_cache_key(self, params): # 중요한 파라미터만으로 해시 생성 key_params = {k: v for k, v in params.items() if k in ['prompt', 'lyrics', 'infer_step', 'guidance_scale', 'audio_duration']} return hashlib.md5(str(sorted(key_params.items())).encode()).hexdigest()[:16] def get_cached_result(self, params): key = self.get_cache_key(params) return self.cache.get(key) def cache_result(self, params, result): if len(self.cache) >= self.max_cache_size: oldest_key = next(iter(self.cache)) del self.cache[oldest_key] key = self.get_cache_key(params) self.cache[key] = result # 전역 캐시 인스턴스 generation_cache = MusicGenerationCache() def enhance_prompt_with_genre(base_prompt: str, genre: str, song_style: str) -> str: """장르와 스타일에 따른 스마트 프롬프트 확장""" enhanced_prompt = base_prompt if genre != "Custom" and genre: # 장르별 추가 개선 태그 genre_enhancements = { "Modern Pop": ["polished production", "mainstream appeal", "hook-driven"], "Rock": ["guitar-driven", "powerful drums", "energetic performance"], "Hip Hop": ["rhythmic flow", "urban atmosphere", "bass-heavy"], "Country": ["acoustic warmth", "storytelling melody", "authentic feel"], "EDM": ["electronic atmosphere", "build-ups", "dance-friendly"], "Reggae": ["laid-back groove", "tropical vibes", "rhythmic guitar"], "Classical": ["orchestral depth", "musical sophistication", "timeless beauty"], "Jazz": ["musical complexity", "improvisational spirit", "sophisticated harmony"], "Metal": ["aggressive energy", "powerful sound", "intense atmosphere"], "R&B": ["smooth groove", "soulful expression", "rhythmic sophistication"], "K-Pop": ["catchy hooks", "dynamic arrangement", "polished production"], "Ballad": ["emotional depth", "slow tempo", "heartfelt delivery"] } if genre in genre_enhancements: additional_tags = ", ".join(genre_enhancements[genre]) enhanced_prompt = f"{base_prompt}, {additional_tags}" # 스타일 태그 추가 if song_style in SONG_STYLES: style_tags = SONG_STYLES[song_style] enhanced_prompt = f"{enhanced_prompt}, {style_tags}" return enhanced_prompt def calculate_quality_score(audio_path: str) -> float: """간단한 품질 점수 계산 (실제 구현에서는 더 복잡한 메트릭 사용)""" try: y, sr = librosa.load(audio_path) # 기본 품질 메트릭 rms_energy = np.sqrt(np.mean(y**2)) spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr)) zero_crossing_rate = np.mean(librosa.feature.zero_crossing_rate(y)) # 정규화된 점수 (0-100) energy_score = min(rms_energy * 1000, 40) # 0-40점 spectral_score = min(spectral_centroid / 100, 40) # 0-40점 clarity_score = min((1 - zero_crossing_rate) * 20, 20) # 0-20점 total_score = energy_score + spectral_score + clarity_score return round(total_score, 1) except: return 50.0 # 기본값 def update_quality_preset(preset_name): """품질 프리셋 적용""" if preset_name not in QUALITY_PRESETS: return (100, 15.0, "euler", 10.0, True, True) preset = QUALITY_PRESETS[preset_name] return ( preset.get("infer_step", 100), preset.get("guidance_scale", 15.0), preset.get("scheduler_type", "euler"), preset.get("omega_scale", 10.0), preset.get("use_erg_diffusion", True), preset.get("use_erg_tag", True) ) def create_enhanced_process_func(original_func): """기존 함수를 향상된 기능으로 래핑""" def enhanced_func( audio_duration, prompt, lyrics, infer_step, guidance_scale, scheduler_type, cfg_type, omega_scale, manual_seeds, guidance_interval, guidance_interval_decay, min_guidance_scale, use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps, guidance_scale_text, guidance_scale_lyric, audio2audio_enable=False, ref_audio_strength=0.5, ref_audio_input=None, lora_name_or_path="none", multi_seed_mode="Single", enable_smart_enhancement=True, genre_preset="Custom", song_style="듀엣 (남녀 혼성)", **kwargs ): # 스마트 프롬프트 확장 if enable_smart_enhancement: prompt = enhance_prompt_with_genre(prompt, genre_preset, song_style) # 캐시 확인 cache_params = { 'prompt': prompt, 'lyrics': lyrics, 'audio_duration': audio_duration, 'infer_step': infer_step, 'guidance_scale': guidance_scale } cached_result = generation_cache.get_cached_result(cache_params) if cached_result: return cached_result # 다중 시드 생성 num_candidates = MULTI_SEED_OPTIONS.get(multi_seed_mode, 1) if num_candidates == 1: # 기존 함수 호출 result = original_func( audio_duration, prompt, lyrics, infer_step, guidance_scale, scheduler_type, cfg_type, omega_scale, manual_seeds, guidance_interval, guidance_interval_decay, min_guidance_scale, use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps, guidance_scale_text, guidance_scale_lyric, audio2audio_enable, ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs ) else: # 다중 시드 생성 및 최적 선택 candidates = [] for i in range(num_candidates): seed = random.randint(1, 10000) try: result = original_func( audio_duration, prompt, lyrics, infer_step, guidance_scale, scheduler_type, cfg_type, omega_scale, str(seed), guidance_interval, guidance_interval_decay, min_guidance_scale, use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps, guidance_scale_text, guidance_scale_lyric, audio2audio_enable, ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs ) if result and len(result) > 0: audio_path = result[0] # 첫 번째 결과가 오디오 파일 경로 if audio_path and os.path.exists(audio_path): quality_score = calculate_quality_score(audio_path) candidates.append({ "result": result, "quality_score": quality_score, "seed": seed }) except Exception as e: print(f"Generation {i+1} failed: {e}") continue if candidates: # 최고 품질 선택 best_candidate = max(candidates, key=lambda x: x["quality_score"]) result = best_candidate["result"] # 품질 정보 추가 if len(result) > 1 and isinstance(result[1], dict): result[1]["quality_score"] = best_candidate["quality_score"] result[1]["selected_seed"] = best_candidate["seed"] result[1]["candidates_count"] = len(candidates) else: # 모든 생성 실패시 기본 생성 result = original_func( audio_duration, prompt, lyrics, infer_step, guidance_scale, scheduler_type, cfg_type, omega_scale, manual_seeds, guidance_interval, guidance_interval_decay, min_guidance_scale, use_erg_tag, use_erg_lyric, use_erg_diffusion, oss_steps, guidance_scale_text, guidance_scale_lyric, audio2audio_enable, ref_audio_strength, ref_audio_input, lora_name_or_path, **kwargs ) # 결과 캐시 generation_cache.cache_result(cache_params, result) return result return enhanced_func def create_output_ui(task_name="Text2Music"): # For many consumer-grade GPU devices, only one batch can be run output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1") with gr.Accordion(f"{task_name} Parameters & Quality Info", open=False): input_params_json = gr.JSON(label=f"{task_name} Parameters") # 품질 정보 표시 추가 with gr.Row(): quality_score = gr.Number(label="Quality Score (0-100)", value=0, interactive=False) generation_info = gr.Textbox( label="Generation Info", value="", interactive=False, max_lines=2 ) outputs = [output_audio1] return outputs, input_params_json def dump_func(*args): print(args) return [] def create_text2music_ui( gr, text2music_process_func, sample_data_func=None, load_data_func=None, ): # 향상된 프로세스 함수 생성 enhanced_process_func = create_enhanced_process_func(text2music_process_func) # UI 요소를 저장할 딕셔너리 ui = {} with gr.Row(): with gr.Column(): # 품질 및 성능 설정 섹션 추가 with gr.Group(): gr.Markdown("### ⚡ 품질 & 성능 설정") with gr.Row(): ui['quality_preset'] = gr.Dropdown( choices=list(QUALITY_PRESETS.keys()), value="Standard", label="품질 프리셋", scale=2, interactive=True ) ui['multi_seed_mode'] = gr.Dropdown( choices=list(MULTI_SEED_OPTIONS.keys()), value="Single", label="다중 생성 모드", scale=2, info="여러 번 생성하여 최고 품질 선택", interactive=True ) ui['preset_description'] = gr.Textbox( value=QUALITY_PRESETS["Standard"]["description"], label="설명", interactive=False, max_lines=1 ) with gr.Row(equal_height=True): ui['audio_duration'] = gr.Slider( -1, 240.0, step=0.00001, value=-1, label="Audio Duration", interactive=True, info="-1 means random duration (30 ~ 240).", scale=7, ) ui['random_bnt'] = gr.Button("🎲 Random", variant="secondary", scale=1) ui['preview_bnt'] = gr.Button("🎵 Preview", variant="secondary", scale=2) # audio2audio with gr.Row(equal_height=True): ui['audio2audio_enable'] = gr.Checkbox( label="Enable Audio2Audio", value=False, info="Check to enable Audio-to-Audio generation using a reference audio.", elem_id="audio2audio_checkbox" ) ui['lora_name_or_path'] = gr.Dropdown( label="Lora Name or Path", choices=["ACE-Step/ACE-Step-v1-chinese-rap-LoRA", "none"], value="none", allow_custom_value=True, ) ui['ref_audio_input'] = gr.Audio( type="filepath", label="Reference Audio (for Audio2Audio)", visible=False, elem_id="ref_audio_input", show_download_button=True ) ui['ref_audio_strength'] = gr.Slider( label="Refer audio strength", minimum=0.0, maximum=1.0, step=0.01, value=0.5, elem_id="ref_audio_strength", visible=False, interactive=True, ) with gr.Column(scale=2): with gr.Group(): gr.Markdown("""### 🎼 스마트 프롬프트 시스템
🚀 새로운 기능: AI 작사 | 품질 프리셋 | 다중 생성 | 스마트 프롬프트 | 실시간 프리뷰