Bils committed on
Commit
cbc01c8
Β·
verified Β·
1 Parent(s): 3e34a93

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +291 -287
app.py CHANGED
@@ -13,30 +13,22 @@ from pydub import AudioSegment
13
  from dotenv import load_dotenv
14
  import tempfile
15
  import spaces
16
-
17
- # Coqui TTS
18
  from TTS.api import TTS
19
 
20
- # ---------------------------------------------------------------------
21
- # Load Environment Variables
22
- # ---------------------------------------------------------------------
23
  load_dotenv()
24
  HF_TOKEN = os.getenv("HF_TOKEN")
25
 
26
- # ---------------------------------------------------------------------
27
- # Global Model Caches
28
- # ---------------------------------------------------------------------
29
  LLAMA_PIPELINES = {}
30
  MUSICGEN_MODELS = {}
31
  TTS_MODELS = {}
32
 
33
- # ---------------------------------------------------------------------
34
- # Helper Functions
35
- # ---------------------------------------------------------------------
36
  def get_llama_pipeline(model_id: str, token: str):
37
- """
38
- Returns a cached LLaMA pipeline if available; otherwise, loads it.
39
- """
40
  if model_id in LLAMA_PIPELINES:
41
  return LLAMA_PIPELINES[model_id]
42
 
@@ -52,339 +44,351 @@ def get_llama_pipeline(model_id: str, token: str):
52
  LLAMA_PIPELINES[model_id] = text_pipeline
53
  return text_pipeline
54
 
55
-
56
  def get_musicgen_model(model_key: str = "facebook/musicgen-large"):
57
- """
58
- Returns a cached MusicGen model if available; otherwise, loads it.
59
- Uses the 'large' variant for higher quality outputs.
60
- """
61
  if model_key in MUSICGEN_MODELS:
62
  return MUSICGEN_MODELS[model_key]
63
 
64
  model = MusicgenForConditionalGeneration.from_pretrained(model_key)
65
  processor = AutoProcessor.from_pretrained(model_key)
66
-
67
  device = "cuda" if torch.cuda.is_available() else "cpu"
68
  model.to(device)
69
  MUSICGEN_MODELS[model_key] = (model, processor)
70
  return model, processor
71
 
72
-
73
  def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
74
- """
75
- Returns a cached TTS model if available; otherwise, loads it.
76
- """
77
  if model_name in TTS_MODELS:
78
  return TTS_MODELS[model_name]
79
-
80
  tts_model = TTS(model_name)
81
  TTS_MODELS[model_name] = tts_model
82
  return tts_model
83
 
84
-
85
- # ---------------------------------------------------------------------
86
- # Script Generation Function
87
- # ---------------------------------------------------------------------
88
  @spaces.GPU(duration=100)
89
  def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
90
- """
91
- Generates a script, sound design suggestions, and music ideas from a user prompt.
92
- Returns a tuple of strings: (voice_script, sound_design, music_suggestions).
93
- """
94
  try:
95
  text_pipeline = get_llama_pipeline(model_id, token)
96
-
97
- system_prompt = (
98
- "You are an expert radio imaging producer specializing in sound design and music. "
99
- f"Based on the user's concept and the selected duration of {duration} seconds, produce the following: "
100
- "1. A concise voice-over script. Prefix this section with 'Voice-Over Script:'.\n"
101
- "2. Suggestions for sound design. Prefix this section with 'Sound Design Suggestions:'.\n"
102
- "3. Music styles or track recommendations. Prefix this section with 'Music Suggestions:'."
103
- )
104
- combined_prompt = f"{system_prompt}\nUser concept: {user_prompt}\nOutput:"
105
-
106
  with torch.inference_mode():
107
  result = text_pipeline(
108
- combined_prompt,
109
- max_new_tokens=300,
110
  do_sample=True,
111
- temperature=0.8
 
112
  )
113
 
114
- generated_text = result[0]["generated_text"]
115
- if "Output:" in generated_text:
116
- generated_text = generated_text.split("Output:")[-1].strip()
117
-
118
- # Default placeholders
119
- voice_script = "No voice-over script found."
120
- sound_design = "No sound design suggestions found."
121
- music_suggestions = "No music suggestions found."
122
-
123
- # Voice-Over Script
124
- if "Voice-Over Script:" in generated_text:
125
- parts = generated_text.split("Voice-Over Script:")
126
- voice_script_part = parts[1]
127
- if "Sound Design Suggestions:" in voice_script_part:
128
- voice_script = voice_script_part.split("Sound Design Suggestions:")[0].strip()
129
- else:
130
- voice_script = voice_script_part.strip()
131
-
132
- # Sound Design
133
- if "Sound Design Suggestions:" in generated_text:
134
- parts = generated_text.split("Sound Design Suggestions:")
135
- sound_design_part = parts[1]
136
- if "Music Suggestions:" in sound_design_part:
137
- sound_design = sound_design_part.split("Music Suggestions:")[0].strip()
138
- else:
139
- sound_design = sound_design_part.strip()
140
-
141
- # Music Suggestions
142
- if "Music Suggestions:" in generated_text:
143
- parts = generated_text.split("Music Suggestions:")
144
- music_suggestions = parts[1].strip()
145
-
146
- return voice_script, sound_design, music_suggestions
147
 
148
  except Exception as e:
149
- return f"Error generating script: {e}", "", ""
150
-
151
 
152
- # ---------------------------------------------------------------------
153
- # Voice-Over Generation Function
154
- # ---------------------------------------------------------------------
155
  @spaces.GPU(duration=100)
156
- def generate_voice(script: str, tts_model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
157
- """
158
- Generates a voice-over from the provided script using the Coqui TTS model.
159
- Returns the file path to the generated .wav file.
160
- """
161
  try:
162
  if not script.strip():
163
- return "Error: No script provided."
164
-
165
  tts_model = get_tts_model(tts_model_name)
166
-
167
- # Generate and save voice
168
- output_path = os.path.join(tempfile.gettempdir(), "voice_over.wav")
169
  tts_model.tts_to_file(text=script, file_path=output_path)
170
  return output_path
171
-
172
  except Exception as e:
173
- return f"Error generating voice: {e}"
174
-
175
 
176
- # ---------------------------------------------------------------------
177
- # Music Generation Function
178
- # ---------------------------------------------------------------------
179
  @spaces.GPU(duration=100)
180
  def generate_music(prompt: str, audio_length: int):
181
- """
182
- Generates music from the 'facebook/musicgen-large' model based on the prompt.
183
- Returns the file path to the generated .wav file.
184
- """
185
  try:
186
- if not prompt.strip():
187
- return "Error: No music suggestion provided."
188
-
189
- model_key = "facebook/musicgen-large"
190
- musicgen_model, musicgen_processor = get_musicgen_model(model_key)
191
-
192
  device = "cuda" if torch.cuda.is_available() else "cpu"
193
- inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt").to(device)
194
-
195
  with torch.inference_mode():
196
- outputs = musicgen_model.generate(**inputs, max_new_tokens=audio_length)
197
-
198
  audio_data = outputs[0, 0].cpu().numpy()
199
  normalized_audio = (audio_data / max(abs(audio_data)) * 32767).astype("int16")
200
-
201
- output_path = f"{tempfile.gettempdir()}/musicgen_large_generated_music.wav"
202
  write(output_path, 44100, normalized_audio)
203
-
204
  return output_path
205
-
206
  except Exception as e:
207
- return f"Error generating music: {e}"
208
-
209
 
210
- # ---------------------------------------------------------------------
211
- # Audio Blending with Duration Sync & Ducking
212
- # ---------------------------------------------------------------------
213
  @spaces.GPU(duration=100)
214
- def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int = 10):
215
- """
216
- Blends two audio files (voice and music).
217
- 1. If music < voice, loops the music until it meets/exceeds the voice duration.
218
- 2. If music > voice, trims music to the voice duration.
219
- 3. If ducking=True, the music is attenuated by 'duck_level' dB while the voice is playing.
220
- Returns the file path to the blended .wav file.
221
- """
222
  try:
223
- if not os.path.isfile(voice_path) or not os.path.isfile(music_path):
224
- return "Error: Missing audio files for blending."
225
-
226
  voice = AudioSegment.from_wav(voice_path)
227
  music = AudioSegment.from_wav(music_path)
228
-
229
- voice_len = len(voice) # in milliseconds
230
- music_len = len(music) # in milliseconds
231
-
232
- # 1) If the music is shorter than the voice, loop it:
233
- if music_len < voice_len:
234
- looped_music = AudioSegment.empty()
235
- # Keep appending until we exceed voice length
236
- while len(looped_music) < voice_len:
237
- looped_music += music
238
- music = looped_music
239
-
240
- # 2) If the music is longer than the voice, truncate it:
241
- if len(music) > voice_len:
242
- music = music[:voice_len]
243
-
244
- # Now music and voice are the same length
245
  if ducking:
246
- # Step 1: Reduce music dB while voice is playing
247
  ducked_music = music - duck_level
248
- # Step 2: Overlay voice on top of ducked music
249
  final_audio = ducked_music.overlay(voice)
250
  else:
251
- # No ducking, just overlay
252
  final_audio = music.overlay(voice)
253
-
254
- output_path = os.path.join(tempfile.gettempdir(), "blended_output.wav")
255
  final_audio.export(output_path, format="wav")
256
  return output_path
257
-
258
  except Exception as e:
259
- return f"Error blending audio: {e}"
260
-
261
-
262
- # ---------------------------------------------------------------------
263
- # Gradio Interface
264
- # ---------------------------------------------------------------------
265
- with gr.Blocks() as demo:
266
- gr.Markdown("""
267
- # 🎧 AI Promo Studio
268
- Welcome to **AI Promo Studio**, your all-in-one solution for creating professional, engaging audio promos with minimal effort!
269
-
270
- This next-generation platform uses powerful AI models to handle:
271
- - **Script Generation**: Craft concise and impactful copy with LLaMA.
272
- - **Voice Synthesis**: Convert text into natural-sounding voice-overs using Coqui TTS.
273
- - **Music Production**: Generate custom music tracks with MusicGen Large for sound bed.
274
- - **Seamless Blending**: Easily combine voice and musicβ€”loop or trim tracks to match your desired promo length, with optional ducking to keep the voice front and center.
275
-
276
- Whether you’re a radio producer, podcaster, or content creator, **AI Promo Studio** streamlines your entire production pipelineβ€”cutting hours of manual editing down to a few clicks.
277
- """)
278
-
279
-
280
- with gr.Tabs():
281
- # Step 1: Generate Script
282
- with gr.Tab("Step 1: Generate Script"):
283
- with gr.Row():
284
- user_prompt = gr.Textbox(
285
- label="Promo Idea",
286
- placeholder="E.g., A 30-second promo for a morning show...",
287
- lines=2
288
- )
289
- llama_model_id = gr.Textbox(
290
- label="LLaMA Model ID",
291
- value="meta-llama/Meta-Llama-3-8B-Instruct",
292
- placeholder="Enter a valid Hugging Face model ID"
293
- )
294
- duration = gr.Slider(
295
- label="Desired Promo Duration (seconds)",
296
- minimum=15,
297
- maximum=60,
298
- step=15,
299
- value=30
300
- )
301
-
302
- generate_script_button = gr.Button("Generate Script")
303
- script_output = gr.Textbox(label="Generated Voice-Over Script", lines=5, interactive=False)
304
- sound_design_output = gr.Textbox(label="Sound Design Suggestions", lines=3, interactive=False)
305
- music_suggestion_output = gr.Textbox(label="Music Suggestions", lines=3, interactive=False)
306
-
307
- generate_script_button.click(
308
- fn=lambda user_prompt, model_id, dur: generate_script(user_prompt, model_id, HF_TOKEN, dur),
309
- inputs=[user_prompt, llama_model_id, duration],
310
- outputs=[script_output, sound_design_output, music_suggestion_output],
311
- )
312
-
313
- # Step 2: Generate Voice
314
- with gr.Tab("Step 2: Generate Voice"):
315
- gr.Markdown("Generate the voice-over using a Coqui TTS model.")
316
- selected_tts_model = gr.Dropdown(
317
- label="TTS Model",
318
- choices=[
319
- "tts_models/en/ljspeech/tacotron2-DDC",
320
- "tts_models/en/ljspeech/vits",
321
- "tts_models/en/sam/tacotron-DDC",
322
- ],
323
- value="tts_models/en/ljspeech/tacotron2-DDC",
324
- multiselect=False
325
- )
326
- generate_voice_button = gr.Button("Generate Voice-Over")
327
- voice_audio_output = gr.Audio(label="Voice-Over (WAV)", type="filepath")
328
-
329
- generate_voice_button.click(
330
- fn=lambda script, tts_model: generate_voice(script, tts_model),
331
- inputs=[script_output, selected_tts_model],
332
- outputs=voice_audio_output,
333
- )
334
-
335
- # Step 3: Generate Music (MusicGen Large)
336
- with gr.Tab("Step 3: Generate Music"):
337
- gr.Markdown("Generate a music track with the **MusicGen Large** model.")
338
- audio_length = gr.Slider(
339
- label="Music Length (tokens)",
340
- minimum=128,
341
- maximum=1024,
342
- step=64,
343
- value=512,
344
- info="Increase tokens for longer audio, but be mindful of inference time."
345
- )
346
- generate_music_button = gr.Button("Generate Music")
347
- music_output = gr.Audio(label="Generated Music (WAV)", type="filepath")
348
-
349
- generate_music_button.click(
350
- fn=lambda music_suggestion, length: generate_music(music_suggestion, length),
351
- inputs=[music_suggestion_output, audio_length],
352
- outputs=[music_output],
353
- )
354
-
355
- # Step 4: Blend Audio (Loop/Trim + Ducking)
356
- with gr.Tab("Step 4: Blend Audio"):
357
- gr.Markdown("**Music** will be looped or trimmed to match **Voice** duration, then optionally ducked.")
358
- ducking_checkbox = gr.Checkbox(label="Enable Ducking?", value=True)
359
- duck_level_slider = gr.Slider(
360
- label="Ducking Level (dB attenuation)",
361
- minimum=0,
362
- maximum=20,
363
- step=1,
364
- value=10
365
- )
366
- blend_button = gr.Button("Blend Voice + Music")
367
- blended_output = gr.Audio(label="Final Blended Output (WAV)", type="filepath")
368
-
369
- blend_button.click(
370
- fn=blend_audio,
371
- inputs=[voice_audio_output, music_output, ducking_checkbox, duck_level_slider],
372
- outputs=blended_output
373
- )
374
-
375
- # Footer
376
- gr.Markdown("""
377
- <hr>
378
- <p style="text-align: center; font-size: 0.9em;">
379
- Created with ❀️ by <a href="https://bilsimaging.com" target="_blank">bilsimaging.com</a>
380
- </p>
381
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
382
 
383
- # Visitor Badge
384
- gr.HTML("""
385
- <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold">
386
- <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759" />
387
- </a>
388
- """)
 
 
 
 
 
389
 
390
- demo.launch(debug=True)
 
 
13
  from dotenv import load_dotenv
14
  import tempfile
15
  import spaces
 
 
16
  from TTS.api import TTS
17
 
18
+ # -----------------------------------------------------------
19
+ # Initialization & Environment Setup
20
+ # -----------------------------------------------------------
21
  load_dotenv()
22
  HF_TOKEN = os.getenv("HF_TOKEN")
23
 
24
+ # -----------------------------------------------------------
25
+ # Model Cache Management
26
+ # -----------------------------------------------------------
27
  LLAMA_PIPELINES = {}
28
  MUSICGEN_MODELS = {}
29
  TTS_MODELS = {}
30
 
 
 
 
31
  def get_llama_pipeline(model_id: str, token: str):
 
 
 
32
  if model_id in LLAMA_PIPELINES:
33
  return LLAMA_PIPELINES[model_id]
34
 
 
44
  LLAMA_PIPELINES[model_id] = text_pipeline
45
  return text_pipeline
46
 
 
47
def get_musicgen_model(model_key: str = "facebook/musicgen-large"):
    """Return a cached (model, processor) pair for MusicGen, loading on first use.

    The pair is stored in the module-level MUSICGEN_MODELS cache keyed by
    model id, so each checkpoint is downloaded and moved to the device once.
    """
    cached = MUSICGEN_MODELS.get(model_key)
    if cached is not None:
        return cached

    model = MusicgenForConditionalGeneration.from_pretrained(model_key)
    processor = AutoProcessor.from_pretrained(model_key)
    # Prefer the GPU when one is visible; fall back to CPU otherwise.
    target_device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(target_device)
    MUSICGEN_MODELS[model_key] = (model, processor)
    return model, processor
57
 
 
58
def get_tts_model(model_name: str = "tts_models/en/ljspeech/tacotron2-DDC"):
    """Return a cached Coqui TTS model, instantiating it on first request."""
    try:
        return TTS_MODELS[model_name]
    except KeyError:
        pass

    loaded = TTS(model_name)
    TTS_MODELS[model_name] = loaded
    return loaded
64
 
65
+ # -----------------------------------------------------------
66
+ # Core Functionality
67
+ # -----------------------------------------------------------
 
68
@spaces.GPU(duration=100)
def generate_script(user_prompt: str, model_id: str, token: str, duration: int):
    """
    Generate a voice-over script, sound design notes and music suggestions.

    Args:
        user_prompt: The user's promo concept / client brief.
        model_id: Hugging Face model id for the LLaMA text pipeline.
        token: HF auth token forwarded to the pipeline loader.
        duration: Target promo duration in seconds (embedded in the prompt).

    Returns:
        A tuple (voice_script, sound_design, music_suggestions) of strings.
        On failure the first element carries the error message and the other
        two are empty strings.
    """
    try:
        text_pipeline = get_llama_pipeline(model_id, token)

        # BUGFIX: the parser below searches for the exact labels
        # "Voice-Over Script:", "Sound Design Suggestions:" and
        # "Music Suggestions:".  The prompt must instruct the model to emit
        # those labels verbatim, otherwise every section silently falls back
        # to its "No ... generated" placeholder.
        system_prompt = (
            f"You are a professional audio producer creating {duration}-second content. "
            "Generate exactly three sections, each starting on its own line with the "
            "label shown:\n"
            "Voice-Over Script: (clear and concise)\n"
            "Sound Design Suggestions: (specific effects)\n"
            "Music Suggestions: (genre, tempo)"
        )
        full_prompt = f"{system_prompt}\nClient brief: {user_prompt}\nOutput:"

        with torch.inference_mode():
            result = text_pipeline(
                full_prompt,
                max_new_tokens=400,
                do_sample=True,
                temperature=0.7,
                top_p=0.9
            )

        # Keep only the model's continuation after the final "Output:" marker.
        generated_text = result[0]["generated_text"].split("Output:")[-1].strip()

        voice, sound, music = _split_sections(generated_text)
        return (
            voice or "No script generated",
            sound or "No sound design suggestions",
            music or "No music suggestions",
        )

    except Exception as e:
        return f"Error: {str(e)}", "", ""


def _split_sections(generated_text: str):
    """Split labelled LLM output into (voice, sound, music) strings.

    A label may appear anywhere in a line; the remainder of that line and all
    following lines belong to that section until the next label.  Lines seen
    before the first recognised label are discarded.
    """
    sections = {
        "Voice-Over Script:": "",
        "Sound Design Suggestions:": "",
        "Music Suggestions:": "",
    }
    current_section = None
    for line in generated_text.split('\n'):
        for section in sections:
            if section in line:
                current_section = section
                line = line.replace(section, '').strip()
        if current_section:
            sections[current_section] += line + '\n'
    return (
        sections["Voice-Over Script:"].strip(),
        sections["Sound Design Suggestions:"].strip(),
        sections["Music Suggestions:"].strip(),
    )
 
114
 
 
 
 
115
@spaces.GPU(duration=100)
def generate_voice(script: str, tts_model_name: str):
    """
    Synthesize a voice-over for *script* with the selected Coqui TTS model.

    Args:
        script: The voice-over text; blank input is treated as "nothing to do".
        tts_model_name: Coqui TTS model identifier to load (cached).

    Returns:
        The path of the generated .wav file, or None when the script is empty
        or synthesis fails (Gradio renders None as an empty Audio widget).
    """
    try:
        if not script.strip():
            return None

        tts_model = get_tts_model(tts_model_name)

        # BUGFIX: a fixed "voice_temp.wav" path is shared by all sessions, so
        # concurrent users (or a re-run) overwrite each other's output. Use a
        # unique temp file per call instead.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            output_path = tmp.name
        tts_model.tts_to_file(text=script, file_path=output_path)
        return output_path

    except Exception as e:
        print(f"Voice generation error: {e}")
        return None
127
 
 
 
 
128
@spaces.GPU(duration=100)
def generate_music(prompt: str, audio_length: int):
    """
    Generate a music bed from *prompt* with the cached MusicGen-large model.

    Args:
        prompt: Text description of the desired music; blank input is skipped.
        audio_length: max_new_tokens for generation (controls clip length).

    Returns:
        The path of the generated .wav file, or None on failure.
    """
    try:
        # Guard restored from the previous revision: an empty suggestion
        # (e.g. the script step was never run) should not reach the model.
        if not prompt.strip():
            return None

        model, processor = get_musicgen_model()
        device = "cuda" if torch.cuda.is_available() else "cpu"
        inputs = processor(text=[prompt], padding=True, return_tensors="pt").to(device)

        with torch.inference_mode():
            outputs = model.generate(**inputs, max_new_tokens=audio_length)

        audio_data = outputs[0, 0].cpu().numpy()

        # Peak-normalize to int16; guard against division by zero when the
        # model emits pure silence.
        peak = abs(audio_data).max()
        if peak == 0:
            peak = 1.0
        normalized_audio = (audio_data / peak * 32767).astype("int16")

        # BUGFIX: MusicGen decodes at its audio encoder's sampling rate
        # (32 kHz for facebook/musicgen-large); hard-coding 44100 Hz made the
        # clip play back faster and higher-pitched. Read the configured rate,
        # falling back to 32000 if the config shape differs.
        sample_rate = getattr(
            getattr(model.config, "audio_encoder", None), "sampling_rate", 32000
        )

        output_path = f"{tempfile.gettempdir()}/music_temp.wav"
        write(output_path, sample_rate, normalized_audio)
        return output_path

    except Exception as e:
        print(f"Music generation error: {e}")
        return None
146
 
 
 
 
147
@spaces.GPU(duration=100)
def blend_audio(voice_path: str, music_path: str, ducking: bool, duck_level: int):
    """
    Mix the voice-over on top of the music bed.

    The music is looped until it covers the voice duration, then trimmed to
    match.  With ducking enabled, the music is attenuated by *duck_level* dB
    before the voice is overlaid.

    Args:
        voice_path: Path to the voice .wav (may be None if generation failed).
        music_path: Path to the music .wav (may be None if generation failed).
        ducking: Whether to attenuate the music under the voice.
        duck_level: Attenuation in dB applied when ducking is on.

    Returns:
        The path of the blended .wav file, or None on failure/missing inputs.
    """
    try:
        # BUGFIX: upstream steps return None on failure; fail fast with a
        # clean None instead of letting pydub raise on a bogus path.
        if not voice_path or not music_path:
            return None
        if not (os.path.isfile(voice_path) and os.path.isfile(music_path)):
            return None

        voice = AudioSegment.from_wav(voice_path)
        music = AudioSegment.from_wav(music_path)

        # Loop the music until it at least covers the voice, then trim.
        if len(music) < len(voice):
            loops_needed = (len(voice) // len(music)) + 1
            music = music * loops_needed
        music = music[:len(voice)]

        if ducking:
            # Attenuate the bed, then overlay the voice on top.
            final_audio = (music - duck_level).overlay(voice)
        else:
            final_audio = music.overlay(voice)

        output_path = f"{tempfile.gettempdir()}/final_mix.wav"
        final_audio.export(output_path, format="wav")
        return output_path

    except Exception as e:
        print(f"Mixing error: {e}")
        return None
172
+
173
# -----------------------------------------------------------
# Enhanced UI Components
# -----------------------------------------------------------
custom_css = """
#main-container {
    max-width: 1200px;
    margin: 0 auto;
    padding: 20px;
    background: #f5f5f5;
    border-radius: 15px;
    box-shadow: 0 4px 6px rgba(0,0,0,0.1);
}

.header {
    text-align: center;
    padding: 2em;
    background: linear-gradient(135deg, #2b5876 0%, #4e4376 100%);
    color: white;
    border-radius: 15px;
    margin-bottom: 2em;
}

.tab-nav {
    background: none !important;
    border: none !important;
}

.tab-button {
    padding: 1em 2em !important;
    border-radius: 8px !important;
    margin: 0 5px !important;
    transition: all 0.3s ease !important;
}

.tab-button:hover {
    transform: translateY(-2px);
    box-shadow: 0 3px 6px rgba(0,0,0,0.1);
}

.dark-btn {
    background: linear-gradient(135deg, #434343 0%, #000000 100%) !important;
    color: white !important;
    border: none !important;
    padding: 12px 24px !important;
    border-radius: 8px !important;
}

.output-card {
    background: white !important;
    border-radius: 10px !important;
    padding: 20px !important;
    box-shadow: 0 2px 4px rgba(0,0,0,0.05) !important;
}

.progress-indicator {
    color: #666;
    font-style: italic;
    margin-top: 10px;
}
"""

with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
    with gr.Column(elem_id="main-container"):
        # Header Section
        with gr.Column(elem_classes="header"):
            gr.Markdown("""
            # 🎙️ AI Promo Studio
            **Professional Audio Production Suite Powered by AI**
            """)

        # Main Workflow Tabs
        with gr.Tabs(elem_classes="tab-nav"):
            # Step 1: script generation.
            with gr.Tab("📝 Script Design", elem_classes="tab-button"):
                with gr.Row(equal_height=False):
                    with gr.Column(scale=2):
                        gr.Markdown("### 🎯 Project Brief")
                        user_prompt = gr.Textbox(
                            label="Describe your promo concept",
                            placeholder="e.g., 'An intense 30-second movie trailer intro with epic orchestral music and dramatic sound effects...'",
                            lines=4
                        )
                        with gr.Row():
                            duration = gr.Slider(
                                label="Duration (seconds)",
                                minimum=15,
                                maximum=120,
                                step=15,
                                value=30,
                                interactive=True
                            )
                            llama_model_id = gr.Dropdown(
                                label="AI Model",
                                choices=["meta-llama/Meta-Llama-3-8B-Instruct"],
                                value="meta-llama/Meta-Llama-3-8B-Instruct",
                                interactive=True
                            )
                        generate_btn = gr.Button("Generate Script 🚀", elem_classes="dark-btn")

                    with gr.Column(scale=1, elem_classes="output-card"):
                        gr.Markdown("### 📄 Generated Content")
                        script_output = gr.Textbox(label="Voice Script", lines=6)
                        sound_design_output = gr.Textbox(label="Sound Design", lines=3)
                        music_suggestion_output = gr.Textbox(label="Music Style", lines=3)

            # Step 2: voice-over synthesis.
            with gr.Tab("🎙️ Voice Production", elem_classes="tab-button"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("### 🔊 Voice Settings")
                        tts_model = gr.Dropdown(
                            label="Voice Model",
                            choices=[
                                "tts_models/en/ljspeech/tacotron2-DDC",
                                "tts_models/en/ljspeech/vits",
                                "tts_models/en/sam/tacotron-DDC"
                            ],
                            value="tts_models/en/ljspeech/tacotron2-DDC",
                            interactive=True
                        )
                        with gr.Row():
                            # TODO(review): this preview button has no click
                            # handler wired up — it currently does nothing.
                            voice_preview_btn = gr.Button("Preview Sample", elem_classes="dark-btn")
                            voice_generate_btn = gr.Button("Generate Full Voiceover", elem_classes="dark-btn")
                    with gr.Column(scale=1, elem_classes="output-card"):
                        gr.Markdown("### 🎧 Voice Preview")
                        voice_audio = gr.Audio(
                            label="Generated Voice",
                            interactive=False,
                            waveform_options={"show_controls": True}
                        )

            # Step 3: music bed generation.
            with gr.Tab("🎵 Music Design", elem_classes="tab-button"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("### 🎹 Music Parameters")
                        audio_length = gr.Slider(
                            label="Generation Length",
                            minimum=256,
                            maximum=1024,
                            step=64,
                            value=512,
                            info="Higher values = longer generation time"
                        )
                        music_generate_btn = gr.Button("Generate Music Track", elem_classes="dark-btn")
                    with gr.Column(scale=1, elem_classes="output-card"):
                        gr.Markdown("### 🎶 Music Preview")
                        music_output = gr.Audio(
                            label="Generated Music",
                            interactive=False,
                            waveform_options={"show_controls": True}
                        )

            # Step 4: final mixdown.
            with gr.Tab("🔊 Final Mix", elem_classes="tab-button"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("### 🎚️ Mixing Console")
                        ducking_enabled = gr.Checkbox(
                            label="Enable Voice Ducking",
                            value=True,
                            info="Automatically lower music during voice segments"
                        )
                        duck_level = gr.Slider(
                            label="Ducking Intensity (dB)",
                            minimum=3,
                            maximum=20,
                            step=1,
                            value=10
                        )
                        mix_btn = gr.Button("Generate Final Mix", elem_classes="dark-btn")
                    with gr.Column(scale=1, elem_classes="output-card"):
                        gr.Markdown("### 🎧 Final Production")
                        final_mix = gr.Audio(
                            label="Mixed Output",
                            interactive=False,
                            waveform_options={"show_controls": True}
                        )

        # Footer
        with gr.Column(elem_classes="output-card"):
            gr.Markdown("""
            <div style="text-align: center; padding: 1.5em 0;">
                <a href="https://bilsimaging.com" target="_blank">
                    <img src="https://bilsimaging.com/logo.png" alt="Bils Imaging" style="height: 35px; margin-right: 15px;">
                </a>
                <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold">
                    <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759" />
                </a>
            </div>
            <p style="text-align: center; color: #666; font-size: 0.9em;">
                Professional Audio Production Suite v2.1 © 2024 | Bils Imaging
            </p>
            """)

    # Event Handling
    #
    # SECURITY FIX: the previous wiring passed the HF token through a hidden
    # gr.Textbox(HF_TOKEN, visible=False) input. Component values are
    # serialized to the browser, which leaks the secret to every visitor.
    # Close over HF_TOKEN server-side instead.
    generate_btn.click(
        fn=lambda prompt_text, model, dur: generate_script(prompt_text, model, HF_TOKEN, dur),
        inputs=[user_prompt, llama_model_id, duration],
        outputs=[script_output, sound_design_output, music_suggestion_output]
    )

    voice_generate_btn.click(
        generate_voice,
        inputs=[script_output, tts_model],
        outputs=voice_audio
    )

    music_generate_btn.click(
        generate_music,
        inputs=[music_suggestion_output, audio_length],
        outputs=music_output
    )

    mix_btn.click(
        blend_audio,
        inputs=[voice_audio, music_output, ducking_enabled, duck_level],
        outputs=final_mix
    )

if __name__ == "__main__":
    demo.launch(debug=True)