Bils committed
Commit 2169070 · verified · 1 Parent(s): 464b686

Update app.py

Files changed (1)
  1. app.py +102 -184
app.py CHANGED
@@ -25,12 +25,10 @@ MODEL_CONFIG = {
     "llama_models": {
         "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
         "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
-        "Phi-3-mini": "microsoft/Phi-3-mini-4k-instruct"
     },
     "tts_models": {
         "Standard English": "tts_models/en/ljspeech/tacotron2-DDC",
         "High Quality": "tts_models/en/ljspeech/vits",
-        "Fast Inference": "tts_models/en/sam/tacotron-DDC"
     }
 }
 
@@ -45,19 +43,17 @@ class ModelManager:
 
     def get_llama_pipeline(self, model_id, token):
         if model_id not in self.llama_pipelines:
-            tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
+            tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
             model = AutoModelForCausalLM.from_pretrained(
                 model_id,
-                token=token,
+                use_auth_token=token,
                 torch_dtype=torch.float16,
-                device_map="auto",
-                attn_implementation="flash_attention_2"
+                device_map="auto"
             )
             self.llama_pipelines[model_id] = pipeline(
                 "text-generation",
                 model=model,
-                tokenizer=tokenizer,
-                device_map="auto"
+                tokenizer=tokenizer
             )
         return self.llama_pipelines[model_id]
 
@@ -65,8 +61,6 @@ class ModelManager:
         if model_key not in self.musicgen_models:
             model = MusicgenForConditionalGeneration.from_pretrained(model_key)
             processor = AutoProcessor.from_pretrained(model_key)
-            device = "cuda" if torch.cuda.is_available() else "cpu"
-            model.to(device)
             self.musicgen_models[model_key] = (model, processor)
         return self.musicgen_models[model_key]
 
@@ -80,34 +74,26 @@ model_manager = ModelManager()
 # -------------------------------
 # Core Functions
 # -------------------------------
-@spaces.GPU(duration=120)
-def generate_script(user_prompt, model_id, duration, temperature=0.7, max_tokens=512):
+@spaces.GPU
+def generate_script(user_prompt, model_id, duration, temperature=0.7):
     try:
         text_pipeline = model_manager.get_llama_pipeline(model_id, HF_TOKEN)
 
-        system_prompt = f"""You are an AI audio production assistant. Create content for a {duration}-second promo:
-        1. Voice Script: [Clear, engaging narration]
-        2. Sound Design: [3-5 specific sound effects]
-        3. Music: [Genre, tempo, mood suggestions]
-
-        Keep sections concise and production-ready."""
+        prompt = f"""Create a {duration}-second audio promo script with these elements:
+        1. Voice Script: [clear narration]
+        2. Sound Design: [3-5 effects]
+        3. Music: [genre/tempo]
 
-        messages = [
-            {"role": "system", "content": system_prompt},
-            {"role": "user", "content": user_prompt}
-        ]
+        Concept: {user_prompt}"""
 
-        response = text_pipeline(
-            messages,
-            max_new_tokens=max_tokens,
+        result = text_pipeline(
+            prompt,
+            max_new_tokens=300,
             temperature=temperature,
-            do_sample=True,
-            top_p=0.95,
-            eos_token_id=text_pipeline.tokenizer.eos_token_id
+            do_sample=True
         )
 
-        return parse_generated_content(response[0]['generated_text'][-1]['content'])
-
+        return parse_generated_content(result[0]["generated_text"])
     except Exception as e:
         return f"Error: {str(e)}", "", ""
 
@@ -136,68 +122,48 @@ def parse_generated_content(text):
 
     return sections["Voice Script"].strip(), sections["Sound Design"].strip(), sections["Music"].strip()
 
-@spaces.GPU(duration=100)
+@spaces.GPU
 def generate_voice(script, tts_model, speed=1.0):
     try:
         if not script.strip():
-            raise ValueError("Empty script")
-
+            return "Error: No script provided"
+
         tts = model_manager.get_tts_model(tts_model)
-        output_path = os.path.join(tempfile.gettempdir(), "enhanced_voice.wav")
-
-        tts.tts_to_file(
-            text=script,
-            file_path=output_path,
-            speed=speed
-        )
+        output_path = os.path.join(tempfile.gettempdir(), "voice.wav")
+        tts.tts_to_file(text=script, file_path=output_path)
         return output_path
     except Exception as e:
         return f"Error: {str(e)}"
 
-@spaces.GPU(duration=150)
-def generate_music(prompt, duration_sec=30, temperature=1.0, guidance_scale=3.0):
+@spaces.GPU
+def generate_music(prompt, duration_sec=30):
     try:
         model, processor = model_manager.get_musicgen_model()
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        inputs = processor(text=[prompt], padding=True, return_tensors="pt")
 
-        inputs = processor(
-            text=[prompt],
-            padding=True,
-            return_tensors="pt",
-        ).to(device)
-
-        audio_values = model.generate(
-            **inputs,
-            max_new_tokens=int(duration_sec * 50),
-            temperature=temperature,
-            guidance_scale=guidance_scale,
-            do_sample=True
-        )
-
-        output_path = os.path.join(tempfile.gettempdir(), "enhanced_music.wav")
-        write(output_path, 32000, audio_values[0, 0].cpu().numpy())
+        audio_values = model.generate(**inputs, max_new_tokens=int(duration_sec * 50))
+        output_path = os.path.join(tempfile.gettempdir(), "music.wav")
+        write(output_path, 44100, audio_values[0, 0].cpu().numpy())
         return output_path
     except Exception as e:
         return f"Error: {str(e)}"
 
-def blend_audio(voice_path, music_path, ducking=True, duck_level=10, crossfade=500):
+def blend_audio(voice_path, music_path, ducking=True, duck_level=10):
     try:
         voice = AudioSegment.from_wav(voice_path)
         music = AudioSegment.from_wav(music_path)
-
+
+        # Align durations
         if len(music) < len(voice):
-            loops = (len(voice) // len(music)) + 1
-            music = music * loops
-
-        music = music[:len(voice)].fade_out(crossfade)
-
+            music = music * (len(voice) // len(music) + 1)
+        music = music[:len(voice)]
+
+        # Apply ducking
         if ducking:
-            ducked_music = music - duck_level
-            mixed = ducked_music.overlay(voice.fade_in(crossfade))
-        else:
-            mixed = music.overlay(voice)
+            music = music - duck_level
 
-        output_path = os.path.join(tempfile.gettempdir(), "enhanced_mix.wav")
+        mixed = music.overlay(voice)
+        output_path = os.path.join(tempfile.gettempdir(), "final_mix.wav")
         mixed.export(output_path, format="wav")
         return output_path
     except Exception as e:
@@ -206,132 +172,84 @@ def blend_audio(voice_path, music_path, ducking=True, duck_level=10, crossfade=500):
 # -------------------------------
 # Gradio Interface
 # -------------------------------
-theme = gr.themes.Soft(
-    primary_hue="blue",
-    secondary_hue="teal",
-).set(
-    body_text_color_dark='#FFFFFF',
-    background_fill_primary_dark='#1F1F1F'
-)
-
-with gr.Blocks(theme=theme, title="AI Audio Studio Pro") as demo:
+with gr.Blocks(title="AI Radio Studio", css=".gradio-container {max-width: 800px !important}") as demo:
     gr.Markdown("""
-    # 🎙️ AI Audio Studio Pro
-    *Next-generation audio production powered by AI*
+    # 🎙️ AI Radio Studio
+    *Create professional audio content in 4 easy steps*
     """)
-
+
     with gr.Tabs():
-        with gr.Tab("🎯 Concept Development"):
+        with gr.Tab("1️⃣ Concept"):
+            concept_input = gr.Textbox(label="Your Idea", placeholder="Describe your radio promo...", lines=3)
             with gr.Row():
-                with gr.Column(scale=2):
-                    concept_input = gr.Textbox(
-                        label="Your Concept",
-                        placeholder="Describe your audio project...",
-                        lines=3,
-                        max_lines=6
-                    )
-                    with gr.Accordion("Advanced Settings", open=False):
-                        with gr.Row():
-                            model_selector = gr.Dropdown(
-                                choices=list(MODEL_CONFIG["llama_models"].values()),
-                                label="AI Model",
-                                value=MODEL_CONFIG["llama_models"]["Meta-Llama-3-8B"]
-                            )
-                            duration_slider = gr.Slider(15, 120, value=30, step=15, label="Duration (seconds)")
-                        with gr.Row():
-                            temp_slider = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Creativity")
-                            token_slider = gr.Slider(128, 1024, value=512, step=128, label="Max Length")
-
-                    generate_btn = gr.Button("✨ Generate Concept", variant="primary")
-
-                with gr.Column(scale=1):
-                    script_output = gr.Textbox(label="Voice Script", interactive=True)
-                    sound_output = gr.Textbox(label="Sound Design", interactive=True)
-                    music_output = gr.Textbox(label="Music Suggestions", interactive=True)
+                model_select = gr.Dropdown(
+                    choices=list(MODEL_CONFIG["llama_models"].values()),
+                    label="AI Model",
+                    value="meta-llama/Meta-Llama-3-8B-Instruct"
+                )
+                duration_select = gr.Slider(15, 60, 30, step=15, label="Duration (sec)")
+            generate_btn = gr.Button("Generate Script", variant="primary")
 
-            generate_btn.click(
-                generate_script,
-                inputs=[concept_input, model_selector, duration_slider, temp_slider, token_slider],
-                outputs=[script_output, sound_output, music_output]
+            script_output = gr.Textbox(label="Voice Script", interactive=True)
+            sound_output = gr.Textbox(label="Sound Effects", interactive=True)
+            music_output = gr.Textbox(label="Music Style", interactive=True)
+
+        with gr.Tab("2️⃣ Voice"):
+            tts_select = gr.Dropdown(
+                choices=list(MODEL_CONFIG["tts_models"].values()),
+                label="Voice Model",
+                value="tts_models/en/ljspeech/tacotron2-DDC"
             )
+            voice_btn = gr.Button("Generate Voiceover", variant="primary")
+            voice_preview = gr.Audio(label="Preview", type="filepath")
 
-        with gr.Tab("🗣️ Voice Production"):
-            with gr.Row():
-                with gr.Column():
-                    tts_model = gr.Dropdown(
-                        choices=list(MODEL_CONFIG["tts_models"].values()),
-                        label="Voice Model",
-                        value=MODEL_CONFIG["tts_models"]["Standard English"]
-                    )
-                    speed_slider = gr.Slider(0.5, 2.0, value=1.0, step=0.1, label="Speaking Rate")
-                    voice_btn = gr.Button("🎙️ Generate Voiceover", variant="primary")
-                with gr.Column():
-                    voice_preview = gr.Audio(label="Preview", interactive=False)
-            voice_btn.click(
-                generate_voice,
-                inputs=[script_output, tts_model, speed_slider],
-                outputs=voice_preview
-            )
-
-        with gr.Tab("🎶 Music Production"):
-            with gr.Row():
-                with gr.Column():
-                    with gr.Accordion("Music Parameters", open=True):
-                        music_duration = gr.Slider(10, 120, value=30, label="Duration (seconds)")
-                        music_temp = gr.Slider(0.1, 2.0, value=1.0, label="Creativity")
-                        guidance_scale = gr.Slider(1.0, 5.0, value=3.0, label="Focus")
-                    music_btn = gr.Button("🎵 Generate Music", variant="primary")
-                with gr.Column():
-                    music_preview = gr.Audio(label="Preview", interactive=False)
-            music_btn.click(
-                generate_music,
-                inputs=[music_output, music_duration, music_temp, guidance_scale],
-                outputs=music_preview
-            )
+        with gr.Tab("3️⃣ Music"):
+            music_btn = gr.Button("Generate Music", variant="primary")
+            music_preview = gr.Audio(label="Preview", type="filepath")
 
-        with gr.Tab("🔊 Final Mix"):
+        with gr.Tab("4️⃣ Mix"):
             with gr.Row():
-                with gr.Column():
-                    ducking_toggle = gr.Checkbox(value=True, label="Enable Voice Ducking")
-                    duck_level = gr.Slider(0, 30, value=12, label="Ducking Strength (dB)")
-                    crossfade_time = gr.Slider(0, 2000, value=500, label="Crossfade (ms)")
-                    mix_btn = gr.Button("🚀 Create Final Mix", variant="primary")
-                with gr.Column():
-                    final_mix = gr.Audio(label="Master Output", interactive=False)
-            mix_btn.click(
-                blend_audio,
-                inputs=[voice_preview, music_preview, ducking_toggle, duck_level, crossfade_time],
-                outputs=final_mix
-            )
-
-    with gr.Accordion("📚 Example Prompts", open=False):
-        gr.Examples(
-            examples=[
-                ["A 30-second tech podcast intro with futuristic sounds"],
-                ["A 15-second radio ad for a coffee shop with morning vibes"],
-                ["A 60-second documentary trailer with epic orchestral music"]
-            ],
-            inputs=concept_input
-        )
-
-    with gr.Row():
-        gr.Markdown("### System Resources")
-        gpu_status = gr.Textbox(label="GPU Utilization", interactive=False)
-        ram_status = gr.Textbox(label="RAM Usage", interactive=False)
+                ducking_toggle = gr.Checkbox(True, label="Duck Music")
+                duck_level = gr.Slider(0, 20, 10, label="Duck Level (dB)")
+            mix_btn = gr.Button("Create Final Mix", variant="primary")
+            final_mix = gr.Audio(label="Final Output", type="filepath")
 
-    # Custom Footer
+    # Footer Section
     gr.Markdown("""
-    <hr>
-    <p style="text-align: center; font-size: 0.9em;">
-        Created with ❤️ by <a href="https://bilsimaging.com" target="_blank">bilsimaging.com</a>
-    </p>
+    <div style="text-align: center; margin-top: 20px; padding: 15px; border-top: 1px solid #e0e0e0;">
+        <p style="font-size: 0.9em; color: #666;">
+            Created with ❤️ by <a href="https://bilsimaging.com" target="_blank">bilsimaging.com</a>
+        </p>
+        <a href="https://visitorbadge.io/status?path=https://huggingface.co/spaces/Bils/radiogold">
+            <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759"/>
+        </a>
+    </div>
     """)
+
+    # Event Handlers
+    generate_btn.click(
+        generate_script,
+        inputs=[concept_input, model_select, duration_select],
+        outputs=[script_output, sound_output, music_output]
+    )
 
-    gr.HTML("""
-    <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold">
-        <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FBils%2Fradiogold&countColor=%23263759" />
-    </a>
-    """)
+    voice_btn.click(
+        generate_voice,
+        inputs=[script_output, tts_select],
+        outputs=voice_preview
+    )
+
+    music_btn.click(
+        generate_music,
+        inputs=[music_output],
+        outputs=music_preview
+    )
+
+    mix_btn.click(
+        blend_audio,
+        inputs=[voice_preview, music_preview, ducking_toggle, duck_level],
+        outputs=final_mix
+    )
 
 if __name__ == "__main__":
     demo.launch(server_name="0.0.0.0", server_port=7860)
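
For reviewers who want to exercise the updated helpers outside the Gradio UI, a minimal sketch of how the new function signatures chain together is shown below. It assumes app.py is importable as `app` with `HF_TOKEN` and the model weights available; the prompt string and output handling are illustrative only and not part of this commit.

# Hypothetical smoke test for the updated pipeline (assumes app.py is importable as `app`).
from app import generate_script, generate_voice, generate_music, blend_audio

# generate_script returns (voice_script, sound_design, music_prompt), or an error tuple on failure.
script, sfx, music_prompt = generate_script(
    user_prompt="A short radio promo for a morning coffee show",  # illustrative prompt
    model_id="meta-llama/Meta-Llama-3-8B-Instruct",
    duration=30,
)
voice_path = generate_voice(script, "tts_models/en/ljspeech/tacotron2-DDC")
music_path = generate_music(music_prompt, duration_sec=30)
final_mix_path = blend_audio(voice_path, music_path, ducking=True, duck_level=10)
print(final_mix_path)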