bluenevus committed
Commit 0d24647 · verified · 1 Parent(s): 02ebd05

Update app.py

Files changed (1)
  1. app.py +98 -57
app.py CHANGED
@@ -1,4 +1,7 @@
 import gradio as gr
+import google.generativeai as genai
+import numpy as np
+import re
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from huggingface_hub import snapshot_download, login
@@ -7,7 +10,9 @@ import os
 import spaces
 import warnings
 from snac import SNAC
-import numpy as np
+from dotenv import load_dotenv
+
+load_dotenv()
 
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
@@ -22,8 +27,6 @@ model = None
 tokenizer = None
 snac_model = None
 
-EMOTIVE_TAGS = ["<laugh>", "<sigh>", "<gasp>", "<cry>", "<yawn>"]
-
 @spaces.GPU()
 def load_model():
     global model, tokenizer, snac_model
@@ -56,6 +59,40 @@ def load_model():
         logger.error(f"Error loading model: {str(e)}")
         raise
 
+@spaces.GPU()
+def generate_podcast_script(api_key, content, uploaded_file, duration, num_hosts):
+    try:
+        genai.configure(api_key=api_key)
+        model = genai.GenerativeModel('gemini-2.5-pro-preview-03-25')
+
+        combined_content = content or ""
+        if uploaded_file:
+            file_content = uploaded_file.read().decode('utf-8')
+            combined_content += "\n" + file_content if combined_content else file_content
+
+        prompt = f"""
+        Create a podcast script for {'one person' if num_hosts == 1 else 'two people'} discussing:
+        {combined_content}
+
+        Duration: {duration}. Include natural speech, humor, and occasional off-topic thoughts.
+        Use speech fillers like um, ah. Vary emotional tone.
+
+        Format: {'Monologue' if num_hosts == 1 else 'Alternating dialogue'} without speaker labels.
+        Separate {'paragraphs' if num_hosts == 1 else 'lines'} with blank lines.
+
+        Use emotion tags in angle brackets: <laugh>, <sigh>, <chuckle>, <cough>, <sniffle>, <groan>, <yawn>, <gasp>.
+
+        Example: "I can't believe I stayed up all night <yawn> only to find out the meeting was canceled <groan>."
+
+        Ensure content flows naturally and stays on topic. Match the script length to {duration}.
+        """
+
+        response = model.generate_content(prompt)
+        return re.sub(r'[^a-zA-Z0-9\s.,?!<>]', '', response.text)
+    except Exception as e:
+        logger.error(f"Error generating podcast script: {str(e)}")
+        raise
+
 def process_prompt(prompt, voice, tokenizer, device):
     prompt = f"{voice}: {prompt}"
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids
@@ -146,69 +183,73 @@ def generate_speech(text, voice, temperature, top_p, repetition_penalty, max_new
         progress(0.8, "Converting to audio...")
         audio_samples = redistribute_codes(code_list, snac_model)
 
-        return (24000, audio_samples)
+        return (24000, audio_samples)  # Return sample rate and audio
     except Exception as e:
         print(f"Error generating speech: {e}")
         return None
 
-with gr.Blocks(title="Orpheus Text-to-Speech") as demo:
-    gr.Markdown(f"""
-    # 🎵 [Orpheus Text-to-Speech](https://github.com/canopyai/Orpheus-TTS)
-    Enter your text below and hear it converted to natural-sounding speech with the Orpheus TTS model.
+@spaces.GPU()
+def render_podcast(api_key, script, voice1, voice2, num_hosts):
+    try:
+        lines = [line for line in script.split('\n') if line.strip()]
+        audio_segments = []
+
+        for i, line in enumerate(lines):
+            voice = voice1 if num_hosts == 1 or i % 2 == 0 else voice2
+            try:
+                result = generate_speech(line, voice, temperature=0.6, top_p=0.95, repetition_penalty=1.1, max_new_tokens=1200)
+                if result is not None:
+                    sample_rate, audio = result
+                    audio_segments.append(audio)
+            except Exception as e:
+                logger.error(f"Error processing audio segment: {str(e)}")
+
+        if not audio_segments:
+            logger.warning("No valid audio segments were generated.")
+            return (24000, np.zeros(24000, dtype=np.float32))
+
+        podcast_audio = np.concatenate(audio_segments)
+        podcast_audio = np.clip(podcast_audio, -1, 1)
+        podcast_audio = (podcast_audio * 32767).astype(np.int16)
+
+        return (24000, podcast_audio)
+    except Exception as e:
+        logger.error(f"Error rendering podcast: {str(e)}")
+        raise
+
+with gr.Blocks() as demo:
+    gr.Markdown("# AI Podcast Generator")
 
-    ## Tips for better prompts:
-    - Add paralinguistic elements like {", ".join(EMOTIVE_TAGS)} or `uhm` for more human-like speech.
-    - Longer text prompts generally work better than very short phrases
-    """)
+    api_key_input = gr.Textbox(label="Enter your Gemini API Key", type="password")
 
     with gr.Row():
-        with gr.Column():
-            text_input = gr.Textbox(
-                label="Text Input",
-                placeholder="Enter the text you want to convert to speech...",
-                lines=8
-            )
-            voice_select = gr.Dropdown(
-                choices=["tara", "leah", "jess", "leo", "dan", "mia", "zac", "zoe"],
-                value="tara",
-                label="Voice"
-            )
-            with gr.Accordion("Advanced Options", open=False):
-                temperature = gr.Slider(
-                    minimum=0.1, maximum=1.0, value=0.6, step=0.1,
-                    label="Temperature",
-                    info="Higher values increase randomness in the output"
-                )
-                top_p = gr.Slider(
-                    minimum=0.1, maximum=1.0, value=0.95, step=0.05,
-                    label="Top-p",
-                    info="Lower values increase determinism in the output"
-                )
-                repetition_penalty = gr.Slider(
-                    minimum=1.0, maximum=2.0, value=1.1, step=0.1,
-                    label="Repetition Penalty",
-                    info="Higher values discourage repetitive patterns"
-                )
-                max_new_tokens = gr.Slider(
-                    minimum=100, maximum=2000, value=1200, step=100,
-                    label="Max Length",
-                    info="Maximum length of generated audio (in tokens)"
-                )
-
-            with gr.Row():
-                submit_btn = gr.Button("Generate Speech", variant="primary")
-                clear_btn = gr.Button("Clear")
-
-        with gr.Column():
-            audio_output = gr.Audio(label="Generated Speech")
+        content_input = gr.Textbox(label="Paste your content (optional)", lines=4)
+        document_upload = gr.File(label="Upload Document (optional)")
+
+    duration = gr.Radio(["1-5 min", "5-10 min", "10-15 min"], label="Estimated podcast duration", value="1-5 min")
+    num_hosts = gr.Radio([1, 2], label="Number of podcast hosts", value=2)
 
-    submit_btn.click(
-        generate_speech,
-        inputs=[text_input, voice_select, temperature, top_p, repetition_penalty, max_new_tokens],
-        outputs=audio_output
-    )
+    voice_options = ["tara", "leah", "jess", "leo", "dan", "mia", "zac", "zoe"]
+    voice1_select = gr.Dropdown(label="Select Voice 1", choices=voice_options, value="tara")
+    voice2_select = gr.Dropdown(label="Select Voice 2", choices=voice_options, value="leo")
 
-    clear_btn.click(lambda: "", inputs=None, outputs=text_input)
+    generate_btn = gr.Button("Generate Script")
+    script_output = gr.Textbox(label="Generated Script", lines=10)
+
+    render_btn = gr.Button("Render Podcast")
+    audio_output = gr.Audio(label="Generated Podcast")
+
+    generate_btn.click(generate_podcast_script,
+                       inputs=[api_key_input, content_input, document_upload, duration, num_hosts],
+                       outputs=script_output)
+
+    render_btn.click(render_podcast,
+                     inputs=[api_key_input, script_output, voice1_select, voice2_select, num_hosts],
+                     outputs=audio_output)
+
+    num_hosts.change(lambda x: gr.update(visible=x == 2),
+                     inputs=[num_hosts],
+                     outputs=[voice2_select])
 
 if __name__ == "__main__":
    try:
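
For reference, a minimal sketch of how the two new callbacks could be chained outside the Gradio UI, assuming app.py is importable, load_model() has already populated the model globals, and the Gemini key is read from a hypothetical GEMINI_API_KEY environment variable (in the Space itself the key comes from api_key_input). This is an illustration, not part of the commit:

# Illustrative sketch only; names marked as assumptions in the comments below.
import os
from app import load_model, generate_podcast_script, render_podcast

load_model()  # loads the Orpheus model, tokenizer, and SNAC decoder into the module globals
api_key = os.environ["GEMINI_API_KEY"]  # assumed environment variable; the UI uses api_key_input instead

# Step 1: draft a two-host script from pasted text only (no uploaded file).
script = generate_podcast_script(
    api_key,
    content="Gradio lets you wrap a model in a shareable web UI with a few lines of code.",
    uploaded_file=None,
    duration="1-5 min",
    num_hosts=2,
)

# Step 2: synthesize the script, alternating lines between the two selected voices.
sample_rate, audio = render_podcast(api_key, script, "tara", "leo", num_hosts=2)
print(sample_rate, audio.dtype, audio.shape)  # expected: 24000 Hz, int16 samples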