# app.py

import gradio as gr
import torch
import torchaudio
import google.generativeai as genai
from e2_tts_pytorch import E2TTS, DurationPredictor
import numpy as np
import os
import re
import requests
from tqdm import tqdm

# Model loading and initialization. The original setup is elided here;
# the stand-in below is a minimal sketch so this file runs standalone.
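# NOTE: Illustrative sketch following the e2_tts_pytorch README -- the
# transformer dims/depths below are assumed values, not tuned ones, and
# the model is untrained. Load a real fine-tuned checkpoint here for
# actual voice cloning.
duration_predictor = DurationPredictor(
    transformer=dict(
        dim=512,
        depth=8,
    )
)

e2tts = E2TTS(
    duration_predictor=duration_predictor,
    transformer=dict(
        dim=512,
        depth=8,
    ),
)
e2tts.eval()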

def generate_podcast_script(api_key, content, duration):
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel('gemini-2.5-pro-preview-03-25')
    
    prompt = f"""
    Create a podcast script for two people discussing the following content:
    {content}
    
    The podcast should last approximately {duration}. Include natural speech patterns,
    humor, and occasional off-topic chit-chat. Use speech fillers like "um", "ah",
    "yes", "I see", "Ok now". Vary the emotional tone (e.g., regular, happy, sad, surprised)
    and indicate these in [square brackets]. Format the script as follows:

    Host 1: [emotion] Dialog
    Host 2: [emotion] Dialog
    
    Ensure the conversation flows naturally and stays relevant to the topic.
    """
    response = model.generate_content(prompt)
    return response.text

def text_to_speech(text, speaker_id):
    # Simplification: use a random mel spectrogram as the audio prompt.
    # In a real scenario you'd use the mel spectrogram extracted from the
    # cloned voice selected by speaker_id (unused here for that reason).
    mel = torch.randn(1, 80, 100)  # (batch, frames, mel channels)
    
    # Generate speech conditioned on the first few prompt frames
    with torch.no_grad():
        sampled = e2tts.sample(mel[:, :5], text=[text])
    
    # The sample is mel-like, not a waveform; flatten it to 1-D so Gradio
    # can play the placeholder output. A real pipeline would run the
    # sampled mel through a vocoder instead (see the sketch below).
    return sampled.cpu().numpy().squeeze().reshape(-1)
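
def vocode_mel(sampled_mel):
    """Decode a sampled mel tensor to a waveform with a neural vocoder.

    Illustrative sketch only: assumes the optional `vocos` package and its
    pretrained 24 kHz mel vocoder, and that the mel layout matches what
    the vocoder was trained on.
    """
    from vocos import Vocos  # lazy import; optional dependency
    vocoder = Vocos.from_pretrained("charactr/vocos-mel-24khz")
    # Vocos expects (batch, mel_channels, frames); e2tts.sample yields
    # (batch, frames, mel_channels), hence the transpose.
    return vocoder.decode(sampled_mel.transpose(1, 2))
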

def create_podcast(api_key, content, duration, voice1, voice2):
    # One-shot helper: generate the script and render it in a single call.
    # (The UI below exposes these as two separate steps instead.)
    script = generate_podcast_script(api_key, content, duration)
    return render_podcast(api_key, script, voice1, voice2)

def gradio_interface(api_key, content, duration, voice1, voice2):
    script = generate_podcast_script(api_key, content, duration)
    return script

def render_podcast(api_key, script, voice1, voice2):
    lines = script.split('\n')
    audio_segments = []
    
    for line in lines:
        if line.startswith("Host 1:"):
            speaker_id = 0
        elif line.startswith("Host 2:"):
            speaker_id = 1
        else:
            continue
        # Strip the "Host N:" prefix and the [emotion] tags so the markers
        # are not read aloud by the TTS model
        text = re.sub(r'\[[^\]]*\]', '', line[7:]).strip()
        if text:
            audio_segments.append(text_to_speech(text, speaker_id=speaker_id))
    
    if not audio_segments:
        return (22050, np.zeros(22050))  # Return silence if no audio was generated
    
    # Concatenate audio segments
    podcast_audio = np.concatenate(audio_segments)
    return (22050, podcast_audio)  # Assuming 22050 Hz sample rate

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# AI Podcast Generator")
    
    api_key_input = gr.Textbox(label="Enter your Gemini API Key", type="password")
    
    with gr.Row():
        content_input = gr.Textbox(label="Paste your content or upload a document")
        document_upload = gr.File(label="Upload Document")  # not yet wired into script generation
    
    duration = gr.Radio(["1-5 min", "5-10 min", "10-15 min"], label="Estimated podcast duration")
    
    with gr.Row():
        voice1_upload = gr.Audio(label="Upload Voice 1", type="filepath")
        voice2_upload = gr.Audio(label="Upload Voice 2", type="filepath")
    
    generate_btn = gr.Button("Generate Script")
    script_output = gr.Textbox(label="Generated Script", lines=10)
    
    render_btn = gr.Button("Render Podcast")
    audio_output = gr.Audio(label="Generated Podcast")
    
    generate_btn.click(
        gradio_interface,
        inputs=[api_key_input, content_input, duration, voice1_upload, voice2_upload],
        outputs=script_output,
    )
    render_btn.click(
        render_podcast,
        inputs=[api_key_input, script_output, voice1_upload, voice2_upload],
        outputs=audio_output,
    )

demo.launch()