Update app.py
app.py CHANGED
@@ -1,30 +1,36 @@
 import gradio as gr
 from transformers import pipeline
-from gtts import gTTS
-from moviepy.editor import CompositeVideoClip, ImageClip, AudioFileClip, concatenate_videoclips
 from diffusers import StableDiffusionPipeline
 import torch
 from PIL import Image, ImageDraw, ImageFont
 import scipy.io.wavfile
-import
+from TTS.api import TTS  # Coqui TTS (open source)
+from moviepy.editor import CompositeVideoClip, ImageClip, AudioFileClip, concatenate_videoclips
 
 # Load and Initialize Models
+# Use GPT-2 (open-source) for text generation
 script_generator = pipeline("text-generation", model="gpt2", truncation=True, max_length=100)
-
+
+# Use Stable Diffusion (open-source) for image generation
+image_generator = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16).to("cpu")
+
+# Use MusicGen (open-source) for music generation
 music_generator = pipeline("text-to-audio", model="facebook/musicgen-small", device="cpu")
 
+# Use Coqui TTS (open-source) for text-to-speech
+tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)
+
 
 # Generate Comedy Script
 def generate_comedy_script(prompt):
     script = script_generator(prompt)[0]['generated_text']
     return script
 
-# Convert Text to Speech
+# Convert Text to Speech using Coqui TTS
 def text_to_speech(script):
-
-
-
-    return audio_file
+    output_audio = 'output.wav'
+    tts.tts_to_file(text=script, file_path=output_audio)
+    return output_audio
 
 # Create Images Using Stable Diffusion
 def create_images_from_script(script):
@@ -37,7 +43,7 @@ def create_images_from_script(script):
         image_paths.append(img_path)
     return image_paths
 
-#
+# Generate Fun Music Track using MusicGen
 def generate_fun_music(prompt, output_music_file="fun_music.wav"):
     # Generate music based on the prompt using MusicGen
     response = music_generator(prompt)
@@ -141,3 +147,4 @@ app.launch()
 
 
 
+
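For context, the updated helpers could be wired together roughly as follows. This is a minimal sketch, not part of the commit: it assumes generate_comedy_script, text_to_speech, create_images_from_script, and generate_fun_music behave as their names in the diff suggest, that generate_fun_music returns the path of the file it writes, and that a helper like make_comedy_clip exists; the real assembly code in app.py sits outside the shown hunks and may differ.

# Hypothetical wiring of the helpers defined in app.py (illustrative only)
from moviepy.editor import AudioFileClip, CompositeAudioClip, ImageClip, concatenate_videoclips

def make_comedy_clip(prompt, output_file="comedy.mp4"):
    script = generate_comedy_script(prompt)           # GPT-2 text generation
    voice_path = text_to_speech(script)               # Coqui TTS -> output.wav
    image_paths = create_images_from_script(script)   # Stable Diffusion frames
    music_path = generate_fun_music(prompt)            # MusicGen -> fun_music.wav

    narration = AudioFileClip(voice_path)
    music = AudioFileClip(music_path).volumex(0.3)     # quiet background track
    music = music.set_duration(min(music.duration, narration.duration))
    soundtrack = CompositeAudioClip([narration, music])

    # Show each generated image for an equal share of the narration.
    per_image = narration.duration / max(len(image_paths), 1)
    frames = [ImageClip(p).set_duration(per_image) for p in image_paths]
    video = concatenate_videoclips(frames, method="compose").set_audio(soundtrack)
    video.write_videofile(output_file, fps=24)
    return output_file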