Manasa1 committed on
Commit
4786e02
·
verified ·
1 Parent(s): 0314936

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -1,20 +1,17 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
  from gtts import gTTS
4
- from moviepy.editor import CompositeVideoClip, ImageClip, AudioFileClip, VideoFileClip, concatenate_videoclips
5
  from diffusers import StableDiffusionPipeline
6
  import torch
7
- from PIL import Image
8
- import numpy as np
9
- from scipy.io.wavfile import write
10
  import random
11
 
12
  # Load and Initialize Models
13
  script_generator = pipeline("text-generation", model="gpt2", truncation=True, max_length=100)
14
  image_generator = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cpu")
15
- # For example, use a specific available model if MusicGen isn't accessible
16
- music_generator = pipeline("text-to-audio", model="facebook/musicgen-melody", device="cpu")
17
-
18
 
19
 
20
  # Generate Comedy Script
@@ -29,7 +26,6 @@ def text_to_speech(script):
29
  tts.save(audio_file)
30
  return audio_file
31
 
32
-
33
  # Create Images Using Stable Diffusion
34
  def create_images_from_script(script):
35
  lines = script.split('. ')
@@ -41,13 +37,18 @@ def create_images_from_script(script):
41
  image_paths.append(img_path)
42
  return image_paths
43
 
44
- # Generate Fun Music Track
45
  def generate_fun_music(prompt, output_music_file="fun_music.wav"):
46
- # Generate music based on the prompt
47
- music = music_generator(prompt)[0]['generated_music']
 
 
 
 
 
48
  # Save the generated music to a file
49
- with open(output_music_file, 'wb') as f:
50
- f.write(music)
51
  return output_music_file
52
 
53
  # Create Video from Generated Images
@@ -91,6 +92,7 @@ def generate_kids_animation_with_music(theme, output_video_file="kids_animation.
91
  frame_path = f'/tmp/kids_temp_{i}.png'
92
  img.save(frame_path)
93
  clips.append(ImageClip(frame_path).set_duration(1).set_position(('center', 'center')))
 
94
  final_video = CompositeVideoClip(clips, size=(800, 400))
95
  final_video = final_video.set_audio(AudioFileClip(music_file))
96
  final_video.write_videofile(output_video_file, fps=24)
@@ -138,3 +140,4 @@ app.launch()
138
 
139
 
140
 
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
  from gtts import gTTS
4
+ from moviepy.editor import CompositeVideoClip, ImageClip, AudioFileClip, concatenate_videoclips
5
  from diffusers import StableDiffusionPipeline
6
  import torch
7
+ from PIL import Image, ImageDraw, ImageFont
8
+ import scipy.io.wavfile
 
9
  import random
10
 
11
  # Load and Initialize Models
12
  script_generator = pipeline("text-generation", model="gpt2", truncation=True, max_length=100)
13
  image_generator = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cpu")
14
+ music_generator = pipeline("text-to-audio", model="facebook/musicgen-small", device="cpu")
 
 
15
 
16
 
17
  # Generate Comedy Script
 
26
  tts.save(audio_file)
27
  return audio_file
28
 
 
29
  # Create Images Using Stable Diffusion
30
  def create_images_from_script(script):
31
  lines = script.split('. ')
 
37
  image_paths.append(img_path)
38
  return image_paths
39
 
40
# Generate Fun Music Track using MusicGen
def generate_fun_music(prompt, output_music_file="fun_music.wav"):
    """Generate a short music clip from a text prompt and save it as a WAV file.

    Args:
        prompt: Text description of the music to generate.
        output_music_file: Path of the WAV file to write. Defaults to
            "fun_music.wav".

    Returns:
        str: The path of the written WAV file (same as ``output_music_file``).
    """
    # Generate music from the prompt via the module-level MusicGen pipeline.
    response = music_generator(prompt)

    # The text-to-audio pipeline returns a dict with the raw waveform and its
    # sampling rate: {"audio": ndarray, "sampling_rate": int}.
    audio_data = response["audio"]
    sampling_rate = response["sampling_rate"]

    # The pipeline may return a batched array (e.g. shape (1, samples) or
    # (1, 1, samples)); scipy.io.wavfile.write only accepts 1-D mono or
    # (samples, channels) 2-D data, so drop singleton dimensions first.
    # NOTE(review): assumes mono output from musicgen-small — confirm if a
    # multi-channel model is substituted.
    audio_data = audio_data.squeeze()

    # Save the generated music to a file.
    scipy.io.wavfile.write(output_music_file, rate=sampling_rate, data=audio_data)

    return output_music_file
53
 
54
  # Create Video from Generated Images
 
92
  frame_path = f'/tmp/kids_temp_{i}.png'
93
  img.save(frame_path)
94
  clips.append(ImageClip(frame_path).set_duration(1).set_position(('center', 'center')))
95
+
96
  final_video = CompositeVideoClip(clips, size=(800, 400))
97
  final_video = final_video.set_audio(AudioFileClip(music_file))
98
  final_video.write_videofile(output_video_file, fps=24)
 
140
 
141
 
142
 
143
+