# AI_Comedy_Show / app.py
# Hugging Face Space by Manasa1 — revision ec5dc91 (verified), ~6.3 kB.
# Standard library
import asyncio
import os

# Third-party
import aiohttp
import gradio as gr
import scipy.io.wavfile
import speech_recognition as sr
import torch
from diffusers import StableDiffusionPipeline
from dotenv import load_dotenv
from groq import Groq
from moviepy.editor import (
    AudioFileClip,
    CompositeVideoClip,
    ImageClip,
    ImageSequenceClip,  # added: used by generate_text_video but was missing
    VideoFileClip,
    concatenate_videoclips,
)
from PIL import Image, ImageDraw, ImageFont
from transformers import pipeline
from TTS.api import TTS
# Load environment variables from a local .env file (supplies GROQ_API_KEY).
load_dotenv()

# Initialize API clients.
# NOTE(review): if GROQ_API_KEY is unset this passes api_key=None and fails
# only at request time — confirm the secret is configured in the Space.
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
# Use GPT-3.5-turbo for text generation
# Use a Groq-hosted LLM for comedy-script generation.
async def generate_comedy_script(prompt):
    """Generate a short comedy script for *prompt* via the Groq chat API.

    Args:
        prompt: Topic or premise for the sketch, as typed in the UI.

    Returns:
        The generated script text (capped at 200 tokens).
    """
    # BUG FIX: `Groq` is the synchronous client, so `create(...)` returns a
    # plain ChatCompletion — the original `await groq_client.chat.completions
    # .create(...)` raised TypeError ("object can't be used in 'await'
    # expression"). Run the blocking call in a worker thread so this coroutine
    # stays awaitable without blocking the event loop.
    chat_completion = await asyncio.to_thread(
        groq_client.chat.completions.create,
        messages=[
            {
                "role": "system",
                "content": "You are a comedy writer. Generate a short, funny script based on the given prompt."
            },
            {
                "role": "user",
                "content": prompt
            }
        ],
        model="mixtral-8x7b-32768",
        max_tokens=200,
    )
    return chat_completion.choices[0].message.content
# Use Coqui TTS for text-to-speech (CPU-only, single English voice).
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)
# Use MusicGen (small) for music generation, forced onto CPU.
music_generator = pipeline("text-to-audio", model="facebook/musicgen-small", device="cpu")
# Use Fluently Anime (Stable Diffusion) for anime image generation.
# NOTE(review): all three models load eagerly at import time on CPU —
# startup is slow and memory-heavy; confirm the Space has enough RAM.
model_id = "fluently/Fluently-anime"
anime_image_generator = StableDiffusionPipeline.from_pretrained(model_id).to("cpu")
# Convert Text to Speech using Coqui TTS (unchanged)
def text_to_speech(script):
    """Synthesize *script* with Coqui TTS and return the output WAV path."""
    audio_path = 'output.wav'
    tts.tts_to_file(text=script, file_path=audio_path)
    return audio_path
# Create Anime Images Using Fluently Anime (unchanged)
def create_images_from_script(script):
    """Render one anime-style image per sentence of *script*.

    Args:
        script: Text whose '. '-separated sentences become image prompts.

    Returns:
        List of paths of the generated PNG frames (in sentence order).
    """
    image_paths = []
    # ROBUSTNESS FIX: splitting on '. ' can yield empty or whitespace-only
    # segments (e.g. "A. " -> ["A", ""]); skip them instead of prompting the
    # diffusion model with an empty string.
    for i, line in enumerate(script.split('. ')):
        line = line.strip()
        if not line:
            continue
        img = anime_image_generator(line).images[0]
        img_path = f'/tmp/anime_image_{i}.png'
        img.save(img_path)
        image_paths.append(img_path)
    return image_paths
# Generate Fun Music Track using MusicGen (unchanged)
def generate_fun_music(prompt, output_music_file="fun_music.wav"):
    """Generate a short music clip from *prompt* with MusicGen.

    Args:
        prompt: Text description of the desired music.
        output_music_file: Destination WAV path (default "fun_music.wav").

    Returns:
        The path of the written WAV file.
    """
    # The text-to-audio pipeline returns raw samples plus their sample rate.
    result = music_generator(prompt)
    scipy.io.wavfile.write(
        output_music_file,
        rate=result["sampling_rate"],
        data=result["audio"],
    )
    return output_music_file
# Create Video from Generated Anime Images (unchanged)
def generate_text_video(script):
    """Build a silent MP4 from the anime frames generated for *script*.

    Args:
        script: Text handed to create_images_from_script.

    Returns:
        Path of the written MP4 ("/tmp/final_video.mp4").
    """
    image_paths = create_images_from_script(script)
    # BUG FIX: ImageSequenceClip was referenced without being imported
    # (NameError at runtime); it is now included in the moviepy import at
    # the top of the file.
    # NOTE(review): fps=24 shows each sentence-image for ~42 ms — a lower
    # fps (e.g. 1) was probably intended; confirm before changing.
    video_clip = ImageSequenceClip(image_paths, fps=24)
    video_path = "/tmp/final_video.mp4"
    video_clip.write_videofile(video_path, codec='libx264')
    return video_path
# Combine Audio and Video (unchanged)
def combine_audio_video(video_file, audio_file):
    """Attach *audio_file* as the soundtrack of *video_file*.

    Args:
        video_file: Path to an existing video file.
        audio_file: Path to an existing audio file.

    Returns:
        A moviepy clip with the audio attached. Note the clip is NOT written
        to disk here — the caller is responsible for rendering it.
    """
    clip = VideoFileClip(video_file)
    soundtrack = AudioFileClip(audio_file)
    return clip.set_audio(soundtrack)
# Main Function to Generate Comedy Animation
# Main function to generate the comedy animation end-to-end.
async def generate_comedy_and_animation(prompt):
    """Pipeline: prompt -> script -> speech + frames + music -> scored video.

    Args:
        prompt: Comedy premise from the UI textbox.

    Returns:
        Tuple of (script text, narration WAV path, final MP4 path) — the
        shapes gr.Textbox / gr.Audio / gr.Video expect.
    """
    script = await generate_comedy_script(prompt)
    audio_file = text_to_speech(script)
    video_file = generate_text_video(script)
    fun_music = generate_fun_music(prompt)
    final_clip = combine_audio_video(video_file, fun_music)
    # BUG FIX: the original returned the moviepy clip object itself, but
    # gr.Video needs a file path — render the composed clip and return that.
    final_path = "/tmp/comedy_animation.mp4"
    final_clip.write_videofile(final_path, codec='libx264')
    return script, audio_file, final_path
# Generate Kids Content (unchanged)
def generate_kids_content(theme):
    """Create a kids music track plus a simple 5-frame caption animation.

    Args:
        theme: Text used both as the music prompt and the on-screen caption.

    Returns:
        Tuple of (music WAV path, animation MP4 path).
    """
    music_file = generate_fun_music(theme, output_music_file="kids_music.wav")
    font = ImageFont.load_default()
    clips = []
    for frame_idx in range(5):
        # One solid blue card with a yellow caption per second of video.
        card = Image.new('RGB', (800, 400), color=(0, 0, 255))
        ImageDraw.Draw(card).text(
            (10, 180), f"Kids Music: {theme}", font=font, fill=(255, 255, 0)
        )
        frame_path = f'/tmp/kids_temp_{frame_idx}.png'
        card.save(frame_path)
        clips.append(
            ImageClip(frame_path).set_duration(1).set_position(('center', 'center'))
        )
    animation = concatenate_videoclips(clips, method="compose")
    animation = animation.set_audio(AudioFileClip(music_file))
    animation.write_videofile("/tmp/kids_animation.mp4", fps=24)
    return music_file, "/tmp/kids_animation.mp4"
# New function for speech-to-text using SpeechRecognition
# Speech-to-text using the SpeechRecognition package.
def transcribe_audio(audio_file):
    """Transcribe *audio_file* to text via Google's free speech API.

    Args:
        audio_file: Path to an audio file readable by sr.AudioFile
            (WAV/AIFF/FLAC).

    Returns:
        The transcription, or a human-readable error message on failure.
    """
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_file) as source:
        recorded = recognizer.record(source)
    try:
        return recognizer.recognize_google(recorded)
    except sr.UnknownValueError:
        return "Speech recognition could not understand the audio"
    except sr.RequestError as e:
        return f"Could not request results from speech recognition service; {e}"
# Gradio Interface
with gr.Blocks() as app:
gr.Markdown("## AI Comedy and Kids Content Generator")
# Comedy Animation Tab
with gr.Tab("Generate Comedy Animation"):
prompt_input = gr.Textbox(label="Comedy Prompt")
generate_btn = gr.Button("Generate Comedy Script and Animation")
comedy_script = gr.Textbox(label="Generated Script")
comedy_audio = gr.Audio(label="Generated Audio")
comedy_video = gr.Video(label="Generated Animation")
generate_btn.click(
generate_comedy_and_animation,
inputs=prompt_input,
outputs=[comedy_script, comedy_audio, comedy_video]
)
# Kids Music Animation Tab
with gr.Tab("Generate Kids Music Animation"):
theme_input = gr.Textbox(label="Kids Music Theme")
generate_music_btn = gr.Button("Generate Kids Music and Animation")
kids_music_audio = gr.Audio(label="Generated Music")
kids_music_video = gr.Video(label="Generated Kids Animation")
generate_music_btn.click(
generate_kids_content,
inputs=theme_input,
outputs=[kids_music_audio, kids_music_video]
)
# Speech-to-Text Tab
with gr.Tab("Speech-to-Text"):
audio_input = gr.Audio(label="Upload Audio")
transcribe_btn = gr.Button("Transcribe Audio")
transcription_output = gr.Textbox(label="Transcription")
transcribe_btn.click(
transcribe_audio,
inputs=audio_input,
outputs=transcription_output
)
app.launch()