import os
import tempfile

import streamlit as st
from diffusers import DiffusionPipeline
from moviepy.editor import AudioFileClip, ImageClip, VideoFileClip, concatenate_videoclips
from TTS.api import TTS

st.title("📝 Text-to-Video App with Voice Clone")
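
# Flow: prompt -> short video frames (zeroscope) -> narration in the uploaded
# voice (Coqui TTS) -> moviepy muxes the audio onto the clip for playback.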
# Cache the loaded models so Streamlit reruns don't reload the weights
@st.cache_resource()
def load_models():
    # transformers has no text-to-video pipeline task, so the zeroscope
    # checkpoint is loaded through diffusers instead
    video_gen = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XS")  # extra small version
    # Cloning from a speaker_wav needs a multi-speaker model; the single-speaker
    # tacotron2-DDC would ignore the sample, so YourTTS is used instead
    tts_model = TTS("tts_models/multilingual/multi-dataset/your_tts", progress_bar=False)
    return video_gen, tts_model

video_gen, tts_model = load_models()
# Input
input_text = st.text_area("Enter short text (max 100 chars):", max_chars=100)
voice_file = st.file_uploader("Upload your voice sample (short WAV):", type=["wav"])
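
# Generation only runs on an explicit click, and only when both inputs are present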
if st.button("Generate"):
    if input_text and voice_file:
        with st.spinner("Creating video (may take a minute)..."):
            # Short video (15 frames only); in recent diffusers versions
            # .frames is batched per prompt, so take the first (and only) video
            video_frames = video_gen(input_text, num_frames=15).frames[0]
            video_np = (video_frames * 255).astype("uint8")  # float [0, 1] -> uint8 RGB
            video_filename = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
            # Assemble the frames into a clip: 0.2 s per frame at 5 fps
            clips = [ImageClip(frame).set_duration(0.2) for frame in video_np]
            video_clip = concatenate_videoclips(clips)
            video_clip.write_videofile(video_filename, fps=5)
            # Save the uploaded voice sample, then narrate the text with it
            voice_path = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
            audio_filename = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
            with open(voice_path, "wb") as f:
                f.write(voice_file.read())
            # YourTTS is multilingual, so the language must be given explicitly
            tts_model.tts_to_file(text=input_text, speaker_wav=voice_path,
                                  language="en", file_path=audio_filename)
            # Combine video and audio
            final_clip = VideoFileClip(video_filename).set_audio(AudioFileClip(audio_filename))
            final_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
            final_clip.write_videofile(final_video_path, fps=5)
            st.video(final_video_path)
            # Cleanup temp files once the player has the result
            for f in [video_filename, audio_filename, voice_path, final_video_path]:
                os.remove(f)
    else:
        st.warning("Provide both text and a voice sample.")