# INVIDEO_BASIC / app.py
import os
import asyncio
import logging
import tempfile
import requests
from datetime import datetime
import edge_tts
import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from keybert import KeyBERT
from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip, CompositeAudioClip, concatenate_audioclips
import re
import math
import shutil
import json
from collections import Counter
# Logging configuration
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(),
logging.FileHandler('video_generator_full.log', encoding='utf-8')
]
)
logger = logging.getLogger(__name__)
logger.info("="*80)
logger.info("STARTING VIDEO GENERATOR EXECUTION")
logger.info("="*80)
# Pexels API Key
PEXELS_API_KEY = os.environ.get("PEXELS_API_KEY")
if not PEXELS_API_KEY:
logger.critical("PEXELS_API_KEY environment variable not found.")
# Uncomment to force fail if not set:
# raise ValueError("Pexels API key not configured")
# Model Initialization
MODEL_NAME = "datificate/gpt2-small-spanish"
logger.info(f"Initializing GPT-2 model: {MODEL_NAME}")
tokenizer = None
model = None
try:
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
model = GPT2LMHeadModel.from_pretrained(MODEL_NAME).eval()
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
logger.info(f"GPT-2 model loaded | Vocab size: {len(tokenizer)}")
except Exception as e:
logger.error(f"CRITICAL FAILURE loading GPT-2: {str(e)}", exc_info=True)
tokenizer = model = None
logger.info("Loading KeyBERT model...")
kw_model = None
try:
kw_model = KeyBERT('distilbert-base-multilingual-cased')
logger.info("KeyBERT initialized successfully")
except Exception as e:
logger.error(f"FAILURE loading KeyBERT: {str(e)}", exc_info=True)
kw_model = None
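# --- Pexels search helper ---
# Queries the Pexels video search API for landscape, medium-size clips matching
# `query` and returns the raw list of video dicts (empty list on any failure).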
def buscar_videos_pexels(query, api_key, per_page=5):
if not api_key:
logger.warning("Cannot search Pexels: API Key not configured.")
return []
logger.debug(f"Searching Pexels: '{query}' | Results per page: {per_page}")
headers = {"Authorization": api_key}
try:
params = {
"query": query,
"per_page": per_page,
"orientation": "landscape",
"size": "medium"
}
response = requests.get(
"https://api.pexels.com/videos/search",
headers=headers,
params=params,
timeout=20
)
response.raise_for_status()
data = response.json()
videos = data.get('videos', [])
logger.info(f"Pexels: Found {len(videos)} videos for '{query}'")
return videos
except requests.exceptions.RequestException as e:
logger.error(f"Pexels connection error for '{query}': {str(e)}")
except json.JSONDecodeError:
logger.error(f"Pexels: Invalid JSON received | Status: {response.status_code} | Response: {response.text[:200]}...")
except Exception as e:
logger.error(f"Unexpected Pexels error for '{query}': {str(e)}", exc_info=True)
return []
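# --- Script generation ---
# Expands a topic prompt into a short Spanish script with the GPT-2 model, keeping
# only the first one or two sentences; falls back to the original prompt if the
# model is unavailable or generation fails.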
def generate_script(prompt, max_length=150):
logger.info(f"Generando guión | Prompt: '{prompt[:50]}...' | Max length: {max_length}")
if not tokenizer or not model:
logger.warning("GPT-2 models not available - Using original prompt as script.")
return prompt
try:
enhanced_prompt = f"Escribe un guion corto, interesante y coherente sobre: {prompt}"
inputs = tokenizer(enhanced_prompt, return_tensors="pt", truncation=True, max_length=512)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
outputs = model.generate(
**inputs,
max_length=max_length,
do_sample=True,
top_p=0.9,
top_k=40,
temperature=0.7,
repetition_penalty=1.2,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id,
no_repeat_ngram_size=3
)
text = tokenizer.decode(outputs[0], skip_special_tokens=True)
text = re.sub(r'<[^>]+>', '', text)
text = text.strip()
sentences = text.split('.')
if sentences and sentences[0].strip():
final_text = sentences[0].strip() + '.'
if len(sentences) > 1 and sentences[1].strip() and len(final_text.split()) < max_length * 0.5:
final_text += " " + sentences[1].strip() + "."
final_text = final_text.replace("..", ".")
logger.info(f"Guion generado (Truncado): '{final_text[:100]}...'")
return final_text.strip()
logger.info(f"Guion generado (sin oraciones completas): '{text[:100]}...'")
return text.strip()
except Exception as e:
logger.error(f"Error generating script with GPT-2: {str(e)}", exc_info=True)
logger.warning("Using original prompt as script due to generation error.")
return prompt.strip()
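# --- Text-to-speech ---
# Renders the script to an MP3 with edge-tts and reports success only if the
# output file exists and exceeds a minimal size threshold.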
async def text_to_speech(text, output_path, voice="es-ES-ElviraNeural"):
logger.info(f"Converting text to speech | Chars: {len(text)} | Voice: {voice} | Output: {output_path}")
if not text or not text.strip():
logger.warning("Empty text for TTS")
return False
try:
communicate = edge_tts.Communicate(text, voice)
await communicate.save(output_path)
if os.path.exists(output_path) and os.path.getsize(output_path) > 100:
logger.info(f"Audio saved successfully to: {output_path} | Size: {os.path.getsize(output_path)} bytes")
return True
else:
logger.error(f"TTS saved small or empty file to: {output_path}")
return False
except Exception as e:
logger.error(f"Error in TTS: {str(e)}", exc_info=True)
return False
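# --- Video download helper ---
# Streams a video URL into the temporary directory and returns the local path,
# or None if the download failed or the file looks incomplete.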
def download_video_file(url, temp_dir):
if not url:
logger.warning("Video URL not provided for download")
return None
try:
logger.info(f"Downloading video from: {url[:80]}...")
os.makedirs(temp_dir, exist_ok=True)
file_name = f"video_dl_{datetime.now().strftime('%Y%m%d_%H%M%S_%f')}.mp4"
output_path = os.path.join(temp_dir, file_name)
with requests.get(url, stream=True, timeout=60) as r:
r.raise_for_status()
with open(output_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
if os.path.exists(output_path) and os.path.getsize(output_path) > 1000:
logger.info(f"Video downloaded successfully: {output_path} | Size: {os.path.getsize(output_path)} bytes")
return output_path
else:
logger.warning(f"Download seems incomplete or empty for {url[:80]}... File: {output_path} Size: {os.path.getsize(output_path) if os.path.exists(output_path) else 'N/A'} bytes")
if os.path.exists(output_path):
os.remove(output_path)
return None
except requests.exceptions.RequestException as e:
logger.error(f"Download error for {url[:80]}... : {str(e)}")
except Exception as e:
logger.error(f"Unexpected error downloading {url[:80]}... : {str(e)}", exc_info=True)
return None
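# --- Background-music length adjustment ---
# Trims the clip if it already covers the target duration; otherwise concatenates
# enough copies of it and trims the result to exactly target_duration.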
def loop_audio_to_length(audio_clip, target_duration):
logger.debug(f"Adjusting audio | Current duration: {audio_clip.duration:.2f}s | Target: {target_duration:.2f}s")
if audio_clip.duration is None or audio_clip.duration <= 0:
logger.warning("Audio clip has zero or negative duration, cannot loop.")
# An AudioFileClip built from an empty path would fail to open; return the clip unchanged instead.
return audio_clip
if audio_clip.duration >= target_duration:
logger.debug("Audio clip already longer or equal to target.")
return audio_clip.subclip(0, target_duration)
loops = math.ceil(target_duration / audio_clip.duration)
logger.debug(f"Creating {loops} audio loops")
audio_segments = [audio_clip] * loops
try:
looped_audio = concatenate_audioclips(audio_segments)
final_looped_audio = looped_audio.subclip(0, target_duration)
# Close the temporary concatenated clip
try: looped_audio.close()
except: pass
return final_looped_audio
except Exception as e:
logger.error(f"Error concatenating audio clips for looping: {str(e)}", exc_info=True)
return audio_clip.subclip(0, min(audio_clip.duration, target_duration))
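# --- Keyword extraction ---
# Tries KeyBERT (1- and 2-word phrases) first; on failure falls back to a simple
# frequency count filtered by a Spanish stop-word list, and finally to generic
# default keywords.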
def extract_visual_keywords_from_script(script_text):
logger.info("Extracting keywords from script")
if not script_text or not script_text.strip():
logger.warning("Empty script, cannot extract keywords.")
return ["naturaleza", "ciudad", "paisaje"]
clean_text = re.sub(r'[^\w\sáéíóúñÁÉÍÓÚÑ]', '', script_text)
keywords_list = []
if kw_model:
try:
logger.debug("Attempting KeyBERT extraction...")
# KeyBERT forwards stop_words to scikit-learn's CountVectorizer, which only ships an
# English list ('spanish' raises a ValueError), so pass None here and rely on the
# Spanish stop-word filtering in the fallback method below.
keywords1 = kw_model.extract_keywords(clean_text, keyphrase_ngram_range=(1, 1), stop_words=None, top_n=5)
keywords2 = kw_model.extract_keywords(clean_text, keyphrase_ngram_range=(2, 2), stop_words=None, top_n=3)
all_keywords = keywords1 + keywords2
all_keywords.sort(key=lambda item: item[1], reverse=True)
seen_keywords = set()
for keyword, score in all_keywords:
formatted_keyword = keyword.lower().replace(" ", "+")
if formatted_keyword and formatted_keyword not in seen_keywords:
keywords_list.append(formatted_keyword)
seen_keywords.add(formatted_keyword)
if len(keywords_list) >= 5:
break
if keywords_list:
logger.debug(f"KeyBERT extracted keywords: {keywords_list}")
return keywords_list
except Exception as e:
logger.warning(f"KeyBERT failed: {str(e)}. Trying simple method.")
logger.debug("Extracting keywords with simple method...")
words = clean_text.lower().split()
stop_words = {"el", "la", "los", "las", "de", "en", "y", "a", "que", "es", "un", "una", "con", "para", "del", "al", "por", "su", "sus", "se", "lo", "le", "me", "te", "nos", "os", "les", "mi", "tu", "nuestro", "vuestro", "este", "ese", "aquel", "esta", "esa", "aquella", "esto", "eso", "aquello", "mis", "tus", "nuestros", "vuestros", "estas", "esas", "aquellas", "si", "no", "más", "menos", "sin", "sobre", "bajo", "entre", "hasta", "desde", "durante", "mediante", "según", "versus", "via", "cada", "todo", "todos", "toda", "todas", "poco", "pocos", "poca", "pocas", "mucho", "muchos", "mucha", "muchas", "varios", "varias", "otro", "otros", "otra", "otras", "mismo", "misma", "mismos", "mismas", "tan", "tanto", "tanta", "tantos", "tantas", "tal", "tales", "cual", "cuales", "cuyo", "cuya", "cuyos", "cuyas", "quien", "quienes", "cuan", "cuanto", "cuanta", "cuantos", "cuantas", "como", "donde", "cuando", "porque", "aunque", "mientras", "siempre", "nunca", "jamás", "muy", "casi", "solo", "solamente", "incluso", "apenas", "quizás", "tal vez", "acaso", "claro", "cierto", "obvio", "evidentemente", "realmente", "simplemente", "generalmente", "especialmente", "principalmente", "posiblemente", "probablemente", "difícilmente", "fácilmente", "rápidamente", "lentamente", "bien", "mal", "mejor", "peor", "arriba", "abajo", "adelante", "atrás", "cerca", "lejos", "dentro", "fuera", "encima", "debajo", "frente", "detrás", "antes", "después", "luego", "pronto", "tarde", "todavía", "ya", "aun", "aún", "quizá"}
valid_words = [word for word in words if len(word) > 3 and word not in stop_words]
if not valid_words:
logger.warning("No valid keywords found with simple method. Using default keywords.")
return ["naturaleza", "ciudad", "paisaje"]
word_counts = Counter(valid_words)
top_keywords = [word.replace(" ", "+") for word, _ in word_counts.most_common(5)]
if not top_keywords:
logger.warning("Simple method produced no keywords. Using default keywords.")
return ["naturaleza", "ciudad", "paisaje"]
logger.info(f"Final keywords: {top_keywords}")
return top_keywords
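# --- Main pipeline ---
# 1) obtain or generate the script, 2) synthesize the voice-over, 3) extract
# keywords, 4) search and download Pexels clips, 5) concatenate/loop/trim footage
# to the voice duration, 6) mix optional background music, 7) render the MP4.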
def crear_video(prompt_type, input_text, musica_file=None):
logger.info("="*80)
logger.info(f"STARTING VIDEO CREATION | Type: {prompt_type}")
logger.debug(f"Input: '{input_text[:100]}...'")
start_time = datetime.now()
temp_dir_intermediate = None
audio_tts = None
musica_audio = None
video_base = None
video_final = None
try:
# 1. Generate or use script
if prompt_type == "Generar Guion con IA":
guion = generate_script(input_text)
else:
guion = input_text.strip()
logger.info(f"Final script ({len(guion)} chars): '{guion[:100]}...'")
if not guion.strip():
logger.error("Resulting script is empty.")
raise ValueError("The script is empty.")
temp_dir_intermediate = tempfile.mkdtemp(prefix="video_gen_intermediate_")
logger.info(f"Intermediate temporary directory created: {temp_dir_intermediate}")
temp_intermediate_files = []
# 2. Generate voice audio
logger.info("Generating voice audio...")
voz_path = os.path.join(temp_dir_intermediate, "voz.mp3")
if not asyncio.run(text_to_speech(guion, voz_path)):
logger.error("Failed to generate voice audio.")
raise ValueError("Error generating voice audio.")
temp_intermediate_files.append(voz_path)
audio_tts = AudioFileClip(voz_path)
audio_duration = audio_tts.duration
logger.info(f"Voice audio duration: {audio_duration:.2f} seconds")
if audio_duration < 0.5:
logger.error(f"Voice audio duration ({audio_duration:.2f}s) is too short.")
raise ValueError("Generated voice audio is too short.")
# 3. Extract keywords
logger.info("Extracting keywords...")
try:
keywords = extract_visual_keywords_from_script(guion)
logger.info(f"Identified keywords: {keywords}")
except Exception as e:
logger.error(f"Error extracting keywords: {str(e)}", exc_info=True)
keywords = ["naturaleza", "paisaje"]
if not keywords:
keywords = ["video", "background"]
# 4. Search and download videos
logger.info("Searching videos on Pexels...")
videos_data = []
total_desired_videos = 10
per_page_per_keyword = max(1, total_desired_videos // len(keywords))
for keyword in keywords:
if len(videos_data) >= total_desired_videos: break
try:
videos = buscar_videos_pexels(keyword, PEXELS_API_KEY, per_page=per_page_per_keyword)
if videos:
videos_data.extend(videos)
logger.info(f"Found {len(videos)} videos for '{keyword}'. Total data: {len(videos_data)}")
except Exception as e:
logger.warning(f"Error searching videos for '{keyword}': {str(e)}")
if len(videos_data) < total_desired_videos / 2:
logger.warning(f"Few videos found ({len(videos_data)}). Trying generic keywords.")
generic_keywords = ["nature", "city", "background", "abstract"]
for keyword in generic_keywords:
if len(videos_data) >= total_desired_videos: break
try:
videos = buscar_videos_pexels(keyword, PEXELS_API_KEY, per_page=2)
if videos:
videos_data.extend(videos)
logger.info(f"Found {len(videos)} videos for '{keyword}' (generic). Total data: {len(videos_data)}")
except Exception as e:
logger.warning(f"Error searching generic videos for '{keyword}': {str(e)}")
if not videos_data:
logger.error("No videos found on Pexels for any keyword.")
raise ValueError("No suitable videos found on Pexels.")
video_paths = []
logger.info(f"Attempting to download {len(videos_data)} found videos...")
for video in videos_data:
if 'video_files' not in video or not video['video_files']:
logger.debug(f"Skipping video without video files: {video.get('id')}")
continue
try:
best_quality = None
for vf in sorted(video['video_files'], key=lambda x: x.get('width', 0) * x.get('height', 0), reverse=True):
if 'link' in vf:
best_quality = vf
break
if best_quality and 'link' in best_quality:
path = download_video_file(best_quality['link'], temp_dir_intermediate)
if path:
video_paths.append(path)
temp_intermediate_files.append(path)
logger.info(f"Video downloaded OK from {best_quality['link'][:50]}...")
else:
logger.warning(f"Could not download video from {best_quality['link'][:50]}...")
else:
logger.warning(f"No valid download link found for video {video.get('id')}.")
except Exception as e:
logger.warning(f"Error processing/downloading video {video.get('id')}: {str(e)}")
logger.info(f"Downloaded {len(video_paths)} usable video files.")
if not video_paths:
logger.error("Could not download any usable video file.")
raise ValueError("Could not download any usable video from Pexels.")
# 5. Process and concatenate video clips
logger.info("Processing and concatenating downloaded videos...")
clips = []
current_duration = 0
min_clip_duration = 0.5
max_clip_segment = 10.0
for i, path in enumerate(video_paths):
if current_duration >= audio_duration + max_clip_segment:
logger.debug(f"Video base sufficient ({current_duration:.1f}s >= {audio_duration:.1f}s + {max_clip_segment:.1f}s buffer). Stopping processing remaining source clips.")
break
clip = None
try:
logger.debug(f"[{i+1}/{len(video_paths)}] Opening clip: {path}")
clip = VideoFileClip(path)
if clip.reader is None or clip.duration is None or clip.duration <= 0:
logger.warning(f"[{i+1}/{len(video_paths)}] Clip {path} seems invalid (reader is None or duration <= 0). Skipping.")
continue
remaining_needed = audio_duration - current_duration
potential_use_duration = min(clip.duration, max_clip_segment)
if remaining_needed > 0:
segment_duration = min(potential_use_duration, remaining_needed + min_clip_duration)
segment_duration = max(min_clip_duration, segment_duration)
segment_duration = min(segment_duration, clip.duration)
if segment_duration >= min_clip_duration:
try:
sub = clip.subclip(0, segment_duration)
clips.append(sub)
current_duration += sub.duration
logger.debug(f"[{i+1}/{len(video_paths)}] Segment added: {sub.duration:.1f}s (total video: {current_duration:.1f}/{audio_duration:.1f}s)")
# sub.close() # Decided against closing subclips explicitly here
except Exception as sub_e:
logger.warning(f"[{i+1}/{len(video_paths)}] Error creating subclip from {path} ({segment_duration:.1f}s): {str(sub_e)}")
continue
else:
logger.debug(f"[{i+1}/{len(video_paths)}] Clip {path} ({clip.duration:.1f}s) doesn't contribute sufficient segment ({segment_duration:.1f}s needed from it). Skipping.")
else:
logger.debug(f"[{i+1}/{len(video_paths)}] Video base duration already reached. Skipping clip.")
except Exception as e:
logger.warning(f"[{i+1}/{len(video_paths)}] Error processing video {path}: {str(e)}", exc_info=True)
continue
finally:
if clip is not None:
try:
clip.close()
logger.debug(f"[{i+1}/{len(video_paths)}] Clip {path} closed.")
except Exception as close_e:
logger.warning(f"[{i+1}/{len(video_paths)}] Error closing clip {path}: {str(close_e)}")
logger.info(f"Source clip processing finished. Obtained {len(clips)} valid clips.")
if not clips:
logger.error("No valid video clips available to create the sequence.")
raise ValueError("No valid video clips available to create the video.")
logger.info(f"Concatenating {len(clips)} video clips.")
# Use the default "chain" method for simple concatenation
video_base = concatenate_videoclips(clips) # Removed method="compose"
logger.info(f"Base video duration after initial concatenation: {video_base.duration:.2f}s")
# --- REVISED REPETITION LOGIC ---
if video_base.duration < audio_duration:
logger.info(f"Base video ({video_base.duration:.2f}s) is shorter than audio ({audio_duration:.2f}s). Repeating...")
num_full_repeats = int(audio_duration // video_base.duration)
remaining_duration = audio_duration % video_base.duration
repeated_clips_list = [video_base] * num_full_repeats
if remaining_duration > 0:
try:
remaining_clip = video_base.subclip(0, remaining_duration)
repeated_clips_list.append(remaining_clip)
logger.debug(f"Adding remaining segment: {remaining_duration:.2f}s")
except Exception as e:
logger.warning(f"Error creating subclip for remaining duration {remaining_duration:.2f}s: {str(e)}")
if repeated_clips_list:
logger.info(f"Concatenating {len(repeated_clips_list)} parts for repetition.")
video_base_repeated = concatenate_videoclips(repeated_clips_list)
logger.info(f"Duration of repeated video base: {video_base_repeated.duration:.2f}s")
try: video_base.close()
except: pass
video_base = video_base_repeated
else:
logger.error("Failed to create repeated video clips list.")
if video_base.duration > audio_duration:
logger.info(f"Trimming video base ({video_base.duration:.2f}s) to match audio duration ({audio_duration:.2f}s).")
trimmed_video_base = video_base.subclip(0, audio_duration)
try: video_base.close()
except: pass
video_base = trimmed_video_base
logger.info(f"Final base video duration: {video_base.duration:.2f}s")
# 6. Handle background music
logger.info("Processing audio...")
final_audio = audio_tts
if musica_file:
try:
music_path = os.path.join(temp_dir_intermediate, "musica_bg.mp3")
shutil.copyfile(musica_file, music_path)
temp_intermediate_files.append(music_path)
logger.info(f"Background music copied to: {music_path}")
musica_audio = AudioFileClip(music_path)
logger.debug(f"Original music duration: {musica_audio.duration:.2f}s")
musica_audio_looped = loop_audio_to_length(musica_audio, video_base.duration)
logger.debug(f"Music adjusted to video duration: {musica_audio_looped.duration:.2f}s")
try: musica_audio.close()
except: pass
musica_audio = musica_audio_looped
final_audio = CompositeAudioClip([
musica_audio.volumex(0.2),
audio_tts.volumex(1.0)
])
logger.info("Audio mix completed (voice + music).")
except Exception as e:
logger.warning(f"Error processing background music: {str(e)}", exc_info=True)
final_audio = audio_tts
logger.warning("Using voice audio only due to music processing error.")
if final_audio.duration is not None and final_audio.duration > video_base.duration:
final_audio = final_audio.subclip(0, video_base.duration)
elif final_audio.duration is not None and final_audio.duration < video_base.duration:
logger.warning(f"Final audio duration ({final_audio.duration:.2f}s) is less than video base ({video_base.duration:.2f}s).")
# 7. Create final video
logger.info("Rendering final video...")
video_final = video_base.set_audio(final_audio)
output_filename = "final_video.mp4"
output_path = os.path.join(temp_dir_intermediate, output_filename)
logger.info(f"Writing final video to: {output_path}")
video_final.write_videofile(
output_path,
fps=24,
threads=4,
codec="libx264",
audio_codec="aac",
preset="medium",
logger='bar'
)
total_time = (datetime.now() - start_time).total_seconds()
logger.info(f"VIDEO PROCESS FINISHED | Output: {output_path} | Total time: {total_time:.2f}s")
return output_path
except ValueError as ve:
logger.error(f"CONTROLLED ERROR in crear_video: {str(ve)}")
raise ve
except Exception as e:
logger.critical(f"CRITICAL UNHANDLED ERROR in crear_video: {str(e)}", exc_info=True)
raise e
finally:
logger.info("Starting cleanup of intermediate temporary files...")
try:
if audio_tts is not None:
try: audio_tts.close()
except: pass
if musica_audio is not None:
try: musica_audio.close()
except: pass
if video_base is not None:
try: video_base.close()
except: pass
if video_final is not None:
try: video_final.close()
except: pass
except Exception as e:
logger.warning(f"Error during final clip closing in finally: {str(e)}")
if temp_dir_intermediate and os.path.exists(temp_dir_intermediate):
final_output_in_temp = os.path.join(temp_dir_intermediate, "final_video.mp4")
for path in temp_intermediate_files:
try:
if os.path.isfile(path) and path != final_output_in_temp:
logger.debug(f"Deleting temporary file: {path}")
os.remove(path)
except Exception as e:
logger.warning(f"Could not delete temporary file {path}: {str(e)}")
logger.info(f"Intermediate temporary directory {temp_dir_intermediate} will persist for Gradio to read the final video.")
def run_app(prompt_type, prompt_ia, prompt_manual, musica_file):
logger.info("="*80)
logger.info("REQUEST RECEIVED IN INTERFACE")
input_text = prompt_ia if prompt_type == "Generar Guion con IA" else prompt_manual
if not input_text or not input_text.strip():
logger.warning("Empty input text.")
return None, gr.update(value="⚠️ Por favor, ingresa texto para el guion o el tema.")
logger.info(f"Input Type: {prompt_type}")
logger.debug(f"Input Text: '{input_text[:100]}...'")
if musica_file:
logger.info(f"Music file received: {musica_file}")
else:
logger.info("No music file provided.")
try:
logger.info("Calling crear_video...")
video_path = crear_video(prompt_type, input_text, musica_file)
if video_path and os.path.exists(video_path):
logger.info(f"crear_video returned path: {video_path}")
logger.info(f"Size of returned video file: {os.path.getsize(video_path)} bytes")
return video_path, gr.update(value="✅ Video generado exitosamente.", interactive=False)
else:
logger.error(f"crear_video did not return a valid path or file does not exist: {video_path}")
return None, gr.update(value="❌ Error: La generación del video falló o el archivo no se creó correctamente.", interactive=False)
except ValueError as ve:
logger.warning(f"Validation error during video creation: {str(ve)}")
return None, gr.update(value=f"⚠️ Error de validación: {str(ve)}", interactive=False)
except Exception as e:
logger.critical(f"Critical error during video creation: {str(e)}", exc_info=True)
return None, gr.update(value=f"❌ Error inesperado: {str(e)}", interactive=False)
finally:
logger.info("End of run_app handler.")
# Gradio Interface
with gr.Blocks(title="Generador de Videos con IA", theme=gr.themes.Soft(), css="""
.gradio-container {max-width: 800px; margin: auto;}
h1 {text-align: center;}
""") as app:
gr.Markdown("# 🎬 Automatic AI Video Generator")
gr.Markdown("Generate short videos from a topic or script, using stock footage from Pexels and generated voice.")
with gr.Row():
with gr.Column():
prompt_type = gr.Radio(
["Generar Guion con IA", "Usar Mi Guion"],
label="Input Method",
value="Generar Guion con IA"
)
with gr.Column(visible=True) as ia_guion_column:
prompt_ia = gr.Textbox(
label="Topic for AI",
lines=2,
placeholder="Ex: A natural landscape with mountains and rivers at sunrise, showing the beauty of nature...",
max_lines=4,
value=""
)
with gr.Column(visible=False) as manual_guion_column:
prompt_manual = gr.Textbox(
label="Your Full Script",
lines=5,
placeholder="Ex: In this video, we will explore the mysteries of the ocean. We will see fascinating marine life and vibrant coral reefs. Join us on this underwater adventure!",
max_lines=10,
value=""
)
musica_input = gr.Audio(
label="Background Music (optional)",
type="filepath",
interactive=True,
value=None
)
generate_btn = gr.Button("✨ Generate Video", variant="primary")
with gr.Column():
video_output = gr.Video(
label="Generated Video",
interactive=False,
height=400
)
status_output = gr.Textbox(
label="Status",
interactive=False,
show_label=False,
placeholder="Waiting for action...",
value="Waiting for input..."
)
prompt_type.change(
lambda x: (gr.update(visible=x == "Generar Guion con IA"),
gr.update(visible=x == "Usar Mi Guion")),
inputs=prompt_type,
outputs=[ia_guion_column, manual_guion_column]
)
generate_btn.click(
lambda: (None, gr.update(value="⏳ Processing... This can take 2-5 minutes depending on length and resources.", interactive=False)),
outputs=[video_output, status_output],
queue=True,
).then(
run_app,
inputs=[prompt_type, prompt_ia, prompt_manual, musica_input],
outputs=[video_output, status_output]
)
gr.Markdown("### Instructions:")
gr.Markdown("""
1. **Pexels API Key:** Ensure you have set the `PEXELS_API_KEY` environment variable.
2. **Select Input Method**:
- "Generate Script with AI": Describe a topic (e.g., "The beauty of mountains"). AI will generate a short script.
- "Use My Script": Write the full script for your video.
3. **Upload Music** (optional): Select an audio file (MP3, WAV, etc.) for background music.
4. **Click "✨ Generate Video"**.
5. Wait for the video to process. Processing time may vary. Check the status box.
6. If there are errors, check the `video_generator_full.log` file for details.
""")
gr.Markdown("---")
gr.Markdown("Developed by [Your Name/Company/Alias - Optional]")
if __name__ == "__main__":
logger.info("Verifying critical dependencies...")
try:
from moviepy.editor import VideoFileClip
logger.info("MoviePy imported correctly. FFmpeg seems accessible.")
except Exception as e:
logger.critical(f"Failed to import MoviePy, often indicates FFmpeg issues. Ensure it is installed and in PATH. Error: {e}")
logger.info("Starting Gradio app...")
try:
app.launch(server_name="0.0.0.0", server_port=7860, share=False)
except Exception as e:
logger.critical(f"Could not launch app: {str(e)}", exc_info=True)
raise