import os
import re
import asyncio
import requests
import numpy as np
import gradio as gr
from datetime import datetime
from moviepy.editor import *
from transformers import pipeline, AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F
import edge_tts
import tempfile
import logging
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import sent_tokenize
import nltk
# Download NLTK resources
nltk.download('punkt')
nltk.download('punkt_tab')  # required by newer NLTK releases for sent_tokenize
# Advanced logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Model configuration
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")  # for private models
# 1. Script generation model (large MBART with Spanish support)
script_generator = pipeline(
    "text2text-generation",
    model="facebook/mbart-large-50",
    tokenizer="facebook/mbart-large-50",
    device=0 if torch.cuda.is_available() else -1
)
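# Note (assumption): facebook/mbart-large-50 is a multilingual pretrained seq2seq model,
# not an instruction-tuned one, so the free-form prompt used in generar_guion_avanzado()
# below may yield weak or repetitive scripts; that is why that function also keeps a
# hard-coded fallback script. The original model choice is kept unchanged here.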
# 2. Semantic embedding model (multilingual)
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
embedding_model = AutoModel.from_pretrained("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
# 3. Available voices: edge_tts.list_voices() is a coroutine, so drive it with asyncio.run,
# and filter by locale (Spanish and English) instead of substring-matching the voice name
VOICES = [v for v in asyncio.run(edge_tts.list_voices()) if v['Locale'].startswith(('es', 'en'))]
VOICE_NAMES = [f"{v['Name']} ({v['Gender']}, {v['Locale']})" for v in VOICES]
def generar_guion_avanzado(prompt):
    """Generates a long, detailed script using AI"""
    try:
        response = script_generator(
            f"Escribe un guion detallado para un video de YouTube sobre '{prompt}' con introducción, 3 puntos principales y conclusión. Usa un estilo atractivo y profesional.",
            max_length=1000,
            num_beams=5,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            do_sample=True
        )
        return response[0]['generated_text']
    except Exception as e:
        logger.error(f"Error en generación de guion: {str(e)}")
        # Fall back to a predefined script
        return f"""
        ¡Hola a todos! Hoy exploraremos el fascinante tema de {prompt}.
        En este video cubriremos tres aspectos clave:
        1. Primer aspecto importante sobre {prompt}
        2. Segundo elemento crucial
        3. Tercer punto que no te puedes perder
        ¡Quedaos hasta el final para descubrir algo increíble!
        """
def obtener_embeddings(textos):
    """Computes semantic embeddings for the given texts"""
    inputs = tokenizer(textos, padding=True, truncation=True, return_tensors="pt", max_length=512)
    with torch.no_grad():
        outputs = embedding_model(**inputs)
    # Mean-pool the token embeddings into one vector per text
    embeddings = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
    return embeddings
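# Note (assumption): the plain mean over last_hidden_state above also averages padding
# tokens. The sentence-transformers model card recommends attention-mask-weighted mean
# pooling, e.g. (hidden * mask).sum(1) / mask.sum(1), which usually gives slightly better
# similarity scores; the simpler version is kept here to preserve the original behavior.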
def buscar_videos_semanticos(query, guion, num_videos=5):
    """Searches for stock videos using semantic analysis of the script"""
    try:
        # Split the script into sentences
        oraciones = sent_tokenize(guion)
        # Compute embeddings for each sentence
        embeddings_oraciones = obtener_embeddings(oraciones)
        # Embedding for the overall query
        embedding_query = obtener_embeddings([query])[0]
        # Similarity between the query and each sentence
        similitudes = cosine_similarity([embedding_query], embeddings_oraciones)[0]
        # Select the most relevant sentences
        indices_relevantes = np.argsort(similitudes)[-3:]
        oraciones_relevantes = [oraciones[i] for i in indices_relevantes]
        # Extract keywords from the relevant sentences
        vectorizer = TfidfVectorizer(stop_words=['el', 'la', 'los', 'las', 'de', 'en', 'y'])
        tfidf = vectorizer.fit_transform(oraciones_relevantes)
        palabras = vectorizer.get_feature_names_out()
        scores = np.asarray(tfidf.sum(axis=0)).ravel()
        indices_importantes = np.argsort(scores)[-5:]
        palabras_clave = [palabras[i] for i in indices_importantes]
        # Query the Pexels video search API
        headers = {"Authorization": PEXELS_API_KEY}
        response = requests.get(
            f"https://api.pexels.com/videos/search?query={'+'.join(palabras_clave)}&per_page={num_videos}",
            headers=headers,
            timeout=20
        )
        videos = response.json().get('videos', [])
        logger.info(f"Encontrados {len(videos)} videos para palabras clave: {palabras_clave}")
        # Keep the highest-resolution videos
        videos_ordenados = sorted(
            videos,
            key=lambda x: x.get('width', 0) * x.get('height', 0),
            reverse=True
        )
        return videos_ordenados[:num_videos]
    except Exception as e:
        logger.error(f"Error en búsqueda semántica: {str(e)}")
        # Fall back to a simple keyword search
        response = requests.get(
            f"https://api.pexels.com/videos/search?query={query}&per_page={num_videos}",
            headers={"Authorization": PEXELS_API_KEY},
            timeout=10
        )
        return response.json().get('videos', [])[:num_videos]
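# Note (assumption): the keyword query above is interpolated directly into the URL, and
# accented Spanish keywords are not URL-encoded. Passing them via requests' params argument,
# e.g. requests.get("https://api.pexels.com/videos/search",
#                   params={"query": " ".join(palabras_clave), "per_page": num_videos},
#                   headers=headers),
# would encode them automatically; the original string formatting is left as-is.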
def crear_video_inteligente(prompt, custom_script, voz_index, musica=None):
    voz_archivo = "voz.mp3"  # defined up front so the finally block can always reference it
    try:
        # 1. Generate the script or use the custom one
        guion = custom_script if custom_script else generar_guion_avanzado(prompt)
        logger.info(f"Guion generado:\n{guion}")
        # 2. Select the voice
        voz_seleccionada = VOICES[voz_index]['ShortName']
        # 3. Generate the narration audio (Communicate.save is a coroutine)
        communicate = edge_tts.Communicate(guion, voz_seleccionada)
        asyncio.run(communicate.save(voz_archivo))
        # 4. Search for videos using semantic analysis
        videos_data = buscar_videos_semanticos(prompt, guion, num_videos=5)
        if not videos_data:
            raise Exception("No se encontraron videos relevantes")
        # 5. Download and prepare the videos
        clips = []
        for video in videos_data:
            # Pick the highest-quality rendition
            video_files = sorted(
                video['video_files'],
                key=lambda x: x.get('width', 0) * x.get('height', 0),
                reverse=True
            )
            video_url = video_files[0]['link']
            # Download the video to a temporary file
            response = requests.get(video_url, stream=True)
            temp_video = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
            for chunk in response.iter_content(chunk_size=1024*1024):
                temp_video.write(chunk)
            temp_video.close()
            # Create the clip
            clip = VideoFileClip(temp_video.name)
            clips.append(clip)
        # 6. Process the audio
        audio = AudioFileClip(voz_archivo)
        total_duration = audio.duration
        if musica:
            # gr.File(type="filepath") passes a path string, not a file object
            musica_clip = AudioFileClip(musica)
            if musica_clip.duration < total_duration:
                # loop() only exists for video clips; use the audio_loop fx for audio
                musica_clip = musica_clip.fx(afx.audio_loop, duration=total_duration)
            audio = CompositeAudioClip([audio, musica_clip.volumex(0.25)])
        # 7. Build the video with intelligent synchronization
        # Per-clip durations
        clip_durations = [c.duration for c in clips]
        total_clip_duration = sum(clip_durations)
        # Adjust the clips to match the audio duration
        if total_clip_duration < total_duration:
            # Repeat the video sequence if needed
            repetitions = int(total_duration / total_clip_duration) + 1
            extended_clips = clips * repetitions
            final_clip = concatenate_videoclips(extended_clips).subclip(0, total_duration)
        else:
            # Adjust playback speed to match the duration
            speed_factor = total_clip_duration / total_duration
            adjusted_clips = [clip.fx(vfx.speedx, speed_factor) for clip in clips]
            final_clip = concatenate_videoclips(adjusted_clips)
        final_clip = final_clip.set_audio(audio)
        # 8. Save the final video
        output_path = f"video_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        final_clip.write_videofile(
            output_path,
            codec="libx264",
            audio_codec="aac",
            threads=4,
            preset='medium',
            fps=24
        )
        return output_path
    except Exception as e:
        logger.error(f"ERROR: {str(e)}")
        return None
    finally:
        # Clean up the narration file
        if os.path.exists(voz_archivo):
            os.remove(voz_archivo)
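# Note (assumption): the temporary .mp4 downloads are created with delete=False and never
# removed, and the VideoFileClip/AudioFileClip objects are never closed. On a long-running
# Space this can leak disk space and file handles; closing each clip and removing the temp
# files after write_videofile() would avoid that. The original cleanup scope is kept above.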
# Professional interface: the custom CSS is passed to the Blocks constructor so it is
# reliably applied (assigning app.css after construction may not take effect)
CUSTOM_CSS = """
#video-output {
    border-radius: 12px;
    box-shadow: 0 6px 16px rgba(0,0,0,0.15);
    margin: 20px auto;
    max-width: 100%;
}
"""

with gr.Blocks(theme=gr.themes.Soft(), css=CUSTOM_CSS, title="Generador de Videos con IA") as app:
    gr.Markdown("# 🎬 GENERADOR AVANZADO DE VIDEOS CON IA")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Configuración del Contenido")
            prompt = gr.Textbox(label="Tema principal", placeholder="Ej: 'Los misterios del universo'")
            custom_script = gr.TextArea(
                label="Guion personalizado (opcional)",
                placeholder="O escribe tu propio guion aquí...",
                lines=8
            )
            voz = gr.Dropdown(
                label="Selecciona una voz profesional",
                choices=VOICE_NAMES,
                value=VOICE_NAMES[0],
                type="index"
            )
            musica = gr.File(
                label="Música de fondo profesional (opcional)",
                file_types=["audio"],
                type="filepath"
            )
            btn = gr.Button("🚀 Generar Video Profesional", variant="primary", size="lg")
        with gr.Column(scale=2):
            output = gr.Video(
                label="Video Resultante",
                format="mp4",
                interactive=False,
                elem_id="video-output"
            )
            with gr.Accordion("Detalles técnicos", open=False):
                gr.Markdown("""
                **Tecnologías utilizadas:**
                - Generación de guiones: Meta MBART-large-50
                - Búsqueda semántica: Sentence Transformers multilingüe
                - Síntesis de voz: Microsoft Edge TTS
                - Procesamiento de video: MoviePy
                """)
    # Professional examples
    gr.Examples(
        examples=[
            ["Los secretos de la inteligencia artificial", "", 0, None],
            ["Lugares históricos de Europa", "", 3, None],
            ["Innovaciones tecnológicas del futuro", "", 5, None]
        ],
        inputs=[prompt, custom_script, voz, musica],
        label="Ejemplos profesionales"
    )

    btn.click(
        fn=crear_video_inteligente,
        inputs=[prompt, custom_script, voz, musica],
        outputs=output
    )
if __name__ == "__main__":
    app.launch(server_name="0.0.0.0", server_port=7860)