Spaces:
Sleeping
Sleeping
File size: 5,854 Bytes
43fcbe8 d68572a 38ff849 8337d0b fa691a5 d68572a 38ff849 d68572a 9143db2 ab6a3fb fa691a5 53ae22b d68572a 38ff849 30c3706 60e6f97 ab6a3fb fa691a5 38ff849 d68572a 9143db2 30c3706 d68572a 77ffd33 30c3706 d68572a 30c3706 9143db2 30c3706 d68572a 9143db2 30c3706 9143db2 30c3706 9143db2 30c3706 ae19496 60e6f97 30c3706 9143db2 60e6f97 9143db2 30c3706 9143db2 60e6f97 30c3706 38ff849 30c3706 07b3b3d d68572a 60e6f97 d68572a 60e6f97 d68572a 60e6f97 d68572a 30c3706 d68572a 30c3706 c7b9a72 30c3706 60e6f97 30c3706 60e6f97 d68572a 30c3706 60e6f97 d68572a 60e6f97 9143db2 30c3706 d68572a 60e6f97 9143db2 30c3706 60e6f97 30c3706 60e6f97 30c3706 60e6f97 d68572a 30c3706 60e6f97 30c3706 9143db2 38ff849 60e6f97 9143db2 60e6f97 9143db2 22e9f48 d68572a 30c3706 60e6f97 22e9f48 60e6f97 3e716f3 38ff849 d68572a 60e6f97 ab6a3fb 38ff849 fa201eb 60e6f97 38ff849 fa201eb 60e6f97 720c3d5 07b3b3d 30c3706 60e6f97 30c3706 c9d2e08 30c3706 d7f3a60 9143db2 60e6f97 9143db2 30c3706 60e6f97 8337d0b d68572a 30c3706 9e5ee0a d7f3a60 07b3b3d 60e6f97 30c3706 60e6f97 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 |
import os
import subprocess
import requests
import gradio as gr
from moviepy.editor import *
from datetime import datetime
import logging
import re
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import warnings
# Initial configuration: module-level logging setup.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Silence noisy third-party library warnings in the app logs.
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Pexels API key comes from the environment; may be None when unset.
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
# Valid edge-tts voice identifiers offered in the UI dropdown.
VOICES = [
    "es-MX-DaliaNeural", "es-ES-ElviraNeural", "es-AR-ElenaNeural",
    "es-MX-JorgeNeural", "es-ES-AlvaroNeural", "es-AR-TomasNeural",
    "en-US-JennyNeural", "fr-FR-DeniseNeural", "de-DE-KatjaNeural"
]
# Load the Spanish GPT-2 model at import time. On failure, both names are
# set to None so generar_texto() can degrade to placeholder text instead
# of crashing the whole app.
try:
    tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
    model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish")
    logger.info("Modelo GPT-2 cargado correctamente")
except Exception as e:
    logger.error(f"Error cargando modelo: {str(e)}")
    model = None
    tokenizer = None
def generar_texto(tema):
    """Generate long free-form text about *tema* with the local GPT-2 model.

    Falls back to repeated placeholder text when the model failed to load
    at import time or when generation raises, so callers always receive a
    usable non-empty string.
    """
    if model is None or tokenizer is None:
        # Model unavailable: return filler so the video pipeline still runs.
        return f"Contenido sobre {tema}. " * 50
    try:
        # Direct, simple prompt — no predefined structure.
        prompt = f"Describe detalladamente {tema}"
        # Tokenize with truncation so an oversized topic cannot overflow
        # the model's context window.
        inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
        # Pass attention_mask explicitly: with pad_token_id == eos_token_id,
        # omitting it forces generate() to guess the mask, which is
        # unreliable and emits a transformers warning.
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=800,
            do_sample=True,
            temperature=0.7,
            top_k=40,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
        )
        texto = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Collapse all whitespace runs into single spaces.
        return re.sub(r'\s+', ' ', texto).strip()
    except Exception as e:
        logger.error(f"Error generando texto: {str(e)}")
        return f"Texto generado sobre {tema}. " * 50
def obtener_videos(tema):
    """Fetch up to 3 stock videos matching *tema* from the Pexels API.

    Returns a (possibly empty) list of Pexels video dicts. Any failure —
    missing API key, network error, HTTP error status, malformed JSON —
    is logged and yields [] so the caller can fall back gracefully.
    """
    try:
        headers = {"Authorization": PEXELS_API_KEY}
        # Use `params=` so the query string is URL-encoded; topics may
        # contain spaces or non-ASCII characters that would otherwise
        # produce an invalid URL.
        response = requests.get(
            "https://api.pexels.com/videos/search",
            params={"query": tema, "per_page": 3},
            headers=headers,
            timeout=10,
        )
        # Surface HTTP errors (401 bad key, 429 rate limit, ...) instead
        # of silently parsing an error payload as an empty result.
        response.raise_for_status()
        return response.json().get("videos", [])[:3]
    except Exception as e:
        logger.error(f"Error obteniendo videos: {str(e)}")
        return []
def crear_video(prompt, voz_seleccionada):
    """Build a narrated video for *prompt* using voice *voz_seleccionada*.

    Pipeline: generate a script, synthesize narration with edge-tts,
    download matching Pexels stock clips, assemble them to the narration
    length, and export an MP4. Returns the output filename, or None on
    any fatal error. Temporary files are always cleaned up.
    """
    # Defined before the try block so the finally clause can never hit a
    # NameError when an early step (e.g. text generation) raises.
    voz_file = "narracion.mp3"
    audio = None
    clips = []
    try:
        # 1. Generate the narration script.
        texto = generar_texto(prompt)
        logger.info(f"Texto generado: {len(texto)} caracteres")
        # 2. Synthesize speech with edge-tts (argument list, no shell).
        subprocess.run([
            'edge-tts',
            '--voice', voz_seleccionada,
            '--text', texto,
            '--write-media', voz_file
        ], check=True)
        audio = AudioFileClip(voz_file)
        duracion = audio.duration
        # 3. Fetch stock footage; fall back to a generic query if the
        # topic search returns nothing.
        videos = obtener_videos(prompt) or obtener_videos("nature")
        for i, video in enumerate(videos):
            try:
                # Pick the highest-resolution rendition available.
                video_file = max(video['video_files'], key=lambda x: x.get('width', 0))
                temp_file = f"temp_{i}.mp4"
                # Stream the download to disk in chunks.
                with requests.get(video_file['link'], stream=True) as r:
                    r.raise_for_status()
                    with open(temp_file, 'wb') as f:
                        for chunk in r.iter_content(chunk_size=8192):
                            f.write(chunk)
                clip = VideoFileClip(temp_file)
                # Give each clip an equal share of the narration length,
                # capped at the clip's own duration.
                clip_duration = min(duracion / len(videos), clip.duration)
                clips.append(clip.subclip(0, clip_duration))
            except Exception as e:
                logger.error(f"Error procesando video {i}: {str(e)}")
        # 4. Assemble the final video (black background if no footage).
        if not clips:
            final_clip = ColorClip((1280, 720), (0, 0, 0), duration=duracion)
        else:
            final_clip = concatenate_videoclips(clips).set_duration(duracion)
        final_clip = final_clip.set_audio(audio)
        # 5. Export with a timestamped filename.
        output_file = f"video_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        final_clip.write_videofile(
            output_file,
            fps=24,
            codec="libx264",
            audio_codec="aac",
            threads=2,
            preset='fast'
        )
        return output_file
    except Exception as e:
        logger.error(f"Error crítico: {str(e)}")
        return None
    finally:
        # Release moviepy readers before deleting their backing files;
        # otherwise the file handles leak (and deletion fails on Windows).
        for clip in clips:
            try:
                clip.close()
            except Exception:
                pass
        if audio is not None:
            try:
                audio.close()
            except Exception:
                pass
        # Best-effort removal of temporary files.
        for f in [voz_file, *[f"temp_{i}.mp4" for i in range(3)]]:
            if os.path.exists(f):
                try:
                    os.remove(f)
                except OSError:
                    pass
# Minimal Gradio interface: topic + voice in, rendered video out.
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            tema = gr.Textbox(label="Tema del video")
            voz = gr.Dropdown(label="Voz", choices=VOICES, value=VOICES[0])
            btn = gr.Button("Generar Video")
        with gr.Column():
            video = gr.Video(label="Resultado")
    # Wire the button to the full generation pipeline.
    btn.click(
        fn=crear_video,
        inputs=[tema, voz],
        outputs=video
    )
if __name__ == "__main__":
    # Bind on all interfaces on the default Hugging Face Spaces port.
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )