import os
import re
import requests
import gradio as gr
from moviepy.editor import *
import edge_tts
import tempfile
import logging
from datetime import datetime
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.tokenize import sent_tokenize
from transformers import pipeline
import torch
import asyncio
# Initial configuration
nltk.download('punkt', quiet=True)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Model configuration
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
MODEL_NAME = "DeepESP/gpt2-spanish"

# Robust helper for fetching the available voices
async def get_voices():
    try:
        voices = await edge_tts.list_voices()
        voice_names = []
        for v in voices:
            try:
                name = v.get('Name', v.get('ShortName', 'Desconocido'))
                gender = v.get('Gender', 'Desconocido')
                locale = v.get('Locale', v.get('Language', 'Desconocido'))
                voice_names.append(f"{name} ({gender}, {locale})")
            except Exception as e:
                logger.warning(f"Error processing voice: {v} - {str(e)}")
                continue
        return voice_names, voices
    except Exception as e:
        logger.error(f"Error fetching voices: {str(e)}")
        return [], []
# Fetch the voices synchronously for initialization
import nest_asyncio
nest_asyncio.apply()

VOICE_NAMES, VOICES = [], []

async def get_voices():
    # NOTE: this simpler definition shadows the more defensive one above.
    voces = await edge_tts.list_voices()
    # .get() fallbacks avoid a KeyError when a voice lacks 'LocaleName'
    voice_names = [
        f"{v.get('Name', 'Desconocido')} ({v.get('Gender', 'Desconocido')}, {v.get('LocaleName', v.get('Locale', 'Desconocido'))})"
        for v in voces
    ]
    return voice_names, voces

async def get_and_set_voices():
    global VOICE_NAMES, VOICES
    try:
        VOICE_NAMES, VOICES = await get_voices()
        if not VOICES:
            raise Exception("No voices were returned.")
    except Exception as e:
        logging.warning(f"Could not load voices dynamically: {e}")
        VOICE_NAMES = ["Voz Predeterminada (Femenino, es-ES)"]
        VOICES = [{'ShortName': 'es-ES-ElviraNeural'}]

asyncio.get_event_loop().run_until_complete(get_and_set_voices())
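# --- Hedged sketch (not in the original file) ---
# Illustrates how a voice chosen from VOICES could be turned into a narration
# file with edge_tts.Communicate; the helper name sintetizar_voz and the
# default output path are illustrative assumptions.
async def sintetizar_voz(texto, voz_index, output_path="narracion.mp3"):
    """Synthesize `texto` with the selected voice and save it as an MP3 file."""
    short_name = VOICES[voz_index].get('ShortName', 'es-ES-ElviraNeural')
    communicate = edge_tts.Communicate(texto, short_name)
    await communicate.save(output_path)
    return output_path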
def generar_guion_profesional(prompt):
    """Generate a script with a robust fallback"""
    try:
        generator = pipeline(
            "text-generation",
            model=MODEL_NAME,
            device=0 if torch.cuda.is_available() else -1
        )
        response = generator(
            f"Escribe un guion profesional para un video de YouTube sobre '{prompt}':\n\n1. Introducción\n2. Desarrollo\n3. Conclusión\n\n",
            max_length=800,
            temperature=0.7,
            num_return_sequences=1
        )
        return response[0]['generated_text']
    except Exception as e:
        logger.error(f"Error generating script: {str(e)}")
        return f"""Guión de respaldo sobre {prompt}:
1. INTRODUCCIÓN: Hoy exploraremos {prompt}
2. DESARROLLO: Aspectos clave sobre el tema
3. CONCLUSIÓN: Resumen y cierre"""
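# --- Hedged sketch (not in the original file) ---
# generar_guion_profesional above rebuilds the Hugging Face pipeline on every
# call, which reloads the model each time. A lazy module-level cache along
# these lines could avoid that; the names _GENERATOR and obtener_generador are
# illustrative assumptions, not part of the original code.
_GENERATOR = None

def obtener_generador():
    """Create the text-generation pipeline once and reuse it on later calls."""
    global _GENERATOR
    if _GENERATOR is None:
        _GENERATOR = pipeline(
            "text-generation",
            model=MODEL_NAME,
            device=0 if torch.cuda.is_available() else -1
        )
    return _GENERATOR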
def buscar_videos_avanzado(prompt, guion, num_videos=3):
    """Search with multiple fallbacks"""
    try:
        palabras = re.findall(r'\b\w{4,}\b', prompt.lower())[:5]
        response = requests.get(
            f"https://api.pexels.com/videos/search?query={'+'.join(palabras)}&per_page={num_videos}",
            headers={"Authorization": PEXELS_API_KEY},
            timeout=10
        )
        return response.json().get('videos', [])[:num_videos]
    except Exception as e:
        logger.error(f"Error searching videos: {str(e)}")
        return []
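# --- Hedged sketch (not in the original file) ---
# One way the Pexels results from buscar_videos_avanzado and a narration file
# could be assembled with moviepy. The helper name componer_video, the local
# file names and the fps value are illustrative assumptions.
def componer_video(videos, audio_path, salida="video_final.mp4"):
    clips = []
    for i, video in enumerate(videos):
        # Each Pexels video exposes downloadable renditions under 'video_files'
        link = video['video_files'][0]['link']
        ruta = f"clip_{i}.mp4"
        with open(ruta, 'wb') as f:
            f.write(requests.get(link, timeout=30).content)
        clips.append(VideoFileClip(ruta))
    if not clips:
        return None
    audio = AudioFileClip(audio_path)
    final = concatenate_videoclips(clips)
    duracion = min(final.duration, audio.duration)
    final = final.subclip(0, duracion).set_audio(audio.subclip(0, duracion))
    final.write_videofile(salida, fps=24, logger=None)
    return salida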
async def crear_video_profesional(prompt, custom_script, voz_index, musica=None):
    # NOTE: this definition is overridden by the timeout-test stub of the same
    # name defined further below.
    try:
        # 1. Generate the script
        guion = custom_script if custom_script else generar_guion_profesional(prompt)
        # 2. Configure the voice
        voz_seleccionada = VOICES[voz_index]
    except Exception as e:
        logger.error(f"Error creating video: {str(e)}")
        return None

import os
import asyncio
from concurrent.futures import ThreadPoolExecutor
import gradio as gr
# CRITICAL configuration to avoid timeouts
GRADIO_TIMEOUT = 600  # 10 minutes (in seconds)
MAX_VIDEO_DURATION = 120  # 2 minutes (prevents never-ending jobs)
async def crear_video_profesional(prompt, custom_script, voz_index, musica=None):
    try:
        # 1. Simulate a long-running job (this is what causes the timeout!)
        # Replace this with the real generation logic
        await asyncio.sleep(30)  # For testing only
        # 2. Return a test video (remove in production)
        return "video_prueba.mp4"
    except Exception as e:
        print(f"ERROR: {str(e)}")
        return None
# 👇 **Magic fix**: run the coroutine in a separate thread
def run_async_with_timeout(prompt, script, voz_index, musica=None):
    with ThreadPoolExecutor() as executor:
        future = executor.submit(
            lambda: asyncio.run(crear_video_profesional(prompt, script, voz_index, musica))
        )
        return future.result(timeout=GRADIO_TIMEOUT)
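# --- Hedged usage note (not in the original file) ---
# Running asyncio.run() inside a worker thread keeps Gradio's own event loop
# free while the coroutine executes, and future.result(timeout=...) bounds how
# long the request handler waits. A quick smoke test outside Gradio might look
# like this (argument values are placeholders):
#     run_async_with_timeout("viajes por Japon", "", 0)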
# Minimal interface (to focus on the timeout issue)
with gr.Blocks() as app:
    with gr.Row():
        prompt = gr.Textbox(label="Tema")
        btn = gr.Button("Generar")
    output = gr.Video()
    btn.click(
        fn=run_async_with_timeout,  # 👈 use the anti-timeout wrapper
        inputs=[prompt, gr.Textbox(visible=False), gr.Number(visible=False)],
        outputs=output
    )
if __name__ == "__main__":
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        # ⚠️ KEY configuration for the timeout
        app_kwargs={"timeout": GRADIO_TIMEOUT}
    )