Update app.py
app.py CHANGED
@@ -10,25 +10,47 @@ from datetime import datetime
 import numpy as np
 from sklearn.feature_extraction.text import TfidfVectorizer
 import nltk
+from nltk.tokenize import sent_tokenize
 import random
 from transformers import pipeline
 import torch
 import asyncio
+import nest_asyncio
+
+# Aplicar patch para event loop en entornos como Jupyter o Gradio
+nest_asyncio.apply()
 
+# Configuración inicial
 nltk.download('punkt', quiet=True)
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
+# Variables de configuración
 PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
-MODEL_NAME = "DeepESP/gpt2-spanish"
+MODEL_NAME = "DeepESP/gpt2-spanish"  # Modelo en español
+
+# Función async para obtener voces de edge-tts
+async def get_voices():
+    try:
+        voices = await edge_tts.list_voices()
+        return voices
+    except Exception as e:
+        logger.error(f"Error obteniendo voces: {e}")
+        return []
 
-# Obtener voces
-    return asyncio.run(edge_tts.list_voices())
+# Obtener voces sincrónicamente para inicializar
+VOICES = asyncio.get_event_loop().run_until_complete(get_voices())
 
-VOICE_NAMES = [
+# Preparar lista segura de nombres de voces
+VOICE_NAMES = [
+    f"{v.get('Name', 'Desconocido')} ({v.get('Gender', 'Desconocido')}, {v.get('LocaleName', 'es-ES')})"
+    for v in VOICES
+]
 
+# Fallback si no se pudieron obtener voces
+if not VOICES:
+    VOICE_NAMES = ["Voz Predeterminada (Femenino, es-ES)"]
+    VOICES = [{'ShortName': 'es-ES-ElviraNeural'}]
 
 def generar_guion_profesional(prompt):
     try:
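
For reference, a minimal standalone sketch of the voice-listing pattern the new code relies on: edge-tts exposes an async list_voices(), and nest_asyncio.apply() patches the running event loop so run_until_complete can be called even while Gradio or Jupyter already has a loop active. The helper name list_spanish_voices and the "es-" filter are illustrative, not part of this commit:

import asyncio
import nest_asyncio
import edge_tts

nest_asyncio.apply()  # allow run_until_complete inside an already-running loop

async def list_spanish_voices():
    # edge_tts.list_voices() returns a list of dicts with keys such as 'ShortName' and 'Gender'
    voices = await edge_tts.list_voices()
    return [v for v in voices if v.get('ShortName', '').startswith('es-')]

voices = asyncio.get_event_loop().run_until_complete(list_spanish_voices())
print(voices[0]['ShortName'] if voices else "no voices found")
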
@@ -44,12 +66,12 @@ def generar_guion_profesional(prompt):
             "2. Tres secciones detalladas con subtítulos\n"
             "3. Conclusión impactante\n"
             "Usa un estilo natural para narración:",
-            max_length=
+            max_length=1500,
             temperature=0.7,
             top_k=50,
             top_p=0.95,
             num_return_sequences=1,
-            truncation=True
+            truncation=True  # Para evitar warnings y límites
         )
         guion = response[0]['generated_text']
         if len(guion.split()) < 100:
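
As a reference for the sampling parameters above, a minimal sketch of a transformers text-generation pipeline call with the same settings; the prompt string is illustrative, and do_sample=True is added here because temperature/top_k/top_p only take effect when sampling is enabled:

from transformers import pipeline

generator = pipeline("text-generation", model="DeepESP/gpt2-spanish")

response = generator(
    "Escribe un guion corto sobre los misterios de la antigua Grecia:",  # illustrative prompt
    max_length=1500,        # upper bound on total tokens (prompt + generation)
    do_sample=True,         # enable sampling so the parameters below are honored
    temperature=0.7,
    top_k=50,
    top_p=0.95,
    num_return_sequences=1,
    truncation=True,
)
print(response[0]['generated_text'])
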
@@ -89,8 +111,6 @@ def generar_guion_profesional(prompt):
 ¿Listos para profundizar? ¡Empecemos!
 """
 
-from nltk.tokenize import sent_tokenize
-
 def buscar_videos_avanzado(prompt, guion, num_videos=5):
     try:
         oraciones = sent_tokenize(guion)
@@ -102,6 +122,7 @@ def buscar_videos_avanzado(prompt, guion, num_videos=5):
         palabras_clave = [palabras[i] for i in indices_importantes]
         palabras_prompt = re.findall(r'\b\w{4,}\b', prompt.lower())
         todas_palabras = list(set(palabras_clave + palabras_prompt))[:5]
+
         headers = {"Authorization": PEXELS_API_KEY}
         response = requests.get(
             f"https://api.pexels.com/videos/search?query={'+'.join(todas_palabras)}&per_page={num_videos}",
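
For context, a minimal sketch of the keyword-plus-Pexels lookup this function performs, assuming a valid PEXELS_API_KEY environment variable; the helper names top_keywords and buscar_pexels are illustrative, and the response parsing follows the Pexels video-search schema ("videos" -> "video_files" -> "link"/"width"/"height") that the rest of the file relies on:

import os
import requests
from sklearn.feature_extraction.text import TfidfVectorizer

def top_keywords(texto, n=5):
    # Rank terms by TF-IDF over the sentences and keep the n highest-scoring ones.
    vectorizer = TfidfVectorizer(max_features=50)
    matriz = vectorizer.fit_transform(texto.split('.'))
    palabras = vectorizer.get_feature_names_out()
    pesos = matriz.sum(axis=0).A1
    return [palabras[i] for i in pesos.argsort()[::-1][:n]]

def buscar_pexels(query_terms, num_videos=5):
    headers = {"Authorization": os.getenv("PEXELS_API_KEY")}
    url = f"https://api.pexels.com/videos/search?query={'+'.join(query_terms)}&per_page={num_videos}"
    data = requests.get(url, headers=headers, timeout=30).json()
    enlaces = []
    for video in data.get("videos", []):
        # Each hit carries several renditions; keep the link of the largest one.
        files = sorted(video["video_files"], key=lambda x: x.get("width", 0) * x.get("height", 0), reverse=True)
        if files:
            enlaces.append(files[0]["link"])
    return enlaces
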
@@ -130,55 +151,85 @@ async def crear_video_profesional(prompt, custom_script, voz_index, musica=None)
     try:
         guion = custom_script if custom_script else generar_guion_profesional(prompt)
         logger.info(f"Guion generado ({len(guion.split())} palabras)")
+
         voz_seleccionada = VOICES[voz_index]['ShortName'] if VOICES else 'es-ES-ElviraNeural'
+
         voz_archivo = "voz.mp3"
         await edge_tts.Communicate(guion, voz_seleccionada).save(voz_archivo)
         audio = AudioFileClip(voz_archivo)
         duracion_total = audio.duration
+
         videos_data = buscar_videos_avanzado(prompt, guion)
         if not videos_data:
-            raise Exception("No se encontraron videos")
+            raise Exception("No se encontraron videos relevantes")
+
         clips = []
         for video in videos_data[:3]:
+            video_files = sorted(
+                video['video_files'],
+                key=lambda x: x.get('width', 0) * x.get('height', 0),
+                reverse=True
+            )
+            video_url = video_files[0]['link']
+            response = requests.get(video_url, stream=True)
+            temp_video = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
+            for chunk in response.iter_content(chunk_size=1024 * 1024):
+                temp_video.write(chunk)
+            temp_video.close()
+            clip = VideoFileClip(temp_video.name)
+            clips.append(clip)
+
+        duracion_por_clip = duracion_total / len(clips)
+
+        clips_procesados = []
+        for clip in clips:
+            if clip.duration < duracion_por_clip:
+                clip = clip.loop(duration=duracion_por_clip)
+            else:
+                clip = clip.subclip(0, duracion_por_clip)
+            clips_procesados.append(clip)
+
+        video_final = concatenate_videoclips(clips_procesados)
+
+        if musica:
+            musica_clip = AudioFileClip(musica.name)
+            if musica_clip.duration < duracion_total:
+                musica_clip = musica_clip.loop(duration=duracion_total)
+            else:
+                musica_clip = musica_clip.subclip(0, duracion_total)
+            audio = CompositeAudioClip([audio, musica_clip.volumex(0.25)])
+
         video_final = video_final.set_audio(audio)
+
+        output_path = f"video_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
+        video_final.write_videofile(
+            output_path,
+            codec="libx264",
+            audio_codec="aac",
+            threads=2,
+            preset='fast',
+            fps=24
+        )
+
         return output_path
+
     except Exception as e:
-        logger.error(f"
+        logger.error(f"ERROR: {str(e)}")
         return None
+
     finally:
         if voz_archivo and os.path.exists(voz_archivo):
             os.remove(voz_archivo)
 
+# Interfaz Gradio
+
-with gr.Blocks(title="Generador de Videos") as app:
+with gr.Blocks(theme=gr.themes.Soft(), title="Generador de Videos Profesional") as app:
+    gr.Markdown("# 🎬 GENERADOR DE VIDEOS CON IA")
 
     with gr.Row():
-        with gr.Column():
+        with gr.Column(scale=1):
+            gr.Markdown("### Configuración del Contenido")
+            prompt = gr.Textbox(label="Tema principal", placeholder="Ej: 'Los misterios de la antigua Grecia'")
+            custom_script = gr.TextArea(
+                label="Guion personalizado (opcional)",
+                placeholder="Pega aquí tu propio guion completo...",
-        with gr.Column():
-            output = gr.Video(label="Resultado", format="mp4")
-
-        btn.click(
-            fn=run_async_wrapper,
-            inputs=[prompt, custom_script, voz, musica],
-            outputs=output
-        )
-
-if __name__ == "__main__":
-    app.launch(server_name="0.0.0.0", server_port=7860)
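
The visible hunks end midway through the new Gradio layout and do not show how the button callback now invokes the async crear_video_profesional; the removed code wired it through a run_async_wrapper helper. A plausible minimal sketch of such a wrapper is shown below; its body is an assumption based on the nest_asyncio setup earlier in the file, not something this diff contains:

import asyncio

def run_async_wrapper(prompt, custom_script, voz_index, musica):
    # Hypothetical bridge: Gradio callbacks are synchronous, so drive the coroutine to completion here.
    # Relies on nest_asyncio.apply() having patched the already-running event loop.
    return asyncio.get_event_loop().run_until_complete(
        crear_video_profesional(prompt, custom_script, voz_index, musica)
    )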