Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,105 +1,87 @@
|
|
1 |
-
import os
|
2 |
import gradio as gr
|
3 |
-
import requests
|
4 |
-
import tempfile
|
5 |
-
import asyncio
|
6 |
import edge_tts
|
7 |
-
|
8 |
-
|
9 |
-
VideoFileClip, AudioFileClip, concatenate_videoclips,
|
10 |
-
CompositeAudioClip, afx
|
11 |
-
)
|
12 |
-
from transformers import pipeline
|
13 |
import logging
|
14 |
-
import
|
|
|
|
|
|
|
15 |
|
16 |
-
|
|
|
17 |
|
18 |
-
|
19 |
-
|
20 |
|
21 |
-
#
|
22 |
-
|
23 |
|
24 |
-
#
|
25 |
-
|
26 |
-
|
27 |
|
28 |
-
|
29 |
-
|
30 |
-
|
|
|
31 |
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
]
|
38 |
|
39 |
-
#
|
40 |
-
|
41 |
-
|
42 |
|
43 |
-
#
|
44 |
-
|
45 |
try:
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
logger.info("Convirtiendo texto en voz...")
|
50 |
-
voz_id = voz_str.split(" ")[0]
|
51 |
-
short_name = VOICE_MAP.get(voz_id, "es-ES-ElviraNeural")
|
52 |
-
voz_path = "voz.mp3"
|
53 |
-
await edge_tts.Communicate(text=texto, voice=short_name).save(voz_path)
|
54 |
-
voz_clip = AudioFileClip(voz_path)
|
55 |
-
|
56 |
-
logger.info("Descargando clips de video...")
|
57 |
-
video_urls = buscar_videos_mock()
|
58 |
-
clips = []
|
59 |
-
for url in video_urls:
|
60 |
-
r = requests.get(url, stream=True)
|
61 |
-
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as f:
|
62 |
-
for chunk in r.iter_content(1024 * 1024):
|
63 |
-
f.write(chunk)
|
64 |
-
f.flush()
|
65 |
-
clip = VideoFileClip(f.name).subclip(0, 5)
|
66 |
-
clips.append(clip)
|
67 |
-
|
68 |
-
video = concatenate_videoclips(clips).set_audio(voz_clip)
|
69 |
-
|
70 |
-
logger.info("Descargando música de fondo...")
|
71 |
-
music_url = buscar_musica_mock()
|
72 |
-
r = requests.get(music_url, stream=True)
|
73 |
-
with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as f:
|
74 |
-
for chunk in r.iter_content(1024 * 1024):
|
75 |
-
f.write(chunk)
|
76 |
-
f.flush()
|
77 |
-
music_clip = AudioFileClip(f.name)
|
78 |
-
music_loop = afx.audio_loop(music_clip, duration=video.duration).volumex(0.3)
|
79 |
-
|
80 |
-
logger.info("Combinando audio de voz y música...")
|
81 |
-
audio_final = CompositeAudioClip([video.audio, music_loop])
|
82 |
-
video = video.set_audio(audio_final)
|
83 |
-
|
84 |
-
output_path = f"video_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
|
85 |
-
logger.info("Renderizando video final...")
|
86 |
-
video.write_videofile(output_path, fps=24, logger=logger)
|
87 |
-
|
88 |
-
return output_path
|
89 |
except Exception as e:
|
90 |
-
|
91 |
-
return
|
92 |
-
|
93 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
with gr.Blocks() as app:
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
|
|
|
|
|
|
|
|
99 |
|
100 |
-
|
101 |
-
inputs=[prompt, voz],
|
102 |
-
outputs=output)
|
103 |
|
104 |
-
|
105 |
-
|
|
|
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
2 |
import edge_tts
|
3 |
+
import asyncio
|
4 |
+
import os
|
|
|
|
|
|
|
|
|
5 |
import logging
|
6 |
+
import torch
|
7 |
+
from transformers import pipeline, set_seed
|
8 |
+
from moviepy.editor import *
|
9 |
+
from dotenv import load_dotenv
|
10 |
|
11 |
+
# Log configuration: INFO level with timestamps so messages surface in the
# Hugging Face Spaces console.
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")

# Pull in optional configuration from a local .env file, if one exists.
load_dotenv()

# HF pipeline device convention: GPU index 0 when CUDA is present, -1 for CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = -1

# GPT-2 text-generation pipeline; fixed seed keeps sampling reproducible.
generator = pipeline("text-generation", model="gpt2", device=device)
set_seed(42)
|
23 |
|
24 |
+
async def text_to_speech(text, output_path, voice="es-MX-DaliaNeural"):
    """Synthesize *text* with edge-tts using *voice* and save the MP3 to *output_path*."""
    await edge_tts.Communicate(text=text, voice=voice).save(output_path)
|
28 |
|
29 |
+
def generate_video(prompt, background_music_path="musica.mp3"):
    """Generate a narrated video from *prompt*.

    Pipeline: GPT-2 writes a script, edge-tts voices it, and moviepy renders a
    black 1280x720 clip with the voice (plus looped background music when
    *background_music_path* exists, mixed at 20% volume).

    Returns:
        tuple: (path to the rendered MP4, generated script), or
        (None, script) when text-to-speech fails.
    """
    logging.info("🚀 Generando guion con IA...")
    result = generator(prompt, max_length=500, do_sample=True, truncation=True)
    script = result[0]['generated_text']
    logging.info("🗣 Guion generado.")

    # Persist the script; explicit UTF-8 so non-ASCII output (accents, emoji)
    # never trips over the platform-default codec.
    with open("guion.txt", "w", encoding="utf-8") as f:
        f.write(script)

    # Voice synthesis. NOTE(review): asyncio.run() raises if an event loop is
    # already running in this thread — fine for Gradio's worker threads, but
    # confirm if this ever gets called from async code.
    output_audio = "voz.mp3"
    try:
        asyncio.run(text_to_speech(script, output_audio))
        logging.info("🎤 Voz generada.")
    except Exception as e:
        # Best-effort boundary: report the failure, still return the script.
        logging.error(f"❌ Error generando voz: {e}")
        return None, script

    # Voice track drives the total duration.
    voice_clip = AudioFileClip(output_audio)
    duration = voice_clip.duration

    # Plain black background sized to the narration.
    video = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=duration)

    # Optional background music: loop it up to the voice length, trim, and
    # mix under the narration at 20% volume.
    music = None
    if os.path.exists(background_music_path):
        music = AudioFileClip(background_music_path)
        if music.duration < duration:
            loops = int(duration // music.duration) + 1
            music = concatenate_audioclips([music] * loops)
        music = music.subclip(0, duration)
        final_audio = CompositeAudioClip([music.volumex(0.2), voice_clip])
    else:
        final_audio = voice_clip

    video = video.set_audio(final_audio)
    output_path = "video_generado.mp4"
    try:
        video.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac")
    finally:
        # Close clips so moviepy's ffmpeg readers / temp handles are not
        # leaked across repeated generations.
        voice_clip.close()
        if music is not None:
            music.close()
        video.close()

    return output_path, script
|
71 |
+
|
72 |
+
# Gradio front-end: prompt in, rendered video + generated script out.
with gr.Blocks() as app:
    gr.Markdown("# 🎬 Generador de video IA + Voz + Música")
    prompt = gr.Textbox(label="Prompt del guion")
    boton = gr.Button("Generar video")
    salida_video = gr.Video()
    salida_texto = gr.Textbox(label="Guion generado")

    def ejecutar(prompt):
        # generate_video already returns the (video, script) pair Gradio expects.
        return generate_video(prompt)

    boton.click(ejecutar, inputs=prompt, outputs=[salida_video, salida_texto])

# Start the app (debug mode surfaces tracebacks in the Space logs).
app.launch(debug=True)
|