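# =============================================================================
# ADUC-SDR video storyboard suite (Gradio app).
#
# Pipeline: Gemini writes a static scene script from a reference image (step 1),
# DreamO paints one keyframe per scene (step 2), Gemini writes motion prompts
# and LTX-Video animates the keyframes into clips chained by short
# "convergence" clips (step 3), and FFmpeg concatenates the fragments (step 4).
# =============================================================================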
import gradio as gr
import torch
import os
import yaml
from PIL import Image
import shutil
import gc
import subprocess
import google.generativeai as genai
import numpy as np
import imageio
from pathlib import Path
import huggingface_hub
import json

from inference import create_ltx_video_pipeline, load_image_to_tensor_with_resize_and_crop, calculate_padding, ConditioningItem
from dreamo_helpers import dreamo_generator_singleton
import ltx_video.pipelines.crf_compressor as crf_compressor

|
config_file_path = "configs/ltxv-13b-0.9.8-distilled.yaml"
with open(config_file_path, "r") as file:
    PIPELINE_CONFIG_YAML = yaml.safe_load(file)

LTX_REPO = "Lightricks/LTX-Video"
models_dir = "downloaded_models_gradio_cpu_init"
Path(models_dir).mkdir(parents=True, exist_ok=True)
WORKSPACE_DIR = "aduc_workspace"
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

|
VIDEO_FPS = 30
VIDEO_DURATION_SECONDS = 3
VIDEO_TOTAL_FRAMES = VIDEO_DURATION_SECONDS * VIDEO_FPS
CONVERGENCE_FRAMES = 8
MAX_REFS = 5

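# Note: LTX-Video only generates frame counts of the form 8*n + 1, so the
# 90 frames implied above are rounded to 89 at generation time (see
# run_ltx_animation and run_full_video_production).
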
|
print("Baixando e criando pipelines LTX na CPU...") |
|
distilled_model_actual_path = huggingface_hub.hf_hub_download(repo_id=LTX_REPO, filename=PIPELINE_CONFIG_YAML["checkpoint_path"], local_dir=models_dir, local_dir_use_symlinks=False) |
|
pipeline_instance = create_ltx_video_pipeline(ckpt_path=distilled_model_actual_path, precision=PIPELINE_CONFIG_YAML["precision"], text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"], sampler=PIPELINE_CONFIG_YAML["sampler"], device='cpu') |
|
print("Modelos LTX prontos (na CPU).") |
|
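
# -- Step 1 backend. Gemini "Photographer": turns the user's idea plus the
# reference image into one static scene description per fragment. The model is
# expected to reply with JSON shaped as {"scene_storyboard": ["...", ...]};
# markdown code fences are stripped before json.loads.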
|
def get_static_scenes_storyboard(num_fragments: int, prompt: str, initial_image_path: str, progress=gr.Progress()):
    progress(0.5, desc="[Gemini Photographer] Describing the static scenes...")
    if not initial_image_path:
        raise gr.Error("Please provide an initial reference image.")
    if not GEMINI_API_KEY:
        raise gr.Error("Gemini API key is not configured!")
    genai.configure(api_key=GEMINI_API_KEY)

    prompt_file = "prompts/photographer_prompt.txt"
    try:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        prompt_file_path = os.path.join(script_dir, prompt_file)
        with open(prompt_file_path, "r", encoding="utf-8") as f:
            template = f.read()
    except FileNotFoundError:
        raise gr.Error(f"Prompt file '{prompt_file}' not found!")

    director_prompt = template.format(user_prompt=prompt, num_fragments=int(num_fragments))
    model = genai.GenerativeModel('gemini-2.0-flash')
    img = Image.open(initial_image_path)
    response = model.generate_content([director_prompt, img])

    try:
        # Strip markdown code fences before parsing the JSON payload.
        cleaned_response = response.text.strip().replace("```json", "").replace("```", "")
        if not cleaned_response:
            raise ValueError("The Gemini response was empty.")
        storyboard_data = json.loads(cleaned_response)
        return storyboard_data.get("scene_storyboard", [])
    except (json.JSONDecodeError, ValueError) as e:
        raise gr.Error(f"The Photographer returned an invalid response. Error: {e}. Raw response: '{response.text}'")

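# -- Step 3 backend (script half). Gemini "Scene Director": given the overall
# prompt and the painted keyframes, writes one motion prompt per keyframe.
# Expected reply shape: {"motion_storyboard": ["...", ...]}.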
|
def get_motion_storyboard(user_prompt: str, keyframe_image_paths: list, progress=gr.Progress()):
    progress(0.5, desc="[Gemini Director] Writing the motion script...")
    if not keyframe_image_paths:
        raise gr.Error("No keyframe images were provided to the scene director.")
    if not GEMINI_API_KEY:
        raise gr.Error("Gemini API key is not configured!")
    genai.configure(api_key=GEMINI_API_KEY)

    prompt_file = "prompts/director_motion_prompt.txt"
    try:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        prompt_file_path = os.path.join(script_dir, prompt_file)
        with open(prompt_file_path, "r", encoding="utf-8") as f:
            template = f.read()
    except FileNotFoundError:
        raise gr.Error(f"Prompt file '{prompt_file}' not found!")

    director_prompt = template.format(user_prompt=user_prompt, num_fragments=len(keyframe_image_paths))

    # Multimodal request: the text prompt followed by every keyframe image.
    model_contents = [director_prompt]
    for img_path in keyframe_image_paths:
        model_contents.append(Image.open(img_path))

    model = genai.GenerativeModel('gemini-2.0-flash')
    response = model.generate_content(model_contents)

    try:
        cleaned_response = response.text.strip().replace("```json", "").replace("```", "")
        if not cleaned_response:
            raise ValueError("The Gemini response was empty.")
        storyboard_data = json.loads(cleaned_response)
        return storyboard_data.get("motion_storyboard", [])
    except (json.JSONDecodeError, ValueError) as e:
        raise gr.Error(f"The Scene Director returned an invalid response. Error: {e}. Raw response: '{response.text}'")

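# -- Step 2 backend. DreamO "Painter": generates one keyframe per scene. The
# latest keyframe is always fed back as reference slot 0, so composition and
# identity carry over from scene to scene; the remaining slots hold the user's
# fixed optional references. Yields incremental Gradio updates while painting.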
|
def run_sequential_keyframe_generation(storyboard, initial_ref_image_path, *reference_args):
    if not storyboard:
        raise gr.Error("No storyboard to generate keyframes from.")
    if not initial_ref_image_path:
        raise gr.Error("The initial reference image is required.")

    # reference_args packs MAX_REFS image paths followed by MAX_REFS task names.
    ref_paths = reference_args[:MAX_REFS]
    ref_tasks = reference_args[MAX_REFS:]

    with Image.open(initial_ref_image_path) as img:
        width, height = img.size
        # Snap dimensions down to multiples of 32, as the generator requires.
        width, height = (width // 32) * 32, (height // 32) * 32

    keyframe_paths, log_history = [], ""
    current_ref_image_path = initial_ref_image_path

    try:
        dreamo_generator_singleton.to_gpu()
        for i, prompt in enumerate(storyboard):
            log_history += f"Painting sequential scene {i+1}/{len(storyboard)}...\n"
            yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths)}

            # Slot 0 is always the most recent keyframe (sequential chaining).
            reference_items_for_dreamo = [{
                'image_np': np.array(Image.open(current_ref_image_path).convert("RGB")),
                'task': ref_tasks[0]
            }]

            # The remaining slots are the user's fixed, optional references.
            for j in range(1, MAX_REFS):
                if ref_paths[j]:
                    reference_items_for_dreamo.append({
                        'image_np': np.array(Image.open(ref_paths[j]).convert("RGB")),
                        'task': ref_tasks[j]
                    })

            output_path = os.path.join(WORKSPACE_DIR, f"keyframe_image_{i+1}.png")
            image = dreamo_generator_singleton.generate_image_with_gpu_management(
                reference_items=reference_items_for_dreamo,
                prompt=prompt,
                width=width,
                height=height
            )
            image.save(output_path)
            keyframe_paths.append(output_path)
            current_ref_image_path = output_path

            log_history += f"Scene {i+1} painted. The next scene will use '{os.path.basename(output_path)}' as reference.\n"
            yield {
                keyframe_log_output: gr.update(value=log_history),
                keyframe_gallery_output: gr.update(value=keyframe_paths),
                keyframe_images_state: keyframe_paths,
                ref_image_inputs[0]: gr.update(value=current_ref_image_path)
            }
    finally:
        dreamo_generator_singleton.to_cpu()

    log_history += "\nSequential painting of all scenes complete!"
    yield {keyframe_log_output: gr.update(value=log_history)}

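# -- Physical convergence helper: cuts the last `num_frames` frames of a
# finished clip into a tiny video that conditions frame 0 of the next clip, so
# motion carries across fragment boundaries instead of restarting from a still.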
|
def extract_final_frames_video(input_video_path: str, output_video_path: str, num_frames: int):
    if not os.path.exists(input_video_path):
        raise gr.Error(f"Internal error: input video for frame extraction not found: {input_video_path}")
    try:
        command_probe = f"ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of default=noprint_wrappers=1:nokey=1 \"{input_video_path}\""
        result_probe = subprocess.run(command_probe, shell=True, check=True, capture_output=True, text=True)
        total_frames = int(result_probe.stdout.strip())
        start_frame_index = total_frames - num_frames
        if start_frame_index < 0:
            print(f"Warning: the video has fewer than {num_frames} frames. Using the whole video for convergence.")
            shutil.copyfile(input_video_path, output_video_path)
            return output_video_path
        # select keeps frames from start_frame_index onwards; setpts rebases
        # their timestamps to zero so the clip does not inherit the original
        # offset (the usual idiom when filtering with select).
        command_extract = f"ffmpeg -y -i \"{input_video_path}\" -vf \"select='gte(n,{start_frame_index})',setpts=PTS-STARTPTS\" -c:v libx264 -preset ultrafast -an \"{output_video_path}\""
        subprocess.run(command_extract, shell=True, check=True, capture_output=True, text=True)
        return output_video_path
    except (subprocess.CalledProcessError, ValueError) as e:
        error_message = f"FFmpeg/FFprobe failed to extract the final frames: {e}"
        if hasattr(e, 'stderr'):
            error_message += f"\nDetails: {e.stderr}"
        raise gr.Error(error_message)

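# -- Conditioning loader: still images go through the standard LTX resize/crop
# path; for .mp4 convergence clips only the first frame is read, passed through
# the CRF compressor to mimic codec statistics, and normalized to [-1, 1] with
# shape (batch, channels, frames, height, width) = (1, 3, 1, H, W).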
|
def load_conditioning_tensor(media_path: str, height: int, width: int) -> torch.Tensor:
    if media_path.lower().endswith(('.png', '.jpg', '.jpeg')):
        return load_image_to_tensor_with_resize_and_crop(media_path, height, width)
    elif media_path.lower().endswith('.mp4'):
        try:
            with imageio.get_reader(media_path) as reader:
                first_frame = reader.get_data(0)
            image = Image.fromarray(first_frame).convert("RGB").resize((width, height))
            image = np.array(image)
            frame_tensor = torch.from_numpy(image).float()
            # Round-trip through the CRF compressor so the frame matches the
            # compression statistics the model expects.
            frame_tensor = crf_compressor.compress(frame_tensor / 255.0) * 255.0
            frame_tensor = frame_tensor.permute(2, 0, 1)
            frame_tensor = (frame_tensor / 127.5) - 1.0
            return frame_tensor.unsqueeze(0).unsqueeze(2)
        except Exception as e:
            raise gr.Error(f"Failed to read the first frame of the convergence video '{media_path}': {e}")
    else:
        raise gr.Error(f"Unsupported conditioning file format: {media_path}")

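# -- LTX animator: renders a single fragment from a motion prompt plus start/end
# conditioning items. The pipeline lives on the CPU and is moved to the GPU only
# for the duration of the call, so it can share one device with DreamO.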
|
def run_ltx_animation(current_fragment_index, motion_prompt, conditioning_items_data, width, height, seed, cfg, progress=gr.Progress()):
    progress(0, desc=f"[LTX Animator] Generating scene {current_fragment_index}...")
    output_path = os.path.join(WORKSPACE_DIR, f"fragment_{current_fragment_index}.mp4")
    target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    try:
        pipeline_instance.to(target_device)
        conditioning_items = []
        for (path, start_frame, strength) in conditioning_items_data:
            tensor = load_conditioning_tensor(path, height, width)
            conditioning_items.append(ConditioningItem(tensor.to(target_device), start_frame, strength))

        # LTX requires a frame count of the form 8*n + 1; round the request.
        n_val = round((float(VIDEO_TOTAL_FRAMES) - 1.0) / 8.0)
        actual_num_frames = int(n_val * 8 + 1)
        # Pad spatial dims up to multiples of 32, and pad the conditioning to match.
        padded_h, padded_w = ((height - 1) // 32 + 1) * 32, ((width - 1) // 32 + 1) * 32
        padding_vals = calculate_padding(height, width, padded_h, padded_w)
        for cond_item in conditioning_items:
            cond_item.media_item = torch.nn.functional.pad(cond_item.media_item, padding_vals)

        timesteps = PIPELINE_CONFIG_YAML.get("first_pass", {}).get("timesteps")
        kwargs = {
            "prompt": motion_prompt,
            "negative_prompt": "blurry, distorted, bad quality, artifacts",
            "height": padded_h,
            "width": padded_w,
            "num_frames": actual_num_frames,
            "frame_rate": VIDEO_FPS,
            "generator": torch.Generator(device=target_device).manual_seed(int(seed) + current_fragment_index),
            "output_type": "pt",
            "guidance_scale": float(cfg),
            "timesteps": timesteps,
            "conditioning_items": conditioning_items,
            "vae_per_channel_normalize": True,
            "decode_timestep": PIPELINE_CONFIG_YAML["decode_timestep"],
            "decode_noise_scale": PIPELINE_CONFIG_YAML["decode_noise_scale"],
            "stochastic_sampling": PIPELINE_CONFIG_YAML["stochastic_sampling"],
            "image_cond_noise_scale": 0.15,
            "is_video": True,
            "mixed_precision": (PIPELINE_CONFIG_YAML["precision"] == "mixed_precision"),
            "offload_to_cpu": False,
            "enhance_prompt": False,
        }
        result_tensor = pipeline_instance(**kwargs).images

        # Undo the spatial padding before writing the video out.
        pad_l, pad_r, pad_t, pad_b = padding_vals
        slice_h, slice_w = (-pad_b if pad_b > 0 else None), (-pad_r if pad_r > 0 else None)
        cropped_tensor = result_tensor[:, :, :VIDEO_TOTAL_FRAMES, pad_t:slice_h, pad_l:slice_w]
        video_np = (cropped_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy() * 255).astype(np.uint8)
        with imageio.get_writer(output_path, fps=VIDEO_FPS, codec='libx264', quality=8) as writer:
            for i, frame in enumerate(video_np):
                progress(i / len(video_np), desc=f"Rendering frame {i+1}/{len(video_np)}...")
                writer.append_data(frame)
        return output_path
    finally:
        pipeline_instance.to('cpu')
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

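# -- Steps 3+4 orchestrator: generates the motion script, then renders each
# fragment conditioned on its starting medium (keyframe or previous convergence
# clip) at frame 0 and, for all but the last fragment, on the next keyframe at
# the final frame index.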
|
def run_full_video_production(prompt_geral, keyframe_image_paths, seed, cfg):
    if not keyframe_image_paths:
        raise gr.Error("Keyframe images are missing.")

    log_history = "Starting step 3: motion script generation...\n"
    yield {video_production_log_output: gr.update(value=log_history)}
    motion_storyboard = get_motion_storyboard(prompt_geral, keyframe_image_paths)
    if not motion_storyboard or len(motion_storyboard) != len(keyframe_image_paths):
        raise gr.Error("Failed to generate the motion script, or the number of prompts does not match the number of images.")
    log_history += "Motion script generated successfully.\n\nStarting step 4: video production with physical convergence...\n"
    yield {video_production_log_output: gr.update(value=log_history)}

    with Image.open(keyframe_image_paths[0]) as img:
        width, height = img.size

    video_fragments = []
    num_keyframes = len(keyframe_image_paths)
    # Mirror the 8*n + 1 frame rounding done in run_ltx_animation so the end
    # conditioning lands exactly on the last generated frame.
    n_val = round((float(VIDEO_TOTAL_FRAMES) - 1.0) / 8.0)
    actual_num_frames = int(n_val * 8 + 1)
    end_frame_index = actual_num_frames - 1

    previous_media_path = keyframe_image_paths[0]

    for i in range(num_keyframes):
        current_motion_prompt = motion_storyboard[i]

        log_message = f"\n--- Preparing fragment {i+1}/{num_keyframes} ---\n"
        log_message += f"Starting medium (convergence): {os.path.basename(previous_media_path)}\n"
        log_history += log_message
        yield {video_production_log_output: gr.update(value=log_history)}

        start_media_path = previous_media_path

        if i < num_keyframes - 1:
            end_image_path = keyframe_image_paths[i+1]
            conditioning_items_data = [(start_media_path, 0, 1.0), (end_image_path, end_frame_index, 1.0)]
            log_message = f"End point (target): {os.path.basename(end_image_path)}\n"
        else:
            conditioning_items_data = [(start_media_path, 0, 1.0)]
            log_message = "Final free animation (no end point defined).\n"

        log_history += log_message
        yield {video_production_log_output: gr.update(value=log_history)}

        full_fragment_path = run_ltx_animation(i + 1, current_motion_prompt, conditioning_items_data, width, height, seed, cfg)
        video_fragments.append(full_fragment_path)

        log_message = f"Fragment {i+1} finished: {os.path.basename(full_fragment_path)}\n"
        log_history += log_message
        yield {
            video_production_log_output: gr.update(value=log_history),
            fragment_gallery_output: gr.update(value=video_fragments),
            fragment_list_state: video_fragments,
            final_fragments_display: gr.update(value=video_fragments)
        }

        if i < num_keyframes - 1:
            convergence_video_path = os.path.join(WORKSPACE_DIR, f"convergence_clip_{i+1}.mp4")
            log_message = f"Extracting {CONVERGENCE_FRAMES} convergence frames for the next step...\n"
            log_history += log_message
            yield {video_production_log_output: gr.update(value=log_history)}
            extract_final_frames_video(full_fragment_path, convergence_video_path, CONVERGENCE_FRAMES)
            previous_media_path = convergence_video_path

    log_history += "\nProduction of all video scenes complete!"
    yield {video_production_log_output: gr.update(value=log_history)}

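# -- Final assembly via FFmpeg's concat demuxer with stream copy (-c copy):
# lossless and fast, but it assumes every fragment shares codec, resolution,
# and frame rate, which holds here because all clips come from the same LTX
# pipeline settings.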
|
def concatenate_masterpiece(fragment_paths: list, progress=gr.Progress()):
    progress(0.5, desc="Assembling the final masterpiece...")
    list_file_path = os.path.join(WORKSPACE_DIR, "concat_list.txt")
    final_output_path = os.path.join(WORKSPACE_DIR, "obra_prima_final.mp4")
    with open(list_file_path, "w") as f:
        for path in fragment_paths:
            f.write(f"file '{os.path.abspath(path)}'\n")
    command = f"ffmpeg -y -f concat -safe 0 -i \"{list_file_path}\" -c copy \"{final_output_path}\""
    try:
        subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
        return final_output_path
    except subprocess.CalledProcessError as e:
        raise gr.Error(f"FFmpeg failed to join the videos: {e.stderr}")

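# =============================================================================
# Gradio UI: four tabs mirroring the pipeline stages.
# =============================================================================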
|
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# LTX Video - Video Storyboard (ADUC-SDR)\n*By Carlex & Gemini & DreamO*")

    scene_storyboard_state = gr.State([])
    keyframe_images_state = gr.State([])
    fragment_list_state = gr.State([])
    prompt_geral_state = gr.State("")

    # Start every session with a clean workspace.
    if os.path.exists(WORKSPACE_DIR):
        shutil.rmtree(WORKSPACE_DIR)
    os.makedirs(WORKSPACE_DIR)

|
    with gr.Tabs():
        with gr.TabItem("STEP 1: THE PHOTOGRAPHER (Scene Script)"):
            with gr.Row():
                with gr.Column():
                    num_fragments_input = gr.Slider(2, 10, 4, step=1, label="Number of Scenes")
                    prompt_input = gr.Textbox(label="General Idea (Prompt)")
                    image_input = gr.Image(type="filepath", label="Main Reference Image")
                    director_button = gr.Button("▶️ 1. Generate Scene Script", variant="primary")
                with gr.Column():
                    storyboard_to_show = gr.JSON(label="Generated Scene Script")

|
with gr.TabItem("ETAPA 2: O PINTOR (Imagens-Chave)"): |
|
with gr.Row(): |
|
with gr.Column(scale=2): |
|
gr.Markdown("### Controles do Pintor (DreamO)\nUse os botões `+` e `-` para adicionar ou remover slots de referência opcionais (até 5 no total).") |
|
|
|
visible_references_state = gr.State(1) |
|
ref_image_inputs = [] |
|
ref_task_inputs = [] |
|
|
|
with gr.Blocks() as ref_blocks: |
|
for i in range(MAX_REFS): |
|
is_visible = i < 1 |
|
label_prefix = f"Referência {i+1}" |
|
if i == 0: |
|
label_prefix += " (Sequencial)" |
|
default_task = "style" |
|
is_interactive = False |
|
else: |
|
label_prefix += " (Opcional, Fixa)" |
|
default_task = "ip" |
|
is_interactive = True |
|
|
|
with gr.Row(visible=is_visible) as ref_row: |
|
img = gr.Image(label=label_prefix, type="filepath", interactive=is_interactive) |
|
task = gr.Dropdown(choices=["ip", "id", "style"], value=default_task, label=f"Tarefa para Ref {i+1}") |
|
ref_image_inputs.append(img) |
|
ref_task_inputs.append(task) |
|
|
|
with gr.Row(): |
|
add_ref_button = gr.Button("➕ Adicionar Referência") |
|
remove_ref_button = gr.Button("➖ Remover Referência") |
|
|
|
photographer_button = gr.Button("▶️ 2. Pintar Imagens-Chave em Sequência", variant="primary") |
|
keyframe_log_output = gr.Textbox(label="Diário de Bordo do Pintor", lines=5, interactive=False) |
|
|
|
with gr.Column(scale=1): |
|
keyframe_gallery_output = gr.Gallery(label="Imagens-Chave Pintadas", object_fit="contain", height="auto", type="filepath") |
|
|
|
with gr.TabItem("ETAPA 3: PRODUÇÃO (Gerar Vídeos)"): |
|
gr.Markdown("Nesta etapa, o sistema irá primeiro gerar o roteiro de movimento e depois animar os clipes, **usando o final de um clipe para dar partida no próximo**.") |
|
with gr.Row(): |
|
with gr.Column(): |
|
keyframes_to_render = gr.Gallery(label="Imagens-Chave para Animar", object_fit="contain", height="auto", interactive=False) |
|
animator_button = gr.Button("▶️ 3. Produzir Cenas em Vídeo", variant="primary", interactive=False) |
|
video_production_log_output = gr.Textbox(label="Diário de Bordo da Produção", lines=10, interactive=False) |
|
with gr.Column(): |
|
fragment_gallery_output = gr.Gallery(label="Cenas Produzidas (Vídeos)", object_fit="contain", height="auto") |
|
with gr.Row(): |
|
seed_number = gr.Number(42, label="Seed") |
|
cfg_slider = gr.Slider(1.0, 10.0, 2.5, step=0.1, label="CFG") |
|
|
|
with gr.TabItem("ETAPA 4: PÓS-PRODUÇÃO"): |
|
with gr.Row(): |
|
with gr.Column(): |
|
editor_button = gr.Button("▶️ 4. Concatenar Vídeo Final", variant="primary") |
|
final_fragments_display = gr.JSON(label="Fragmentos a Concatenar") |
|
with gr.Column(): |
|
final_video_output = gr.Video(label="A Obra-Prima Final") |
|
|
|
|
|
|
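    # --- Event wiring: connect the buttons to the pipeline stages above. ---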
|
    def on_director_success(storyboard_list, img_path, prompt_geral):
        if not storyboard_list:
            raise gr.Error("The storyboard is empty or malformed.")
        # One value per output component (a component may only appear once in `outputs`).
        return storyboard_list, img_path, prompt_geral, gr.update(value=storyboard_list)

    director_button.click(
        fn=get_static_scenes_storyboard,
        inputs=[num_fragments_input, prompt_input, image_input],
        outputs=[scene_storyboard_state]
    ).then(
        fn=on_director_success,
        inputs=[scene_storyboard_state, image_input, prompt_input],
        outputs=[scene_storyboard_state, ref_image_inputs[0], prompt_geral_state, storyboard_to_show]
    )

|
    def update_reference_visibility(current_count, action):
        if action == "add":
            new_count = min(MAX_REFS, current_count + 1)
        else:
            new_count = max(1, current_count - 1)
        updates = [gr.update(visible=(i < new_count)) for i in range(MAX_REFS)]
        return [new_count] + updates

    # Each image component's parent is the gr.Row it was created in, so toggling
    # the rows' visibility shows or hides whole reference slots.
    all_ref_rows = [comp.parent for comp in ref_image_inputs]
    add_ref_button.click(fn=update_reference_visibility, inputs=[visible_references_state, gr.State("add")], outputs=[visible_references_state] + all_ref_rows)
    remove_ref_button.click(fn=update_reference_visibility, inputs=[visible_references_state, gr.State("remove")], outputs=[visible_references_state] + all_ref_rows)

|
    photographer_button.click(
        fn=run_sequential_keyframe_generation,
        inputs=[scene_storyboard_state, ref_image_inputs[0]] + ref_image_inputs + ref_task_inputs,
        outputs=[keyframe_log_output, keyframe_gallery_output, keyframe_images_state, ref_image_inputs[0]]
    ).then(
        lambda paths: {keyframes_to_render: gr.update(value=paths), animator_button: gr.update(interactive=True)},
        inputs=[keyframe_images_state],
        outputs=[keyframes_to_render, animator_button]
    )

|
    animator_button.click(
        fn=run_full_video_production,
        inputs=[prompt_geral_state, keyframe_images_state, seed_number, cfg_slider],
        outputs=[video_production_log_output, fragment_gallery_output, fragment_list_state, final_fragments_display]
    )

    editor_button.click(
        fn=concatenate_masterpiece,
        inputs=[fragment_list_state],
        outputs=[final_video_output]
    )

if __name__ == "__main__":
    demo.queue().launch(server_name="0.0.0.0", share=True)