dangthr committed
Commit 925790f · verified · 1 Parent(s): 9ac909b

Update app.py

Files changed (1)
  1. app.py +90 -213
app.py CHANGED
@@ -1,10 +1,9 @@
- import gradio as gr
  import torch
- import spaces
  import numpy as np
  import random
  import os
  import yaml
+ import argparse
  from pathlib import Path
  import imageio
  import tempfile
@@ -87,10 +86,9 @@ pipeline_instance.to(target_inference_device)
  if latent_upsampler_instance:
      latent_upsampler_instance.to(target_inference_device)

-
  # --- Helper function for dimension calculation ---
- MIN_DIM_SLIDER = 256 # As defined in the sliders minimum attribute
- TARGET_FIXED_SIDE = 768 # Desired fixed side length as per requirement
+ MIN_DIM_SLIDER = 256
+ TARGET_FIXED_SIDE = 768

  def calculate_new_dimensions(orig_w, orig_h):
      """
@@ -99,59 +97,38 @@ def calculate_new_dimensions(orig_w, orig_h):
      both are multiples of 32, and within [MIN_DIM_SLIDER, MAX_IMAGE_SIZE].
      """
      if orig_w == 0 or orig_h == 0:
-         # Default to TARGET_FIXED_SIDE square if original dimensions are invalid
          return int(TARGET_FIXED_SIDE), int(TARGET_FIXED_SIDE)

-     if orig_w >= orig_h: # Landscape or square
+     if orig_w >= orig_h:
          new_h = TARGET_FIXED_SIDE
          aspect_ratio = orig_w / orig_h
          new_w_ideal = new_h * aspect_ratio
-
-         # Round to nearest multiple of 32
          new_w = round(new_w_ideal / 32) * 32
-
-         # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
          new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
-         # Ensure new_h is also clamped (TARGET_FIXED_SIDE should be within these bounds if configured correctly)
          new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
-     else: # Portrait
+     else:
          new_w = TARGET_FIXED_SIDE
-         aspect_ratio = orig_h / orig_w # Use H/W ratio for portrait scaling
+         aspect_ratio = orig_h / orig_w
          new_h_ideal = new_w * aspect_ratio
-
-         # Round to nearest multiple of 32
          new_h = round(new_h_ideal / 32) * 32
-
-         # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
          new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
-         # Ensure new_w is also clamped
          new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))

      return int(new_h), int(new_w)

- def get_duration(prompt, negative_prompt, input_image_filepath, input_video_filepath,
-                  height_ui, width_ui, mode,
-                  duration_ui, # Removed ui_steps
-                  ui_frames_to_use,
-                  seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
-                  progress):
-     if duration_ui > 7:
-         return 75
-     else:
-         return 60
-
- @spaces.GPU(duration=get_duration)
- def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath,
-              height_ui, width_ui, mode,
-              duration_ui,
-              ui_frames_to_use,
-              seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
-              progress=gr.Progress(track_tqdm=True)):
+ def generate(prompt, negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
+              input_image_filepath=None, input_video_filepath=None,
+              height_ui=512, width_ui=704, mode="text-to-video",
+              duration_ui=2.0, ui_frames_to_use=9,
+              seed_ui=42, randomize_seed=True, ui_guidance_scale=None, improve_texture_flag=True):

      if randomize_seed:
          seed_ui = random.randint(0, 2**32 - 1)
      seed_everething(int(seed_ui))

+     if ui_guidance_scale is None:
+         ui_guidance_scale = PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0)
+
      target_frames_ideal = duration_ui * FPS
      target_frames_rounded = round(target_frames_ideal)
      if target_frames_rounded < 1:
@@ -168,9 +145,7 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath

      height_padded = ((actual_height - 1) // 32 + 1) * 32
      width_padded = ((actual_width - 1) // 32 + 1) * 32
-     num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1
-     if num_frames_padded != actual_num_frames:
-         print(f"Warning: actual_num_frames ({actual_num_frames}) and num_frames_padded ({num_frames_padded}) differ. Using num_frames_padded for pipeline.")
+     num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1

      padding_values = calculate_padding(actual_height, actual_width, height_padded, width_padded)

@@ -217,7 +192,7 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
              call_kwargs["conditioning_items"] = [ConditioningItem(media_tensor.to(target_inference_device), 0, 1.0)]
          except Exception as e:
              print(f"Error loading image {input_image_filepath}: {e}")
-             raise gr.Error(f"Could not load image: {e}")
+             raise RuntimeError(f"Could not load image: {e}")
      elif mode == "video-to-video" and input_video_filepath:
          try:
              call_kwargs["media_items"] = load_media_file(
@@ -229,7 +204,7 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
              ).to(target_inference_device)
          except Exception as e:
              print(f"Error loading video {input_video_filepath}: {e}")
-             raise gr.Error(f"Could not load video: {e}")
+             raise RuntimeError(f"Could not load video: {e}")

      print(f"Moving models to {target_inference_device} for inference (if not already there)...")

@@ -240,19 +215,16 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
      result_images_tensor = None
      if improve_texture_flag:
          if not active_latent_upsampler:
-             raise gr.Error("Spatial upscaler model not loaded or improve_texture not selected, cannot use multi-scale.")
+             raise RuntimeError("Spatial upscaler model not loaded or improve_texture not selected, cannot use multi-scale.")

          multi_scale_pipeline_obj = LTXMultiScalePipeline(pipeline_instance, active_latent_upsampler)

          first_pass_args = PIPELINE_CONFIG_YAML.get("first_pass", {}).copy()
-         first_pass_args["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
-         # num_inference_steps will be derived from len(timesteps) in the pipeline
+         first_pass_args["guidance_scale"] = float(ui_guidance_scale)
          first_pass_args.pop("num_inference_steps", None)

-
          second_pass_args = PIPELINE_CONFIG_YAML.get("second_pass", {}).copy()
-         second_pass_args["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
-         # num_inference_steps will be derived from len(timesteps) in the pipeline
+         second_pass_args["guidance_scale"] = float(ui_guidance_scale)
          second_pass_args.pop("num_inference_steps", None)

          multi_scale_call_kwargs = call_kwargs.copy()
@@ -269,12 +241,11 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
          first_pass_config_from_yaml = PIPELINE_CONFIG_YAML.get("first_pass", {})

          single_pass_call_kwargs["timesteps"] = first_pass_config_from_yaml.get("timesteps")
-         single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
+         single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale)
          single_pass_call_kwargs["stg_scale"] = first_pass_config_from_yaml.get("stg_scale")
          single_pass_call_kwargs["rescaling_scale"] = first_pass_config_from_yaml.get("rescaling_scale")
          single_pass_call_kwargs["skip_block_list"] = first_pass_config_from_yaml.get("skip_block_list")

-         # Remove keys that might conflict or are not used in single pass / handled by above
          single_pass_call_kwargs.pop("num_inference_steps", None)
          single_pass_call_kwargs.pop("first_pass", None)
          single_pass_call_kwargs.pop("second_pass", None)
@@ -284,7 +255,7 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
          result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images

      if result_images_tensor is None:
-         raise gr.Error("Generation failed.")
+         raise RuntimeError("Generation failed.")

      pad_left, pad_right, pad_top, pad_bottom = padding_values
      slice_h_end = -pad_bottom if pad_bottom > 0 else None
@@ -295,192 +266,98 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
      ]

      video_np = result_images_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy()
-
      video_np = np.clip(video_np, 0, 1)
      video_np = (video_np * 255).astype(np.uint8)

-     temp_dir = tempfile.mkdtemp()
-     timestamp = random.randint(10000,99999)
-     output_video_path = os.path.join(temp_dir, f"output_{timestamp}.mp4")
+     timestamp = random.randint(10000, 99999)
+     output_video_path = f"output_{timestamp}.mp4"

      try:
          with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], macro_block_size=1) as video_writer:
              for frame_idx in range(video_np.shape[0]):
-                 progress(frame_idx / video_np.shape[0], desc="Saving video")
                  video_writer.append_data(video_np[frame_idx])
+                 if frame_idx % 10 == 0:
+                     print(f"Saving frame {frame_idx + 1}/{video_np.shape[0]}")
      except Exception as e:
          print(f"Error saving video with macro_block_size=1: {e}")
          try:
              with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], format='FFMPEG', codec='libx264', quality=8) as video_writer:
-                 for frame_idx in range(video_np.shape[0]):
-                     progress(frame_idx / video_np.shape[0], desc="Saving video (fallback ffmpeg)")
+                 for frame_idx in range(video_np.shape[0]):
                      video_writer.append_data(video_np[frame_idx])
+                     if frame_idx % 10 == 0:
+                         print(f"Saving frame {frame_idx + 1}/{video_np.shape[0]} (fallback)")
          except Exception as e2:
              print(f"Fallback video saving error: {e2}")
-             raise gr.Error(f"Failed to save video: {e2}")
+             raise RuntimeError(f"Failed to save video: {e2}")

      return output_video_path, seed_ui

- def update_task_image():
-     return "image-to-video"
-
- def update_task_text():
-     return "text-to-video"
-
- def update_task_video():
-     return "video-to-video"
-
- # --- Gradio UI Definition ---
- css="""
- #col-container {
-     margin: 0 auto;
-     max-width: 900px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     gr.Markdown("# LTX Video 0.9.7 Distilled")
-     gr.Markdown("Fast high quality video generation. [Model](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.7-distilled.safetensors) [GitHub](https://github.com/Lightricks/LTX-Video) [Diffusers](#)")
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Tab("image-to-video") as image_tab:
-                 video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
-                 image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam", "clipboard"])
-                 i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
-                 i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
-             with gr.Tab("text-to-video") as text_tab:
-                 image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
-                 video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
-                 t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
-                 t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
-             with gr.Tab("video-to-video", visible=False) as video_tab:
-                 image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
-                 video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"]) # type defaults to filepath
-                 frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
-                 v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
-                 v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
-
-             duration_input = gr.Slider(
-                 label="Video Duration (seconds)",
-                 minimum=0.3,
-                 maximum=8.5,
-                 value=2,
-                 step=0.1,
-                 info=f"Target video duration (0.3s to 8.5s)"
-             )
-             improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
-
-         with gr.Column():
-             output_video = gr.Video(label="Generated Video", interactive=False)
-             # gr.DeepLinkButton()
-
-     with gr.Accordion("Advanced settings", open=False):
-         mode = gr.Dropdown(["text-to-video", "image-to-video", "video-to-video"], label="task", value="image-to-video", visible=False)
-         negative_prompt_input = gr.Textbox(label="Negative Prompt", value="worst quality, inconsistent motion, blurry, jittery, distorted", lines=2)
-         with gr.Row():
-             seed_input = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=2**32-1)
-             randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
-         with gr.Row():
-             guidance_scale_input = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, value=PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0), step=0.1, info="Controls how much the prompt influences the output. Higher values = stronger influence.")
-         with gr.Row():
-             height_input = gr.Slider(label="Height", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
-             width_input = gr.Slider(label="Width", value=704, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
-
-
-     # --- Event handlers for updating dimensions on upload ---
-     def handle_image_upload_for_dims(image_filepath, current_h, current_w):
-         if not image_filepath: # Image cleared or no image initially
-             # Keep current slider values if image is cleared or no input
-             return gr.update(value=current_h), gr.update(value=current_w)
-         try:
-             img = Image.open(image_filepath)
-             orig_w, orig_h = img.size
-             new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
-             return gr.update(value=new_h), gr.update(value=new_w)
-         except Exception as e:
-             print(f"Error processing image for dimension update: {e}")
-             # Keep current slider values on error
-             return gr.update(value=current_h), gr.update(value=current_w)
-
-     def handle_video_upload_for_dims(video_filepath, current_h, current_w):
-         if not video_filepath: # Video cleared or no video initially
-             return gr.update(value=current_h), gr.update(value=current_w)
-         try:
-             # Ensure video_filepath is a string for os.path.exists and imageio
-             video_filepath_str = str(video_filepath)
-             if not os.path.exists(video_filepath_str):
-                 print(f"Video file path does not exist for dimension update: {video_filepath_str}")
-                 return gr.update(value=current_h), gr.update(value=current_w)
-
-             orig_w, orig_h = -1, -1
-             with imageio.get_reader(video_filepath_str) as reader:
-                 meta = reader.get_meta_data()
-                 if 'size' in meta:
-                     orig_w, orig_h = meta['size']
-                 else:
-                     # Fallback: read first frame if 'size' not in metadata
-                     try:
-                         first_frame = reader.get_data(0)
-                         # Shape is (h, w, c) for frames
-                         orig_h, orig_w = first_frame.shape[0], first_frame.shape[1]
-                     except Exception as e_frame:
-                         print(f"Could not get video size from metadata or first frame: {e_frame}")
-                         return gr.update(value=current_h), gr.update(value=current_w)
-
-             if orig_w == -1 or orig_h == -1: # If dimensions couldn't be determined
-                 print(f"Could not determine dimensions for video: {video_filepath_str}")
-                 return gr.update(value=current_h), gr.update(value=current_w)
-
-             new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
-             return gr.update(value=new_h), gr.update(value=new_w)
-         except Exception as e:
-             # Log type of video_filepath for debugging if it's not a path-like string
-             print(f"Error processing video for dimension update: {e} (Path: {video_filepath}, Type: {type(video_filepath)})")
-             return gr.update(value=current_h), gr.update(value=current_w)
-
-     image_i2v.upload(
-         fn=handle_image_upload_for_dims,
-         inputs=[image_i2v, height_input, width_input],
-         outputs=[height_input, width_input]
-     )
-     video_v2v.upload(
-         fn=handle_video_upload_for_dims,
-         inputs=[video_v2v, height_input, width_input],
-         outputs=[height_input, width_input]
-     )
-
-     image_tab.select(
-         fn=update_task_image,
-         outputs=[mode]
-     )
-     text_tab.select(
-         fn=update_task_text,
-         outputs=[mode]
-     )
-
-     t2v_inputs = [t2v_prompt, negative_prompt_input, image_n_hidden, video_n_hidden,
-                   height_input, width_input, mode,
-                   duration_input, frames_to_use,
-                   seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
-
-     i2v_inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
-                   height_input, width_input, mode,
-                   duration_input, frames_to_use,
-                   seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
-
-     v2v_inputs = [v2v_prompt, negative_prompt_input, image_v_hidden, video_v2v,
-                   height_input, width_input, mode,
-                   duration_input, frames_to_use,
-                   seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
-
-     t2v_button.click(fn=generate, inputs=t2v_inputs, outputs=[output_video, seed_input], api_name="text_to_video")
-     i2v_button.click(fn=generate, inputs=i2v_inputs, outputs=[output_video, seed_input], api_name="image_to_video")
-     v2v_button.click(fn=generate, inputs=v2v_inputs, outputs=[output_video, seed_input], api_name="video_to_video")
+ def main():
+     parser = argparse.ArgumentParser(description="LTX Video Generation from Command Line")
+     parser.add_argument("--prompt", required=True, help="Text prompt for video generation")
+     parser.add_argument("--negative-prompt", default="worst quality, inconsistent motion, blurry, jittery, distorted",
+                         help="Negative prompt")
+     parser.add_argument("--mode", choices=["text-to-video", "image-to-video", "video-to-video"],
+                         default="text-to-video", help="Generation mode")
+     parser.add_argument("--input-image", help="Input image path for image-to-video mode")
+     parser.add_argument("--input-video", help="Input video path for video-to-video mode")
+     parser.add_argument("--duration", type=float, default=2.0, help="Video duration in seconds (0.3-8.5)")
+     parser.add_argument("--height", type=int, default=512, help="Video height (must be divisible by 32)")
+     parser.add_argument("--width", type=int, default=704, help="Video width (must be divisible by 32)")
+     parser.add_argument("--seed", type=int, default=42, help="Random seed")
+     parser.add_argument("--randomize-seed", action="store_true", help="Use random seed")
+     parser.add_argument("--guidance-scale", type=float, help="Guidance scale for generation")
+     parser.add_argument("--no-improve-texture", action="store_true", help="Disable texture improvement (faster)")
+     parser.add_argument("--frames-to-use", type=int, default=9, help="Frames to use from input video (for video-to-video)")
+
+     args = parser.parse_args()
+
+     # Validate parameters
+     if args.mode == "image-to-video" and not args.input_image:
+         print("Error: --input-image is required for image-to-video mode")
+         return
+
+     if args.mode == "video-to-video" and not args.input_video:
+         print("Error: --input-video is required for video-to-video mode")
+         return
+
+     # Ensure dimensions are divisible by 32
+     args.height = ((args.height - 1) // 32 + 1) * 32
+     args.width = ((args.width - 1) // 32 + 1) * 32
+
+     print(f"Starting video generation...")
+     print(f"Prompt: {args.prompt}")
+     print(f"Mode: {args.mode}")
+     print(f"Duration: {args.duration}s")
+     print(f"Resolution: {args.width}x{args.height}")
+
+     try:
+         output_path, used_seed = generate(
+             prompt=args.prompt,
+             negative_prompt=args.negative_prompt,
+             input_image_filepath=args.input_image,
+             input_video_filepath=args.input_video,
+             height_ui=args.height,
+             width_ui=args.width,
+             mode=args.mode,
+             duration_ui=args.duration,
+             ui_frames_to_use=args.frames_to_use,
+             seed_ui=args.seed,
+             randomize_seed=args.randomize_seed,
+             ui_guidance_scale=args.guidance_scale,
+             improve_texture_flag=not args.no_improve_texture
+         )
+
+         print(f"\nVideo generation completed!")
+         print(f"Output saved to: {output_path}")
+         print(f"Used seed: {used_seed}")
+
+     except Exception as e:
+         print(f"Error during generation: {e}")
+         raise

  if __name__ == "__main__":
      if os.path.exists(models_dir) and os.path.isdir(models_dir):
          print(f"Model directory: {Path(models_dir).resolve()}")

-     demo.queue().launch(debug=True, share=False, mcp_server=True)
+     main()
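
With the Gradio UI and the `@spaces.GPU` decorator removed, `generate()` is now a plain Python function and the script is driven by the argparse flags defined in `main()`. A minimal usage sketch, assuming the model weights are already downloaded locally so the module-level pipeline setup succeeds on import; the prompt and values below are illustrative, not part of the commit:

# Equivalent CLI call (assumed invocation of this script):
#   python app.py --prompt "A majestic dragon flying over a medieval castle" --duration 2 --seed 42
from app import generate  # importing app runs the module-level model/pipeline loading

output_path, used_seed = generate(
    prompt="A majestic dragon flying over a medieval castle",
    mode="text-to-video",           # or "image-to-video" with input_image_filepath=...
    duration_ui=2.0,                # seconds; frame count is derived from FPS inside generate()
    height_ui=512, width_ui=704,    # padded up to multiples of 32 by the pipeline
    seed_ui=42, randomize_seed=False,
    improve_texture_flag=True,      # two-pass multi-scale path; requires the latent upsampler
)
print(f"Saved {output_path} (seed {used_seed})")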