Update app.py
app.py CHANGED

@@ -7,6 +7,7 @@ import requests
 from requests.adapters import HTTPAdapter
 from urllib3.util.retry import Retry
 import json
+import subprocess  # added for running FFmpeg

 os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))

@@ -54,6 +55,14 @@ if IN_HF_SPACE:
     except ImportError:
         print("spacesモジュールのインポートに失敗しました。Hugging Face Space環境外かもしれません")
         GPU_AVAILABLE = torch.cuda.is_available()
+else:
+    # In local environments, check CUDA availability directly
+    GPU_AVAILABLE = torch.cuda.is_available()
+    if GPU_AVAILABLE:
+        print(f"GPU利用可能: デバイス名 {torch.cuda.get_device_name(0)}")
+        print(f"GPUメモリ: {torch.cuda.get_device_properties(0).total_memory / 1e9} GB")
+    else:
+        print("GPU利用不可: CPUモードで実行します")

 from PIL import Image
 from diffusers import AutoencoderKLHunyuanVideo
@@ -61,8 +70,22 @@ from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPToke
 from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
 from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
 from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
+from utils.lora_utils import merge_lora_to_state_dict
 from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
-
+
+# Fixed the imports from memory.py
+from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
+
+# Define a MEMORY_IN_HF_SPACE variable for local environments
+MEMORY_IN_HF_SPACE = False
+if IN_HF_SPACE:
+    try:
+        # Use IN_HF_SPACE from memory.py if it is defined there
+        from diffusers_helper.memory import IN_HF_SPACE as MEMORY_IN_HF_SPACE
+    except ImportError:
+        # Fall back to the local variable if it is not defined
+        MEMORY_IN_HF_SPACE = IN_HF_SPACE
+
 from diffusers_helper.thread_utils import AsyncStream, async_run
 from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
 from transformers import SiglipImageProcessor, SiglipVisionModel
@@ -72,7 +95,7 @@ from diffusers_helper.bucket_tools import find_nearest_bucket
 outputs_folder = './outputs/'
 os.makedirs(outputs_folder, exist_ok=True)

-# Spaces
+# Configure memory settings based on local vs. Spaces environment
 if not IN_HF_SPACE:
     # Fetch CUDA memory only outside Spaces
     try:
@@ -109,6 +132,40 @@ else:
 models = {}
 cpu_fallback_mode = not GPU_AVAILABLE  # use CPU fallback mode when no GPU is available

+# Added: a function that flips videos horizontally with FFmpeg
+def flip_video_horizontally(input_file, output_file):
+    """Flip a video horizontally using FFmpeg."""
+    try:
+        # Build a temporary filename (insert _flipped before the extension)
+        temp_output = os.path.splitext(output_file)[0] + "_flipped" + os.path.splitext(output_file)[1]
+
+        # Run the FFmpeg command
+        cmd = [
+            'ffmpeg',
+            '-i', input_file,
+            '-vf', 'hflip',      # flip horizontally
+            '-c:v', 'libx264',   # keep the same codec
+            '-crf', '18',        # same CRF value as the original
+            '-y',                # overwrite existing files
+            temp_output
+        ]
+
+        result = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # On success, replace the original file
+        if os.path.exists(temp_output):
+            import shutil
+            shutil.move(temp_output, output_file)
+            print(f"動画を水平方向に反転して保存しました: {output_file}")
+            return True
+        else:
+            print("動画の反転処理に失敗しました")
+            return False
+    except Exception as e:
+        print(f"FFmpegによる動画反転中にエラーが発生しました: {e}")
+        traceback.print_exc()
+        return False
+
 # Use the model loading function
 def load_models():
     global models, cpu_fallback_mode, GPU_INITIALIZED
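
Note: a quick way to exercise the new helper outside the app is a smoke test like the sketch below; it assumes ffmpeg is on PATH and is run inside the app module, and sample.mp4 is a placeholder filename, not part of this commit. Passing the same path as input and output is safe because the helper writes to a temporary file first and then moves it over the original.

    import shutil
    assert shutil.which('ffmpeg') is not None, 'ffmpeg not found on PATH'
    ok = flip_video_horizontally('sample.mp4', 'sample.mp4')  # in-place flip via temp file
    print('flipped' if ok else 'flip failed')
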
@@ -141,7 +198,7 @@ def load_models():
         feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
         image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=dtype).to(model_device)

-        transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('tori29umai/
+        transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('tori29umai/FramePackI2V_HY_rotate_indoor', torch_dtype=transformer_dtype).to(model_device)

         print("すべてのモデルの読み込みに成功しました")
     except Exception as e:
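
Note: to confirm the new checkpoint resolves before a full model load, a minimal dry check could query the Hub first (a sketch; assumes network access and the huggingface_hub package):

    from huggingface_hub import model_info
    # Raises an error if the repo does not exist or is unreachable
    print(model_info('tori29umai/FramePackI2V_HY_rotate_indoor'))
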
@@ -162,7 +219,7 @@ def load_models():
         feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
         image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=dtype).to('cpu')

-        transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('tori29umai/
+        transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('tori29umai/FramePackI2V_HY_rotate_indoor', torch_dtype=transformer_dtype).to('cpu')

         print("CPUモードですべてのモデルの読み込みに成功しました")

@@ -250,7 +307,7 @@ def load_models():
         return {}


-# Hugging Face Spaces
+# Branch between local and Hugging Face Spaces environments
 if IN_HF_SPACE and 'spaces' in globals() and GPU_AVAILABLE:
     try:
         @spaces.GPU
@@ -273,6 +330,10 @@ if IN_HF_SPACE and 'spaces' in globals() and GPU_AVAILABLE:
         # If the decorator errors, use the undecorated version directly
         def initialize_models():
             return load_models()
+else:
+    # Function definition for the local environment
+    def initialize_models():
+        return load_models()


 # Defer model retrieval inside the functions below
@@ -369,10 +430,17 @@ stream = AsyncStream()


 @torch.no_grad()
-def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache):
+def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, rotation_direction):
     global last_update_time
     last_update_time = time.time()

+    # Handle the rotation direction
+    is_counterclockwise = "counter" in rotation_direction.lower()
+    if is_counterclockwise:
+        print("反時計回りモードが選択されました。画像を左右反転して処理します。")
+        # Flip the image horizontally
+        input_image = np.fliplr(input_image).copy()
+
     # Cap the video length at 3 seconds
     total_second_length = min(total_second_length, 3.0)

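
Note: the worker only inspects the substring "counter" in the lowercased dropdown value, so both bilingual labels map cleanly to the flag (illustrative snippet, not part of the commit):

    for label in ["時計回り / clockwise", "反時計回り / counterclockwise"]:
        print(label, "->", "counter" in label.lower())
    # 時計回り / clockwise -> False
    # 反時計回り / counterclockwise -> True
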
@@ -507,13 +575,14 @@ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_wind
         load_model_as_complete(vae, target_device=device)

         start_latent = vae_encode(input_image_pt, vae)
+
     except Exception as e:
-
-
-
-
-
-
+        error_msg = f"VAEエンコーディング中にエラーが発生しました: {e}"
+        print(error_msg)
+        traceback.print_exc()
+        stream.output_queue.push(('error', error_msg))
+        stream.output_queue.push(('end', None))
+        return

     # CLIP Vision
     last_update_time = time.time()
@@ -588,6 +657,14 @@ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_wind
             try:
                 output_filename = os.path.join(outputs_folder, f'{job_id}_final_{total_generated_latent_frames}.mp4')
                 save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=18)
+
+                # In counterclockwise mode, flip the final output as well
+                if is_counterclockwise:
+                    print("反時計回りモード: 最終出力動画を水平方向に反転します")
+                    flip_result = flip_video_horizontally(output_filename, output_filename)
+                    if not flip_result:
+                        print("警告: 最終動画の反転に失敗しました。元の動画を使用します。")
+
                 stream.output_queue.push(('file', output_filename))
             except Exception as e:
                 print(f"最終動画保存中にエラーが発生しました: {e}")
@@ -816,6 +893,13 @@ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_wind
             save_start_time = time.time()
             save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=18)
             print(f"動画保存完了、所要時間: {time.time() - save_start_time:.2f}秒")
+
+            # In counterclockwise mode, flip the output with FFmpeg
+            if is_counterclockwise:
+                print("反時計回りモード: 出力動画を水平方向に反転します")
+                flip_result = flip_video_horizontally(output_filename, output_filename)
+                if not flip_result:
+                    print("警告: 動画の反転に失敗しました。元の動画を使用します。")

             print(f'デコード完了。現在の潜在変数形状 {real_history_latents.shape}; ピクセル形状 {history_pixels.shape}')

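
Note: the double-flip trick works because a horizontal flip is an involution: mirroring the input lets the model generate its usual orbit on the mirrored scene (presumably the checkpoint produces a single orbit direction), and mirroring the output restores the original orientation while leaving the orbit direction reversed. A minimal check of the involution property:

    import numpy as np
    frame = np.arange(12).reshape(2, 2, 3)  # tiny H x W x C frame
    assert np.array_equal(np.fliplr(np.fliplr(frame)), frame)  # flipping twice is the identity
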
@@ -876,10 +960,10 @@ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_wind
     return


-# Hugging Face Spaces
+# Branch the processing function between local and Hugging Face Spaces environments
 if IN_HF_SPACE and 'spaces' in globals():
     @spaces.GPU
-    def process_with_gpu(input_image, prompt, n_prompt, seed, total_second_length, use_teacache):
+    def process_with_gpu(input_image, prompt, n_prompt, seed, total_second_length, use_teacache, rotation_direction):
         global stream
         assert input_image is not None, '入力画像がありません!'

@@ -897,8 +981,8 @@ if IN_HF_SPACE and 'spaces' in globals():
         try:
             stream = AsyncStream()

-            #
-            async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache)
+            # Launch the worker asynchronously (rotation_direction parameter added)
+            async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, rotation_direction)

             output_filename = None
             prev_output_filename = None
@@ -960,7 +1044,8 @@ if IN_HF_SPACE and 'spaces' in globals():

     process = process_with_gpu
 else:
-
+    # Processing function for the local environment
+    def process(input_image, prompt, n_prompt, seed, total_second_length, use_teacache, rotation_direction):
         global stream
         assert input_image is not None, '入力画像がありません!'

@@ -977,8 +1062,8 @@ else:
         try:
             stream = AsyncStream()

-            #
-            async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache)
+            # Launch the worker asynchronously (rotation_direction parameter added)
+            async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, rotation_direction)

             output_filename = None
             prev_output_filename = None
@@ -1020,7 +1105,7 @@ else:
         except Exception as e:
             print(f"出力処理中にエラーが発生しました: {e}")
             # Check for a prolonged lack of updates
-            current_time = time.time()
+            current_time = time.time()
             if current_time - last_update_time > 60:  # if no update for 60 s, the process may have frozen
                 print(f"処理がフリーズした可能性があります。{current_time - last_update_time:.1f}秒間更新がありません")

@@ -1071,7 +1156,7 @@ def end_process():


 quick_prompts = [
-    'The camera smoothly orbits around the center of the scene, keeping the center point fixed
+    'The camera smoothly orbits around the center of the scene, gradually pulling back while keeping the center point fixed in view.',
 ]
 quick_prompts = [[x] for x in quick_prompts]

@@ -1245,7 +1330,7 @@ with block:

         prompt = gr.Textbox(
             label="プロンプト / Prompt",
-            value='The camera smoothly orbits around the center of the scene, keeping the center point fixed
+            value='The camera smoothly orbits around the center of the scene, gradually pulling back while keeping the center point fixed in view.',
             elem_id="prompt-input"
         )

@@ -1257,7 +1342,14 @@ with block:
         )
         example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)

-
+
+        rotation_direction = gr.Dropdown(
+            ["時計回り / clockwise", "反時計回り / counterclockwise"],
+            label="回転の向き / Direction of rotation",
+            value="時計回り / clockwise"  # default value
+        )
+
+
         with gr.Row(elem_classes="button-container"):
             start_button = gr.Button(
                 value="生成開始 / Generate",
@@ -1332,12 +1424,11 @@ with block:
         error_message = gr.HTML('', elem_id='error-message', visible=True)

     # Processing function inputs
-    ips = [input_image, prompt, n_prompt, seed, total_second_length, use_teacache]
+    ips = [input_image, prompt, n_prompt, seed, total_second_length, use_teacache, rotation_direction]

     # Start and end button events
     start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
     end_button.click(fn=end_process)


-block.launch()
-
+block.launch()
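
Note: the order of ips must match the process signature position by position, since Gradio passes component values positionally. A quick self-check (a sketch, runnable after the app module has defined process):

    import inspect
    expected = ['input_image', 'prompt', 'n_prompt', 'seed', 'total_second_length', 'use_teacache', 'rotation_direction']
    assert list(inspect.signature(process).parameters) == expected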