Spaces: Running on Zero
Commit: bugfix

app.py CHANGED
@@ -201,7 +201,7 @@ def process_motion_transfer(source, prompt, mt_repaint_option, mt_repaint_image)
         # Save the uploaded file
         input_video_path = save_uploaded_file(source)
         if input_video_path is None:
-            return None, None, None
+            return None, None, None, None, None
 
         print(f"DEBUG: Repaint option: {mt_repaint_option}")
         print(f"DEBUG: Repaint image: {mt_repaint_image}")
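The early-return fix above matters because a Gradio event handler must return one value per wired output component; returning three values into five outputs raises a return-arity error. A minimal sketch of the pattern, assuming a handler bound to five outputs (the wiring here is illustrative, not app.py's actual code):

```python
import gradio as gr

# Assumed wiring, not the app's actual code: a handler bound to five
# outputs must return five values on every path, including error paths.
def process(source):
    if source is None:
        # One None per output component; returning only three here would
        # make Gradio complain about too few output values.
        return None, None, None, None, None
    # Placeholder success values standing in for
    # (tracking_path, video_tensor, tracking_tensor, repaint_img_tensor, fps).
    return "tracking.mp4", None, None, None, 8.0

with gr.Blocks() as demo:
    source = gr.Textbox(label="Source path")
    tracking = gr.Video()
    v_state, t_state, r_state = gr.State(), gr.State(), gr.State()
    fps = gr.Number(label="FPS")
    gr.Button("Run").click(
        process, inputs=source,
        outputs=[tracking, v_state, t_state, r_state, fps],
    )
```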
@@ -255,8 +255,7 @@ def process_motion_transfer(source, prompt, mt_repaint_option, mt_repaint_image)
         pred_tracks, pred_visibility = generate_tracking_cotracker(video_tensor)
         tracking_path, tracking_tensor = das.visualize_tracking_cotracker(pred_tracks, pred_visibility)
         print('Export tracking video via cotracker')
-
-        # Return the processing results, but do not apply tracking
+
         return tracking_path, video_tensor, tracking_tensor, repaint_img_tensor, fps
     except Exception as e:
         import traceback
@@ -483,9 +482,10 @@ with gr.Blocks(title="Diffusion as Shader") as demo:
             output_video = gr.Video(label="Generated Video")
 
         with left_column:
-
+            gr.Markdown("### 1. Upload Source")
             gr.Markdown("Upload a video or image, We will extract the motion and space structure from it")
             source_preview = gr.Video(label="Source Preview")
+            source_upload = gr.UploadButton("Upload Source", file_types=["image", "video"])
 
             def update_source_preview(file):
                 if file is None:
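The new `source_upload` button follows Gradio's upload-then-preview pattern. A self-contained sketch of how the pieces connect (the handler body and `.upload` wiring are an assumption; the diff only shows the component definitions):

```python
import gradio as gr

with gr.Blocks() as demo:
    source_preview = gr.Video(label="Source Preview")
    source_upload = gr.UploadButton("Upload Source", file_types=["image", "video"])

    def update_source_preview(file):
        if file is None:
            return None
        # Depending on the Gradio version, `file` arrives as a path string
        # or as a temp-file object exposing the path via .name.
        return file if isinstance(file, str) else file.name

    # .upload fires once the file has been stored server-side.
    source_upload.upload(update_source_preview, source_upload, source_preview)
```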
@@ -502,21 +502,23 @@ with gr.Blocks(title="Diffusion as Shader") as demo:
             gr.Markdown("### 2. Enter the prompt")
             common_prompt = gr.Textbox(label="Describe the scene and the motion you want to create: ", lines=2)
 
+            gr.Markdown("### 3. Select a task")
             with gr.Tabs() as task_tabs:
                 # Motion Transfer tab
                 with gr.TabItem("Motion Transfer"):
-                    gr.Markdown("### Motion Transfer")
 
+                    gr.Markdown("#### 3.1 Process the first frame of Source")
+                    gr.Markdown("DaS can produce novel videos while maintaining the features of the first frame and all the motion of the Source. You can use FLUX.1 to repaint the first frame of the Source")
                     # Simplified controls - Radio buttons for Yes/No and separate file upload
                     with gr.Row():
                         mt_repaint_option = gr.Radio(
-                            label="Repaint First Frame",
+                            label="Repaint First Frame (Optional)",
                             choices=["No", "Yes"],
                             value="No"
                         )
-                    gr.Markdown("
+                    gr.Markdown("Or if you want to use your own image as repainted first frame, please upload the image in below.")
 
-                    mt_repaint_upload = gr.UploadButton("
+                    mt_repaint_upload = gr.UploadButton("Upload Repaint Image (Optional)", file_types=["image"])
                     mt_repaint_preview = gr.Image(label="Repaint Image Preview")
 
                     # Update the preview after a file is uploaded
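Downstream, the radio choice and the optional uploaded image have to be reconciled into a single first-frame decision. The diff does not show that logic; a plausible resolution order, as a hypothetical helper, might look like this:

```python
def resolve_repaint(mt_repaint_option, mt_repaint_image):
    """Hypothetical helper (not in the diff): pick the first-frame source."""
    if mt_repaint_image is not None:
        return mt_repaint_image   # a user-supplied image takes priority
    if mt_repaint_option == "Yes":
        return "flux"             # placeholder flag: let FLUX.1 repaint the frame
    return None                   # keep the source's original first frame
```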
@@ -550,6 +552,8 @@ with gr.Blocks(title="Diffusion as Shader") as demo:
             gr.Markdown("Object Manipulation is not available in Huggingface Space, please deploy our [GitHub project](https://github.com/IGL-HKUST/DiffusionAsShader) on your own machine")
 
     examples_list = load_examples()
+    gr.Markdown("### Examples (For Workflow Demo Only)")
+    gr.Markdown("The following examples are only for demonstrating DaS's workflow and output quality. If you want to actually generate tracking or videos, the program will not run unless you manually upload files from your devices.")
     if examples_list:
         with gr.Blocks() as examples_block:
             gr.Examples(