Hugging Face Space commit: change to GPU version
Files changed:
- README.md (+1, -1)
- app.py (+11, -8)
- requirements.txt (+2, -1)
README.md

@@ -4,7 +4,7 @@ emoji: 💊
 colorFrom: purple
 colorTo: blue
 sdk: gradio
-sdk_version:
+sdk_version: 4.36.1
 app_file: app.py
 pinned: false
 ---
app.py

@@ -1,4 +1,5 @@
 import gradio as gr
+import spaces
 import numpy as np
 import torch
 import cv2
@@ -113,6 +114,7 @@ def images_to_video(image_list, output_path, fps=10):
     writer.close()


+@spaces.GPU
 def NaRCan_make_video(edit_canonical, pth_path, frames_path):
     # load NaRCan model
     checkpoint_g_old = torch.load(os.path.join(pth_path, "homography_g.pth"))
@@ -190,6 +192,7 @@ def NaRCan_make_video(edit_canonical, pth_path, frames_path):
     return edit_video_path


+@spaces.GPU
 def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt, control_type="Lineart"):
     video_name = input_video.split('/')[-1]
     if video_name in video_to_image:
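For context, @spaces.GPU is the ZeroGPU hook from Hugging Face's spaces package: the Space holds no GPU at rest, and a device is attached only for the duration of a decorated call, which is why both GPU-bound entry points above get the decorator. A minimal sketch of the pattern, with a stand-in torch.nn.Linear in place of the repo's actual models:

import spaces
import torch

# Built on CPU at import time; under ZeroGPU no CUDA device exists yet.
model = torch.nn.Linear(4, 4)

@spaces.GPU
def infer(x: torch.Tensor) -> torch.Tensor:
    model.to("cuda")                      # the GPU exists only inside this call
    with torch.no_grad():
        return model(x.to("cuda")).cpu()  # hand results back on CPU

The decorator also takes a duration hint, e.g. @spaces.GPU(duration=120), for calls that outrun the default slot; this commit uses the bare form.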
@@ -201,9 +204,9 @@ def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt

     if control_type == "Lineart":
         # Load the control net model for lineart
-        controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_lineart")
+        controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_lineart", torch_dtype=torch.float16)
         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=controlnet
+            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
         )
         pipe.to(device)
         # lineart
@@ -228,9 +231,9 @@ def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt

     else:
         # Load the control net model for canny
-        controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
+        controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16)
         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=controlnet
+            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
         )
         pipe.to(device)
         # canny
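The other change inside edit_with_pnp is loading the ControlNet and the Stable Diffusion backbone in float16, which roughly halves weight memory compared with the float32 default. Isolated from the diff, the loading pattern is (a sketch assuming diffusers is installed; the model IDs are the same ones app.py uses):

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Half-precision weights for both the ControlNet and the SD 1.5 backbone.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_lineart", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.to("cuda")  # fp16 inference is meant to run on the GPU

The same torch_dtype change is applied to both the lineart and canny branches.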
@@ -285,10 +288,10 @@ with gr.Blocks(css="style.css") as demo:
     do_inversion = gr.State(value=True)

     with gr.Row():
-        input_video = gr.Video(label="Input Video", interactive=False, elem_id="input_video", value='examples/bear.mp4')
-        output_video = gr.Video(label="Edited Video", interactive=False, elem_id="output_video")
-        input_video.style(height=365, width=365)
-        output_video.style(height=365, width=365)
+        input_video = gr.Video(label="Input Video", interactive=False, elem_id="input_video", value='examples/bear.mp4', height=365, width=365)
+        output_video = gr.Video(label="Edited Video", interactive=False, elem_id="output_video", height=365, width=365)
+        # input_video.style(height=365, width=365)
+        # output_video.style(height=365, width=365)


     with gr.Row():
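The final hunk is a Gradio 4 migration rather than a GPU change: the per-component .style() helper was removed in Gradio 4, so fixed sizing is passed to the gr.Video constructor instead, and the old calls are left behind as comments. A minimal sketch of the new form:

import gradio as gr

with gr.Blocks() as demo:
    # Gradio 4: sizing is a constructor argument, not a .style() call.
    video = gr.Video(label="Input Video", height=365, width=365)

demo.launch()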
requirements.txt

@@ -1,5 +1,6 @@
-gradio
+gradio
 numpy
+spaces
 torch==2.3.1
 opencv-python==4.10.0.82
 imageio==2.34.1