Yaron Koresh committed on
Commit
8a2ea7d
·
verified ·
1 Parent(s): 8a757aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -39
app.py CHANGED
@@ -20,6 +20,7 @@ from multiprocessing import Process, Queue
20
 
21
  # external
22
 
 
23
  import torch
24
  import gradio as gr
25
  from numpy import asarray as array
@@ -200,48 +201,50 @@ def generate_random_string(length):
200
  characters = str(ascii_letters + digits)
201
  return ''.join(random.choice(characters) for _ in range(length))
202
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
203
  def handle_generate(*inp):
204
 
205
  calc_out = []
206
 
207
- def calc(img,p1,p2,motion):
208
- global last_motion
209
- global pipe
210
-
211
- if last_motion != motion:
212
- if last_motion != "":
213
- pipe.unload_lora_weights()
214
- if motion != "":
215
- pipe.load_lora_weights(motion, adapter_name="motion")
216
- pipe.fuse_lora()
217
- pipe.set_adapters("motion", [0.7])
218
- last_motion = motion
219
-
220
- pipe.to(device,dtype=dtype)
221
-
222
- if not img:
223
- img = pipe(
224
- prompt=p1,
225
- height=height,
226
- width=width,
227
- guidance_scale=accu,
228
- num_inference_steps=step,
229
- max_sequence_length=seq,
230
- generator=torch.Generator("cuda").manual_seed(0)
231
- ).images[0]
232
-
233
- calc_out.append(
234
- pipe(
235
- prompt=p1,
236
- negative_prompt=p2,
237
- height=height,
238
- width=width,
239
- ip_adapter_image=img.convert("RGB"),
240
- num_inference_steps=step,
241
- guidance_scale=accu,
242
- num_frames=(fps*time)
243
- )
244
- )
245
 
246
  inp = list(inp)
247
 
@@ -319,7 +322,6 @@ def ui():
319
  run_button = gr.Button("START",elem_classes="btn",scale=0)
320
  with gr.Row():
321
  result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
322
- result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
323
 
324
  gr.on(
325
  triggers=[
 
20
 
21
  # external
22
 
23
+ import spaces
24
  import torch
25
  import gradio as gr
26
  from numpy import asarray as array
 
201
  characters = str(ascii_letters + digits)
202
  return ''.join(random.choice(characters) for _ in range(length))
203
 
204
@spaces.GPU(duration=140)
def pipe_generate(img, p1, p2, motion):
    """Generate a video with the global diffusion pipeline.

    Parameters
    ----------
    img : PIL.Image or falsy
        Optional reference image. When falsy, a still image is first
        synthesized from the positive prompt and used as the IP-Adapter input.
    p1 : str
        Positive prompt.
    p2 : str
        Negative prompt.
    motion : str
        LoRA repo/path for a motion adapter; "" means no motion LoRA.

    Returns
    -------
    The pipeline output (frames), as returned by the final ``pipe(...)`` call.

    NOTE(review): relies on module globals ``pipe``, ``last_motion``,
    ``device``, ``dtype``, ``height``, ``width``, ``accu``, ``step``,
    ``seq``, ``fps`` and ``time`` defined elsewhere in app.py.
    """
    global last_motion
    global pipe

    # Swap the motion LoRA only when it changed since the previous call,
    # so repeated generations with the same adapter skip the reload cost.
    if last_motion != motion:
        if last_motion != "":
            pipe.unload_lora_weights()
        if motion != "":
            pipe.load_lora_weights(motion, adapter_name="motion")
            pipe.fuse_lora()
            pipe.set_adapters("motion", [0.7])
        last_motion = motion

    pipe.to(device, dtype=dtype)

    # No reference image supplied: synthesize one from the prompt first.
    if not img:
        img = pipe(
            prompt=p1,
            height=height,
            width=width,
            guidance_scale=accu,
            num_inference_steps=step,
            max_sequence_length=seq,
            # Fix: seed the generator on the same device the pipeline was
            # moved to, instead of hard-coding "cuda" (which crashes when
            # ``device`` is CPU or another accelerator).
            generator=torch.Generator(device).manual_seed(0),
        ).images[0]

    # Animate using the (given or generated) image as IP-Adapter guidance.
    return pipe(
        prompt=p1,
        negative_prompt=p2,
        height=height,
        width=width,
        ip_adapter_image=img.convert("RGB"),
        num_inference_steps=step,
        guidance_scale=accu,
        num_frames=(fps * time),
    )
241
+
242
  def handle_generate(*inp):
243
 
244
  calc_out = []
245
 
246
+ def calc(*args):
247
+ calc_out.append(pipe_generate(*args))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
 
249
  inp = list(inp)
250
 
 
322
  run_button = gr.Button("START",elem_classes="btn",scale=0)
323
  with gr.Row():
324
  result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
 
325
 
326
  gr.on(
327
  triggers=[