Yaron Koresh committed on
Commit
3a6781f
·
verified ·
1 Parent(s): 205077f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -9
app.py CHANGED
@@ -14,11 +14,9 @@ import warnings
14
  import time
15
  import asyncio
16
  from functools import partial
17
- from concurrent.futures import ProcessPoolExecutor as Pool
18
 
19
  # external
20
 
21
- import spaces
22
  import torch
23
  import gradio as gr
24
  from numpy import asarray as array
@@ -64,10 +62,10 @@ result = []
64
  seq=512
65
  fast=True
66
  fps=20
67
- time=1
68
  width=896
69
  height=896
70
- step=40
71
  accu=8.5
72
 
73
  # ui data
@@ -177,7 +175,6 @@ def generate_random_string(length):
177
  characters = str(ascii_letters + digits)
178
  return ''.join(random.choice(characters) for _ in range(length))
179
 
180
- @spaces.GPU(duration=180)
181
  def pipe_generate(img,p1,p2,motion):
182
  global last_motion
183
  global pipe
@@ -201,7 +198,7 @@ def pipe_generate(img,p1,p2,motion):
201
  guidance_scale=accu,
202
  num_inference_steps=step,
203
  max_sequence_length=seq,
204
- generator=torch.Generator("cuda").manual_seed(0)
205
  ).images[0]
206
 
207
  return pipe(
@@ -251,15 +248,15 @@ def ui():
251
  with gr.Row(elem_id="col-container"):
252
  with gr.Column():
253
  with gr.Row():
254
- img = gr.Image(label="STATIC PHOTO",show_label=True,container=True,type="pil")
255
  with gr.Row():
256
  prompt = gr.Textbox(
257
  elem_id="prompt",
258
- placeholder="PROMPT",
259
  container=False,
260
  max_lines=1
261
  )
262
- with gr.Row(visible=False):
263
  prompt2 = gr.Textbox(
264
  elem_id="prompt2",
265
  placeholder="EXCLUDE",
 
14
  import time
15
  import asyncio
16
  from functools import partial
 
17
 
18
  # external
19
 
 
20
  import torch
21
  import gradio as gr
22
  from numpy import asarray as array
 
62
  seq=512
63
  fast=True
64
  fps=20
65
+ time=3
66
  width=896
67
  height=896
68
+ step=50
69
  accu=8.5
70
 
71
  # ui data
 
175
  characters = str(ascii_letters + digits)
176
  return ''.join(random.choice(characters) for _ in range(length))
177
 
 
178
  def pipe_generate(img,p1,p2,motion):
179
  global last_motion
180
  global pipe
 
198
  guidance_scale=accu,
199
  num_inference_steps=step,
200
  max_sequence_length=seq,
201
+ generator=torch.Generator(device).manual_seed(0)
202
  ).images[0]
203
 
204
  return pipe(
 
248
  with gr.Row(elem_id="col-container"):
249
  with gr.Column():
250
  with gr.Row():
251
+ img = gr.Image(label="UPLOAD PHOTO",show_label=True,container=True,type="pil")
252
  with gr.Row():
253
  prompt = gr.Textbox(
254
  elem_id="prompt",
255
+ placeholder="INCLUDE",
256
  container=False,
257
  max_lines=1
258
  )
259
+ with gr.Row():
260
  prompt2 = gr.Textbox(
261
  elem_id="prompt2",
262
  placeholder="EXCLUDE",