Yaron Koresh committed on
Commit
843e793
·
verified ·
1 Parent(s): d849b8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -46
app.py CHANGED
@@ -119,9 +119,7 @@ def xpath_finder(str,pattern):
119
  except:
120
  return ""
121
 
122
- @gpu(string(string,string),device=True,inline=True)
123
  def translate(text,lang):
124
-
125
  if text == None or lang == None:
126
  return ""
127
  text = re.sub(f'[{string.punctuation}]', '', re.sub('[\s+]', ' ', text)).lower().strip()
@@ -157,13 +155,12 @@ def translate(text,lang):
157
  print(ret)
158
  return ret
159
 
160
- @gpu(string(int),device=True,inline=True)
161
  def generate_random_string(length):
162
  characters = str(string.ascii_letters + string.digits)
163
  return ''.join(random.choice(characters) for _ in range(length))
164
 
165
- @gpu(void(),device=True,inline=True)
166
- def Piper():
167
 
168
  global last_motion
169
  global ip_loaded
@@ -171,79 +168,72 @@ def Piper():
171
 
172
  x = grid(1)
173
 
174
- if last_motion != pinp["motion"]:
175
  pipe.unload_lora_weights()
176
- if pinp["motion"] != "":
177
- pipe.load_lora_weights(pinp["motion"], adapter_name="motion")
178
  pipe.fuse_lora()
179
  pipe.set_adapters(["motion"], [0.7])
180
- last_motion = pinp["motion"]
181
 
182
  pipe.to(device,dtype)
183
 
184
- if pinp["negative"]=="":
185
  out[x] = pipe(
186
- prompt=pinp["positive"],
187
  height=height,
188
  width=width,
189
- ip_adapter_image=pinp["image"].convert("RGB").resize((width,height)),
190
  num_inference_steps=step,
191
  guidance_scale=accu,
192
  num_frames=(fps*time)
193
  )
194
 
195
  out[x] = pipe(
196
- prompt=pinp["positive"],
197
- negative_prompt=pinp["negative"],
198
  height=height,
199
  width=width,
200
- ip_adapter_image=pinp["image"].convert("RGB").resize((width,height)),
201
  num_inference_steps=step,
202
  guidance_scale=accu,
203
  num_frames=(fps*time)
204
  )
205
 
206
- @gpu(void(),device=True,inline=True)
207
- def infer():
208
- global pinp
209
  global out
210
-
 
 
211
  out = [""]
212
  out.remove("")
213
-
214
- p1 = str(pm["p"])
215
-
216
- neg = str(pm["n"])
217
- if neg != "":
218
- neg = f"{neg} where in the image"
219
 
220
- _do = ['photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable', "natural", 'rough', 'best quality', 'focused', "highly detailed"]
221
- if p1 != "":
222
- _do.append(f"a new {p1} content in the image")
223
- posi = ", ".join(_do)
224
 
225
- if pm["i"] == None:
226
  return None
227
 
228
- pinp={"image":pm["i"],"positive":posi,"negative":neg,"motion":pm["m"]}
 
229
 
 
 
 
 
 
230
  ln = len(result)
231
- Piper[ln,32]()
 
 
232
  for i in range(ln):
233
  name = generate_random_string[1,32](12)+".png"
234
  export_to_gif(out[i].frames[0],name,fps=fps)
235
  out[i] = name
236
-
237
- @cpu(string[:](),cache=True,parallel=True)
238
- def handle():
239
- global pm
240
- p1_en = translate[1,32](p1,"english")
241
- p2_en = translate[1,32](p2,"english")
242
- pm = {"p":p1_en,"n":p2_en,"m":m,"i":i}
243
- infer[1,32]()
244
  return out
245
 
246
- @gpu(void(),device=True,inline=True)
247
  def ui():
248
  with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
249
  with gr.Column(elem_id="col-container"):
@@ -299,7 +289,7 @@ def ui():
299
  result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
300
  demo.queue().launch()
301
 
302
- @gpu(void(),device=True,inline=True)
303
  def pre():
304
  global pipe
305
  pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
@@ -316,7 +306,7 @@ def pre():
316
  pipe.enable_vae_slicing()
317
  pipe.enable_free_init(method="butterworth", use_fast_sampling=fast)
318
 
319
- @gpu(void(),device=True,inline=True)
320
  def events():
321
  gr.on(
322
  triggers=[
@@ -325,18 +315,18 @@ def events():
325
  prompt2.submit
326
  ],
327
  fn=handle,
 
328
  output=result
329
  )
330
 
331
- @cpu(void(),cache=True,parallel=True)
332
  def entry():
333
  os.chdir(os.path.abspath(os.path.dirname(__file__)))
334
  pre[1,32]()
335
- ui[1,32]()
336
  events[1,32]()
337
 
338
  # entry
339
 
340
- entry[1,32]()
341
 
342
  # end
 
119
  except:
120
  return ""
121
 
 
122
  def translate(text,lang):
 
123
  if text == None or lang == None:
124
  return ""
125
  text = re.sub(f'[{string.punctuation}]', '', re.sub('[\s+]', ' ', text)).lower().strip()
 
155
  print(ret)
156
  return ret
157
 
 
158
  def generate_random_string(length):
159
  characters = str(string.ascii_letters + string.digits)
160
  return ''.join(random.choice(characters) for _ in range(length))
161
 
162
+ @gpu(void())
163
+ def calc():
164
 
165
  global last_motion
166
  global ip_loaded
 
168
 
169
  x = grid(1)
170
 
171
+ if last_motion != inp["motion"]:
172
  pipe.unload_lora_weights()
173
+ if inp["motion"] != "":
174
+ pipe.load_lora_weights(inp["motion"], adapter_name="motion")
175
  pipe.fuse_lora()
176
  pipe.set_adapters(["motion"], [0.7])
177
+ last_motion = inp["motion"]
178
 
179
  pipe.to(device,dtype)
180
 
181
+ if inp["negative"]=="":
182
  out[x] = pipe(
183
+ prompt=inp["positive"],
184
  height=height,
185
  width=width,
186
+ ip_adapter_image=inp["image"].convert("RGB").resize((width,height)),
187
  num_inference_steps=step,
188
  guidance_scale=accu,
189
  num_frames=(fps*time)
190
  )
191
 
192
  out[x] = pipe(
193
+ prompt=inp["positive"],
194
+ negative_prompt=inp["negative"],
195
  height=height,
196
  width=width,
197
+ ip_adapter_image=inp["image"].convert("RGB").resize((width,height)),
198
  num_inference_steps=step,
199
  guidance_scale=accu,
200
  num_frames=(fps*time)
201
  )
202
 
203
+ def handle(*args):
204
+ global inp
 
205
  global out
206
+
207
+ inp = args
208
+
209
  out = [""]
210
  out.remove("")
 
 
 
 
 
 
211
 
212
+ inp[1] = translate(inp[1],"english")
213
+ inp[2] = translate(inp[2],"english")
 
 
214
 
215
+ if inp[0] == None:
216
  return None
217
 
218
+ if inp[2] != "":
219
+ inp[2] = f"{inp[2]} where in the image"
220
 
221
+ _do = ['photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable', "natural", 'rough', 'best quality', 'focused', "highly detailed"]
222
+ if inp[1] != "":
223
+ _do.append(f"a new {inp[1]} content in the image")
224
+ inp[1] = ", ".join(_do)
225
+
226
  ln = len(result)
227
+
228
+ calc[ln,32]()
229
+
230
  for i in range(ln):
231
  name = generate_random_string[1,32](12)+".png"
232
  export_to_gif(out[i].frames[0],name,fps=fps)
233
  out[i] = name
234
+
 
 
 
 
 
 
 
235
  return out
236
 
 
237
  def ui():
238
  with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
239
  with gr.Column(elem_id="col-container"):
 
289
  result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
290
  demo.queue().launch()
291
 
292
+ @gpu(void())
293
  def pre():
294
  global pipe
295
  pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
 
306
  pipe.enable_vae_slicing()
307
  pipe.enable_free_init(method="butterworth", use_fast_sampling=fast)
308
 
309
+ @gpu(void())
310
  def events():
311
  gr.on(
312
  triggers=[
 
315
  prompt2.submit
316
  ],
317
  fn=handle,
318
+ input=[img,prompt,prompt2,motion]
319
  output=result
320
  )
321
 
 
322
  def entry():
323
  os.chdir(os.path.abspath(os.path.dirname(__file__)))
324
  pre[1,32]()
325
+ ui()
326
  events[1,32]()
327
 
328
  # entry
329
 
330
+ entry()
331
 
332
  # end