Yaron Koresh committed on
Commit
688d8e9
·
verified ·
1 Parent(s): 4afc319

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -6
app.py CHANGED
@@ -16,7 +16,7 @@ import threading
16
  import asyncio
17
  from queue import Queue as BlockingQueue
18
  from functools import partial
19
- from multiprocessing import Process, , Queue
20
 
21
  # external
22
 
@@ -27,7 +27,7 @@ from lxml.html import fromstring
27
  from diffusers.utils import export_to_gif, load_image
28
  from huggingface_hub import hf_hub_download
29
  from safetensors.torch import load_file, save_file
30
- from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerAncestralDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
31
 
32
  # logging
33
 
@@ -64,12 +64,13 @@ result = []
64
 
65
  # precision data
66
 
67
- fast=True
 
68
  fps=10
69
  time=1
70
  width=896
71
  height=896
72
- step=25
73
  accu=7.5
74
 
75
  # ui data
@@ -115,7 +116,7 @@ function custom(){
115
  }
116
  """
117
 
118
- # torch pipe
119
 
120
  pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter).to(device)
121
  pipe.scheduler = DDIMScheduler(
@@ -130,6 +131,8 @@ pipe.scheduler = DDIMScheduler(
130
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
131
  pipe.enable_free_init(method="butterworth", use_fast_sampling=fast)
132
 
 
 
133
  # Parallelism
134
 
135
  def parallel(*pairs):
@@ -215,7 +218,18 @@ def handle_generate(*inp):
215
  last_motion = motion
216
 
217
  pipe.to(device,dtype=dtype)
218
-
 
 
 
 
 
 
 
 
 
 
 
219
  calc_out.append(
220
  pipe(
221
  prompt=p1,
 
16
  import asyncio
17
  from queue import Queue as BlockingQueue
18
  from functools import partial
19
+ from multiprocessing import Process, Queue
20
 
21
  # external
22
 
 
27
  from diffusers.utils import export_to_gif, load_image
28
  from huggingface_hub import hf_hub_download
29
  from safetensors.torch import load_file, save_file
30
+ from diffusers import FluxPipeline, DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerAncestralDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
31
 
32
  # logging
33
 
 
64
 
65
  # precision data
66
 
67
+ seq=512
68
+ fast=False
69
  fps=10
70
  time=1
71
  width=896
72
  height=896
73
+ step=50
74
  accu=7.5
75
 
76
  # ui data
 
116
  }
117
  """
118
 
119
+ # torch pipes
120
 
121
  pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter).to(device)
122
  pipe.scheduler = DDIMScheduler(
 
131
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
132
  pipe.enable_free_init(method="butterworth", use_fast_sampling=fast)
133
 
134
+ pipe_flux = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(device,dtype=dtype)
135
+
136
  # Parallelism
137
 
138
  def parallel(*pairs):
 
218
  last_motion = motion
219
 
220
  pipe.to(device,dtype=dtype)
221
+
222
+ if not img:
223
+ img = pipe(
224
+ prompt=p1,
225
+ height=height,
226
+ width=width,
227
+ guidance_scale=accu,
228
+ num_inference_steps=step,
229
+ max_sequence_length=seq,
230
+ generator=torch.Generator("cuda").manual_seed(0)
231
+ ).images[0]
232
+
233
  calc_out.append(
234
  pipe(
235
  prompt=p1,