Spaces:
Running
Running
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -39,7 +39,7 @@ import gradio as gr
|
|
39 |
from lxml.html import fromstring
|
40 |
from huggingface_hub import hf_hub_download
|
41 |
from safetensors.torch import load_file, save_file
|
42 |
-
from diffusers import
|
43 |
from PIL import Image, ImageDraw, ImageFont
|
44 |
from transformers import pipeline, T5ForConditionalGeneration, T5Tokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel
|
45 |
from refiners.fluxion.utils import manual_seed
|
@@ -450,9 +450,9 @@ MAX_SEED = np.iinfo(np.int32).max
|
|
450 |
|
451 |
# precision data
|
452 |
|
453 |
-
seq=
|
454 |
-
image_steps=
|
455 |
-
img_accu=
|
456 |
|
457 |
# ui data
|
458 |
|
@@ -515,7 +515,7 @@ function custom(){
|
|
515 |
|
516 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
517 |
#good_vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype).to(device)
|
518 |
-
image_pipe =
|
519 |
#image_pipe.enable_model_cpu_offload()
|
520 |
|
521 |
torch.cuda.empty_cache()
|
|
|
39 |
from lxml.html import fromstring
|
40 |
from huggingface_hub import hf_hub_download
|
41 |
from safetensors.torch import load_file, save_file
|
42 |
+
from diffusers import FluxPipeline, AutoencoderTiny, AutoencoderKL, FluxPipeline, FlowMatchEulerDiscreteScheduler
|
43 |
from PIL import Image, ImageDraw, ImageFont
|
44 |
from transformers import pipeline, T5ForConditionalGeneration, T5Tokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel
|
45 |
from refiners.fluxion.utils import manual_seed
|
|
|
450 |
|
451 |
# precision data
|
452 |
|
453 |
+
seq=256
|
454 |
+
image_steps=8
|
455 |
+
img_accu=0
|
456 |
|
457 |
# ui data
|
458 |
|
|
|
515 |
|
516 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
517 |
#good_vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype).to(device)
|
518 |
+
image_pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype, vae=taef1).to(device) # ostris/Flex.1-alpha
|
519 |
#image_pipe.enable_model_cpu_offload()
|
520 |
|
521 |
torch.cuda.empty_cache()
|