Update src/pipeline.py
src/pipeline.py  +4 -4
@@ -56,10 +56,10 @@ def load_pipeline() -> Pipeline:
         cache_dir, subfolder="transformer", torch_dtype=torch.bfloat16, quantization_config=config
     )
     text_encoder_2 = T5EncoderModel.from_pretrained(
-
+        cache_dir, subfolder="text_encoder_2", torch_dtype=torch.bfloat16, quantization_config=config
     )
     text_encoder = CLIPTextModel.from_pretrained(
-
+        cache_dir, subfolder="text_encoder", torch_dtype=torch.bfloat16, quantization_config=config
     )
     # vae=AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=dtype)
     pipeline = DiffusionPipeline.from_pretrained(
@@ -73,8 +73,8 @@ def load_pipeline() -> Pipeline:
     torch.backends.cudnn.benchmark = True
     torch.backends.cuda.matmul.allow_tf32 = True
     torch.cuda.set_per_process_memory_fraction(0.95)
-    pipeline.text_encoder.to(memory_format=torch.channels_last)
-    pipeline.transformer.to(memory_format=torch.channels_last)
+    # pipeline.text_encoder.to(memory_format=torch.channels_last)
+    # pipeline.transformer.to(memory_format=torch.channels_last)
     # torch.jit.enable_onednn_fusion(True)
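For context, a minimal sketch of how the patched load_pipeline() reads once the text-encoder arguments are filled in and the channels_last casts are commented out. Everything outside the lines shown in the diff is an assumption: the FLUX-style checkpoint layout (CLIP under text_encoder, T5 under text_encoder_2), the FluxTransformer2DModel class, the bitsandbytes 4-bit configs (the diff only shows a shared `config` object being passed through), and the ckpt_id / cache_dir values.

import torch
from diffusers import BitsAndBytesConfig as DiffusersBnb
from diffusers import DiffusionPipeline, FluxTransformer2DModel
from transformers import BitsAndBytesConfig as TransformersBnb
from transformers import CLIPTextModel, T5EncoderModel

Pipeline = DiffusionPipeline  # assumed alias for the original `-> Pipeline` annotation

ckpt_id = "black-forest-labs/FLUX.1-dev"  # assumed checkpoint id (FLUX-style layout)
cache_dir = ckpt_id                       # assumed: the diff passes `cache_dir` as the model path


def load_pipeline() -> Pipeline:
    # The diff reuses one shared `config`; here it is split into the two
    # library-specific BitsAndBytes configs so the sketch is self-contained.
    dit_quant = DiffusersBnb(load_in_4bit=True, bnb_4bit_quant_type="nf4",
                             bnb_4bit_compute_dtype=torch.bfloat16)
    txt_quant = TransformersBnb(load_in_4bit=True, bnb_4bit_quant_type="nf4",
                                bnb_4bit_compute_dtype=torch.bfloat16)

    transformer = FluxTransformer2DModel.from_pretrained(
        cache_dir, subfolder="transformer", torch_dtype=torch.bfloat16, quantization_config=dit_quant
    )
    text_encoder_2 = T5EncoderModel.from_pretrained(
        cache_dir, subfolder="text_encoder_2", torch_dtype=torch.bfloat16, quantization_config=txt_quant
    )
    text_encoder = CLIPTextModel.from_pretrained(
        cache_dir, subfolder="text_encoder", torch_dtype=torch.bfloat16, quantization_config=txt_quant
    )

    # Quantized components are handed to the pipeline instead of being loaded again.
    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        transformer=transformer,
        text_encoder=text_encoder,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )

    # Global CUDA knobs kept from the patched file; the channels_last casts
    # are commented out exactly as in the diff (the reason is not stated there).
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.cuda.set_per_process_memory_fraction(0.95)
    # pipeline.text_encoder.to(memory_format=torch.channels_last)
    # pipeline.transformer.to(memory_format=torch.channels_last)
    # torch.jit.enable_onednn_fusion(True)
    return pipeline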