Commit 122cc9f by Keshabwi66 (verified) · Parent(s): 1063931

Update app.py

Files changed (1): app.py (+8 -8)

app.py CHANGED
@@ -55,7 +55,7 @@ def pil_to_tensor(images):
 args = Args()

 # Define the data type for model weights
-weight_dtype = torch.float16
+weight_dtype = torch.float32

 if args.seed is not None:
     set_seed(args.seed)
@@ -66,32 +66,32 @@ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_pa
 vae = AutoencoderKL.from_pretrained(
     args.pretrained_model_name_or_path,
     subfolder="vae",
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 )
 unet = UNet2DConditionModel.from_pretrained(
     args.pretrained_model_name_or_path,
     subfolder="unet",
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 )
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
     args.pretrained_model_name_or_path,
     subfolder="image_encoder",
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 )
 unet_encoder = UNet2DConditionModel_ref.from_pretrained(
     args.pretrained_model_name_or_path,
     subfolder="unet_encoder",
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 )
 text_encoder_one = CLIPTextModel.from_pretrained(
     args.pretrained_model_name_or_path,
     subfolder="text_encoder",
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 )
 text_encoder_two = CLIPTextModelWithProjection.from_pretrained(
     args.pretrained_model_name_or_path,
     subfolder="text_encoder_2",
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 )
 tokenizer_one = AutoTokenizer.from_pretrained(
     args.pretrained_model_name_or_path,
@@ -128,7 +128,7 @@ pipe = TryonPipeline.from_pretrained(
     scheduler = noise_scheduler,
     image_encoder=image_encoder,
     unet_encoder = unet_encoder,
-    torch_dtype=torch.float16,
+    torch_dtype=torch.float32,
 ).to(device)
 # pipe.enable_sequential_cpu_offload()
 # pipe.enable_model_cpu_offload()
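The commit replaces every hard-coded torch.float16 with torch.float32; half-precision weights generally assume a CUDA GPU, while full precision is the usual fallback on CPU-only hardware. A minimal sketch of selecting the dtype at runtime instead of hard-coding it, so one code path serves both setups (the model id below is a placeholder for illustration, not taken from this commit):

import torch
from diffusers import AutoencoderKL

# float16 halves weight memory on CUDA GPUs; float32 is the safe default
# on CPU, where half-precision kernels are often unsupported or slow.
weight_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Load one component with the chosen dtype; the same weight_dtype would be
# reused for the unet, text encoders, and pipeline in the app above.
vae = AutoencoderKL.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder model id
    subfolder="vae",
    torch_dtype=weight_dtype,
)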