1inkusFace committed
Commit cb5e540 (verified) · Parent(s): 83d0be8

Update app.py

Files changed (1)
  app.py  +12 -12
app.py CHANGED
@@ -128,10 +128,10 @@ def infer_60(
     progress=gr.Progress(track_tqdm=True),
 ):
     pipe.vae=vaeX.to('cpu')
-    pipe.transformer=ll_transformer
-    pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
-    pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-    pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.transformer=ll_transformer
+    pipe.config.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
@@ -178,10 +178,10 @@ def infer_90(
     progress=gr.Progress(track_tqdm=True),
 ):
     pipe.vae=vaeX.to('cpu')
-    pipe.transformer=ll_transformer
-    pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
-    pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-    pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.transformer=ll_transformer
+    pipe.config.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
@@ -228,10 +228,10 @@ def infer_110(
     progress=gr.Progress(track_tqdm=True),
 ):
     pipe.vae=vaeX.to('cpu')
-    pipe.transformer=ll_transformer
-    pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
-    pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-    pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.transformer=ll_transformer
+    pipe.config.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.config.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
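
Note: on a diffusers pipeline, components are ordinarily hot-swapped by assigning the preloaded modules directly on the pipeline object, which is the pattern on the removed lines; whether routing the assignments through pipe.config registers the modules the same way is not confirmed here. A minimal sketch of the direct-assignment pattern, assuming the model repo named in the inline comments above; the MAX_SEED constant and the prompt are hypothetical stand-ins for values defined elsewhere in app.py:

import random
import torch
from diffusers import StableDiffusion3Pipeline
from transformers import CLIPTextModelWithProjection

MAX_SEED = 2**32 - 1  # assumption: app.py defines its own MAX_SEED

# Assumed setup, using the repo referenced in the inline comments.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-large-bf16", torch_dtype=torch.bfloat16
).to('cuda')
text_encoder = CLIPTextModelWithProjection.from_pretrained(
    "ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder',
    torch_dtype=torch.bfloat16
).to('cuda')

# Direct attribute assignment re-registers the module on the pipeline,
# so subsequent pipe(...) calls run with the swapped-in component.
pipe.text_encoder = text_encoder

seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device='cuda').manual_seed(seed)
image = pipe("an astronaut riding a horse", generator=generator).images[0]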