Falln87 committed on
Commit
ebebe58
·
verified ·
1 Parent(s): f08e6cd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -1,18 +1,20 @@
1
  import gradio as gr
2
  import numpy as np
3
  import random
4
- from diffusers import DiffusionPipeline
5
  import torch
6
 
 
 
7
  device = "cuda" if torch.cuda.is_available() else "cpu"
8
 
9
  if torch.cuda.is_available():
10
  torch.cuda.max_memory_allocated(device=device)
11
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", token=hf_token, torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
12
  pipe.enable_xformers_memory_efficient_attention()
13
  pipe = pipe.to(device)
14
  else:
15
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", token=hf_token, use_safetensors=True)
16
  pipe = pipe.to(device)
17
 
18
  MAX_SEED = np.iinfo(np.int32).max
 
1
  import gradio as gr
2
  import numpy as np
3
  import random
4
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
5
  import torch
6
 
7
+
8
+
9
  device = "cuda" if torch.cuda.is_available() else "cpu"
10
 
11
  if torch.cuda.is_available():
12
  torch.cuda.max_memory_allocated(device=device)
13
+ pipe = Transformer2DModelOutput("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
14
  pipe.enable_xformers_memory_efficient_attention()
15
  pipe = pipe.to(device)
16
  else:
17
+ pipe = Transformer2DModelOutput("stabilityai/stable-diffusion-3-medium-diffusers", use_safetensors=True)
18
  pipe = pipe.to(device)
19
 
20
  MAX_SEED = np.iinfo(np.int32).max