ehristoforu committed
Commit fc6e54f · verified · 1 parent: aa73c1e

Update app.py

Files changed (1): app.py (+23, -28)
app.py CHANGED

@@ -9,12 +9,15 @@ import numpy as np
 from PIL import Image
 import spaces
 import torch
-from diffusers import StableDiffusionXLPipeline, KDPM2AncestralDiscreteScheduler, AutoencoderKL
-
+from diffusers import (
+    StableDiffusionXLPipeline,
+    KDPM2AncestralDiscreteScheduler,
+    AutoencoderKL
+)
 DESCRIPTION = """
-# Proteus ```V0.3```
+# Mobius
 
-Model by [dataautogpt3](https://huggingface.co/dataautogpt3)
+Model by [Corcel.io](https://huggingface.co/Corcelio/mobius)
 
 Demo by [ehristoforu](https://huggingface.co/ehristoforu)
 """
@@ -29,27 +32,19 @@ ENABLE_CPU_OFFLOAD = 0
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 
-if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_pretrained(
-        "dataautogpt3/ProteusV0.3",
-        use_safetensors=False,
-    )
-    if ENABLE_CPU_OFFLOAD:
-        pipe.enable_model_cpu_offload()
-    else:
-        vae = AutoencoderKL.from_pretrained(
-            "madebyollin/sdxl-vae-fp16-fix",
-            torch_dtype=torch.float16
-        )
-    pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-    pipe.to(device)
-    print("Loaded on Device!")
-    pipe.load_lora_weights("stabilityai/stable-diffusion-xl-base-1.0", weight_name="sd_xl_offset_example-lora_1.0.safetensors")
-    pipe.fuse_lora(lora_scale=0.1)
-    if USE_TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-        print("Model Compiled!")
-
+vae = AutoencoderKL.from_pretrained(
+    "madebyollin/sdxl-vae-fp16-fix",
+    torch_dtype=torch.float16
+)
+
+# Configure the pipeline
+pipe = StableDiffusionXLPipeline.from_pretrained(
+    "Corcelio/mobius",
+    vae=vae,
+    torch_dtype=torch.float16
+)
+pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+pipe.to('cuda')
 
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
@@ -87,7 +82,7 @@ def generate(
             width=width,
             height=height,
             guidance_scale=guidance_scale,
-            num_inference_steps=35,
+            num_inference_steps=25,
             num_images_per_prompt=1,
             output_type="pil",
         ).images
@@ -115,7 +110,7 @@ footer {
     visibility: hidden
 }
 '''
-with gr.Blocks(title="Proteus V0.3", css=css) as demo:
+with gr.Blocks(title="Mobius", css=css) as demo:
     gr.Markdown(DESCRIPTION)
     gr.DuplicateButton(
         value="Duplicate Space for private use",
@@ -157,14 +152,14 @@ with gr.Blocks(title="Proteus V0.3", css=css) as demo:
                 minimum=512,
                 maximum=1536,
                 step=8,
-                value=768,
+                value=1024,
             )
             height = gr.Slider(
                 label="Height",
                 minimum=512,
                 maximum=1536,
                 step=8,
-                value=768,
+                value=1024,
             )
         with gr.Row():
            guidance_scale = gr.Slider(
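
For context only (not part of the commit), a minimal sketch of how the pipeline configured by this change would be exercised standalone, assuming a CUDA device. The repo IDs, VAE, scheduler, 25-step count, and 1024x1024 defaults come from the diff above; the prompt and guidance_scale value are illustrative placeholders.

import torch
from diffusers import StableDiffusionXLPipeline, KDPM2AncestralDiscreteScheduler, AutoencoderKL

# Same setup as the updated app.py: fp16-fix VAE plus Mobius weights in float16.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained("Corcelio/mobius", vae=vae, torch_dtype=torch.float16)
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# Call mirroring generate() after this commit: 25 inference steps, 1024x1024 defaults.
# The prompt and guidance_scale below are placeholders, not values from the commit.
image = pipe(
    prompt="a photograph of an astronaut riding a horse",
    width=1024,
    height=1024,
    guidance_scale=7.0,
    num_inference_steps=25,
    num_images_per_prompt=1,
    output_type="pil",
).images[0]
image.save("sample.png")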