radames committed
Commit 4a0c863 · 1 Parent(s): deda693
server/config.py CHANGED
@@ -16,8 +16,6 @@ class Args(BaseModel):
     pipeline: str
     ssl_certfile: str | None
     ssl_keyfile: str | None
-    sfast: bool
-    onediff: bool = False
     compel: bool = False
     debug: bool = False
     pruna: bool = False
@@ -112,18 +110,6 @@ parser.add_argument(
     default=False,
     help="Compel",
 )
-parser.add_argument(
-    "--sfast",
-    action="store_true",
-    default=False,
-    help="Enable Stable Fast",
-)
-parser.add_argument(
-    "--onediff",
-    action="store_true",
-    default=False,
-    help="Enable OneDiff",
-)
 parser.add_argument(
     "--pruna",
     action="store_true",
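With the sfast and onediff fields and flags removed, --pruna is the only acceleration switch left on the CLI. A minimal sketch of the resulting behavior, assuming the standard argparse setup shown in the context lines (not the full server/config.py):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--pruna", action="store_true", default=False, help="Pruna")

args = parser.parse_args(["--pruna"])
assert args.pruna is True
# parser.parse_args(["--sfast"])  # now exits with "unrecognized arguments: --sfast"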
server/main.py CHANGED
@@ -5,7 +5,6 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
 from fastapi import Request
 import markdown2
-from pipelines.utils.safety_checker import SafetyChecker
 from PIL import Image
 import logging
 from config import config, Args
@@ -53,8 +52,10 @@ class App:
         self.pipeline = pipeline_instance
         self.app = FastAPI()
         self.conn_manager = ConnectionManager()
-        self.safety_checker: SafetyChecker | None = None
+        self.safety_checker = None
         if self.args.safety_checker:
+            from pipelines.utils.safety_checker import SafetyChecker
+
             self.safety_checker = SafetyChecker(device=device.type)
         self.init_app()

@@ -80,9 +81,13 @@ class App:
                 # Handle websocket disconnection event
                 code = disconnect_error.code
                 if code == 1006:  # ABNORMAL_CLOSURE
-                    logging.info(f"WebSocket abnormally closed for user {user_id}: Connection was closed without a proper close handshake")
+                    logging.info(
+                        f"WebSocket abnormally closed for user {user_id}: Connection was closed without a proper close handshake"
+                    )
                 else:
-                    logging.info(f"WebSocket disconnected for user {user_id} with code {code}: {disconnect_error.reason}")
+                    logging.info(
+                        f"WebSocket disconnected for user {user_id} with code {code}: {disconnect_error.reason}"
+                    )
             except RuntimeError as e:
                 if any(err in str(e) for err in ERROR_MESSAGES):
                     logging.info(f"WebSocket disconnected for user {user_id}: {e}")
@@ -268,10 +273,14 @@ class App:
            # Handle websocket disconnection event
            code = disconnect_error.code
            if code == 1006:  # ABNORMAL_CLOSURE
-                logging.info(f"WebSocket abnormally closed during streaming for user {user_id}: Connection was closed without a proper close handshake")
+                logging.info(
+                    f"WebSocket abnormally closed during streaming for user {user_id}: Connection was closed without a proper close handshake"
+                )
            else:
-                logging.info(f"WebSocket disconnected during streaming for user {user_id} with code {code}: {disconnect_error.reason}")
-
+                logging.info(
+                    f"WebSocket disconnected during streaming for user {user_id} with code {code}: {disconnect_error.reason}"
+                )
+
            # Clean disconnection without error response
            await self.conn_manager.disconnect(user_id)
            raise HTTPException(status_code=204, detail="Connection closed")
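The SafetyChecker import moves from module scope into the branch that actually uses it, so its weights and dependencies are only loaded when --safety-checker is enabled. A condensed sketch of this deferred-import pattern, using only names that appear in the diff:

class App:
    def __init__(self, args, device):
        self.safety_checker = None
        if args.safety_checker:
            # Import deferred until the feature is requested, keeping
            # startup light when the safety checker is disabled.
            from pipelines.utils.safety_checker import SafetyChecker

            self.safety_checker = SafetyChecker(device=device.type)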
server/pipelines/IPcompositionHyperSD15.py CHANGED
@@ -129,19 +129,6 @@ class Pipeline:
         # pipe.unet = oneflow_compile(pipe.unet, options=compile_options)
         # pipe.vae.decoder = oneflow_compile(pipe.vae.decoder, options=compile_options)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            # cofig.
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/IPcompositionHyperSDXL.py CHANGED
@@ -130,18 +130,6 @@ class Pipeline:
         self.pipe.scheduler = TCDScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.set_ip_adapter_scale([0.8])

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnet.py CHANGED
@@ -179,28 +179,6 @@ class Pipeline:
             smash_config["compiler"] = "stable_fast"
             self.pipe = smash(model=self.pipe, smash_config=smash_config)

-        if args.sfast:
-            print("\nRunning sfast compile\n")
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
-        if args.onediff:
-            print("\nRunning onediff compile\n")
-            from onediff.infer_compiler import oneflow_compile
-
-            self.pipe.unet = oneflow_compile(self.pipe.unet)
-            self.pipe.vae.encoder = oneflow_compile(self.pipe.vae.encoder)
-            self.pipe.vae.decoder = oneflow_compile(self.pipe.vae.decoder)
-            self.pipe.controlnet = oneflow_compile(self.pipe.controlnet)
-
         self.canny_torch = SobelOperator(device=device)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
server/pipelines/controlnetDepthFlashSD.py CHANGED
@@ -169,18 +169,6 @@ class Pipeline:
         )
         self.pipe.fuse_lora()

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetDepthHyperSD.py CHANGED
@@ -169,18 +169,6 @@ class Pipeline:

         self.pipe.fuse_lora()

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetDepthHyperSDXL.py CHANGED
@@ -168,18 +168,6 @@ class Pipeline:

         self.pipe.fuse_lora()

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetFlashSD.py CHANGED
@@ -161,18 +161,6 @@ class Pipeline:

         self.canny_torch = SobelOperator(device=device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetFlashSDXL.py CHANGED
@@ -162,18 +162,6 @@ class Pipeline:

         self.canny_torch = SobelOperator(device=device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetHyperSD.py CHANGED
@@ -177,18 +177,6 @@ class Pipeline:
         self.pipe.fuse_lora()
         self.canny_torch = SobelOperator(device=device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetHyperSDXL.py CHANGED
@@ -182,18 +182,6 @@ class Pipeline:
         self.pipe.fuse_lora()
         self.canny_torch = SobelOperator(device=device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetLoraSDXL-Lightning.py CHANGED
@@ -202,18 +202,6 @@ class Pipeline:
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         if device.type != "mps":
             self.pipe.unet.to(memory_format=torch.channels_last)

server/pipelines/controlnetLoraSDXL.py CHANGED
@@ -194,18 +194,6 @@ class Pipeline:
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype).to(device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         if device.type != "mps":
             self.pipe.unet.to(memory_format=torch.channels_last)

server/pipelines/controlnetMistoLineHyperSDXL.py CHANGED
@@ -186,18 +186,6 @@ class Pipeline:
             "TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"
         ).to(device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            # config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device)
         if device.type != "mps":
server/pipelines/controlnetSDTurbo.py CHANGED
@@ -174,28 +174,6 @@ class Pipeline:
             taesd_model, torch_dtype=torch_dtype, use_safetensors=True
         ).to(device)

-        if args.sfast:
-            print("\nRunning sfast compile\n")
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
-        if args.onediff:
-            print("\nRunning onediff compile\n")
-            from onediff.infer_compiler import oneflow_compile
-
-            self.pipe.unet = oneflow_compile(self.pipe.unet)
-            self.pipe.vae.encoder = oneflow_compile(self.pipe.vae.encoder)
-            self.pipe.vae.decoder = oneflow_compile(self.pipe.vae.decoder)
-            self.pipe.controlnet = oneflow_compile(self.pipe.controlnet)
-
         self.canny_torch = SobelOperator(device=device)

         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
server/pipelines/controlnetSDXLTurbo.py CHANGED
@@ -180,18 +180,6 @@ class Pipeline:
         )
         self.canny_torch = SobelOperator(device=device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype).to(device)
         if device.type != "mps":
server/pipelines/controlnetSegmindVegaRT.py CHANGED
@@ -188,18 +188,6 @@ class Pipeline:
             base_model, subfolder="scheduler"
         )

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype).to(device)
         if device.type != "mps":
server/pipelines/img2img.py CHANGED
@@ -105,18 +105,6 @@ class Pipeline:
             taesd_model, torch_dtype=torch_dtype, use_safetensors=True
         ).to(device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
         if device.type != "mps":
server/pipelines/img2imgSDTurbo.py CHANGED
@@ -102,7 +102,7 @@ class Pipeline:
         if args.pruna:
             # Create and smash your model
             smash_config = SmashConfig()
-            # smash_config["cacher"] = "deepcache"
+            smash_config["cacher"] = "deepcache"
             smash_config["compiler"] = "stable_fast"
             self.pipe = smash(model=self.pipe, smash_config=smash_config)

server/pipelines/img2imgSDXL-Lightning.py CHANGED
@@ -143,18 +143,6 @@ class Pipeline:
             smash_config["compiler"] = "stable_fast"
             self.pipe = smash(model=self.pipe, smash_config=smash_config)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
         if device.type != "mps":
server/pipelines/img2imgSDXLTurbo.py CHANGED
@@ -121,18 +121,6 @@ class Pipeline:
             taesd_model, torch_dtype=torch_dtype, use_safetensors=True
         ).to(device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         if device.type != "mps":
             self.pipe.unet.to(memory_format=torch.channels_last)

server/pipelines/img2imgSDXS512.py CHANGED
@@ -102,18 +102,6 @@ class Pipeline:
             taesd_model, torch_dtype=torch_dtype, use_safetensors=True
         ).to(device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
         if device.type != "mps":
server/pipelines/img2imgSegmindVegaRT.py CHANGED
@@ -121,17 +121,6 @@ class Pipeline:
         self.pipe.scheduler = LCMScheduler.from_pretrained(
             base_model, subfolder="scheduler"
         )
-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)

         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
server/pipelines/txt2img.py CHANGED
@@ -94,18 +94,6 @@ class Pipeline:
             smash_config["compiler"] = "stable_fast"
             self.pipe = smash(model=self.pipe, smash_config=smash_config)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
         if device.type != "mps":
server/pipelines/txt2imgLora.py CHANGED
@@ -119,18 +119,6 @@ class Pipeline:

         self.pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         if args.compel:
             self.compel_proc = Compel(
                 tokenizer=self.pipe.tokenizer,
server/pipelines/txt2imgLoraSDXL.py CHANGED
@@ -108,18 +108,6 @@ class Pipeline:
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype).to(device)

-        if args.sfast:
-            from sfast.compilers.stable_diffusion_pipeline_compiler import (
-                compile,
-                CompilationConfig,
-            )
-
-            config = CompilationConfig.Default()
-            config.enable_xformers = True
-            config.enable_triton = True
-            config.enable_cuda_graph = True
-            self.pipe = compile(self.pipe, config=config)
-
         if device.type != "mps":
             self.pipe.unet.to(memory_format=torch.channels_last)

server/pipelines/utils/safety_checker.py CHANGED
@@ -143,10 +143,10 @@ class SafetyChecker:

         self.device = device
         self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
-            "CompVis/stable-diffusion-safety-checker"
+            "CompVis/stable-diffusion-safety-checker",
         ).to(device)
         self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
-            "openai/clip-vit-base-patch32"
+            "openai/clip-vit-base-patch32",
         )

     def __call__(
server/requirements.txt CHANGED
@@ -20,7 +20,7 @@ setuptools
 mpmath
 controlnet-aux
 sentencepiece
-optimum-quanto # has to be optimum-quanto==0.2.5 for pruna int4
+optimum-quanto==0.2.5
 gguf
 types-Pillow
 mypy
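The old inline comment said the pin exists because Pruna's int4 path needs exactly 0.2.5; now that the pin is enforced in the requirements file, a hedged runtime sanity check using only the standard library could look like:

from importlib.metadata import version

# Assumption carried over from the removed comment: pruna int4 requires 0.2.5.
assert version("optimum-quanto") == "0.2.5"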