Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -40,7 +40,7 @@ masterpiece, newest, absurdres
 torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
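Note: on a single-GPU Space the explicit index changes nothing observable; torch resolves "cuda" to the current CUDA device, which defaults to index 0. A quick check, assuming a CUDA-enabled torch build:

import torch

if torch.cuda.is_available():
    # A tensor placed on "cuda" lands on the current device,
    # which is index 0 unless torch.cuda.set_device() changed it.
    t = torch.zeros(1, device="cuda")
    print(t.device)  # device(type='cuda', index=0)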
@@ -72,6 +72,20 @@ def get_scheduler(scheduler_config: Dict, name: str) -> Optional[Callable]:
     }
     return scheduler_factory_map.get(name, lambda: None)()
 
+def load_pipeline(model_name):
+    if torch.cuda.is_available():
+        pipe = StableDiffusionXLPipeline.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16,
+            custom_pipeline="lpw_stable_diffusion_xl",
+            safety_checker=None,
+            use_safetensors=True,
+            add_watermarker=False,
+            use_auth_token=HF_TOKEN
+        )
+    pipe.to(device)
+    return pipe
+
 @spaces.GPU(enable_queue=False)
 def generate(
     prompt: str,
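Note: as committed, load_pipeline binds pipe only inside the CUDA branch, so on a CPU-only host pipe.to(device) raises NameError. A minimal guarded sketch, assuming the module globals (HF_TOKEN, device) defined earlier in app.py:

import os
import torch
from diffusers import StableDiffusionXLPipeline

HF_TOKEN = os.getenv("HF_TOKEN")  # assumed; the diff only shows the name being used
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_pipeline(model_name):
    # Fail fast instead of reaching pipe.to(device) with pipe unbound.
    if not torch.cuda.is_available():
        raise RuntimeError("load_pipeline requires a CUDA device")
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        custom_pipeline="lpw_stable_diffusion_xl",
        safety_checker=None,
        use_safetensors=True,
        add_watermarker=False,
        use_auth_token=HF_TOKEN,
    )
    pipe.to(device)
    return pipe

The diff shows no call site; presumably the pipeline is built once at import time (e.g. pipe = load_pipeline(MODEL)) so that @spaces.GPU calls to generate can reuse it.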
@@ -81,9 +95,10 @@ def generate(
     height: int = 1024,
     guidance_scale: float = 5.0,
     num_inference_steps: int = 26,
-    sampler: str = "
+    sampler: str = "Euler a",
     clip_skip: int = 1,
 ):
+    """
     if torch.cuda.is_available():
         pipe = StableDiffusionXLPipeline.from_pretrained(
             MODEL,
@@ -94,6 +109,7 @@ def generate(
             add_watermarker=False,
             use_auth_token=HF_TOKEN
         )
+    """
 
     generator = seed_everything(seed)
     pipe.scheduler = get_scheduler(pipe.scheduler.config, sampler)
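Note: the two added """ lines wrap the old in-function construction in a bare triple-quoted string, so it still parses but never runs, and generate now relies on a pipe bound elsewhere, presumably via the new load_pipeline. A sketch of the resulting shape (the module-level call and the trimmed signature are assumptions, not part of the diff):

import spaces  # ZeroGPU decorator used in the original app.py

pipe = load_pipeline(MODEL)  # assumed call site: build once at import time

@spaces.GPU(enable_queue=False)
def generate(prompt: str, seed: int = 0, sampler: str = "Euler a", clip_skip: int = 1):
    generator = seed_everything(seed)
    pipe.scheduler = get_scheduler(pipe.scheduler.config, sampler)
    # ... image generation continues as in the original function ...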