Update app.py
app.py CHANGED
@@ -20,6 +20,7 @@ class Model:
 
 models = [
     Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
+    Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
     Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
     Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
     Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
@@ -66,6 +67,7 @@ else:
 
 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
+    pipe.enable_xformers_memory_efficient_attention()
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
@@ -132,6 +134,7 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
 
         if torch.cuda.is_available():
             pipe = pipe.to("cuda")
+            pipe.enable_xformers_memory_efficient_attention()
         last_mode = "txt2img"
 
     prompt = current_model.prefix + prompt
@@ -175,6 +178,7 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
 
         if torch.cuda.is_available():
             pipe = pipe.to("cuda")
+            pipe.enable_xformers_memory_efficient_attention()
         last_mode = "img2img"
 
     prompt = current_model.prefix + prompt
@@ -184,7 +188,7 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
         prompt,
         negative_prompt = neg_prompt,
         num_images_per_prompt=n_images,
-
+        image = img,
         num_inference_steps = int(steps),
         strength = strength,
         guidance_scale = guidance,
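
Taken together, the hunks above do three things: register a Dreamlike Diffusion 1.0 entry in the models list, enable xformers memory-efficient attention wherever a pipeline is moved to CUDA, and pass the input image to the img2img call via the image= keyword. Below is a minimal, self-contained sketch of that pattern, assuming the stock diffusers StableDiffusionImg2ImgPipeline; the file names, prompt, and numeric settings are illustrative and not taken from the rest of app.py.

# Minimal sketch of the pattern the diff applies (assumed diffusers img2img API;
# file names, prompt text, and numeric settings here are illustrative only).
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image

model_id = "dreamlike-art/dreamlike-diffusion-1.0"   # the model added to `models`
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, torch_dtype=dtype)

if torch.cuda.is_available():
    pipe = pipe.to("cuda")
    # Requires the xformers package; reduces attention memory use, which is
    # what the added enable_xformers_memory_efficient_attention() calls buy
    # on a shared GPU.
    pipe.enable_xformers_memory_efficient_attention()

init_img = Image.open("input.png").convert("RGB").resize((512, 512))

result = pipe(
    "dreamlikeart a castle above the clouds",  # model prefix "dreamlikeart " + user prompt
    negative_prompt="blurry, low quality",
    num_images_per_prompt=1,
    image=init_img,              # the keyword the last hunk adds to the call
    num_inference_steps=25,
    strength=0.75,
    guidance_scale=7.5,
)
result.images[0].save("output.png")

The same two CUDA-setup lines appear in three places in the diff (module level, txt_to_img, and img_to_img), which is consistent with the app rebuilding the pipeline whenever the selected model or generation mode changes rather than once per request.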