modify the codes.
Files changed:
- app.py +11 -1
- ldm/modules/attention.py +1 -1
- ldm/modules/diffusionmodules/model.py +1 -1
- scripts/rendertext_tool.py +9 -6
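Taken together, the changes move seed and memory-saving setup out of scripts/rendertext_tool.py: app.py now seeds the RNGs and configures sliced attention once at startup, Render_Text seeds per call at the top of its processing method, and the hard XFORMERS_IS_AVAILBLE overrides in the two ldm modules are commented out so the import probe alone decides.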
app.py
CHANGED

@@ -7,6 +7,8 @@ import os
 import torch
 import time
 from PIL import Image
+from cldm.hack import disable_verbosity, enable_sliced_attention
+from pytorch_lightning import seed_everything
 
 def process_multi_wrapper(rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
                           shared_prompt,
@@ -104,7 +106,15 @@ def load_ckpt(model_ckpt = "LAION-Glyph-10M-Epoch-5"):
     allow_run_generation = False
     return output_str, None, allow_run_generation
 
-SAVE_MEMORY =
+SAVE_MEMORY = False
+shared_seed = 0
+if shared_seed == -1:
+    shared_seed = random.randint(0, 65535)
+seed_everything(shared_seed)
+
+disable_verbosity()
+if SAVE_MEMORY:
+    enable_sliced_attention()
 cfg = OmegaConf.load("config.yaml")
 model = load_model_from_config(cfg, "laion10M_epoch_6_model_wo_ema.ckpt", verbose=True)
 # model = load_model_from_config(cfg, "model_wo_ema.ckpt", verbose=True)
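The startup block added to app.py follows a common seeding pattern. A minimal runnable sketch of it (pytorch_lightning is a real dependency here; cldm.hack is repo-specific and omitted):

    import random
    from pytorch_lightning import seed_everything

    SAVE_MEMORY = False   # when True, app.py additionally calls enable_sliced_attention()
    shared_seed = 0       # fixed seed; -1 means "pick one at random"

    if shared_seed == -1:
        shared_seed = random.randint(0, 65535)
    seed_everything(shared_seed)  # seeds Python, NumPy, and torch RNGs in one call

With shared_seed hard-coded to 0, the randint branch never fires at startup; it exists so the same snippet works when a caller passes -1.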
ldm/modules/attention.py
CHANGED

@@ -15,7 +15,7 @@ try:
     XFORMERS_IS_AVAILBLE = True
 except:
     XFORMERS_IS_AVAILBLE = False
-XFORMERS_IS_AVAILBLE = False
+# XFORMERS_IS_AVAILBLE = False
 DETERMISTIC = False
 
 def exists(val):
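Both ldm modules gate xformers use behind the same availability probe; with the hard override commented out, the flag once again reflects whether the package actually imports. A sketch of the pattern (using ImportError instead of the bare except: in the source):

    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILBLE = True
    except ImportError:
        XFORMERS_IS_AVAILBLE = False
        print("No module 'xformers'. Proceeding without it.")
    # XFORMERS_IS_AVAILBLE = False  # debug override, now disabled

(The AVAILBLE spelling is the repo's own identifier and is kept as-is.)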
ldm/modules/diffusionmodules/model.py
CHANGED

@@ -15,7 +15,7 @@ try:
 except:
     XFORMERS_IS_AVAILBLE = False
     print("No module 'xformers'. Proceeding without it.")
-XFORMERS_IS_AVAILBLE = False
+# XFORMERS_IS_AVAILBLE = False
 
 def get_timestep_embedding(timesteps, embedding_dim):
     """
scripts/rendertext_tool.py
CHANGED

@@ -2,9 +2,9 @@ from cldm.ddim_hacked import DDIMSampler
 import torch
 from annotator.render_images import render_text_image_custom
 from pytorch_lightning import seed_everything
-save_memory = False
-from cldm.hack import disable_verbosity
-disable_verbosity()
+# save_memory = False
+# from cldm.hack import disable_verbosity
+# disable_verbosity()
 import random
 import einops
 import numpy as np
@@ -95,6 +95,9 @@ class Render_Text:
                  shared_eta, shared_a_prompt, shared_n_prompt,
                  only_show_rendered_image=False
                  ):
+        if shared_seed == -1:
+            shared_seed = random.randint(0, 65535)
+        seed_everything(shared_seed)
         with torch.no_grad(), \
             self.precision_scope("cuda"), \
             self.model.ema_scope("Sampling on Benchmark Prompts"):
@@ -136,9 +139,9 @@
 
             H, W = shared_image_resolution, shared_image_resolution
 
-            if shared_seed == -1:
-                shared_seed = random.randint(0, 65535)
-            seed_everything(shared_seed)
+            # if shared_seed == -1:
+            #     shared_seed = random.randint(0, 65535)
+            # seed_everything(shared_seed)
 
             if torch.cuda.is_available() and self.save_memory:
                 print("low_vram_shift: is_diffusing", False)
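The practical effect in scripts/rendertext_tool.py is that seeding now happens at the top of the processing method, before the torch.no_grad() / precision-scope context is entered, instead of midway through sampling. A minimal sketch of the resulting control flow (process is a stand-in name for the method in the diff):

    import random
    import torch
    from pytorch_lightning import seed_everything

    def process(shared_seed=-1):
        # Seed first, so every random draw in the sampling block is reproducible.
        if shared_seed == -1:
            shared_seed = random.randint(0, 65535)
        seed_everything(shared_seed)
        with torch.no_grad():
            pass  # rendering and DDIM sampling happen here

Seeding before the context is entered also means the drawn seed applies to any noise created during setup, not just inside the sampling loop.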