Args:
x: HxWxC image, [0, 1]
sf: down-scale factor
Return:
bicubically downsampled LR image | def bicubic_degradation(x, sf=3):
"""
Args:
x: HxWxC image, [0, 1]
sf: down-scale factor
Return:
bicubically downsampled LR image
"""
x = util.imresize_np(x, scale=1 / sf)
return x |
blur + bicubic downsampling
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2018learning,
title={Learning a single convolutional super-resolution network for multiple degradations},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={3262--3271},
year={2018}
} | def srmd_degradation(x, k, sf=3):
"""blur + bicubic downsampling
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2018learning,
title={Learning a single convolutional super-resolution network for multiple degradations},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={3262--3271},
year={2018}
}
"""
x = ndimage.filters.convolve(
x, np.expand_dims(k, axis=2), mode="wrap"
) # 'nearest' | 'mirror'
x = bicubic_degradation(x, sf=sf)
return x |
bicubic downsampling + blur
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2019deep,
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={1671--1681},
year={2019}
} | def dpsr_degradation(x, k, sf=3):
"""bicubic downsampling + blur
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2019deep,
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={1671--1681},
year={2019}
}
"""
x = bicubic_degradation(x, sf=sf)
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap")
return x |
blur + downsampling
Args:
x: HxWxC image, [0, 1]/[0, 255]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image | def classical_degradation(x, k, sf=3):
"""blur + downsampling
Args:
x: HxWxC image, [0, 1]/[0, 255]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
"""
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap")
# x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
st = 0
return x[st::sf, st::sf, ...] |
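A minimal usage sketch for classical_degradation above; the random image and the normalized 5x5 box kernel below are illustrative stand-ins for a real HR image and blur kernel:
import numpy as np
x = np.random.rand(64, 64, 3)           # toy HxWxC image in [0, 1]
k = np.ones((5, 5)) / 25.0              # box kernel, normalized to sum to 1
lr = classical_degradation(x, k, sf=4)  # blur with k, then keep every 4th pixel
print(lr.shape)                         # (16, 16, 3)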
USM sharpening. borrowed from real-ESRGAN
Input image: I; Blurry image: B.
1. K = I + weight * (I - B)
2. Mask = 1 if abs(I - B) > threshold, else: 0
3. Blur the mask to get a soft mask
4. Out = Mask * K + (1 - Mask) * I
Args:
img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
weight (float): Sharp weight. Default: 0.5.
radius (float): Kernel size of Gaussian blur. Default: 50.
threshold (int): Mask threshold on the 0-255 scale. Default: 10. | def add_sharpening(img, weight=0.5, radius=50, threshold=10):
"""USM sharpening. borrowed from real-ESRGAN
Input image: I; Blurry image: B.
1. K = I + weight * (I - B)
2. Mask = 1 if abs(I - B) > threshold, else: 0
3. Blur the mask to get a soft mask
4. Out = Mask * K + (1 - Mask) * I
Args:
img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
weight (float): Sharp weight. Default: 0.5.
radius (float): Kernel size of Gaussian blur. Default: 50.
threshold (int): Mask threshold on the 0-255 scale. Default: 10.
"""
if radius % 2 == 0:
radius += 1
blur = cv2.GaussianBlur(img, (radius, radius), 0)
residual = img - blur
mask = np.abs(residual) * 255 > threshold
mask = mask.astype("float32")
soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
K = img + weight * residual
K = np.clip(K, 0, 1)
return soft_mask * K + (1 - soft_mask) * img |
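A small usage sketch for add_sharpening above; the random array stands in for a real float32 BGR image in [0, 1]:
import numpy as np
img = np.random.rand(128, 128, 3).astype(np.float32)  # stand-in for a BGR image in [0, 1]
sharp = add_sharpening(img, weight=0.5, radius=50, threshold=10)
print(sharp.shape, sharp.dtype)                        # (128, 128, 3) float32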
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
sf: scale factor
isp_model: camera ISP model
Returns
-------
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] | def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
"""
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
sf: scale factor
isp_model: camera ISP model
Returns
-------
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = img.shape[:2]
img = img.copy()[: h1 - h1 % sf, : w1 - w1 % sf, ...]  # mod crop along (H, W)
h, w = img.shape[:2]
if h < lq_patchsize * sf or w < lq_patchsize * sf:
raise ValueError(f"img size ({h1}X{w1}) is too small!")
hq = img.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
img = cv2.resize(
img,
(int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
interpolation=random.choice([1, 2, 3]),
)
else:
img = util.imresize_np(img, 1 / 2, True)
img = np.clip(img, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = (
shuffle_order[idx2],
shuffle_order[idx1],
)
for i in shuffle_order:
if i == 0:
img = add_blur(img, sf=sf)
elif i == 1:
img = add_blur(img, sf=sf)
elif i == 2:
a, b = img.shape[1], img.shape[0]
# downsample2
if random.random() < 0.75:
sf1 = random.uniform(1, 2 * sf)
img = cv2.resize(
img,
(int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
interpolation=random.choice([1, 2, 3]),
)
else:
k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
img = ndimage.filters.convolve(
img, np.expand_dims(k_shifted, axis=2), mode="mirror"
)
img = img[0::sf, 0::sf, ...] # nearest downsampling
img = np.clip(img, 0.0, 1.0)
elif i == 3:
# downsample3
img = cv2.resize(
img,
(int(1 / sf * a), int(1 / sf * b)),
interpolation=random.choice([1, 2, 3]),
)
img = np.clip(img, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
img = add_JPEG_noise(img)
elif i == 6:
# add processed camera sensor noise
if random.random() < isp_prob and isp_model is not None:
with torch.no_grad():
img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
img = add_JPEG_noise(img)
# random crop
img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
return img, hq |
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
sf: scale factor
isp_model: camera ISP model
Returns
-------
example: dict with key "image": the degraded low-quality image, HxWxC, uint8 | def degradation_bsrgan_variant(image, sf=4, isp_model=None):
"""
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
sf: scale factor
isp_model: camera ISP model
Returns
-------
example: dict with key "image": the degraded low-quality image, HxWxC, uint8
"""
image = util.uint2single(image)
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
h1, w1 = image.shape[:2]
image = image.copy()[: h1 - h1 % sf, : w1 - w1 % sf, ...]  # mod crop along (H, W)
h, w = image.shape[:2]
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
image = cv2.resize(
image,
(int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
interpolation=random.choice([1, 2, 3]),
)
else:
image = util.imresize_np(image, 1 / 2, True)
image = np.clip(image, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = (
shuffle_order[idx2],
shuffle_order[idx1],
)
for i in shuffle_order:
if i == 0:
image = add_blur(image, sf=sf)
# elif i == 1:
# image = add_blur(image, sf=sf)
elif i == 2:
a, b = image.shape[1], image.shape[0]
# downsample2
if random.random() < 0.8:
sf1 = random.uniform(1, 2 * sf)
image = cv2.resize(
image,
(int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
interpolation=random.choice([1, 2, 3]),
)
else:
k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
image = ndimage.filters.convolve(
image, np.expand_dims(k_shifted, axis=2), mode="mirror"
)
image = image[0::sf, 0::sf, ...] # nearest downsampling
image = np.clip(image, 0.0, 1.0)
elif i == 3:
# downsample3
image = cv2.resize(
image,
(int(1 / sf * a), int(1 / sf * b)),
interpolation=random.choice([1, 2, 3]),
)
image = np.clip(image, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
image = add_JPEG_noise(image)
#
# elif i == 6:
# # add processed camera sensor noise
# if random.random() < isp_prob and isp_model is not None:
# with torch.no_grad():
# img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
image = add_JPEG_noise(image)
image = util.single2uint(image)
example = {"image": image}
return example |
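A hypothetical call sketch for degradation_bsrgan_variant above, assuming the helpers it references (util, add_blur, add_Gaussian_noise, add_JPEG_noise, fspecial, shift_pixel) are importable from the same module; the random uint8 array stands in for a real HR image:
import numpy as np
hq = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # stand-in HR image
example = degradation_bsrgan_variant(hq, sf=4)
lq = example["image"]                                       # degraded LR image, (64, 64, 3) uint8
print(lq.shape, lq.dtype)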
imgs: list, N images of size WxHxC | def imssave(imgs, img_path):
"""
imgs: list, N images of size WxHxC
"""
img_name, ext = os.path.splitext(os.path.basename(img_path))
for i, img in enumerate(imgs):
if img.ndim == 3:
img = img[:, :, [2, 1, 0]]
new_path = os.path.join(
os.path.dirname(img_path), img_name + str("_s{:04d}".format(i)) + ".png"
)
cv2.imwrite(new_path, img) |
split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
and save them into taget_dataroot; only images larger than (p_max)x(p_max)
will be split.
Args:
original_dataroot:
taget_dataroot:
p_size: size of small images
p_overlap: patch size in training is a good choice
p_max: images with smaller size than (p_max)x(p_max) keep unchanged. | def split_imageset(
original_dataroot,
taget_dataroot,
n_channels=3,
p_size=800,
p_overlap=96,
p_max=1000,
):
"""
split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
and save them into taget_dataroot; only images larger than (p_max)x(p_max)
will be split.
Args:
original_dataroot:
taget_dataroot:
p_size: size of small images
p_overlap: overlap between adjacent patches; the patch size used in training is a good choice
p_max: images smaller than (p_max)x(p_max) are kept unchanged.
"""
paths = get_image_paths(original_dataroot)
for img_path in paths:
# img_name, ext = os.path.splitext(os.path.basename(img_path))
img = imread_uint(img_path, n_channels=n_channels)
patches = patches_from_image(img, p_size, p_overlap, p_max)
imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path))) |
Converts a torch Tensor into an image Numpy array of BGR channel order
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) | def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
"""
Converts a torch Tensor into an image Numpy array of BGR channel order
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
tensor = (
tensor.squeeze().float().cpu().clamp_(*min_max)
) # squeeze first, then clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 2:
img_np = tensor.numpy()
else:
raise TypeError(
"Only support 4D, 3D and 2D tensor. But received with dimension: {:d}".format(
n_dim
)
)
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
# Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
return img_np.astype(out_type) |
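A quick sketch of tensor2img above on a random CHW tensor; the output is HWC, uint8, with channels flipped from RGB to BGR:
import torch
t = torch.rand(3, 8, 8)        # CHW tensor in [0, 1], RGB order
img = tensor2img(t)            # HWC, uint8, BGR
print(img.shape, img.dtype)    # (8, 8, 3) uint8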
Kai Zhang (github: https://github.com/cszn) | def augment_img(img, mode=0):
"""Kai Zhang (github: https://github.com/cszn)"""
if mode == 0:
return img
elif mode == 1:
return np.flipud(np.rot90(img))
elif mode == 2:
return np.flipud(img)
elif mode == 3:
return np.rot90(img, k=3)
elif mode == 4:
return np.flipud(np.rot90(img, k=2))
elif mode == 5:
return np.rot90(img)
elif mode == 6:
return np.rot90(img, k=2)
elif mode == 7:
return np.flipud(np.rot90(img, k=3)) |
Kai Zhang (github: https://github.com/cszn) | def augment_img_tensor4(img, mode=0):
"""Kai Zhang (github: https://github.com/cszn)"""
if mode == 0:
return img
elif mode == 1:
return img.rot90(1, [2, 3]).flip([2])
elif mode == 2:
return img.flip([2])
elif mode == 3:
return img.rot90(3, [2, 3])
elif mode == 4:
return img.rot90(2, [2, 3]).flip([2])
elif mode == 5:
return img.rot90(1, [2, 3])
elif mode == 6:
return img.rot90(2, [2, 3])
elif mode == 7:
return img.rot90(3, [2, 3]).flip([2]) |
Kai Zhang (github: https://github.com/cszn) | def augment_img_tensor(img, mode=0):
"""Kai Zhang (github: https://github.com/cszn)"""
img_size = img.size()
img_np = img.data.cpu().numpy()
if len(img_size) == 3:
img_np = np.transpose(img_np, (1, 2, 0))
elif len(img_size) == 4:
img_np = np.transpose(img_np, (2, 3, 1, 0))
img_np = augment_img(img_np, mode=mode)
img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
if len(img_size) == 3:
img_tensor = img_tensor.permute(2, 0, 1)
elif len(img_size) == 4:
img_tensor = img_tensor.permute(3, 2, 0, 1)
return img_tensor.type_as(img) |
same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1] | def rgb2ycbcr(img, only_y=True):
"""same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img = img.astype(np.float32)  # work on a float copy; the astype result was previously discarded
if in_img_type != np.uint8:
img *= 255.0
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(
img,
[
[65.481, -37.797, 112.0],
[128.553, -74.203, -93.786],
[24.966, 112.0, -18.214],
],
) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) |
same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1] | def ycbcr2rgb(img):
"""same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img = img.astype(np.float32)  # work on a float copy; the astype result was previously discarded
if in_img_type != np.uint8:
img *= 255.0
# convert
rlt = np.matmul(
img,
[
[0.00456621, 0.00456621, 0.00456621],
[0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0],
],
) * 255.0 + [-222.921, 135.576, -276.836]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) |
bgr version of rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1] | def bgr2ycbcr(img, only_y=True):
"""bgr version of rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img = img.astype(np.float32)  # work on a float copy; the astype result was previously discarded
if in_img_type != np.uint8:
img *= 255.0
# convert
if only_y:
rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(
img,
[
[24.966, 112.0, -18.214],
[128.553, -74.203, -93.786],
[65.481, -37.797, 112.0],
],
) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) |
calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255] | def calculate_ssim(img1, img2, border=0):
"""calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
"""
# img1 = img1.squeeze()
# img2 = img2.squeeze()
if not img1.shape == img2.shape:
raise ValueError("Input images must have the same dimensions.")
h, w = img1.shape[:2]
img1 = img1[border : h - border, border : w - border]
img2 = img2[border : h - border, border : w - border]
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError("Wrong input image dimensions.") |
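A short usage sketch for calculate_ssim above, assuming the companion ssim() helper it calls is available from the same module; inputs are on the [0, 255] scale:
import numpy as np
img1 = (np.random.rand(64, 64, 3) * 255).astype(np.float64)
img2 = np.clip(img1 + np.random.randn(64, 64, 3) * 5.0, 0, 255)
print(calculate_ssim(img1, img2, border=4))  # typically high (close to 1) for a mildly noisy copy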
Overwrite model.train with this function to make sure train/eval mode
does not change anymore. | def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self |
Read pfm file.
Args:
path (str): path to file
Returns:
tuple: (data, scale) | def read_pfm(path):
"""Read pfm file.
Args:
path (str): path to file
Returns:
tuple: (data, scale)
"""
with open(path, "rb") as file:
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == "PF":
color = True
elif header.decode("ascii") == "Pf":
color = False
else:
raise Exception("Not a PFM file: " + path)
dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0:
# little-endian
endian = "<"
scale = -scale
else:
# big-endian
endian = ">"
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale |
Write pfm file.
Args:
path (str): pathto file
image (array): data
scale (int, optional): Scale. Defaults to 1. | def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
path (str): pathto file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n" if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file) |
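A minimal round-trip sketch combining write_pfm and read_pfm above; "toy.pfm" is only an illustrative file name:
import numpy as np
depth = np.random.rand(4, 5).astype(np.float32)  # greyscale map, H=4, W=5
write_pfm("toy.pfm", depth)
data, scale = read_pfm("toy.pfm")
assert data.shape == (4, 5) and np.allclose(data, depth)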
Read image and output RGB image (0-1).
Args:
path (str): path to file
Returns:
array: RGB image (0-1) | def read_image(path):
"""Read image and output RGB image (0-1).
Args:
path (str): path to file
Returns:
array: RGB image (0-1)
"""
img = cv2.imread(path)
if img.ndim == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
return img |
Resize image and make it fit for network.
Args:
img (array): image
Returns:
tensor: data ready for network | def resize_image(img):
"""Resize image and make it fit for network.
Args:
img (array): image
Returns:
tensor: data ready for network
"""
height_orig = img.shape[0]
width_orig = img.shape[1]
if width_orig > height_orig:
scale = width_orig / 384
else:
scale = height_orig / 384
height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
img_resized = (
torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
)
img_resized = img_resized.unsqueeze(0)
return img_resized |
Resize depth map and bring to CPU (numpy).
Args:
depth (tensor): depth
width (int): image width
height (int): image height
Returns:
array: processed depth | def resize_depth(depth, width, height):
"""Resize depth map and bring to CPU (numpy).
Args:
depth (tensor): depth
width (int): image width
height (int): image height
Returns:
array: processed depth
"""
depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
depth_resized = cv2.resize(
depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
)
return depth_resized |
Write depth map to pfm and png file.
Args:
path (str): filepath without extension
depth (array): depth | def write_depth(path, depth, bits=1):
"""Write depth map to pfm and png file.
Args:
path (str): filepath without extension
depth (array): depth
"""
write_pfm(path + ".pfm", depth.astype(np.float32))
depth_min = depth.min()
depth_max = depth.max()
max_val = (2 ** (8 * bits)) - 1
if depth_max - depth_min > np.finfo("float").eps:
out = max_val * (depth - depth_min) / (depth_max - depth_min)
else:
out = np.zeros(depth.shape, dtype=depth.dtype)
if bits == 1:
cv2.imwrite(path + ".png", out.astype("uint8"))
elif bits == 2:
cv2.imwrite(path + ".png", out.astype("uint16"))
return |
Resize the sample to ensure the given minimum size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size | def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size
"""
shape = list(sample["disparity"].shape)
if shape[0] >= size[0] and shape[1] >= size[1]:
return sample
scale = [0, 0]
scale[0] = size[0] / shape[0]
scale[1] = size[1] / shape[1]
scale = max(scale)
shape[0] = math.ceil(scale * shape[0])
shape[1] = math.ceil(scale * shape[1])
# resize
sample["image"] = cv2.resize(
sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
)
sample["disparity"] = cv2.resize(
sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
)
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
tuple(shape[::-1]),
interpolation=cv2.INTER_NEAREST,
)
sample["mask"] = sample["mask"].astype(bool)
return tuple(shape) |
Overwrite model.train with this function to make sure train/eval mode
does not change anymore. | def disabled_train(self):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self |
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
) | def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial**2) * c
model.total_ops += th.DoubleTensor([matmul_ops]) |
grabs all text up to the first occurrence of ':'
uses the grabbed text as a sub-prompt, and takes the value following ':' as weight
if ':' has no value defined, defaults to 1.0
repeats until no text remaining | def split_weighted_subprompts(text):
"""
grabs all text up to the first occurrence of ':'
uses the grabbed text as a sub-prompt, and takes the value following ':' as weight
if ':' has no value defined, defaults to 1.0
repeats until no text remaining
"""
remaining = len(text)
prompts = []
weights = []
while remaining > 0:
if ":" in text:
idx = text.index(":") # first occurrence from start
# grab up to index as sub-prompt
prompt = text[:idx]
remaining -= idx
# remove from main text
text = text[idx + 1 :]
# find value for weight
if " " in text:
idx = text.index(" ") # first occurence
else: # no space, read to end
idx = len(text)
if idx != 0:
try:
weight = float(text[:idx])
except ValueError:  # couldn't parse as float
print(
f"Warning: '{text[:idx]}' is not a value, are you missing a space?"
)
weight = 1.0
else: # no value found
weight = 1.0
# remove from main text
remaining -= idx
text = text[idx + 1 :]
# append the sub-prompt and its weight
prompts.append(prompt)
weights.append(weight)
else: # no : found
if len(text) > 0: # there is still text though
# take remainder as weight 1
prompts.append(text)
weights.append(1.0)
remaining = 0
return prompts, weights |
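A small example of the parsing behavior of split_weighted_subprompts above:
prompts, weights = split_weighted_subprompts("a castle:2 misty forest:0.5 dramatic lighting")
print(prompts)  # ['a castle', 'misty forest', 'dramatic lighting']
print(weights)  # [2.0, 0.5, 1.0]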
Appends dimensions to the end of a tensor until it has target_dims dimensions. | def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(
f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
)
return x[(...,) + (None,) * dims_to_append] |
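A one-line sketch of how append_dims above broadcasts a per-sample sigma against a 4D batch:
import torch
sigma = torch.tensor([0.5, 1.0])          # one sigma per batch element
x = torch.randn(2, 3, 8, 8)
print(append_dims(sigma, x.ndim).shape)   # torch.Size([2, 1, 1, 1])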
Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step. | def get_ancestral_step(sigma_from, sigma_to):
"""Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step."""
sigma_up = (
sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2
) ** 0.5
sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
return sigma_down, sigma_up |
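A quick numeric check of the decomposition above: the two returned levels satisfy sigma_down**2 + sigma_up**2 == sigma_to**2, so the total variance after re-noising matches the target level:
import torch
sigma_from, sigma_to = torch.tensor(2.0), torch.tensor(1.0)
down, up = get_ancestral_step(sigma_from, sigma_to)
assert torch.allclose(down**2 + up**2, sigma_to**2)  # down = 0.5, up ~ 0.866 here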
Converts a denoiser output to a Karras ODE derivative. | def to_d(x, sigma, denoised):
"""Converts a denoiser output to a Karras ODE derivative."""
return (x - denoised) / append_dims(sigma, x.ndim) |
Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step. | def get_ancestral_step(sigma_from, sigma_to):
"""Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step."""
sigma_up = (
sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2
) ** 0.5
sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
return sigma_down, sigma_up |
Implements Algorithm 2 (Euler steps) from Karras et al. (2022). | def sample_euler(
model,
x,
sigmas,
extra_args=None,
callback=None,
disable=None,
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
):
"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
gamma = (
min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
if s_tmin <= sigmas[i] <= s_tmax
else 0.0
)
eps = torch.randn_like(x) * s_noise
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigma_hat,
"denoised": denoised,
}
)
dt = sigmas[i + 1] - sigma_hat
# Euler method
x = x + d * dt
return x |
Ancestral sampling with Euler method steps. | def sample_euler_ancestral(
model, x, sigmas, extra_args=None, callback=None, disable=None
):
"""Ancestral sampling with Euler method steps."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigmas[i],
"denoised": denoised,
}
)
d = to_d(x, sigmas[i], denoised)
# Euler method
dt = sigma_down - sigmas[i]
x = x + d * dt
x = x + torch.randn_like(x) * sigma_up
return x |
Implements Algorithm 2 (Heun steps) from Karras et al. (2022). | def sample_heun(
model,
x,
sigmas,
extra_args=None,
callback=None,
disable=None,
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
):
"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
gamma = (
min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
if s_tmin <= sigmas[i] <= s_tmax
else 0.0
)
eps = torch.randn_like(x) * s_noise
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigma_hat,
"denoised": denoised,
}
)
dt = sigmas[i + 1] - sigma_hat
if sigmas[i + 1] == 0:
# Euler method
x = x + d * dt
else:
# Heun's method
x_2 = x + d * dt
denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
d_prime = (d + d_2) / 2
x = x + d_prime * dt
return x |
A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022). | def sample_dpm_2(
model,
x,
sigmas,
extra_args=None,
callback=None,
disable=None,
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
):
"""A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
gamma = (
min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
if s_tmin <= sigmas[i] <= s_tmax
else 0.0
)
eps = torch.randn_like(x) * s_noise
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigma_hat,
"denoised": denoised,
}
)
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
sigma_mid = ((sigma_hat ** (1 / 3) + sigmas[i + 1] ** (1 / 3)) / 2) ** 3
dt_1 = sigma_mid - sigma_hat
dt_2 = sigmas[i + 1] - sigma_hat
x_2 = x + d * dt_1
denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
d_2 = to_d(x_2, sigma_mid, denoised_2)
x = x + d_2 * dt_2
return x |
Ancestral sampling with DPM-Solver inspired second-order steps. | def sample_dpm_2_ancestral(
model, x, sigmas, extra_args=None, callback=None, disable=None
):
"""Ancestral sampling with DPM-Solver inspired second-order steps."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigmas[i],
"denoised": denoised,
}
)
d = to_d(x, sigmas[i], denoised)
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
dt_1 = sigma_mid - sigmas[i]
dt_2 = sigma_down - sigmas[i]
x_2 = x + d * dt_1
denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
d_2 = to_d(x_2, sigma_mid, denoised_2)
x = x + d_2 * dt_2
x = x + torch.randn_like(x) * sigma_up
return x |
Zero out the parameters of a module and return it. | def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module |
Removes segments. Positive values shave the first segments, negative shave the last segments. | def shave_segments(path, n_shave_prefix_segments=1):
"""
Removes segments. Positive values shave the first segments, negative shave the last segments.
"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(".")[n_shave_prefix_segments:])
else:
return ".".join(path.split(".")[:n_shave_prefix_segments]) |
Updates paths inside resnets to the new naming scheme (local renaming) | def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
"""
Updates paths inside resnets to the new naming scheme (local renaming)
"""
mapping = []
for old_item in old_list:
new_item = old_item.replace("in_layers.0", "norm1")
new_item = new_item.replace("in_layers.2", "conv1")
new_item = new_item.replace("out_layers.0", "norm2")
new_item = new_item.replace("out_layers.3", "conv2")
new_item = new_item.replace("emb_layers.1", "time_emb_proj")
new_item = new_item.replace("skip_connection", "conv_shortcut")
new_item = shave_segments(
new_item, n_shave_prefix_segments=n_shave_prefix_segments
)
mapping.append({"old": old_item, "new": new_item})
return mapping |
Updates paths inside resnets to the new naming scheme (local renaming) | def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
"""
Updates paths inside resnets to the new naming scheme (local renaming)
"""
mapping = []
for old_item in old_list:
new_item = old_item
new_item = new_item.replace("nin_shortcut", "conv_shortcut")
new_item = shave_segments(
new_item, n_shave_prefix_segments=n_shave_prefix_segments
)
mapping.append({"old": old_item, "new": new_item})
return mapping |
Updates paths inside attentions to the new naming scheme (local renaming) | def renew_attention_paths(old_list, n_shave_prefix_segments=0):
"""
Updates paths inside attentions to the new naming scheme (local renaming)
"""
mapping = []
for old_item in old_list:
new_item = old_item
# new_item = new_item.replace('norm.weight', 'group_norm.weight')
# new_item = new_item.replace('norm.bias', 'group_norm.bias')
# new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
# new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
# new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
mapping.append({"old": old_item, "new": new_item})
return mapping |
Updates paths inside attentions to the new naming scheme (local renaming) | def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
"""
Updates paths inside attentions to the new naming scheme (local renaming)
"""
mapping = []
for old_item in old_list:
new_item = old_item
new_item = new_item.replace("norm.weight", "group_norm.weight")
new_item = new_item.replace("norm.bias", "group_norm.bias")
new_item = new_item.replace("q.weight", "query.weight")
new_item = new_item.replace("q.bias", "query.bias")
new_item = new_item.replace("k.weight", "key.weight")
new_item = new_item.replace("k.bias", "key.bias")
new_item = new_item.replace("v.weight", "value.weight")
new_item = new_item.replace("v.bias", "value.bias")
new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
new_item = shave_segments(
new_item, n_shave_prefix_segments=n_shave_prefix_segments
)
mapping.append({"old": old_item, "new": new_item})
return mapping |
This does the final conversion step: take locally converted weights and apply a global renaming
to them. It splits attention layers, and takes into account additional replacements
that may arise.
Assigns the weights to the new checkpoint. | def assign_to_checkpoint(
paths,
checkpoint,
old_checkpoint,
attention_paths_to_split=None,
additional_replacements=None,
config=None,
):
"""
This does the final conversion step: take locally converted weights and apply a global renaming
to them. It splits attention layers, and takes into account additional replacements
that may arise.
Assigns the weights to the new checkpoint.
"""
assert isinstance(
paths, list
), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
old_tensor = old_checkpoint[path]
channels = old_tensor.shape[0] // 3
target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
old_tensor = old_tensor.reshape(
(num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]
)
query, key, value = old_tensor.split(channels // num_heads, dim=1)
checkpoint[path_map["query"]] = query.reshape(target_shape)
checkpoint[path_map["key"]] = key.reshape(target_shape)
checkpoint[path_map["value"]] = value.reshape(target_shape)
for path in paths:
new_path = path["new"]
# These have already been assigned
if (
attention_paths_to_split is not None
and new_path in attention_paths_to_split
):
continue
# Global renaming happens here
new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
if additional_replacements is not None:
for replacement in additional_replacements:
new_path = new_path.replace(replacement["old"], replacement["new"])
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
else:
checkpoint[new_path] = old_checkpoint[path["old"]] |
Creates a config for the diffusers based on the config of the LDM model. | def create_unet_diffusers_config(original_config):
"""
Creates a config for the diffusers based on the config of the LDM model.
"""
unet_params = original_config.model.params.unet_config.params
block_out_channels = [
unet_params.model_channels * mult for mult in unet_params.channel_mult
]
down_block_types = []
resolution = 1
for i in range(len(block_out_channels)):
block_type = (
"CrossAttnDownBlock2D"
if resolution in unet_params.attention_resolutions
else "DownBlock2D"
)
down_block_types.append(block_type)
if i != len(block_out_channels) - 1:
resolution *= 2
up_block_types = []
for i in range(len(block_out_channels)):
block_type = (
"CrossAttnUpBlock2D"
if resolution in unet_params.attention_resolutions
else "UpBlock2D"
)
up_block_types.append(block_type)
resolution //= 2
config = dict(
sample_size=unet_params.image_size,
in_channels=unet_params.in_channels,
out_channels=unet_params.out_channels,
down_block_types=tuple(down_block_types),
up_block_types=tuple(up_block_types),
block_out_channels=tuple(block_out_channels),
layers_per_block=unet_params.num_res_blocks,
cross_attention_dim=unet_params.context_dim,
attention_head_dim=unet_params.num_heads,
)
return config |
Creates a config for the diffusers based on the config of the LDM model. | def create_vae_diffusers_config(original_config):
"""
Creates a config for the diffusers based on the config of the LDM model.
"""
vae_params = original_config.model.params.first_stage_config.params.ddconfig
_ = original_config.model.params.first_stage_config.params.embed_dim
block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
config = dict(
sample_size=vae_params.resolution,
in_channels=vae_params.in_channels,
out_channels=vae_params.out_ch,
down_block_types=tuple(down_block_types),
up_block_types=tuple(up_block_types),
block_out_channels=tuple(block_out_channels),
latent_channels=vae_params.z_channels,
layers_per_block=vae_params.num_res_blocks,
)
return config |
Takes a state dict and a config, and returns a converted checkpoint. | def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False):
"""
Takes a state dict and a config, and returns a converted checkpoint.
"""
# extract state_dict for UNet
unet_state_dict = {}
keys = list(checkpoint.keys())
unet_key = "model.diffusion_model."
# at least 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
if sum(k.startswith("model_ema") for k in keys) > 100:
print(f"Checkpoint {path} has both EMA and non-EMA weights.")
if extract_ema:
print(
"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
)
for key in keys:
if key.startswith("model.diffusion_model"):
flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
flat_ema_key
)
else:
print(
"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
" weights (usually better for inference), please make sure to add the `--extract_ema` flag."
)
for key in keys:
if key.startswith(unet_key):
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
new_checkpoint = {}
new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict[
"time_embed.0.weight"
]
new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict[
"time_embed.0.bias"
]
new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict[
"time_embed.2.weight"
]
new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict[
"time_embed.2.bias"
]
new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
# Retrieves the keys for the input blocks only
num_input_blocks = len(
{
".".join(layer.split(".")[:2])
for layer in unet_state_dict
if "input_blocks" in layer
}
)
input_blocks = {
layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
for layer_id in range(num_input_blocks)
}
# Retrieves the keys for the middle blocks only
num_middle_blocks = len(
{
".".join(layer.split(".")[:2])
for layer in unet_state_dict
if "middle_block" in layer
}
)
middle_blocks = {
layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
for layer_id in range(num_middle_blocks)
}
# Retrieves the keys for the output blocks only
num_output_blocks = len(
{
".".join(layer.split(".")[:2])
for layer in unet_state_dict
if "output_blocks" in layer
}
)
output_blocks = {
layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
for layer_id in range(num_output_blocks)
}
for i in range(1, num_input_blocks):
block_id = (i - 1) // (config["layers_per_block"] + 1)
layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
resnets = [
key
for key in input_blocks[i]
if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
]
attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
new_checkpoint[
f"down_blocks.{block_id}.downsamplers.0.conv.weight"
] = unet_state_dict.pop(f"input_blocks.{i}.0.op.weight")
new_checkpoint[
f"down_blocks.{block_id}.downsamplers.0.conv.bias"
] = unet_state_dict.pop(f"input_blocks.{i}.0.op.bias")
paths = renew_resnet_paths(resnets)
meta_path = {
"old": f"input_blocks.{i}.0",
"new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}",
}
assign_to_checkpoint(
paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
if len(attentions):
paths = renew_attention_paths(attentions)
meta_path = {
"old": f"input_blocks.{i}.1",
"new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
assign_to_checkpoint(
paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
resnet_0 = middle_blocks[0]
attentions = middle_blocks[1]
resnet_1 = middle_blocks[2]
resnet_0_paths = renew_resnet_paths(resnet_0)
assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
resnet_1_paths = renew_resnet_paths(resnet_1)
assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
attentions_paths = renew_attention_paths(attentions)
meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(
attentions_paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
for i in range(num_output_blocks):
block_id = i // (config["layers_per_block"] + 1)
layer_in_block_id = i % (config["layers_per_block"] + 1)
output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
output_block_list = {}
for layer in output_block_layers:
layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
if layer_id in output_block_list:
output_block_list[layer_id].append(layer_name)
else:
output_block_list[layer_id] = [layer_name]
if len(output_block_list) > 1:
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
attentions = [
key for key in output_blocks[i] if f"output_blocks.{i}.1" in key
]
resnet_0_paths = renew_resnet_paths(resnets)
paths = renew_resnet_paths(resnets)
meta_path = {
"old": f"output_blocks.{i}.0",
"new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}",
}
assign_to_checkpoint(
paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
if ["conv.weight", "conv.bias"] in output_block_list.values():
index = list(output_block_list.values()).index(
["conv.weight", "conv.bias"]
)
new_checkpoint[
f"up_blocks.{block_id}.upsamplers.0.conv.weight"
] = unet_state_dict[f"output_blocks.{i}.{index}.conv.weight"]
new_checkpoint[
f"up_blocks.{block_id}.upsamplers.0.conv.bias"
] = unet_state_dict[f"output_blocks.{i}.{index}.conv.bias"]
# Clear attentions as they have been attributed above.
if len(attentions) == 2:
attentions = []
if len(attentions):
paths = renew_attention_paths(attentions)
meta_path = {
"old": f"output_blocks.{i}.1",
"new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
assign_to_checkpoint(
paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
else:
resnet_0_paths = renew_resnet_paths(
output_block_layers, n_shave_prefix_segments=1
)
for path in resnet_0_paths:
old_path = ".".join(["output_blocks", str(i), path["old"]])
new_path = ".".join(
[
"up_blocks",
str(block_id),
"resnets",
str(layer_in_block_id),
path["new"],
]
)
new_checkpoint[new_path] = unet_state_dict[old_path]
return new_checkpoint |
Download all files from model_list[model_name] | def download_model(models, model_name):
"""Download all files from model_list[model_name]"""
for file in models[model_name]:
download_file(file["file_name"], file["file_path"], file["file_url"])
return |
linear interpolation | def lerp(a, b, x):
"linear interpolation"
return a + x * (b - a) |
6t^5 - 15t^4 + 10t^3 | def fade(t):
"6t^5 - 15t^4 + 10t^3"
return 6 * t**5 - 15 * t**4 + 10 * t**3 |
grad converts h to the right gradient vector and returns the dot product with (x, y) | def gradient(h, x, y):
"grad converts h to the right gradient vector and return the dot product with (x,y)"
vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
g = vectors[h % 4]
return g[:, :, 0] * x + g[:, :, 1] * y |
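The three helpers above (lerp, fade, gradient) are the building blocks of a classic NumPy Perlin-noise recipe. Below is a minimal sketch of how they might be combined; the permutation-table setup and grid resolution are assumptions for illustration, not part of the original code:
import numpy as np

def perlin(x, y, seed=0):
    # permutation table, duplicated so gradient lookups never overflow
    rng = np.random.RandomState(seed)
    p = np.concatenate([rng.permutation(256)] * 2)
    # integer lattice coordinates and fractional offsets within each cell
    xi, yi = x.astype(int), y.astype(int)
    xf, yf = x - xi, y - yi
    u, v = fade(xf), fade(yf)
    # dot products with the gradients at the four surrounding lattice corners
    n00 = gradient(p[p[xi] + yi], xf, yf)
    n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1)
    n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1)
    n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf)
    # bilinear interpolation of the corner contributions with the fade curve
    x1 = lerp(n00, n10, u)
    x2 = lerp(n01, n11, u)
    return lerp(x1, x2, v)

lin = np.linspace(0, 5, 100, endpoint=False)
gx, gy = np.meshgrid(lin, lin)
noise = perlin(gx, gy)  # 100x100 field of smooth noise, roughly in [-1, 1]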
returns transformation matrix which transforms from pose to world | def pose3d_rpy(x, y, z, roll, pitch, yaw):
"""returns transformation matrix which transforms from pose to world"""
return translation3d(x, y, z) @ rotation3d_rpy(roll, pitch, yaw) |
Simple function that allows us to change the title dynamically.
Normally you can use `st.set_page_config` to change the title but it can only be used once per app. | def set_page_title(title):
"""
Simple function that allows us to change the title dynamically.
Normally you can use `st.set_page_config` to change the title but it can only be used once per app.
"""
st.sidebar.markdown(
unsafe_allow_html=True,
body=f"""
<iframe height=0 srcdoc="<script>
const title = window.parent.document.querySelector('title') \
const oldObserver = window.parent.titleObserver
if (oldObserver) {{
oldObserver.disconnect()
}} \
const newObserver = new MutationObserver(function(mutations) {{
const target = mutations[0].target
if (target.text !== '{title}') {{
target.text = '{title}'
}}
}}) \
newObserver.observe(title, {{ childList: true }})
window.parent.titleObserver = newObserver \
title.text = '{title}'
</script>" />
""",
) |
Return a human readable size from bytes. | def human_readable_size(size, decimal_places=3):
"""Return a human readable size from bytes."""
for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
break
size /= 1024.0
return f"{size:.{decimal_places}f}{unit}" |
Load the different models. We also reuse the models that are already in memory to speed things up instead of loading them again. | def load_models(
use_LDSR=False,
LDSR_model="model",
use_GFPGAN=False,
GFPGAN_model="GFPGANv1.4",
use_RealESRGAN=False,
RealESRGAN_model="RealESRGAN_x4plus",
CustomModel_available=False,
custom_model="Stable Diffusion v1.5",
):
"""Load the different models. We also reuse the models that are already in memory to speed things up instead of loading them again."""
# model_manager.init()
logger.info("Loading models.")
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text("")
# check which models we want to use and whether they are already loaded.
with server_state_lock["LDSR"]:
if use_LDSR:
if "LDSR" in server_state and server_state["LDSR"].name == LDSR_model:
logger.info("LDSR already loaded")
else:
if "LDSR" in server_state:
del server_state["LDSR"]
# Load LDSR
if os.path.exists(st.session_state["defaults"].general.LDSR_dir):
try:
server_state["LDSR"] = load_LDSR(model_name=LDSR_model)
logger.info("Loaded LDSR")
except Exception:
import traceback
logger.error("Error loading LDSR:", file=sys.stderr)
logger.error(traceback.format_exc(), file=sys.stderr)
else:
if "LDSR" in server_state and not server_state["keep_all_models_loaded"]:
logger.debug(
"LDSR was in memory but we won't use it. Removing to save VRAM."
)
del server_state["LDSR"]
with server_state_lock["GFPGAN"]:
if use_GFPGAN:
if "GFPGAN" in server_state and server_state["GFPGAN"].name == GFPGAN_model:
logger.info("GFPGAN already loaded")
else:
if "GFPGAN" in server_state:
del server_state["GFPGAN"]
# Load GFPGAN
if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
try:
server_state["GFPGAN"] = load_GFPGAN(GFPGAN_model)
logger.info(f"Loaded GFPGAN: {GFPGAN_model}")
except Exception:
import traceback
logger.error("Error loading GFPGAN:", file=sys.stderr)
logger.error(traceback.format_exc(), file=sys.stderr)
else:
if "GFPGAN" in server_state and not server_state["keep_all_models_loaded"]:
del server_state["GFPGAN"]
with server_state_lock["RealESRGAN"]:
if use_RealESRGAN:
if (
"RealESRGAN" in server_state
and server_state["RealESRGAN"].model.name == RealESRGAN_model
):
logger.info("RealESRGAN already loaded")
else:
# Load RealESRGAN
try:
# We first remove the variable in case it has something there,
# some errors can load the model incorrectly and leave things in memory.
del server_state["RealESRGAN"]
except KeyError as e:
logger.error(e)
pass
if os.path.exists(st.session_state["defaults"].general.RealESRGAN_dir):
# server_state is used for keeping the models in memory across multiple pages or runs.
server_state["RealESRGAN"] = load_RealESRGAN(RealESRGAN_model)
logger.info(
"Loaded RealESRGAN with model "
+ server_state["RealESRGAN"].model.name
)
else:
if (
"RealESRGAN" in server_state
and not server_state["keep_all_models_loaded"]
):
del server_state["RealESRGAN"]
with server_state_lock["model"], server_state_lock["modelCS"], server_state_lock[
"modelFS"
], server_state_lock["loaded_model"]:
if "model" in server_state:
if "model" in server_state and server_state["loaded_model"] == custom_model:
# if the float16 or no_half options have changed since the last time the model was loaded then we need to reload the model.
if (
(
"float16" in server_state
and server_state["float16"]
!= st.session_state["defaults"].general.use_float16
)
or (
"no_half" in server_state
and server_state["no_half"]
!= st.session_state["defaults"].general.no_half
)
or (
"optimized" in server_state
and server_state["optimized"]
!= st.session_state["defaults"].general.optimized
)
):
logger.info(
"Model options changed, deleting the model from memory."
)
del server_state["float16"]
del server_state["no_half"]
del server_state["model"]
del server_state["modelCS"]
del server_state["modelFS"]
del server_state["loaded_model"]
del server_state["optimized"]
server_state["float16"] = st.session_state[
"defaults"
].general.use_float16
server_state["no_half"] = st.session_state[
"defaults"
].general.no_half
server_state["optimized"] = st.session_state[
"defaults"
].general.optimized
load_models(
use_LDSR=st.session_state["use_LDSR"],
LDSR_model=st.session_state["LDSR_model"],
use_GFPGAN=st.session_state["use_GFPGAN"],
GFPGAN_model=st.session_state["GFPGAN_model"],
use_RealESRGAN=st.session_state["use_RealESRGAN"],
RealESRGAN_model=st.session_state["RealESRGAN_model"],
CustomModel_available=server_state["CustomModel_available"],
custom_model=st.session_state["custom_model"],
)
else:
logger.info("Model already loaded")
return
else:
try:
del server_state["model"]
del server_state["modelCS"]
del server_state["modelFS"]
del server_state["loaded_model"]
except KeyError as e:
logger.error(e)
pass
# if the model from txt2vid is in memory we need to remove it to improve performance.
with server_state_lock["pipe"]:
if "pipe" in server_state and not server_state["keep_all_models_loaded"]:
del server_state["pipe"]
if (
"textual_inversion" in st.session_state
and not server_state["keep_all_models_loaded"]
):
del st.session_state["textual_inversion"]
# At this point the model is either
# not loaded yet or have been deleted from memory:
# load new model into memory
server_state["custom_model"] = custom_model
config, device, model, modelCS, modelFS = load_sd_model(custom_model)
server_state["device"] = device
server_state["model"] = model
server_state["modelCS"] = modelCS
server_state["modelFS"] = modelFS
server_state["loaded_model"] = custom_model
server_state["float16"] = st.session_state["defaults"].general.use_float16
server_state["no_half"] = st.session_state["defaults"].general.no_half
server_state["optimized"] = st.session_state["defaults"].general.optimized
# trying to disable multiprocessing as it makes it so streamlit cant stop when the
# model is loaded in memory and you need to kill the process sometimes.
try:
server_state["model"].args.use_multiprocessing_for_evaluation = False
except AttributeError:
pass
if st.session_state.defaults.general.enable_attention_slicing:
server_state["model"].enable_attention_slicing()
if st.session_state.defaults.general.enable_minimal_memory_usage:
server_state["model"].enable_minimal_memory_usage()
logger.info("Model loaded.")
return True |
Appends dimensions to the end of a tensor until it has target_dims dimensions. | def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(
f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
)
return x[(...,) + (None,) * dims_to_append] |
Constructs the noise schedule of Karras et al. (2022). | def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
"""Constructs the noise schedule of Karras et al. (2022)."""
ramp = torch.linspace(0, 1, n)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return append_zero(sigmas).to(device) |
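A small sketch of the schedule above, assuming the append_zero() helper from the same module appends a trailing 0.0:
import torch
sigmas = get_sigmas_karras(n=5, sigma_min=0.1, sigma_max=10.0)
print(sigmas)  # 5 noise levels decreasing from 10.0 to 0.1, plus a final 0.0 (6 values)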
Explanation:
Getting good results in/out-painting with stable diffusion can be challenging.
Although there are simpler effective solutions for in-painting, out-painting can be especially challenging because there is no color data
in the masked area to help prompt the generator. Ideally, even for in-painting we'd like to work effectively without that data as well.
Provided here is my take on a potential solution to this problem.
By taking a Fourier transform of the masked src img we get a function that tells us the presence and orientation of each feature scale in the unmasked src.
Shaping the init/seed noise for in/outpainting to the same distribution of feature scales, orientations, and positions increases output coherence
by helping keep features aligned. This technique is applicable to any continuous generation task such as audio or video, each of which can
be conceptualized as a series of out-painting steps where the last half of the input "frame" is erased. For multi-channel data such as color
or stereo sound the "color tone" or histogram of the seed noise can be matched to improve quality (using scikit-image currently)
This method is quite robust and has the added benefit of being fast independently of the size of the out-painted area.
The effects of this method include things like helping the generator integrate the pre-existing view distance and camera angle.
Carefully managing color and brightness with histogram matching is also essential to achieving good coherence.
noise_q controls the exponent in the fall-off of the distribution; it can be any positive number, and lower values mean higher detail (range > 0, default 1.)
color_variation controls how much freedom is allowed for the colors/palette of the out-painted area (range 0..1, default 0.01)
This code is provided as is under the Unlicense (https://unlicense.org/)
Although you have no obligation to do so, if you found this code helpful please find it in your heart to credit me [parlance-zz].
Questions or comments can be sent to [email protected] (https://github.com/parlance-zz/)
This code is part of a new branch of a discord bot I am working on integrating with diffusers (https://github.com/parlance-zz/g-diffuser-bot) | def get_matched_noise(_np_src_image, np_mask_rgb, noise_q, color_variation):
"""
Explanation:
Getting good results in/out-painting with stable diffusion can be challenging.
Although there are simpler effective solutions for in-painting, out-painting can be especially challenging because there is no color data
in the masked area to help prompt the generator. Ideally, even for in-painting we'd like to work effectively without that data as well.
Provided here is my take on a potential solution to this problem.
By taking a fourier transform of the masked src img we get a function that tells us the presence and orientation of each feature scale in the unmasked src.
Shaping the init/seed noise for in/outpainting to the same distribution of feature scales, orientations, and positions increases output coherence
by helping keep features aligned. This technique is applicable to any continuous generation task such as audio or video, each of which can
be conceptualized as a series of out-painting steps where the last half of the input "frame" is erased. For multi-channel data such as color
or stereo sound the "color tone" or histogram of the seed noise can be matched to improve quality (using scikit-image currently)
This method is quite robust and has the added benefit of being fast independently of the size of the out-painted area.
The effects of this method include things like helping the generator integrate the pre-existing view distance and camera angle.
Carefully managing color and brightness with histogram matching is also essential to achieving good coherence.
noise_q controls the exponent in the fall-off of the distribution; it can be any positive number, and lower values mean higher detail (range > 0, default 1.)
color_variation controls how much freedom is allowed for the colors/palette of the out-painted area (range 0..1, default 0.01)
This code is provided as is under the Unlicense (https://unlicense.org/)
Although you have no obligation to do so, if you found this code helpful please find it in your heart to credit me [parlance-zz].
Questions or comments can be sent to [email protected] (https://github.com/parlance-zz/)
This code is part of a new branch of a discord bot I am working on integrating with diffusers (https://github.com/parlance-zz/g-diffuser-bot)
"""
global DEBUG_MODE
global TMP_ROOT_PATH
width = _np_src_image.shape[0]
height = _np_src_image.shape[1]
num_channels = _np_src_image.shape[2]
np_src_image = _np_src_image[:] * (1.0 - np_mask_rgb)
np_mask_grey = np.sum(np_mask_rgb, axis=2) / 3.0
(np.sum(np_src_image, axis=2) / 3.0)  # unused leftover (the source-image greyscale)
np.ones((width, height), dtype=bool)  # unused leftover (the `all_mask` referenced in the commented code below)
img_mask = np_mask_grey > 1e-6
ref_mask = np_mask_grey < 1e-3
windowed_image = _np_src_image * (1.0 - _get_masked_window_rgb(np_mask_grey))
windowed_image /= np.max(windowed_image)
windowed_image += (
np.average(_np_src_image) * np_mask_rgb
) # / (1.-np.average(np_mask_rgb)) # rather than leave the masked area black, we get better results from fft by filling the average unmasked color
# windowed_image += np.average(_np_src_image) * (np_mask_rgb * (1.- np_mask_rgb)) / (1.-np.average(np_mask_rgb)) # compensate for darkening across the mask transition area
# _save_debug_img(windowed_image, "windowed_src_img")
src_fft = _fft2(windowed_image) # get feature statistics from masked src img
src_dist = np.absolute(src_fft)
src_phase = src_fft / src_dist
# _save_debug_img(src_dist, "windowed_src_dist")
noise_window = _get_gaussian_window(
width, height, mode=1
) # start with simple gaussian noise
noise_rgb = np.random.random_sample((width, height, num_channels))
noise_grey = np.sum(noise_rgb, axis=2) / 3.0
noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter
for c in range(num_channels):
noise_rgb[:, :, c] += (1.0 - color_variation) * noise_grey
noise_fft = _fft2(noise_rgb)
for c in range(num_channels):
noise_fft[:, :, c] *= noise_window
noise_rgb = np.real(_ifft2(noise_fft))
shaped_noise_fft = _fft2(noise_rgb)
shaped_noise_fft[:, :, :] = (
np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist**noise_q) * src_phase
) # perform the actual shaping
brightness_variation = 0.0 # color_variation # todo: temporarily tying brightness variation to color variation for now
contrast_adjusted_np_src = (
_np_src_image[:] * (brightness_variation + 1.0) - brightness_variation * 2.0
)
# scikit-image is used for histogram matching, very convenient!
shaped_noise = np.real(_ifft2(shaped_noise_fft))
shaped_noise -= np.min(shaped_noise)
shaped_noise /= np.max(shaped_noise)
shaped_noise[img_mask, :] = skimage.exposure.match_histograms(
shaped_noise[img_mask, :] ** 1.0,
contrast_adjusted_np_src[ref_mask, :],
channel_axis=1,
)
shaped_noise = _np_src_image[:] * (1.0 - np_mask_rgb) + shaped_noise * np_mask_rgb
# _save_debug_img(shaped_noise, "shaped_noise")
matched_noise = np.zeros((width, height, num_channels))
matched_noise = shaped_noise[:]
# matched_noise[all_mask,:] = skimage.exposure.match_histograms(shaped_noise[all_mask,:], _np_src_image[ref_mask,:], channel_axis=1)
# matched_noise = _np_src_image[:] * (1. - np_mask_rgb) + matched_noise * np_mask_rgb
# _save_debug_img(matched_noise, "matched_noise")
"""
todo:
color_variation doesn't have to be a single number; the overall color tone of the out-painted area could be parameter controlled
"""
return np.clip(matched_noise, 0.0, 1.0) |
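# Usage sketch (not part of the original source): shaping init noise for an out-paint of the
# right half of a 512x512 image. Assumes the module-level helpers used above (_fft2, _ifft2,
# _get_gaussian_window, _get_masked_window_rgb) and scikit-image are available.
import numpy as np

np_src = np.random.random_sample((512, 512, 3))  # stand-in source image in [0, 1]
np_mask = np.zeros((512, 512, 3))
np_mask[:, 256:, :] = 1.0  # mask == 1.0 where new content should be generated
shaped_init = get_matched_noise(np_src, np_mask, noise_q=1.0, color_variation=0.01)
# shaped_init stays in [0, 1]; the unmasked half is the source, the masked half is shaped noise.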
A folder picker that has a text_input field next to it and a button to select the folder.
Returns the text_input field with the folder path. | def folder_picker(
label="Select:",
value="",
help="",
folder_button_label="Select",
folder_button_help="",
folder_button_key="",
):
"""A folder picker that has a text_input field next to it and a button to select the folder.
Returns the text_input field with the folder path."""
import tkinter as tk
from tkinter import filedialog
# Set up tkinter
root = tk.Tk()
root.withdraw()
# Make folder picker dialog appear on top of other windows
root.wm_attributes("-topmost", 1)
col1, col2 = st.columns([2, 1], gap="small")
with col1:
dirname = st.empty()
with col2:
st.write("")
st.write("")
folder_picker = st.empty()
# Folder picker button
# st.title('Folder Picker')
# st.write('Please select a folder:')
# Create a label and add a random number of invisible characters
# to it so no two buttons inside a form are the same.
# folder_button_label = ''.join(random.choice(f"{folder_button_label}") for _ in range(5))
# use zero-width spaces as the invisible characters so the padded label still renders the same
invisible_padding = "\u200b" * random.randint(1, 500)
folder_button_label = f"{str(folder_button_label)}{invisible_padding}"
clicked = folder_button_key + "\u200b" * random.randint(5, 500)
# try:
# clicked = folder_picker.button(folder_button_label, help=folder_button_help, key=folder_button_key)
# except StreamlitAPIException:
clicked = folder_picker.form_submit_button(
folder_button_label, help=folder_button_help
)
if clicked:
dirname = dirname.text_input(
label, filedialog.askdirectory(master=root), help=help
)
else:
dirname = dirname.text_input(label, value, help=help)
return dirname |
Constructs an exponential noise schedule. | def get_sigmas_exponential(n, sigma_min, sigma_max, device="cpu"):
"""Constructs an exponential noise schedule."""
sigmas = torch.linspace(
math.log(sigma_max), math.log(sigma_min), n, device=device
).exp()
return append_zero(sigmas) |
Constructs a continuous VP noise schedule. | def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device="cpu"):
"""Constructs a continuous VP noise schedule."""
t = torch.linspace(1, eps_s, n, device=device)
sigmas = torch.sqrt(torch.exp(beta_d * t**2 / 2 + beta_min * t) - 1)
return append_zero(sigmas) |
Converts a denoiser output to a Karras ODE derivative. | def to_d(x, sigma, denoised):
"""Converts a denoiser output to a Karras ODE derivative."""
return (x - denoised) / append_dims(sigma, x.ndim) |
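# Minimal sketch (not part of the original source) of how a sigma schedule and to_d compose
# into a plain Euler sampler step. `denoiser` stands in for a wrapped model that maps
# (x, sigma) to a denoised prediction and is an assumption for illustration only.
def sample_euler_sketch(denoiser, x, sigmas):
    for i in range(len(sigmas) - 1):
        denoised = denoiser(x, sigmas[i] * x.new_ones([x.shape[0]]))
        d = to_d(x, sigmas[i], denoised)          # Karras ODE derivative at the current sigma
        x = x + d * (sigmas[i + 1] - sigmas[i])   # Euler step toward the next sigma
    return x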
Loads Stable Diffusion model by name | def load_sd_model(model_name: str):
"""Loads Stable Diffusion model by name"""
ckpt_path = st.session_state.defaults.general.default_model_path
if model_name != st.session_state.defaults.general.default_model:
ckpt_path = os.path.join("models", "custom", f"{model_name}.ckpt")
if st.session_state.defaults.general.optimized:
config = OmegaConf.load(st.session_state.defaults.general.optimized_config)
sd = load_sd_from_config(ckpt_path)
li, lo = [], []
for key, v_ in sd.items():
sp = key.split(".")
if (sp[0]) == "model":
if "input_blocks" in sp:
li.append(key)
elif "middle_block" in sp:
li.append(key)
elif "time_embed" in sp:
li.append(key)
else:
lo.append(key)
for key in li:
sd["model1." + key[6:]] = sd.pop(key)
for key in lo:
sd["model2." + key[6:]] = sd.pop(key)
device = (
torch.device(f"cuda:{st.session_state.defaults.general.gpu}")
if torch.cuda.is_available()
else torch.device("cpu")
)
model = instantiate_from_config(config.modelUNet)
_, _ = model.load_state_dict(sd, strict=False)
model.cuda()
model.eval()
model.turbo = st.session_state.defaults.general.optimized_turbo
modelCS = instantiate_from_config(config.modelCondStage)
_, _ = modelCS.load_state_dict(sd, strict=False)
modelCS.cond_stage_model.device = device
modelCS.eval()
modelFS = instantiate_from_config(config.modelFirstStage)
_, _ = modelFS.load_state_dict(sd, strict=False)
modelFS.eval()
del sd
if not st.session_state.defaults.general.no_half:
model = model.half().to(device)
modelCS = modelCS.half().to(device)
modelFS = modelFS.half().to(device)
return config, device, model, modelCS, modelFS
else:
config = OmegaConf.load(st.session_state.defaults.general.default_model_config)
model = load_model_from_config(config, ckpt_path)
device = (
torch.device(f"cuda:{st.session_state.defaults.general.gpu}")
if torch.cuda.is_available()
else torch.device("cpu")
)
model = (
model if st.session_state.defaults.general.no_half else model.half()
).to(device)
return config, device, model, None, None |
Find the optimal update_preview_frequency value maximizing
performance while minimizing the time between updates. | def optimize_update_preview_frequency(
current_chunk_speed,
previous_chunk_speed_list,
update_preview_frequency,
update_preview_frequency_list,
):
"""Find the optimal update_preview_frequency value maximizing
performance while minimizing the time between updates."""
from statistics import mean
previous_chunk_avg_speed = mean(previous_chunk_speed_list)
previous_chunk_speed_list.append(current_chunk_speed)
current_chunk_avg_speed = mean(previous_chunk_speed_list)
if current_chunk_avg_speed >= previous_chunk_avg_speed:
# print(f"{current_chunk_speed} >= {previous_chunk_speed}")
update_preview_frequency_list.append(update_preview_frequency + 1)
else:
# print(f"{current_chunk_speed} <= {previous_chunk_speed}")
update_preview_frequency_list.append(update_preview_frequency - 1)
update_preview_frequency = round(mean(update_preview_frequency_list))
return (
current_chunk_speed,
previous_chunk_speed_list,
update_preview_frequency,
update_preview_frequency_list,
) |
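# Usage sketch (not part of the original source): both lists carry state across preview
# updates and must start non-empty, otherwise mean() raises StatisticsError.
current_chunk_speed = 1.0
previous_chunk_speed_list = [1.0]
update_preview_frequency = 10
update_preview_frequency_list = [10]
for measured_speed in (1.2, 1.1, 1.4):  # hypothetical it/s readings from the callback
    (
        current_chunk_speed,
        previous_chunk_speed_list,
        update_preview_frequency,
        update_preview_frequency_list,
    ) = optimize_update_preview_frequency(
        measured_speed,
        previous_chunk_speed_list,
        update_preview_frequency,
        update_preview_frequency_list,
    )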
Moves only the unet to fp16 and to CUDA, while keeping lighter models on CPUs | def enable_minimal_memory_usage(model):
"""Moves only the unet to fp16 and to CUDA, while keeping lighter models on CPUs"""
model.unet.to(torch.float16).to(torch.device("cuda"))
model.enable_attention_slicing(1)
torch.cuda.empty_cache()
torch_gc() |
this function tests if prompt is too long, and if so, adds a message to comments | def check_prompt_length(prompt, comments):
"""this function tests if prompt is too long, and if so, adds a message to comments"""
tokenizer = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.tokenizer
max_length = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.max_length
info = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.tokenizer(
[prompt],
truncation=True,
max_length=max_length,
return_overflowing_tokens=True,
padding="max_length",
return_tensors="pt",
)
ovf = info["overflowing_tokens"][0]
overflowing_count = ovf.shape[0]
if overflowing_count == 0:
return
vocab = {v: k for k, v in tokenizer.get_vocab().items()}
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = tokenizer.convert_tokens_to_string("".join(overflowing_words))
comments.append(
f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n"
) |
Determines and returns the next sequence number to use when saving an
image in the specified directory.
If a prefix is given, only consider files whose names start with that
prefix, and strip the prefix from filenames before extracting their
sequence number.
The sequence starts at 0. | def get_next_sequence_number(path, prefix=""):
"""
Determines and returns the next sequence number to use when saving an
image in the specified directory.
If a prefix is given, only consider files whose names start with that
prefix, and strip the prefix from filenames before extracting their
sequence number.
The sequence starts at 0.
"""
result = -1
for p in Path(path).iterdir():
if p.name.endswith((".png", ".jpg")) and p.name.startswith(prefix):
tmp = p.name[len(prefix) :]
try:
result = max(int(tmp.split("-")[0]), result)
except ValueError:
pass
return result + 1 |
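# Usage sketch (not part of the original source): finding the next free index for a
# "00042-<steps>_<sampler>_<seed>.png"-style filename.
import os

os.makedirs("outputs/samples", exist_ok=True)
base_count = get_next_sequence_number("outputs/samples")
print(f"next file would start with {base_count:05}-")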
this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch | def process_images(
outpath,
func_init,
func_sample,
prompt,
seed,
sampler_name,
save_grid,
batch_size,
n_iter,
steps,
cfg_scale,
width,
height,
prompt_matrix,
use_GFPGAN: bool = True,
GFPGAN_model: str = "GFPGANv1.4",
use_RealESRGAN: bool = False,
realesrgan_model_name: str = "RealESRGAN_x4plus",
use_LDSR: bool = False,
LDSR_model_name: str = "model",
ddim_eta=0.0,
normalize_prompt_weights=True,
init_img=None,
init_mask=None,
mask_blur_strength=3,
mask_restore=False,
denoising_strength=0.75,
noise_mode=0,
find_noise_steps=1,
resize_mode=None,
uses_loopback=False,
uses_random_seed_loopback=False,
sort_samples=True,
write_info_files=True,
jpg_sample=False,
variant_amount=0.0,
variant_seed=None,
save_individual_images: bool = True,
):
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
torch_gc()
# start time after garbage collection (or before?)
start_time = time.time()
# capture the current date, intended for use later in the output folder name (currently unused)
datetime.datetime.now()
mem_mon = MemUsageMonitor("MemMon")
mem_mon.start()
if st.session_state.defaults.general.use_sd_concepts_library:
prompt_tokens = re.findall("<([a-zA-Z0-9-]+)>", prompt)
if prompt_tokens:
# compviz
tokenizer = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.tokenizer
text_encoder = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.transformer
# diffusers
# tokenizer = pipe.tokenizer
# text_encoder = pipe.text_encoder
ext = ("pt", "bin")
if len(prompt_tokens) > 1:
for token_name in prompt_tokens:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
token_name,
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{token_name}>",
)
else:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
prompt_tokens[0],
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{prompt_tokens[0]}>",
)
#
os.makedirs(outpath, exist_ok=True)
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
if "|" not in prompt and prompt.startswith("@"):
prompt = prompt[1:]
negprompt = ""
if "###" in prompt:
prompt, negprompt = prompt.split("###", 1)
prompt = prompt.strip()
negprompt = negprompt.strip()
comments = []
prompt_matrix_parts = []
simple_templating = False
if prompt_matrix:
if prompt.startswith("@"):
simple_templating = True
all_seeds, n_iter, prompt_matrix_parts, all_prompts, frows = oxlamon_matrix(
prompt, seed, n_iter, batch_size
)
else:
all_prompts = []
prompt_matrix_parts = prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
current = prompt_matrix_parts[0]
for n, text in enumerate(prompt_matrix_parts[1:]):
if combination_num & (2**n) > 0:
current += ("" if text.strip().startswith(",") else ", ") + text
all_prompts.append(current)
n_iter = math.ceil(len(all_prompts) / batch_size)
all_seeds = len(all_prompts) * [seed]
logger.info(
f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches."
)
else:
if not st.session_state["defaults"].general.no_verify_input:
try:
check_prompt_length(prompt, comments)
except Exception:
import traceback
logger.info("Error verifying input:", file=sys.stderr)
logger.info(traceback.format_exc(), file=sys.stderr)
all_prompts = batch_size * n_iter * [prompt]
all_seeds = [seed + x for x in range(len(all_prompts))]
precision_scope = (
autocast
if st.session_state["defaults"].general.precision == "autocast"
else nullcontext
)
output_images = []
grid_captions = []
stats = []
with torch.no_grad(), precision_scope("cuda"), (
server_state["model"].ema_scope()
if not st.session_state["defaults"].general.optimized
else nullcontext()
):
init_data = func_init()
time.time()
# if variant_amount > 0.0 create noise from base seed
base_x = None
if variant_amount > 0.0:
target_seed_randomizer = seed_to_int("") # random seed
torch.manual_seed(
seed
) # this has to be the single starting seed (not per-iteration)
base_x = create_random_tensors(
[opt_C, height // opt_f, width // opt_f], seeds=[seed]
)
# we don't want all_seeds to be sequential from starting seed with variants,
# since that makes the same variants each time,
# so we add target_seed_randomizer as a random offset
for si in range(len(all_seeds)):
all_seeds[si] += target_seed_randomizer
for n in range(n_iter):
logger.info(f"Iteration: {n+1}/{n_iter}")
prompts = all_prompts[n * batch_size : (n + 1) * batch_size]
captions = prompt_matrix_parts[n * batch_size : (n + 1) * batch_size]
seeds = all_seeds[n * batch_size : (n + 1) * batch_size]
logger.info(prompt)
if st.session_state["defaults"].general.optimized:
server_state["modelCS"].to(st.session_state["defaults"].general.gpu)
uc = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).get_learned_conditioning(len(prompts) * [negprompt])
if isinstance(prompts, tuple):
prompts = list(prompts)
# split the prompt if it has : for weighting
# TODO for speed it might help to have this occur when all_prompts filled??
weighted_subprompts = split_weighted_subprompts(
prompts[0], normalize_prompt_weights
)
# sub-prompt weighting used if more than 1
if len(weighted_subprompts) > 1:
c = torch.zeros_like(
uc
) # not sure whether this is correct, but it works
for i in range(0, len(weighted_subprompts)):
# note if alpha negative, it functions same as torch.sub
c = torch.add(
c,
(
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).get_learned_conditioning(weighted_subprompts[i][0]),
alpha=weighted_subprompts[i][1],
)
else: # just behave like usual
c = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).get_learned_conditioning(prompts)
shape = [opt_C, height // opt_f, width // opt_f]
if st.session_state["defaults"].general.optimized:
mem = torch.cuda.memory_allocated() / 1e6
server_state["modelCS"].to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
if noise_mode == 1 or noise_mode == 3:
# TODO params for find_noise_to_image
x = torch.cat(
batch_size
* [
find_noise_for_image(
server_state["model"],
server_state["device"],
init_img.convert("RGB"),
"",
find_noise_steps,
0.0,
normalize=True,
generation_callback=generation_callback,
)
],
dim=0,
)
else:
# we manually generate all input noises because each one should have a specific seed
x = create_random_tensors(shape, seeds=seeds)
if variant_amount > 0.0: # we are making variants
# using variant_seed as sneaky toggle,
# when not None or '' use the variant_seed
# otherwise use seeds
if variant_seed is not None and variant_seed != "":
specified_variant_seed = seed_to_int(variant_seed)
torch.manual_seed(specified_variant_seed)
seeds = [specified_variant_seed]
# finally, slerp base_x noise to target_x noise for creating a variant
x = slerp(
st.session_state["defaults"].general.gpu,
max(0.0, min(1.0, variant_amount)),
base_x,
x,
)
samples_ddim = func_sample(
init_data=init_data,
x=x,
conditioning=c,
unconditional_conditioning=uc,
sampler_name=sampler_name,
)
if st.session_state["defaults"].general.optimized:
server_state["modelFS"].to(st.session_state["defaults"].general.gpu)
x_samples_ddim = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelFS"]
).decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
run_images = []
for i, x_sample in enumerate(x_samples_ddim):
sanitized_prompt = slugify(prompts[i])
percent = i / len(x_samples_ddim)
if "progress_bar" in st.session_state:
st.session_state["progress_bar"].progress(
percent if percent < 100 else 100
)
if sort_samples:
full_path = os.path.join(os.getcwd(), sample_path, sanitized_prompt)
sanitized_prompt = sanitized_prompt[: 120 - len(full_path)]
sample_path_i = os.path.join(sample_path, sanitized_prompt)
# print(f"output folder length: {len(os.path.join(os.getcwd(), sample_path_i))}")
# print(os.path.join(os.getcwd(), sample_path_i))
os.makedirs(sample_path_i, exist_ok=True)
base_count = get_next_sequence_number(sample_path_i)
filename = f"{base_count:05}-{steps}_{sampler_name}_{seeds[i]}"
else:
full_path = os.path.join(os.getcwd(), sample_path)
sample_path_i = sample_path
base_count = get_next_sequence_number(sample_path_i)
filename = f"{base_count:05}-{steps}_{sampler_name}_{seeds[i]}_{sanitized_prompt}"[
: 120 - len(full_path)
] # same as before
x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c")
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
original_filename = filename
if "preview_image" in st.session_state:
st.session_state["preview_image"].image(image)
#
if (
use_GFPGAN
and server_state["GFPGAN"] is not None
and not use_RealESRGAN
and not use_LDSR
):
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if server_state["GFPGAN"].name != GFPGAN_model:
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
torch_gc()
with torch.autocast("cuda"):
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
# if st.session_state["GFPGAN_strenght"]:
# gfpgan_sample = Image.blend(image, gfpgan_image, st.session_state["GFPGAN_strenght"])
gfpgan_filename = original_filename + "-gfpgan"
save_sample(
gfpgan_image,
sample_path_i,
gfpgan_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(gfpgan_image) # 287
run_images.append(gfpgan_image)
if simple_templating:
grid_captions.append(captions[i] + "\ngfpgan")
#
elif (
use_RealESRGAN
and server_state["RealESRGAN"] is not None
and not use_GFPGAN
):
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running RealESRGAN on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
# skip_save = True # #287 >_>
torch_gc()
if server_state["RealESRGAN"].model.name != realesrgan_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
output, img_mode = server_state["RealESRGAN"].enhance(
x_sample[:, :, ::-1]
)
esrgan_filename = original_filename + "-esrgan4x"
esrgan_sample = output[:, :, ::-1]
esrgan_image = Image.fromarray(esrgan_sample)
# save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
# normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
# save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
save_sample(
esrgan_image,
sample_path_i,
esrgan_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(esrgan_image) # 287
run_images.append(esrgan_image)
if simple_templating:
grid_captions.append(captions[i] + "\nesrgan")
#
elif use_LDSR and "LDSR" in server_state and not use_GFPGAN:
logger.info(
"Running LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
# skip_save = True # #287 >_>
torch_gc()
if server_state["LDSR"].name != LDSR_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
result = server_state["LDSR"].superResolution(
image,
ddimSteps=st.session_state["ldsr_sampling_steps"],
preDownScale=st.session_state["preDownScale"],
postDownScale=st.session_state["postDownScale"],
downsample_method=st.session_state["downsample_method"],
)
ldsr_filename = original_filename + "-ldsr4x"
# ldsr_sample = result[:,:,::-1]
# ldsr_image = Image.fromarray(ldsr_sample)
# save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
# normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
# save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
save_sample(
result,
sample_path_i,
ldsr_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(result) # 287
run_images.append(result)
if simple_templating:
grid_captions.append(captions[i] + "\nldsr")
#
elif (
use_LDSR
and "LDSR" in server_state
and use_GFPGAN
and "GFPGAN" in server_state
):
logger.info(
"Running GFPGAN+LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN+LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if server_state["GFPGAN"].name != GFPGAN_model:
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
torch_gc()
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
if server_state["LDSR"].name != LDSR_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
# LDSR.superResolution(gfpgan_image, ddimSteps=100, preDownScale='None', postDownScale='None', downsample_method="Lanczos")
result = server_state["LDSR"].superResolution(
gfpgan_image,
ddimSteps=st.session_state["ldsr_sampling_steps"],
preDownScale=st.session_state["preDownScale"],
postDownScale=st.session_state["postDownScale"],
downsample_method=st.session_state["downsample_method"],
)
ldsr_filename = original_filename + "-gfpgan-ldsr2x"
# ldsr_sample = result[:,:,::-1]
# ldsr_image = Image.fromarray(result)
# save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
# normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
# save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
save_sample(
result,
sample_path_i,
ldsr_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(result) # 287
run_images.append(result)
if simple_templating:
grid_captions.append(captions[i] + "\ngfpgan-ldsr")
elif (
use_RealESRGAN
and server_state["RealESRGAN"] is not None
and use_GFPGAN
and server_state["GFPGAN"] is not None
):
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN+RealESRGAN on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
# skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
if server_state["RealESRGAN"].model.name != realesrgan_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
output, img_mode = server_state["RealESRGAN"].enhance(
gfpgan_sample[:, :, ::-1]
)
gfpgan_esrgan_filename = original_filename + "-gfpgan-esrgan4x"
gfpgan_esrgan_sample = output[:, :, ::-1]
gfpgan_esrgan_image = Image.fromarray(gfpgan_esrgan_sample)
save_sample(
gfpgan_esrgan_image,
sample_path_i,
gfpgan_esrgan_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
False,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(gfpgan_esrgan_image) # 287
run_images.append(gfpgan_esrgan_image)
if simple_templating:
grid_captions.append(captions[i] + "\ngfpgan_esrgan")
#
else:
output_images.append(image)
run_images.append(image)
if mask_restore and init_mask:
# init_mask = init_mask if keep_mask else ImageOps.invert(init_mask)
init_mask = init_mask.filter(
ImageFilter.GaussianBlur(mask_blur_strength)
)
init_mask = init_mask.convert("L")
init_img = init_img.convert("RGB")
image = image.convert("RGB")
if use_RealESRGAN and server_state["RealESRGAN"] is not None:
if (
server_state["RealESRGAN"].model.name
!= realesrgan_model_name
):
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
output, img_mode = server_state["RealESRGAN"].enhance(
np.array(init_img, dtype=np.uint8)
)
init_img = Image.fromarray(output)
init_img = init_img.convert("RGB")
output, img_mode = server_state["RealESRGAN"].enhance(
np.array(init_mask, dtype=np.uint8)
)
init_mask = Image.fromarray(output)
init_mask = init_mask.convert("L")
image = Image.composite(init_img, image, init_mask)
if save_individual_images:
save_sample(
image,
sample_path_i,
filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
save_individual_images,
server_state["loaded_model"],
)
# if add_original_image or not simple_templating:
# output_images.append(image)
# if simple_templating:
# grid_captions.append( captions[i] )
if "defaults" in st.session_state:
if st.session_state["defaults"].general.optimized:
mem = torch.cuda.memory_allocated() / 1e6
server_state["modelFS"].to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
if len(run_images) > 1:
preview_image = image_grid(run_images, n_iter)
else:
preview_image = run_images[0]
# Constrain the final preview image to 1440x900 so we're not sending huge amounts of data
# to the browser
preview_image = constrain_image(preview_image, 1440, 900)
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text("Finished!")
if "preview_image" in st.session_state:
st.session_state["preview_image"].image(preview_image)
if prompt_matrix or save_grid:
if prompt_matrix:
if simple_templating:
grid = image_grid(
output_images,
n_iter,
force_n_rows=frows,
captions=grid_captions,
)
else:
grid = image_grid(
output_images,
n_iter,
force_n_rows=1 << ((len(prompt_matrix_parts) - 1) // 2),
)
try:
grid = draw_prompt_matrix(
grid, width, height, prompt_matrix_parts
)
except Exception:
import traceback
logger.error("Error creating prompt_matrix text:")
logger.error(traceback.format_exc())
else:
grid = image_grid(output_images, batch_size)
if grid and (batch_size > 1 or n_iter > 1):
output_images.insert(0, grid)
grid_count = get_next_sequence_number(outpath, "grid-")
grid_file = f"grid-{grid_count:05}-{seed}_{slugify(prompts[i].replace(' ', '_')[:120-len(full_path)])}.{grid_ext}"
grid.save(
os.path.join(outpath, grid_file),
grid_format,
quality=grid_quality,
lossless=grid_lossless,
optimize=True,
)
time.time()
mem_max_used, mem_total = mem_mon.read_and_stop()
time_diff = time.time() - start_time
info = f"""
{prompt}
Steps: {steps}, Sampler: {sampler_name}, CFG scale: {cfg_scale}, Seed: {seed}{', Denoising strength: '+str(denoising_strength) if init_img is not None else ''}{', GFPGAN' if use_GFPGAN and server_state["GFPGAN"] is not None else ''}{', '+realesrgan_model_name if use_RealESRGAN and server_state["RealESRGAN"] is not None else ''}{', Prompt Matrix Mode.' if prompt_matrix else ''}""".strip()
stats = f"""
Took { round(time_diff, 2) }s total ({ round(time_diff/(len(all_prompts)),2) }s per image)
Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_048_576) } MiB / { round(mem_max_used/mem_total*100, 3) }%"""
for comment in comments:
info += "\n\n" + comment
# mem_mon.stop()
# del mem_mon
torch_gc()
return output_images, seed, info, stats |
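# Caller sketch (not part of the original source): txt2img-style code wires process_images
# up with two closures. The `sampler` object and its call below are assumptions for
# illustration only, not the actual sampler API used by this project.
#
#     def func_init():
#         return None  # txt2img has no init image to encode
#
#     def func_sample(init_data, x, conditioning, unconditional_conditioning, sampler_name):
#         return sampler.sample(conditioning, unconditional_conditioning, x)
#
#     output_images, seed, info, stats = process_images(
#         outpath, func_init, func_sample, prompt, seed, sampler_name,
#         save_grid=True, batch_size=1, n_iter=1, steps=50, cfg_scale=7.5,
#         width=512, height=512, prompt_matrix=False,
#     )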
helper function to spherically interpolate two arrays v0 v1 | def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
"""helper function to spherically interpolate two arrays v0 v1"""
inputs_are_torch = False
if not isinstance(v0, np.ndarray):
inputs_are_torch = True
input_device = v0.device
v0 = v0.cpu().numpy()
v1 = v1.cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(input_device)
return v2 |
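# Usage sketch (not part of the original source): spherically interpolating between two
# latent-noise tensors, as done between txt2vid keyframes further below.
import torch

init1 = torch.randn(1, 4, 64, 64)
init2 = torch.randn(1, 4, 64, 64)
halfway = slerp(0.5, init1, init2)  # same shape and device as the inputs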
Write frames (a directory of images or an in-memory tensor) to a video file with torchvision's PyAV backend, optionally muxing in an audio track.
frames_or_frame_dir: (Union[str, Path, torch.Tensor]):
Either a directory of images, or a tensor of shape (T, C, H, W) in range [0, 255]. | def make_video_pyav(
frames_or_frame_dir: Union[str, Path, torch.Tensor],
audio_filepath: Union[str, Path] = None,
fps: int = 30,
audio_offset: int = 0,
audio_duration: int = 2,
sr: int = 22050,
output_filepath: Union[str, Path] = "output.mp4",
glob_pattern: str = "*.png",
):
"""
Write frames (a directory of images or an in-memory tensor) to a video file with torchvision's PyAV backend, optionally muxing in an audio track.
frames_or_frame_dir: (Union[str, Path, torch.Tensor]):
Either a directory of images, or a tensor of shape (T, C, H, W) in range [0, 255].
"""
# Torchvision write_video doesn't support pathlib paths
output_filepath = str(output_filepath)
if isinstance(frames_or_frame_dir, (str, Path)):
frames = None
for img in sorted(Path(frames_or_frame_dir).glob(glob_pattern)):
frame = pil_to_tensor(Image.open(img)).unsqueeze(0)
frames = frame if frames is None else torch.cat([frames, frame])
else:
frames = frames_or_frame_dir
# TCHW -> THWC
frames = frames.permute(0, 2, 3, 1)
if audio_filepath:
# Read audio, convert to tensor
audio, sr = librosa.load(
audio_filepath,
sr=sr,
mono=True,
offset=audio_offset,
duration=audio_duration,
)
audio_tensor = torch.tensor(audio).unsqueeze(0)
write_video(
output_filepath,
frames,
fps=fps,
audio_array=audio_tensor,
audio_fps=sr,
audio_codec="aac",
options={"crf": "10", "pix_fmt": "yuv420p"},
)
else:
write_video(
output_filepath,
frames,
fps=fps,
options={"crf": "10", "pix_fmt": "yuv420p"},
)
return output_filepath |
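# Usage sketch (not part of the original source): writing a short silent clip from an
# in-memory (T, C, H, W) uint8 tensor; requires the PyAV backend that torchvision's
# write_video uses.
import torch

dummy_frames = (torch.rand(30, 3, 256, 256) * 255).to(torch.uint8)
make_video_pyav(dummy_frames, fps=30, output_filepath="dummy.mp4")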
prompt = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
#name:str = 'test', # name of this project, for the output directory
#rootdir:str = st.session_state['defaults'].general.outdir,
num_steps:int = 200, # number of steps between each pair of sampled points
max_duration_in_seconds:int = 10000, # number of frames to write and then exit the script
num_inference_steps:int = 50, # more (e.g. 100, 200 etc) can create slightly better images
cfg_scale:float = 5.0, # can depend on the prompt. usually somewhere between 3-10 is good
do_loop = False,
use_lerp_for_text = False,
seed = None,
quality:int = 100, # for jpeg compression of the output images
eta:float = 0.0,
width:int = 256,
height:int = 256,
weights_path = "runwayml/stable-diffusion-v1-5",
scheduler="klms", # choices: default, ddim, klms
disable_tqdm = False,
beta_start = 0.0001,
beta_end = 0.00012,
beta_schedule = "scaled_linear" | def txt2vid(
# --------------------------------------
# args you probably want to change
prompts=["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu: int = st.session_state["defaults"].general.gpu, # id of the gpu to run on
# name:str = 'test', # name of this project, for the output directory
# rootdir:str = st.session_state['defaults'].general.outdir,
num_steps: int = 200, # number of steps between each pair of sampled points
max_duration_in_seconds: int = 30, # number of frames to write and then exit the script
num_inference_steps: int = 50, # more (e.g. 100, 200 etc) can create slightly better images
cfg_scale: float = 5.0, # can depend on the prompt. usually somewhere between 3-10 is good
save_video=True,
save_video_on_stop=False,
outdir="outputs",
do_loop=False,
use_lerp_for_text=False,
seeds=None,
quality: int = 100, # for jpeg compression of the output images
eta: float = 0.0,
width: int = 256,
height: int = 256,
weights_path="runwayml/stable-diffusion-v1-5",
scheduler="klms", # choices: default, ddim, klms
disable_tqdm=False,
# -----------------------------------------------
beta_start=0.0001,
beta_end=0.00012,
beta_schedule="scaled_linear",
starting_image=None,
# -----------------------------------------------
# from new version
image_file_ext: Optional[str] = ".png",
fps: Optional[int] = 30,
upsample: Optional[bool] = False,
batch_size: Optional[int] = 1,
resume: Optional[bool] = False,
audio_filepath: str = None,
audio_start_sec: Optional[Union[int, float]] = None,
margin: Optional[float] = 1.0,
smooth: Optional[float] = 0.0,
):
"""
prompt = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
#name:str = 'test', # name of this project, for the output directory
#rootdir:str = st.session_state['defaults'].general.outdir,
num_steps:int = 200, # number of steps between each pair of sampled points
max_duration_in_seconds:int = 10000, # number of frames to write and then exit the script
num_inference_steps:int = 50, # more (e.g. 100, 200 etc) can create slightly better images
cfg_scale:float = 5.0, # can depend on the prompt. usually somewhere between 3-10 is good
do_loop = False,
use_lerp_for_text = False,
seed = None,
quality:int = 100, # for jpeg compression of the output images
eta:float = 0.0,
width:int = 256,
height:int = 256,
weights_path = "runwayml/stable-diffusion-v1-5",
scheduler="klms", # choices: default, ddim, klms
disable_tqdm = False,
beta_start = 0.0001,
beta_end = 0.00012,
beta_schedule = "scaled_linear"
"""
mem_mon = MemUsageMonitor("MemMon")
mem_mon.start()
seeds = seed_to_int(seeds)
# We add an extra frame because most
# of the time the first frame is just the noise.
# max_duration_in_seconds +=1
assert torch.cuda.is_available()
assert height % 8 == 0 and width % 8 == 0
torch.manual_seed(seeds)
torch_device = f"cuda:{gpu}"
if type(seeds) == list:
prompts = [prompts] * len(seeds)
else:
seeds = [seeds, random.randint(0, 2**32 - 1)]
if type(prompts) == list:
# init the output dir
sanitized_prompt = slugify(prompts[0])
else:
# init the output dir
sanitized_prompt = slugify(prompts)
full_path = os.path.join(
os.getcwd(),
st.session_state["defaults"].general.outdir,
"txt2vid",
"samples",
sanitized_prompt,
)
if len(full_path) > 220:
sanitized_prompt = sanitized_prompt[: 220 - len(full_path)]
full_path = os.path.join(
os.getcwd(),
st.session_state["defaults"].general.outdir,
"txt2vid",
"samples",
sanitized_prompt,
)
os.makedirs(full_path, exist_ok=True)
# Write prompt info to file in output dir so we can keep track of what we did
if st.session_state.write_info_files:
with open(
os.path.join(
full_path,
f"{slugify(str(seeds))}_config.json"
if len(prompts) > 1
else "prompts_config.json",
),
"w",
) as outfile:
outfile.write(
json.dumps(
dict(
prompts=prompts,
gpu=gpu,
num_steps=num_steps,
max_duration_in_seconds=max_duration_in_seconds,
num_inference_steps=num_inference_steps,
cfg_scale=cfg_scale,
do_loop=do_loop,
use_lerp_for_text=use_lerp_for_text,
seeds=seeds,
quality=quality,
eta=eta,
width=width,
height=height,
weights_path=weights_path,
scheduler=scheduler,
disable_tqdm=disable_tqdm,
beta_start=beta_start,
beta_end=beta_end,
beta_schedule=beta_schedule,
),
indent=2,
sort_keys=False,
)
)
# print(scheduler)
default_scheduler = PNDMScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
# ------------------------------------------------------------------------------
# Schedulers
ddim_scheduler = DDIMScheduler(
beta_start=beta_start,
beta_end=beta_end,
beta_schedule=beta_schedule,
clip_sample=False,
set_alpha_to_one=False,
)
klms_scheduler = LMSDiscreteScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
# flaxddims_scheduler = FlaxDDIMScheduler(
# beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
# )
# flaxddpms_scheduler = FlaxDDPMScheduler(
# beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
# )
# flaxpndms_scheduler = FlaxPNDMScheduler(
# beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
# )
ddpms_scheduler = DDPMScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
SCHEDULERS = dict(
default=default_scheduler,
ddim=ddim_scheduler,
klms=klms_scheduler,
ddpms=ddpms_scheduler,
# flaxddims=flaxddims_scheduler,
# flaxddpms=flaxddpms_scheduler,
# flaxpndms=flaxpndms_scheduler,
)
with no_rerun:
with st.session_state["progress_bar_text"].container():
with hc.HyLoader(
"Loading Models...", hc.Loaders.standard_loaders, index=[0]
):
load_diffusers_model(weights_path, torch_device)
if "pipe" not in server_state:
logger.error("pipe was not loaded into server_state")
server_state["pipe"].scheduler = SCHEDULERS[scheduler]
server_state["pipe"].use_multiprocessing_for_evaluation = False
server_state["pipe"].use_multiprocessed_decoding = False
# if do_loop:
##Makes the last prompt loop back to first prompt
# prompts = [prompts, prompts]
# seeds = [seeds, seeds]
# first_seed, *seeds = seeds
# prompts.append(prompts)
# seeds.append(first_seed)
with torch.autocast("cuda"):
# get the conditional text embeddings based on the prompt
text_input = server_state["pipe"].tokenizer(
prompts,
padding="max_length",
max_length=server_state["pipe"].tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
cond_embeddings = server_state["pipe"].text_encoder(
text_input.input_ids.to(torch_device)
)[0]
#
if st.session_state.defaults.general.use_sd_concepts_library:
prompt_tokens = re.findall("<([a-zA-Z0-9-]+)>", str(prompts))
if prompt_tokens:
# compviz
# tokenizer = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else st.session_state.modelCS).cond_stage_model.tokenizer
# text_encoder = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else st.session_state.modelCS).cond_stage_model.transformer
# diffusers
tokenizer = st.session_state.pipe.tokenizer
text_encoder = st.session_state.pipe.text_encoder
ext = ("pt", "bin")
# print (prompt_tokens)
if len(prompt_tokens) > 1:
for token_name in prompt_tokens:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
token_name,
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{token_name}>",
)
else:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
prompt_tokens[0],
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{prompt_tokens[0]}>",
)
# sample a source
init1 = torch.randn(
(1, server_state["pipe"].unet.in_channels, height // 8, width // 8),
device=torch_device,
)
# iterate the loop
frames = []
frame_index = 0
st.session_state["total_frames_avg_duration"] = []
st.session_state["total_frames_avg_speed"] = []
try:
# code for the new StableDiffusionWalkPipeline implementation.
start = timeit.default_timer()
overall_start = start  # `start` is reused per frame below; keep a separate overall timer
# preview image works, but this is not the right way to use it; it also does not work properly, as it only makes one image and then exits.
# with torch.autocast("cuda"):
# StableDiffusionWalkPipeline.__call__(self=server_state["pipe"],
# prompt=prompts, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=cfg_scale,
# negative_prompt="", num_images_per_prompt=1, eta=0.0,
# callback=txt2vid_generation_callback, callback_steps=1,
# num_interpolation_steps=num_steps,
# fps=30,
# image_file_ext = ".png",
# output_dir=full_path, # Where images/videos will be saved
##name='animals_test', # Subdirectory of output_dir where images/videos will be saved
# upsample = False,
##do_loop=do_loop, # Change to True if you want last prompt to loop back to first prompt
# resume = False,
# audio_filepath = None,
# audio_start_sec = None,
# margin = 1.0,
# smooth = 0.0, )
# works correctly, generating all frames, but does not show the preview image;
# we also have no control over the generation and can't stop it until it finishes.
# with torch.autocast("cuda"):
# print (prompts)
# video_path = server_state["pipe"].walk(
# prompt=prompts,
# seeds=seeds,
# num_interpolation_steps=num_steps,
# height=height, # use multiples of 64 if > 512. Multiples of 8 if < 512.
# width=width, # use multiples of 64 if > 512. Multiples of 8 if < 512.
# batch_size=4,
# fps=30,
# image_file_ext = ".png",
# eta = 0.0,
# output_dir=full_path, # Where images/videos will be saved
##name='test', # Subdirectory of output_dir where images/videos will be saved
# guidance_scale=cfg_scale, # Higher adheres to prompt more, lower lets model take the wheel
# num_inference_steps=num_inference_steps, # Number of diffusion steps per image generated. 50 is good default
# upsample = False,
##do_loop=do_loop, # Change to True if you want last prompt to loop back to first prompt
# resume = False,
# audio_filepath = None,
# audio_start_sec = None,
# margin = 1.0,
# smooth = 0.0,
# callback=txt2vid_generation_callback, # our callback function will be called with the arguments callback(step, timestep, latents)
# callback_steps=1 # our callback function will be called once this many steps are processed in a single frame
# )
# old code
total_frames = st.session_state.max_duration_in_seconds * fps
while frame_index + 1 <= total_frames:
st.session_state["frame_duration"] = 0
st.session_state["frame_speed"] = 0
st.session_state["current_frame"] = frame_index
# print(f"Second: {second_count+1}/{max_duration_in_seconds}")
# sample the destination
init2 = torch.randn(
(1, server_state["pipe"].unet.in_channels, height // 8, width // 8),
device=torch_device,
)
for i, t in enumerate(np.linspace(0, 1, num_steps)):
start = timeit.default_timer()
logger.info(f"COUNT: {frame_index+1}/{total_frames}")
if use_lerp_for_text:
init = torch.lerp(init1, init2, float(t))
else:
init = slerp(gpu, float(t), init1, init2)
# init = slerp(gpu, float(t), init1, init2)
with autocast("cuda"):
image = diffuse(
server_state["pipe"],
cond_embeddings,
init,
num_inference_steps,
cfg_scale,
eta,
fps=fps,
)
if (
st.session_state["save_individual_images"]
and not st.session_state["use_GFPGAN"]
and not st.session_state["use_RealESRGAN"]
):
# im = Image.fromarray(image)
outpath = os.path.join(full_path, "frame%06d.png" % frame_index)
image.save(outpath, quality=quality)
# send the image to the UI to update it
# st.session_state["preview_image"].image(im)
# append the frames to the frames list so we can use them later.
frames.append(np.asarray(image))
#
# try:
# if st.session_state["use_GFPGAN"] and server_state["GFPGAN"] is not None and not st.session_state["use_RealESRGAN"]:
if (
st.session_state["use_GFPGAN"]
and server_state["GFPGAN"] is not None
):
# print("Running GFPGAN on image ...")
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN on image ..."
)
# skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
np.array(image)[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
outpath = os.path.join(full_path, "frame%06d.png" % frame_index)
gfpgan_image.save(outpath, quality=quality)
# append the frames to the frames list so we can use them later.
frames.append(np.asarray(gfpgan_image))
try:
st.session_state["preview_image"].image(gfpgan_image)
except KeyError:
logger.error("Cant get session_state, skipping image preview.")
# except (AttributeError, KeyError):
# print("Cant perform GFPGAN, skipping.")
# increase frame_index counter.
frame_index += 1
st.session_state["current_frame"] = frame_index
duration = timeit.default_timer() - start
if duration >= 1:
speed = "s/it"
else:
speed = "it/s"
duration = 1 / duration
st.session_state["frame_duration"] = duration
st.session_state["frame_speed"] = speed
if frame_index + 1 > total_frames:
break
init1 = init2
# save the video after the generation is done.
video_path = save_video_to_disk(
frames, seeds, sanitized_prompt, save_video=save_video, outdir=outdir
)
except StopException:
# reset the page title so the percentage doesn't stay on it, confusing the user.
set_page_title("Stable Diffusion Playground")
if save_video_on_stop:
logger.info("Streamlit Stop Exception Received. Saving video")
video_path = save_video_to_disk(
frames, seeds, sanitized_prompt, save_video=save_video, outdir=outdir
)
else:
video_path = None
# if video_path and "preview_video" in st.session_state:
## show video preview on the UI
# st.session_state["preview_video"].video(open(video_path, 'rb').read())
mem_max_used, mem_total = mem_mon.read_and_stop()
time_diff = timeit.default_timer() - overall_start
info = f"""
{prompts}
Sampling Steps: {num_steps}, Sampler: {scheduler}, CFG scale: {cfg_scale}, Seed: {seeds}, Max Duration In Seconds: {max_duration_in_seconds}""".strip()
stats = f"""
Took { round(time_diff, 2) }s total ({ round(time_diff/(max_duration_in_seconds),2) }s per image)
Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_048_576) } MiB / { round(mem_max_used/mem_total*100, 3) }%"""
return video_path, seeds, info, stats |
this function tests if prompt is too long, and if so, adds a message to comments | def check_prompt_length(prompt, comments):
"""this function tests if prompt is too long, and if so, adds a message to comments"""
tokenizer = (model if not opt.optimized else modelCS).cond_stage_model.tokenizer
max_length = (model if not opt.optimized else modelCS).cond_stage_model.max_length
info = (model if not opt.optimized else modelCS).cond_stage_model.tokenizer(
[prompt],
truncation=True,
max_length=max_length,
return_overflowing_tokens=True,
padding="max_length",
return_tensors="pt",
)
ovf = info["overflowing_tokens"][0]
overflowing_count = ovf.shape[0]
if overflowing_count == 0:
return
vocab = {v: k for k, v in tokenizer.get_vocab().items()}
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = tokenizer.convert_tokens_to_string("".join(overflowing_words))
comments.append(
f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n"
) |
saves the image according to selected parameters. Expects to find generation parameters on image, set by ImageMetadata.set_on_image() | def save_sample(
image,
sample_path_i,
filename,
jpg_sample,
write_info_files,
write_sample_info_to_log_file,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
skip_save,
skip_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
skip_metadata=False,
):
"""saves the image according to selected parameters. Expects to find generation parameters on image, set by ImageMetadata.set_on_image()"""
metadata = ImageMetadata.get_from_image(image)
if not skip_metadata and metadata is None:
print(
"No metadata passed in to save. Set metadata on the image before calling save_sample using the ImageMetadata.set_on_image() function."
)
skip_metadata = True
filename_i = os.path.join(sample_path_i, filename)
if not jpg_sample:
if opt.save_metadata and not skip_metadata:
image.save(f"{filename_i}.png", pnginfo=metadata.as_png_info())
else:
image.save(f"{filename_i}.png")
else:
image.save(f"{filename_i}.jpg", "jpeg", quality=100, optimize=True)
if write_info_files or write_sample_info_to_log_file:
# toggles differ for txt2img vs. img2img:
offset = 0 if init_img is None else 2
toggles = []
if prompt_matrix:
toggles.append(0)
if metadata.normalize_prompt_weights:
toggles.append(1)
if init_img is not None:
if uses_loopback:
toggles.append(2)
if uses_random_seed_loopback:
toggles.append(3)
if not skip_save:
toggles.append(2 + offset)
if not skip_grid:
toggles.append(3 + offset)
if sort_samples:
toggles.append(4 + offset)
if write_info_files:
toggles.append(5 + offset)
if write_sample_info_to_log_file:
toggles.append(6 + offset)
if metadata.GFPGAN:
toggles.append(7 + offset)
info_dict = dict(
target="txt2img" if init_img is None else "img2img",
prompt=metadata.prompt,
ddim_steps=metadata.steps,
toggles=toggles,
sampler_name=sampler_name,
ddim_eta=ddim_eta,
n_iter=n_iter,
batch_size=batch_size,
cfg_scale=metadata.cfg_scale,
seed=metadata.seed,
width=metadata.width,
height=metadata.height,
)
if init_img is not None:
# Not yet any use for these, but they bloat up the files:
# info_dict["init_img"] = init_img
# info_dict["init_mask"] = init_mask
info_dict["denoising_strength"] = denoising_strength
info_dict["resize_mode"] = resize_mode
if write_info_files:
with open(f"{filename_i}.yaml", "w", encoding="utf8") as f:
yaml.dump(info_dict, f, allow_unicode=True, width=10000)
if write_sample_info_to_log_file:
ignore_list = ["prompt", "target", "toggles", "ddim_eta", "batch_size"]
rename_dict = {
"ddim_steps": "steps",
"n_iter": "number",
"sampler_name": "sampler",
} # changes the name of parameters to match with dynamic parameters
sample_log_path = os.path.join(sample_path_i, "log.yaml")
log_dump = info_dict.get(
"prompt"
) # making sure the first item that is listed in the txt is the prompt text
for key, value in info_dict.items():
if key in ignore_list:
continue
found_key = rename_dict.get(key)
if (
key == "cfg_scale"
): # pads cfg_scale with zeros, as needed for dynamic params
value = str(value).zfill(2)
if found_key:
key = found_key
log_dump += f" {key} {value}"
log_dump = (
log_dump + " \n"
) # space at the end for dynamic params to accept the last param
with open(sample_log_path, "a", encoding="utf8") as log_file:
log_file.write(log_dump) |
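As a worked illustration of the log line produced above (all values are hypothetical): for a txt2img sample with prompt "a cat", 30 steps, sampler "k_euler_a", one iteration, cfg_scale 7, seed 42 and a 512x512 size, the loop skips the ignored keys, renames the remaining ones and zero-pads cfg_scale, so the line appended to samples/log.yaml reads:
# a cat steps 30 sampler k_euler_a number 1 cfg_scale 07 seed 42 width 512 height 512
# (followed by a trailing space so the dynamic-params tooling can accept the last value)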
Determines and returns the next sequence number to use when saving an
image in the specified directory.
If a prefix is given, only consider files whose names start with that
prefix, and strip the prefix from filenames before extracting their
sequence number.
The sequence starts at 0. | def get_next_sequence_number(path, prefix=""):
"""
Determines and returns the next sequence number to use when saving an
image in the specified directory.
If a prefix is given, only consider files whose names start with that
prefix, and strip the prefix from filenames before extracting their
sequence number.
The sequence starts at 0.
"""
# Because when running in bridge-mode, we do not have a dir
if opt.bridge:
return 0
result = -1
for p in Path(path).iterdir():
if p.name.endswith((".png", ".jpg")) and p.name.startswith(prefix):
tmp = p.name[len(prefix) :]
try:
result = max(int(tmp.split("-")[0]), result)
except ValueError:
pass
return result + 1 |
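A small standalone sketch of the sequencing logic above, using hypothetical file names: a directory containing "00007-foo.png" and "00012-bar.jpg" yields 13, while an empty directory (or bridge mode) yields 0.
# Minimal illustration of the logic above (file names are hypothetical):
names = ["00007-foo.png", "00012-bar.jpg", "notes.txt"]
result = -1
for name in names:
    if name.endswith((".png", ".jpg")):
        try:
            result = max(int(name.split("-")[0]), result)
        except ValueError:
            pass
next_number = result + 1  # -> 13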
this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch | def process_images(
outpath,
func_init,
func_sample,
prompt,
seed,
sampler_name,
skip_grid,
skip_save,
batch_size,
n_iter,
steps,
cfg_scale,
width,
height,
prompt_matrix,
filter_nsfw,
use_GFPGAN,
use_RealESRGAN,
realesrgan_model_name,
fp,
ddim_eta=0.0,
do_not_save_grid=False,
normalize_prompt_weights=True,
init_img=None,
init_mask=None,
keep_mask=False,
mask_blur_strength=3,
mask_restore=False,
denoising_strength=0.75,
resize_mode=None,
uses_loopback=False,
uses_random_seed_loopback=False,
sort_samples=True,
write_info_files=True,
write_sample_info_to_log_file=False,
jpg_sample=False,
variant_amount=0.0,
variant_seed=None,
imgProcessorTask=False,
job_info: JobInfo = None,
do_color_correction=False,
correction_target=None,
):
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
def numpy_to_pil(images):
"""
Convert a numpy image or a batch of images to a PIL image.
"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
return pil_images
# load replacement of nsfw content
def load_replacement(x):
try:
hwc = x.shape
y = Image.open("images/nsfw.jpeg").convert("RGB").resize((hwc[1], hwc[0]))
y = (np.array(y) / 255.0).astype(x.dtype)
assert y.shape == x.shape
return y
except Exception:
return x
# check and replace nsfw content
def check_safety(x_image):
global safety_feature_extractor, safety_checker
if safety_feature_extractor is None:
safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
safety_model_id
)
safety_checker = StableDiffusionSafetyChecker.from_pretrained(
safety_model_id
)
safety_checker_input = safety_feature_extractor(
numpy_to_pil(x_image), return_tensors="pt"
)
x_checked_image, has_nsfw_concept = safety_checker(
images=x_image, clip_input=safety_checker_input.pixel_values
)
for i in range(len(has_nsfw_concept)):
if has_nsfw_concept[i]:
x_checked_image[i] = load_replacement(x_checked_image[i])
return x_checked_image, has_nsfw_concept
prompt = prompt or ""
torch_gc()
# start time after garbage collection (or before?)
start_time = time.time()
mem_mon = MemUsageMonitor("MemMon")
mem_mon.start()
if hasattr(model, "embedding_manager"):
load_embeddings(fp)
if not opt.bridge:
os.makedirs(outpath, exist_ok=True)
sample_path = os.path.join(outpath, "samples")
if not opt.bridge:
os.makedirs(sample_path, exist_ok=True)
if "|" not in prompt and prompt.startswith("@"):
prompt = prompt[1:]
negprompt = ""
if "###" in prompt:
prompt, negprompt = prompt.split("###", 1)
prompt = prompt.strip()
negprompt = negprompt.strip()
comments = []
prompt_matrix_parts = []
simple_templating = False
add_original_image = True
if prompt_matrix:
if prompt.startswith("@"):
simple_templating = True
add_original_image = not (use_RealESRGAN or use_GFPGAN)
all_seeds, n_iter, prompt_matrix_parts, all_prompts, frows = oxlamon_matrix(
prompt, seed, n_iter, batch_size
)
else:
all_prompts = []
prompt_matrix_parts = prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
current = prompt_matrix_parts[0]
for n, text in enumerate(prompt_matrix_parts[1:]):
if combination_num & (2**n) > 0:
current += ("" if text.strip().startswith(",") else ", ") + text
all_prompts.append(current)
n_iter = math.ceil(len(all_prompts) / batch_size)
all_seeds = len(all_prompts) * [seed]
print(
f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches."
)
else:
if not opt.no_verify_input:
try:
check_prompt_length(prompt, comments)
except:
import traceback
print("Error verifying input:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
all_prompts = batch_size * n_iter * [prompt]
all_seeds = [seed + x for x in range(len(all_prompts))]
original_seeds = all_seeds.copy()
precision_scope = autocast if opt.precision == "autocast" else nullcontext
if job_info:
output_images = job_info.images
else:
output_images = []
grid_captions = []
stats = []
with torch.no_grad(), precision_scope("cuda"), (
model.ema_scope() if not opt.optimized else nullcontext()
):
init_data = func_init()
time.time()
# if variant_amount > 0.0 create noise from base seed
base_x = None
if variant_amount > 0.0:
target_seed_randomizer = seed_to_int("") # random seed
torch.manual_seed(
seed
) # this has to be the single starting seed (not per-iteration)
base_x = create_random_tensors(
[opt_C, height // opt_f, width // opt_f], seeds=[seed]
)
# we don't want all_seeds to be sequential from starting seed with variants,
# since that makes the same variants each time,
# so we add target_seed_randomizer as a random offset
for si in range(len(all_seeds)):
all_seeds[si] += target_seed_randomizer
for n in range(n_iter):
if job_info and job_info.should_stop.is_set():
print("Early exit requested")
break
print(f"Iteration: {n+1}/{n_iter}")
prompts = all_prompts[n * batch_size : (n + 1) * batch_size]
captions = prompt_matrix_parts[n * batch_size : (n + 1) * batch_size]
seeds = all_seeds[n * batch_size : (n + 1) * batch_size]
current_seeds = original_seeds[n * batch_size : (n + 1) * batch_size]
if job_info:
job_info.job_status = (
f"Processing Iteration {n+1}/{n_iter}. Batch size {batch_size}"
)
job_info.rec_steps_imgs.clear()
for idx, (p, s) in enumerate(zip(prompts, seeds)):
job_info.job_status += f"\nItem {idx}: Seed {s}\nPrompt: {p}"
print(f"Current prompt: {p}")
if opt.optimized:
modelCS.to(device)
uc = (model if not opt.optimized else modelCS).get_learned_conditioning(
len(prompts) * [negprompt]
)
if isinstance(prompts, tuple):
prompts = list(prompts)
# split the prompt if it has : for weighting
# TODO for speed it might help to have this occur when all_prompts filled??
weighted_subprompts = split_weighted_subprompts(
prompts[0], normalize_prompt_weights
)
# sub-prompt weighting used if more than 1
if len(weighted_subprompts) > 1:
c = torch.zeros_like(
uc
) # I don't know if this is correct, but it works
for i in range(0, len(weighted_subprompts)):
# note if alpha negative, it functions same as torch.sub
c = torch.add(
c,
(
model if not opt.optimized else modelCS
).get_learned_conditioning(weighted_subprompts[i][0]),
alpha=weighted_subprompts[i][1],
)
else: # just behave like usual
c = (model if not opt.optimized else modelCS).get_learned_conditioning(
prompts
)
shape = [opt_C, height // opt_f, width // opt_f]
if opt.optimized:
mem = torch.cuda.memory_allocated() / 1e6
modelCS.to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
cur_variant_amount = variant_amount
if variant_amount == 0.0:
# we manually generate all input noises because each one should have a specific seed
x = create_random_tensors(shape, seeds=seeds)
else: # we are making variants
# using variant_seed as sneaky toggle,
# when not None or '' use the variant_seed
# otherwise use seeds
if variant_seed is not None and variant_seed != "":
specified_variant_seed = seed_to_int(variant_seed)
torch.manual_seed(specified_variant_seed)
target_x = create_random_tensors(
shape, seeds=[specified_variant_seed]
)
# with a fixed variant seed we would otherwise end up with the same variant every
# time, since the base seed does not change. Instead we scale the amount with the
# iteration index so the output deviates more and more from the original image,
# letting us adjust how far we go (e.g. 10 iterations with a variation amount of
# 0.02 produce an increasingly varied image, which is very interesting for movies)
cur_variant_amount += n * variant_amount
else:
target_x = create_random_tensors(shape, seeds=seeds)
# finally, slerp base_x noise to target_x noise for creating a variant
x = slerp(
device, max(0.0, min(1.0, cur_variant_amount)), base_x, target_x
)
# If optimized then use first stage for preview and store it on cpu until needed
if opt.optimized:
step_preview_model = modelFS
step_preview_model.cpu()
else:
step_preview_model = model
def sample_iteration_callback(image_sample: torch.Tensor, iter_num: int):
"""Called from the sampler every iteration"""
if job_info:
job_info.active_iteration_cnt = iter_num
record_periodic_image = job_info.rec_steps_enabled and (
0 == iter_num % job_info.rec_steps_intrvl
)
if (
record_periodic_image
or job_info.refresh_active_image_requested.is_set()
):
preview_start_time = time.time()
if opt.optimized:
step_preview_model.to(device)
decoded_batch: List[torch.Tensor] = []
# Break up batch to save VRAM
for sample in image_sample:
sample = sample[
None, :
] # expands the tensor as if it still had a batch dimension
decoded_sample = step_preview_model.decode_first_stage(
sample
)[0]
decoded_sample = torch.clamp(
(decoded_sample + 1.0) / 2.0, min=0.0, max=1.0
)
decoded_sample = decoded_sample.cpu()
decoded_batch.append(decoded_sample)
batch_size = len(decoded_batch)
if opt.optimized:
step_preview_model.cpu()
images: List[Image.Image] = []
# Convert tensor to image (copied from code below)
for ddim in decoded_batch:
x_sample = 255.0 * rearrange(ddim.numpy(), "c h w -> h w c")
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
images.append(image)
caption = f"Iter {iter_num}"
grid = image_grid(
images,
len(images),
force_n_rows=1,
captions=[caption] * len(images),
)
# Save the images if recording steps, and append existing saved steps
if job_info.rec_steps_enabled:
gallery_img_size = tuple(
int(0.25 * dim) for dim in images[0].size
)
job_info.rec_steps_imgs.append(
grid.resize(gallery_img_size)
)
# Notify the requester that the image is updated
if job_info.refresh_active_image_requested.is_set():
if job_info.rec_steps_enabled:
grid_rows = (
None
if batch_size == 1
else len(job_info.rec_steps_imgs)
)
grid = image_grid(
imgs=job_info.rec_steps_imgs[::-1],
batch_size=1,
force_n_rows=grid_rows,
)
job_info.active_image = grid
job_info.refresh_active_image_done.set()
job_info.refresh_active_image_requested.clear()
preview_elapsed_timed = time.time() - preview_start_time
if preview_elapsed_timed / job_info.rec_steps_intrvl > 1:
print(
f"Warning: Preview generation is slowing image generation. It took {preview_elapsed_timed:.2f}s to generate progress images for batch of {batch_size} images!"
)
# Interrupt current iteration?
if job_info.stop_cur_iter.is_set():
job_info.stop_cur_iter.clear()
raise StopIteration()
try:
samples_ddim = func_sample(
init_data=init_data,
x=x,
conditioning=c,
unconditional_conditioning=uc,
sampler_name=sampler_name,
img_callback=sample_iteration_callback,
)
except StopIteration:
print("Skipping iteration")
job_info.job_status = "Skipping iteration"
continue
if opt.optimized:
modelFS.to(device)
for i in range(len(samples_ddim)):
x_samples_ddim = (
model if not opt.optimized else modelFS
).decode_first_stage(samples_ddim[i].unsqueeze(0))
x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
if filter_nsfw:
x_samples_ddim_numpy = x_sample.cpu().permute(0, 2, 3, 1).numpy()
x_checked_image, has_nsfw_concept = check_safety(
x_samples_ddim_numpy
)
x_sample = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2)
sanitized_prompt = (
prompts[i]
.replace(" ", "_")
.translate({ord(x): "" for x in invalid_filename_chars})
)
if variant_seed is not None and variant_seed != "":
if variant_amount == 0.0:
seed_used = f"{current_seeds[i]}-{variant_seed}"
else:
seed_used = f"{seed}-{variant_seed}"
else:
seed_used = f"{current_seeds[i]}"
if sort_samples:
sanitized_prompt = sanitized_prompt[:128] # 200 is too long
sample_path_i = os.path.join(sample_path, sanitized_prompt)
if not opt.bridge:
os.makedirs(sample_path_i, exist_ok=True)
base_count = get_next_sequence_number(sample_path_i)
filename = (
opt.filename_format
or "[STEPS]_[SAMPLER]_[SEED]_[VARIANT_AMOUNT]"
)
else:
sample_path_i = sample_path
base_count = get_next_sequence_number(sample_path_i)
filename = (
opt.filename_format
or "[STEPS]_[SAMPLER]_[SEED]_[VARIANT_AMOUNT]_[PROMPT]"
)
# Add new filenames tags here
filename = f"{base_count:05}-" + filename
filename = filename.replace("[STEPS]", str(steps))
filename = filename.replace("[CFG]", str(cfg_scale))
filename = filename.replace("[PROMPT]", sanitized_prompt[:128])
filename = filename.replace(
"[PROMPT_SPACES]",
prompts[i].translate({ord(x): "" for x in invalid_filename_chars})[
:128
],
)
filename = filename.replace("[WIDTH]", str(width))
filename = filename.replace("[HEIGHT]", str(height))
filename = filename.replace("[SAMPLER]", sampler_name)
filename = filename.replace("[SEED]", seed_used)
filename = filename.replace(
"[VARIANT_AMOUNT]", f"{cur_variant_amount:.2f}"
)
x_sample = 255.0 * rearrange(
x_sample[0].cpu().numpy(), "c h w -> h w c"
)
x_sample = x_sample.astype(np.uint8)
metadata = ImageMetadata(
prompt=prompts[i],
seed=seeds[i],
height=height,
width=width,
steps=steps,
cfg_scale=cfg_scale,
normalize_prompt_weights=normalize_prompt_weights,
denoising_strength=denoising_strength,
GFPGAN=use_GFPGAN,
)
image = Image.fromarray(x_sample)
image = perform_color_correction(
image, correction_target, do_color_correction
)
ImageMetadata.set_on_image(image, metadata)
original_sample = x_sample
original_filename = filename
if use_GFPGAN and GFPGAN is not None and not use_RealESRGAN:
skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = GFPGAN.enhance(
original_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
gfpgan_image = perform_color_correction(
gfpgan_image, correction_target, do_color_correction
)
gfpgan_image = perform_masked_image_restoration(
gfpgan_image,
init_img,
init_mask,
mask_blur_strength,
mask_restore,
use_RealESRGAN=False,
RealESRGAN=None,
)
gfpgan_metadata = copy.copy(metadata)
gfpgan_metadata.GFPGAN = True
ImageMetadata.set_on_image(gfpgan_image, gfpgan_metadata)
gfpgan_filename = original_filename + "-gfpgan"
save_sample(
gfpgan_image,
sample_path_i,
gfpgan_filename,
jpg_sample,
write_info_files,
write_sample_info_to_log_file,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
skip_save,
skip_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
skip_metadata=False,
)
output_images.append(gfpgan_image) # 287
# if simple_templating:
# grid_captions.append( captions[i] + "\ngfpgan" )
if use_RealESRGAN and RealESRGAN is not None and not use_GFPGAN:
skip_save = True # #287 >_>
torch_gc()
output, img_mode = RealESRGAN.enhance(original_sample[:, :, ::-1])
esrgan_filename = original_filename + "-esrgan4x"
esrgan_sample = output[:, :, ::-1]
esrgan_image = Image.fromarray(esrgan_sample)
esrgan_image = perform_color_correction(
esrgan_image, correction_target, do_color_correction
)
esrgan_image = perform_masked_image_restoration(
esrgan_image,
init_img,
init_mask,
mask_blur_strength,
mask_restore,
use_RealESRGAN,
RealESRGAN,
)
ImageMetadata.set_on_image(esrgan_image, metadata)
save_sample(
esrgan_image,
sample_path_i,
esrgan_filename,
jpg_sample,
write_info_files,
write_sample_info_to_log_file,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
skip_save,
skip_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
skip_metadata=False,
)
output_images.append(esrgan_image) # 287
# if simple_templating:
# grid_captions.append( captions[i] + "\nesrgan" )
if (
use_RealESRGAN
and RealESRGAN is not None
and use_GFPGAN
and GFPGAN is not None
):
skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = GFPGAN.enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
output, img_mode = RealESRGAN.enhance(gfpgan_sample[:, :, ::-1])
gfpgan_esrgan_filename = original_filename + "-gfpgan-esrgan4x"
gfpgan_esrgan_sample = output[:, :, ::-1]
gfpgan_esrgan_image = Image.fromarray(gfpgan_esrgan_sample)
gfpgan_esrgan_image = perform_color_correction(
gfpgan_esrgan_image, correction_target, do_color_correction
)
gfpgan_esrgan_image = perform_masked_image_restoration(
gfpgan_esrgan_image,
init_img,
init_mask,
mask_blur_strength,
mask_restore,
use_RealESRGAN,
RealESRGAN,
)
ImageMetadata.set_on_image(gfpgan_esrgan_image, metadata)
save_sample(
gfpgan_esrgan_image,
sample_path_i,
gfpgan_esrgan_filename,
jpg_sample,
write_info_files,
write_sample_info_to_log_file,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
skip_save,
skip_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
skip_metadata=False,
)
output_images.append(gfpgan_esrgan_image) # 287
# if simple_templating:
# grid_captions.append( captions[i] + "\ngfpgan_esrgan" )
# this flag is used for imgProcessorTasks like GoBig, will return the image without saving it
if imgProcessorTask is True:
output_images.append(image)
image = perform_masked_image_restoration(
image,
init_img,
init_mask,
mask_blur_strength,
mask_restore,
# RealESRGAN image already processed in if-case above.
use_RealESRGAN=False,
RealESRGAN=None,
)
if not skip_save:
save_sample(
image,
sample_path_i,
filename,
jpg_sample,
write_info_files,
write_sample_info_to_log_file,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
skip_save,
skip_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
)
if add_original_image or not simple_templating:
output_images.append(image)
if simple_templating:
grid_captions.append(captions[i])
# Save the progress images?
if job_info:
if job_info.rec_steps_enabled and (
job_info.rec_steps_to_file or job_info.rec_steps_to_gallery
):
steps_grid = image_grid(job_info.rec_steps_imgs, 1)
if job_info.rec_steps_to_gallery:
gallery_img_size = tuple(2 * dim for dim in image.size)
output_images.append(steps_grid.resize(gallery_img_size))
if job_info.rec_steps_to_file:
steps_grid_filename = f"{original_filename}_step_grid"
save_sample(
steps_grid,
sample_path_i,
steps_grid_filename,
jpg_sample,
write_info_files,
write_sample_info_to_log_file,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
skip_save,
skip_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
)
if opt.optimized:
mem = torch.cuda.memory_allocated() / 1e6
modelFS.to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
if (prompt_matrix or not skip_grid) and not do_not_save_grid:
grid = None
if prompt_matrix:
if simple_templating:
grid = image_grid(
output_images,
batch_size,
force_n_rows=frows,
captions=grid_captions,
)
else:
grid = image_grid(
output_images,
batch_size,
force_n_rows=1 << ((len(prompt_matrix_parts) - 1) // 2),
)
try:
grid = draw_prompt_matrix(
grid, width, height, prompt_matrix_parts
)
except:
import traceback
print("Error creating prompt_matrix text:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
elif len(output_images) > 0 and (batch_size > 1 or n_iter > 1):
grid = image_grid(output_images, batch_size)
if grid is not None:
grid_count = get_next_sequence_number(outpath, "grid-")
grid_file = f"grid-{grid_count:05}-{seed}_{prompts[i].replace(' ', '_').translate({ord(x): '' for x in invalid_filename_chars})[:128]}.{grid_ext}"
grid.save(
os.path.join(outpath, grid_file),
grid_format,
quality=grid_quality,
lossless=grid_lossless,
optimize=True,
)
if prompt_matrix:
output_images.append(grid)
time.time()
mem_max_used, mem_total = mem_mon.read_and_stop()
time_diff = time.time() - start_time
args_and_names = {
"seed": seed,
"width": width,
"height": height,
"steps": steps,
"cfg_scale": cfg_scale,
"sampler": sampler_name,
}
full_string = f"{prompt}\n" + " ".join([f"{k}:" for k, v in args_and_names.items()])
info = {
"text": full_string,
"entities": [
{
"entity": str(v),
"start": full_string.find(f"{k}:"),
"end": full_string.find(f"{k}:") + len(f"{k} "),
}
for k, v in args_and_names.items()
],
}
# info = f"""
# {prompt} --seed {seed} --W {width} --H {height} -s {steps} -C {cfg_scale} --sampler {sampler_name} {', Denoising strength: '+str(denoising_strength) if init_img is not None else ''}{', GFPGAN' if use_GFPGAN and GFPGAN is not None else ''}{', '+realesrgan_model_name if use_RealESRGAN and RealESRGAN is not None else ''}{', Prompt Matrix Mode.' if prompt_matrix else ''}""".strip()
stats = f"""
Took { round(time_diff, 2) }s total ({ round(time_diff/(len(all_prompts)),2) }s per image)
Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_048_576) } MiB / { round(mem_max_used/mem_total*100, 3) }%"""
for comment in comments:
info["text"] += "\n\n" + comment
# mem_mon.stop()
# del mem_mon
torch_gc()
return output_images, seed, info, stats |
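As a concrete illustration of the prompt-matrix expansion performed near the top of process_images (ignoring the @-templating path): a prompt with two `|` separators produces 2**(len(parts)-1) = 4 combinations, each one starting from the first part.
# "a city street|at night|in the rain" expands to:
#   "a city street"
#   "a city street, at night"
#   "a city street, in the rain"
#   "a city street, at night, in the rain"
prompt_matrix_parts = "a city street|at night|in the rain".split("|")
all_prompts = []
for combination_num in range(2 ** (len(prompt_matrix_parts) - 1)):
    current = prompt_matrix_parts[0]
    for n, text in enumerate(prompt_matrix_parts[1:]):
        if combination_num & (2 ** n) > 0:
            current += ("" if text.strip().startswith(",") else ", ") + text
    all_prompts.append(current)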
Layout functions to define all the streamlit layout here. | def layout():
"""Layout functions to define all the streamlit layout here."""
if not st.session_state["defaults"].debug.enable_hydralit:
st.set_page_config(
page_title="Stable Diffusion Playground",
layout="wide",
initial_sidebar_state="collapsed",
)
# app = st.HydraApp(title='Stable Diffusion WebUI', favicon="", sidebar_state="expanded", layout="wide",
# hide_streamlit_markers=False, allow_url_nav=True , clear_cross_app_sessions=False)
# load css as an external file; the function has an option to use a local or remote url. Potentially useful when running from cloud infra that might not have access to a local path.
load_css(True, "frontend/css/streamlit.main.css")
#
# specify the primary menu definition
menu_data = [
{
"id": "Stable Diffusion",
"label": "Stable Diffusion",
"icon": "bi bi-grid-1x2-fill",
},
{
"id": "Train",
"label": "Train",
"icon": "bi bi-lightbulb-fill",
"submenu": [
{
"id": "Textual Inversion",
"label": "Textual Inversion",
"icon": "bi bi-lightbulb-fill",
},
{
"id": "Fine Tunning",
"label": "Fine Tunning",
"icon": "bi bi-lightbulb-fill",
},
],
},
{
"id": "Model Manager",
"label": "Model Manager",
"icon": "bi bi-cloud-arrow-down-fill",
},
{
"id": "Tools",
"label": "Tools",
"icon": "bi bi-tools",
"submenu": [
{"id": "API Server", "label": "API Server", "icon": "bi bi-server"},
{
"id": "Barfi/BaklavaJS",
"label": "Barfi/BaklavaJS",
"icon": "bi bi-diagram-3-fill",
},
# {'id': 'API Server', 'label': 'API Server', 'icon': 'bi bi-server'},
],
},
{"id": "Settings", "label": "Settings", "icon": "bi bi-gear-fill"},
]
over_theme = {"txc_inactive": "#FFFFFF", "menu_background": "#000000"}
menu_id = hc.nav_bar(
menu_definition=menu_data,
# home_name='Home',
# login_name='Logout',
hide_streamlit_markers=False,
override_theme=over_theme,
sticky_nav=True,
sticky_mode="pinned",
)
#
# if menu_id == "Home":
# st.info("Under Construction. :construction_worker:")
if menu_id == "Stable Diffusion":
# set the page url and title
# st.experimental_set_query_params(page='stable-diffusion')
try:
set_page_title("Stable Diffusion Playground")
except NameError:
st.experimental_rerun()
(
txt2img_tab,
img2img_tab,
txt2vid_tab,
img2txt_tab,
post_processing_tab,
concept_library_tab,
) = st.tabs(
[
"Text-to-Image",
"Image-to-Image",
# "Inpainting",
"Text-to-Video",
"Image-To-Text",
"Post-Processing",
"Concept Library",
]
)
# with home_tab:
# from home import layout
# layout()
with txt2img_tab:
from txt2img import layout
layout()
with img2img_tab:
from img2img import layout
layout()
# with inpainting_tab:
# from inpainting import layout
# layout()
with txt2vid_tab:
from txt2vid import layout
layout()
with img2txt_tab:
from img2txt import layout
layout()
with post_processing_tab:
from post_processing import layout
layout()
with concept_library_tab:
from sd_concept_library import layout
layout()
#
elif menu_id == "Model Manager":
set_page_title("Model Manager - Stable Diffusion Playground")
from ModelManager import layout
layout()
elif menu_id == "Textual Inversion":
from textual_inversion import layout
layout()
elif menu_id == "Fine Tunning":
# from textual_inversion import layout
# layout()
st.info("Under Construction. :construction_worker:")
elif menu_id == "API Server":
set_page_title("API Server - Stable Diffusion Playground")
from APIServer import layout
layout()
elif menu_id == "Barfi/BaklavaJS":
set_page_title("Barfi/BaklavaJS - Stable Diffusion Playground")
from barfi_baklavajs import layout
layout()
elif menu_id == "Settings":
set_page_title("Settings - Stable Diffusion Playground")
from Settings import layout
layout()
# calling the draggable input component module at the end, so it works on all pages
draggable_number_input.load() |
Simple function that allows us to change the title dynamically.
Normally you can use `st.set_page_config` to change the title but it can only be used once per app. | def set_page_title(title):
"""
Simple function that allows us to change the title dynamically.
Normally you can use `st.set_page_config` to change the title but it can only be used once per app.
"""
st.sidebar.markdown(
unsafe_allow_html=True,
body=f"""
<iframe height=0 srcdoc="<script>
const title = window.parent.document.querySelector('title') \
const oldObserver = window.parent.titleObserver
if (oldObserver) {{
oldObserver.disconnect()
}} \
const newObserver = new MutationObserver(function(mutations) {{
const target = mutations[0].target
if (target.text !== '{title}') {{
target.text = '{title}'
}}
}}) \
newObserver.observe(title, {{ childList: true }})
window.parent.titleObserver = newObserver \
title.text = '{title}'
</script>" />
""",
) |
Return a human readable size from bytes. | def human_readable_size(size, decimal_places=3):
"""Return a human readable size from bytes."""
for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
break
size /= 1024.0
return f"{size:.{decimal_places}f}{unit}" |
Load the different models. We also reuse the models that are already in memory to speed things up instead of loading them again. | def load_models(
use_LDSR=False,
LDSR_model="model",
use_GFPGAN=False,
GFPGAN_model="GFPGANv1.4",
use_RealESRGAN=False,
RealESRGAN_model="RealESRGAN_x4plus",
CustomModel_available=False,
custom_model="Stable Diffusion v1.5",
):
"""Load the different models. We also reuse the models that are already in memory to speed things up instead of loading them again."""
# model_manager.init()
logger.info("Loading models.")
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text("")
# check what models we want to use and if they are already loaded.
with server_state_lock["LDSR"]:
if use_LDSR:
if "LDSR" in server_state and server_state["LDSR"].name == LDSR_model:
logger.info("LDSR already loaded")
else:
if "LDSR" in server_state:
del server_state["LDSR"]
# Load LDSR
if os.path.exists(st.session_state["defaults"].general.LDSR_dir):
try:
server_state["LDSR"] = load_LDSR(model_name=LDSR_model)
logger.info("Loaded LDSR")
except Exception:
import traceback
logger.error("Error loading LDSR:", file=sys.stderr)
logger.error(traceback.format_exc(), file=sys.stderr)
else:
if "LDSR" in server_state and not server_state["keep_all_models_loaded"]:
logger.debug(
"LDSR was in memory but we won't use it. Removing to save VRAM."
)
del server_state["LDSR"]
with server_state_lock["GFPGAN"]:
if use_GFPGAN:
if "GFPGAN" in server_state and server_state["GFPGAN"].name == GFPGAN_model:
logger.info("GFPGAN already loaded")
else:
if "GFPGAN" in server_state:
del server_state["GFPGAN"]
# Load GFPGAN
if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
try:
server_state["GFPGAN"] = load_GFPGAN(GFPGAN_model)
logger.info(f"Loaded GFPGAN: {GFPGAN_model}")
except Exception:
import traceback
logger.error("Error loading GFPGAN:", file=sys.stderr)
logger.error(traceback.format_exc(), file=sys.stderr)
else:
if "GFPGAN" in server_state and not server_state["keep_all_models_loaded"]:
del server_state["GFPGAN"]
with server_state_lock["RealESRGAN"]:
if use_RealESRGAN:
if (
"RealESRGAN" in server_state
and server_state["RealESRGAN"].model.name == RealESRGAN_model
):
logger.info("RealESRGAN already loaded")
else:
# Load RealESRGAN
try:
# We first remove the variable in case it has something there,
# some errors can load the model incorrectly and leave things in memory.
del server_state["RealESRGAN"]
except KeyError as e:
logger.error(e)
pass
if os.path.exists(st.session_state["defaults"].general.RealESRGAN_dir):
# server_state is used for keeping the models in memory across multiple pages or runs.
server_state["RealESRGAN"] = load_RealESRGAN(RealESRGAN_model)
logger.info(
"Loaded RealESRGAN with model "
+ server_state["RealESRGAN"].model.name
)
else:
if (
"RealESRGAN" in server_state
and not server_state["keep_all_models_loaded"]
):
del server_state["RealESRGAN"]
with server_state_lock["model"], server_state_lock["modelCS"], server_state_lock[
"modelFS"
], server_state_lock["loaded_model"]:
if "model" in server_state:
if "model" in server_state and server_state["loaded_model"] == custom_model:
# if the float16 or no_half options have changed since the last time the model was loaded then we need to reload the model.
if (
(
"float16" in server_state
and server_state["float16"]
!= st.session_state["defaults"].general.use_float16
)
or (
"no_half" in server_state
and server_state["no_half"]
!= st.session_state["defaults"].general.no_half
)
or (
"optimized" in server_state
and server_state["optimized"]
!= st.session_state["defaults"].general.optimized
)
):
logger.info(
"Model options changed, deleting the model from memory."
)
del server_state["float16"]
del server_state["no_half"]
del server_state["model"]
del server_state["modelCS"]
del server_state["modelFS"]
del server_state["loaded_model"]
del server_state["optimized"]
server_state["float16"] = st.session_state[
"defaults"
].general.use_float16
server_state["no_half"] = st.session_state[
"defaults"
].general.no_half
server_state["optimized"] = st.session_state[
"defaults"
].general.optimized
load_models(
use_LDSR=st.session_state["use_LDSR"],
LDSR_model=st.session_state["LDSR_model"],
use_GFPGAN=st.session_state["use_GFPGAN"],
GFPGAN_model=st.session_state["GFPGAN_model"],
use_RealESRGAN=st.session_state["use_RealESRGAN"],
RealESRGAN_model=st.session_state["RealESRGAN_model"],
CustomModel_available=server_state["CustomModel_available"],
custom_model=st.session_state["custom_model"],
)
else:
logger.info("Model already loaded")
return
else:
try:
del server_state["model"]
del server_state["modelCS"]
del server_state["modelFS"]
del server_state["loaded_model"]
except KeyError as e:
logger.error(e)
pass
# if the model from txt2vid is in memory we need to remove it to improve performance.
with server_state_lock["pipe"]:
if "pipe" in server_state and not server_state["keep_all_models_loaded"]:
del server_state["pipe"]
if (
"textual_inversion" in st.session_state
and not server_state["keep_all_models_loaded"]
):
del st.session_state["textual_inversion"]
# At this point the model is either
# not loaded yet or have been deleted from memory:
# load new model into memory
server_state["custom_model"] = custom_model
config, device, model, modelCS, modelFS = load_sd_model(custom_model)
server_state["device"] = device
server_state["model"] = model
server_state["modelCS"] = modelCS
server_state["modelFS"] = modelFS
server_state["loaded_model"] = custom_model
server_state["float16"] = st.session_state["defaults"].general.use_float16
server_state["no_half"] = st.session_state["defaults"].general.no_half
server_state["optimized"] = st.session_state["defaults"].general.optimized
# trying to disable multiprocessing, as it prevents streamlit from stopping when the
# model is loaded in memory, sometimes forcing you to kill the process.
try:
server_state["model"].args.use_multiprocessing_for_evaluation = False
except AttributeError:
pass
if st.session_state.defaults.general.enable_attention_slicing:
server_state["model"].enable_attention_slicing()
if st.session_state.defaults.general.enable_minimal_memory_usage:
server_state["model"].enable_minimal_memory_usage()
logger.info("Model loaded.")
return True |
Appends dimensions to the end of a tensor until it has target_dims dimensions. | def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(
f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
)
return x[(...,) + (None,) * dims_to_append] |
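A small usage sketch: broadcasting a per-sample sigma over a batch of image tensors (shapes are illustrative).
import torch

sigma = torch.tensor([1.0, 2.0])          # shape [2]
x = torch.randn(2, 3, 64, 64)             # shape [2, 3, 64, 64]
sigma_b = append_dims(sigma, x.ndim)      # shape [2, 1, 1, 1]
noised = x + sigma_b * torch.randn_like(x)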
Constructs the noise schedule of Karras et al. (2022). | def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
"""Constructs the noise schedule of Karras et al. (2022)."""
ramp = torch.linspace(0, 1, n)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return append_zero(sigmas).to(device) |
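Equivalently, the schedule interpolates linearly in sigma**(1/rho) space between sigma_max and sigma_min and appends a final zero, i.e. sigma_i = (sigma_max**(1/rho) + i/(n-1) * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho for i = 0..n-1. A short usage sketch with illustrative Stable Diffusion-like values:
sigmas = get_sigmas_karras(n=10, sigma_min=0.03, sigma_max=14.6, rho=7.0)
# sigmas has 11 entries: sigmas[0] ~ 14.6, sigmas[-2] ~ 0.03 and sigmas[-1] == 0,
# ready to be fed to a k-diffusion style sampler from the highest sigma down to zero.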
Explanation:
Getting good results in/out-painting with stable diffusion can be challenging.
Although there are simpler effective solutions for in-painting, out-painting can be especially challenging because there is no color data
in the masked area to help prompt the generator. Ideally, even for in-painting we'd like to work effectively without that data as well.
Provided here is my take on a potential solution to this problem.
By taking a Fourier transform of the masked src img we get a function that tells us the presence and orientation of each feature scale in the unmasked src.
Shaping the init/seed noise for in/outpainting to the same distribution of feature scales, orientations, and positions increases output coherence
by helping keep features aligned. This technique is applicable to any continuous generation task such as audio or video, each of which can
be conceptualized as a series of out-painting steps where the last half of the input "frame" is erased. For multi-channel data such as color
or stereo sound the "color tone" or histogram of the seed noise can be matched to improve quality (using scikit-image currently)
This method is quite robust and has the added benefit of being fast independently of the size of the out-painted area.
The effects of this method include things like helping the generator integrate the pre-existing view distance and camera angle.
Carefully managing color and brightness with histogram matching is also essential to achieving good coherence.
noise_q controls the exponent in the fall-off of the distribution and can be any positive number; lower values mean higher detail (range > 0, default 1.)
color_variation controls how much freedom is allowed for the colors/palette of the out-painted area (range 0..1, default 0.01)
This code is provided as is under the Unlicense (https://unlicense.org/)
Although you have no obligation to do so, if you found this code helpful please find it in your heart to credit me [parlance-zz].
Questions or comments can be sent to [email protected] (https://github.com/parlance-zz/)
This code is part of a new branch of a discord bot I am working on integrating with diffusers (https://github.com/parlance-zz/g-diffuser-bot) | def get_matched_noise(_np_src_image, np_mask_rgb, noise_q, color_variation):
"""
Explanation:
Getting good results in/out-painting with stable diffusion can be challenging.
Although there are simpler effective solutions for in-painting, out-painting can be especially challenging because there is no color data
in the masked area to help prompt the generator. Ideally, even for in-painting we'd like to work effectively without that data as well.
Provided here is my take on a potential solution to this problem.
By taking a Fourier transform of the masked src img we get a function that tells us the presence and orientation of each feature scale in the unmasked src.
Shaping the init/seed noise for in/outpainting to the same distribution of feature scales, orientations, and positions increases output coherence
by helping keep features aligned. This technique is applicable to any continuous generation task such as audio or video, each of which can
be conceptualized as a series of out-painting steps where the last half of the input "frame" is erased. For multi-channel data such as color
or stereo sound the "color tone" or histogram of the seed noise can be matched to improve quality (using scikit-image currently)
This method is quite robust and has the added benefit of being fast independently of the size of the out-painted area.
The effects of this method include things like helping the generator integrate the pre-existing view distance and camera angle.
Carefully managing color and brightness with histogram matching is also essential to achieving good coherence.
noise_q controls the exponent in the fall-off of the distribution and can be any positive number; lower values mean higher detail (range > 0, default 1.)
color_variation controls how much freedom is allowed for the colors/palette of the out-painted area (range 0..1, default 0.01)
This code is provided as is under the Unlicense (https://unlicense.org/)
Although you have no obligation to do so, if you found this code helpful please find it in your heart to credit me [parlance-zz].
Questions or comments can be sent to [email protected] (https://github.com/parlance-zz/)
This code is part of a new branch of a discord bot I am working on integrating with diffusers (https://github.com/parlance-zz/g-diffuser-bot)
"""
global DEBUG_MODE
global TMP_ROOT_PATH
width = _np_src_image.shape[0]
height = _np_src_image.shape[1]
num_channels = _np_src_image.shape[2]
np_src_image = _np_src_image[:] * (1.0 - np_mask_rgb)
np_mask_grey = np.sum(np_mask_rgb, axis=2) / 3.0
(np.sum(np_src_image, axis=2) / 3.0)
np.ones((width, height), dtype=bool)
img_mask = np_mask_grey > 1e-6
ref_mask = np_mask_grey < 1e-3
windowed_image = _np_src_image * (1.0 - _get_masked_window_rgb(np_mask_grey))
windowed_image /= np.max(windowed_image)
windowed_image += (
np.average(_np_src_image) * np_mask_rgb
) # / (1.-np.average(np_mask_rgb)) # rather than leave the masked area black, we get better results from fft by filling the average unmasked color
# windowed_image += np.average(_np_src_image) * (np_mask_rgb * (1.- np_mask_rgb)) / (1.-np.average(np_mask_rgb)) # compensate for darkening across the mask transition area
# _save_debug_img(windowed_image, "windowed_src_img")
src_fft = _fft2(windowed_image) # get feature statistics from masked src img
src_dist = np.absolute(src_fft)
src_phase = src_fft / src_dist
# _save_debug_img(src_dist, "windowed_src_dist")
noise_window = _get_gaussian_window(
width, height, mode=1
) # start with simple gaussian noise
noise_rgb = np.random.random_sample((width, height, num_channels))
noise_grey = np.sum(noise_rgb, axis=2) / 3.0
noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter
for c in range(num_channels):
noise_rgb[:, :, c] += (1.0 - color_variation) * noise_grey
noise_fft = _fft2(noise_rgb)
for c in range(num_channels):
noise_fft[:, :, c] *= noise_window
noise_rgb = np.real(_ifft2(noise_fft))
shaped_noise_fft = _fft2(noise_rgb)
shaped_noise_fft[:, :, :] = (
np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist**noise_q) * src_phase
) # perform the actual shaping
brightness_variation = 0.0 # color_variation # todo: temporarily tying brightness variation to color variation for now
contrast_adjusted_np_src = (
_np_src_image[:] * (brightness_variation + 1.0) - brightness_variation * 2.0
)
# scikit-image is used for histogram matching, very convenient!
shaped_noise = np.real(_ifft2(shaped_noise_fft))
shaped_noise -= np.min(shaped_noise)
shaped_noise /= np.max(shaped_noise)
shaped_noise[img_mask, :] = skimage.exposure.match_histograms(
shaped_noise[img_mask, :] ** 1.0,
contrast_adjusted_np_src[ref_mask, :],
channel_axis=1,
)
shaped_noise = _np_src_image[:] * (1.0 - np_mask_rgb) + shaped_noise * np_mask_rgb
# _save_debug_img(shaped_noise, "shaped_noise")
matched_noise = np.zeros((width, height, num_channels))
matched_noise = shaped_noise[:]
# matched_noise[all_mask,:] = skimage.exposure.match_histograms(shaped_noise[all_mask,:], _np_src_image[ref_mask,:], channel_axis=1)
# matched_noise = _np_src_image[:] * (1. - np_mask_rgb) + matched_noise * np_mask_rgb
# _save_debug_img(matched_noise, "matched_noise")
"""
todo:
color_variation doesn't have to be a single number; the overall color tone of the out-painted area could be param controlled
"""
return np.clip(matched_noise, 0.0, 1.0) |
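A minimal usage sketch, assuming a float32 RGB source image in [0, 1] and a mask of the same shape where 1.0 marks the region to be out-painted (array contents are placeholders):
import numpy as np

src = np.random.random_sample((512, 512, 3)).astype(np.float32)  # placeholder image
mask = np.zeros_like(src)
mask[:, 256:, :] = 1.0  # out-paint the right half of the frame

seed_noise = get_matched_noise(src, mask, noise_q=1.0, color_variation=0.01)
# the unmasked half of `src` is kept as-is; the masked half is filled with noise
# whose spectrum and histogram match the visible part, and the result can then be
# encoded as the init latent for an img2img/outpainting pass.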
A folder picker that has a text_input field next to it and a button to select the folder.
Returns the text_input field with the folder path. | def folder_picker(
label="Select:",
value="",
help="",
folder_button_label="Select",
folder_button_help="",
folder_button_key="",
):
"""A folder picker that has a text_input field next to it and a button to select the folder.
Returns the text_input field with the folder path."""
import tkinter as tk
from tkinter import filedialog
# Set up tkinter
root = tk.Tk()
root.withdraw()
# Make folder picker dialog appear on top of other windows
root.wm_attributes("-topmost", 1)
col1, col2 = st.columns([2, 1], gap="small")
with col1:
dirname = st.empty()
with col2:
st.write("")
st.write("")
folder_picker = st.empty()
# Folder picker button
# st.title('Folder Picker')
# st.write('Please select a folder:')
# Create a label and add a random number of invisible characters
# to it so no two buttons inside a form are the same.
# folder_button_label = ''.join(random.choice(f"{folder_button_label}") for _ in range(5))
folder_button_label = f"{str(folder_button_label)}{'' * random.randint(1, 500)}"
clicked = folder_button_key + "" * random.randint(5, 500)
# try:
# clicked = folder_picker.button(folder_button_label, help=folder_button_help, key=folder_button_key)
# except StreamlitAPIException:
clicked = folder_picker.form_submit_button(
folder_button_label, help=folder_button_help
)
if clicked:
dirname = dirname.text_input(
label, filedialog.askdirectory(master=root), help=help
)
else:
dirname = dirname.text_input(label, value, help=help)
return dirname |
Constructs an exponential noise schedule. | def get_sigmas_exponential(n, sigma_min, sigma_max, device="cpu"):
"""Constructs an exponential noise schedule."""
sigmas = torch.linspace(
math.log(sigma_max), math.log(sigma_min), n, device=device
).exp()
return append_zero(sigmas) |
Constructs a continuous VP noise schedule. | def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device="cpu"):
"""Constructs a continuous VP noise schedule."""
t = torch.linspace(1, eps_s, n, device=device)
sigmas = torch.sqrt(torch.exp(beta_d * t**2 / 2 + beta_min * t) - 1)
return append_zero(sigmas) |
Converts a denoiser output to a Karras ODE derivative. | def to_d(x, sigma, denoised):
"""Converts a denoiser output to a Karras ODE derivative."""
return (x - denoised) / append_dims(sigma, x.ndim) |
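Taken together, append_dims, the sigma schedules and to_d are the pieces a k-diffusion style sampler is built from; below is a minimal Euler loop, sketched under the assumption that `denoiser(x, sigma)` returns the model's denoised prediction for the whole batch.
import torch

@torch.no_grad()
def sample_euler_sketch(denoiser, x, sigmas):
    """Plain Euler integration of the Karras ODE (illustrative only)."""
    # x is expected to start as pure noise scaled by sigmas[0].
    for i in range(len(sigmas) - 1):
        sigma = sigmas[i] * x.new_ones(x.shape[0])
        denoised = denoiser(x, sigma)
        d = to_d(x, sigma, denoised)          # ODE derivative at sigma_i
        dt = sigmas[i + 1] - sigmas[i]        # negative: sigmas decrease
        x = x + d * dt                        # Euler step to sigma_{i+1}
    return x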
Loads Stable Diffusion model by name | def load_sd_model(model_name: str):
"""Loads Stable Diffusion model by name"""
ckpt_path = st.session_state.defaults.general.default_model_path
if model_name != st.session_state.defaults.general.base_model:
ckpt_path = os.path.join("models", "custom", f"{model_name}.ckpt")
if st.session_state.defaults.general.optimized:
config = OmegaConf.load(st.session_state.defaults.general.optimized_config)
sd = load_sd_from_config(ckpt_path)
li, lo = [], []
for key, v_ in sd.items():
sp = key.split(".")
if (sp[0]) == "model":
if "input_blocks" in sp:
li.append(key)
elif "middle_block" in sp:
li.append(key)
elif "time_embed" in sp:
li.append(key)
else:
lo.append(key)
for key in li:
sd["model1." + key[6:]] = sd.pop(key)
for key in lo:
sd["model2." + key[6:]] = sd.pop(key)
device = (
torch.device(f"cuda:{st.session_state.defaults.general.gpu}")
if torch.cuda.is_available()
else torch.device("cpu")
)
model = instantiate_from_config(config.modelUNet)
_, _ = model.load_state_dict(sd, strict=False)
model.cuda()
model.eval()
model.turbo = st.session_state.defaults.general.optimized_turbo
modelCS = instantiate_from_config(config.modelCondStage)
_, _ = modelCS.load_state_dict(sd, strict=False)
modelCS.cond_stage_model.device = device
modelCS.eval()
modelFS = instantiate_from_config(config.modelFirstStage)
_, _ = modelFS.load_state_dict(sd, strict=False)
modelFS.eval()
del sd
if not st.session_state.defaults.general.no_half:
model = model.half().to(device)
modelCS = modelCS.half().to(device)
modelFS = modelFS.half().to(device)
return config, device, model, modelCS, modelFS
else:
if os.path.exists(ckpt_path.replace("ckpt", "yaml")):
logger.info(f"Using config file from: {ckpt_path.replace('ckpt','yaml')}")
config = OmegaConf.load(ckpt_path.replace("ckpt", "yaml"))
else:
config = OmegaConf.load(
st.session_state.defaults.general.default_model_config
)
model = load_model_from_config(config, ckpt_path)
device = (
torch.device(f"cuda:{st.session_state.defaults.general.gpu}")
if torch.cuda.is_available()
else torch.device("cpu")
)
model = (
model if st.session_state.defaults.general.no_half else model.half()
).to(device)
return config, device, model, None, None |
Find the optimal update_preview_frequency value maximizing
performance while minimizing the time between updates. | def optimize_update_preview_frequency(
current_chunk_speed,
previous_chunk_speed_list,
update_preview_frequency,
update_preview_frequency_list,
):
"""Find the optimal update_preview_frequency value maximizing
performance while minimizing the time between updates."""
from statistics import mean
previous_chunk_avg_speed = mean(previous_chunk_speed_list)
previous_chunk_speed_list.append(current_chunk_speed)
current_chunk_avg_speed = mean(previous_chunk_speed_list)
if current_chunk_avg_speed >= previous_chunk_avg_speed:
# print(f"{current_chunk_speed} >= {previous_chunk_speed}")
update_preview_frequency_list.append(update_preview_frequency + 1)
else:
# print(f"{current_chunk_speed} <= {previous_chunk_speed}")
update_preview_frequency_list.append(update_preview_frequency - 1)
update_preview_frequency = round(mean(update_preview_frequency_list))
return (
current_chunk_speed,
previous_chunk_speed_list,
update_preview_frequency,
update_preview_frequency_list,
) |
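A short worked example of the adaptive behaviour (all numbers are illustrative): previous chunks averaged 1.1 it/s, the newest chunk ran at 1.7 it/s, the frequency history is [6] and the current interval is 9 steps.
speeds, freqs = [1.0, 1.2], [6]
_, speeds, freq, freqs = optimize_update_preview_frequency(
    current_chunk_speed=1.7,
    previous_chunk_speed_list=speeds,
    update_preview_frequency=9,
    update_preview_frequency_list=freqs,
)
# The new average (1.3) beats the old one (1.1), so 9 + 1 = 10 is appended to the
# history and the returned interval is round(mean([6, 10])) == 8.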
Moves only unet to fp16 and to CUDA, while keeping lighter models on CPUs | def enable_minimal_memory_usage(model):
"""Moves only unet to fp16 and to CUDA, while keeping lighter models on CPUs"""
model.unet.to(torch.float16).to(torch.device("cuda"))
model.enable_attention_slicing(1)
torch.cuda.empty_cache()
torch_gc() |
this function tests if prompt is too long, and if so, adds a message to comments | def check_prompt_length(prompt, comments):
"""this function tests if prompt is too long, and if so, adds a message to comments"""
tokenizer = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.tokenizer
max_length = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.max_length
info = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.tokenizer(
[prompt],
truncation=True,
max_length=max_length,
return_overflowing_tokens=True,
padding="max_length",
return_tensors="pt",
)
ovf = info["overflowing_tokens"][0]
overflowing_count = ovf.shape[0]
if overflowing_count == 0:
return
vocab = {v: k for k, v in tokenizer.get_vocab().items()}
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = tokenizer.convert_tokens_to_string("".join(overflowing_words))
comments.append(
f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n"
) |
Determines and returns the next sequence number to use when saving an
image in the specified directory.
If a prefix is given, only consider files whose names start with that
prefix, and strip the prefix from filenames before extracting their
sequence number.
The sequence starts at 0. | def get_next_sequence_number(path, prefix=""):
"""
Determines and returns the next sequence number to use when saving an
image in the specified directory.
If a prefix is given, only consider files whose names start with that
prefix, and strip the prefix from filenames before extracting their
sequence number.
The sequence starts at 0.
"""
result = -1
for p in Path(path).iterdir():
if p.name.endswith((".png", ".jpg")) and p.name.startswith(prefix):
tmp = p.name[len(prefix) :]
try:
result = max(int(tmp.split("-")[0]), result)
except ValueError:
pass
return result + 1 |
this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch | def process_images(
outpath,
func_init,
func_sample,
prompt,
seed,
sampler_name,
save_grid,
batch_size,
n_iter,
steps,
cfg_scale,
width,
height,
prompt_matrix,
use_GFPGAN: bool = True,
GFPGAN_model: str = "GFPGANv1.4",
use_RealESRGAN: bool = False,
realesrgan_model_name: str = "RealESRGAN_x4plus",
use_LDSR: bool = False,
LDSR_model_name: str = "model",
ddim_eta=0.0,
normalize_prompt_weights=True,
init_img=None,
init_mask=None,
mask_blur_strength=3,
mask_restore=False,
denoising_strength=0.75,
noise_mode=0,
find_noise_steps=1,
resize_mode=None,
uses_loopback=False,
uses_random_seed_loopback=False,
sort_samples=True,
write_info_files=True,
jpg_sample=False,
variant_amount=0.0,
variant_seed=None,
save_individual_images: bool = True,
):
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
torch_gc()
# start time after garbage collection (or before?)
start_time = time.time()
# We will use this date later for the folder name; start_time can be used instead if it is not needed.
datetime.datetime.now()
mem_mon = MemUsageMonitor("MemMon")
mem_mon.start()
if st.session_state.defaults.general.use_sd_concepts_library:
prompt_tokens = re.findall("<([a-zA-Z0-9-]+)>", prompt)
if prompt_tokens:
# CompVis
tokenizer = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.tokenizer
text_encoder = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).cond_stage_model.transformer
# diffusers
# tokenizer = pipe.tokenizer
# text_encoder = pipe.text_encoder
ext = ("pt", "bin")
if len(prompt_tokens) > 1:
for token_name in prompt_tokens:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
token_name,
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{token_name}>",
)
else:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
prompt_tokens[0],
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{prompt_tokens[0]}>",
)
#
os.makedirs(outpath, exist_ok=True)
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
if "|" not in prompt and prompt.startswith("@"):
prompt = prompt[1:]
negprompt = ""
if "###" in prompt:
prompt, negprompt = prompt.split("###", 1)
prompt = prompt.strip()
negprompt = negprompt.strip()
comments = []
prompt_matrix_parts = []
simple_templating = False
if prompt_matrix:
if prompt.startswith("@"):
simple_templating = True
all_seeds, n_iter, prompt_matrix_parts, all_prompts, frows = oxlamon_matrix(
prompt, seed, n_iter, batch_size
)
else:
all_prompts = []
prompt_matrix_parts = prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
current = prompt_matrix_parts[0]
for n, text in enumerate(prompt_matrix_parts[1:]):
if combination_num & (2**n) > 0:
current += ("" if text.strip().startswith(",") else ", ") + text
all_prompts.append(current)
n_iter = math.ceil(len(all_prompts) / batch_size)
all_seeds = len(all_prompts) * [seed]
logger.info(
f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches."
)
else:
if not st.session_state["defaults"].general.no_verify_input:
try:
check_prompt_length(prompt, comments)
except:
import traceback
logger.info("Error verifying input:", file=sys.stderr)
logger.info(traceback.format_exc(), file=sys.stderr)
all_prompts = batch_size * n_iter * [prompt]
all_seeds = [seed + x for x in range(len(all_prompts))]
precision_scope = (
autocast
if st.session_state["defaults"].general.precision == "autocast"
else nullcontext
)
output_images = []
grid_captions = []
stats = []
with torch.no_grad(), precision_scope("cuda"), (
server_state["model"].ema_scope()
if not st.session_state["defaults"].general.optimized
else nullcontext()
):
init_data = func_init()
time.time()
# if variant_amount > 0.0 create noise from base seed
base_x = None
if variant_amount > 0.0:
target_seed_randomizer = seed_to_int("") # random seed
torch.manual_seed(
seed
) # this has to be the single starting seed (not per-iteration)
base_x = create_random_tensors(
[opt_C, height // opt_f, width // opt_f], seeds=[seed]
)
# we don't want all_seeds to be sequential from starting seed with variants,
# since that makes the same variants each time,
# so we add target_seed_randomizer as a random offset
for si in range(len(all_seeds)):
all_seeds[si] += target_seed_randomizer
for n in range(n_iter):
logger.info(f"Iteration: {n+1}/{n_iter}")
prompts = all_prompts[n * batch_size : (n + 1) * batch_size]
captions = prompt_matrix_parts[n * batch_size : (n + 1) * batch_size]
seeds = all_seeds[n * batch_size : (n + 1) * batch_size]
logger.info(prompt)
if st.session_state["defaults"].general.optimized:
server_state["modelCS"].to(st.session_state["defaults"].general.gpu)
uc = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).get_learned_conditioning(len(prompts) * [negprompt])
if isinstance(prompts, tuple):
prompts = list(prompts)
# split the prompt if it has : for weighting
# TODO for speed it might help to have this occur when all_prompts filled??
weighted_subprompts = split_weighted_subprompts(
prompts[0], normalize_prompt_weights
)
# sub-prompt weighting used if more than 1
if len(weighted_subprompts) > 1:
c = torch.zeros_like(
uc
) # i dont know if this is correct.. but it works
for i in range(0, len(weighted_subprompts)):
# note if alpha negative, it functions same as torch.sub
c = torch.add(
c,
(
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).get_learned_conditioning(weighted_subprompts[i][0]),
alpha=weighted_subprompts[i][1],
)
else: # just behave like usual
c = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelCS"]
).get_learned_conditioning(prompts)
shape = [opt_C, height // opt_f, width // opt_f]
if st.session_state["defaults"].general.optimized:
mem = torch.cuda.memory_allocated() / 1e6
server_state["modelCS"].to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
if noise_mode == 1 or noise_mode == 3:
# TODO params for find_noise_to_image
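# Matched-noise modes: derive the starting latent from the init image instead of the random per-seed tensors.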
x = torch.cat(
batch_size
* [
find_noise_for_image(
server_state["model"],
server_state["device"],
init_img.convert("RGB"),
"",
find_noise_steps,
0.0,
normalize=True,
generation_callback=generation_callback,
)
],
dim=0,
)
else:
# we manually generate all input noises because each one should have a specific seed
x = create_random_tensors(shape, seeds=seeds)
if variant_amount > 0.0: # we are making variants
# using variant_seed as sneaky toggle,
# when not None or '' use the variant_seed
# otherwise use seeds
if variant_seed is not None and variant_seed != "":
specified_variant_seed = seed_to_int(variant_seed)
torch.manual_seed(specified_variant_seed)
seeds = [specified_variant_seed]
# finally, slerp base_x noise to target_x noise for creating a variant
x = slerp(
st.session_state["defaults"].general.gpu,
max(0.0, min(1.0, variant_amount)),
base_x,
x,
)
samples_ddim = func_sample(
init_data=init_data,
x=x,
conditioning=c,
unconditional_conditioning=uc,
sampler_name=sampler_name,
)
if st.session_state["defaults"].general.optimized:
server_state["modelFS"].to(st.session_state["defaults"].general.gpu)
x_samples_ddim = (
server_state["model"]
if not st.session_state["defaults"].general.optimized
else server_state["modelFS"]
).decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
run_images = []
for i, x_sample in enumerate(x_samples_ddim):
sanitized_prompt = slugify(prompts[i])
percent = i / len(x_samples_ddim)
if "progress_bar" in st.session_state:
st.session_state["progress_bar"].progress(
min(percent, 1.0)
)
if sort_samples:
full_path = os.path.join(os.getcwd(), sample_path, sanitized_prompt)
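# Trim the prompt so the combined output path stays within the ~120-character filename budget used below.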
sanitized_prompt = sanitized_prompt[: 120 - len(full_path)]
sample_path_i = os.path.join(sample_path, sanitized_prompt)
# print(f"output folder length: {len(os.path.join(os.getcwd(), sample_path_i))}")
# print(os.path.join(os.getcwd(), sample_path_i))
os.makedirs(sample_path_i, exist_ok=True)
base_count = get_next_sequence_number(sample_path_i)
filename = f"{base_count:05}-{steps}_{sampler_name}_{seeds[i]}"
else:
full_path = os.path.join(os.getcwd(), sample_path)
sample_path_i = sample_path
base_count = get_next_sequence_number(sample_path_i)
filename = f"{base_count:05}-{steps}_{sampler_name}_{seeds[i]}_{sanitized_prompt}"[
: 120 - len(full_path)
] # same as before
x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c")
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
original_filename = filename
if "preview_image" in st.session_state:
st.session_state["preview_image"].image(image)
#
if (
use_GFPGAN
and server_state["GFPGAN"] is not None
and not use_RealESRGAN
and not use_LDSR
):
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if server_state["GFPGAN"].name != GFPGAN_model:
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
torch_gc()
with torch.autocast("cuda"):
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
# if st.session_state["GFPGAN_strenght"]:
# gfpgan_sample = Image.blend(image, gfpgan_image, st.session_state["GFPGAN_strenght"])
gfpgan_filename = original_filename + "-gfpgan"
save_sample(
gfpgan_image,
sample_path_i,
gfpgan_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(gfpgan_image) # 287
run_images.append(gfpgan_image)
if simple_templating:
grid_captions.append(captions[i] + "\ngfpgan")
#
elif (
use_RealESRGAN
and server_state["RealESRGAN"] is not None
and not use_GFPGAN
):
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running RealESRGAN on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
# skip_save = True # #287 >_>
torch_gc()
if server_state["RealESRGAN"].model.name != realesrgan_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
output, img_mode = server_state["RealESRGAN"].enhance(
x_sample[:, :, ::-1]
)
esrgan_filename = original_filename + "-esrgan4x"
esrgan_sample = output[:, :, ::-1]
esrgan_image = Image.fromarray(esrgan_sample)
# save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
# normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
# save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
save_sample(
esrgan_image,
sample_path_i,
esrgan_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(esrgan_image) # 287
run_images.append(esrgan_image)
if simple_templating:
grid_captions.append(captions[i] + "\nesrgan")
#
elif use_LDSR and "LDSR" in server_state and not use_GFPGAN:
logger.info(
"Running LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
# skip_save = True # #287 >_>
torch_gc()
if server_state["LDSR"].name != LDSR_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
result = server_state["LDSR"].superResolution(
image,
ddimSteps=st.session_state["ldsr_sampling_steps"],
preDownScale=st.session_state["preDownScale"],
postDownScale=st.session_state["postDownScale"],
downsample_method=st.session_state["downsample_method"],
)
ldsr_filename = original_filename + "-ldsr4x"
# ldsr_sample = result[:,:,::-1]
# ldsr_image = Image.fromarray(ldsr_sample)
# save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
# normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
# save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
save_sample(
result,
sample_path_i,
ldsr_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(result) # 287
run_images.append(result)
if simple_templating:
grid_captions.append(captions[i] + "\nldsr")
#
elif (
use_LDSR
and "LDSR" in server_state
and use_GFPGAN
and "GFPGAN" in server_state
):
logger.info(
"Running GFPGAN+LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN+LDSR on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
if server_state["GFPGAN"].name != GFPGAN_model:
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
torch_gc()
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
if server_state["LDSR"].name != LDSR_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_LDSR=use_LDSR,
LDSR_model=LDSR_model_name,
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
# LDSR.superResolution(gfpgan_image, ddimSteps=100, preDownScale='None', postDownScale='None', downsample_method="Lanczos")
result = server_state["LDSR"].superResolution(
gfpgan_image,
ddimSteps=st.session_state["ldsr_sampling_steps"],
preDownScale=st.session_state["preDownScale"],
postDownScale=st.session_state["postDownScale"],
downsample_method=st.session_state["downsample_method"],
)
ldsr_filename = original_filename + "-gfpgan-ldsr2x"
# ldsr_sample = result[:,:,::-1]
# ldsr_image = Image.fromarray(result)
# save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
# normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
# save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)
save_sample(
result,
sample_path_i,
ldsr_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(result) # 287
run_images.append(result)
if simple_templating:
grid_captions.append(captions[i] + "\ngfpgan-ldsr")
elif (
use_RealESRGAN
and server_state["RealESRGAN"] is not None
and use_GFPGAN
and server_state["GFPGAN"] is not None
):
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN+RealESRGAN on image %d of %d..."
% (i + 1, len(x_samples_ddim))
)
# skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
x_sample[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
if server_state["RealESRGAN"].model.name != realesrgan_model_name:
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
output, img_mode = server_state["RealESRGAN"].enhance(
gfpgan_sample[:, :, ::-1]
)
gfpgan_esrgan_filename = original_filename + "-gfpgan-esrgan4x"
gfpgan_esrgan_sample = output[:, :, ::-1]
gfpgan_esrgan_image = Image.fromarray(gfpgan_esrgan_sample)
save_sample(
gfpgan_esrgan_image,
sample_path_i,
gfpgan_esrgan_filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
False,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
False,
server_state["loaded_model"],
)
output_images.append(gfpgan_esrgan_image) # 287
run_images.append(gfpgan_esrgan_image)
if simple_templating:
grid_captions.append(captions[i] + "\ngfpgan_esrgan")
#
else:
output_images.append(image)
run_images.append(image)
if mask_restore and init_mask:
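# Composite the original init image back over the generated image through the blurred mask (upscaling both with RealESRGAN first when it was used on the sample).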
# init_mask = init_mask if keep_mask else ImageOps.invert(init_mask)
init_mask = init_mask.filter(
ImageFilter.GaussianBlur(mask_blur_strength)
)
init_mask = init_mask.convert("L")
init_img = init_img.convert("RGB")
image = image.convert("RGB")
if use_RealESRGAN and server_state["RealESRGAN"] is not None:
if (
server_state["RealESRGAN"].model.name
!= realesrgan_model_name
):
# try_loading_RealESRGAN(realesrgan_model_name)
load_models(
use_GFPGAN=use_GFPGAN,
use_RealESRGAN=use_RealESRGAN,
RealESRGAN_model=realesrgan_model_name,
)
output, img_mode = server_state["RealESRGAN"].enhance(
np.array(init_img, dtype=np.uint8)
)
init_img = Image.fromarray(output)
init_img = init_img.convert("RGB")
output, img_mode = server_state["RealESRGAN"].enhance(
np.array(init_mask, dtype=np.uint8)
)
init_mask = Image.fromarray(output)
init_mask = init_mask.convert("L")
image = Image.composite(init_img, image, init_mask)
if save_individual_images:
save_sample(
image,
sample_path_i,
filename,
jpg_sample,
prompts,
seeds,
width,
height,
steps,
cfg_scale,
normalize_prompt_weights,
use_GFPGAN,
write_info_files,
prompt_matrix,
init_img,
uses_loopback,
uses_random_seed_loopback,
save_grid,
sort_samples,
sampler_name,
ddim_eta,
n_iter,
batch_size,
i,
denoising_strength,
resize_mode,
save_individual_images,
server_state["loaded_model"],
)
# if add_original_image or not simple_templating:
# output_images.append(image)
# if simple_templating:
# grid_captions.append( captions[i] )
if "defaults" in st.session_state:
if st.session_state["defaults"].general.optimized:
mem = torch.cuda.memory_allocated() / 1e6
server_state["modelFS"].to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
if len(run_images) > 1:
preview_image = image_grid(run_images, n_iter)
else:
preview_image = run_images[0]
# Constrain the final preview image to 1440x900 so we're not sending huge amounts of data
# to the browser
preview_image = constrain_image(preview_image, 1440, 900)
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text("Finished!")
if "preview_image" in st.session_state:
st.session_state["preview_image"].image(preview_image)
if prompt_matrix or save_grid:
if prompt_matrix:
if simple_templating:
grid = image_grid(
output_images,
n_iter,
force_n_rows=frows,
captions=grid_captions,
)
else:
grid = image_grid(
output_images,
n_iter,
force_n_rows=1 << ((len(prompt_matrix_parts) - 1) // 2),
)
try:
grid = draw_prompt_matrix(
grid, width, height, prompt_matrix_parts
)
except Exception:
import traceback
logger.error("Error creating prompt_matrix text:")
logger.error(traceback.format_exc())
else:
grid = image_grid(output_images, batch_size)
if grid and (batch_size > 1 or n_iter > 1):
output_images.insert(0, grid)
grid_count = get_next_sequence_number(outpath, "grid-")
grid_file = f"grid-{grid_count:05}-{seed}_{slugify(prompts[i].replace(' ', '_')[:120-len(full_path)])}.{grid_ext}"
grid.save(
os.path.join(outpath, grid_file),
grid_format,
quality=grid_quality,
lossless=grid_lossless,
optimize=True,
)
time.time()
mem_max_used, mem_total = mem_mon.read_and_stop()
time_diff = time.time() - start_time
info = f"""
{prompt}
Steps: {steps}, Sampler: {sampler_name}, CFG scale: {cfg_scale}, Seed: {seed}{', Denoising strength: '+str(denoising_strength) if init_img is not None else ''}{', GFPGAN' if use_GFPGAN and server_state["GFPGAN"] is not None else ''}{', '+realesrgan_model_name if use_RealESRGAN and server_state["RealESRGAN"] is not None else ''}{', Prompt Matrix Mode.' if prompt_matrix else ''}""".strip()
stats = f"""
Took { round(time_diff, 2) }s total ({ round(time_diff/(len(all_prompts)),2) }s per image)
Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_048_576) } MiB / { round(mem_max_used/mem_total*100, 3) }%"""
for comment in comments:
info += "\n\n" + comment
# mem_mon.stop()
# del mem_mon
torch_gc()
return output_images, seed, info, stats |
Download all files from model_list[model_name] | def download_model(models, model_name):
"""Download all files from model_list[model_name]"""
for file in models[model_name]:
download_file(file["file_name"], file["file_path"], file["file_url"])
return |
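# Hypothetical usage sketch (not part of the original code): the dict shape is
# inferred from the loop above; the file name, path and URL below are placeholders.
example_models = {
"GFPGAN": [
{
"file_name": "GFPGANv1.4.pth",
"file_path": "models/gfpgan",
"file_url": "https://example.com/GFPGANv1.4.pth",
}
]
}
download_model(example_models, "GFPGAN")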
Stitch a sequence of frames into an MP4 video (optionally muxing in audio) using torchvision's write_video.
frames_or_frame_dir: (Union[str, Path, torch.Tensor]):
Either a directory of images, or a tensor of shape (T, C, H, W) in range [0, 255]. | def make_video_pyav(
frames_or_frame_dir: Union[str, Path, torch.Tensor],
audio_filepath: Union[str, Path] = None,
fps: int = 30,
audio_offset: int = 0,
audio_duration: int = 2,
sr: int = 22050,
output_filepath: Union[str, Path] = "output.mp4",
glob_pattern: str = "*.png",
):
"""
Stitch a sequence of frames into an MP4 video (optionally muxing in audio) using torchvision's write_video.
frames_or_frame_dir: (Union[str, Path, torch.Tensor]):
Either a directory of images, or a tensor of shape (T, C, H, W) in range [0, 255].
"""
# Torchvision write_video doesn't support pathlib paths
output_filepath = str(output_filepath)
if isinstance(frames_or_frame_dir, (str, Path)):
frames = None
for img in sorted(Path(frames_or_frame_dir).glob(glob_pattern)):
frame = pil_to_tensor(Image.open(img)).unsqueeze(0)
frames = frame if frames is None else torch.cat([frames, frame])
else:
frames = frames_or_frame_dir
# TCHW -> THWC
frames = frames.permute(0, 2, 3, 1)
if audio_filepath:
# Read audio, convert to tensor
audio, sr = librosa.load(
audio_filepath,
sr=sr,
mono=True,
offset=audio_offset,
duration=audio_duration,
)
audio_tensor = torch.tensor(audio).unsqueeze(0)
write_video(
output_filepath,
frames,
fps=fps,
audio_array=audio_tensor,
audio_fps=sr,
audio_codec="aac",
options={"crf": "10", "pix_fmt": "yuv420p"},
)
else:
write_video(
output_filepath,
frames,
fps=fps,
options={"crf": "10", "pix_fmt": "yuv420p"},
)
return output_filepath |
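# Hypothetical usage sketch (not part of the original code): stitch previously saved
# PNG frames into a 30 fps MP4 and mux in the first two seconds of a soundtrack.
# The frame directory and audio path are placeholders.
make_video_pyav(
"outputs/txt2vid/samples/my-prompt",
audio_filepath="soundtrack.mp3",
fps=30,
audio_offset=0,
audio_duration=2,
output_filepath="outputs/txt2vid/my-prompt.mp4",
glob_pattern="frame*.png",
)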
prompt = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
#name:str = 'test', # name of this project, for the output directory
#rootdir:str = st.session_state['defaults'].general.outdir,
num_steps:int = 200, # number of steps between each pair of sampled points
max_duration_in_seconds:int = 30, # maximum video duration in seconds; generation stops after max_duration_in_seconds * fps frames
num_inference_steps:int = 50, # more (e.g. 100, 200 etc) can create slightly better images
cfg_scale:float = 5.0, # can depend on the prompt. usually somewhere between 3-10 is good
do_loop = False,
use_lerp_for_text = False,
seeds = None,
quality:int = 100, # for jpeg compression of the output images
eta:float = 0.0,
width:int = 256,
height:int = 256,
weights_path = "runwayml/stable-diffusion-v1-5",
scheduler="klms", # choices: default, ddim, klms
disable_tqdm = False,
beta_start = 0.0001,
beta_end = 0.00012,
beta_schedule = "scaled_linear" | def txt2vid(
# --------------------------------------
# args you probably want to change
prompts=["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu: int = st.session_state["defaults"].general.gpu, # id of the gpu to run on
# name:str = 'test', # name of this project, for the output directory
# rootdir:str = st.session_state['defaults'].general.outdir,
num_steps: int = 200, # number of steps between each pair of sampled points
max_duration_in_seconds: int = 30,  # maximum video duration in seconds; generation stops after max_duration_in_seconds * fps frames
num_inference_steps: int = 50, # more (e.g. 100, 200 etc) can create slightly better images
cfg_scale: float = 5.0, # can depend on the prompt. usually somewhere between 3-10 is good
save_video=True,
save_video_on_stop=False,
outdir="outputs",
do_loop=False,
use_lerp_for_text=False,
seeds=None,
quality: int = 100, # for jpeg compression of the output images
eta: float = 0.0,
width: int = 256,
height: int = 256,
weights_path="runwayml/stable-diffusion-v1-5",
scheduler="klms", # choices: default, ddim, klms
disable_tqdm=False,
# -----------------------------------------------
beta_start=0.0001,
beta_end=0.00012,
beta_schedule="scaled_linear",
starting_image=None,
# -----------------------------------------------
# from new version
image_file_ext: Optional[str] = ".png",
fps: Optional[int] = 30,
upsample: Optional[bool] = False,
batch_size: Optional[int] = 1,
resume: Optional[bool] = False,
audio_filepath: str = None,
audio_start_sec: Optional[Union[int, float]] = None,
margin: Optional[float] = 1.0,
smooth: Optional[float] = 0.0,
):
"""
prompt = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
#name:str = 'test', # name of this project, for the output directory
#rootdir:str = st.session_state['defaults'].general.outdir,
num_steps:int = 200, # number of steps between each pair of sampled points
max_duration_in_seconds:int = 30, # maximum video duration in seconds; generation stops after max_duration_in_seconds * fps frames
num_inference_steps:int = 50, # more (e.g. 100, 200 etc) can create slightly better images
cfg_scale:float = 5.0, # can depend on the prompt. usually somewhere between 3-10 is good
do_loop = False,
use_lerp_for_text = False,
seeds = None,
quality:int = 100, # for jpeg compression of the output images
eta:float = 0.0,
width:int = 256,
height:int = 256,
weights_path = "runwayml/stable-diffusion-v1-5",
scheduler="klms", # choices: default, ddim, klms
disable_tqdm = False,
beta_start = 0.0001,
beta_end = 0.00012,
beta_schedule = "scaled_linear"
"""
mem_mon = MemUsageMonitor("MemMon")
mem_mon.start()
seeds = seed_to_int(seeds)
# We add an extra frame because most
# of the time the first frame is just the noise.
# max_duration_in_seconds +=1
assert torch.cuda.is_available()
assert height % 8 == 0 and width % 8 == 0
torch.manual_seed(seeds)
torch_device = f"cuda:{gpu}"
if type(seeds) == list:
prompts = [prompts] * len(seeds)
else:
seeds = [seeds, random.randint(0, 2**32 - 1)]
if type(prompts) == list:
# init the output dir
sanitized_prompt = slugify(prompts[0])
else:
# init the output dir
sanitized_prompt = slugify(prompts)
full_path = os.path.join(
os.getcwd(),
st.session_state["defaults"].general.outdir,
"txt2vid",
"samples",
sanitized_prompt,
)
if len(full_path) > 220:
sanitized_prompt = sanitized_prompt[: 220 - len(full_path)]
full_path = os.path.join(
os.getcwd(),
st.session_state["defaults"].general.outdir,
"txt2vid",
"samples",
sanitized_prompt,
)
os.makedirs(full_path, exist_ok=True)
# Write prompt info to file in output dir so we can keep track of what we did
if st.session_state.write_info_files:
with open(
os.path.join(
full_path,
f"{slugify(str(seeds))}_config.json"
if len(prompts) > 1
else "prompts_config.json",
),
"w",
) as outfile:
outfile.write(
json.dumps(
dict(
prompts=prompts,
gpu=gpu,
num_steps=num_steps,
max_duration_in_seconds=max_duration_in_seconds,
num_inference_steps=num_inference_steps,
cfg_scale=cfg_scale,
do_loop=do_loop,
use_lerp_for_text=use_lerp_for_text,
seeds=seeds,
quality=quality,
eta=eta,
width=width,
height=height,
weights_path=weights_path,
scheduler=scheduler,
disable_tqdm=disable_tqdm,
beta_start=beta_start,
beta_end=beta_end,
beta_schedule=beta_schedule,
),
indent=2,
sort_keys=False,
)
)
# print(scheduler)
default_scheduler = PNDMScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
# ------------------------------------------------------------------------------
# Schedulers
ddim_scheduler = DDIMScheduler(
beta_start=beta_start,
beta_end=beta_end,
beta_schedule=beta_schedule,
clip_sample=False,
set_alpha_to_one=False,
)
klms_scheduler = LMSDiscreteScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
# flaxddims_scheduler = FlaxDDIMScheduler(
# beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
# )
# flaxddpms_scheduler = FlaxDDPMScheduler(
# beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
# )
# flaxpndms_scheduler = FlaxPNDMScheduler(
# beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
# )
ddpms_scheduler = DDPMScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
SCHEDULERS = dict(
default=default_scheduler,
ddim=ddim_scheduler,
klms=klms_scheduler,
ddpms=ddpms_scheduler,
# flaxddims=flaxddims_scheduler,
# flaxddpms=flaxddpms_scheduler,
# flaxpndms=flaxpndms_scheduler,
)
with no_rerun:
with st.session_state["progress_bar_text"].container():
with hc.HyLoader(
"Loading Models...", hc.Loaders.standard_loaders, index=[0]
):
load_diffusers_model(weights_path, torch_device)
if "pipe" not in server_state:
logger.error("wtf")
server_state["pipe"].scheduler = SCHEDULERS[scheduler]
server_state["pipe"].use_multiprocessing_for_evaluation = False
server_state["pipe"].use_multiprocessed_decoding = False
# if do_loop:
##Makes the last prompt loop back to first prompt
# prompts = [prompts, prompts]
# seeds = [seeds, seeds]
# first_seed, *seeds = seeds
# prompts.append(prompts)
# seeds.append(first_seed)
with torch.autocast("cuda"):
# get the conditional text embeddings based on the prompt
text_input = server_state["pipe"].tokenizer(
prompts,
padding="max_length",
max_length=server_state["pipe"].tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
cond_embeddings = server_state["pipe"].text_encoder(
text_input.input_ids.to(torch_device)
)[0]
#
if st.session_state.defaults.general.use_sd_concepts_library:
prompt_tokens = re.findall("<([a-zA-Z0-9-]+)>", str(prompts))
if prompt_tokens:
# compviz
# tokenizer = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else st.session_state.modelCS).cond_stage_model.tokenizer
# text_encoder = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else st.session_state.modelCS).cond_stage_model.transformer
# diffusers
tokenizer = server_state["pipe"].tokenizer
text_encoder = server_state["pipe"].text_encoder
ext = ("pt", "bin")
# print (prompt_tokens)
if len(prompt_tokens) > 1:
for token_name in prompt_tokens:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
token_name,
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{token_name}>",
)
else:
embedding_path = os.path.join(
st.session_state["defaults"].general.sd_concepts_library_folder,
prompt_tokens[0],
)
if os.path.exists(embedding_path):
for files in os.listdir(embedding_path):
if files.endswith(ext):
load_learned_embed_in_clip(
f"{os.path.join(embedding_path, files)}",
text_encoder,
tokenizer,
f"<{prompt_tokens[0]}>",
)
# sample a source
init1 = torch.randn(
(1, server_state["pipe"].unet.in_channels, height // 8, width // 8),
device=torch_device,
)
# iterate the loop
frames = []
frame_index = 0
st.session_state["total_frames_avg_duration"] = []
st.session_state["total_frames_avg_speed"] = []
try:
# code for the new StableDiffusionWalkPipeline implementation.
start = timeit.default_timer()
# preview image works but it's not the right way to use this; it also does not work properly as it only makes one image and then exits.
# with torch.autocast("cuda"):
# StableDiffusionWalkPipeline.__call__(self=server_state["pipe"],
# prompt=prompts, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=cfg_scale,
# negative_prompt="", num_images_per_prompt=1, eta=0.0,
# callback=txt2vid_generation_callback, callback_steps=1,
# num_interpolation_steps=num_steps,
# fps=30,
# image_file_ext = ".png",
# output_dir=full_path, # Where images/videos will be saved
##name='animals_test', # Subdirectory of output_dir where images/videos will be saved
# upsample = False,
##do_loop=do_loop, # Change to True if you want last prompt to loop back to first prompt
# resume = False,
# audio_filepath = None,
# audio_start_sec = None,
# margin = 1.0,
# smooth = 0.0, )
# works correctly, generating all frames, but does not show the preview image;
# we also have no control over the generation and can't stop it until it finishes.
# with torch.autocast("cuda"):
# print (prompts)
# video_path = server_state["pipe"].walk(
# prompt=prompts,
# seeds=seeds,
# num_interpolation_steps=num_steps,
# height=height, # use multiples of 64 if > 512. Multiples of 8 if < 512.
# width=width, # use multiples of 64 if > 512. Multiples of 8 if < 512.
# batch_size=4,
# fps=30,
# image_file_ext = ".png",
# eta = 0.0,
# output_dir=full_path, # Where images/videos will be saved
##name='test', # Subdirectory of output_dir where images/videos will be saved
# guidance_scale=cfg_scale, # Higher adheres to prompt more, lower lets model take the wheel
# num_inference_steps=num_inference_steps, # Number of diffusion steps per image generated. 50 is good default
# upsample = False,
##do_loop=do_loop, # Change to True if you want last prompt to loop back to first prompt
# resume = False,
# audio_filepath = None,
# audio_start_sec = None,
# margin = 1.0,
# smooth = 0.0,
# callback=txt2vid_generation_callback, # our callback function will be called with the arguments callback(step, timestep, latents)
# callback_steps=1 # our callback function will be called once this many steps are processed in a single frame
# )
# old code
total_frames = st.session_state.max_duration_in_seconds * fps
while frame_index + 1 <= total_frames:
st.session_state["frame_duration"] = 0
st.session_state["frame_speed"] = 0
st.session_state["current_frame"] = frame_index
# print(f"Second: {second_count+1}/{max_duration_in_seconds}")
# sample the destination
init2 = torch.randn(
(1, server_state["pipe"].unet.in_channels, height // 8, width // 8),
device=torch_device,
)
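# Walk from init1 to init2 in num_steps interpolation steps (lerp or slerp in latent space); each t yields one video frame.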
for i, t in enumerate(np.linspace(0, 1, num_steps)):
frame_start = timeit.default_timer()
logger.info(f"COUNT: {frame_index+1}/{total_frames}")
if use_lerp_for_text:
init = torch.lerp(init1, init2, float(t))
else:
init = slerp(gpu, float(t), init1, init2)
# init = slerp(gpu, float(t), init1, init2)
with autocast("cuda"):
image = diffuse(
server_state["pipe"],
cond_embeddings,
init,
num_inference_steps,
cfg_scale,
eta,
fps=fps,
)
if (
st.session_state["save_individual_images"]
and not st.session_state["use_GFPGAN"]
and not st.session_state["use_RealESRGAN"]
):
# im = Image.fromarray(image)
outpath = os.path.join(full_path, "frame%06d.png" % frame_index)
image.save(outpath, quality=quality)
# send the image to the UI to update it
# st.session_state["preview_image"].image(im)
# append the frames to the frames list so we can use them later.
frames.append(np.asarray(image))
#
# try:
# if st.session_state["use_GFPGAN"] and server_state["GFPGAN"] is not None and not st.session_state["use_RealESRGAN"]:
if (
st.session_state["use_GFPGAN"]
and server_state["GFPGAN"] is not None
):
# print("Running GFPGAN on image ...")
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
"Running GFPGAN on image ..."
)
# skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = server_state[
"GFPGAN"
].enhance(
np.array(image)[:, :, ::-1],
has_aligned=False,
only_center_face=False,
paste_back=True,
)
gfpgan_sample = restored_img[:, :, ::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)
outpath = os.path.join(full_path, "frame%06d.png" % frame_index)
gfpgan_image.save(outpath, quality=quality)
# append the frames to the frames list so we can use them later.
frames.append(np.asarray(gfpgan_image))
try:
st.session_state["preview_image"].image(gfpgan_image)
except KeyError:
logger.error("Cant get session_state, skipping image preview.")
# except (AttributeError, KeyError):
# print("Cant perform GFPGAN, skipping.")
# increase frame_index counter.
frame_index += 1
st.session_state["current_frame"] = frame_index
duration = timeit.default_timer() - frame_start
if duration >= 1:
speed = "s/it"
else:
speed = "it/s"
duration = 1 / duration
st.session_state["frame_duration"] = duration
st.session_state["frame_speed"] = speed
if frame_index + 1 > total_frames:
break
init1 = init2
# save the video after the generation is done.
video_path = save_video_to_disk(
frames, seeds, sanitized_prompt, save_video=save_video, outdir=outdir
)
except StopException:
# reset the page title so the progress percentage doesn't stay in it and confuse the user.
set_page_title("Stable Diffusion Playground")
if save_video_on_stop:
logger.info("Streamlit Stop Exception Received. Saving video")
video_path = save_video_to_disk(
frames, seeds, sanitized_prompt, save_video=save_video, outdir=outdir
)
else:
video_path = None
# if video_path and "preview_video" in st.session_state:
## show video preview on the UI
# st.session_state["preview_video"].video(open(video_path, 'rb').read())
mem_max_used, mem_total = mem_mon.read_and_stop()
time_diff = timeit.default_timer() - start
info = f"""
{prompts}
Sampling Steps: {num_steps}, Sampler: {scheduler}, CFG scale: {cfg_scale}, Seed: {seeds}, Max Duration In Seconds: {max_duration_in_seconds}""".strip()
stats = f"""
Took { round(time_diff, 2) }s total ({ round(time_diff/(max_duration_in_seconds),2) }s per image)
Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_048_576) } MiB / { round(mem_max_used/mem_total*100, 3) }%"""
return video_path, seeds, info, stats |
Layout functions to define all the streamlit layout here. | def layout():
"""Layout functions to define all the streamlit layout here."""
if not st.session_state["defaults"].debug.enable_hydralit:
st.set_page_config(
page_title="Stable Diffusion Playground",
layout="wide",
initial_sidebar_state="collapsed",
)
# app = st.HydraApp(title='Stable Diffusion WebUI', favicon="", sidebar_state="expanded", layout="wide",
# hide_streamlit_markers=False, allow_url_nav=True , clear_cross_app_sessions=False)
# load css as an external file; the function can load from a local path or a remote URL, which is useful when running from cloud infra without access to the local path.
load_css(True, "frontend/css/streamlit.main.css")
#
# specify the primary menu definition
menu_data = [
{
"id": "Stable Diffusion",
"label": "Stable Diffusion",
"icon": "bi bi-grid-1x2-fill",
},
{
"id": "Train",
"label": "Train",
"icon": "bi bi-lightbulb-fill",
"submenu": [
{
"id": "Textual Inversion",
"label": "Textual Inversion",
"icon": "bi bi-lightbulb-fill",
},
{
"id": "Fine Tunning",
"label": "Fine Tunning",
"icon": "bi bi-lightbulb-fill",
},
],
},
{
"id": "Model Manager",
"label": "Model Manager",
"icon": "bi bi-cloud-arrow-down-fill",
},
{
"id": "Tools",
"label": "Tools",
"icon": "bi bi-tools",
"submenu": [
{"id": "API Server", "label": "API Server", "icon": "bi bi-server"},
{
"id": "Barfi/BaklavaJS",
"label": "Barfi/BaklavaJS",
"icon": "bi bi-diagram-3-fill",
},
# {'id': 'API Server', 'label': 'API Server', 'icon': 'bi bi-server'},
],
},
{"id": "Settings", "label": "Settings", "icon": "bi bi-gear-fill"},
]
over_theme = {"txc_inactive": "#FFFFFF", "menu_background": "#000000"}
menu_id = hc.nav_bar(
menu_definition=menu_data,
# home_name='Home',
# login_name='Logout',
hide_streamlit_markers=False,
override_theme=over_theme,
sticky_nav=True,
sticky_mode="pinned",
)
#
# if menu_id == "Home":
# st.info("Under Construction. :construction_worker:")
if menu_id == "Stable Diffusion":
# set the page url and title
# st.experimental_set_query_params(page='stable-diffusion')
try:
set_page_title("Stable Diffusion Playground")
except NameError:
st.experimental_rerun()
(
txt2img_tab,
img2img_tab,
txt2vid_tab,
img2txt_tab,
post_processing_tab,
concept_library_tab,
) = st.tabs(
[
"Text-to-Image",
"Image-to-Image",
# "Inpainting",
"Text-to-Video",
"Image-To-Text",
"Post-Processing",
"Concept Library",
]
)
# with home_tab:
# from home import layout
# layout()
with txt2img_tab:
from txt2img import layout
layout()
with img2img_tab:
from img2img import layout
layout()
# with inpainting_tab:
# from inpainting import layout
# layout()
with txt2vid_tab:
from txt2vid import layout
layout()
with img2txt_tab:
from img2txt import layout
layout()
with post_processing_tab:
from post_processing import layout
layout()
with concept_library_tab:
from sd_concept_library import layout
layout()
#
elif menu_id == "Model Manager":
set_page_title("Model Manager - Stable Diffusion Playground")
from ModelManager import layout
layout()
elif menu_id == "Textual Inversion":
from textual_inversion import layout
layout()
elif menu_id == "Fine Tunning":
# from textual_inversion import layout
# layout()
st.info("Under Construction. :construction_worker:")
elif menu_id == "API Server":
set_page_title("API Server - Stable Diffusion Playground")
from APIServer import layout
layout()
elif menu_id == "Barfi/BaklavaJS":
set_page_title("Barfi/BaklavaJS - Stable Diffusion Playground")
from barfi_baklavajs import layout
layout()
elif menu_id == "Settings":
set_page_title("Settings - Stable Diffusion Playground")
from Settings import layout
layout()
# calling the draggable input component module at the end, so it works on all pages
draggable_number_input.load() |
Simple function that allows us to change the title dynamically.
Normally you can use `st.set_page_config` to change the title but it can only be used once per app. | def set_page_title(title):
"""
Simple function that allows us to change the title dynamically.
Normally you can use `st.set_page_config` to change the title but it can only be used once per app.
"""
st.sidebar.markdown(
unsafe_allow_html=True,
body=f"""
<iframe height=0 srcdoc="<script>
const title = window.parent.document.querySelector('title') \
const oldObserver = window.parent.titleObserver
if (oldObserver) {{
oldObserver.disconnect()
}} \
const newObserver = new MutationObserver(function(mutations) {{
const target = mutations[0].target
if (target.text !== '{title}') {{
target.text = '{title}'
}}
}}) \
newObserver.observe(title, {{ childList: true }})
window.parent.titleObserver = newObserver \
title.text = '{title}'
</script>" />
""",
) |
Check the contents of the built wheel for any `.so` files that are *not*
abi3 compatible. | def check_is_abi3_compatible(wheel_file: str) -> None:
"""Check the contents of the built wheel for any `.so` files that are *not*
abi3 compatible.
"""
with ZipFile(wheel_file, "r") as wheel:
for file in wheel.namelist():
if not file.endswith(".so"):
continue
if not file.endswith(".abi3.so"):
raise Exception(f"Found non-abi3 lib: {file}") |
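# Hypothetical usage sketch (not part of the original script): the wheel path is a
# placeholder; the call raises if the wheel bundles any non-abi3 .so file.
check_is_abi3_compatible("dist/mypkg-1.2.3-cp39-cp39-manylinux_2_17_x86_64.whl")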
Replaces the cpython wheel file with a ABI3 compatible wheel | def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
"""Replaces the cpython wheel file with a ABI3 compatible wheel"""
if tag.abi == "abi3":
# Nothing to do.
return wheel_file
check_is_abi3_compatible(wheel_file)
# HACK: it seems that some older versions of pip will consider a wheel marked
# as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
# here; there are some clues in
# https://github.com/pantsbuild/pants/pull/12857
# https://github.com/pypa/pip/issues/9138
# https://github.com/pypa/packaging/pull/319
# Empirically this seems to work, note that macOS 11 and 10.16 are the same,
# both versions are valid for backwards compatibility.
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
abi3_tag = Tag(tag.interpreter, "abi3", platform)
dirname = os.path.dirname(wheel_file)
new_wheel_file = os.path.join(
dirname,
f"{name}-{version}-{abi3_tag}.whl",
)
os.rename(wheel_file, new_wheel_file)
print("Renamed wheel to", new_wheel_file)
return new_wheel_file |
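# Hypothetical usage sketch (not part of the original script): the wheel filename and
# package metadata are made up, and Tag/Version are assumed to come from the
# "packaging" library used for the annotations above. For a cp39 wheel this renames
# it to an abi3 wheel installable on CPython 3.9+.
from packaging.tags import Tag
from packaging.version import Version

new_wheel = cpython(
"dist/mypkg-1.2.3-cp39-cp39-manylinux_2_17_x86_64.whl",
name="mypkg",
version=Version("1.2.3"),
tag=Tag("cp39", "cp39", "manylinux_2_17_x86_64"),
)
# -> dist/mypkg-1.2.3-cp39-abi3-manylinux_2_17_x86_64.whl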