from setuptools import setup
setup(
name="consistency-models",
py_modules=["cm", "evaluations"],
install_requires=[
"blobfile>=1.0.5",
"torch",
"tqdm",
"numpy",
"scipy",
"pandas",
"Cython",
"piq==0.7.0",
"joblib==0.14.0",
"albumentations==0.4.3",
"lmdb",
"clip @ git+https://github.com/openai/CLIP.git",
"mpi4py",
"flash-attn==0.2.8",
"pillow",
],
)
|
from .inception_v3 import InceptionV3
import blobfile as bf
import torch
import torch.distributed as dist
import torch.nn as nn
from cm import dist_util
import numpy as np
import warnings
from scipy import linalg
from PIL import Image
from tqdm import tqdm
def clip_preproc(preproc_fn, x):
return preproc_fn(Image.fromarray(x.astype(np.uint8)))
def all_gather(x, dim=0):
xs = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(xs, x)
return torch.cat(xs, dim=dim)
class FIDStatistics:
def __init__(self, mu: np.ndarray, sigma: np.ndarray, resolution: int):
self.mu = mu
self.sigma = sigma
self.resolution = resolution
def frechet_distance(self, other, eps=1e-6):
"""
Compute the Frechet distance between two sets of statistics.
"""
mu1, sigma1 = self.mu, self.sigma
mu2, sigma2 = other.mu, other.sigma
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert (
mu1.shape == mu2.shape
), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}"
assert (
sigma1.shape == sigma2.shape
), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = (
"fid calculation produces singular product; adding %s to diagonal of cov estimates"
% eps
)
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
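# Quick sanity sketch (not part of the original module; names are local
# illustrations): the Frechet distance between a set of statistics and
# itself should be ~0.
def _example_fid_sanity_check():
    rng = np.random.default_rng(0)
    acts = rng.standard_normal((1000, 8))
    stats = FIDStatistics(np.mean(acts, axis=0), np.cov(acts, rowvar=False), resolution=64)
    assert stats.frechet_distance(stats) < 1e-4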
class FIDAndIS:
def __init__(
self,
softmax_batch_size=512,
clip_score_batch_size=512,
path="https://openaipublic.blob.core.windows.net/consistency/inception/inception-2015-12-05.pt",
):
import clip
super().__init__()
self.softmax_batch_size = softmax_batch_size
self.clip_score_batch_size = clip_score_batch_size
self.inception = InceptionV3()
with bf.BlobFile(path, "rb") as f:
self.inception.load_state_dict(torch.load(f))
self.inception.eval()
self.inception.to(dist_util.dev())
self.inception_softmax = self.inception.create_softmax_model()
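        # Stagger CLIP loading so that (assuming 8 ranks per node) one rank
        # per node downloads the weights first and the remaining ranks hit
        # the local cache after the barrier.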
if dist.get_rank() % 8 == 0:
clip_model, self.clip_preproc_fn = clip.load(
"ViT-B/32", device=dist_util.dev()
)
dist.barrier()
if dist.get_rank() % 8 != 0:
clip_model, self.clip_preproc_fn = clip.load(
"ViT-B/32", device=dist_util.dev()
)
dist.barrier()
# Compute the probe features separately from the final projection.
class ProjLayer(nn.Module):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
return x @ self.param
self.clip_visual = clip_model.visual
self.clip_proj = ProjLayer(self.clip_visual.proj)
self.clip_visual.proj = None
class TextModel(nn.Module):
def __init__(self, clip_model):
super().__init__()
self.clip_model = clip_model
def forward(self, x):
return self.clip_model.encode_text(x)
self.clip_tokenizer = lambda captions: clip.tokenize(captions, truncate=True)
self.clip_text = TextModel(clip_model)
self.clip_logit_scale = clip_model.logit_scale.exp().item()
self.ref_features = {}
self.is_root = not dist.is_initialized() or dist.get_rank() == 0
def get_statistics(self, activations: np.ndarray, resolution: int):
"""
Compute activation statistics for a batch of images.
        :param activations: an [N x D] batch of activations.
        :param resolution: the image resolution these statistics correspond to.
        :return: an FIDStatistics object.
"""
mu = np.mean(activations, axis=0)
sigma = np.cov(activations, rowvar=False)
return FIDStatistics(mu, sigma, resolution)
def get_preds(self, batch, captions=None):
with torch.no_grad():
batch = 127.5 * (batch + 1)
np_batch = batch.to(torch.uint8).cpu().numpy().transpose((0, 2, 3, 1))
pred, spatial_pred = self.inception(batch)
            pred = pred.reshape([pred.shape[0], -1])
            spatial_pred = spatial_pred.reshape([spatial_pred.shape[0], -1])
clip_in = torch.stack(
[clip_preproc(self.clip_preproc_fn, img) for img in np_batch]
)
clip_pred = self.clip_visual(clip_in.half().to(dist_util.dev()))
if captions is not None:
text_in = self.clip_tokenizer(captions)
text_pred = self.clip_text(text_in.to(dist_util.dev()))
else:
# Hack to easily deal with no captions
text_pred = self.clip_proj(clip_pred.half())
text_pred = text_pred / text_pred.norm(dim=-1, keepdim=True)
return pred, spatial_pred, clip_pred, text_pred, np_batch
def get_inception_score(
self, activations: np.ndarray, split_size: int = 5000
) -> float:
"""
Compute the inception score using a batch of activations.
:param activations: an [N x D] batch of activations.
:param split_size: the number of samples per split. This is used to
make results consistent with other work, even when
using a different number of samples.
:return: an inception score estimate.
"""
softmax_out = []
for i in range(0, len(activations), self.softmax_batch_size):
acts = activations[i : i + self.softmax_batch_size]
with torch.no_grad():
softmax_out.append(
self.inception_softmax(torch.from_numpy(acts).to(dist_util.dev()))
.cpu()
.numpy()
)
preds = np.concatenate(softmax_out, axis=0)
# https://github.com/openai/improved-gan/blob/4f5d1ec5c16a7eceb206f42bfc652693601e1d5c/inception_score/model.py#L46
scores = []
for i in range(0, len(preds), split_size):
part = preds[i : i + split_size]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return float(np.mean(scores))
def get_clip_score(
self, activations: np.ndarray, text_features: np.ndarray
) -> float:
# Sizes should never mismatch, but if they do we want to compute
# _some_ value instead of crash looping.
size = min(len(activations), len(text_features))
activations = activations[:size]
text_features = text_features[:size]
scores_out = []
for i in range(0, len(activations), self.clip_score_batch_size):
acts = activations[i : i + self.clip_score_batch_size]
sub_features = text_features[i : i + self.clip_score_batch_size]
with torch.no_grad():
image_features = self.clip_proj(
torch.from_numpy(acts).half().to(dist_util.dev())
)
image_features = image_features / image_features.norm(
dim=-1, keepdim=True
)
image_features = image_features.detach().cpu().float().numpy()
scores_out.extend(np.sum(sub_features * image_features, axis=-1).tolist())
return np.mean(scores_out) * self.clip_logit_scale
def get_activations(self, data, num_samples, global_batch_size, pr_samples=50000):
if self.is_root:
preds = []
spatial_preds = []
clip_preds = []
pr_images = []
for _ in tqdm(range(0, int(np.ceil(num_samples / global_batch_size)))):
batch, cond, _ = next(data)
batch, cond = batch.to(dist_util.dev()), {
k: v.to(dist_util.dev()) for k, v in cond.items()
}
pred, spatial_pred, clip_pred, _, np_batch = self.get_preds(batch)
pred, spatial_pred, clip_pred = (
all_gather(pred).cpu().numpy(),
all_gather(spatial_pred).cpu().numpy(),
all_gather(clip_pred).cpu().numpy(),
)
if self.is_root:
preds.append(pred)
spatial_preds.append(spatial_pred)
clip_preds.append(clip_pred)
if len(pr_images) * np_batch.shape[0] < pr_samples:
pr_images.append(np_batch)
if self.is_root:
preds, spatial_preds, clip_preds, pr_images = (
np.concatenate(preds, axis=0),
np.concatenate(spatial_preds, axis=0),
np.concatenate(clip_preds, axis=0),
np.concatenate(pr_images, axis=0),
)
# assert len(pr_images) >= pr_samples
return (
preds[:num_samples],
spatial_preds[:num_samples],
clip_preds[:num_samples],
pr_images[:pr_samples],
)
else:
return [], [], [], []
def get_virtual_batch(self, data, num_samples, global_batch_size, resolution):
preds, spatial_preds, clip_preds, batch = self.get_activations(
data, num_samples, global_batch_size, pr_samples=10000
)
if self.is_root:
fid_stats = self.get_statistics(preds, resolution)
spatial_stats = self.get_statistics(spatial_preds, resolution)
clip_stats = self.get_statistics(clip_preds, resolution)
return batch, dict(
mu=fid_stats.mu,
sigma=fid_stats.sigma,
mu_s=spatial_stats.mu,
sigma_s=spatial_stats.sigma,
mu_clip=clip_stats.mu,
sigma_clip=clip_stats.sigma,
)
else:
return None, dict()
def set_ref_batch(self, ref_batch):
with bf.BlobFile(ref_batch, "rb") as f:
data = np.load(f)
fid_stats = FIDStatistics(mu=data["mu"], sigma=data["sigma"], resolution=-1)
spatial_stats = FIDStatistics(
mu=data["mu_s"], sigma=data["sigma_s"], resolution=-1
)
clip_stats = FIDStatistics(
mu=data["mu_clip"], sigma=data["sigma_clip"], resolution=-1
)
self.ref_features[ref_batch] = (fid_stats, spatial_stats, clip_stats)
def get_ref_batch(self, ref_batch):
return self.ref_features[ref_batch]
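# Usage sketch (hedged; paths are placeholders): FIDAndIS assumes
# torch.distributed is initialized, since construction calls dist.get_rank()
# and dist.barrier() while loading CLIP. Samples passed to get_preds are
# expected in [-1, 1].
#
#   fid_is = FIDAndIS()
#   fid_is.set_ref_batch("ref_batch.npz")
#   fid_stats, spatial_stats, clip_stats = fid_is.get_ref_batch("ref_batch.npz")
#   pred, spatial, clip_feat, text_feat, np_imgs = fid_is.get_preds(samples)
#   sample_stats = fid_is.get_statistics(pred.cpu().numpy(), resolution=-1)
#   fid = fid_stats.frechet_distance(sample_stats)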
|
# Ported from the model here:
# https://github.com/NVlabs/stylegan3/blob/407db86e6fe432540a22515310188288687858fa/metrics/frechet_inception_distance.py#L22
#
# I have verified that the spatial features and output features are correct
# within a mean absolute error of ~3e-5.
import collections
import torch
class Conv2dLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kh, kw, stride=1, padding=0):
super().__init__()
self.stride = stride
self.padding = padding
self.weight = torch.nn.Parameter(torch.zeros(out_channels, in_channels, kh, kw))
self.beta = torch.nn.Parameter(torch.zeros(out_channels))
self.mean = torch.nn.Parameter(torch.zeros(out_channels))
self.var = torch.nn.Parameter(torch.zeros(out_channels))
def forward(self, x):
x = torch.nn.functional.conv2d(
x, self.weight.to(x.dtype), stride=self.stride, padding=self.padding
)
x = torch.nn.functional.batch_norm(
x, running_mean=self.mean, running_var=self.var, bias=self.beta, eps=1e-3
)
x = torch.nn.functional.relu(x)
return x
# ----------------------------------------------------------------------------
class InceptionA(torch.nn.Module):
def __init__(self, in_channels, tmp_channels):
super().__init__()
self.conv = Conv2dLayer(in_channels, 64, kh=1, kw=1)
self.tower = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, 48, kh=1, kw=1)),
("conv_1", Conv2dLayer(48, 64, kh=5, kw=5, padding=2)),
]
)
)
self.tower_1 = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, 64, kh=1, kw=1)),
("conv_1", Conv2dLayer(64, 96, kh=3, kw=3, padding=1)),
("conv_2", Conv2dLayer(96, 96, kh=3, kw=3, padding=1)),
]
)
)
self.tower_2 = torch.nn.Sequential(
collections.OrderedDict(
[
(
"pool",
torch.nn.AvgPool2d(
kernel_size=3, stride=1, padding=1, count_include_pad=False
),
),
("conv", Conv2dLayer(in_channels, tmp_channels, kh=1, kw=1)),
]
)
)
def forward(self, x):
return torch.cat(
[
self.conv(x).contiguous(),
self.tower(x).contiguous(),
self.tower_1(x).contiguous(),
self.tower_2(x).contiguous(),
],
dim=1,
)
# ----------------------------------------------------------------------------
class InceptionB(torch.nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = Conv2dLayer(in_channels, 384, kh=3, kw=3, stride=2)
self.tower = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, 64, kh=1, kw=1)),
("conv_1", Conv2dLayer(64, 96, kh=3, kw=3, padding=1)),
("conv_2", Conv2dLayer(96, 96, kh=3, kw=3, stride=2)),
]
)
)
self.pool = torch.nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, x):
return torch.cat(
[
self.conv(x).contiguous(),
self.tower(x).contiguous(),
self.pool(x).contiguous(),
],
dim=1,
)
# ----------------------------------------------------------------------------
class InceptionC(torch.nn.Module):
def __init__(self, in_channels, tmp_channels):
super().__init__()
self.conv = Conv2dLayer(in_channels, 192, kh=1, kw=1)
self.tower = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, tmp_channels, kh=1, kw=1)),
(
"conv_1",
Conv2dLayer(
tmp_channels, tmp_channels, kh=1, kw=7, padding=[0, 3]
),
),
(
"conv_2",
Conv2dLayer(tmp_channels, 192, kh=7, kw=1, padding=[3, 0]),
),
]
)
)
self.tower_1 = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, tmp_channels, kh=1, kw=1)),
(
"conv_1",
Conv2dLayer(
tmp_channels, tmp_channels, kh=7, kw=1, padding=[3, 0]
),
),
(
"conv_2",
Conv2dLayer(
tmp_channels, tmp_channels, kh=1, kw=7, padding=[0, 3]
),
),
(
"conv_3",
Conv2dLayer(
tmp_channels, tmp_channels, kh=7, kw=1, padding=[3, 0]
),
),
(
"conv_4",
Conv2dLayer(tmp_channels, 192, kh=1, kw=7, padding=[0, 3]),
),
]
)
)
self.tower_2 = torch.nn.Sequential(
collections.OrderedDict(
[
(
"pool",
torch.nn.AvgPool2d(
kernel_size=3, stride=1, padding=1, count_include_pad=False
),
),
("conv", Conv2dLayer(in_channels, 192, kh=1, kw=1)),
]
)
)
def forward(self, x):
return torch.cat(
[
self.conv(x).contiguous(),
self.tower(x).contiguous(),
self.tower_1(x).contiguous(),
self.tower_2(x).contiguous(),
],
dim=1,
)
# ----------------------------------------------------------------------------
class InceptionD(torch.nn.Module):
def __init__(self, in_channels):
super().__init__()
self.tower = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, 192, kh=1, kw=1)),
("conv_1", Conv2dLayer(192, 320, kh=3, kw=3, stride=2)),
]
)
)
self.tower_1 = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(in_channels, 192, kh=1, kw=1)),
("conv_1", Conv2dLayer(192, 192, kh=1, kw=7, padding=[0, 3])),
("conv_2", Conv2dLayer(192, 192, kh=7, kw=1, padding=[3, 0])),
("conv_3", Conv2dLayer(192, 192, kh=3, kw=3, stride=2)),
]
)
)
self.pool = torch.nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, x):
return torch.cat(
[
self.tower(x).contiguous(),
self.tower_1(x).contiguous(),
self.pool(x).contiguous(),
],
dim=1,
)
# ----------------------------------------------------------------------------
class InceptionE(torch.nn.Module):
def __init__(self, in_channels, use_avg_pool):
super().__init__()
self.conv = Conv2dLayer(in_channels, 320, kh=1, kw=1)
self.tower_conv = Conv2dLayer(in_channels, 384, kh=1, kw=1)
self.tower_mixed_conv = Conv2dLayer(384, 384, kh=1, kw=3, padding=[0, 1])
self.tower_mixed_conv_1 = Conv2dLayer(384, 384, kh=3, kw=1, padding=[1, 0])
self.tower_1_conv = Conv2dLayer(in_channels, 448, kh=1, kw=1)
self.tower_1_conv_1 = Conv2dLayer(448, 384, kh=3, kw=3, padding=1)
self.tower_1_mixed_conv = Conv2dLayer(384, 384, kh=1, kw=3, padding=[0, 1])
self.tower_1_mixed_conv_1 = Conv2dLayer(384, 384, kh=3, kw=1, padding=[1, 0])
if use_avg_pool:
self.tower_2_pool = torch.nn.AvgPool2d(
kernel_size=3, stride=1, padding=1, count_include_pad=False
)
else:
self.tower_2_pool = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.tower_2_conv = Conv2dLayer(in_channels, 192, kh=1, kw=1)
def forward(self, x):
a = self.tower_conv(x)
b = self.tower_1_conv_1(self.tower_1_conv(x))
return torch.cat(
[
self.conv(x).contiguous(),
self.tower_mixed_conv(a).contiguous(),
self.tower_mixed_conv_1(a).contiguous(),
self.tower_1_mixed_conv(b).contiguous(),
self.tower_1_mixed_conv_1(b).contiguous(),
self.tower_2_conv(self.tower_2_pool(x)).contiguous(),
],
dim=1,
)
# ----------------------------------------------------------------------------
class InceptionV3(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.Sequential(
collections.OrderedDict(
[
("conv", Conv2dLayer(3, 32, kh=3, kw=3, stride=2)),
("conv_1", Conv2dLayer(32, 32, kh=3, kw=3)),
("conv_2", Conv2dLayer(32, 64, kh=3, kw=3, padding=1)),
("pool0", torch.nn.MaxPool2d(kernel_size=3, stride=2)),
("conv_3", Conv2dLayer(64, 80, kh=1, kw=1)),
("conv_4", Conv2dLayer(80, 192, kh=3, kw=3)),
("pool1", torch.nn.MaxPool2d(kernel_size=3, stride=2)),
("mixed", InceptionA(192, tmp_channels=32)),
("mixed_1", InceptionA(256, tmp_channels=64)),
("mixed_2", InceptionA(288, tmp_channels=64)),
("mixed_3", InceptionB(288)),
("mixed_4", InceptionC(768, tmp_channels=128)),
("mixed_5", InceptionC(768, tmp_channels=160)),
("mixed_6", InceptionC(768, tmp_channels=160)),
("mixed_7", InceptionC(768, tmp_channels=192)),
("mixed_8", InceptionD(768)),
("mixed_9", InceptionE(1280, use_avg_pool=True)),
("mixed_10", InceptionE(2048, use_avg_pool=False)),
("pool2", torch.nn.AvgPool2d(kernel_size=8)),
]
)
)
self.output = torch.nn.Linear(2048, 1008)
def forward(
self,
img,
return_features: bool = True,
use_fp16: bool = False,
no_output_bias: bool = False,
):
batch_size, channels, height, width = img.shape # [NCHW]
assert channels == 3
# Cast to float.
x = img.to(torch.float16 if use_fp16 else torch.float32)
# Emulate tf.image.resize_bilinear(x, [299, 299]), including the funky alignment.
new_width, new_height = 299, 299
theta = torch.eye(2, 3, device=x.device)
theta[0, 2] += theta[0, 0] / width - theta[0, 0] / new_width
theta[1, 2] += theta[1, 1] / height - theta[1, 1] / new_height
theta = theta.to(x.dtype).unsqueeze(0).repeat([batch_size, 1, 1])
grid = torch.nn.functional.affine_grid(
theta, [batch_size, channels, new_height, new_width], align_corners=False
)
x = torch.nn.functional.grid_sample(
x, grid, mode="bilinear", padding_mode="border", align_corners=False
)
        # Scale dynamic range from [0,255] to [-1,1).
x -= 128
x /= 128
# Main layers.
intermediate = self.layers[:-6](x)
spatial_features = (
self.layers[-6]
.conv(intermediate)[:, :7]
.permute(0, 2, 3, 1)
.reshape(-1, 2023)
)
features = self.layers[-6:](intermediate).reshape(-1, 2048).to(torch.float32)
if return_features:
return features, spatial_features
# Output layer.
return self.acts_to_probs(features, no_output_bias=no_output_bias)
def acts_to_probs(self, features, no_output_bias: bool = False):
if no_output_bias:
logits = torch.nn.functional.linear(features, self.output.weight)
else:
logits = self.output(features)
probs = torch.nn.functional.softmax(logits, dim=1)
return probs
def create_softmax_model(self):
return SoftmaxModel(self.output.weight)
class SoftmaxModel(torch.nn.Module):
def __init__(self, weight: torch.Tensor):
super().__init__()
self.weight = torch.nn.Parameter(weight.detach().clone())
def forward(self, x):
logits = torch.nn.functional.linear(x, self.weight)
probs = torch.nn.functional.softmax(logits, dim=1)
return probs
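# Feature-extraction sketch (hedged; a randomly initialized model is used
# here purely to illustrate the shapes produced by forward() above; real use
# loads the checkpoint as in FIDAndIS):
def _example_extract_features():
    model = InceptionV3().eval()
    imgs = torch.randint(0, 256, (4, 3, 64, 64)).float()  # NCHW in [0, 255]
    with torch.no_grad():
        feats, spatial = model(imgs)
    assert feats.shape == (4, 2048) and spatial.shape == (4, 2023)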
|
import argparse
import io
import os
import random
import warnings
import zipfile
from abc import ABC, abstractmethod
from contextlib import contextmanager
from functools import partial
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from typing import Iterable, Optional, Tuple
import numpy as np
import requests
import tensorflow.compat.v1 as tf
from scipy import linalg
from tqdm.auto import tqdm
INCEPTION_V3_URL = "https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/classify_image_graph_def.pb"
INCEPTION_V3_PATH = "classify_image_graph_def.pb"
FID_POOL_NAME = "pool_3:0"
FID_SPATIAL_NAME = "mixed_6/conv:0"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ref_batch", help="path to reference batch npz file")
parser.add_argument("sample_batch", help="path to sample batch npz file")
args = parser.parse_args()
config = tf.ConfigProto(
allow_soft_placement=True # allows DecodeJpeg to run on CPU in Inception graph
)
config.gpu_options.allow_growth = True
evaluator = Evaluator(tf.Session(config=config))
print("warming up TensorFlow...")
# This will cause TF to print a bunch of verbose stuff now rather
# than after the next print(), to help prevent confusion.
evaluator.warmup()
print("computing reference batch activations...")
ref_acts = evaluator.read_activations(args.ref_batch)
print("computing/reading reference batch statistics...")
ref_stats, ref_stats_spatial = evaluator.read_statistics(args.ref_batch, ref_acts)
print("computing sample batch activations...")
sample_acts = evaluator.read_activations(args.sample_batch)
print("computing/reading sample batch statistics...")
sample_stats, sample_stats_spatial = evaluator.read_statistics(
args.sample_batch, sample_acts
)
print("Computing evaluations...")
print("Inception Score:", evaluator.compute_inception_score(sample_acts[0]))
print("FID:", sample_stats.frechet_distance(ref_stats))
print("sFID:", sample_stats_spatial.frechet_distance(ref_stats_spatial))
prec, recall = evaluator.compute_prec_recall(ref_acts[0], sample_acts[0])
print("Precision:", prec)
print("Recall:", recall)
class InvalidFIDException(Exception):
pass
class FIDStatistics:
def __init__(self, mu: np.ndarray, sigma: np.ndarray):
self.mu = mu
self.sigma = sigma
def frechet_distance(self, other, eps=1e-6):
"""
Compute the Frechet distance between two sets of statistics.
"""
# https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L132
mu1, sigma1 = self.mu, self.sigma
mu2, sigma2 = other.mu, other.sigma
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert (
mu1.shape == mu2.shape
), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}"
assert (
sigma1.shape == sigma2.shape
), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = (
"fid calculation produces singular product; adding %s to diagonal of cov estimates"
% eps
)
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
class Evaluator:
def __init__(
self,
session,
batch_size=64,
softmax_batch_size=512,
):
self.sess = session
self.batch_size = batch_size
self.softmax_batch_size = softmax_batch_size
self.manifold_estimator = ManifoldEstimator(session)
with self.sess.graph.as_default():
self.image_input = tf.placeholder(tf.float32, shape=[None, None, None, 3])
self.softmax_input = tf.placeholder(tf.float32, shape=[None, 2048])
self.pool_features, self.spatial_features = _create_feature_graph(
self.image_input
)
self.softmax = _create_softmax_graph(self.softmax_input)
def warmup(self):
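        # A [1, 8, 64, 64, 3] array iterates as a single [8, 64, 64, 3] batch.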
self.compute_activations(np.zeros([1, 8, 64, 64, 3]))
def read_activations(self, npz_path: str) -> Tuple[np.ndarray, np.ndarray]:
with open_npz_array(npz_path, "arr_0") as reader:
return self.compute_activations(reader.read_batches(self.batch_size))
def compute_activations(
self, batches: Iterable[np.ndarray]
) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute image features for downstream evals.
        :param batches: an iterable over NHWC numpy arrays in [0, 255].
:return: a tuple of numpy arrays of shape [N x X], where X is a feature
dimension. The tuple is (pool_3, spatial).
"""
preds = []
spatial_preds = []
for batch in tqdm(batches):
batch = batch.astype(np.float32)
pred, spatial_pred = self.sess.run(
[self.pool_features, self.spatial_features], {self.image_input: batch}
)
preds.append(pred.reshape([pred.shape[0], -1]))
spatial_preds.append(spatial_pred.reshape([spatial_pred.shape[0], -1]))
return (
np.concatenate(preds, axis=0),
np.concatenate(spatial_preds, axis=0),
)
def read_statistics(
self, npz_path: str, activations: Tuple[np.ndarray, np.ndarray]
) -> Tuple[FIDStatistics, FIDStatistics]:
obj = np.load(npz_path)
if "mu" in list(obj.keys()):
return FIDStatistics(obj["mu"], obj["sigma"]), FIDStatistics(
obj["mu_s"], obj["sigma_s"]
)
return tuple(self.compute_statistics(x) for x in activations)
def compute_statistics(self, activations: np.ndarray) -> FIDStatistics:
mu = np.mean(activations, axis=0)
sigma = np.cov(activations, rowvar=False)
return FIDStatistics(mu, sigma)
def compute_inception_score(
self, activations: np.ndarray, split_size: int = 5000
) -> float:
softmax_out = []
for i in range(0, len(activations), self.softmax_batch_size):
acts = activations[i : i + self.softmax_batch_size]
softmax_out.append(
self.sess.run(self.softmax, feed_dict={self.softmax_input: acts})
)
preds = np.concatenate(softmax_out, axis=0)
# https://github.com/openai/improved-gan/blob/4f5d1ec5c16a7eceb206f42bfc652693601e1d5c/inception_score/model.py#L46
scores = []
for i in range(0, len(preds), split_size):
part = preds[i : i + split_size]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return float(np.mean(scores))
def compute_prec_recall(
self, activations_ref: np.ndarray, activations_sample: np.ndarray
) -> Tuple[float, float]:
radii_1 = self.manifold_estimator.manifold_radii(activations_ref)
radii_2 = self.manifold_estimator.manifold_radii(activations_sample)
pr = self.manifold_estimator.evaluate_pr(
activations_ref, radii_1, activations_sample, radii_2
)
return (float(pr[0][0]), float(pr[1][0]))
class ManifoldEstimator:
"""
A helper for comparing manifolds of feature vectors.
Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L57
"""
def __init__(
self,
session,
row_batch_size=10000,
col_batch_size=10000,
nhood_sizes=(3,),
clamp_to_percentile=None,
eps=1e-5,
):
"""
Estimate the manifold of given feature vectors.
:param session: the TensorFlow session.
:param row_batch_size: row batch size to compute pairwise distances
(parameter to trade-off between memory usage and performance).
:param col_batch_size: column batch size to compute pairwise distances.
:param nhood_sizes: number of neighbors used to estimate the manifold.
:param clamp_to_percentile: prune hyperspheres that have radius larger than
the given percentile.
:param eps: small number for numerical stability.
"""
self.distance_block = DistanceBlock(session)
self.row_batch_size = row_batch_size
self.col_batch_size = col_batch_size
self.nhood_sizes = nhood_sizes
self.num_nhoods = len(nhood_sizes)
self.clamp_to_percentile = clamp_to_percentile
self.eps = eps
def warmup(self):
feats, radii = (
np.zeros([1, 2048], dtype=np.float32),
np.zeros([1, 1], dtype=np.float32),
)
self.evaluate_pr(feats, radii, feats, radii)
def manifold_radii(self, features: np.ndarray) -> np.ndarray:
num_images = len(features)
# Estimate manifold of features by calculating distances to k-NN of each sample.
radii = np.zeros([num_images, self.num_nhoods], dtype=np.float32)
distance_batch = np.zeros([self.row_batch_size, num_images], dtype=np.float32)
seq = np.arange(max(self.nhood_sizes) + 1, dtype=np.int32)
for begin1 in range(0, num_images, self.row_batch_size):
end1 = min(begin1 + self.row_batch_size, num_images)
row_batch = features[begin1:end1]
for begin2 in range(0, num_images, self.col_batch_size):
end2 = min(begin2 + self.col_batch_size, num_images)
col_batch = features[begin2:end2]
# Compute distances between batches.
distance_batch[
0 : end1 - begin1, begin2:end2
] = self.distance_block.pairwise_distances(row_batch, col_batch)
# Find the k-nearest neighbor from the current batch.
radii[begin1:end1, :] = np.concatenate(
[
x[:, self.nhood_sizes]
for x in _numpy_partition(
distance_batch[0 : end1 - begin1, :], seq, axis=1
)
],
axis=0,
)
if self.clamp_to_percentile is not None:
max_distances = np.percentile(radii, self.clamp_to_percentile, axis=0)
radii[radii > max_distances] = 0
return radii
def evaluate(
self, features: np.ndarray, radii: np.ndarray, eval_features: np.ndarray
):
"""
        Evaluate whether new feature vectors lie on the estimated manifold.
"""
num_eval_images = eval_features.shape[0]
num_ref_images = radii.shape[0]
distance_batch = np.zeros(
[self.row_batch_size, num_ref_images], dtype=np.float32
)
batch_predictions = np.zeros([num_eval_images, self.num_nhoods], dtype=np.int32)
max_realism_score = np.zeros([num_eval_images], dtype=np.float32)
nearest_indices = np.zeros([num_eval_images], dtype=np.int32)
for begin1 in range(0, num_eval_images, self.row_batch_size):
end1 = min(begin1 + self.row_batch_size, num_eval_images)
feature_batch = eval_features[begin1:end1]
for begin2 in range(0, num_ref_images, self.col_batch_size):
end2 = min(begin2 + self.col_batch_size, num_ref_images)
ref_batch = features[begin2:end2]
distance_batch[
0 : end1 - begin1, begin2:end2
] = self.distance_block.pairwise_distances(feature_batch, ref_batch)
# From the minibatch of new feature vectors, determine if they are in the estimated manifold.
# If a feature vector is inside a hypersphere of some reference sample, then
# the new sample lies at the estimated manifold.
# The radii of the hyperspheres are determined from distances of neighborhood size k.
samples_in_manifold = distance_batch[0 : end1 - begin1, :, None] <= radii
batch_predictions[begin1:end1] = np.any(samples_in_manifold, axis=1).astype(
np.int32
)
max_realism_score[begin1:end1] = np.max(
radii[:, 0] / (distance_batch[0 : end1 - begin1, :] + self.eps), axis=1
)
nearest_indices[begin1:end1] = np.argmin(
distance_batch[0 : end1 - begin1, :], axis=1
)
return {
"fraction": float(np.mean(batch_predictions)),
"batch_predictions": batch_predictions,
"max_realisim_score": max_realism_score,
"nearest_indices": nearest_indices,
}
def evaluate_pr(
self,
features_1: np.ndarray,
radii_1: np.ndarray,
features_2: np.ndarray,
radii_2: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Evaluate precision and recall efficiently.
:param features_1: [N1 x D] feature vectors for reference batch.
:param radii_1: [N1 x K1] radii for reference vectors.
:param features_2: [N2 x D] feature vectors for the other batch.
        :param radii_2: [N2 x K2] radii for other vectors.
:return: a tuple of arrays for (precision, recall):
- precision: an np.ndarray of length K1
- recall: an np.ndarray of length K2
"""
        # `np.bool` was removed in NumPy 1.24; use the builtin bool dtype.
        features_1_status = np.zeros([len(features_1), radii_2.shape[1]], dtype=bool)
        features_2_status = np.zeros([len(features_2), radii_1.shape[1]], dtype=bool)
for begin_1 in range(0, len(features_1), self.row_batch_size):
end_1 = begin_1 + self.row_batch_size
batch_1 = features_1[begin_1:end_1]
for begin_2 in range(0, len(features_2), self.col_batch_size):
end_2 = begin_2 + self.col_batch_size
batch_2 = features_2[begin_2:end_2]
batch_1_in, batch_2_in = self.distance_block.less_thans(
batch_1, radii_1[begin_1:end_1], batch_2, radii_2[begin_2:end_2]
)
features_1_status[begin_1:end_1] |= batch_1_in
features_2_status[begin_2:end_2] |= batch_2_in
return (
np.mean(features_2_status.astype(np.float64), axis=0),
np.mean(features_1_status.astype(np.float64), axis=0),
)
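# In evaluate_pr above, the returned pair is (precision, recall): precision is
# the fraction of sample vectors (features_2) that fall inside at least one
# reference hypersphere, and recall is the fraction of reference vectors
# (features_1) that fall inside at least one sample hypersphere.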
class DistanceBlock:
"""
Calculate pairwise distances between vectors.
Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L34
"""
def __init__(self, session):
self.session = session
# Initialize TF graph to calculate pairwise distances.
with session.graph.as_default():
self._features_batch1 = tf.placeholder(tf.float32, shape=[None, None])
self._features_batch2 = tf.placeholder(tf.float32, shape=[None, None])
distance_block_16 = _batch_pairwise_distances(
tf.cast(self._features_batch1, tf.float16),
tf.cast(self._features_batch2, tf.float16),
)
self.distance_block = tf.cond(
tf.reduce_all(tf.math.is_finite(distance_block_16)),
lambda: tf.cast(distance_block_16, tf.float32),
lambda: _batch_pairwise_distances(
self._features_batch1, self._features_batch2
),
)
# Extra logic for less thans.
self._radii1 = tf.placeholder(tf.float32, shape=[None, None])
self._radii2 = tf.placeholder(tf.float32, shape=[None, None])
dist32 = tf.cast(self.distance_block, tf.float32)[..., None]
self._batch_1_in = tf.math.reduce_any(dist32 <= self._radii2, axis=1)
self._batch_2_in = tf.math.reduce_any(
dist32 <= self._radii1[:, None], axis=0
)
def pairwise_distances(self, U, V):
"""
Evaluate pairwise distances between two batches of feature vectors.
"""
return self.session.run(
self.distance_block,
feed_dict={self._features_batch1: U, self._features_batch2: V},
)
def less_thans(self, batch_1, radii_1, batch_2, radii_2):
return self.session.run(
[self._batch_1_in, self._batch_2_in],
feed_dict={
self._features_batch1: batch_1,
self._features_batch2: batch_2,
self._radii1: radii_1,
self._radii2: radii_2,
},
)
def _batch_pairwise_distances(U, V):
"""
Compute pairwise distances between two batches of feature vectors.
"""
with tf.variable_scope("pairwise_dist_block"):
# Squared norms of each row in U and V.
norm_u = tf.reduce_sum(tf.square(U), 1)
norm_v = tf.reduce_sum(tf.square(V), 1)
# norm_u as a column and norm_v as a row vectors.
norm_u = tf.reshape(norm_u, [-1, 1])
norm_v = tf.reshape(norm_v, [1, -1])
# Pairwise squared Euclidean distances.
D = tf.maximum(norm_u - 2 * tf.matmul(U, V, False, True) + norm_v, 0.0)
return D
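# Sanity sketch (hedged, numpy-only; not part of the original module): the
# identity used above, ||u - v||^2 = ||u||^2 - 2 u.v + ||v||^2, checked on
# small random inputs.
def _example_pairwise_distances():
    rng = np.random.default_rng(0)
    U, V = rng.standard_normal((5, 3)), rng.standard_normal((7, 3))
    D = np.maximum((U**2).sum(1)[:, None] - 2 * U @ V.T + (V**2).sum(1)[None, :], 0.0)
    ref = ((U[:, None, :] - V[None, :, :]) ** 2).sum(-1)
    assert np.allclose(D, ref)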
class NpzArrayReader(ABC):
@abstractmethod
def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
pass
@abstractmethod
def remaining(self) -> int:
pass
def read_batches(self, batch_size: int) -> Iterable[np.ndarray]:
def gen_fn():
while True:
batch = self.read_batch(batch_size)
if batch is None:
break
yield batch
rem = self.remaining()
num_batches = rem // batch_size + int(rem % batch_size != 0)
return BatchIterator(gen_fn, num_batches)
class BatchIterator:
def __init__(self, gen_fn, length):
self.gen_fn = gen_fn
self.length = length
def __len__(self):
return self.length
def __iter__(self):
return self.gen_fn()
class StreamingNpzArrayReader(NpzArrayReader):
def __init__(self, arr_f, shape, dtype):
self.arr_f = arr_f
self.shape = shape
self.dtype = dtype
self.idx = 0
def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
if self.idx >= self.shape[0]:
return None
bs = min(batch_size, self.shape[0] - self.idx)
self.idx += bs
if self.dtype.itemsize == 0:
return np.ndarray([bs, *self.shape[1:]], dtype=self.dtype)
read_count = bs * np.prod(self.shape[1:])
read_size = int(read_count * self.dtype.itemsize)
data = _read_bytes(self.arr_f, read_size, "array data")
return np.frombuffer(data, dtype=self.dtype).reshape([bs, *self.shape[1:]])
def remaining(self) -> int:
return max(0, self.shape[0] - self.idx)
class MemoryNpzArrayReader(NpzArrayReader):
def __init__(self, arr):
self.arr = arr
self.idx = 0
@classmethod
def load(cls, path: str, arr_name: str):
with open(path, "rb") as f:
arr = np.load(f)[arr_name]
return cls(arr)
def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
if self.idx >= self.arr.shape[0]:
return None
res = self.arr[self.idx : self.idx + batch_size]
self.idx += batch_size
return res
def remaining(self) -> int:
return max(0, self.arr.shape[0] - self.idx)
@contextmanager
def open_npz_array(path: str, arr_name: str) -> NpzArrayReader:
with _open_npy_file(path, arr_name) as arr_f:
version = np.lib.format.read_magic(arr_f)
if version == (1, 0):
header = np.lib.format.read_array_header_1_0(arr_f)
elif version == (2, 0):
header = np.lib.format.read_array_header_2_0(arr_f)
else:
yield MemoryNpzArrayReader.load(path, arr_name)
return
shape, fortran, dtype = header
if fortran or dtype.hasobject:
yield MemoryNpzArrayReader.load(path, arr_name)
else:
yield StreamingNpzArrayReader(arr_f, shape, dtype)
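# Usage sketch (hedged; "samples.npz" and "arr_0" are placeholder names):
# stream batches out of an .npz without materializing the whole array.
def _example_stream_npz(path: str = "samples.npz"):
    with open_npz_array(path, "arr_0") as reader:
        for batch in reader.read_batches(64):
            print(batch.shape)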
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Copied from: https://github.com/numpy/numpy/blob/fb215c76967739268de71aa4bda55dd1b062bc2e/numpy/lib/format.py#L788-L886
Read from file-like object until size bytes are read.
    Raises ValueError if EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
@contextmanager
def _open_npy_file(path: str, arr_name: str):
with open(path, "rb") as f:
with zipfile.ZipFile(f, "r") as zip_f:
if f"{arr_name}.npy" not in zip_f.namelist():
raise ValueError(f"missing {arr_name} in npz file")
with zip_f.open(f"{arr_name}.npy", "r") as arr_f:
yield arr_f
def _download_inception_model():
if os.path.exists(INCEPTION_V3_PATH):
return
print("downloading InceptionV3 model...")
with requests.get(INCEPTION_V3_URL, stream=True) as r:
r.raise_for_status()
tmp_path = INCEPTION_V3_PATH + ".tmp"
with open(tmp_path, "wb") as f:
for chunk in tqdm(r.iter_content(chunk_size=8192)):
f.write(chunk)
os.rename(tmp_path, INCEPTION_V3_PATH)
def _create_feature_graph(input_batch):
_download_inception_model()
prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
with open(INCEPTION_V3_PATH, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
pool3, spatial = tf.import_graph_def(
graph_def,
input_map={f"ExpandDims:0": input_batch},
return_elements=[FID_POOL_NAME, FID_SPATIAL_NAME],
name=prefix,
)
_update_shapes(pool3)
spatial = spatial[..., :7]
return pool3, spatial
def _create_softmax_graph(input_batch):
_download_inception_model()
prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
with open(INCEPTION_V3_PATH, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
(matmul,) = tf.import_graph_def(
graph_def, return_elements=[f"softmax/logits/MatMul"], name=prefix
)
w = matmul.inputs[1]
logits = tf.matmul(input_batch, w)
return tf.nn.softmax(logits)
def _update_shapes(pool3):
# https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L50-L63
ops = pool3.graph.get_operations()
for op in ops:
for o in op.outputs:
shape = o.get_shape()
if shape._dims is not None: # pylint: disable=protected-access
# shape = [s.value for s in shape] TF 1.x
shape = [s for s in shape] # TF 2.x
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__["_shape_val"] = tf.TensorShape(new_shape)
return pool3
def _numpy_partition(arr, kth, **kwargs):
num_workers = min(cpu_count(), len(arr))
chunk_size = len(arr) // num_workers
extra = len(arr) % num_workers
start_idx = 0
batches = []
for i in range(num_workers):
size = chunk_size + (1 if i < extra else 0)
batches.append(arr[start_idx : start_idx + size])
start_idx += size
with ThreadPool(num_workers) as pool:
return list(pool.map(partial(np.partition, kth=kth, **kwargs), batches))
if __name__ == "__main__":
main()
|
"""
Convert an LSUN lmdb database into a directory of images.
"""
import argparse
import io
import os
from PIL import Image
import lmdb
import numpy as np
def read_images(lmdb_path, image_size):
env = lmdb.open(lmdb_path, map_size=1099511627776, max_readers=100, readonly=True)
with env.begin(write=False) as transaction:
cursor = transaction.cursor()
for _, webp_data in cursor:
img = Image.open(io.BytesIO(webp_data))
width, height = img.size
scale = image_size / min(width, height)
img = img.resize(
(int(round(scale * width)), int(round(scale * height))),
resample=Image.BOX,
)
arr = np.array(img)
h, w, _ = arr.shape
h_off = (h - image_size) // 2
w_off = (w - image_size) // 2
arr = arr[h_off : h_off + image_size, w_off : w_off + image_size]
yield arr
def dump_images(out_dir, images, prefix):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for i, img in enumerate(images):
Image.fromarray(img).save(os.path.join(out_dir, f"{prefix}_{i:07d}.png"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image-size", help="new image size", type=int, default=256)
parser.add_argument("--prefix", help="class name", type=str, default="bedroom")
parser.add_argument("lmdb_path", help="path to an LSUN lmdb database")
parser.add_argument("out_dir", help="path to output directory")
args = parser.parse_args()
images = read_images(args.lmdb_path, args.image_size)
dump_images(args.out_dir, images, args.prefix)
if __name__ == "__main__":
main()
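# Usage sketch (hedged; the script filename is a placeholder):
#   python lsun_to_images.py --image-size 256 --prefix bedroom \
#       bedroom_train_lmdb/ lsun_bedroom_images/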
|
"""
Train a diffusion model on images.
"""
import argparse
from cm import dist_util, logger
from cm.image_datasets import load_data
from cm.resample import create_named_schedule_sampler
from cm.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
cm_train_defaults,
args_to_dict,
add_dict_to_argparser,
create_ema_and_scales_fn,
)
from cm.train_util import CMTrainLoop
import torch.distributed as dist
import copy
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
ema_scale_fn = create_ema_and_scales_fn(
target_ema_mode=args.target_ema_mode,
start_ema=args.start_ema,
scale_mode=args.scale_mode,
start_scales=args.start_scales,
end_scales=args.end_scales,
total_steps=args.total_training_steps,
distill_steps_per_iter=args.distill_steps_per_iter,
)
if args.training_mode == "progdist":
distillation = False
elif "consistency" in args.training_mode:
distillation = True
else:
raise ValueError(f"unknown training mode {args.training_mode}")
model_and_diffusion_kwargs = args_to_dict(
args, model_and_diffusion_defaults().keys()
)
model_and_diffusion_kwargs["distillation"] = distillation
model, diffusion = create_model_and_diffusion(**model_and_diffusion_kwargs)
model.to(dist_util.dev())
model.train()
if args.use_fp16:
model.convert_to_fp16()
schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
logger.log("creating data loader...")
if args.batch_size == -1:
batch_size = args.global_batch_size // dist.get_world_size()
if args.global_batch_size % dist.get_world_size() != 0:
logger.log(
f"warning, using smaller global_batch_size of {dist.get_world_size()*batch_size} instead of {args.global_batch_size}"
)
else:
batch_size = args.batch_size
data = load_data(
data_dir=args.data_dir,
batch_size=batch_size,
image_size=args.image_size,
class_cond=args.class_cond,
)
if len(args.teacher_model_path) > 0: # path to the teacher score model.
logger.log(f"loading the teacher model from {args.teacher_model_path}")
teacher_model_and_diffusion_kwargs = copy.deepcopy(model_and_diffusion_kwargs)
teacher_model_and_diffusion_kwargs["dropout"] = args.teacher_dropout
teacher_model_and_diffusion_kwargs["distillation"] = False
teacher_model, teacher_diffusion = create_model_and_diffusion(
**teacher_model_and_diffusion_kwargs,
)
teacher_model.load_state_dict(
dist_util.load_state_dict(args.teacher_model_path, map_location="cpu"),
)
teacher_model.to(dist_util.dev())
teacher_model.eval()
for dst, src in zip(model.parameters(), teacher_model.parameters()):
dst.data.copy_(src.data)
if args.use_fp16:
teacher_model.convert_to_fp16()
else:
teacher_model = None
teacher_diffusion = None
# load the target model for distillation, if path specified.
logger.log("creating the target model")
target_model, _ = create_model_and_diffusion(
**model_and_diffusion_kwargs,
)
target_model.to(dist_util.dev())
target_model.train()
dist_util.sync_params(target_model.parameters())
dist_util.sync_params(target_model.buffers())
for dst, src in zip(target_model.parameters(), model.parameters()):
dst.data.copy_(src.data)
if args.use_fp16:
target_model.convert_to_fp16()
logger.log("training...")
CMTrainLoop(
model=model,
target_model=target_model,
teacher_model=teacher_model,
teacher_diffusion=teacher_diffusion,
training_mode=args.training_mode,
ema_scale_fn=ema_scale_fn,
total_training_steps=args.total_training_steps,
diffusion=diffusion,
data=data,
batch_size=batch_size,
microbatch=args.microbatch,
lr=args.lr,
ema_rate=args.ema_rate,
log_interval=args.log_interval,
save_interval=args.save_interval,
resume_checkpoint=args.resume_checkpoint,
use_fp16=args.use_fp16,
fp16_scale_growth=args.fp16_scale_growth,
schedule_sampler=schedule_sampler,
weight_decay=args.weight_decay,
lr_anneal_steps=args.lr_anneal_steps,
).run_loop()
def create_argparser():
defaults = dict(
data_dir="",
schedule_sampler="uniform",
lr=1e-4,
weight_decay=0.0,
lr_anneal_steps=0,
global_batch_size=2048,
batch_size=-1,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
log_interval=10,
save_interval=10000,
resume_checkpoint="",
use_fp16=False,
fp16_scale_growth=1e-3,
)
defaults.update(model_and_diffusion_defaults())
defaults.update(cm_train_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
|
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
from functools import cache
from mpi4py import MPI
from cm import dist_util, logger
from cm.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
from cm.random_util import get_generator
from cm.karras_diffusion import stochastic_iterative_sampler
from evaluations.th_evaluator import FIDAndIS
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
if "consistency" in args.training_mode:
distillation = True
else:
distillation = False
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys()),
distillation=distillation,
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
if args.use_fp16:
model.convert_to_fp16()
model.eval()
fid_is = FIDAndIS()
fid_is.set_ref_batch(args.ref_batch)
(
ref_fid_stats,
ref_spatial_stats,
ref_clip_stats,
) = fid_is.get_ref_batch(args.ref_batch)
def sample_generator(ts):
logger.log("sampling...")
all_images = []
all_labels = []
all_preds = []
generator = get_generator(args.generator, args.num_samples, args.seed)
while len(all_images) * args.batch_size < args.num_samples:
model_kwargs = {}
if args.class_cond:
classes = th.randint(
low=0,
high=NUM_CLASSES,
size=(args.batch_size,),
device=dist_util.dev(),
)
model_kwargs["y"] = classes
def denoiser(x_t, sigma):
_, denoised = diffusion.denoise(model, x_t, sigma, **model_kwargs)
if args.clip_denoised:
denoised = denoised.clamp(-1, 1)
return denoised
x_T = (
generator.randn(
*(args.batch_size, 3, args.image_size, args.image_size),
device=dist_util.dev(),
)
* args.sigma_max
)
sample = stochastic_iterative_sampler(
denoiser,
x_T,
ts,
t_min=args.sigma_min,
t_max=args.sigma_max,
rho=diffusion.rho,
steps=args.steps,
generator=generator,
)
pred, spatial_pred, clip_pred, text_pred, _ = fid_is.get_preds(sample)
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
sample = sample.permute(0, 2, 3, 1)
sample = sample.contiguous()
gathered_samples = [
th.zeros_like(sample) for _ in range(dist.get_world_size())
]
gathered_preds = [th.zeros_like(pred) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_samples, sample) # gather not supported with NCCL
dist.all_gather(gathered_preds, pred)
all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
all_preds.extend([pred.cpu().numpy() for pred in gathered_preds])
if args.class_cond:
gathered_labels = [
th.zeros_like(classes) for _ in range(dist.get_world_size())
]
dist.all_gather(gathered_labels, classes)
all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
logger.log(f"created {len(all_images) * args.batch_size} samples")
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
preds = np.concatenate(all_preds, axis=0)
preds = preds[: args.num_samples]
if args.class_cond:
label_arr = np.concatenate(all_labels, axis=0)
label_arr = label_arr[: args.num_samples]
dist.barrier()
logger.log("sampling complete")
return arr, preds
@cache
def get_fid(p, begin=(0,), end=(args.steps - 1,)):
samples, preds = sample_generator(begin + (p,) + end)
is_root = dist.get_rank() == 0
if is_root:
fid_stats = fid_is.get_statistics(preds, -1)
fid = ref_fid_stats.frechet_distance(fid_stats)
fid = MPI.COMM_WORLD.bcast(fid)
# spatial_stats = fid_is.get_statistics(spatial_preds, -1)
# sfid = ref_spatial_stats.frechet_distance(spatial_stats)
# clip_stats = fid_is.get_statistics(clip_preds, -1)
IS = fid_is.get_inception_score(preds)
IS = MPI.COMM_WORLD.bcast(IS)
# clip_fid = fid_is.get_clip_score(clip_preds, text_preds)
# fcd = ref_clip_stats.frechet_distance(clip_stats)
else:
fid = MPI.COMM_WORLD.bcast(None)
IS = MPI.COMM_WORLD.bcast(None)
dist.barrier()
return fid, IS
def ternary_search(before=(0,), after=(17,)):
left = before[-1]
right = after[0]
is_root = dist.get_rank() == 0
while right - left >= 3:
m1 = int(left + (right - left) / 3.0)
m2 = int(right - (right - left) / 3.0)
f1, is1 = get_fid(m1, before, after)
if is_root:
logger.log(f"fid at m1 = {m1} is {f1}, IS is {is1}")
f2, is2 = get_fid(m2, before, after)
if is_root:
logger.log(f"fid at m2 = {m2} is {f2}, IS is {is2}")
if f1 < f2:
right = m2
else:
left = m1
if is_root:
logger.log(f"new interval is [{left}, {right}]")
if right == left:
p = right
        elif right - left == 1:
            f1, _ = get_fid(left, before, after)
            f2, _ = get_fid(right, before, after)
            # m1/m2 from the loop may be stale (or undefined if the loop
            # never ran), so choose between the actual endpoints.
            p = left if f1 < f2 else right
        elif right - left == 2:
            mid = left + 1
            f1, _ = get_fid(left, before, after)
            f2, _ = get_fid(right, before, after)
            fmid, ismid = get_fid(mid, before, after)
            if is_root:
                logger.log(f"fmid at mid = {mid} is {fmid}, ISmid is {ismid}")
            if fmid < f1 and fmid < f2:
                p = mid
            elif f1 < f2:
                p = left
            else:
                p = right
return p
# convert comma separated numbers to tuples
begin = tuple(int(x) for x in args.begin.split(","))
end = tuple(int(x) for x in args.end.split(","))
optimal_p = ternary_search(begin, end)
if dist.get_rank() == 0:
logger.log(f"ternary_search_results: {optimal_p}")
fid, IS = get_fid(optimal_p, begin, end)
logger.log(f"fid at optimal p = {optimal_p} is {fid}, IS is {IS}")
def create_argparser():
defaults = dict(
begin="0",
end="39",
training_mode="consistency_distillation",
generator="determ",
clip_denoised=True,
num_samples=10000,
batch_size=16,
sampler="heun",
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
steps=40,
model_path="",
ref_batch="",
seed=42,
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
|
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
from cm import dist_util, logger
from cm.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
from cm.random_util import get_generator
from cm.karras_diffusion import karras_sample
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
if "consistency" in args.training_mode:
distillation = True
else:
distillation = False
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys()),
distillation=distillation,
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
if args.use_fp16:
model.convert_to_fp16()
model.eval()
logger.log("sampling...")
if args.sampler == "multistep":
assert len(args.ts) > 0
ts = tuple(int(x) for x in args.ts.split(","))
else:
ts = None
all_images = []
all_labels = []
generator = get_generator(args.generator, args.num_samples, args.seed)
while len(all_images) * args.batch_size < args.num_samples:
model_kwargs = {}
if args.class_cond:
classes = th.randint(
low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
)
model_kwargs["y"] = classes
sample = karras_sample(
diffusion,
model,
(args.batch_size, 3, args.image_size, args.image_size),
steps=args.steps,
model_kwargs=model_kwargs,
device=dist_util.dev(),
clip_denoised=args.clip_denoised,
sampler=args.sampler,
sigma_min=args.sigma_min,
sigma_max=args.sigma_max,
s_churn=args.s_churn,
s_tmin=args.s_tmin,
s_tmax=args.s_tmax,
s_noise=args.s_noise,
generator=generator,
ts=ts,
)
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
sample = sample.permute(0, 2, 3, 1)
sample = sample.contiguous()
gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_samples, sample) # gather not supported with NCCL
all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
if args.class_cond:
gathered_labels = [
th.zeros_like(classes) for _ in range(dist.get_world_size())
]
dist.all_gather(gathered_labels, classes)
all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
logger.log(f"created {len(all_images) * args.batch_size} samples")
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
if args.class_cond:
label_arr = np.concatenate(all_labels, axis=0)
label_arr = label_arr[: args.num_samples]
if dist.get_rank() == 0:
shape_str = "x".join([str(x) for x in arr.shape])
out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
logger.log(f"saving to {out_path}")
if args.class_cond:
np.savez(out_path, arr, label_arr)
else:
np.savez(out_path, arr)
dist.barrier()
logger.log("sampling complete")
def create_argparser():
defaults = dict(
training_mode="edm",
generator="determ",
clip_denoised=True,
num_samples=10000,
batch_size=16,
sampler="heun",
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
steps=40,
model_path="",
seed=42,
ts="",
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
|
"""
Train a diffusion model on images.
"""
import argparse
from cm import dist_util, logger
from cm.image_datasets import load_data
from cm.resample import create_named_schedule_sampler
from cm.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
from cm.train_util import TrainLoop
import torch.distributed as dist
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.to(dist_util.dev())
schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
logger.log("creating data loader...")
if args.batch_size == -1:
batch_size = args.global_batch_size // dist.get_world_size()
if args.global_batch_size % dist.get_world_size() != 0:
logger.log(
f"warning, using smaller global_batch_size of {dist.get_world_size()*batch_size} instead of {args.global_batch_size}"
)
else:
batch_size = args.batch_size
data = load_data(
data_dir=args.data_dir,
batch_size=batch_size,
image_size=args.image_size,
class_cond=args.class_cond,
)
logger.log("creating data loader...")
logger.log("training...")
TrainLoop(
model=model,
diffusion=diffusion,
data=data,
batch_size=batch_size,
microbatch=args.microbatch,
lr=args.lr,
ema_rate=args.ema_rate,
log_interval=args.log_interval,
save_interval=args.save_interval,
resume_checkpoint=args.resume_checkpoint,
use_fp16=args.use_fp16,
fp16_scale_growth=args.fp16_scale_growth,
schedule_sampler=schedule_sampler,
weight_decay=args.weight_decay,
lr_anneal_steps=args.lr_anneal_steps,
).run_loop()
def create_argparser():
defaults = dict(
data_dir="",
schedule_sampler="uniform",
lr=1e-4,
weight_decay=0.0,
lr_anneal_steps=0,
global_batch_size=2048,
batch_size=-1,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
log_interval=10,
save_interval=10000,
resume_checkpoint="",
use_fp16=False,
fp16_scale_growth=1e-3,
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
|
from abc import ABC, abstractmethod
import numpy as np
import torch as th
from scipy.stats import norm
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
elif name == "lognormal":
return LogNormalSampler()
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
        :param device: the torch device to place the results on.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
        timestep_batches = [th.zeros(max_bs).to(local_ts) for _ in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for _ in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history**2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
class LogNormalSampler:
def __init__(self, p_mean=-1.2, p_std=1.2, even=False):
self.p_mean = p_mean
self.p_std = p_std
self.even = even
if self.even:
self.inv_cdf = lambda x: norm.ppf(x, loc=p_mean, scale=p_std)
self.rank, self.size = dist.get_rank(), dist.get_world_size()
def sample(self, bs, device):
if self.even:
            # Stratified sampling: split [0, 1) into global_batch_size even
            # buckets and draw one uniform sample from each bucket.
start_i, end_i = self.rank * bs, (self.rank + 1) * bs
global_batch_size = self.size * bs
locs = (th.arange(start_i, end_i) + th.rand(bs)) / global_batch_size
log_sigmas = th.tensor(self.inv_cdf(locs), dtype=th.float32, device=device)
else:
log_sigmas = self.p_mean + self.p_std * th.randn(bs, device=device)
sigmas = th.exp(log_sigmas)
weights = th.ones_like(sigmas)
return sigmas, weights
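# Illustrative sketch (not called anywhere in this codebase): checks that the
# uniform sampler's importance weights are exactly 1 and shows the shapes
# returned by LogNormalSampler. `_DummyDiffusion` is a stand-in that only
# provides the `num_timesteps` attribute a real diffusion object would have.
def _demo_schedule_samplers():
    class _DummyDiffusion:
        num_timesteps = 40
    sampler = UniformSampler(_DummyDiffusion())
    ts, weights = sampler.sample(batch_size=8, device="cpu")
    # Uniform sampling means p = 1/T everywhere, so 1 / (T * p) = 1.
    assert th.allclose(weights, th.ones(8))
    sigma_sampler = LogNormalSampler(p_mean=-1.2, p_std=1.2)
    sigmas, w = sigma_sampler.sample(8, "cpu")
    # Noise levels are log-normally distributed; the weights are all ones.
    assert sigmas.shape == w.shape == (8,)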
|
import math
import random
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset
def load_data(
*,
data_dir,
batch_size,
image_size,
class_cond=False,
deterministic=False,
random_crop=False,
random_flip=True,
):
"""
For a dataset, create a generator over (images, kwargs) pairs.
    Each batch of images is an NCHW float tensor, and the kwargs dict contains
    zero or more keys, each of which maps to a batched Tensor of its own.
The kwargs dict can be used for class labels, in which case the key is "y"
and the values are integer tensors of class labels.
:param data_dir: a dataset directory.
:param batch_size: the batch size of each returned pair.
:param image_size: the size to which images are resized.
:param class_cond: if True, include a "y" key in returned dicts for class
label. If classes are not available and this is true, an
exception will be raised.
:param deterministic: if True, yield results in a deterministic order.
:param random_crop: if True, randomly crop the images for augmentation.
:param random_flip: if True, randomly flip the images for augmentation.
"""
if not data_dir:
raise ValueError("unspecified data directory")
all_files = _list_image_files_recursively(data_dir)
classes = None
if class_cond:
# Assume classes are the first part of the filename,
# before an underscore.
class_names = [bf.basename(path).split("_")[0] for path in all_files]
sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
classes = [sorted_classes[x] for x in class_names]
dataset = ImageDataset(
image_size,
all_files,
classes=classes,
shard=MPI.COMM_WORLD.Get_rank(),
num_shards=MPI.COMM_WORLD.Get_size(),
random_crop=random_crop,
random_flip=random_flip,
)
if deterministic:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
)
else:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
)
while True:
yield from loader
def _list_image_files_recursively(data_dir):
results = []
for entry in sorted(bf.listdir(data_dir)):
full_path = bf.join(data_dir, entry)
ext = entry.split(".")[-1]
if "." in entry and ext.lower() in ["jpg", "jpeg", "png", "gif"]:
results.append(full_path)
elif bf.isdir(full_path):
results.extend(_list_image_files_recursively(full_path))
return results
class ImageDataset(Dataset):
def __init__(
self,
resolution,
image_paths,
classes=None,
shard=0,
num_shards=1,
random_crop=False,
random_flip=True,
):
super().__init__()
self.resolution = resolution
self.local_images = image_paths[shard:][::num_shards]
self.local_classes = None if classes is None else classes[shard:][::num_shards]
self.random_crop = random_crop
self.random_flip = random_flip
def __len__(self):
return len(self.local_images)
def __getitem__(self, idx):
path = self.local_images[idx]
with bf.BlobFile(path, "rb") as f:
pil_image = Image.open(f)
pil_image.load()
pil_image = pil_image.convert("RGB")
if self.random_crop:
arr = random_crop_arr(pil_image, self.resolution)
else:
arr = center_crop_arr(pil_image, self.resolution)
if self.random_flip and random.random() < 0.5:
arr = arr[:, ::-1]
arr = arr.astype(np.float32) / 127.5 - 1
out_dict = {}
if self.local_classes is not None:
out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
return np.transpose(arr, [2, 0, 1]), out_dict
def center_crop_arr(pil_image, image_size):
# We are not on a new enough PIL to support the `reducing_gap`
# argument, which uses BOX downsampling at powers of two first.
# Thus, we do it by hand to improve downsample quality.
while min(*pil_image.size) >= 2 * image_size:
pil_image = pil_image.resize(
tuple(x // 2 for x in pil_image.size), resample=Image.BOX
)
scale = image_size / min(*pil_image.size)
pil_image = pil_image.resize(
tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
)
arr = np.array(pil_image)
crop_y = (arr.shape[0] - image_size) // 2
crop_x = (arr.shape[1] - image_size) // 2
return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
# We are not on a new enough PIL to support the `reducing_gap`
# argument, which uses BOX downsampling at powers of two first.
# Thus, we do it by hand to improve downsample quality.
while min(*pil_image.size) >= 2 * smaller_dim_size:
pil_image = pil_image.resize(
tuple(x // 2 for x in pil_image.size), resample=Image.BOX
)
scale = smaller_dim_size / min(*pil_image.size)
pil_image = pil_image.resize(
tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
)
arr = np.array(pil_image)
crop_y = random.randrange(arr.shape[0] - image_size + 1)
crop_x = random.randrange(arr.shape[1] - image_size + 1)
return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
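# Minimal sketch of the cropping helpers above (illustrative only): a random
# 300x200 RGB image is BOX-downsampled toward the target size, resized with
# BICUBIC so its smaller side equals `image_size`, then cropped to a square.
def _demo_crops(image_size=64):
    pil_image = Image.fromarray(
        np.random.randint(0, 256, size=(200, 300, 3), dtype=np.uint8)
    )
    centered = center_crop_arr(pil_image, image_size)
    randomized = random_crop_arr(pil_image, image_size)
    assert centered.shape == randomized.shape == (image_size, image_size, 3)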
|
import torch as th
import torch.distributed as dist
from . import dist_util
def get_generator(generator, num_samples=0, seed=0):
if generator == "dummy":
return DummyGenerator()
elif generator == "determ":
return DeterministicGenerator(num_samples, seed)
elif generator == "determ-indiv":
return DeterministicIndividualGenerator(num_samples, seed)
else:
raise NotImplementedError
class DummyGenerator:
def randn(self, *args, **kwargs):
return th.randn(*args, **kwargs)
def randint(self, *args, **kwargs):
return th.randint(*args, **kwargs)
def randn_like(self, *args, **kwargs):
return th.randn_like(*args, **kwargs)
class DeterministicGenerator:
"""
    RNG that deterministically produces num_samples samples, independent of
    batch_size or the number of MPI machines.
    Uses a single RNG, draws num_samples worth of randomness at once, and
    subsamples the indices belonging to the current batch.
"""
def __init__(self, num_samples, seed=0):
if dist.is_initialized():
self.rank = dist.get_rank()
self.world_size = dist.get_world_size()
else:
print("Warning: Distributed not initialised, using single rank")
self.rank = 0
self.world_size = 1
self.num_samples = num_samples
self.done_samples = 0
self.seed = seed
self.rng_cpu = th.Generator()
if th.cuda.is_available():
self.rng_cuda = th.Generator(dist_util.dev())
self.set_seed(seed)
def get_global_size_and_indices(self, size):
global_size = (self.num_samples, *size[1:])
indices = th.arange(
self.done_samples + self.rank,
self.done_samples + self.world_size * int(size[0]),
self.world_size,
)
indices = th.clamp(indices, 0, self.num_samples - 1)
assert (
len(indices) == size[0]
), f"rank={self.rank}, ws={self.world_size}, l={len(indices)}, bs={size[0]}"
return global_size, indices
def get_generator(self, device):
return self.rng_cpu if th.device(device).type == "cpu" else self.rng_cuda
def randn(self, *size, dtype=th.float, device="cpu"):
global_size, indices = self.get_global_size_and_indices(size)
generator = self.get_generator(device)
return th.randn(*global_size, generator=generator, dtype=dtype, device=device)[
indices
]
def randint(self, low, high, size, dtype=th.long, device="cpu"):
global_size, indices = self.get_global_size_and_indices(size)
generator = self.get_generator(device)
return th.randint(
low, high, generator=generator, size=global_size, dtype=dtype, device=device
)[indices]
def randn_like(self, tensor):
size, dtype, device = tensor.size(), tensor.dtype, tensor.device
return self.randn(*size, dtype=dtype, device=device)
def set_done_samples(self, done_samples):
self.done_samples = done_samples
self.set_seed(self.seed)
def get_seed(self):
return self.seed
def set_seed(self, seed):
self.rng_cpu.manual_seed(seed)
if th.cuda.is_available():
self.rng_cuda.manual_seed(seed)
class DeterministicIndividualGenerator:
"""
    RNG that deterministically produces num_samples samples, independent of
    batch_size or the number of MPI machines.
    Uses a separate RNG per sample to reduce memory usage.
"""
def __init__(self, num_samples, seed=0):
if dist.is_initialized():
self.rank = dist.get_rank()
self.world_size = dist.get_world_size()
else:
print("Warning: Distributed not initialised, using single rank")
self.rank = 0
self.world_size = 1
self.num_samples = num_samples
self.done_samples = 0
self.seed = seed
self.rng_cpu = [th.Generator() for _ in range(num_samples)]
if th.cuda.is_available():
self.rng_cuda = [th.Generator(dist_util.dev()) for _ in range(num_samples)]
self.set_seed(seed)
def get_size_and_indices(self, size):
indices = th.arange(
self.done_samples + self.rank,
self.done_samples + self.world_size * int(size[0]),
self.world_size,
)
indices = th.clamp(indices, 0, self.num_samples - 1)
assert (
len(indices) == size[0]
), f"rank={self.rank}, ws={self.world_size}, l={len(indices)}, bs={size[0]}"
return (1, *size[1:]), indices
def get_generator(self, device):
return self.rng_cpu if th.device(device).type == "cpu" else self.rng_cuda
def randn(self, *size, dtype=th.float, device="cpu"):
size, indices = self.get_size_and_indices(size)
generator = self.get_generator(device)
return th.cat(
[
th.randn(*size, generator=generator[i], dtype=dtype, device=device)
for i in indices
],
dim=0,
)
def randint(self, low, high, size, dtype=th.long, device="cpu"):
size, indices = self.get_size_and_indices(size)
generator = self.get_generator(device)
return th.cat(
[
th.randint(
low,
high,
generator=generator[i],
size=size,
dtype=dtype,
device=device,
)
for i in indices
],
dim=0,
)
def randn_like(self, tensor):
size, dtype, device = tensor.size(), tensor.dtype, tensor.device
return self.randn(*size, dtype=dtype, device=device)
def set_done_samples(self, done_samples):
self.done_samples = done_samples
def get_seed(self):
return self.seed
def set_seed(self, seed):
[
rng_cpu.manual_seed(i + self.num_samples * seed)
for i, rng_cpu in enumerate(self.rng_cpu)
]
if th.cuda.is_available():
[
rng_cuda.manual_seed(i + self.num_samples * seed)
for i, rng_cuda in enumerate(self.rng_cuda)
]
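# Illustrative check (assumes a single process, so no torch.distributed
# setup): the deterministic generator reproduces the same global noise no
# matter how the draw is batched, which is what makes sampling runs
# reproducible across batch sizes and machine counts.
def _demo_deterministic_generator(num_samples=8):
    g1 = get_generator("determ", num_samples=num_samples, seed=0)
    full = g1.randn(num_samples, 3)
    g2 = get_generator("determ", num_samples=num_samples, seed=0)
    g2.set_done_samples(4)
    second_half = g2.randn(4, 3)
    # Rows 4..7 of the full draw match a generator that skipped the first 4.
    assert th.allclose(full[4:], second_half)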
|
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(
f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
)
return x[(...,) + (None,) * dims_to_append]
def append_zero(x):
return th.cat([x, x.new_zeros([1])])
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
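# Quick illustrative check of the embedding above: the first half of each row
# holds cosines and the second half sines at geometrically spaced frequencies,
# so t=0 maps to [1, 1, 0, 0] when dim=4.
def _demo_timestep_embedding():
    emb = timestep_embedding(th.tensor([0.0, 1.0]), dim=4)
    assert emb.shape == (2, 4)
    assert th.allclose(emb[0], th.tensor([1.0, 1.0, 0.0, 0.0]))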
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
class CheckpointFunction(th.autograd.Function):
@staticmethod
def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_tensors = list(args[:length])
ctx.input_params = list(args[length:])
with th.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
@staticmethod
def backward(ctx, *output_grads):
ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
with th.enable_grad():
# Fixes a bug where the first op in run_function modifies the
# Tensor storage in place, which is not allowed for detach()'d
# Tensors.
shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
output_tensors = ctx.run_function(*shallow_copies)
input_grads = th.autograd.grad(
output_tensors,
ctx.input_tensors + ctx.input_params,
output_grads,
allow_unused=True,
)
del ctx.input_tensors
del ctx.input_params
del output_tensors
return (None, None) + input_grads
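# Illustrative sketch of the checkpointing utility above: gradients from the
# checkpointed path match ordinary backprop; only the memory/compute
# trade-off differs.
def _demo_checkpoint():
    lin = nn.Linear(3, 3)
    x = th.randn(2, 3, requires_grad=True)
    y_ckpt = checkpoint(lin, (x,), lin.parameters(), True).sum()
    (g_ckpt,) = th.autograd.grad(y_ckpt, x)
    y_plain = checkpoint(lin, (x,), lin.parameters(), False).sum()
    (g_plain,) = th.autograd.grad(y_plain, x)
    assert th.allclose(g_ckpt, g_plain)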
|
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
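# Illustrative shape check for the two blocks above: with dims=2, Upsample
# doubles and Downsample halves the spatial resolution, while the channel
# count stays fixed when no out_channels override is given.
def _demo_resampling():
    x = th.randn(1, 8, 16, 16)
    assert Upsample(8, use_conv=True)(x).shape == (1, 8, 32, 32)
    assert Downsample(8, use_conv=True)(x).shape == (1, 8, 8, 8)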
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
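# Illustrative sketch of the residual block above: channels must be divisible
# by the 32 groups used by normalization(), and the timestep embedding is
# broadcast over the spatial dimensions, so the output shape matches the input.
def _demo_resblock():
    block = ResBlock(channels=32, emb_channels=16, dropout=0.0)
    x = th.randn(2, 32, 8, 8)
    emb = th.randn(2, 16)
    assert block(x, emb).shape == x.shape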
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
attention_type="flash",
encoder_channels=None,
dims=2,
channels_last=False,
use_new_attention_order=False,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels)
self.qkv = conv_nd(dims, channels, channels * 3, 1)
self.attention_type = attention_type
if attention_type == "flash":
self.attention = QKVFlashAttention(channels, self.num_heads)
else:
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads)
self.use_attention_checkpoint = not (
self.use_checkpoint or self.attention_type == "flash"
)
if encoder_channels is not None:
assert attention_type != "flash"
self.encoder_kv = conv_nd(1, encoder_channels, channels * 2, 1)
self.proj_out = zero_module(conv_nd(dims, channels, channels, 1))
def forward(self, x, encoder_out=None):
if encoder_out is None:
return checkpoint(
self._forward, (x,), self.parameters(), self.use_checkpoint
)
else:
return checkpoint(
self._forward, (x, encoder_out), self.parameters(), self.use_checkpoint
)
def _forward(self, x, encoder_out=None):
b, _, *spatial = x.shape
qkv = self.qkv(self.norm(x)).view(b, -1, np.prod(spatial))
if encoder_out is not None:
encoder_out = self.encoder_kv(encoder_out)
h = checkpoint(
self.attention, (qkv, encoder_out), (), self.use_attention_checkpoint
)
else:
h = checkpoint(self.attention, (qkv,), (), self.use_attention_checkpoint)
h = h.view(b, -1, *spatial)
h = self.proj_out(h)
return x + h
class QKVFlashAttention(nn.Module):
def __init__(
self,
embed_dim,
num_heads,
batch_first=True,
attention_dropout=0.0,
causal=False,
device=None,
dtype=None,
**kwargs,
) -> None:
from einops import rearrange
from flash_attn.flash_attention import FlashAttention
assert batch_first
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.causal = causal
        assert (
            self.embed_dim % num_heads == 0
        ), "self.embed_dim must be divisible by num_heads"
        self.head_dim = self.embed_dim // num_heads
        assert self.head_dim in [16, 32, 64], "only head_dim 16, 32, or 64 is supported"
self.inner_attn = FlashAttention(
attention_dropout=attention_dropout, **factory_kwargs
)
self.rearrange = rearrange
def forward(self, qkv, attn_mask=None, key_padding_mask=None, need_weights=False):
qkv = self.rearrange(
qkv, "b (three h d) s -> b s three h d", three=3, h=self.num_heads
)
qkv, _ = self.inner_attn(
qkv,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
causal=self.causal,
)
return self.rearrange(qkv, "b s h d -> b (h d) s")
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial**2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
from einops import rearrange
self.rearrange = rearrange
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
qkv = qkv.half()
qkv = self.rearrange(
qkv, "b (three h d) s -> b s three h d", three=3, h=self.n_heads
)
q, k, v = qkv.transpose(1, 3).transpose(3, 4).split(1, dim=2)
q = q.reshape(bs*self.n_heads, ch, length)
k = k.reshape(bs*self.n_heads, ch, length)
v = v.reshape(bs*self.n_heads, ch, length)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight, dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
a = a.float()
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
    A module which performs QKV attention, splitting qkv before splitting
    heads. Serves as the fallback when flash attention is not used.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv, encoder_kv=None):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
if encoder_kv is not None:
assert encoder_kv.shape[1] == 2 * ch * self.n_heads
ek, ev = encoder_kv.chunk(2, dim=1)
k = th.cat([ek, k], dim=-1)
v = th.cat([ev, v], dim=-1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, -1),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, -1))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
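# Illustrative shape check for the attention primitive above: a fused
# [N x (3*H*C) x T] projection goes in and an [N x (H*C) x T] tensor of
# attended values comes out.
def _demo_qkv_attention():
    n_heads, ch, length = 2, 8, 16
    attn = QKVAttention(n_heads)
    qkv = th.randn(4, 3 * n_heads * ch, length)
    assert attn(qkv).shape == (4, n_heads * ch, length)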
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
                              a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
ch = input_ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=int(model_channels * mult),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(model_channels * mult)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps, y=None):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
hs.append(h)
h = self.middle_block(h, emb)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb)
h = h.type(x.dtype)
return self.out(h)
|
import argparse
from .karras_diffusion import KarrasDenoiser
from .unet import UNetModel
import numpy as np
NUM_CLASSES = 1000
def cm_train_defaults():
return dict(
teacher_model_path="",
teacher_dropout=0.1,
training_mode="consistency_distillation",
target_ema_mode="fixed",
scale_mode="fixed",
total_training_steps=600000,
start_ema=0.0,
start_scales=40,
end_scales=40,
distill_steps_per_iter=50000,
loss_norm="lpips",
)
def model_and_diffusion_defaults():
"""
Defaults for image training.
"""
res = dict(
sigma_min=0.002,
sigma_max=80.0,
image_size=64,
num_channels=128,
num_res_blocks=2,
num_heads=4,
num_heads_upsample=-1,
num_head_channels=-1,
attention_resolutions="32,16,8",
channel_mult="",
dropout=0.0,
class_cond=False,
use_checkpoint=False,
use_scale_shift_norm=True,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
learn_sigma=False,
weight_schedule="karras",
)
return res
def create_model_and_diffusion(
image_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
channel_mult,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
use_new_attention_order,
weight_schedule,
sigma_min=0.002,
sigma_max=80.0,
distillation=False,
):
model = create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult=channel_mult,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
use_new_attention_order=use_new_attention_order,
)
diffusion = KarrasDenoiser(
sigma_data=0.5,
sigma_max=sigma_max,
sigma_min=sigma_min,
distillation=distillation,
weight_schedule=weight_schedule,
)
return model, diffusion
def create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult="",
learn_sigma=False,
class_cond=False,
use_checkpoint=False,
attention_resolutions="16",
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
dropout=0,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
):
if channel_mult == "":
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
else:
channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return UNetModel(
image_size=image_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
use_fp16=use_fp16,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_new_attention_order=use_new_attention_order,
)
def create_ema_and_scales_fn(
target_ema_mode,
start_ema,
scale_mode,
start_scales,
end_scales,
total_steps,
distill_steps_per_iter,
):
def ema_and_scales_fn(step):
if target_ema_mode == "fixed" and scale_mode == "fixed":
target_ema = start_ema
scales = start_scales
elif target_ema_mode == "fixed" and scale_mode == "progressive":
target_ema = start_ema
scales = np.ceil(
np.sqrt(
(step / total_steps) * ((end_scales + 1) ** 2 - start_scales**2)
+ start_scales**2
)
- 1
).astype(np.int32)
scales = np.maximum(scales, 1)
scales = scales + 1
elif target_ema_mode == "adaptive" and scale_mode == "progressive":
scales = np.ceil(
np.sqrt(
(step / total_steps) * ((end_scales + 1) ** 2 - start_scales**2)
+ start_scales**2
)
- 1
).astype(np.int32)
scales = np.maximum(scales, 1)
c = -np.log(start_ema) * start_scales
target_ema = np.exp(-c / scales)
scales = scales + 1
elif target_ema_mode == "fixed" and scale_mode == "progdist":
distill_stage = step // distill_steps_per_iter
scales = start_scales // (2**distill_stage)
scales = np.maximum(scales, 2)
sub_stage = np.maximum(
step - distill_steps_per_iter * (np.log2(start_scales) - 1),
0,
)
sub_stage = sub_stage // (distill_steps_per_iter * 2)
sub_scales = 2 // (2**sub_stage)
sub_scales = np.maximum(sub_scales, 1)
scales = np.where(scales == 2, sub_scales, scales)
target_ema = 1.0
else:
raise NotImplementedError
return float(target_ema), int(scales)
return ema_and_scales_fn
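# Illustrative sketch of the schedule above (the numbers are placeholders,
# not recommended settings): with adaptive EMA and progressive scales, the
# number of discretization scales grows over training while the target EMA
# rises toward 1.
def _demo_ema_and_scales():
    fn = create_ema_and_scales_fn(
        target_ema_mode="adaptive",
        start_ema=0.95,
        scale_mode="progressive",
        start_scales=2,
        end_scales=150,
        total_steps=800000,
        distill_steps_per_iter=0,
    )
    for step in (0, 400000, 800000):
        target_ema, scales = fn(step)
        print(f"step={step}: target_ema={target_ema:.5f}, scales={scales}")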
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
|
"""
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
|
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
    Will be None if there is no output directory (i.e., if you didn't call configure).
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
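# Illustrative usage sketch (assumes the module-level logkv/logkv_mean/dumpkvs
# wrappers defined earlier in this file): configure once, accumulate keyed
# stats during a loop, then flush them to every configured output format.
# The directory below is hypothetical.
def _example_logger_usage():
    configure(dir="/tmp/logger-demo", format_strs=["stdout"])
    for step in range(3):
        logkv("step", step)
        logkv_mean("loss", 1.0 / (step + 1))
        dumpkvs()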
|
import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import RAdam
from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler
from .fp16_util import (
get_param_groups_and_shapes,
make_master_params,
master_params_to_model_params,
)
import numpy as np
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0
class TrainLoop:
def __init__(
self,
*,
model,
diffusion,
data,
batch_size,
microbatch,
lr,
ema_rate,
log_interval,
save_interval,
resume_checkpoint,
use_fp16=False,
fp16_scale_growth=1e-3,
schedule_sampler=None,
weight_decay=0.0,
lr_anneal_steps=0,
):
self.model = model
self.diffusion = diffusion
self.data = data
self.batch_size = batch_size
self.microbatch = microbatch if microbatch > 0 else batch_size
self.lr = lr
self.ema_rate = (
[ema_rate]
if isinstance(ema_rate, float)
else [float(x) for x in ema_rate.split(",")]
)
self.log_interval = log_interval
self.save_interval = save_interval
self.resume_checkpoint = resume_checkpoint
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
self.weight_decay = weight_decay
self.lr_anneal_steps = lr_anneal_steps
self.step = 0
self.resume_step = 0
self.global_batch = self.batch_size * dist.get_world_size()
self.sync_cuda = th.cuda.is_available()
self._load_and_sync_parameters()
self.mp_trainer = MixedPrecisionTrainer(
model=self.model,
use_fp16=self.use_fp16,
fp16_scale_growth=fp16_scale_growth,
)
self.opt = RAdam(
self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
)
if self.resume_step:
self._load_optimizer_state()
# Model was resumed, either due to a restart or a checkpoint
# being specified at the command line.
self.ema_params = [
self._load_ema_parameters(rate) for rate in self.ema_rate
]
else:
self.ema_params = [
copy.deepcopy(self.mp_trainer.master_params)
for _ in range(len(self.ema_rate))
]
if th.cuda.is_available():
self.use_ddp = True
self.ddp_model = DDP(
self.model,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=False,
bucket_cap_mb=128,
find_unused_parameters=False,
)
else:
if dist.get_world_size() > 1:
logger.warn(
"Distributed training requires CUDA. "
"Gradients will not be synchronized properly!"
)
self.use_ddp = False
self.ddp_model = self.model
self.step = self.resume_step
def _load_and_sync_parameters(self):
resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
if resume_checkpoint:
self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
if dist.get_rank() == 0:
logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
self.model.load_state_dict(
dist_util.load_state_dict(
resume_checkpoint, map_location=dist_util.dev()
),
)
dist_util.sync_params(self.model.parameters())
dist_util.sync_params(self.model.buffers())
def _load_ema_parameters(self, rate):
ema_params = copy.deepcopy(self.mp_trainer.master_params)
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
if ema_checkpoint:
if dist.get_rank() == 0:
logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
state_dict = dist_util.load_state_dict(
ema_checkpoint, map_location=dist_util.dev()
)
ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
dist_util.sync_params(ema_params)
return ema_params
def _load_optimizer_state(self):
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
opt_checkpoint = bf.join(
bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
)
if bf.exists(opt_checkpoint):
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
def run_loop(self):
while not self.lr_anneal_steps or self.step < self.lr_anneal_steps:
batch, cond = next(self.data)
self.run_step(batch, cond)
if self.step % self.log_interval == 0:
logger.dumpkvs()
if self.step % self.save_interval == 0:
self.save()
# Run for a finite amount of time in integration tests.
if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
return
# Save the last checkpoint if it wasn't already saved.
if (self.step - 1) % self.save_interval != 0:
self.save()
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
took_step = self.mp_trainer.optimize(self.opt)
if took_step:
self.step += 1
self._update_ema()
self._anneal_lr()
self.log_step()
def forward_backward(self, batch, cond):
self.mp_trainer.zero_grad()
for i in range(0, batch.shape[0], self.microbatch):
micro = batch[i : i + self.microbatch].to(dist_util.dev())
micro_cond = {
k: v[i : i + self.microbatch].to(dist_util.dev())
for k, v in cond.items()
}
last_batch = (i + self.microbatch) >= batch.shape[0]
t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
compute_losses = functools.partial(
self.diffusion.training_losses,
self.ddp_model,
micro,
t,
model_kwargs=micro_cond,
)
if last_batch or not self.use_ddp:
losses = compute_losses()
else:
with self.ddp_model.no_sync():
losses = compute_losses()
if isinstance(self.schedule_sampler, LossAwareSampler):
self.schedule_sampler.update_with_local_losses(
t, losses["loss"].detach()
)
loss = (losses["loss"] * weights).mean()
log_loss_dict(
self.diffusion, t, {k: v * weights for k, v in losses.items()}
)
self.mp_trainer.backward(loss)
def _update_ema(self):
for rate, params in zip(self.ema_rate, self.ema_params):
update_ema(params, self.mp_trainer.master_params, rate=rate)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
def log_step(self):
logger.logkv("step", self.step + self.resume_step)
logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
def save(self):
def save_checkpoint(rate, params):
state_dict = self.mp_trainer.master_params_to_state_dict(params)
if dist.get_rank() == 0:
logger.log(f"saving model {rate}...")
if not rate:
filename = f"model{(self.step+self.resume_step):06d}.pt"
else:
filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
th.save(state_dict, f)
for rate, params in zip(self.ema_rate, self.ema_params):
save_checkpoint(rate, params)
if dist.get_rank() == 0:
with bf.BlobFile(
bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
"wb",
) as f:
th.save(self.opt.state_dict(), f)
# Save model parameters last to prevent race conditions where a restart
# loads model at step N, but opt/ema state isn't saved for step N.
save_checkpoint(0, self.mp_trainer.master_params)
dist.barrier()
class CMTrainLoop(TrainLoop):
def __init__(
self,
*,
target_model,
teacher_model,
teacher_diffusion,
training_mode,
ema_scale_fn,
total_training_steps,
**kwargs,
):
super().__init__(**kwargs)
self.training_mode = training_mode
self.ema_scale_fn = ema_scale_fn
self.target_model = target_model
self.teacher_model = teacher_model
self.teacher_diffusion = teacher_diffusion
self.total_training_steps = total_training_steps
if target_model:
self._load_and_sync_target_parameters()
self.target_model.requires_grad_(False)
self.target_model.train()
self.target_model_param_groups_and_shapes = get_param_groups_and_shapes(
self.target_model.named_parameters()
)
self.target_model_master_params = make_master_params(
self.target_model_param_groups_and_shapes
)
if teacher_model:
self._load_and_sync_teacher_parameters()
self.teacher_model.requires_grad_(False)
self.teacher_model.eval()
self.global_step = self.step
if training_mode == "progdist":
self.target_model.eval()
_, scale = ema_scale_fn(self.global_step)
if scale == 1 or scale == 2:
_, start_scale = ema_scale_fn(0)
n_normal_steps = int(np.log2(start_scale // 2)) * self.lr_anneal_steps
step = self.global_step - n_normal_steps
if step != 0:
self.lr_anneal_steps *= 2
self.step = step % self.lr_anneal_steps
else:
self.step = 0
else:
self.step = self.global_step % self.lr_anneal_steps
def _load_and_sync_target_parameters(self):
resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
if resume_checkpoint:
path, name = os.path.split(resume_checkpoint)
target_name = name.replace("model", "target_model")
resume_target_checkpoint = os.path.join(path, target_name)
if bf.exists(resume_target_checkpoint) and dist.get_rank() == 0:
                logger.log(
                    f"loading target model from checkpoint: {resume_target_checkpoint}..."
                )
self.target_model.load_state_dict(
dist_util.load_state_dict(
resume_target_checkpoint, map_location=dist_util.dev()
),
)
dist_util.sync_params(self.target_model.parameters())
dist_util.sync_params(self.target_model.buffers())
def _load_and_sync_teacher_parameters(self):
resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
if resume_checkpoint:
path, name = os.path.split(resume_checkpoint)
teacher_name = name.replace("model", "teacher_model")
resume_teacher_checkpoint = os.path.join(path, teacher_name)
if bf.exists(resume_teacher_checkpoint) and dist.get_rank() == 0:
                logger.log(
                    f"loading teacher model from checkpoint: {resume_teacher_checkpoint}..."
                )
self.teacher_model.load_state_dict(
dist_util.load_state_dict(
resume_teacher_checkpoint, map_location=dist_util.dev()
),
)
dist_util.sync_params(self.teacher_model.parameters())
dist_util.sync_params(self.teacher_model.buffers())
def run_loop(self):
saved = False
while (
not self.lr_anneal_steps
or self.step < self.lr_anneal_steps
or self.global_step < self.total_training_steps
):
batch, cond = next(self.data)
self.run_step(batch, cond)
saved = False
if (
self.global_step
and self.save_interval != -1
and self.global_step % self.save_interval == 0
):
self.save()
saved = True
th.cuda.empty_cache()
# Run for a finite amount of time in integration tests.
if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
return
if self.global_step % self.log_interval == 0:
logger.dumpkvs()
# Save the last checkpoint if it wasn't already saved.
if not saved:
self.save()
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
took_step = self.mp_trainer.optimize(self.opt)
if took_step:
self._update_ema()
if self.target_model:
self._update_target_ema()
if self.training_mode == "progdist":
self.reset_training_for_progdist()
self.step += 1
self.global_step += 1
self._anneal_lr()
self.log_step()
def _update_target_ema(self):
target_ema, scales = self.ema_scale_fn(self.global_step)
with th.no_grad():
update_ema(
self.target_model_master_params,
self.mp_trainer.master_params,
rate=target_ema,
)
master_params_to_model_params(
self.target_model_param_groups_and_shapes,
self.target_model_master_params,
)
def reset_training_for_progdist(self):
assert self.training_mode == "progdist", "Training mode must be progdist"
if self.global_step > 0:
scales = self.ema_scale_fn(self.global_step)[1]
scales2 = self.ema_scale_fn(self.global_step - 1)[1]
if scales != scales2:
with th.no_grad():
update_ema(
self.teacher_model.parameters(),
self.model.parameters(),
0.0,
)
# reset optimizer
self.opt = RAdam(
self.mp_trainer.master_params,
lr=self.lr,
weight_decay=self.weight_decay,
)
self.ema_params = [
copy.deepcopy(self.mp_trainer.master_params)
for _ in range(len(self.ema_rate))
]
if scales == 2:
self.lr_anneal_steps *= 2
self.teacher_model.eval()
self.step = 0
def forward_backward(self, batch, cond):
self.mp_trainer.zero_grad()
for i in range(0, batch.shape[0], self.microbatch):
micro = batch[i : i + self.microbatch].to(dist_util.dev())
micro_cond = {
k: v[i : i + self.microbatch].to(dist_util.dev())
for k, v in cond.items()
}
last_batch = (i + self.microbatch) >= batch.shape[0]
t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
ema, num_scales = self.ema_scale_fn(self.global_step)
if self.training_mode == "progdist":
if num_scales == self.ema_scale_fn(0)[1]:
compute_losses = functools.partial(
self.diffusion.progdist_losses,
self.ddp_model,
micro,
num_scales,
target_model=self.teacher_model,
target_diffusion=self.teacher_diffusion,
model_kwargs=micro_cond,
)
else:
compute_losses = functools.partial(
self.diffusion.progdist_losses,
self.ddp_model,
micro,
num_scales,
target_model=self.target_model,
target_diffusion=self.diffusion,
model_kwargs=micro_cond,
)
elif self.training_mode == "consistency_distillation":
compute_losses = functools.partial(
self.diffusion.consistency_losses,
self.ddp_model,
micro,
num_scales,
target_model=self.target_model,
teacher_model=self.teacher_model,
teacher_diffusion=self.teacher_diffusion,
model_kwargs=micro_cond,
)
elif self.training_mode == "consistency_training":
compute_losses = functools.partial(
self.diffusion.consistency_losses,
self.ddp_model,
micro,
num_scales,
target_model=self.target_model,
model_kwargs=micro_cond,
)
else:
raise ValueError(f"Unknown training mode {self.training_mode}")
if last_batch or not self.use_ddp:
losses = compute_losses()
else:
with self.ddp_model.no_sync():
losses = compute_losses()
if isinstance(self.schedule_sampler, LossAwareSampler):
self.schedule_sampler.update_with_local_losses(
t, losses["loss"].detach()
)
loss = (losses["loss"] * weights).mean()
log_loss_dict(
self.diffusion, t, {k: v * weights for k, v in losses.items()}
)
self.mp_trainer.backward(loss)
def save(self):
import blobfile as bf
step = self.global_step
def save_checkpoint(rate, params):
state_dict = self.mp_trainer.master_params_to_state_dict(params)
if dist.get_rank() == 0:
logger.log(f"saving model {rate}...")
if not rate:
filename = f"model{step:06d}.pt"
else:
filename = f"ema_{rate}_{step:06d}.pt"
with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
th.save(state_dict, f)
for rate, params in zip(self.ema_rate, self.ema_params):
save_checkpoint(rate, params)
logger.log("saving optimizer state...")
if dist.get_rank() == 0:
with bf.BlobFile(
bf.join(get_blob_logdir(), f"opt{step:06d}.pt"),
"wb",
) as f:
th.save(self.opt.state_dict(), f)
if dist.get_rank() == 0:
if self.target_model:
logger.log("saving target model state")
filename = f"target_model{step:06d}.pt"
with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
th.save(self.target_model.state_dict(), f)
if self.teacher_model and self.training_mode == "progdist":
logger.log("saving teacher model state")
filename = f"teacher_model{step:06d}.pt"
with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
th.save(self.teacher_model.state_dict(), f)
# Save model parameters last to prevent race conditions where a restart
# loads model at step N, but opt/ema state isn't saved for step N.
save_checkpoint(0, self.mp_trainer.master_params)
dist.barrier()
def log_step(self):
step = self.global_step
logger.logkv("step", step)
logger.logkv("samples", (step + 1) * self.global_batch)
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
def get_blob_logdir():
# You can change this to be a separate path to save checkpoints to
# a blobstore or some external drive.
return logger.get_dir()
def find_resume_checkpoint():
# On your infrastructure, you may want to override this to automatically
# discover the latest checkpoint on your blob storage, etc.
return None
def find_ema_checkpoint(main_checkpoint, step, rate):
if main_checkpoint is None:
return None
filename = f"ema_{rate}_{(step):06d}.pt"
path = bf.join(bf.dirname(main_checkpoint), filename)
if bf.exists(path):
return path
return None
def log_loss_dict(diffusion, ts, losses):
for key, values in losses.items():
logger.logkv_mean(key, values.mean().item())
# Log the quantiles (four quartiles, in particular).
for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
quartile = int(4 * sub_t / diffusion.num_timesteps)
logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
|
"""
Based on: https://github.com/crowsonkb/k-diffusion
"""
import random
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from piq import LPIPS
from torchvision.transforms import RandomCrop
from . import dist_util
from .nn import mean_flat, append_dims, append_zero
from .random_util import get_generator
def get_weightings(weight_schedule, snrs, sigma_data):
if weight_schedule == "snr":
weightings = snrs
elif weight_schedule == "snr+1":
weightings = snrs + 1
elif weight_schedule == "karras":
weightings = snrs + 1.0 / sigma_data**2
elif weight_schedule == "truncated-snr":
weightings = th.clamp(snrs, min=1.0)
elif weight_schedule == "uniform":
weightings = th.ones_like(snrs)
else:
raise NotImplementedError()
return weightings
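# Illustrative sketch: the "karras" schedule weights each noise level by
# snr + 1 / sigma_data**2, which upweights low-noise (high-SNR) levels.
def _example_weightings():
    sigmas = th.tensor([0.1, 1.0, 10.0])
    snrs = sigmas ** -2  # matches KarrasDenoiser.get_snr below
    weights = get_weightings("karras", snrs, sigma_data=0.5)
    assert th.allclose(weights, snrs + 4.0)  # 1 / 0.5**2 == 4
    return weights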
class KarrasDenoiser:
def __init__(
self,
sigma_data: float = 0.5,
sigma_max=80.0,
sigma_min=0.002,
rho=7.0,
weight_schedule="karras",
distillation=False,
loss_norm="lpips",
):
self.sigma_data = sigma_data
self.sigma_max = sigma_max
self.sigma_min = sigma_min
self.weight_schedule = weight_schedule
self.distillation = distillation
self.loss_norm = loss_norm
if loss_norm == "lpips":
self.lpips_loss = LPIPS(replace_pooling=True, reduction="none")
self.rho = rho
self.num_timesteps = 40
def get_snr(self, sigmas):
return sigmas**-2
def get_sigmas(self, sigmas):
return sigmas
def get_scalings(self, sigma):
c_skip = self.sigma_data**2 / (sigma**2 + self.sigma_data**2)
c_out = sigma * self.sigma_data / (sigma**2 + self.sigma_data**2) ** 0.5
c_in = 1 / (sigma**2 + self.sigma_data**2) ** 0.5
return c_skip, c_out, c_in
def get_scalings_for_boundary_condition(self, sigma):
c_skip = self.sigma_data**2 / (
(sigma - self.sigma_min) ** 2 + self.sigma_data**2
)
c_out = (
(sigma - self.sigma_min)
* self.sigma_data
/ (sigma**2 + self.sigma_data**2) ** 0.5
)
c_in = 1 / (sigma**2 + self.sigma_data**2) ** 0.5
return c_skip, c_out, c_in
def training_losses(self, model, x_start, sigmas, model_kwargs=None, noise=None):
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
terms = {}
dims = x_start.ndim
x_t = x_start + noise * append_dims(sigmas, dims)
model_output, denoised = self.denoise(model, x_t, sigmas, **model_kwargs)
snrs = self.get_snr(sigmas)
weights = append_dims(
get_weightings(self.weight_schedule, snrs, self.sigma_data), dims
)
terms["xs_mse"] = mean_flat((denoised - x_start) ** 2)
terms["mse"] = mean_flat(weights * (denoised - x_start) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
return terms
def consistency_losses(
self,
model,
x_start,
num_scales,
model_kwargs=None,
target_model=None,
teacher_model=None,
teacher_diffusion=None,
noise=None,
):
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
dims = x_start.ndim
def denoise_fn(x, t):
return self.denoise(model, x, t, **model_kwargs)[1]
if target_model:
@th.no_grad()
def target_denoise_fn(x, t):
return self.denoise(target_model, x, t, **model_kwargs)[1]
else:
raise NotImplementedError("Must have a target model")
if teacher_model:
@th.no_grad()
def teacher_denoise_fn(x, t):
return teacher_diffusion.denoise(teacher_model, x, t, **model_kwargs)[1]
@th.no_grad()
def heun_solver(samples, t, next_t, x0):
x = samples
if teacher_model is None:
denoiser = x0
else:
denoiser = teacher_denoise_fn(x, t)
d = (x - denoiser) / append_dims(t, dims)
samples = x + d * append_dims(next_t - t, dims)
if teacher_model is None:
denoiser = x0
else:
denoiser = teacher_denoise_fn(samples, next_t)
next_d = (samples - denoiser) / append_dims(next_t, dims)
samples = x + (d + next_d) * append_dims((next_t - t) / 2, dims)
return samples
@th.no_grad()
def euler_solver(samples, t, next_t, x0):
x = samples
if teacher_model is None:
denoiser = x0
else:
denoiser = teacher_denoise_fn(x, t)
d = (x - denoiser) / append_dims(t, dims)
samples = x + d * append_dims(next_t - t, dims)
return samples
indices = th.randint(
0, num_scales - 1, (x_start.shape[0],), device=x_start.device
)
t = self.sigma_max ** (1 / self.rho) + indices / (num_scales - 1) * (
self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)
)
t = t**self.rho
t2 = self.sigma_max ** (1 / self.rho) + (indices + 1) / (num_scales - 1) * (
self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)
)
t2 = t2**self.rho
x_t = x_start + noise * append_dims(t, dims)
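        # Capture the RNG state so the online model (below) and the target
        # model (after the solver step) see identical dropout masks.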
dropout_state = th.get_rng_state()
distiller = denoise_fn(x_t, t)
if teacher_model is None:
x_t2 = euler_solver(x_t, t, t2, x_start).detach()
else:
x_t2 = heun_solver(x_t, t, t2, x_start).detach()
th.set_rng_state(dropout_state)
distiller_target = target_denoise_fn(x_t2, t2)
distiller_target = distiller_target.detach()
snrs = self.get_snr(t)
weights = get_weightings(self.weight_schedule, snrs, self.sigma_data)
if self.loss_norm == "l1":
diffs = th.abs(distiller - distiller_target)
loss = mean_flat(diffs) * weights
elif self.loss_norm == "l2":
diffs = (distiller - distiller_target) ** 2
loss = mean_flat(diffs) * weights
elif self.loss_norm == "l2-32":
distiller = F.interpolate(distiller, size=32, mode="bilinear")
distiller_target = F.interpolate(
distiller_target,
size=32,
mode="bilinear",
)
diffs = (distiller - distiller_target) ** 2
loss = mean_flat(diffs) * weights
elif self.loss_norm == "lpips":
if x_start.shape[-1] < 256:
distiller = F.interpolate(distiller, size=224, mode="bilinear")
distiller_target = F.interpolate(
distiller_target, size=224, mode="bilinear"
)
loss = (
self.lpips_loss(
(distiller + 1) / 2.0,
(distiller_target + 1) / 2.0,
)
* weights
)
else:
raise ValueError(f"Unknown loss norm {self.loss_norm}")
terms = {}
terms["loss"] = loss
return terms
def progdist_losses(
self,
model,
x_start,
num_scales,
model_kwargs=None,
teacher_model=None,
teacher_diffusion=None,
noise=None,
):
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
dims = x_start.ndim
def denoise_fn(x, t):
return self.denoise(model, x, t, **model_kwargs)[1]
@th.no_grad()
def teacher_denoise_fn(x, t):
return teacher_diffusion.denoise(teacher_model, x, t, **model_kwargs)[1]
@th.no_grad()
def euler_solver(samples, t, next_t):
x = samples
denoiser = teacher_denoise_fn(x, t)
d = (x - denoiser) / append_dims(t, dims)
samples = x + d * append_dims(next_t - t, dims)
return samples
@th.no_grad()
def euler_to_denoiser(x_t, t, x_next_t, next_t):
denoiser = x_t - append_dims(t, dims) * (x_next_t - x_t) / append_dims(
next_t - t, dims
)
return denoiser
indices = th.randint(0, num_scales, (x_start.shape[0],), device=x_start.device)
t = self.sigma_max ** (1 / self.rho) + indices / num_scales * (
self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)
)
t = t**self.rho
t2 = self.sigma_max ** (1 / self.rho) + (indices + 0.5) / num_scales * (
self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)
)
t2 = t2**self.rho
t3 = self.sigma_max ** (1 / self.rho) + (indices + 1) / num_scales * (
self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)
)
t3 = t3**self.rho
x_t = x_start + noise * append_dims(t, dims)
denoised_x = denoise_fn(x_t, t)
x_t2 = euler_solver(x_t, t, t2).detach()
x_t3 = euler_solver(x_t2, t2, t3).detach()
target_x = euler_to_denoiser(x_t, t, x_t3, t3).detach()
snrs = self.get_snr(t)
weights = get_weightings(self.weight_schedule, snrs, self.sigma_data)
if self.loss_norm == "l1":
diffs = th.abs(denoised_x - target_x)
loss = mean_flat(diffs) * weights
elif self.loss_norm == "l2":
diffs = (denoised_x - target_x) ** 2
loss = mean_flat(diffs) * weights
elif self.loss_norm == "lpips":
if x_start.shape[-1] < 256:
denoised_x = F.interpolate(denoised_x, size=224, mode="bilinear")
target_x = F.interpolate(target_x, size=224, mode="bilinear")
loss = (
self.lpips_loss(
(denoised_x + 1) / 2.0,
(target_x + 1) / 2.0,
)
* weights
)
else:
raise ValueError(f"Unknown loss norm {self.loss_norm}")
terms = {}
terms["loss"] = loss
return terms
    def denoise(self, model, x_t, sigmas, **model_kwargs):
if not self.distillation:
c_skip, c_out, c_in = [
append_dims(x, x_t.ndim) for x in self.get_scalings(sigmas)
]
else:
c_skip, c_out, c_in = [
append_dims(x, x_t.ndim)
for x in self.get_scalings_for_boundary_condition(sigmas)
]
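        # EDM-style noise conditioning: c_noise = ln(sigma) / 4, scaled by 1000
        # to match a discrete-timestep range; 1e-44 guards against log(0).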
rescaled_t = 1000 * 0.25 * th.log(sigmas + 1e-44)
model_output = model(c_in * x_t, rescaled_t, **model_kwargs)
denoised = c_out * model_output + c_skip * x_t
return model_output, denoised
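# Illustrative sanity check: at sigma == sigma_min the boundary-condition
# scalings reduce to the identity map (c_skip == 1, c_out == 0), so the
# consistency model reproduces its input exactly at the smallest noise level.
# loss_norm="l2" avoids constructing the LPIPS network.
def _example_boundary_scalings():
    d = KarrasDenoiser(loss_norm="l2")
    sigma = th.tensor([d.sigma_min])
    c_skip, c_out, _ = d.get_scalings_for_boundary_condition(sigma)
    assert th.allclose(c_skip, th.ones_like(c_skip))
    assert th.allclose(c_out, th.zeros_like(c_out))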
def karras_sample(
diffusion,
model,
shape,
steps,
clip_denoised=True,
progress=False,
callback=None,
model_kwargs=None,
device=None,
sigma_min=0.002,
sigma_max=80, # higher for highres?
rho=7.0,
sampler="heun",
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
generator=None,
ts=None,
):
if generator is None:
generator = get_generator("dummy")
if sampler == "progdist":
sigmas = get_sigmas_karras(steps + 1, sigma_min, sigma_max, rho, device=device)
else:
sigmas = get_sigmas_karras(steps, sigma_min, sigma_max, rho, device=device)
x_T = generator.randn(*shape, device=device) * sigma_max
sample_fn = {
"heun": sample_heun,
"dpm": sample_dpm,
"ancestral": sample_euler_ancestral,
"onestep": sample_onestep,
"progdist": sample_progdist,
"euler": sample_euler,
"multistep": stochastic_iterative_sampler,
}[sampler]
if sampler in ["heun", "dpm"]:
sampler_args = dict(
s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise
)
elif sampler == "multistep":
sampler_args = dict(
ts=ts, t_min=sigma_min, t_max=sigma_max, rho=diffusion.rho, steps=steps
)
else:
sampler_args = {}
def denoiser(x_t, sigma):
_, denoised = diffusion.denoise(model, x_t, sigma, **model_kwargs)
if clip_denoised:
denoised = denoised.clamp(-1, 1)
return denoised
x_0 = sample_fn(
denoiser,
x_T,
sigmas,
generator,
progress=progress,
callback=callback,
**sampler_args,
)
return x_0.clamp(-1, 1)
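# Illustrative usage sketch (the lambda below is a hypothetical stand-in for a
# trained network, not the real model API): one-step sampling on CPU.
def _example_karras_sample():
    diffusion = KarrasDenoiser(loss_norm="l2", distillation=True)
    dummy_model = lambda x, t, **kwargs: th.zeros_like(x)
    x_0 = karras_sample(
        diffusion,
        dummy_model,
        shape=(1, 3, 8, 8),
        steps=1,
        sampler="onestep",
        device="cpu",
        model_kwargs={},
    )
    assert x_0.shape == (1, 3, 8, 8)
    return x_0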
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
"""Constructs the noise schedule of Karras et al. (2022)."""
ramp = th.linspace(0, 1, n)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return append_zero(sigmas).to(device)
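# Illustrative sketch: n steps produce n + 1 sigma values, interpolating from
# sigma_max down to sigma_min in rho-warped space with a final zero appended.
def _example_sigma_schedule():
    sigmas = get_sigmas_karras(4, sigma_min=0.002, sigma_max=80.0)
    assert sigmas.shape == (5,)
    assert th.isclose(sigmas[0], th.tensor(80.0))
    assert th.isclose(sigmas[-2], th.tensor(0.002))
    assert sigmas[-1] == 0
    return sigmas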
def to_d(x, sigma, denoised):
"""Converts a denoiser output to a Karras ODE derivative."""
return (x - denoised) / append_dims(sigma, x.ndim)
def get_ancestral_step(sigma_from, sigma_to):
"""Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step."""
sigma_up = (
sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2
) ** 0.5
sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
return sigma_down, sigma_up
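# Illustrative sanity check: the ancestral split conserves variance, i.e.
# sigma_down**2 + sigma_up**2 == sigma_to**2, so stepping down and re-noising
# lands back on the target noise level.
def _example_ancestral_step():
    sigma_from, sigma_to = th.tensor(2.0), th.tensor(1.0)
    sigma_down, sigma_up = get_ancestral_step(sigma_from, sigma_to)
    assert th.isclose(sigma_down**2 + sigma_up**2, sigma_to**2)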
@th.no_grad()
def sample_euler_ancestral(model, x, sigmas, generator, progress=False, callback=None):
"""Ancestral sampling with Euler method steps."""
s_in = x.new_ones([x.shape[0]])
indices = range(len(sigmas) - 1)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
denoised = model(x, sigmas[i] * s_in)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigmas[i],
"denoised": denoised,
}
)
d = to_d(x, sigmas[i], denoised)
# Euler method
dt = sigma_down - sigmas[i]
x = x + d * dt
x = x + generator.randn_like(x) * sigma_up
return x
@th.no_grad()
def sample_midpoint_ancestral(model, x, ts, generator, progress=False, callback=None):
"""Ancestral sampling with midpoint method steps."""
s_in = x.new_ones([x.shape[0]])
step_size = 1 / len(ts)
if progress:
from tqdm.auto import tqdm
ts = tqdm(ts)
for tn in ts:
dn = model(x, tn * s_in)
dn_2 = model(x + (step_size / 2) * dn, (tn + step_size / 2) * s_in)
x = x + step_size * dn_2
if callback is not None:
callback({"x": x, "tn": tn, "dn": dn, "dn_2": dn_2})
return x
@th.no_grad()
def sample_heun(
denoiser,
x,
sigmas,
generator,
progress=False,
callback=None,
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
):
"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
s_in = x.new_ones([x.shape[0]])
indices = range(len(sigmas) - 1)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
gamma = (
min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
if s_tmin <= sigmas[i] <= s_tmax
else 0.0
)
eps = generator.randn_like(x) * s_noise
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
denoised = denoiser(x, sigma_hat * s_in)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigma_hat,
"denoised": denoised,
}
)
dt = sigmas[i + 1] - sigma_hat
if sigmas[i + 1] == 0:
# Euler method
x = x + d * dt
else:
# Heun's method
x_2 = x + d * dt
denoised_2 = denoiser(x_2, sigmas[i + 1] * s_in)
d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
d_prime = (d + d_2) / 2
x = x + d_prime * dt
return x
@th.no_grad()
def sample_euler(
denoiser,
x,
sigmas,
generator,
progress=False,
callback=None,
):
"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
s_in = x.new_ones([x.shape[0]])
indices = range(len(sigmas) - 1)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
sigma = sigmas[i]
denoised = denoiser(x, sigma * s_in)
d = to_d(x, sigma, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"denoised": denoised,
}
)
dt = sigmas[i + 1] - sigma
x = x + d * dt
return x
@th.no_grad()
def sample_dpm(
denoiser,
x,
sigmas,
generator,
progress=False,
callback=None,
s_churn=0.0,
s_tmin=0.0,
s_tmax=float("inf"),
s_noise=1.0,
):
"""A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
s_in = x.new_ones([x.shape[0]])
indices = range(len(sigmas) - 1)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
gamma = (
min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
if s_tmin <= sigmas[i] <= s_tmax
else 0.0
)
eps = generator.randn_like(x) * s_noise
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
denoised = denoiser(x, sigma_hat * s_in)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigmas[i],
"sigma_hat": sigma_hat,
"denoised": denoised,
}
)
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
sigma_mid = ((sigma_hat ** (1 / 3) + sigmas[i + 1] ** (1 / 3)) / 2) ** 3
dt_1 = sigma_mid - sigma_hat
dt_2 = sigmas[i + 1] - sigma_hat
x_2 = x + d * dt_1
denoised_2 = denoiser(x_2, sigma_mid * s_in)
d_2 = to_d(x_2, sigma_mid, denoised_2)
x = x + d_2 * dt_2
return x
@th.no_grad()
def sample_onestep(
distiller,
x,
sigmas,
generator=None,
progress=False,
callback=None,
):
"""Single-step generation from a distilled model."""
s_in = x.new_ones([x.shape[0]])
return distiller(x, sigmas[0] * s_in)
@th.no_grad()
def stochastic_iterative_sampler(
distiller,
x,
sigmas,
generator,
ts,
progress=False,
callback=None,
t_min=0.002,
t_max=80.0,
rho=7.0,
steps=40,
):
t_max_rho = t_max ** (1 / rho)
t_min_rho = t_min ** (1 / rho)
s_in = x.new_ones([x.shape[0]])
for i in range(len(ts) - 1):
t = (t_max_rho + ts[i] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
x0 = distiller(x, t * s_in)
next_t = (t_max_rho + ts[i + 1] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
next_t = np.clip(next_t, t_min, t_max)
x = x0 + generator.randn_like(x) * np.sqrt(next_t**2 - t_min**2)
return x
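# Illustrative sketch: the integer indices in `ts` select points on the same
# rho-warped sigma grid used above; index 0 maps to t_max and index steps - 1
# maps to t_min (the values below are hypothetical).
def _example_multistep_grid(t_min=0.002, t_max=80.0, rho=7.0, steps=40):
    t_max_rho = t_max ** (1 / rho)
    t_min_rho = t_min ** (1 / rho)
    sigmas = [
        (t_max_rho + i / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
        for i in (0, 20, 39)
    ]
    assert abs(sigmas[0] - t_max) < 1e-6
    assert abs(sigmas[-1] - t_min) < 1e-6
    return sigmas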
@th.no_grad()
def sample_progdist(
denoiser,
x,
sigmas,
generator=None,
progress=False,
callback=None,
):
s_in = x.new_ones([x.shape[0]])
sigmas = sigmas[:-1] # skip the zero sigma
indices = range(len(sigmas) - 1)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
sigma = sigmas[i]
denoised = denoiser(x, sigma * s_in)
d = to_d(x, sigma, denoised)
if callback is not None:
callback(
{
"x": x,
"i": i,
"sigma": sigma,
"denoised": denoised,
}
)
dt = sigmas[i + 1] - sigma
x = x + d * dt
return x
@th.no_grad()
def iterative_colorization(
distiller,
images,
x,
ts,
t_min=0.002,
t_max=80.0,
rho=7.0,
steps=40,
generator=None,
):
def obtain_orthogonal_matrix():
vector = np.asarray([0.2989, 0.5870, 0.1140])
vector = vector / np.linalg.norm(vector)
matrix = np.eye(3)
matrix[:, 0] = vector
matrix = np.linalg.qr(matrix)[0]
if np.sum(matrix[:, 0]) < 0:
matrix = -matrix
return matrix
Q = th.from_numpy(obtain_orthogonal_matrix()).to(dist_util.dev()).to(th.float32)
mask = th.zeros(*x.shape[1:], device=dist_util.dev())
mask[0, ...] = 1.0
def replacement(x0, x1):
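        # Rotate into a basis whose first channel is luma (Q's first column),
        # keep x0's luma, take x1's chroma, then rotate back.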
x0 = th.einsum("bchw,cd->bdhw", x0, Q)
x1 = th.einsum("bchw,cd->bdhw", x1, Q)
x_mix = x0 * mask + x1 * (1.0 - mask)
x_mix = th.einsum("bdhw,cd->bchw", x_mix, Q)
return x_mix
t_max_rho = t_max ** (1 / rho)
t_min_rho = t_min ** (1 / rho)
s_in = x.new_ones([x.shape[0]])
images = replacement(images, th.zeros_like(images))
for i in range(len(ts) - 1):
t = (t_max_rho + ts[i] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
x0 = distiller(x, t * s_in)
x0 = th.clamp(x0, -1.0, 1.0)
x0 = replacement(images, x0)
next_t = (t_max_rho + ts[i + 1] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
next_t = np.clip(next_t, t_min, t_max)
x = x0 + generator.randn_like(x) * np.sqrt(next_t**2 - t_min**2)
return x, images
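# Illustrative sanity check (duplicates the nested helper above at module
# scope): the QR step yields an orthonormal basis whose first column is the
# normalized luma direction, so fixing channel 0 in the rotated space pins the
# grayscale content and lets the sampler regenerate only the color.
def _example_colorization_basis():
    vector = np.asarray([0.2989, 0.5870, 0.1140])
    vector = vector / np.linalg.norm(vector)
    matrix = np.eye(3)
    matrix[:, 0] = vector
    Q = np.linalg.qr(matrix)[0]
    if np.sum(Q[:, 0]) < 0:
        Q = -Q
    assert np.allclose(Q.T @ Q, np.eye(3))
    assert np.allclose(Q[:, 0], vector)
    return Q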
@th.no_grad()
def iterative_inpainting(
distiller,
images,
x,
ts,
t_min=0.002,
t_max=80.0,
rho=7.0,
steps=40,
generator=None,
):
from PIL import Image, ImageDraw, ImageFont
image_size = x.shape[-1]
# create a blank image with a white background
img = Image.new("RGB", (image_size, image_size), color="white")
# get a drawing context for the image
draw = ImageDraw.Draw(img)
# load a font
font = ImageFont.truetype("arial.ttf", 250)
    # draw the letter "S" in black
    draw.text((50, 0), "S", font=font, fill=(0, 0, 0))
# convert the image to a numpy array
img_np = np.array(img)
img_np = img_np.transpose(2, 0, 1)
img_th = th.from_numpy(img_np).to(dist_util.dev())
mask = th.zeros(*x.shape, device=dist_util.dev())
mask = mask.reshape(-1, 7, 3, image_size, image_size)
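    # Alternate the known region between consecutive groups of 7 images: even
    # groups keep the white background and inpaint the letter, odd groups keep
    # the letter and inpaint the background.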
mask[::2, :, img_th > 0.5] = 1.0
mask[1::2, :, img_th < 0.5] = 1.0
mask = mask.reshape(-1, 3, image_size, image_size)
def replacement(x0, x1):
x_mix = x0 * mask + x1 * (1 - mask)
return x_mix
t_max_rho = t_max ** (1 / rho)
t_min_rho = t_min ** (1 / rho)
s_in = x.new_ones([x.shape[0]])
images = replacement(images, -th.ones_like(images))
for i in range(len(ts) - 1):
t = (t_max_rho + ts[i] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
x0 = distiller(x, t * s_in)
x0 = th.clamp(x0, -1.0, 1.0)
x0 = replacement(images, x0)
next_t = (t_max_rho + ts[i + 1] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
next_t = np.clip(next_t, t_min, t_max)
x = x0 + generator.randn_like(x) * np.sqrt(next_t**2 - t_min**2)
return x, images
@th.no_grad()
def iterative_superres(
distiller,
images,
x,
ts,
t_min=0.002,
t_max=80.0,
rho=7.0,
steps=40,
generator=None,
):
patch_size = 8
def obtain_orthogonal_matrix():
vector = np.asarray([1] * patch_size**2)
vector = vector / np.linalg.norm(vector)
matrix = np.eye(patch_size**2)
matrix[:, 0] = vector
matrix = np.linalg.qr(matrix)[0]
if np.sum(matrix[:, 0]) < 0:
matrix = -matrix
return matrix
Q = th.from_numpy(obtain_orthogonal_matrix()).to(dist_util.dev()).to(th.float32)
image_size = x.shape[-1]
def replacement(x0, x1):
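        # Rotate each patch into a basis whose first coordinate is the patch
        # mean (Q's first column is the normalized all-ones vector); keep the
        # low-resolution mean from x0 and all higher-frequency detail from x1.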
x0_flatten = (
x0.reshape(-1, 3, image_size, image_size)
.reshape(
-1,
3,
image_size // patch_size,
patch_size,
image_size // patch_size,
patch_size,
)
.permute(0, 1, 2, 4, 3, 5)
.reshape(-1, 3, image_size**2 // patch_size**2, patch_size**2)
)
x1_flatten = (
x1.reshape(-1, 3, image_size, image_size)
.reshape(
-1,
3,
image_size // patch_size,
patch_size,
image_size // patch_size,
patch_size,
)
.permute(0, 1, 2, 4, 3, 5)
.reshape(-1, 3, image_size**2 // patch_size**2, patch_size**2)
)
x0 = th.einsum("bcnd,de->bcne", x0_flatten, Q)
x1 = th.einsum("bcnd,de->bcne", x1_flatten, Q)
x_mix = x0.new_zeros(x0.shape)
x_mix[..., 0] = x0[..., 0]
x_mix[..., 1:] = x1[..., 1:]
x_mix = th.einsum("bcne,de->bcnd", x_mix, Q)
x_mix = (
x_mix.reshape(
-1,
3,
image_size // patch_size,
image_size // patch_size,
patch_size,
patch_size,
)
.permute(0, 1, 2, 4, 3, 5)
.reshape(-1, 3, image_size, image_size)
)
return x_mix
def average_image_patches(x):
x_flatten = (
x.reshape(-1, 3, image_size, image_size)
.reshape(
-1,
3,
image_size // patch_size,
patch_size,
image_size // patch_size,
patch_size,
)
.permute(0, 1, 2, 4, 3, 5)
.reshape(-1, 3, image_size**2 // patch_size**2, patch_size**2)
)
x_flatten[..., :] = x_flatten.mean(dim=-1, keepdim=True)
return (
x_flatten.reshape(
-1,
3,
image_size // patch_size,
image_size // patch_size,
patch_size,
patch_size,
)
.permute(0, 1, 2, 4, 3, 5)
.reshape(-1, 3, image_size, image_size)
)
t_max_rho = t_max ** (1 / rho)
t_min_rho = t_min ** (1 / rho)
s_in = x.new_ones([x.shape[0]])
images = average_image_patches(images)
for i in range(len(ts) - 1):
t = (t_max_rho + ts[i] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
x0 = distiller(x, t * s_in)
x0 = th.clamp(x0, -1.0, 1.0)
x0 = replacement(images, x0)
next_t = (t_max_rho + ts[i + 1] / (steps - 1) * (t_min_rho - t_max_rho)) ** rho
next_t = np.clip(next_t, t_min, t_max)
x = x0 + generator.randn_like(x) * np.sqrt(next_t**2 - t_min**2)
return x, images
|
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
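# Illustrative sanity check: the KL between identical Gaussians is zero, and
# scalars broadcast against tensor arguments as described above.
def _example_normal_kl():
    mean = th.zeros(4)
    logvar = th.zeros(4)
    assert th.allclose(normal_kl(mean, logvar, mean, logvar), th.zeros(4))
    # N(0, 1) vs N(0, e): KL = 0.5 * (-1 + 1 + exp(-1)) = 0.5 * exp(-1)
    kl = normal_kl(th.tensor(0.0), th.tensor(0.0), 0.0, 1.0)
    assert th.isclose(kl, th.exp(th.tensor(-1.0)) / 2)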
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that this was uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
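# Illustrative sanity check: the 256 per-bin probabilities partition the real
# line (the edge bins absorb both tails), so they sum to one up to the CDF
# approximation and clamping above.
def _example_discretized_likelihood():
    x = th.linspace(-1.0, 1.0, 256)  # every uint8 value rescaled to [-1, 1]
    zeros = th.zeros_like(x)
    log_probs = discretized_gaussian_log_likelihood(x, means=zeros, log_scales=zeros)
    assert th.isclose(th.exp(log_probs).sum(), th.tensor(1.0), atol=1e-3)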
|
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.float()
if l.bias is not None:
l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
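# Illustrative sketch: flattening a small model into master params and copying
# them back is a lossless round trip (the two-layer model below is arbitrary).
def _example_master_param_round_trip():
    model = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    groups_and_shapes = get_param_groups_and_shapes(model.named_parameters())
    master_params = make_master_params(groups_and_shapes)
    before = [p.detach().clone() for p in model.parameters()]
    master_params_to_model_params(groups_and_shapes, master_params)
    for p, q in zip(model.parameters(), before):
        assert th.equal(p, q)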
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2**self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2**self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
for p in self.master_params:
p.grad.mul_(1.0 / (2**self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
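# Illustrative sanity check: `value != value` is the standard NaN test, so the
# helper flags inf, -inf, and NaN gradient norms alike.
def _example_check_overflow():
    assert check_overflow(float("inf"))
    assert check_overflow(-float("inf"))
    assert check_overflow(float("nan"))
    assert not check_overflow(1.0)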
|
"""
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
"""
    Set up a distributed process group.
"""
if dist.is_initialized():
return
os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
comm = MPI.COMM_WORLD
backend = "gloo" if not th.cuda.is_available() else "nccl"
if backend == "gloo":
hostname = "localhost"
else:
hostname = socket.gethostbyname(socket.getfqdn())
os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
os.environ["RANK"] = str(comm.rank)
os.environ["WORLD_SIZE"] = str(comm.size)
port = comm.bcast(_find_free_port(), root=0)
os.environ["MASTER_PORT"] = str(port)
dist.init_process_group(backend=backend, init_method="env://")
def dev():
"""
Get the device to use for torch.distributed.
"""
if th.cuda.is_available():
return th.device("cuda")
return th.device("cpu")
def load_state_dict(path, **kwargs):
"""
Load a PyTorch file without redundant fetches across MPI ranks.
"""
chunk_size = 2**30 # MPI has a relatively small size limit
if MPI.COMM_WORLD.Get_rank() == 0:
with bf.BlobFile(path, "rb") as f:
data = f.read()
num_chunks = len(data) // chunk_size
if len(data) % chunk_size:
num_chunks += 1
MPI.COMM_WORLD.bcast(num_chunks)
for i in range(0, len(data), chunk_size):
MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
else:
num_chunks = MPI.COMM_WORLD.bcast(None)
data = bytes()
for _ in range(num_chunks):
data += MPI.COMM_WORLD.bcast(None)
return th.load(io.BytesIO(data), **kwargs)
def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with th.no_grad():
dist.broadcast(p, 0)
def _find_free_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("", 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]
    finally:
        s.close()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import os.path
import subprocess
import shutil
# helpful for kernel development
debug = 0
gen_kernels = [
[ "xgemm_blocksparse_32x32x32_xprop", "fprop", "A32", "B32", "C32" ],
[ "xgemm_blocksparse_32x32x32_xprop", "fprop", "A10", "B10", "C10" ],
[ "xgemm_blocksparse_32x32x32_xprop", "fprop", "A10", "B32", "C10" ],
[ "xgemm_blocksparse_32x32x32_xprop", "fprop", "A7", "B7", "C7" ],
[ "xgemm_blocksparse_32x32x32_xprop", "bprop", "A32", "B32", "C32" ],
[ "xgemm_blocksparse_32x32x32_xprop", "bprop", "A10", "B10", "C10" ],
[ "xgemm_blocksparse_32x32x32_xprop", "bprop", "A32", "B10", "C32" ],
[ "xgemm_blocksparse_32x32x32_xprop", "bprop", "A7", "B7", "C7" ],
[ "xgemm_blocksparse_32x32x32_xprop", "bprop", "A32", "B7", "C32" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A32", "B32", "C32" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A10", "B10", "C10" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A10", "B32", "C10" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A10", "B32", "C32" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A7", "B7", "C7" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A7", "B32", "C7" ],
[ "xgemm_blocksparse_32x32x8_updat", "updat", "A7", "B32", "C32" ],
[ "xconv_blocksparse_32x32x16_fprop", "fprop", "F32", "I32", "O32" ],
[ "xconv_blocksparse_32x32x16_fprop", "fprop", "F16", "I16", "O16" ],
[ "xconv_blocksparse_32x32x16_fprop", "fprop", "F16", "I32", "O32" ],
[ "xconv_blocksparse_32x32x16_fprop", "fprop", "F32", "I32", "O32", "overlapK" ],
[ "xconv_blocksparse_32x32x16_fprop", "fprop", "F16", "I16", "O16", "overlapK" ],
[ "xconv_blocksparse_32x32x16_fprop", "fprop", "F16", "I32", "O32", "overlapK" ],
[ "xconv_blocksparse_32x32x16_bprop", "bprop", "F32", "I32", "O32" ],
[ "xconv_blocksparse_32x32x16_bprop", "bprop", "F16", "I16", "O16" ],
[ "xconv_blocksparse_32x32x16_bprop", "bprop", "F16", "I32", "O32" ],
[ "xconv_blocksparse_32x32x16_bprop", "bprop", "F32", "I32", "O32", "overlapC" ],
[ "xconv_blocksparse_32x32x16_bprop", "bprop", "F16", "I16", "O16", "overlapC" ],
[ "xconv_blocksparse_32x32x16_bprop", "bprop", "F16", "I32", "O32", "overlapC" ],
[ "xconv_blocksparse_32x32x32_updat", "updat", "E32", "I32", "O32" ],
[ "xconv_blocksparse_32x32x32_updat", "updat", "E16", "I16", "O16" ],
[ "xconv_blocksparse_32x32x32_updat", "updat", "E32", "I16", "O16" ],
[ "xconv_blocksparse_32x32x32_updat", "updat", "E16", "I32", "O16" ],
[ "xconv_blocksparse_32x32x32_updat", "updat", "E32", "I16", "O32" ],
[ "xconv_blocksparse_32x32x32_updat", "updat", "E16", "I32", "O32" ],
]
kernel_specs = dict(
xgemm_blocksparse_32x32x32_xprop=dict(basename="gemm_blocksparse_32x32x32", params="xprop_matmul", threads=128, share="(32*33)*4 + 4"),
xgemm_blocksparse_32x32x32_updat=dict(basename="gemm_blocksparse_32x32x32", params="updat_matmul", threads=128, share="(32*32)*4 + 64"),
xgemm_blocksparse_32x32x8_updat =dict(basename="gemm_blocksparse_32x32x8", params="updat_matmul", threads= 32, share="(32* 8)*4 + 64"),
xconv_blocksparse_32x32x32_fprop=dict(basename="conv_blocksparse_32x32x32", params="xprop_conv", threads=128, share="(33+32)*32*2" ),
xconv_blocksparse_32x32x16_fprop=dict(basename="conv_blocksparse_32x32x16", params="xprop_conv", threads= 64, share="(17+16)*32*2" ),
xconv_blocksparse_32x32x16_bprop=dict(basename="conv_blocksparse_32x32x16", params="xprop_conv", threads= 64, share="(16+16)*32*2 + 64" ),
xconv_blocksparse_32x32x32_updat=dict(basename="conv_blocksparse_32x32x32", params="updat_conv", threads=128, share="32*33*4 + 4" ),
)
_params = {
"xprop_matmul": [
"unsigned* param_Layout",
"float* param_C",
"float* param_A",
"float* param_B",
"float param_alpha",
"float param_beta",
"unsigned param_cda",
"unsigned param_cdc",
"unsigned param_m",
],
"updat_matmul": [
"plist8 param_A",
"plist8 param_B",
"unsigned* param_Layout",
"float* param_C",
"float param_alpha",
"float param_beta",
"unsigned param_cda",
"unsigned param_cdb",
"unsigned param_k",
"unsigned param_count",
],
"xprop_conv": [
"unsigned* param_Block",
"unsigned* param_LutMPQ",
"unsigned* param_LutCK",
"float* param_O",
"float* param_F",
"float* param_I",
"float param_alpha",
"unsigned param_TRS",
"unsigned param_magic_TRS",
"unsigned param_shift_TRS",
"unsigned param_CDHW",
"unsigned param_KMPQ",
],
"updat_conv": [
"unsigned* param_Block",
"unsigned* param_LutMPQ",
"unsigned* param_LutCK",
"float* param_O",
"float* param_E",
"float* param_I",
"float param_alpha",
"unsigned param_TRS",
"unsigned param_magic_TRS",
"unsigned param_shift_TRS",
"unsigned param_CDHW",
"unsigned param_KMPQ",
"unsigned param_N",
"unsigned param_sizeF",
],
}
def _get_cache_dir(subdir=None):
cache_dir = os.path.expanduser("~/.cache/blocksparse")
if subdir:
subdir = subdir if isinstance(subdir, list) else [subdir]
cache_dir = os.path.join(cache_dir, *subdir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir
base_dir = os.path.dirname(__file__)
maxas_dir = os.path.join(base_dir, "vendor", "maxas")
sass_dir = os.path.join(base_dir, "src", "sass")
_space_re = re.compile(r"\s+")
_share_template = r"""
.shared .align 4 .b32 share[{0}];
"""
_kernel_template = r"""
.version {6}
.target {0}
.address_size 64
// args: {5}
.visible .entry {1}(
{2}
)
{{
{4}
ret;
}}
"""
#.reqntid {3}
def get_ptx_file(kernel_spec, args_spec, kernel_name, arch, ptx_ver):
ptx_dir = _get_cache_dir([arch, 'ptx'])
thread_spec = kernel_spec["threads"]
param_spec = _params[kernel_spec["params"]]
kernel_params = []
for p in param_spec:
ptype, pname = _space_re.split(p)
if ptype == "plist8":
kernel_params.append(" .param .align 8 .b64 %s[8]" % pname)
else:
if ptype[-1] == '*':
ptype = '.u64'
elif ptype == 'float':
ptype = '.f32'
else:
ptype = '.u32'
kernel_params.append(" .param %s %s" % (ptype, pname))
kernel_params = ",\n".join(kernel_params)
if "share" in kernel_spec:
share = _share_template.format(eval(kernel_spec["share"]))
else:
share = ""
kernel_text = _kernel_template.format(arch, kernel_name, kernel_params, thread_spec, share, args_spec, ptx_ver)
kernel_ptx = os.path.join(ptx_dir, kernel_name + ".ptx")
current_text = ""
if os.path.exists(kernel_ptx):
f = open(kernel_ptx, "r")
current_text = f.read()
f.close()
# only write out the kernel if text has changed.
if kernel_text != current_text:
f = open(kernel_ptx, "w")
f.write(kernel_text)
f.close()
return kernel_ptx
include_re = re.compile(r'^<INCLUDE\s+file="([^"]+)"\s*/>')
def extract_includes(name, includes=None):
if not includes:
includes = list()
sass_file = os.path.join(sass_dir, name)
includes.append((sass_file, os.path.getmtime(sass_file)))
for line in open(sass_file, "r"):
match = include_re.search(line)
if match:
extract_includes(match.group(1), includes)
return includes
def run_command(cmdlist):
cmd = " ".join(cmdlist)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode:
raise RuntimeError("Error(%d):\n%s\n%s" % (proc.returncode, cmd, err))
    # Echo the command and any captured output; handy when a kernel build fails.
    print(cmd)
    if out: print(out)
    if err: print(err)
def get_kernel(kernel):
major, minor = 5, 0
arch = "sm_%d%d" % (major, minor)
libprefix = "PERL5LIB=%s" % maxas_dir
maxas_i = [libprefix, os.path.join(maxas_dir, "maxas.pl") + " -i -w"]
maxas_p = [libprefix, os.path.join(maxas_dir, "maxas.pl") + " -p"]
sass_name = kernel[0]
kernel_spec = kernel_specs[sass_name]
kernel_name = kernel_spec["basename"]
args_spec = str(kernel[1:])
for opt in kernel[1:]:
maxas_i.append("-D%s 1" % opt)
maxas_p.append("-D%s 1" % opt)
kernel_name += "_" + opt
maxas_i.insert(2, "-k " + kernel_name)
sass_name += ".sass"
cubin_name = kernel_name + ".cubin"
cubin_dir = _get_cache_dir([arch, 'cubin'])
ptx_version = "4.2" if major < 6 else "5.0"
ptx_file = get_ptx_file(kernel_spec, args_spec, kernel_name, arch, ptx_version)
sass_file = os.path.join(sass_dir, sass_name)
cubin_file = os.path.join(cubin_dir, cubin_name)
if not os.path.exists(sass_file):
raise RuntimeError("Missing: %s for kernel: %s" % (sass_name, kernel_name))
ptx_mtime = os.path.getmtime(ptx_file)
cubin_mtime = os.path.getmtime(cubin_file) if os.path.exists(cubin_file) else 0
build_cubin = False
if ptx_mtime > cubin_mtime:
build_cubin = True
includes = extract_includes(sass_name)
for include, include_mtime in includes:
if include_mtime > cubin_mtime:
build_cubin = True
break
if build_cubin:
# build the cubin and run maxas in the same shell command so we never leave
# behind a cubin that maxas has not processed (e.g. if the user hits ^C between the steps)
run_command([ "ptxas -v -arch", arch, "-o", cubin_file, ptx_file, ";" ] + maxas_i + [sass_file, cubin_file])
cubin_mtime = time.time()
# output preprocessed and disassembled versions in debug mode
if debug:
pre_dir = _get_cache_dir([arch, 'pre'])
dump_dir = _get_cache_dir([arch, 'dump'])
pre_file = os.path.join(pre_dir, kernel_name + "_pre.sass")
dump_file = os.path.join(dump_dir, kernel_name + "_dump.sass")
pre_mtime = os.path.getmtime(pre_file) if os.path.exists(pre_file) else 0
dump_mtime = os.path.getmtime(dump_file) if os.path.exists(dump_file) else 0
for include, include_mtime in includes:
if include_mtime > pre_mtime:
run_command(maxas_p + [sass_file, pre_file])
break
# if cubin_mtime > dump_mtime:
# run_command(["nvdisasm -c", cubin_file, ">", dump_file])
return kernel_name, cubin_file
def main():
header_file = os.path.join(base_dir, "build", "blocksparse_kernels.h")
with open(header_file, "w") as output_file:
kernel_map = "\n\nstd::map<std::string, std::pair<const uint8_t*, size_t>> kernel_map_ = {"
for kernel in gen_kernels:
kernel_name, cubin_file = get_kernel(kernel)
kernel_text = "\n\nconst uint8_t %s[] = {" % kernel_name
with open(cubin_file, 'rb') as input_file:
count = 0
byte = input_file.read(1)
use_hex = 'hex' in dir(byte)
while byte:
if count % 32 == 0:
kernel_text += "\n "
count += 1
if use_hex:
kernel_text += "0x" + byte.hex() + ","
else:
kernel_text += "0x" + byte.encode("hex") + ","
byte = input_file.read(1)
kernel_text += "\n};"
kernel_map += "\n { \"%s\", { %s, %d } }," % (kernel_name, kernel_name, count)
output_file.write(kernel_text)
kernel_map += "\n};"
output_file.write(kernel_map)
if __name__ == '__main__':
main()
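# The emitted header, presumably consumed by the blocksparse C++ ops, has
# roughly this shape (kernel name and byte count are hypothetical):
#
#   const uint8_t gemm_blocksparse_32x32_nt[] = {
#       0x7f, 0x45, 0x4c, 0x46, ...
#   };
#   std::map<std::string, std::pair<const uint8_t*, size_t>> kernel_map_ = {
#       { "gemm_blocksparse_32x32_nt", { gemm_blocksparse_32x32_nt, 4096 } },
#   };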
|
#!/usr/bin/env python
import setuptools
setuptools.setup(
name='blocksparse',
version='1.13.1',
description='Tensorflow ops for blocksparse matmul, transformer, convolution and related operations.',
author='OpenAI',
maintainer='Scott Gray',
maintainer_email='[email protected]',
install_requires=[
'numpy',
'scipy',
# We don't depend on `tensorflow` or `tensorflow-gpu` here, since one or the other is sufficient.
],
packages=['blocksparse'],
package_data={ 'blocksparse': ['blocksparse_ops.so'] },
url='https://github.com/openai/blocksparse',
license='MIT')
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
import blocksparse.norms as norms
import blocksparse.lstm as lstm
from time import time
shapes = [
[ 128, 1024*1 ],
[ 128, 1024*2 ],
]
layernorm = False
class LSTMGatesTest(tf.test.TestCase):
def testLSTMGates(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shape1 in shapes:
shape4 = [shape1[0], shape1[1]*4]
for dtype in (tf.float32, tf.float16): #tf.float16, tf.bfloat16
np.random.seed(int(time()))
cpuC = np.random.uniform(-1.0, 1.0, shape1 ).astype(np.float32)
cpuH = np.random.uniform(-1.0, 1.0, shape4 ).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, shape1 ).astype(np.float32)
cpuB = np.random.uniform(-1.0, 1.0, shape4[1:]).astype(np.float32)
cpuG = np.random.uniform(-1.0, 1.0, shape4[1:]).astype(np.float32)
results = []
for device in ("gpu", "cpu"):
with tf.device("/%s:0" % device), tf.name_scope(device):
c = tf.placeholder(tf.float32, cpuC.shape, name="c")
h = tf.placeholder(tf.float32, cpuH.shape, name="h")
e = tf.placeholder(tf.float32, cpuE.shape, name="e")
b = tf.placeholder(tf.float32, cpuB.shape, name="b")
g = tf.placeholder(tf.float32, cpuB.shape, name="g")
feed_dict = {
c : cpuC,
h : cpuH,
e : cpuE,
b : cpuB,
g : cpuG,
}
if device == "gpu" and dtype is not tf.float32:
cf = ew.float_cast(c, dtype=dtype)
hf = ew.float_cast(h, dtype=dtype)
else:
cf, hf = c, h
if layernorm:
hf = norms.layer_norm(hf, g, b, axis=1, segments=4)
bias = None
else:
bias = b
cf, hf = lstm.fused_lstm_gates(cf, hf, bias=bias, forget_bias=1.0)
if device == "gpu" and dtype is not tf.float32:
cf = ew.float_cast(cf, dtype=tf.float32, dx_dtype=dtype)
hf = ew.float_cast(hf, dtype=tf.float32, dx_dtype=dtype)
if layernorm:
dc, dh, dg, db = tf.gradients([cf, hf], [c, h, g, b], [None, e])
results.append( sess.run( [ cf, hf, dc, dh, dg, db ], feed_dict ) )
labels = [" c", " h", "dc", "dh", "dg", "db"]
else:
dc, dh, db = tf.gradients([cf, hf], [c, h, b], [None, e])
results.append( sess.run( [ cf, hf, dc, dh, db ], feed_dict ) )
labels = [" c", " h", "dc", "dh", "db"]
for op, dev, cpu in zip(labels, results[0], results[1]):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("%s, shape:%12s, op:%s, err:%17.12f, l2_err:%17.12f" % (dtype.name, str(cpu.shape), op, maxdif, l2_err))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time
import numpy as np
import tensorflow as tf
import blocksparse as bs
ones = 0
out = 0
bench = 0
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
# define inner block structure for masked softmax
def mask_callback(blk_shape, head_idx, qry_idx, key_idx, blk_idx):
# default to enabled
mask = np.ones(blk_shape, dtype=np.bool)
# on the diagonal blocks mask out the upper diagonal
if qry_idx == key_idx:
for q, k in np.ndindex(blk_shape):
if k > q:
mask[q,k] = 0
# if head == 0:
# print(mask.astype(np.uint8))
return mask
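# For a 4x4 diagonal block (qry_idx == key_idx) the callback returns the lower
# triangle, i.e. standard autoregressive masking within the block:
#   [[1 0 0 0]
#    [1 1 0 0]
#    [1 1 1 0]
#    [1 1 1 1]]
# Off-diagonal blocks stay all ones; the outer layout decides which blocks exist at all.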
class BlocksparseTransformerTest(tf.test.TestCase):
def testBlocksparseTransformerDense(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
batch = 2
heads = 2
state = 64*2
scale = 1.0 / np.sqrt(state/heads)
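# the same scale feeds both the blocksparse and the dense reference softmax below,
# so the comparison is insensitive to its exact value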
for bsize in (8, 16, 32, 64):
ctxQ = 16
ctxK = 16
layout = np.ones([heads, ctxQ, ctxK], dtype=np.bool)
bst = bs.BlocksparseTransformer(layout, block_size=bsize)
shapeQ = (batch, ctxQ*bsize, heads*state)
shapeK = (batch, ctxK*bsize, heads*state)
if ones:
cpuQ = np.ones(shapeQ, dtype=np.float32)
cpuK = np.ones(shapeK, dtype=np.float32)
cpuV = np.ones(shapeK, dtype=np.float32)
cpuE = np.ones(shapeQ, dtype=np.float32)
else:
cpuQ = np.random.uniform(-1.0, 1.0, shapeQ).astype(np.float16).astype(np.float32)
cpuK = np.random.uniform(-1.0, 1.0, shapeK).astype(np.float16).astype(np.float32)
cpuV = np.random.uniform(-1.0, 1.0, shapeK).astype(np.float16).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, shapeQ).astype(np.float16).astype(np.float32)
q = tf.placeholder(tf.float32, shapeQ)
k = tf.placeholder(tf.float32, shapeK)
v = tf.placeholder(tf.float32, shapeK)
e = tf.placeholder(tf.float32, shapeQ)
feed_dict = { q: cpuQ, k: cpuK, v: cpuV, e: cpuE }
qf = bs.float_cast(q, dtype=tf.float16)
kf = bs.float_cast(k, dtype=tf.float16)
vf = bs.float_cast(v, dtype=tf.float16)
w = bst.query_key_op(qf, kf, bench=bench)
w = bst.softmax(w, scale=scale)
y = bst.weight_value_op(w, vf, bench=bench)
qf = bs.transpose_0213(tf.reshape(qf, [batch, ctxQ*bsize, heads, state]))
kf = bs.transpose_0213(tf.reshape(kf, [batch, ctxK*bsize, heads, state]))
vf = bs.transpose_0213(tf.reshape(vf, [batch, ctxK*bsize, heads, state]))
W = tf.matmul(qf, kf, transpose_b=True)
W = bs.softmax(W, scale=scale)
Y = tf.matmul(W, vf)
Y = tf.reshape(bs.transpose_0213(Y), [batch, ctxQ*bsize, heads*state])
y = bs.float_cast(y, dtype=tf.float32)
Y = bs.float_cast(Y, dtype=tf.float32)
y, (dq, dk, dv) = sess.run( [ y, tf.gradients(y, [q, k, v], e) ], feed_dict )
Y, (DQ, DK, DV) = sess.run( [ Y, tf.gradients(Y, [q, k, v], e) ], feed_dict )
print("testBlocksparseTransformerDense", bsize)
if not bench:
for op, dev, cpu in [
[ " Y", y, Y ],
[ "DV", dv, DV ],
[ "DK", dk, DK ],
[ "DQ", dq, DQ ],
]:
self.compare_results(op, dev, cpu)
def testBlocksparseTransformerSparse(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
batch = 2
heads = 2
ctx = 16
state = 64*2
scale = 1.0 / np.sqrt(state/heads)
dtype = tf.float32
for bsize in ( 32, ): # 8, 16, 32, 64
layout = np.ones([heads, ctx, ctx], dtype=np.bool)
for q, k in np.ndindex(ctx, ctx):
if k > q:
layout[:,q,k] = 0
bst = bs.BlocksparseTransformer(layout, block_size=bsize, mask_callback=mask_callback)
shape = (batch, ctx*bsize, heads*state)
if ones:
cpuQ = np.ones(shape, dtype=np.float32)
cpuK = np.ones(shape, dtype=np.float32)
cpuV = np.ones(shape, dtype=np.float32)
cpuE = np.ones(shape, dtype=np.float32)
else:
cpuQ = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
cpuK = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
cpuV = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
q = tf.placeholder(tf.float32, shape)
k = tf.placeholder(tf.float32, shape)
v = tf.placeholder(tf.float32, shape)
e = tf.placeholder(tf.float32, shape)
feed_dict = { q: cpuQ, k: cpuK, v: cpuV, e: cpuE }
qf = bs.float_cast(q, dtype=dtype)
kf = bs.float_cast(k, dtype=dtype)
vf = bs.float_cast(v, dtype=dtype)
w = bst.query_key_op(qf, kf)
a = bst.masked_softmax(w, scale=scale)
y = bst.weight_value_op(a, vf)
w = bs.float_cast(w, dtype=tf.float32)
a = bs.float_cast(a, dtype=tf.float32)
y = bs.float_cast(y, dtype=tf.float32)
dq, dk, dv = tf.gradients(y, [q, k, v], e)
w, a, y, dq, dk, dv = sess.run( [ w, a, y, dq, dk, dv ], feed_dict )
W = bst.nt_test(cpuQ, cpuK)
A = bst.masked_softmax_test(W, scale=scale)
Y = bst.nn_test(A, cpuV)
DV = bst.tn_test( A, cpuE)
DW = bst.nt_test(cpuE, cpuV)
DW = bst.masked_softmax_grad_test(DW, A, scale=scale)
DQ = bst.nn_test( DW, cpuK)
DK = bst.tn_test( DW, cpuQ)
print("testBlocksparseTransformerSparse", 32)
if not bench:
for op, dev, cpu in [
[ "W", w, W ],
[ "A", a, A ],
[ "Y", y, Y ],
[ "DV", dv, DV ],
[ "DK", dk, DK ],
[ "DQ", dq, DQ ],
]:
self.compare_results(op, dev, cpu)
def testBlocksparseTransformerMatmul(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
for bsize in ( 32, ): # 8, 16, 32, 64
dtype_qk = tf.float32
dtype_w = tf.bfloat16
ones = 0
bench = 0
batch = 2
heads = 4
ctx = 16
state = 64*2
scale = 1.0 # / np.sqrt(state/heads)
ctxQ = ctx
ctxK = ctx # *2
layout = np.ones([1, ctxQ, ctxK], dtype=np.bool)
for q, k in np.ndindex(ctx, ctx):
if k > q:
layout[:,q,k] = 0
#layout[:,0,:] = 1
bst = bs.BlocksparseTransformer(layout, heads=heads, block_size=bsize, mask_callback=mask_callback)
q_shape = (batch, ctxQ*bsize, heads*state)
k_shape = (batch, ctxK*bsize, heads*state)
w_shape = (batch, heads, bst.blocks, bsize, bsize)
if ones:
cpuQ = np.ones(q_shape, dtype=np.float32)
cpuK = np.ones(k_shape, dtype=np.float32)
cpuW = np.ones(w_shape, dtype=np.float32)
# cpuQ[0,:,:] = np.eye(bsize, dtype=np.float32)
# cpuK[0,:,:] = np.eye(bsize, dtype=np.float32)
# cpuW[0,0,0,:,:] = np.eye(bsize, dtype=np.float32)
# cpuQ[0,0,0,:] = 1
# cpuK[0,0,0,:] = range(64)
# cpuW[0,0,0,0,:] = 1
else:
cpuQ = np.random.uniform(-1.0, 1.0, q_shape).astype(np.float16).astype(np.float32)
cpuK = np.random.uniform(-1.0, 1.0, k_shape).astype(np.float16).astype(np.float32)
cpuW = np.random.uniform(-1.0, 1.0, w_shape).astype(np.float16).astype(np.float32)
q = tf.placeholder(tf.float32, cpuQ.shape)
k = tf.placeholder(tf.float32, cpuK.shape)
w = tf.placeholder(tf.float32, cpuW.shape)
feed_dict = { q: cpuQ, k: cpuK, w: cpuW }
qf = bs.float_cast(q, dtype=dtype_qk)
kf = bs.float_cast(k, dtype=dtype_qk)
wf = bs.float_cast(w, dtype=dtype_w)
nt = bst.nt_op(qf, kf, bench=bench)
nn = bst.nn_op(wf, kf, bench=bench)
tn = bst.tn_op(wf, qf, bench=bench)
nt = bs.float_cast(nt, dtype=tf.float32)
nn = bs.float_cast(nn, dtype=tf.float32)
tn = bs.float_cast(tn, dtype=tf.float32)
print("testBlocksparseTransformerMatmul", bsize)
nt, nn, tn = sess.run( [ nt, nn, tn ], feed_dict ) # nt, nn, tn
if not bench:
NT = bst.nt_test(cpuQ, cpuK)
NN = bst.nn_test(cpuW, cpuK)
TN = bst.tn_test(cpuW, cpuQ)
for op, dev, cpu in [
[ "NT", nt, NT ],
[ "NN", nn, NN ],
[ "TN", tn, TN ],
]:
self.compare_results(op, dev, cpu)
def atestBlocksparseSoftmax(self):
batch = 1
heads = 1
key = 7
def checker_callback(blk_shape, head_idx, qry_idx, key_idx, blk_idx):
mask = np.ones(blk_shape, dtype=np.bool)
mask[::2,1::2] = False
mask[1::2,::2] = False
return mask
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
# for ctx in (16, 32, 64, 128, 256, 512, 1024, 2048, 4096): #16, 32, 64, 128, 256, 512, 1024, 2048, 4096
# for bsize in (8, 16, 32, 64,): # 8, 16, 32, 64,
# if bsize * (ctx+0) <= 32768:
for ctx in (16,): #16, 32, 64, 128, 256, 512, 1024, 2048, 4096
for bsize in (8, 16, 32, 64, ): # 8, 16, 32, 64,
if bsize * (ctx) <= 32768:
# define outer block structure for blocksparse matmul
layout = np.ones([heads, ctx, ctx], dtype=np.bool)
bst = bs.BlocksparseTransformer(layout, heads=heads, block_size=bsize, mask_callback=checker_callback) # checker_callback
shape = (batch, heads, bst.blocks, bsize, bsize)
print(shape)
if ones:
cpuX = np.ones(shape, dtype=np.float32)
cpuE = np.ones(shape, dtype=np.float32)
else:
cpuX = np.random.normal(0.0, 1.0, shape).astype(np.float16).astype(np.float32)
cpuE = np.random.normal(0.0, 1.0, shape).astype(np.float16).astype(np.float32)
# np.savetxt("cpuX.txt", cpuX.reshape((-1,bsize)), fmt='%5.2f')
# for i, a in enumerate(np.max(cpuX.reshape(-1,bsize), axis=1)):
# print("%2d %.2f" % (i, a))
# print()
x = tf.placeholder(tf.float32, cpuX.shape)
e = tf.placeholder(tf.float32, cpuE.shape)
feed_dict = { x: cpuX, e: cpuE }
xf = bs.float_cast(x, dtype=tf.bfloat16)
y = bst.masked_softmax(xf, scale=0.5, autoregress_at_key=key)
y = bs.float_cast(y, dtype=tf.float32)
dx, = tf.gradients(y, [ x ], e)
y, dx = sess.run( [ y, dx ], feed_dict )
Y = bst.masked_softmax_test(cpuX, scale=0.5, autoregress_at_key=key)
DX = bst.masked_softmax_grad_test(cpuE, Y, scale=0.5)
print("testBlocksparseSoftmax", ctx*bsize, bsize)
for op, dev, cpu in [
[ "Y", y, Y ],
[ "DX", dx, DX ],
]:
self.compare_results(op, dev, cpu)
def testSoftmaxCrossEntropy(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
N = 3 # 80 * 16
for K in (10, 256, 512, 1024*8, 1024*16, 1024*32, 1024*64,): #10, 256, 512, 1024*8, 1024*16, 1024*32, 1024*64
np.random.seed(int(time()))
#cpuX = np.random.uniform(-20.0, 20.0, (N, K)).astype(np.float16).astype(np.float32) #65504
cpuX = np.random.normal(0.0, 1.0, (N, K)).astype(np.float16).astype(np.float32)
cpuE = np.random.normal(0.0, 1.0, (N, )).astype(np.float16).astype(np.float32)
cpuI = np.random.randint(0, K, size=(N, ), dtype=np.uint16)
x = tf.placeholder(tf.float32, cpuX.shape)
e = tf.placeholder(tf.float32, cpuE.shape)
i = tf.placeholder(tf.uint16, cpuI.shape)
feed_dict = { x: cpuX, i: cpuI, e: cpuE }
xf = bs.float_cast(x, dtype=tf.float16)
y = bs.softmax_cross_entropy(logits=xf, labels=i)
Y = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=tf.cast(i, tf.int32))
y, (dx,) = sess.run( [ y, tf.gradients(y, [x], e) ], feed_dict )
Y, (DX,) = sess.run( [ Y, tf.gradients(Y, [x], e) ], feed_dict )
print("testSoftmaxCrossEntropy", K)
if not bench:
for op, dev, cpu in [
[ "Y", y, Y ],
[ "DX", dx, DX ],
]:
self.compare_results(op, dev, cpu)
def compare_results(self, op, dev, cpu):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("op:%3s, err:%17.12f, l2_err:%17.12f shape:%14s" % (op, maxdif, l2_err, str(cpu.shape)))
if out:
dim = cpu.shape[-1]
np.savetxt("%s_dif.txt" % op, dif.reshape((-1,dim)), fmt='%2.0f') #7.5 5.3
np.savetxt("%s_cpu.txt" % op, cpu.reshape((-1,dim)), fmt='%2.0f') #7.5 5.3
np.savetxt("%s_dev.txt" % op, dev.reshape((-1,dim)), fmt='%2.0f') #7.5 5.3
exit()
if __name__ == "__main__":
tf.test.main()
# a = np.zeros((32,32), dtype=np.bool)
# for y, x in np.ndindex(a.shape):
# if x <= y: a[y,x] = True
# b = np.packbits(a.reshape(-1,8)[:,::-1]).view(np.uint32)
# np.unpackbits(b.view(np.uint8))
# b = np.packbits(a.reshape(-1,8)[:,::-1])
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from blocksparse.norms import layer_norm, layer_norm_test, layer_norm_grad_test
import blocksparse.ewops as ew
np.set_printoptions(threshold=8193, linewidth=600, formatter={'int':lambda x: "%4d" % x,'float':lambda x: "%8.6f" % x})
dtypes = [
tf.float32,
tf.float16,
# tf.bfloat16,
]
shapes = [
(4, 32),
(4, 31),
(4, 33),
(64, 32),
(64, 31),
(64, 33),
(512, 1024*8 ),
(512, 1024*4 ),
(512, 1024*1 ),
(512, 112*8 ),
(512, 1024*8-4 ),
(512, 1024*8-1 ),
(512, 1024*8+1 ),
(128, 1024*32-0),
(128, 1024*32-4),
(128, 1024*32-1),
]
one = 0
out = 0
bench = 0
segments = 1
class LayerNormTest(tf.test.TestCase):
def testLayerNorm(self):
# multi-threading screws up benchmarking
conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=conf) as sess, tf.device("/gpu:0"):
for N, K in shapes:
for axis in (0,1):
shape_x = [1,1]
shape_b = [K,K]
shape_x[ axis] = K
shape_x[1-axis] = N
shape_b[1-axis] = 1
if one:
X = np.ones(shape_x, dtype=np.float32)
E = np.ones(shape_x, dtype=np.float32)
G = np.ones(shape_b, dtype=np.float32)
B = np.ones(shape_b, dtype=np.float32)
# for n in range(N):
# X[:,n] = np.arange(K)
else:
# X = np.random.uniform(-1.0, 1.0, shape_x).astype(np.float32)
# E = np.random.uniform(-1.0, 1.0, shape_x).astype(np.float32)
# G = np.random.uniform(-1.0, 1.0, shape_b).astype(np.float32)
# B = np.random.uniform(-1.0, 1.0, shape_b).astype(np.float32)
X = np.random.normal(loc=0.0, scale=1.0, size=shape_x).astype(np.float16).astype(np.float32)
E = np.random.normal(loc=0.0, scale=1.0, size=shape_x).astype(np.float16).astype(np.float32)
G = np.random.normal(loc=0.0, scale=1.0, size=shape_b).astype(np.float16).astype(np.float32)
B = np.random.normal(loc=0.0, scale=1.0, size=shape_b).astype(np.float16).astype(np.float32)
x = tf.placeholder(tf.float32, shape_x, name="x")
e = tf.placeholder(tf.float32, shape_x, name="e")
g = tf.placeholder(tf.float32, shape_b, name="g")
b = tf.placeholder(tf.float32, shape_b, name="b")
feed_dict = { x:X, e:E, g:G, b:B }
for dtype in dtypes:
# just test relu on floats (it's hard to match low precision relu with high precision behavior)
relu = False #dtype is tf.float32
print("K:%d N:%d Axis:%d Relu:%d dtype:%s" % (K, N, axis, relu, dtype.name))
Y = layer_norm_test(X, G, B, axis=axis, segments=segments, relu=relu)
DX, DG, DB = layer_norm_grad_test(E, X, G, B, axis=axis, segments=segments, relu=relu)
y = ew.float_cast(x, dtype=dtype)
y = layer_norm(y, g, b, axis=axis, segments=segments, relu=relu, atomics=False, bench=bench)
y = ew.float_cast(y, dtype=tf.float32, dx_dtype=dtype)
d = tf.gradients(y, [x, g, b], e)
#if bench: sess.run(y) #warmup
y, (dx, dg, db) = sess.run( [y, d], feed_dict=feed_dict )
#y, = sess.run( [y,] )
if bench == 0:
for op, cpuA, devA in (
(" y:", Y, y),
("dx:", DX, dx),
("dg:", DG, dg),
("db:", DB, db),):
difA = abs(cpuA - devA)
avgval = np.average(abs(cpuA))
maxdif = difA.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(difA).sum()) / np.sqrt(np.square(cpuA).sum())
#print("max_err: %5.3f, max_val: %7.3f, l1_err: %7.5f, l2_err: %7.5f" % (difO.max(), cpuO.max(), l1_err, l2_err))
print("%s max_err%%:%10.8f L2_err: %12.10f" % (op, 100*max_err, l2_err))
# rtol = 1e-4 if dtype is tf.float32 else 1e-1
# self.assertAllClose(devA, cpuA, rtol=rtol, atol=rtol)
if out:
np.savetxt("out.txt", difA.reshape((-1,N)), fmt='%7.3f')
np.savetxt("outC.txt", cpuA.reshape((-1,N)), fmt='%7.3f')
np.savetxt("outD.txt", devA.reshape((-1,N)), fmt='%7.3f')
exit()
print("")
if __name__ == "__main__":
tf.test.main()
# 4.87% 308.13us 10 30.812us 30.368us 31.295us void layer_norm_mean_CN<uint2, int=16>(float*, uint2 const *, int, int, float)
# 4.62% 292.79us 10 29.279us 28.448us 29.535us void layer_norm_var_CN<uint2, int=16>(float*, uint2 const *, float4 const *, int, int, float)
# 1.82% 114.94us 10 11.494us 10.816us 11.840us void layer_norm_CN<uint2, int=4>(uint2*, uint2 const *, float4 const *, float4 const *, float const *, float const *, int, int, float, int)
# 3.99% 252.35us 10 25.235us 24.672us 25.696us void layer_norm_dg_db_CN<float4, uint2>(float*, float*, float4 const *, uint2 const *, float const *, float const *, float4 const *, float4 const *, float, int, int, int)
# 6.36% 402.78us 10 40.278us 39.839us 41.120us void layer_norm_dx_sum_CN<float4, uint2, int=16>(float*, float*, float4 const *, uint2 const *, float const *, float const *, float4 const *, float4 const *, float, int, int, int)
# 4.29% 271.81us 10 27.180us 26.784us 27.424us void layer_norm_dx_CN<float4, uint2, int=4>(float4*, float4 const *, uint2 const *, float const *, float const *, float4 const *, float4 const *, float4 const *, float4 const *, float, int, int, float, int)
# 0.36% 22.559us 1 22.559us 22.559us 22.559us void layer_norm_NC<uint2, float4, int=256>(uint2*, float*, float*, uint2 const *, float4 const *, float4 const *, float, int, float, int)
# 0.42% 26.560us 1 26.560us 26.560us 26.560us void layer_norm_dg_db_NC<float, Eigen::half, int=0>(float*, float*, float const *, Eigen::half const *, float const *, float const *, float const *, float const *, float, int, int, int)
# 0.76% 47.871us 1 47.871us 47.871us 47.871us void layer_norm_dx_NC<float4, uint2, float4, int=256>(float4*, float4 const *, uint2 const *, float4 const *, float4 const *, float const *, float const *, float, int, float, int)
# 1.93% 140.58us 10 14.057us 13.632us 14.783us void layer_norm_mean_CN<float4, int=16, int=256>(float*, float4 const *, int, int, float)
# 1.58% 114.91us 10 11.491us 11.167us 12.384us void layer_norm_var_CN<float4, int=16, int=256>(float*, float4 const *, float4 const *, int, int, float)
# 1.67% 121.50us 10 12.150us 11.744us 12.608us void layer_norm_CN<float4, int=4>(float4*, float4 const *, float4 const *, float4 const *, float const *, float const *, int, int, float, int)
# 3.29% 239.10us 10 23.910us 23.295us 24.640us void layer_norm_dg_db_CN<float4, float4>(float*, float*, float4 const *, float4 const *, float const *, float const *, float4 const *, float4 const *, float, int, int, int)
# 3.20% 232.80us 10 23.279us 22.880us 23.648us void layer_norm_dx_CN<float4, float4, int=4>(float4*, float4 const *, float4 const *, float const *, float const *, float4 const *, float4 const *, float4 const *, float4 const *, float, int, int, float, int)
# 3.05% 222.08us 10 22.207us 21.888us 22.784us void layer_norm_dx_sum_CN<float4, float4, int=16, int=256>(float*, float*, float4 const *, float4 const *, float const *, float const *, float4 const *, float4 const *, float, int, int, int)
# 0.86% 62.400us 41 1.5210us 672ns 4.6080us [CUDA memset]
# 0.51% 37.088us 1 37.088us 37.088us 37.088us void layer_norm_NC<float4, float4, int=256>(float4*, float*, float*, float4 const *, float4 const *, float4 const *, float, int, float, int)
# 0.61% 44.639us 1 44.639us 44.639us 44.639us void layer_norm_dx_NC<float4, float4, float4, int=256>(float4*, float4 const *, float4 const *, float4 const *, float4 const *, float const *, float const *, float, int, float, int)
# 0.39% 28.576us 1 28.576us 28.576us 28.576us void layer_norm_dg_db_NC<float, float, int=0>(float*, float*, float const *, float const *, float const *, float const *, float const *, float const *, float, int, int, int)
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
from time import time
shapes = [
# [64, 16, 10, 10, 16, ],
# [64, 16, 10, 6, 32, ],
# [64, 16, 10, 15, 32, ],
# [64, 16, 5, 256, ],
# [64, 16, 6, 32, ],
# [64, 16, 15, 64, ],
# [64, 16, 51, 64, ],
# [64, 16,256, 64, ],
[ 128, 16, 10, 10, 32 ],
[ 128, 16, 10, 15, 64 ],
[ 128, 16, 10, 6, 64 ],
[ 128, 16, 15, 128, ],
[ 128, 16, 5, 512, ],
[ 128, 16, 6, 32, ],
[ 128, 16, 62, 128, ],
]
class BiasReluTest(tf.test.TestCase):
# note: despite the name, this test currently exercises ew.reduce_max rather than a bias+relu op
def testBiasRelu(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shapeX in shapes:
axis = len(shapeX)-2
shapeY = list(shapeX)
shapeY[axis] = 1
np.random.seed(int(time()))
cpuX = np.random.uniform(-2**14, 2**14, shapeX).astype(np.float16).astype(np.float32)
cpuE = np.random.uniform(-2**14, 2**14, shapeY).astype(np.float16).astype(np.float32)
for dtype in (tf.float16, ): #tf.float16, tf.float32
results = []
for device in ("gpu", "cpu"):
cast = device == "gpu" and dtype is not tf.float32
with tf.device("/%s:0" % device), tf.name_scope(device):
x = tf.placeholder(tf.float32, cpuX.shape, name="x")
e = tf.placeholder(tf.float32, cpuE.shape, name="e")
feed_dict = { x : cpuX, e : cpuE }
xf = ew.float_cast(x, dtype=dtype) if cast else x
y = ew.reduce_max(xf, axis=axis, keepdims=True)
if cast:
y = ew.float_cast(y, dtype=tf.float32)
dx, = tf.gradients(y, [x], e)
results.append( sess.run( [ y, dx ], feed_dict ) )
for op, dev, cpu in zip(["y", "dx"], results[0], results[1]):
dif = np.abs(cpu - dev)
sum_err = (dif > .01).sum()
pct_err = 100*sum_err / cpu.size
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("%s, shape:%22s, op:%3s, sum_err: %4d, pct_err: %.4f, l2_err:%17.12f" % (dtype.name, str(cpu.shape), op, sum_err, pct_err, l2_err))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time
import sys
import networkx
import numpy as np
import tensorflow as tf
import blocksparse as bs
np.set_printoptions(threshold=8193, linewidth=600, formatter={'int':lambda x: "%4d" % x,'float':lambda x: "%8.6f" % x})
dtypes = [
#tf.float32,
tf.float16,
#tf.bfloat16,
]
one = 0
out = 0
bench = 0
depth = 1
l2norm = 0
# multi-threading screws up benchmarking
conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
class BlocksparseMatMulTest(tf.test.TestCase):
def atestSparseProj(self):
nhidden = 1024*8
nproj = 1024
N = 64
with self.test_session(config=conf) as sess, tf.device("/gpu:0"):
if one:
X = np.ones((nhidden,N), dtype=np.float32)
Y = np.ones(( nproj,N), dtype=np.float32)
EX = np.ones((nhidden,N), dtype=np.float32)
EY = np.ones(( nproj,N), dtype=np.float32)
else:
X = np.random.uniform(-1.0, 1.0, (nhidden,N)).astype(np.float32)
Y = np.random.uniform(-1.0, 1.0, ( nproj,N)).astype(np.float32)
EX = np.random.uniform(-1.0, 1.0, (nhidden,N)).astype(np.float32)
EY = np.random.uniform(-1.0, 1.0, ( nproj,N)).astype(np.float32)
x = tf.constant(X)
y = tf.constant(Y)
ex = tf.constant(EX)
ey = tf.constant(EY)
sproj = bs.SparseProj(nhidden, nproj)
lut = sproj.gather_lut
SLC = X[lut,:]
ADD = X.copy()
MUL = X.copy()
ADD[lut,:] += Y
MUL[lut,:] *= Y
SLC_DX = np.zeros(x.shape)
SLC_DX[lut,:] = EY
ADD_DX = EX
ADD_DY = EX[lut,:]
MUL_DX = EX.copy()
MUL_DX[lut,:] *= Y
MUL_DY = EX[lut,:] * X[lut,:]
slc_op = sproj.gather(x)
mul_op = sproj.scatter_mul(x, y)
add_op = sproj.scatter_add(x, y)
slc = sess.run( slc_op )
mul = sess.run( mul_op )
add = sess.run( add_op ) # this op overwrites x, run last
slc_dx, = sess.run( bs.gradients(slc_op, [x ], ey) )
add_dx, add_dy = sess.run( bs.gradients(add_op, [x,y], ex) )
mul_dx, mul_dy = sess.run( bs.gradients(mul_op, [x,y], ex) ) # this op overwrites ex, run last
for op, cpuA, devA in (
("slc:", SLC, slc),
("add:", ADD, add),
("mul:", MUL, mul),
("slc_dx:", SLC_DX, slc_dx),
("add_dx:", ADD_DX, add_dx),
("add_dy:", ADD_DY, add_dy),
("mul_dx:", MUL_DX, mul_dx),
("mul_dy:", MUL_DY, mul_dy),
):
difA = abs(cpuA - devA)
avgval = np.average(abs(cpuA))
maxdif = difA.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(difA).sum()) / np.sqrt(np.square(cpuA).sum())
print("%s max_err%%:%11.8f L2_err: %12.10f" % (op, 100*max_err, l2_err))
if out:
np.savetxt("out.txt", difA, fmt='%5.1f')
np.savetxt("outC.txt", cpuA, fmt='%5.1f')
np.savetxt("outD.txt", devA, fmt='%5.1f')
exit()
# def atestBlocksparseMatMulCPU(self):
# n, m = 64*8, 64
# #layout = networkx.generators.barabasi_albert_graph(n, m)
# layout = networkx.generators.random_graphs.watts_strogatz_graph(n, m*2, .2)
# layout = networkx.adjacency_matrix(layout).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
# layout[0:m,0:m] = 1
# blocks = layout.sum()
# print(100 * blocks / n**2)
# print(layout.sum(axis=0).max())
# with self.test_session(config=conf) as sess, tf.device("/cpu:0"):
# for bsize, axis in ( (32,0), (16,0), (8,0) ): # (32,0), (16,0), (8,0)
# layout = np.ones((4*1024//bsize,4*1024//bsize), dtype=np.int32)
# bsmm = bs.BlocksparseMatMul(layout, block_size=bsize, feature_axis=axis, name="test")
# if one:
# W = np.ones(bsmm.w_shape, dtype=np.float32)
# X = np.ones(bsmm.i_shape(1), dtype=np.float32)
# else:
# W = np.random.uniform(-1.0, 1.0, bsmm.w_shape ).astype(np.float32)
# X = np.random.uniform(-1.0, 1.0, bsmm.i_shape(1)).astype(np.float32)
# w = tf.constant(W)
# x = tf.constant(X)
# y = sess.run( bsmm(x, w, bench=bench) )
# #start = time()
# Y = bsmm.fprop_test(X, W)
# #print("np time:", round(time() - start, 2))
# difY = abs(Y - y)
# avgval = np.average(abs(Y))
# maxdif = difY.max()
# max_err = maxdif if avgval == 0 else maxdif / avgval
# l2_err = np.sqrt(np.square(difY).sum()) / np.sqrt(np.square(Y).sum())
# print("cpu max_err%%: %11.8f L2_err: %12.10f" % (100*max_err, l2_err))
def atestBlocksparseMatMulGated(self):
with self.test_session(config=conf) as sess, tf.device("/gpu:0"):
N = 128
K = 8*56*2*4
n = K//8
m = 30
dtype = tf.float32
repeat = 0
dw_gated = False
block_size = 8
layout = networkx.generators.barabasi_albert_graph(n, m)
layout = networkx.adjacency_matrix(layout).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
layout[0:m,0:m] = 1
blocks = layout.sum()
n = layout.shape[0]
print(100 * blocks / n**2)
print(layout.sum(axis=0).max())
# layout = np.ones((112,32), dtype=np.int32)
bsmm = bs.BlocksparseMatMul(layout, block_size=block_size, feature_axis=0, name="test")
if one:
X = np.ones(bsmm.i_shape(N), dtype=np.float32)
E = np.ones(bsmm.o_shape(N), dtype=np.float32)
W = np.ones(bsmm.w_shape , dtype=np.float32)
G = np.ones(bsmm.blocks , dtype=np.float32)
else:
X = np.random.uniform(-1.0, 1.0, bsmm.i_shape(N)).astype(np.float32)
E = np.random.uniform(-1.0, 1.0, bsmm.o_shape(N)).astype(np.float32)
W = np.random.uniform(-1.0, 1.0, bsmm.w_shape ).astype(np.float32)
G = np.random.uniform( 0.0, 1.0, bsmm.blocks ).astype(np.float32)
G = np.ones(bsmm.blocks, dtype=np.float32)
for w, (c, k) in enumerate(bsmm.updat_list):
G[w] = (c & 1) ^ (k & 1) ^ 1
#G[::2] = 0.0
# block = dict()
# for w, (c, k) in enumerate(bsmm.updat_list):
# block[(c,k)] = w
# grid = []
# for c in range(bsmm.CB):
# row = []
# for k in range(bsmm.KB):
# row.append(G[block[(c,k)]])
# grid.append(row)
# for row in grid:
# print(row)
# exit()
x = tf.constant(X)
e = tf.constant(E)
w = tf.constant(W)
g = tf.constant(G)
wf = bs.float_cast(w, dtype=dtype)
xf = bs.float_cast(x, dtype=dtype)
y = bsmm(xf, wf, gate=g, gate_grad=True, dw_gated=dw_gated, bench=repeat)
y = bs.float_cast(y, dtype=tf.float32)
d = bs.gradients(y, [x, w], e)
sess.run( tf.global_variables_initializer() )
y, (dx, dw) = sess.run( [y, d] )
# gpu kernel doesn't touch zero gate blocks
# for b in range(bsmm.blocks):
# if G[b] == 0.0:
# dw[b,:,:] = 0.0
Y = bsmm.fprop_test(X, W, gate=G)
DX = bsmm.bprop_test(E, W, gate=G)
DW = bsmm.updat_test(X, E, gate=G, dw_gated=dw_gated)
#print(Y.shape, dtype)
for op, cpuA, devA in (
(" y:", Y, y),
("dx:", DX, dx),
("dw:", DW, dw),):
difA = abs(cpuA - devA)
avgval = np.average(abs(cpuA))
maxdif = difA.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(difA).sum()) / np.sqrt(np.square(cpuA).sum() + 1e-12)
print("%s max_err%%:%11.8f L2_err: %12.10f" % (op, 100*max_err, l2_err))
if out:
dim = K if op == "dw:" else N
np.savetxt("out.txt", difA.reshape((-1,dim)), fmt='%5.1f')
np.savetxt("outC.txt", cpuA.reshape((-1,dim)), fmt='%5.1f')
np.savetxt("outD.txt", devA.reshape((-1,dim)), fmt='%5.1f')
exit()
def testBlocksparseMatMul(self):
# layout = np.zeros((2,2), dtype=np.int32)
# layout[0,0] = 1
n, m = 160, 5
layout = networkx.generators.barabasi_albert_graph(n, m)
#layout = networkx.generators.random_graphs.watts_strogatz_graph(n, m*2, .5)
layout = networkx.adjacency_matrix(layout).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
layout[0:m,0:m] = 1
#layout[0:60,0:60] = 1
#layout = np.zeros((4,4), dtype=np.int32)
#layout = np.ones((4,4), dtype=np.int32)
#layout[0,0] = 1
#layout = np.ones((1,1), dtype=np.int32)
blocks = layout.sum()
n = layout.shape[0]
print(100 * blocks / n**2)
print(layout.sum(axis=0).max(), layout.sum(axis=0).min())
#exit()
with self.test_session(config=conf) as sess, tf.device("/gpu:0"):
for bsize, axis in ( (32,0), (16,0), (8,0), ): # (32,1), (32,0), (16,0), (8,0)
bsmm = bs.BlocksparseMatMul(layout, block_size=bsize, feature_axis=axis, name="test")
if one:
W = np.ones(bsmm.w_shape, dtype=np.float32)
for w in range(bsmm.blocks):
#c, k = bsmm.block_coord(w)
#if c == k:
W[w] = np.eye(bsmm.bsize, dtype=np.float32)
# W = np.ones(bsmm.w_shape, dtype=np.float32)
# W[:] += np.arange(32, dtype=np.float32).reshape(1,1,32)
else:
# W = np.random.uniform(-1.0, 1.0, bsmm.w_shape).astype(np.float16).astype(np.float32)
W = np.random.normal(loc=0.0, scale=0.01, size=bsmm.w_shape).astype(np.float16).astype(np.float32)
# WW = np.zeros((bsmm.C, bsmm.K), dtype=np.float32)
# for w, (c, k) in enumerate(bsmm.updat_list):
# WW[c*bsize:(c+1)*bsize, k*bsize:(k+1)*bsize] = W[w,:,:]
w = tf.constant(W)
# s1 = sess.run( bsmm.identity_init(gpu=True)(bsmm.w_shape) )
# s2 = bsmm.identity_init(gpu=False)(bsmm.w_shape)
# print("identity_init: ", (s1 - s2).max())
# exit()
for N in (256,128,64,32,16,8,): # 128,64,32,16,1, 256,512,1024,2048,4096, 256,1024,4096,16384
if one:
X = np.ones(bsmm.i_shape(N), dtype=np.float32)
E = np.ones(bsmm.o_shape(N), dtype=np.float32)
# X = np.eye(bsmm.bsize, dtype=np.float32)
# E = np.arange(X.size, dtype=np.float32).reshape(X.shape)
# X[:] += np.arange(X.size, dtype=np.float32).reshape(X.shape)
# X[:] += np.arange(32, dtype=np.float32).reshape(32,1)
# E[:] += np.arange(16, dtype=np.float32).reshape(1,32)
# X[:] += np.arange(64, dtype=np.float32).reshape(1,64)
# E[:] += np.arange(64, dtype=np.float32).reshape(1,64)
else:
# X = np.random.uniform(0.0, 10.0, bsmm.i_shape(N)).astype(np.float16).astype(np.float32)
# E = np.random.uniform(0.0, 10.0, bsmm.o_shape(N)).astype(np.float16).astype(np.float32)
X = np.random.normal(loc=0.0, scale=0.1, size=bsmm.i_shape(N)).astype(np.float16).astype(np.float32)
E = np.random.normal(loc=0.0, scale=0.1, size=bsmm.o_shape(N)).astype(np.float16).astype(np.float32)
x = tf.constant(X)
e = tf.constant(E)
for dtype in dtypes:
print("Axis:%d Bsize:%2d N:%d dtype:%s Params:%d" % (axis, bsize, N, dtype.name, bsize*bsize*blocks))
# compute in tensorflow
if l2norm:
w2 = bsmm.l2_normalize(w, dtype=dtype)
else:
w2 = bs.float_cast(w, dtype=dtype)
y = bs.float_cast(x, dtype=dtype)
for j in range(depth):
repeat = bench if bench and j==depth-1 else 0
y = bsmm(y, w2, bench=repeat) # (bench and j==depth-1) (bench and j==0)
y = bs.float_cast(y, dtype=tf.float32)
#if bench: sess.run( y )
#y = sess.run( y )
with tf.control_dependencies([y.op]):
d = bs.gradients(y, [x, w], e)
if depth > 1:
d[1] = bs.group_param_grads(d[1], 8)
sess.run(tf.global_variables_initializer())
#y, = sess.run( [y] )
y, (dx, dw) = sess.run( [y, d ] )
if not bench:
# compute in numpy
if l2norm:
W2 = bsmm.l2_normalize_test(W)
else:
W2 = W
Ys = [X]
for j in range(depth):
Ys.append(bsmm.fprop_test(Ys[-1], W2))
Y = Ys.pop()
DW = np.zeros(bsmm.w_shape, dtype=np.float32)
DX = E
for j in range(depth):
DW += bsmm.updat_test(Ys.pop(), DX)
DX = bsmm.bprop_test(DX, W2)
if l2norm:
DW = bsmm.l2_normalize_grad_test(W, DW)
for op, cpuA, devA in (
(" y:", Y, y),
("dx:", DX, dx),
("dw:", DW, dw),
):
difA = abs(cpuA - devA)
avgval = np.average(abs(cpuA))
maxdif = difA.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(difA).sum()) / np.sqrt(np.square(cpuA).sum())
#print("max_err: %5.3f, max_val: %7.3f, l1_err: %7.5f, l2_err: %7.5f" % (difO.max(), cpuO.max(), l1_err, l2_err))
print("%s max_err%%:%11.8f L2_err: %12.10f" % (op, 100*max_err, l2_err))
# rtol = 1e-4 if dtF is tf.float32 else 1e-1
# self.assertAllClose(devA, cpuA, rtol=rtol, atol=rtol)
if out:
np.savetxt("out.txt", difA.reshape((-1,cpuA.shape[-1])), fmt='%4.0f')
np.savetxt("outC.txt", cpuA.reshape((-1,cpuA.shape[-1])), fmt='%4.0f')
np.savetxt("outD.txt", devA.reshape((-1,cpuA.shape[-1])), fmt='%4.0f')
exit()
print("")
if __name__ == "__main__":
#print(sys.argv)
tf.test.main() #argv=["blocksparse_matmul_test.py","BlocksparseMatMulTest"]
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
import blocksparse.transformer as trans
from tensorflow.python.ops import gradient_checker
out = 0
bench = 0
shapes = [
( 2, 2, 1024, 1024),
( 4, 4, 768, 768),
( 4, 4, 544, 544),
( 4, 4, 512, 512),
( 8, 8, 256, 256),
(16, 16, 128, 128),
(32, 32, 64, 64),
(64, 64, 32, 32),
# (1, 2, 1024, 1024),
# (1, 2, 512, 512),
# (1, 2, 256, 256),
# (1, 2, 128, 128),
# (1, 2, 64, 64),
# (1, 2, 32, 32),
# (1, 2, 1024, 1024-1),
# (1, 2, 512, 512+1),
# (1, 2, 256, 256+1),
# (1, 2, 128, 128+1),
# (1, 2, 64, 64+1),
# (1, 2, 32, 32+1),
]
class TopKTest(tf.test.TestCase):
def testTopK(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
for shape in shapes:
topK = shape[-1] // 4 # 25% sparsity
np.random.seed(int(time()))
cpuX = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
X = tf.placeholder(tf.float32, cpuX.shape)
E = tf.placeholder(tf.float32, cpuE.shape)
for mask_dims in (0, 2, 3):
if mask_dims == 0:
mask = M = m_shape = None
feed_dict = { X: cpuX, E: cpuE }
else:
m_shape = [1 for n in shape]
m_shape[-mask_dims:] = shape[-mask_dims:]
mask = np.zeros(m_shape, dtype=np.float32)
if mask_dims == 2:
for y, x in np.ndindex(mask.shape[-2:]):
if x <= y: mask[:,:,y,x] = 3.0
elif mask_dims == 3:
for z, y, x in np.ndindex(mask.shape[-3:]):
if x <= y: mask[:,z,y,x] = (z+1)*3.0
M = tf.placeholder(tf.float32, mask.shape)
feed_dict = { X: cpuX, E: cpuE, M: mask }
for dtype in (tf.float32, ): #tf.float16, tf.bfloat16
rtol = 1e-4 if dtype is tf.float32 else 1e-1
Y = ew.float_cast(X, dtype=dtype)
#Y = trans.masked_top_k_softmax(Y, topK, mask=M, scale=2.0)
Y = trans.masked_softmax(Y, mask=M, scale=2.0, bench=bench)
Y = ew.float_cast(Y, dtype=tf.float32, dx_dtype=dtype)
D = tf.gradients(Y, [X], E)
#devY, = sess.run( [Y], feed_dict)
devY, (devDX,) = sess.run( [Y, D], feed_dict)
#devY, (devDX,), tfY = sess.run( [Y, D, tf.nn.top_k(X, topK)], feed_dict)
# gradient_checker tests are insanely slow
# if True:
# x = tf.constant(cpuX)
# m = tf.constant(mask)
# y = trans.masked_top_k_softmax(x, topK, mask=m)
# error = gradient_checker.compute_gradient_error(x, shape, y, shape) #, extra_feed_dict={ x: cpuX, m: mask }
# assert error < 0.01, error
if bench == 0:
# cpuY = trans.masked_top_k_softmax_test(cpuX, topK, mask=mask, scale=2.0)
# cpuDX = trans.masked_softmax_grad_test(cpuE, cpuY, mask=mask, scale=2.0)
cpuY = trans.masked_softmax_test(cpuX, mask=mask, scale=2.0)
cpuDX = trans.masked_softmax_grad_test(cpuE, cpuY, mask=mask, scale=2.0)
difY = np.abs(cpuY - devY)
difDX = np.abs(cpuDX - devDX)
cntY = (difY > rtol).astype(np.int).sum() / difY.size
cntDX = (difDX > rtol).astype(np.int).sum() / difDX.size
print("%s, shape:%18s, mask:%18s, errY:%.5f, errDX:%.5f" % (dtype.name, str(shape), str(m_shape), cntY, cntDX))
if out:
np.savetxt( "cpuY.txt", cpuY.reshape(-1,shape[-1]), fmt="%6.3f")
np.savetxt( "devY.txt", devY.reshape(-1,shape[-1]), fmt="%6.3f")
np.savetxt("cpuDX.txt", cpuDX.reshape(-1,shape[-1]), fmt="%6.3f")
np.savetxt("devDX.txt", devDX.reshape(-1,shape[-1]), fmt="%6.3f")
np.savetxt("difDX.txt", difDX.reshape(-1,shape[-1]), fmt="%6.3f")
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
import math
#from tensorflow.python.ops import gradient_checker
ones = 0
out = 0
def gelu(x):
return 0.5 * x * (1.0 + tf.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * tf.pow(x, 3.0))))
def swish(x):
return x * tf.nn.sigmoid(x)
def fast_gelu(x):
return x * tf.nn.sigmoid(1.702 * x)
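# fast_gelu is the sigmoid approximation to the tanh-based gelu above; the two
# agree closely near the origin. A rough sanity check (values rounded):
#   x = 1.0 -> gelu ~ 0.841, fast_gelu ~ 0.846, swish ~ 0.731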
class EwOpsTest(tf.test.TestCase):
def testEwOps(self):
with self.test_session() as sess, tf.device("/gpu:0"):
for shape in ( (32,1024), ): # (31,31*4), (11,1023), (33,33),
for dtypeF, dtypeB in ((np.float16, np.float16), (np.float32, np.float32) ): #, (np.float32, np.float32), (np.float16, np.float16), (np.float16, np.float32),
dtypeF = np.dtype(dtypeF) # Forward
dtypeB = np.dtype(dtypeB) # Backwards
rtol = 1e-4 if dtypeF.type is np.float32 else 1e-1
with tf.name_scope("S%dx%dF%dB%d" % (shape[0], shape[1], dtypeF.itemsize, dtypeB.itemsize)):
if ones:
np_X = np.ones(shape, dtype=np.float32)
np_Y = np.ones(shape, dtype=np.float32)
np_E = np.ones(shape, dtype=np.float32)
np_B = np.ones((1,shape[1]), dtype=np.float32)
else:
# np_X = np.random.normal(0.0, 10.0, shape).astype(dtypeF).astype(np.float32)
# np_E = np.random.normal(0.0, 10.0, shape).astype(dtypeF).astype(np.float32)
# np_X.fill(10.0)
np_X = np.random.uniform(0.01, 1.0, shape).astype(dtypeF).astype(np.float32)
np_Y = np.random.uniform(0.01, 1.0, shape).astype(dtypeF).astype(np.float32)
np_E = np.random.uniform(0.01, 1.0, shape).astype(dtypeB).astype(np.float32)
np_B = np.random.uniform(0.01, 1.0, (1,shape[1])).astype(np.float32)
x = tf.constant(np_X.astype(dtypeF))
y = tf.constant(np_Y.astype(dtypeF))
e = tf.constant(np_E.astype(dtypeB))
b = tf.constant(np_B)
X = tf.constant(np_X)
Y = tf.constant(np_Y)
E = tf.constant(np_E)
B = tf.constant(np_B)
tests = list()
# xx = tf.ones(shape, dtype=tf.float32)
# ee = tf.ones(shape, dtype=tf.float32)
# ew_op1 = ew.dropout(xx, keep_prob=0.5, scale=2.0)
# ew_op2 = ew.dropout(xx, mask=ew_op1[1], scale=2.0)
# dx_op = tf.gradients(ew_op1[0], [xx], ee)
# (z1, m), z2, (dx,) = sess.run( [ew_op1, ew_op2, dx_op] )
# #print(dx[0,0:8])
# print(z1.sum()/z1.size, dx.sum()/dx.size, (z1 - z2).sum(), (z1 - dx).sum())
# z = sess.run( ew.sparse_relu(x) )
# Z = ew.sparse_relu_test(np_X)
# tests.append(("sps_relu: Z ", Z, z))
# Non-Broadcast Binary Ops
for name, tf_op, ew_op in (
(" add", tf.add, ew.add ),
(" mul", tf.multiply, ew.multiply ),
(" sub", tf.subtract, ew.subtract ),
(" div", tf.divide, ew.divide ),
(" max", tf.maximum, ew.maximum ),
(" min", tf.minimum, ew.minimum ),):
# I think tf doesn't use fmaxf/fminf and hence has different behaviour for equal numbers.
# In fp32 the chance for equality is very small, but not so in fp16
if name[-3:] in ("max","min") and dtypeF.type is np.float16:
continue
tf_op = tf_op(X,Y)
ew_op = ew_op(x,y)
Z, z = sess.run( [tf_op, ew_op] )
DX, DY = sess.run( tf.gradients(tf_op, [X,Y], E) )
dx, dy = sess.run( tf.gradients(ew_op, [x,y], e) )
tests.append((name+": Z ", Z, z))
tests.append((name+": DX", DX, dx))
tests.append((name+": DY", DY, dy))
for name, tf_op, ew_op in (
(" add_n", tf.add_n, ew.add_n8_op),):
tf_op2 = tf_op([X,Y])
ew_op2 = ew_op([x,y])
tf_op3 = tf_op([X,Y,E])
ew_op3 = ew_op([x,y,e])
Z2, z2 = sess.run( [tf_op2, ew_op2] )
Z3, z3 = sess.run( [tf_op3, ew_op3] )
tests.append((name+": Z2", Z2, z2))
tests.append((name+": Z3", Z3, z3))
# Unary Ops
for name, tf_op, ew_op in (
(" sig", tf.sigmoid, ew.sigmoid ),
(" tanh", tf.tanh, ew.tanh ),
(" neg", tf.negative, ew.negative, ),
(" rcp", tf.reciprocal, ew.reciprocal, ),
(" sqr", tf.square, ew.square, ),
(" sqrt", tf.sqrt, ew.sqrt, ),
(" exp", tf.exp, ew.exp, ),
(" log", tf.log, ew.log, ),
(" relu", tf.nn.relu, ew.relu, ),
(" elu", tf.nn.elu, ew.elu, ),
(" gelu", gelu, ew.gelu, ),
(" swish", swish, ew.swish, ),
("fast_gelu", fast_gelu, ew.fast_gelu, ),):
tf_op = tf_op(X)
ew_op = ew_op(x)
Z, z = sess.run( [tf_op, ew_op] )
DX, = sess.run( tf.gradients(tf_op, [X], E) )
dx, = sess.run( tf.gradients(ew_op, [x], e) )
tests.append((name+": Z ", Z, z))
tests.append((name+": DX", DX, dx))
# Broadcast Binary Ops
for name, tf_op, ew_op in (
("bias_add", tf.add, ew.add, ),
("bias_mul", tf.multiply, ew.multiply),):
tf_op = tf_op(X,B)
ew_op = ew_op(x,b)
Z, z = sess.run( [tf_op, ew_op] )
DX, DB = sess.run( tf.gradients(tf_op, [X,B], E) )
dx, db = sess.run( tf.gradients(ew_op, [x,b], e) )
tests.append((name+": Z ", Z, z))
tests.append((name+": DX", DX, dx))
tests.append((name+": DB", DB, db))
# Up Cast
ew_op = ew.float_cast(x, dtype=tf.float32, dx_dtype=dtypeB.type)
z = sess.run( ew_op )
dx, = sess.run( tf.gradients(ew_op, [x], e) )
tests.append((" upCast: Z ", np_X, z))
tests.append((" upCast: DX", np_E, dx))
#Down Cast
if dtypeF.type is np.float32:
Z = np_X.astype(np.float16)
DX = np_E.astype(np.float16)
e16 = tf.constant(DX)
ew_op = ew.float_cast(x, dtype=tf.float16)
z = sess.run( ew_op )
dx, = sess.run( tf.gradients(ew_op, [x], e16) )
tests.append(("downCast: Z ", Z, z ))
tests.append(("downCast: DX", DX, dx))
for op, tfT, ewT in (tests):
dif = tfT - ewT
avgval = abs(tfT).sum() / tfT.size
maxdif = abs(dif).max()
ratio = maxdif / avgval
print("dtypeF:f%d, dtypeB:f%d, shape:%s, op:%s err:%17.12f" % (dtypeF.itemsize, dtypeB.itemsize, str(shape), op, ratio))
# print(ewT[0,0,:,:])
# print(tfT[0,0,:,:])
# exit()
if out: # and ratio > 1.0:
np.savetxt("out.txt", dif, fmt='%5.2f')
np.savetxt("outC.txt", tfT, fmt='%5.2f')
np.savetxt("outD.txt", ewT, fmt='%5.2f')
exit()
#self.assertAllClose(cpuT, ewT, rtol=rtol, atol=rtol)
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
shapes = [
# [ 4, 4 ],
# [ 60, 60 ],
# [ 64, 64 ],
# [ 64, 256 ],
# [ 256, 64 ],
[ 4096, 4096*8 ],
]
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
class TransposeTest(tf.test.TestCase):
def testTranspose(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
for shape in shapes:
cpuX = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
x = tf.placeholder(tf.float32, shape, name="x")
for dtype in (tf.float16, tf.float32): #tf.float16, tf.float32
xf = bs.float_cast(x, dtype=dtype)
y = bs.transpose_2d(xf)
y = bs.float_cast(y, dtype=tf.float32)
Y = tf.transpose(xf)
Y = bs.float_cast(Y, dtype=tf.float32)
y, Y = sess.run( [ y, Y ], feed_dict={ x : cpuX } )
dif = np.abs(Y - y)
avgval = np.average(abs(Y))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(Y).sum())
print("%s, shape:%16s, err:%17.12f, l2_err:%17.12f" % (dtype.name, str(shape), max_err, l2_err))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from tensorflow.python.ops import gradient_checker
def ceil_div(x, y):
return -(-x // y)
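# e.g. ceil_div(1023, 32) == 32; used below to round the dropout mask up to
# whole 32-bit words before bit-packing.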
shapes = [
# [ [32, 32], [ [32, 1] ] ],
[ [ 64,], [ None, ] ],
[ [1024,], [ None, ] ],
[ [1023,], [ None, ] ],
[ [1024, 128], [ [1024, 1], [1, 128], None ] ],
[ [1023, 127], [ [1023, 1], [1, 127], None ] ],
[ [64, 64, 64], [ [64, 64, 1], [64, 1, 64], [1,64,64], [1,64,1], [64,1,1], [1,1,64], [1,1,1], None ] ],
[ [63, 63, 63], [ [63, 63, 1], [63, 1, 63], [1,63,63], [1,63,1], [63,1,1], [1,1,63], [1,1,1], None ] ],
[ [16,16,16,16,16], [ [16,16,16,16,1], None ] ],
]
class DropoutTest(tf.test.TestCase):
def testDropout(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
bs.set_entropy()
sess.run(tf.global_variables_initializer())
# with tf.device("/gpu:0"):
# x = tf.ones([10000])*-10.0
# g = bs.concrete_gate(x)
# g = sess.run(g)
# print(g.sum()/g.size)
# error = gradient_checker.compute_gradient_error(x, x.shape, g, g.shape) #, extra_feed_dict={ x: cpuX, m: mask }
# print(error)
for dtype in (tf.float16, ): #tf.float16, tf.bfloat16
for x_shape, mask_shapes in shapes:
for mask_shape in mask_shapes:
m_shape = x_shape if mask_shape is None else mask_shape
cpuO = np.ones(x_shape, dtype=np.float32)
cpuX = np.random.uniform(-1.0, 1.0, x_shape).astype(np.float16).astype(np.float32)
cpuM = np.random.randint(0, 2, size=m_shape, dtype=np.bool)
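# pack the boolean mask into 32-bit words: pad to a multiple of 32 bits,
# reverse each group of 8 bits so MSB-first packbits yields LSB-first bytes,
# then view as int32 (assumed to match the dropout kernel's bit layout)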
mask = np.zeros(ceil_div(cpuM.size, 32)*32, dtype=np.bool)
mask[:cpuM.size] = cpuM.reshape(-1)
mask = np.packbits(mask.reshape(-1,8)[:,::-1]).view(np.int32)
cpuY = cpuX * cpuM.astype(np.float32) * 2.0
with tf.device("/gpu:0"):
x = tf.placeholder(tf.float32, cpuX.shape)
m = tf.placeholder(tf.int32, mask.shape)
xf = bs.float_cast(x, dtype=dtype)
y, _ = bs.dropout(xf, keep_prob=0.5, mask=m, mask_shape=mask_shape)
y = bs.float_cast(y, dtype=tf.float32)
devY, = sess.run( [y,], feed_dict={ x: cpuX, m: mask } )
xf = bs.float_cast(x, dtype=dtype)
y, _ = bs.dropout(xf, keep_prob=0.8, mask_shape=mask_shape)
y = bs.float_cast(y, dtype=tf.float32)
devO, = sess.run( [y,], feed_dict={ x: cpuO } )
diff = np.abs(devY - cpuY)
print("dype: %8s x_shape: %-20s m_shape: %-20s err: %4.2f norm_sum: %4.2f" % ( dtype.name, str(x_shape), str(mask_shape), diff.sum(), devO.sum()/devO.size ))
#np.savetxt( "diff.txt", diff, fmt="%4.2f")
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from blocksparse.optimize import adam_op
ones = 0
out = 0
beta1 = 0.8
beta2 = 0.5
learn_rate = 0.5
grad_scale = 1.0
clip_thresh = 1.0
clip_norm = 1.0
clip_sigma = 0.0
epsilon = 1e-8
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
class AdafactorTest(tf.test.TestCase):
# note: despite the class name, this test exercises the fused adam_op directly
def testAdafactor(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
for dtype in (tf.float32, tf.float16): # tf.float16
for shape in (
(1,),
(3,),
(127,),
(1,1024),
(1023,1024),
(1024,1024),
):
if ones:
G = np.ones( shape, dtype=np.float32)
P = np.ones( shape, dtype=np.float32)
M = np.zeros(shape, dtype=np.float32)
V = np.zeros(shape, dtype=np.float32)
else:
G = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
P = np.random.uniform(-1.0, 1.0, shape).astype(np.float16).astype(np.float32)
M = np.random.uniform( 0.0, 1.0, shape).astype(np.float16).astype(np.float32)
V = np.random.uniform( 0.0, 1.0, shape).astype(np.float16).astype(np.float32)
g = tf.placeholder(tf.float32, G.shape)
p = tf.Variable(initial_value=P, name="p")
m = tf.Variable(initial_value=M, name="m")
v = tf.Variable(initial_value=V, name="v")
sess.run( tf.global_variables_initializer() )
g = bs.float_cast(g, dtype=dtype)
global_norm, norm_scale = bs.clip_by_global_norm([g], grad_scale=grad_scale, clip_norm=clip_norm)
p, m, v = sess.run(
adam_op(
g, p, m, v, learn_rate, grad_scale, clip_sigma, [norm_scale], [],
decay_mean=beta1, decay_var=beta2, epsilon=epsilon),
feed_dict={ g: G } )
GN = np.sqrt(np.sum(np.square(G*grad_scale), keepdims=True))
NS = clip_norm / np.maximum(GN, clip_norm)
G *= NS * grad_scale
M = beta1 * M + (1.0 - beta1) * G
V = beta2 * V + (1.0 - beta2) * G*G
P -= learn_rate * M / (np.sqrt(V) + epsilon)
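# the reference above is the raw Adam update (no bias correction), mirroring
# the decay_mean/decay_var/epsilon arguments passed to adam_op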
print("testAdam", dtype, GN, NS)
for op, dev, cpu in [
[ "M", m, M ],
[ "V", v, V ],
[ "P", p, P ],
]:
self.compare_results(op, dev, cpu)
def compare_results(self, op, dev, cpu):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("op:%3s, err:%17.12f, l2_err:%17.12f shape:%14s" % (op, maxdif, l2_err, str(cpu.shape)))
if out:
np.savetxt("out_%s_dif.txt"%op, dif, fmt='%6.3f')
np.savetxt("out_%s_cpu.txt"%op, cpu, fmt='%6.3f')
np.savetxt("out_%s_gpu.txt"%op, dev, fmt='%6.3f')
if __name__ == "__main__":
tf.test.main()
# a = np.zeros((32,32), dtype=np.bool)
# for y, x in np.ndindex(a.shape):
# if x <= y: a[y,x] = True
# b = np.packbits(a.reshape(-1,8)[:,::-1]).view(np.uint32)
# np.unpackbits(b.view(np.uint8))
# b = np.packbits(a.reshape(-1,8)[:,::-1])
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
from time import time
shapes = [
[ 128, 16, 149, ],
[ 128, 16, 30, ], # int32
[ 128, 16, 21, ],
[ 128, 16, 9, ],
[ 128, 16, 4, ],
[ 128, 16, 5, 128 ],
[ 128, 16, 6, 128 ],
[ 128, 16, 62, 128 ],
]
class FancyGatherTest(tf.test.TestCase):
def testFancyGather(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shape in shapes:
idx_shape = shape[0:2]
idx_dim = shape[2]
out_shape = idx_shape + shape[3:]
for dtype in (tf.float32, ): #tf.float16, tf.bfloat16
#rtol = 1e-4 if dtype is tf.float32 else 1e-1
#tf.reset_default_graph()
np.random.seed(int(time()))
cpuX = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
cpuA = np.random.randint(0, idx_dim, size=idx_shape, dtype=np.int32)
cpuE = np.random.uniform(-1.0, 1.0, out_shape).astype(np.float32)
with tf.device("/gpu:0"):
x = tf.placeholder(tf.float32, cpuX.shape)
a = tf.placeholder(tf.int32, cpuA.shape)
e = tf.placeholder(tf.float32, cpuE.shape)
feed_dict = { x: cpuX, a: cpuA, e: cpuE }
xf = ew.float_cast(x, dtype=dtype)
y = ew.float_cast(ew.fancy_gather(xf, a), dtype=tf.float32, dx_dtype=dtype)
devY, (devB,) = sess.run( [y, tf.gradients(y, [x], e)], feed_dict )
y = ew.fancy_gather(x, a, use_tf=True)
cpuY, (cpuB,) = sess.run( [y, tf.gradients(y, [x], e)], feed_dict )
for op, devT, cpuT in (
( "devY", devY, cpuY ),
( "devB", devB, cpuB )):
difA = np.abs(cpuT - devT)
maxdif = difA.max()
sumerr = (difA > .001).sum()
poserr = np.argmax(np.abs(difA).reshape(-1))
print("%s, shape:%22s, op:%s, err:%17.12f, sum_err: %d, pos_err:%d" % (dtype.name, str(shape), op, maxdif, sumerr, poserr))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import function
from blocksparse.embed import embedding_lookup
import blocksparse.ewops as ew
from time import time
shapes = [
[ [ 39, 16], [512, 16, 149] ],
[ [ 5, 64], [512, 16, 1] ],
[ [ 4, 64], [512, 16, 1] ],
[ [ 93, 16], [512, 16, 139] ],
[ [ 268, 8], [512, 16, 6] ],
[ [1506, 16], [512, 16, 100] ],
[ [ 723, 32], [512, 16, 60] ],
[ [ 260, 32], [512, 16, 150] ],
[ [ 19, 256], [512, 16, 5] ],
[ [ 657, 64], [512, 16, 5, 30] ],
[ [ 657, 128], [512, 16, 5, 30] ],
[ [ 1, 1], [1] ],
[ [ 32*1024, 1024], [ 1, 1024] ],
[ [ 32*1024, 1024], [ 8, 1024] ],
[ [ 32*1024, 1024], [16, 1024] ],
[ [ 32*1024, 1024], [32, 1024] ],
]
bench = 0
class EmbeddingLookupTest(tf.test.TestCase):
def testEmbeddingLookup(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shapeW, shapeI in shapes:
C = shapeW[0]
shapeY = shapeI + shapeW[1:]
np.random.seed(int(time()))
cpuI = np.random.randint(0, C, size=shapeI, dtype=np.int32)
cpuW = np.random.uniform(-1.0, 1.0, shapeW).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, shapeY).astype(np.float32)
for dtype in (tf.float32, tf.float16, ): #tf.float16, tf.float32
for sort in (True, False):
results = []
for device in ("gpu", "cpu"):
if bench and device == "cpu":
break
castW = device == "gpu" and dtype is not tf.float32
if castW:
if C <= 256:
castI = tf.uint8
elif C <= 65536:
castI = tf.uint16
else:
castI = None
else:
castI = None
with tf.device("/%s:0" % device), tf.name_scope(device):
i = tf.placeholder(tf.int32, cpuI.shape, name="i")
w = tf.placeholder(tf.float32, cpuW.shape, name="w")
e = tf.placeholder(tf.float32, cpuE.shape, name="e")
feed_dict = { i : cpuI, w : cpuW, e : cpuE }
wf = ew.float_cast(w, dtype=dtype) if castW else w
i = tf.cast(i, dtype=castI) if castI is not None else i
y = embedding_lookup(wf, i, sort_grad=sort, bench=bench)
if castW:
y = ew.float_cast(y, dtype=tf.float32)
dw, = tf.gradients(y, [w], e)
results.append( sess.run( [ y, dw ], feed_dict ) )
if not bench:
for op, dev, cpu in zip(["y", "dw"], results[0], results[1]):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("%s, shape:%22s, op:%3s, err:%17.12f, l2_err:%17.12f" % (dtype.name, str(cpu.shape), op, max_err, l2_err))
if __name__ == "__main__":
tf.test.main()
# @function.Defun(
# python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
# shape_func=lambda op: [op.inputs[0].get_shape()])
# def convert_gradient_to_tensor(x):
# return x
# np_x = np.random.randint(0, 64, size=[512, 16, 150], dtype=np.int32)
# np_w = np.random.uniform(-1.0, 1.0, [64, 32]).astype(np.float32)
# np_e = np.random.uniform(-1.0, 1.0, [512, 16, 150, 32]).astype(np.float32)
# with tf.Session() as sess, tf.device("/gpu:0"):
# x = tf.placeholder(tf.int32, np_x.shape, name="x")
# w = tf.placeholder(tf.float32, np_w.shape, name="w")
# e = tf.placeholder(tf.float32, np_e.shape, name="e")
# feed_dict = {
# x : np_x,
# w : np_w,
# e : np_e,
# }
# #y = tf.nn.embedding_lookup(w, x)
# wf = ew.float_cast(w, dtype=tf.float16)
# y = tf.gather(convert_gradient_to_tensor(wf), x)
# y = ew.float_cast(y, dtype=tf.float32)
# dw = tf.gradients(y, [w], e)[0]
# y, dw = sess.run([y, dw], feed_dict)
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from random import shuffle
from tensorflow.python.ops import gradient_checker
from blocksparse.conv import BlocksparseConv, BlocksparseDeconv
from blocksparse.norms import batch_norm, batch_norm_inference, batch_norm_inf_test, batch_norm_test, batch_norm_grad_test
ones = 0
out = 0
debug = 0
# Blocks can be any rectangular size.
# Blocks can be uniform or non-uniform in size
# Blocks can overlap in C and/or K dim (or not)
# c and k values can be entirely random
B = 4
blockC = 32
blockK = 48
BCK_diagonal = [
[
[b*blockC + c for c in range(blockC)],
[b*blockK + k for k in range(blockK)],
] for b in range(B)
]
B = 8
overlapC = 8
overlapK = 16
blockC = 16
blockK = 32
BCK_overlap = [
[
[b*overlapC + c for c in range(blockC)],
[b*overlapK + k for k in range(blockK)],
] for b in range(B)
]
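# Hedged sketch of the BCK spec format (the values below are a hand-expanded
# toy case, not used by the tests): each block pairs the input channels c with
# the output channels k it connects. With B=2, blockC=2, blockK=3 the diagonal
# pattern expands to
#   [ [[0, 1], [0, 1, 2]],
#     [[2, 3], [3, 4, 5]] ]
# and with overlapC=1, overlapK=2, blockC=2, blockK=3 neighboring blocks share
# channels:
#   [ [[0, 1], [0, 1, 2]],
#     [[1, 2], [2, 3, 4]] ]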
configs = [
dict(clss=BlocksparseConv, BCK=BCK_diagonal, TRS=(1,1,1), DHW=(1,1,32), dilates=(1,1,1), strides=(1,1,1), padding="VALID",),
dict(clss=BlocksparseConv, BCK=BCK_diagonal, TRS=(1,1,3), DHW=(1,1,32), dilates=(1,1,1), strides=(1,1,2), padding="SAME", ),
dict(clss=BlocksparseConv, BCK=BCK_diagonal, TRS=(1,1,5), DHW=(1,1,32), dilates=(1,1,1), strides=(1,1,2), padding="SAME", ),
dict(clss=BlocksparseConv, BCK=BCK_overlap, TRS=(1,1,3), DHW=(1,1,32), dilates=(1,1,2), strides=(1,1,1), padding="SAME", ),
dict(clss=BlocksparseConv, BCK=BCK_diagonal, TRS=(1,1,3), DHW=(1,1,32), dilates=(1,1,1), strides=(1,1,2), padding="SAME", ),
dict(clss=BlocksparseConv, BCK=BCK_diagonal, TRS=(1,3,3), DHW=(1,8, 8), dilates=(1,1,1), strides=(1,1,1), padding="SAME", ),
dict(clss=BlocksparseConv, BCK=BCK_overlap, TRS=(1,3,3), DHW=(1,8, 8), dilates=(1,1,1), strides=(1,1,1), padding="VALID",),
dict(clss=BlocksparseConv, BCK=BCK_diagonal, TRS=(3,3,3), DHW=(4,4, 4), dilates=(1,1,1), strides=(1,1,1), padding="SAME", ),
dict(clss=BlocksparseDeconv, BCK=BCK_diagonal, TRS=(1,1,3), DHW=(1,1,32), dilates=(1,1,1), strides=(1,1,2), padding="SAME", ),
]
#def batch_norm_inf_test(x, g, b, m, v, epsilon=1e-12):
#def batch_norm_inference(x, g, b, m, v, epsilon=1e-12):
class BlocksparseConvTest(tf.test.TestCase):
def testBlocksparseConv(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
with self.test_session(config=config) as sess:
with tf.device("/gpu:0"):
count = 0
for config in configs:
config["debug"] = debug
count += 1
name = "test" + str(count)
print("")
print(name)
with tf.name_scope(name):
clss = config.pop("clss")
bs_conv_op = clss(**config)
for dtypeF, dtypeB in ((np.float32, np.float32), ): #, (np.float16, np.float32)
dtypeF = np.dtype(dtypeF) # Forward + Weights
dtypeB = np.dtype(dtypeB) # Backwards
rtol = 1e-4 if dtypeF.type is np.float32 else 1e-1
with tf.name_scope("F%dB%d" % (dtypeF.itemsize, dtypeB.itemsize)):
K = bs_conv_op.o_shape(1)[1]
if ones:
cpuF = [ np.ones(bs_conv_op.f_shape(b), dtype=np.float32) for b in range(bs_conv_op.blocks) ]
cpuEF = [ np.ones(bs_conv_op.f_shape(b), dtype=np.float32) for b in range(bs_conv_op.blocks) ]
cpuG = np.ones(K, dtype=np.float32)
cpuB = np.ones(K, dtype=np.float32)
else:
cpuF = [ np.random.uniform(-1.0, 1.0, bs_conv_op.f_shape(b)).astype(np.float32) for b in range(bs_conv_op.blocks) ]
cpuEF = [ np.random.uniform(-1.0, 1.0, bs_conv_op.f_shape(b)).astype(np.float32) for b in range(bs_conv_op.blocks) ]
cpuG = np.random.uniform(-1.0, 1.0, (K,)).astype(np.float32)
cpuB = np.random.uniform(-1.0, 1.0, (K,)).astype(np.float32)
devF = tf.constant(bs_conv_op.collapse_filter(cpuF, dtypeF))
devEF = tf.constant(bs_conv_op.collapse_filter(cpuEF, dtypeB))
devG = tf.constant(cpuG)
devB = tf.constant(cpuB)
for N in [1,2,28,]: #
with tf.name_scope("N%d" % N):
if ones:
cpuI = np.ones(bs_conv_op.i_shape(N), dtype=np.float32)
cpuE = np.ones(bs_conv_op.o_shape(N), dtype=np.float32)
cpuA = np.ones(bs_conv_op.o_shape(N), dtype=np.float32)
else:
cpuI = np.random.uniform(-1.0, 1.0, bs_conv_op.i_shape(N)).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, bs_conv_op.o_shape(N)).astype(np.float32)
cpuA = np.random.uniform(-1.0, 1.0, bs_conv_op.o_shape(N)).astype(np.float32)
devI = tf.constant(cpuI.astype(dtypeF))
devE = tf.constant(cpuE.astype(dtypeB))
devA = tf.constant(cpuA.astype(dtypeF))
C = cpuI.shape[1]
tests = list()
# Conv and edge bias
cpuO = bs_conv_op.fprop_test(cpuF, cpuI)
cpuZ = bs_conv_op.bprop_test(cpuF, cpuE)
cpuU = bs_conv_op.updat_test(cpuE, cpuI)
op = bs_conv_op(devF, devI)
devO = sess.run( op )
devZ, devU = sess.run( tf.gradients(op, [devI, devF], devE) )
tests.append( ("conv fprop", devO, cpuO, N*K) )
tests.append( ("conv bprop", devZ, cpuZ, N*C) )
tests.append( ("conv updat", devU, cpuU, 1) )
# L2 Norm without Gain
if bs_conv_op.overlapK:
cpuO = bs_conv_op.l2_normalize_test(cpuF)
cpuZ, _ = bs_conv_op.l2_normalize_grad_test(cpuF, cpuEF)
op = bs_conv_op.l2_normalize(devF, dtype=dtypeF)
devO = sess.run( op )
devZ, = sess.run( tf.gradients(op, [devF], devEF) )
tests.append( ("l2 fprop", devO, cpuO, 1) )
tests.append( ("l2 bprop", devZ, cpuZ, 1) )
# L2 Norm with Gain
else:
cpuO = bs_conv_op.l2_normalize_test(cpuF, gain=cpuG)
cpuZ, cpuDG = bs_conv_op.l2_normalize_grad_test(cpuF, cpuEF, gain=cpuG)
op = bs_conv_op.l2_normalize(devF, gain=devG, dtype=dtypeF)
devO = sess.run( op )
devZ, devDG = sess.run( tf.gradients(op, [devF, devG], devEF) )
tests.append( ("l2g fprop", devO, cpuO, 1) )
tests.append( ("l2g bprop", devZ, cpuZ, 1) )
tests.append( ("l2g dgain", devDG, cpuDG, K) ) #bs_conv_op.f_shape
# error = gradient_checker.compute_gradient_error(devF, devF.get_shape().as_list(), op, devF.get_shape().as_list())
# print(error)
# assert error < 0.01
# error = gradient_checker.compute_gradient_error(devG, devG.get_shape().as_list(), op, devF.get_shape().as_list())
# print(error)
# assert error < 0.01
# batch norm test
cpuO, cpuM, cpuV = batch_norm_test(cpuA, cpuG, cpuB)
cpuZ, cpuDG, cpuDB = batch_norm_grad_test(cpuE, cpuA, cpuG, cpuM, cpuV)
bn_op = batch_norm(devA, devG, devB)
devO, devM, devV = sess.run( bn_op )
devZ, devDG, devDB = sess.run( tf.gradients(bn_op[0], [devA, devG, devB], devE) )
tests.append( ("bn fprop", devO, cpuO, N*K) )
tests.append( ("bn mean ", devM, cpuM, K ) )
tests.append( ("bn var ", devV, cpuV, K ) )
tests.append( ("bn bprop", devZ, cpuZ, N*K) )
tests.append( ("bn dgain", devDG, cpuDG, K ) )
tests.append( ("bn dbias", devDB, cpuDB, K ) )
cpuO = batch_norm_inf_test (cpuA, cpuG, cpuB, cpuM, cpuV)
op = batch_norm_inference(devA, devG, devB, bn_op[1], bn_op[2])
devO = sess.run( op )
tests.append( ("bn inf ", devO, cpuO, N*K) )
for op, dev, cpu, reshape in tests:
if cpu is None:
continue
dev = np.array(dev)
dif = cpu - dev
avgval = abs(cpu).sum() / cpu.size
maxdif = abs(dif).max()
ratio = maxdif / avgval
print("dtypeF:f%d, dtypeB:f%d, N:%3d, op:%s avg:%17.12f maxdif:%17.12f ratio:%17.12f" % (dtypeF.itemsize, dtypeB.itemsize, N, op, avgval, maxdif, ratio))
# print(dev[0,0,:,:])
# print(cpu[0,0,:,:])
# exit()
if out:
np.savetxt("out.txt", dif.reshape(reshape, -1), fmt='%7.4f')
np.savetxt("outC.txt", cpu.reshape(reshape, -1), fmt='%7.4f')
np.savetxt("outD.txt", dev.reshape(reshape, -1), fmt='%7.4f')
exit()
self.assertAllClose(dev, cpu, rtol=rtol, atol=rtol)
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from blocksparse.optimize import adafactor1d_op, adafactor2d_op
ones = 0
out = 0
beta2 = 0.5
learn_rate = 0.5
grad_scale = 1.0
clip_thresh = 1.0
clip_norm = 1.0
epsilon = 1e-30
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
class AdafactorTest(tf.test.TestCase):
def testAdafactor(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
for dtype in (tf.float32, tf.float16): # tf.float16
for shape_g in (
(1024,1024*2),
( 1,1024*2),
(1024,1023*1),
( 1,1023*1),
):
shape_c = ( 1,shape_g[1])
shape_r = (shape_g[0], 1)
if ones:
G = np.ones( shape_g, dtype=np.float32)
P = np.ones( shape_g, dtype=np.float32)
C = np.zeros(shape_c, dtype=np.float32)
R = np.zeros(shape_r, dtype=np.float32)
else:
G = np.random.uniform(-1.0, 1.0, shape_g).astype(np.float16).astype(np.float32)
P = np.random.uniform(-1.0, 1.0, shape_g).astype(np.float16).astype(np.float32)
C = np.random.uniform( 0.0, 1.0, shape_c).astype(np.float16).astype(np.float32)
R = np.random.uniform( 0.0, 1.0, shape_r).astype(np.float16).astype(np.float32)
g = tf.placeholder(tf.float32, G.shape)
p = tf.Variable(initial_value=P, name="p")
c = tf.Variable(initial_value=C, name="c")
r = tf.Variable(initial_value=R, name="r")
sess.run( tf.global_variables_initializer() )
g = bs.float_cast(g, dtype=dtype)
# adafactor has its own fused infinity filtering, but we quickly test the standalone op here.
g = bs.filter_tensor(g)
global_norm, norm_scale = bs.clip_by_global_norm([g], grad_scale=grad_scale, clip_norm=clip_norm)
if shape_g[0] > 1:
p, c, r, x, _ = sess.run(
adafactor2d_op(p, c, r, g, beta2, learn_rate, grad_scale, clip_thresh, [norm_scale], epsilon=epsilon),
feed_dict={ g: G } )
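# NumPy reference for the factored update: Adafactor keeps only a per-column
# (C) and per-row (R) running second moment; their outer product, rescaled by
# the row mean LTM, approximates the full Adam second moment at O(n+m) memory.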
GN = np.sqrt(np.sum(np.square(G*grad_scale), keepdims=True))
NS = clip_norm / np.maximum(GN, clip_norm)
G *= NS * grad_scale
C = beta2 * C + (1.0 - beta2) * np.mean(np.square(G) + epsilon, axis=0, keepdims=True)
R = beta2 * R + (1.0 - beta2) * np.mean(np.square(G) + epsilon, axis=1, keepdims=True)
LTM = np.mean(R, keepdims=True)
X = G / (np.sqrt(R / LTM) * np.sqrt(C))
RMS_X = np.sqrt(np.mean(np.square(X), keepdims=True))
else:
r = R
p, c, x, _ = sess.run(
adafactor1d_op(p, c, g, beta2, learn_rate, grad_scale, clip_thresh, [norm_scale], epsilon=epsilon),
feed_dict={ g: G } )
GN = np.sqrt(np.sum(np.square(G*grad_scale), keepdims=True))
NS = clip_norm / np.maximum(GN, clip_norm)
G *= NS * grad_scale
C = beta2 * C + (1.0 - beta2) * (np.square(G) + epsilon)
X = G / np.sqrt(C)
RMS_X = np.sqrt(np.mean(np.square(X), keepdims=True))
P -= learn_rate * X / np.maximum(1.0, RMS_X / clip_thresh)
print("testAdafactor", dtype, GN, NS)
for op, dev, cpu in [
[ "C", c, C ],
[ "R", r, R ],
[ "X", x, X ],
[ "P", p, P ],
]:
self.compare_results(op, dev, cpu)
def compare_results(self, op, dev, cpu):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("op:%3s, err:%17.12f, l2_err:%17.12f shape:%14s" % (op, maxdif, l2_err, str(cpu.shape)))
if out:
np.savetxt("%s_dif.txt"%op, dif, fmt='%6.3f')
np.savetxt("%s_cpu.txt"%op, cpu, fmt='%6.3f')
np.savetxt("%s_gpu.txt"%op, dev, fmt='%6.3f')
if __name__ == "__main__":
tf.test.main()
# a = np.zeros((32,32), dtype=np.bool)
# for y, x in np.ndindex(a.shape):
# if x <= y: a[y,x] = True
# b = np.packbits(a.reshape(-1,8)[:,::-1]).view(np.uint32)
# np.unpackbits(b.view(np.uint8))
# b = np.packbits(a.reshape(-1,8)[:,::-1]) |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time
from blocksparse.conv import cwise_linear
from blocksparse.ewops import float_cast
ones = 0
out = 0
shapes = [
[ 1, 32, 32 ],
[ 64, 64, 32 ],
[ 8, 64, 4, 4 ],
[ 8, 64, 16, 16 ],
[ 8, 64, 32, 32 ],
[ 8, 64, 8, 8, 8 ],
]
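# cwise_linear computes a per-channel y = g * x + b (optionally relu'd) along
# axis 1, so for each shape above the gain/bias broadcast as [1, C, 1, ...]
# (bshape below).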
class CWiseLinearTest(tf.test.TestCase):
def testCWiseLinear(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shape in (shapes):
bshape = [1] * len(shape)
bshape[1] = shape[1]
if ones:
cpuX = np.ones(shape, dtype=np.float32)
cpuE = np.ones(shape, dtype=np.float32)
cpuG = np.ones(bshape, dtype=np.float32)
cpuB = np.ones(bshape, dtype=np.float32)
else:
np.random.seed(int(time()))
cpuX = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
cpuG = np.random.uniform(-1.0, 1.0, bshape).astype(np.float32)
cpuB = np.random.uniform(-1.0, 1.0, bshape).astype(np.float32)
for dtype in (tf.float32, tf.float16, ): # tf.float32, tf.float16, tf.bfloat16
relus = (True, False) if dtype is tf.float32 else (False,)
for relu in relus:
results = []
for device in ("gpu", "cpu"):
cast = device == "gpu" and dtype is not tf.float32
with tf.device("/%s:0" % device), tf.name_scope(device):
x = tf.placeholder(tf.float32, cpuX.shape, name="x")
e = tf.placeholder(tf.float32, cpuE.shape, name="e")
g = tf.placeholder(tf.float32, cpuG.shape, name="g")
b = tf.placeholder(tf.float32, cpuB.shape, name="b")
feed_dict = {
x : cpuX,
e : cpuE,
g : cpuG,
b : cpuB,
}
xf = float_cast(x, dtype=dtype) if cast else x
y0 = cwise_linear(xf, gain=g, bias=b, relu=relu)
y1 = cwise_linear(xf, gain=g, relu=relu)
y2 = cwise_linear(xf, bias=b, relu=relu)
if cast:
y0 = float_cast(y0, dtype=tf.float32)
y1 = float_cast(y1, dtype=tf.float32)
y2 = float_cast(y2, dtype=tf.float32)
dx0, dg0, db0 = tf.gradients(y0, [ x, g, b ], e)
dx1, dg1 = tf.gradients(y1, [ x, g ], e)
dx2, db2 = tf.gradients(y2, [ x, b ], e)
results.append( sess.run( [ y0, y1, y2, dx0, dg0, db0, dx1, dg1, dx2, db2 ], feed_dict ) )
labels = ["y0", "y1", "y2", "dx0", "dg0", "db0", "dx1", "dg1", "dx2", "db2"]
for op, dev, cpu in zip(labels, results[0], results[1]):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("%s, shape:%16s, op: %3s, relu:%d, err:%17.12f, l2_err:%17.12f" % (dtype.name, str(cpu.shape), op, int(relu), max_err, l2_err))
# if out:
# np.savetxt("out.txt", difA.reshape(reshape), fmt='%5.2f')
# np.savetxt("outC.txt", cpuT.reshape(reshape), fmt='%5.2f')
# np.savetxt("outD.txt", devT.reshape(reshape), fmt='%5.2f')
# exit()
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from operator import mul
from blocksparse.conv import ConvEdgeBias, ceil_div
import blocksparse.ewops as ew
ones = 0
out = 0
bench = 0
shapes = [
# N K RS HW strides
[ 1, 512, [3,3], [128,128], [1,1] ],
[ 1, 512, [3,3], [ 64, 64], [1,1] ],
[ 1, 512, [3,3], [ 32, 32], [1,1] ],
[ 1, 512, [3,3], [ 16, 16], [1,1] ],
[ 1, 512, [3,3], [ 8, 8], [1,1] ],
[ 1, 512, [3,3], [ 4, 4], [1,1] ],
[ 1, 6, [3,3], [128,128], [1,1] ],
[ 1, 12, [3,3], [ 64, 64], [1,1] ],
[ 1, 24, [3,3], [ 32, 32], [1,1] ],
[ 1, 48, [3,3], [ 16, 16], [1,1] ],
[ 1, 96, [3,3], [ 8, 8], [1,1] ],
[ 1, 192, [3,3], [ 4, 4], [1,1] ],
]
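# ConvEdgeBias (as exercised here) learns extra per-position gain/bias terms
# for the output cells near the image border, where conv padding skews the
# statistics; eb.shape below gives the set of distinct edge cells per geometry.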
class EdgeBiasTest(tf.test.TestCase):
def testEdgeBias(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
test = 0
for N, K, RS, HW, strides in shapes:
test += 1
PQ = [ ceil_div(x, std) for x, std in zip(HW, strides) ]
for layout in ("NCHW","NHWC",): # "NCHW","NHWC"
if layout == "NHWC":
y_shape = [N] + PQ + [K]
x_shape = [N] + HW + [K]
w_shape = RS + [K, K]
else:
y_shape = [N] + [K] + PQ
x_shape = [N] + [K] + HW
w_shape = [K, K] + RS
eb = ConvEdgeBias(y_shape, x_shape, w_shape, strides=strides, data_format=layout)
if ones:
cpuX = np.ones(y_shape).astype(np.float32)
cpuE = np.ones(y_shape).astype(np.float32)
cpuG = np.ones(eb.shape).astype(np.float32)
cpuB = np.ones(eb.shape).astype(np.float32)
else:
cpuX = np.random.uniform(-1.0, 1.0, y_shape).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, y_shape).astype(np.float32)
cpuG = np.random.uniform(-1.0, 1.0, eb.shape).astype(np.float32)
cpuB = np.random.uniform(-1.0, 1.0, eb.shape).astype(np.float32)
x = tf.placeholder(tf.float32, cpuX.shape)
e = tf.placeholder(tf.float32, cpuE.shape)
g = tf.placeholder(tf.float32, cpuG.shape)
b = tf.placeholder(tf.float32, cpuB.shape)
feed_dict = { x: cpuX, e: cpuE, g:cpuG, b:cpuB }
for dtype in (tf.float32,): # tf.float32, tf.float16, tf.bfloat16
xf = ew.float_cast(x, dtype=dtype)
y = eb(xf, g, b, bench=bench)
y = ew.float_cast(y, dtype=tf.float32, dx_dtype=dtype)
devY, (devDX, devDG, devDB) = sess.run( [y, tf.gradients(y, [x, g, b], e)], feed_dict )
if bench == 0:
cpuY = eb.edge_bias_test(cpuX, cpuG, cpuB)
cpuDX, cpuDG, cpuDB = eb.edge_bias_grad_test(cpuE, cpuX, cpuG)
for op, devT, cpuT in (
( " devY", devY, cpuY ),
( "devDX", devDX, cpuDX ),
( "devDG", devDG, cpuDG ),
( "devDB", devDB, cpuDB ),):
devT = np.array(devT)
difA = cpuT - devT
avgval = abs(cpuT).sum() / cpuT.size
maxdif = abs(difA).max()
ratio = maxdif / avgval
print("%8s, test:%2d layout: %s op:%s err:%17.12f" % (dtype.name, test, layout, op, ratio))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from struct import pack, unpack
from time import time
class QuantizeTest(tf.test.TestCase):
def testQuantize(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
np.random.seed(int(time()))
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
# max of 80 SMs on the GPU
# 3 LFSRs and a max of 1024 threads active
# Note: tf does not support int32 variables!
entropy_init = np.random.randint(-(1<<31), (1<<31), size=80*3*1024, dtype=np.int32).view(np.float32)
entropy_ph = tf.placeholder(tf.float32, entropy_init.shape)
entropy_var = tf.get_variable("entropy", initializer=entropy_ph, trainable=False)
sess.run(tf.group(entropy_var.initializer), feed_dict={ entropy_ph : entropy_init })
bs.set_entropy(entropy_var)
for size in (1024*16*64,): #1024*16*64-1 131072, 1728, 229376, 262144, 28672, 3670016, 442368, 57344, 802816
# data = list()
# ebits = 4
# fbits = 3
# ebias = 0
# ebins = 1 << ebits
# fbins = 1 << fbits
# for exp in range(ebins-1, -fbits, -1):
# for frac in range(fbins-1, -1, -1):
# fraction = frac / fbins
# f8 = (1 + fraction) * 2**(exp - ebias)
# data.append(f8)
#print("%2d %.3f %.8e" % (exp-ebias, fraction, f8))
# cpuX = np.array(data, dtype=np.float32)
cpuX = np.random.normal(0.0, 1.0, size).astype(np.float32)
#cpuE = np.random.normal(0.0, 1.0, size).astype(np.float32)
x = tf.placeholder(tf.float32, cpuX.shape)
#e = tf.placeholder(tf.float32, cpuE.shape)
qspec = bs.QuantizeSpec(
ebits = 4,
fbits = 23,
stochastic = 0,
denorm = True,
frequency = 1,
mode = 0,
bias_pad = 0,
stdv_mul = 4.0,
logfile = "/home/scott/quant_log.txt",
)
y = bs.quantize(x, qspec)
sess.run(tf.group(*[v.initializer for v in tf.global_variables("quantize")], name="init"))
devY, = sess.run( [y], { x: cpuX } )
devY, = sess.run( [y], { x: cpuX } )
#devY, (devDX,) = sess.run( [y, tf.gradients(y, [x], e)], { x: cpuX, e: cpuE } )
# print("mean:", np.abs(cpuX).mean())
# print(" std:", np.abs(cpuX).std())
# print(" max:", np.abs(cpuX).max())
# print(" min:", np.abs(cpuX).min())
for cpu, gpu in (
(cpuX, devY),):
#(cpuE, devDX),):
dif = np.abs(cpu - gpu)
avgval = np.abs(cpu).mean()
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("size: %7d max_err%%:%12.8f L2_err: %12.10f" % (cpuX.size, 100*max_err, l2_err))
for i in range(min(20, cpuX.size)):
cpu = "0x%08x" % unpack("I", pack("f", cpuX[i]))
dev = "0x%08x" % unpack("I", pack("f", devY[i]))
print(cpu, dev)
if __name__ == "__main__":
tf.test.main()
# 0x3f 7 df6d5 0x3f 8 00000
# 0xbf 0 e2fb9 0xbf 1 00000
# 0x3d c 70ace 0x3d d 00000
# 0xbf 8 34e5d 0xbf 8 00000
# 0xbf 2 acf01 0xbf 2 00000
# 0xbf 3 4c906 0xbf 3 00000
# 0x3f 0 3aa08 0x3f 0 00000
# 0xbf c bb567 0xbf c 00000
# 0xbe 9 91431 0xbe a 00000
# 0xbf 9 8dcd1 0xbf a 00000
# 0x3f 4 e0c3b 0x3f 4 00000
# 0x3f 6 65f34 0x3f 6 00000
# 0xbe e 525e5 0xbe e 00000
# 0x3f c 5d974 0x3f d 00000
# 0x3d e bf111 0x3d e 00000
# 0x3e 3 1a3b2 0x3e 4 00000
# 0x3f f f5fdf 0x40 0 00000
# 0x40 1 6b70f 0x40 2 00000
# 0xbe e 9a48e 0xbe e 00000
# 0x3c f e9693 0x3d 0 00000
|
#!/usr/bin/env python
# nvprof -f -o "nccl_test_%p.nvvp" --profile-child-processes
# nvprof --profile-child-processes
import numpy as np
import platform
from collections import defaultdict
from mpi4py import MPI
import blocksparse.nccl as nccl
import blocksparse.ewops as ew
from time import time
import tensorflow as tf
import os
comm = MPI.COMM_WORLD
mpi_size = comm.Get_size()
mpi_rank = comm.Get_rank()
print("starting process %d mpi size %d" % (mpi_rank, mpi_size), flush=True)
config = tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list="%d" % (mpi_rank % 8)))
with tf.Session(config=config) as sess, tf.device("/gpu:0"):
N = 1024*4
shape = (N,N)
prereduce = True
np.random.seed(1)
A = np.random.normal(loc=0.1, scale=1.0, size=shape).astype(np.float32)
B = np.random.normal(loc=0.2, scale=1.0, size=shape).astype(np.float32)
a = tf.placeholder(tf.float32, A.shape, name="a")
b = tf.placeholder(tf.float32, B.shape, name="b")
feed_dict = { a : A, b : B }
prereduce = min(mpi_size, 8) if prereduce else 0
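# prereduce > 0 (capped at 8 ranks, i.e. one node) requests a hierarchical
# allreduce: ranks reduce locally first, then one rank per group joins the
# global reduction; 0 falls back to a flat allreduce across all ranks.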
for dtype in (tf.float32, tf.float16): #tf.float16, tf.bfloat16
y0 = tf.matmul(a, b)
y0 = ew.float_cast(y0, dtype=dtype)
y0 = nccl.allreduce(y0, rank=mpi_rank, num_comms=1, prereduce=prereduce)
y0 = ew.float_cast(y0, dtype=tf.float32)
y0 = sess.run(y0, feed_dict=feed_dict)
if mpi_rank == 0:
y1 = np.dot(A, B) * mpi_size
dif = np.abs(y1 - y0)
avgval = np.average(abs(y1))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(y1).sum())
print("prereduce: %d, dtype: %s, shape:%12s, err:%17.12f, l2_err:%17.12f" % (prereduce, dtype.name, str(shape), maxdif, l2_err))
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from blocksparse import dw_matmul_large_n
import blocksparse.ewops as ew
from time import time
shapes = [
[ 1024*1024, 32 ],
[ 1024*128, 128 ],
[ 1024*32, 512 ],
[ 32, 1024 ],
[ 64, 8 ],
[ 32, 4 ],
]
class MatMulTest(tf.test.TestCase):
def testMatMul(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shape in shapes:
np.random.seed(int(time()))
cpuX = np.random.normal(loc=0.1, scale=1.0, size=shape).astype(np.float16).astype(np.float32)
cpuE = np.random.normal(loc=0.2, scale=1.0, size=shape).astype(np.float16).astype(np.float32)
cpuU = np.dot(cpuX.astype(np.float64).T, cpuE.astype(np.float64)).astype(np.float32)
for dtype in (tf.float32, tf.float16): #tf.float16, tf.bfloat16
with tf.device("/gpu:0"):
x = tf.placeholder(tf.float32, cpuX.shape, name="x")
e = tf.placeholder(tf.float32, cpuE.shape, name="e")
feed_dict = { x : cpuX, e : cpuE }
if dtype is not tf.float32:
xf = ew.float_cast(x, dtype=dtype)
ef = ew.float_cast(e, dtype=dtype)
else:
xf, ef = x, e
u0 = dw_matmul_large_n(xf, ef)
u1 = tf.matmul(xf, ef, transpose_a=True, transpose_b=False)
if dtype is not tf.float32:
u1 = ew.float_cast(u1, dtype=tf.float32, dx_dtype=dtype)
u0, u1 = sess.run( [ u0, u1 ], feed_dict )
for op, dev, cpu in [
("custom", u0, cpuU),
("cublas", u1, cpuU),
]:
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("%s, depth:%8d shape:%12s, op:%s, err:%17.12f, l2_err:%17.12f" % (dtype.name, shape[0], str(cpu.shape), op, maxdif, l2_err))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from operator import mul
import blocksparse.ewops as ew
from tensorflow.python.framework import function
ones = 0
out = 0
bench = 0
atomics = False
shapes = [
(1, 1),
(32, 32),
(64 ,8192),
(64 ,4096),
(64 ,2048),
(64 ,1024),
(2**5 ,8193),
(2**6 ,4097),
(2**7 ,2049),
(2**8 ,1025),
]
class BiasReluTest(tf.test.TestCase):
def testBiasRelu(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
for shape in shapes:
for axis in (0,1):
if axis == 0:
xshape = tuple(reversed(shape))
bshape = (shape[1], 1)
else:
xshape = shape
bshape = (1, shape[1])
if ones:
cpuX = np.ones(xshape, dtype=np.float32)
cpuE = np.ones(xshape, dtype=np.float32)
cpuB = np.ones(bshape, dtype=np.float32)
else:
cpuX = np.random.uniform(-1.0, 1.0, xshape).astype(np.float16).astype(np.float32)
cpuE = np.random.uniform(-1.0, 1.0, xshape).astype(np.float16).astype(np.float32)
cpuB = np.random.uniform(-1.0, 1.0, bshape).astype(np.float32)
for relu in (True, False):
for dtype in (tf.float16, tf.float32): #tf.float16, tf.bfloat16
results = []
for device in ("gpu", "cpu"):
if bench and device == "cpu":
break
cast = device == "gpu" and dtype is not tf.float32
with tf.device("/%s:0" % device), tf.name_scope(device):
x = tf.placeholder(tf.float32, cpuX.shape)
e = tf.placeholder(tf.float32, cpuE.shape)
b = tf.placeholder(tf.float32, cpuB.shape)
feed_dict = { x: cpuX, e: cpuE, b:cpuB }
xc = ew.float_cast(x, dtype=dtype) if cast else x
print(axis, xc.shape, b.shape)
y = ew.bias_relu(xc, b, axis=axis, fast_gelu=relu, atomics=atomics, bench=bench)
if cast:
y = ew.float_cast(y, dtype=tf.float32)
dx, db = tf.gradients(y, [x, b], e)
results.append( sess.run( [ y, dx, db ], feed_dict ) )
if not bench:
for op, dev, cpu in zip(["y", "dx", "db"], results[0], results[1]):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("%s, shape:%14s, op:%3s(%d), err:%17.12f, l2_err:%17.12f" % (dtype.name, str(cpu.shape), op, relu, maxdif, l2_err))
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from time import time
from blocksparse.matmul import BlocksparseMatMul, SparseProj, group_param_grads
import blocksparse.ewops as ew
import networkx
bench = 4000
depth = 8
mask = "ba" # ba, ws
# multi-threading screws up benchmarking
conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
# bsize = 32
# for hsize in range(1,11):
# n = hsize*80*32 // bsize
# m = int(round((6553600 / (bsize*bsize*n) - 1) / 2))
# print("%2d %5d %3d %2d %.2f" % (hsize, hsize*80*32, n, m, 100*(m*2 + 1) / n))
# exit()
with tf.Session(config=conf) as sess, tf.device("/gpu:0"):
#for hsize, sparsity in ((1, 100.0), (2, 25.0), (3, 12.0), (4, 6.0), (6, 3.0)):
# for hsize, sparsity in ((2, 100.0), (3, 43.0), (4, 24.0), (5, 15.0), (6, 10.5), (7, 8.0), (9, 4.6), (11, 3.0)): #(10, 3.7), (8, 6.0),
# hsize *= 56*32
for hsize, sparsity in ( (1, 100.0), (2, 25.62), (3, 11.25), (4, 6.56), (5, 4.25), (6, 2.71), (7, 1.96), (8, 1.41) ): #(10, 3.7), (8, 6.0),
hsize *= 80*32
for bsize, axis in ( (32,0), (16,0), (8,0) ): # (32,0), (16,0), (8,0)
n = hsize // bsize
if sparsity == 100.0:
layout = np.ones((n,n), dtype=np.int32)
blks = n*n
spar = sparsity
m = n
else:
for m in range(1,n//2):
if mask == "ws":
blks = n * (m*2 + 1)
else:
blks = 2*m*(n-m) + m*m + n-m
spar = 100 * blks / n**2
if spar >= sparsity:
break
if mask == "ws":
layout = networkx.generators.random_graphs.watts_strogatz_graph(n, m*2, .2)
layout = networkx.adjacency_matrix(layout).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
else:
layout = networkx.generators.barabasi_albert_graph(n, m)
layout = networkx.adjacency_matrix(layout).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
layout[0:m,0:m] = 1
# print("axis:%d bsize:%2d hsize:%d params:%d sparsity:%.2f m:%d" % (axis, bsize, hsize, bsize*bsize*blks, spar, m))
# continue
bsmm = BlocksparseMatMul(layout, block_size=bsize, feature_axis=axis, name="test")
W = np.random.uniform(-1.0, 1.0, bsmm.w_shape).astype(np.float32)
w = tf.constant(W)
for N in (64,): # 128,64,32,16,1,
X = np.random.uniform(-1.0, 1.0, bsmm.i_shape(N)).astype(np.float32)
E = np.random.uniform(-1.0, 1.0, bsmm.o_shape(N)).astype(np.float32)
x = tf.constant(X)
e = tf.constant(E)
for dtype in (tf.bfloat16, ): # tf.float16, tf.bfloat16,
#print("axis:%d bsize:%2d N:%d dtype:%s hsize:%d params:%d sparsity:%.2f" % (axis, bsize, N, dtype.name, hsize, bsize*bsize*blks, spar))
# compute in tensorflow
w2 = ew.float_cast(w, dtype=dtype)
y = ew.float_cast(x, dtype=dtype)
for j in range(depth):
repeat = bench if bench and j==depth-1 else 0
y = bsmm(y, w2, dw_dtype=dtype, bench=repeat) # (bench and j==depth-1) (bench and j==0)
y = ew.float_cast(y, dtype=tf.float32, dx_dtype=dtype)
#sess.run( y )
d = tf.gradients(y, [x, w], e, aggregation_method=3)
if depth > 1:
d[1] = group_param_grads(d[1], 8)
y, (dx, dw) = sess.run( [y, d] )
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from tensorflow.python.ops import gradient_checker
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
np.set_printoptions(linewidth=600, formatter={'float':lambda x: "%.2f" % x})
class PruneTest(tf.test.TestCase):
def testPrune(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
with tf.device("/cpu:0"):
sparse_ph = tf.placeholder(tf.float32, shape=[])
step_ph = tf.placeholder(tf.int32, shape=[])
blocks = 1000
sparsity = 0.1
for ntype in ("max", "l2"):
for bsize in (8,16,32):
W = np.random.normal(0.0, 1.0, (blocks,bsize,bsize)).astype(np.float32)
G = np.ones((blocks,), dtype=np.float32)
w = tf.get_variable(f"w_{ntype}_{bsize}", initializer=W)
g = tf.get_variable(f"g_{ntype}_{bsize}", initializer=G)
sess.run( tf.global_variables_initializer() )
prune_op = bs.blocksparse_prune(w, g, step_ph, sparsity=sparse_ph, norm=ntype, frequency=1)
norm_op = bs.blocksparse_norm(w, norm=ntype)
sess.run([prune_op], feed_dict={ sparse_ph: sparsity, step_ph: 0 })
n, g = sess.run([norm_op, g])
if ntype == "max":
N = np.max(np.abs(W.reshape(blocks,-1)), axis=1)
else:
N = np.sqrt(np.sum(np.square(W.reshape(blocks,-1)), axis=1))
keep = int(round(blocks * (1.0 - sparsity)))
for si, (v, i) in enumerate(sorted(list(zip(N, range(blocks))), reverse=True)):
if si >= keep:
G[i] = 0.0
print("Type: %3s bsize: %2d norm_err: %.5f gate_err: %.0f" % ( ntype, bsize, np.sum(np.abs(N - n)), np.sum(np.abs(G - g)) ))
# print("N", N)
# print("n", n)
# print("G", G)
# print("g", g)
def atestGateGrad(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
dtype = tf.float16
layout = np.ones([2,2], dtype=np.bool)
bsmm = bs.BlocksparseMatMul(layout, block_size=8, feature_axis=0, name="test")
X = np.random.uniform(-1.0, 1.0, bsmm.i_shape(64)).astype(np.float16).astype(np.float32)
W = np.random.uniform(-1.0, 1.0, bsmm.w_shape ).astype(np.float16).astype(np.float32)
G = np.random.uniform( 0.0, 1.0, bsmm.blocks ).astype(np.float16).astype(np.float32)
#G = np.ones([bsmm.blocks], dtype=np.float32)
x = tf.constant(X)
w = tf.constant(W)
g = tf.constant(G)
wf = bs.float_cast(w, dtype=dtype)
xf = bs.float_cast(x, dtype=dtype)
y = bsmm(xf, wf, gate=g, gate_grad=True, bench=0)
y = bs.float_cast(y, dtype=tf.float32)
sess.run( tf.global_variables_initializer() )
# y = sess.run( y )
# exit()
error = gradient_checker.compute_gradient_error(x, x.shape, y, y.shape) #, extra_feed_dict={ x: cpuX, m: mask }
print(error)
error = gradient_checker.compute_gradient_error(w, w.shape, y, y.shape) #, extra_feed_dict={ x: cpuX, m: mask }
print(error)
error = gradient_checker.compute_gradient_error(g, g.shape, y, y.shape) #, extra_feed_dict={ x: cpuX, m: mask }
print(error)
#assert error < 0.01, error
if __name__ == "__main__":
tf.test.main()
# with tf.Session() as sess:
# with tf.device("/gpu:0"):
# xshape = [25528,] #25528
# X = np.random.uniform(-1.0, 1.0, xshape).astype(np.float32)
# x = tf.placeholder(tf.float32, X.shape)
# val, idx = tf.nn.top_k(X, k=X.size, sorted=True)
# val, idx = sess.run( [val, idx], feed_dict={ x: X })
# print(X[0])
# print(val[0])
# print(idx[0])
# CN
# MV = (1, C)
# GB = (C, 1)
# NC
# MV = (1, C)
# GB = (1, C) |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from blocksparse.matmul import blocksparse_reduced_dw
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
class BlocksparseReducedDWTest(tf.test.TestCase):
def testBlocksparseReducedDW(self):
with self.test_session(config=config) as sess, tf.device("/gpu:0"):
ones = 0
norm = 0
accum = 0
blocks_x = 2
blocks_y = 4
bsize = 32
axis = 0
depth = 8
N = 64
scale = 1.0 / (N * depth)
shape_x = [N, N]
shape_y = [N, N]
shape_w = (blocks_x, blocks_y)
shape_x[axis] = bsize * blocks_x
shape_y[axis] = bsize * blocks_y
XS = list()
YS = list()
if ones:
for i in range(depth):
XS.append(np.ones(shape_x, dtype=np.float32))
YS.append(np.ones(shape_y, dtype=np.float32))
if accum:
DWA = np.ones(shape_w, dtype=np.float32)
XS[0][:] += np.arange(64, dtype=np.float32).reshape(1,64)
else:
for i in range(depth):
XS.append(np.random.normal(0.0, 1.0, shape_x).astype(np.float16).astype(np.float32))
YS.append(np.random.normal(0.0, 1.0, shape_y).astype(np.float16).astype(np.float32))
if accum:
DWA = np.random.normal(0.0, 1.0, shape_w).astype(np.float32)
feed_dict = dict()
xs = list()
ys = list()
for i in range(depth):
x = tf.placeholder(tf.float32, shape_x, name=f"x{i}")
y = tf.placeholder(tf.float32, shape_y, name=f"y{i}")
feed_dict[x] = XS[i]
feed_dict[y] = YS[i]
xs.append(bs.float_cast(x, dtype=tf.float16))
ys.append(bs.float_cast(y, dtype=tf.float16))
if accum:
dwa = tf.placeholder(tf.float32, DWA.shape, name=f"dwa")
feed_dict[dwa] = DWA
#dwa = bs.float_cast(dwa, dtype=tf.float16)
dw, x_red, y_red = blocksparse_reduced_dw(xs, ys, scale, [dwa], bsize=bsize, norm=norm, axis=axis)
else:
dw, x_red, y_red = blocksparse_reduced_dw(xs, ys, scale, [ ], bsize=bsize, norm=norm, axis=axis)
#dw = bs.float_cast(dw, dtype=tf.float32)
x_red = bs.float_cast(x_red, dtype=tf.float32)
y_red = bs.float_cast(y_red, dtype=tf.float32)
dw, x_red, y_red = sess.run([dw, x_red, y_red], feed_dict=feed_dict)
if axis == 0:
X_RED = np.zeros([blocks_x, depth, N], dtype=np.float32)
Y_RED = np.zeros([blocks_y, depth, N], dtype=np.float32)
for i in range(depth):
X = XS[i].reshape([blocks_x, bsize, N])
Y = YS[i].reshape([blocks_y, bsize, N])
if norm == 0:
X_RED[:,i,:] = np.max(np.abs(X), axis=1)
Y_RED[:,i,:] = np.max(np.abs(Y), axis=1)
else:
X_RED[:,i,:] = np.sqrt(np.sum(np.square(X), axis=1))
Y_RED[:,i,:] = np.sqrt(np.sum(np.square(Y), axis=1))
DW = np.dot(X_RED.reshape(blocks_x, -1), Y_RED.reshape(blocks_y, -1).T) * scale
else:
X_RED = np.zeros([depth, N, blocks_x], dtype=np.float32)
Y_RED = np.zeros([depth, N, blocks_y], dtype=np.float32)
for i in range(depth):
X = XS[i].reshape([N, blocks_x, bsize])
Y = YS[i].reshape([N, blocks_y, bsize])
if norm == 0:
X_RED[i,:,:] = np.max(np.abs(X), axis=2)
Y_RED[i,:,:] = np.max(np.abs(Y), axis=2)
else:
X_RED[i,:,:] = np.sqrt(np.sum(np.square(X), axis=2))
Y_RED[i,:,:] = np.sqrt(np.sum(np.square(Y), axis=2))
DW = np.dot(X_RED.reshape(-1, blocks_x).T, Y_RED.reshape(-1, blocks_y)) * scale
if accum:
DW += DWA
print("BlocksparseReducedDW", norm, bsize, depth)
for op, dev, cpu in [
[ "xr", x_red, X_RED ],
[ "yr", y_red, Y_RED ],
[ "dw", dw, DW ],
]:
#print(op, dev.shape, cpu.shape)
self.compare_results(op, dev, cpu)
def compare_results(self, op, dev, cpu):
dif = np.abs(cpu - dev)
avgval = np.average(abs(cpu))
maxdif = dif.max()
max_err = maxdif if avgval == 0 else maxdif / avgval
l2_err = np.sqrt(np.square(dif).sum()) / np.sqrt(np.square(cpu).sum())
print("op:%3s, err:%17.12f, l2_err:%17.12f shape:%14s" % (op, maxdif, l2_err, str(cpu.shape)))
if 0:
np.savetxt("%s_dif.txt"%op, dif.reshape(-1, dif.shape[-1]), fmt='%6.3f')
np.savetxt("%s_cpu.txt"%op, cpu.reshape(-1, cpu.shape[-1]), fmt='%6.3f')
np.savetxt("%s_gpu.txt"%op, dev.reshape(-1, dev.shape[-1]), fmt='%6.3f')
exit()
if __name__ == "__main__":
tf.test.main()
# 2560*2560*1024*2 / (452.92 * 1000)
|
from blocksparse.matmul import BlocksparseMatMul
import tensorflow as tf
import numpy as np
hidden_size = 4096
block_size = 32
minibatch_size = 64
# Create a (random) sparsity pattern
sparsity = np.random.randint(2, size=(hidden_size//block_size,hidden_size//block_size))
# Initialize the sparse matrix multiplication object
bsmm = BlocksparseMatMul(sparsity, block_size=block_size)
# Input to graph
x = tf.placeholder(tf.float32, shape=[None, hidden_size])
# Initialize block-sparse weights
w = tf.get_variable("w", bsmm.w_shape, dtype=tf.float32)
# Block-sparse matrix multiplication
y = bsmm(x, w)
# Run
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
result = sess.run([y], feed_dict = {x: np.ones((minibatch_size,hidden_size), dtype='float32')})
print(result)
|
#!/usr/bin/env python
'''
Example of the blocksparse transformer on enwik8.
To download data:
wget http://mattmahoney.net/dc/enwik8.zip
unzip enwik8.zip -d /tmp
'''
import argparse
import numpy as np
import tensorflow as tf
import blocksparse as bs
from mpi4py import MPI
def layernorm(x, scope, epsilon=1e-5, relu=False):
"""
normalize state vector to be zero mean / unit variance + learned scale/shift
"""
n_state = x.shape[-1].value
with tf.variable_scope(scope):
gain = tf.get_variable('g', [n_state], initializer=tf.constant_initializer(1.0))
bias = tf.get_variable('b', [n_state], initializer=tf.constant_initializer(0.0))
return bs.layer_norm(x, gain, bias, axis=-1, epsilon=epsilon, relu=relu)
def conv1d(x, scope, nf, std=0.02, relu=False, fast_gelu=False):
with tf.variable_scope(scope):
nx = x.shape[-1].value
ndims = x.shape.ndims
# Note: param initializers are not particularly well tuned in this code
w = tf.get_variable("w", [nx, nf], initializer=tf.random_normal_initializer(stddev=std))
b = tf.get_variable("b", [ nf], initializer=tf.constant_initializer(0.0))
if hps.float16:
# We delay weight casting till just before use to minimize memory footprint.
# In recompute mode these casts are released just after use on forward pass,
# then remade on the recompute pass.
with tf.control_dependencies([x.op]):
# By setting dx_dtype to float16 we prevent useless casting back to fp32 in the backwards pass.
# Our all-reduce and fused optimizers can accept fp16 natively.
w = bs.float_cast(w, dtype=tf.float16, dx_dtype=tf.float16)
# merge context and batch dims for more efficient matmul
if ndims > 2:
y_shape = tf.concat([tf.shape(x)[: ndims - 1], [nf]], axis=0)
x = tf.reshape(x, [-1, nx])
y = tf.matmul(x, w)
# avoid atomics in bias grad, but be careful as tf handles temp memory badly in the presence of async ops like all-reduce
y = bs.bias_relu(y, b, relu=relu, fast_gelu=fast_gelu, atomics=False)
if ndims > 2:
y = tf.reshape(y, y_shape)
return y
# Fine sparse structure
# Within each block this mask is applied to force the softmax output to zero where the mask is zero
# This is defined as a callback to avoid having to instantiate the full mask in memory at one time.
# The callback value is immediately converted to a bit mask internally.
def causal_subblock_mask(blk_shape, head_idx, query_idx, key_idx, blk_idx):
"""Prohibit positions in sub-blocks from attending to indices in the future.
Note: query_idx and key_idx are absolute indices rather than relative to
each block.
"""
mask = np.ones(blk_shape, dtype=np.bool)
if query_idx == key_idx:
for q, k in np.ndindex(blk_shape):
if k > q:
mask[q, k] = 0
return mask
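# Minimal sketch of the callback above (the _demo name is ours; nothing extra
# from the library is assumed): a diagonal block gets a lower-triangular mask,
# while off-diagonal blocks stay all ones since the coarse layout below already
# drops whole future blocks.
def _demo_causal_subblock_mask():
    blk = causal_subblock_mask((4, 4), head_idx=0, query_idx=1, key_idx=1, blk_idx=0)
    # blk is now:
    # [[1 0 0 0]
    #  [1 1 0 0]
    #  [1 1 1 0]
    #  [1 1 1 1]]
    return blk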
# Coarse sparse structure
# Only layout[q,k] == 1 blocks are computed and materialized in memory
# Block sizes of 8, 16, 32 and 64 are supported on volta fp16 tensorcores (64 being most appropriate for dense attention)
# Only blocksize 32 currently supported in fp32 on other gpus (sm >= 3.5).
def get_blocksparse_transformer(n_timesteps, n_heads):
blocksize = 64 if hps.float16 else 32
n_time_blocks = n_timesteps // blocksize
# The block layout can also include a head dimension if you don't want the same layout shared by all heads.
# Each head just has to have the same number of active blocks (but you can always mask them away).
layout = np.ones([n_time_blocks, n_time_blocks], dtype=np.bool)
# No query blocks may attend to key blocks in the future.
# Much more elaborate structures can be defined here aside from the usual lower triangular.
for q_idx, k_idx in np.ndindex(n_time_blocks, n_time_blocks):
if k_idx > q_idx:
layout[q_idx, k_idx] = 0
bst = bs.BlocksparseTransformer(layout, block_size=blocksize, mask_callback=causal_subblock_mask, heads=n_heads)
return bst
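# Sketch: with n_timesteps=256 in fp16 (blocksize 64) the coarse layout is a
# 4x4 lower triangle, so only 10 of the 16 blocks are ever computed or stored:
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]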
# Very simple-to-use recompute decorator. Be sure to pair it with bs.gradients() for it to work.
@bs.recomputable
def transformer_block(x, scope, train=False):
"""
core component of transformer
performs attention + residual mlp + layer normalization
"""
n_state = x.shape[-1].value
with tf.variable_scope(scope):
h = layernorm(x, "norm_a")
q = conv1d(h, 'proj_q', n_state)
k = conv1d(h, 'proj_k', n_state)
v = conv1d(h, 'proj_v', n_state)
# only need to create one bst per config
# we could pass this in as an external param but I like to keep the code more local
bst_params = (hps.n_timesteps, hps.n_head)
bst = bst_cache.get(bst_params)
if bst is None:
bst = bst_cache[bst_params] = get_blocksparse_transformer(*bst_params)
# run the core bst ops, transposes for dealing with heads are fused in here.
w = bst.query_key_op(q, k)
w = bst.masked_softmax(w, scale=1.0/np.sqrt(n_state / hps.n_head))
a = bst.weight_value_op(w, v)
a = conv1d(a, 'proj_a', n_state, std=0.02/hps.n_layer)
if train and hps.resid_pdrop > 0.0:
# preserve the dropout mask through recompute
key = scope + "_dropout_a"
a, dropout_cache[key] = bs.dropout(a, keep_prob=1.0 - hps.resid_pdrop, mask=dropout_cache.get(key))
# many basic tf ops are about half as fast as they should be in fp16
x = bs.add(x, a)
m = layernorm(x, "norm_m")
# fast_gelu: x * sigmoid(1.702 * x)
m = conv1d(m, 'proj_m1', n_state * hps.mlp_ratio, fast_gelu=True)
m = conv1d(m, 'proj_m2', n_state)
if train and hps.resid_pdrop > 0.0:
# preserve the dropout mask through recompute
key = scope + "_dropout_m"
m, dropout_cache[key] = bs.dropout(m, keep_prob=1.0 - hps.resid_pdrop, mask=dropout_cache.get(key))
return bs.add(x, m)
def model(xs, ys, loss_scale=None, train=False):
with tf.variable_scope("model", reuse=not train):
with tf.device("/cpu:0"):
if train:
grad_scale = tf.reciprocal(loss_scale) if hps.float16 else 1.0
global_step = tf.get_variable("global_step", [], initializer=tf.ones_initializer(), trainable=False)
learning_rate = tf.minimum(global_step * (1.0/hps.warmup_iters), 1.0) * hps.lr
mpi_scale = tf.constant(1.0 / mpi_size)
with tf.device("/gpu:0"):
# Contains scope/var_name substrings we use to group gradients for all reduce
# You'll want to find groupings that are scheduled uniquely by tensorflow, otherwise bs.allreduce could hang.
# The groups should be listed in the order in which their all-reduces are called.
# Any gradients not matching the substrings will get appended to the last group.
grad_groups = []
# embed discrete inputs to continuous space and add learned position embeddings
with tf.variable_scope('embed'):
x_embed = tf.get_variable("x", [ hps.n_vocab, hps.n_state], initializer=tf.random_normal_initializer(stddev=0.02))
p_embed = tf.get_variable('pos', [1, hps.n_timesteps, hps.n_state], initializer=tf.random_normal_initializer(stddev=0.01))
if hps.float16:
x_embed = bs.float_cast(x_embed, dtype=tf.float16, dx_dtype=tf.float16)
p_embed = bs.float_cast(p_embed, dtype=tf.float16, dx_dtype=tf.float16)
# bs.embedding_lookup can be much faster than tf version for low entropy indexes or small vocabs
x = bs.embedding_lookup(x_embed, xs)
if train and hps.embed_pdrop > 0.0:
# this part of the code is not recomputed so no need to remember the generated mask returned by bs.dropout
x, _ = bs.dropout(x, keep_prob=1.0 - hps.embed_pdrop)
p_embed, _ = bs.dropout(p_embed, keep_prob=1.0 - hps.embed_pdrop)
h = x + p_embed
grad_groups.insert(0, 'embed')
for l in range(hps.n_layer):
layer_name = 'layer_%d' % l
# enable the recompute decorator in training
# see blocksparse/grads.py if you want understand how this works
h = transformer_block(h, layer_name, train=train, recompute=train and hps.recompute)
grad_groups.insert(0, layer_name)
# project transformer features to logits with the (tied) input embedding matrix
with tf.variable_scope('logits'):
h = tf.reshape(h, [-1, hps.n_state])
logits = tf.matmul(h, x_embed, transpose_b=True)
if hps.float16:
# much faster and more memory efficient (but currently only implemented in fp16)
loss = bs.softmax_cross_entropy(logits=logits, labels=ys)
else:
labels = tf.cast(tf.reshape(ys, [-1]), tf.int32)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
loss = tf.reduce_mean(loss)
if train:
# apply loss scaling in fp16 mode
if hps.float16:
grad_loss = bs.scale_tensor(loss, loss_scale)
else:
grad_loss = loss
# use bs.gradients to allow bs.recomputable decorators to work
params = tf.trainable_variables()
grads = bs.gradients(grad_loss, params)
if mpi_size > 1:
# apply (1.0 / mpi_size) scaling prior to all_reduce to allow greater utilization of fp16 dynamic range.
# That is, we're OK with flushing some small values to zero to allow growth of large values in the allreduce (without hitting inf).
loss = bs.scale_tensor(loss, mpi_scale)
grads = [bs.scale_tensor(g, mpi_scale) for g in grads]
# allreduce in an mpi context
# bias and gain grads will be in fp32, but have them fp16 cast prior to allreduce
cast_all = tf.float16 if hps.float16 else None
loss = bs.allreduce(loss)
grads = bs.group_allreduce(grads, params, search_strings=grad_groups, cast_all=cast_all)
# This does not actually perform the clipping; it only measures the norm_scale that needs to be applied.
# norm_scale is then later applied in the fused optimizer ops (eliminating an extra pass over the gradients).
# norm_scale is also used to detect inf/nan values in any of the gradients so the whole update can be skipped
# and tried again with a new loss_scale.
global_norm, norm_scale = bs.clip_by_global_norm(grads, grad_scale=grad_scale, clip_norm=hps.clip_norm)
# Apply AdamOptimizer:
# fp16 mode is a special feature to store running mean and variance variables in custom fp16 formats.
# Using this mode should incur no loss in accuracy and save a lot of memory in your model.
# For further memory savings consider using bs.AdafactorOptimizer.
adam = bs.AdamOptimizer(learning_rate=learning_rate, norm_scale=norm_scale, grad_scale=grad_scale, fp16=hps.float16)
train_op = adam.apply_gradients(zip(grads, params))
# update global step after we're done using it for this update
with tf.control_dependencies([ train_op ]), tf.device("/cpu:0"):
update_op = tf.assign_add(global_step, 1.0)
return loss, tf.group(train_op, update_op), global_norm, norm_scale
else:
if mpi_size > 1:
loss = bs.allreduce(bs.scale_tensor(loss, mpi_scale))
return loss
def enwik8(path, n_train=int(90e6), n_valid=int(5e6), n_test=int(5e6)):
X = np.frombuffer(open(path, "rb").read(n_train + n_valid + n_test), dtype=np.uint8)
trX, vaX, teX = np.split(X, [n_train, n_train + n_valid])
return trX, vaX, teX
def iter_data(X, n_timesteps, n_batch, mpi_rank, mpi_size):
offset = np.random.randint(0, n_timesteps)
idxs = np.random.permutation(np.arange(offset, X.size - (n_timesteps + 1), n_timesteps))
# Truncate the training set this epoch if it does not divide evenly
sequences_per_batch = mpi_size * n_batch
length = (idxs.size // sequences_per_batch) * sequences_per_batch
if length != idxs.size:
print_rank0('Not including {} sequences'.format(idxs.size - length))
idxs = idxs[:length]
# Reshape starting indices to K*mpi_size*n_batch
idxs = idxs.reshape([-1, mpi_size, n_batch])
print_rank0(f'Number of minibatches this epoch: {len(idxs)}')
for minibatch_index in range(len(idxs)):
starting_indices = idxs[minibatch_index, mpi_rank]
x = np.zeros((n_batch, n_timesteps + 1), dtype=np.uint8)
for i, start_idx in enumerate(starting_indices):
x[i, :] = X[start_idx:start_idx + n_timesteps + 1]
yield x[:, :-1], x[:, 1:]
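# Sketch of the schedule above: each epoch draws a fresh offset in
# [0, n_timesteps), so sequence boundaries shift between epochs; the shuffled
# start indices are reshaped to [K, mpi_size, n_batch] and every rank reads its
# own column, giving disjoint minibatches without any communication.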
def print_rank0(*args):
if mpi_rank == 0:
print(*args, flush=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=100)
parser.add_argument('--n_batch', type=int, default=32)
parser.add_argument('--n_state', type=int, default=512)
parser.add_argument('--n_head', type=int, default=4)
parser.add_argument('--n_layer', type=int, default=6)
parser.add_argument('--n_timesteps', type=int, default=320)
parser.add_argument('--n_vocab', type=int, default=256)
parser.add_argument('--mlp_ratio', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.0005)
parser.add_argument('--resid_pdrop', type=float, default=0.05)
parser.add_argument('--embed_pdrop', type=float, default=0.05)
parser.add_argument('--clip_norm', type=float, default=1.0)
parser.add_argument('--loss_scale', type=float, default=2.0**16)
parser.add_argument('--loss_count', type=int, default=1000)
parser.add_argument('--warmup_iters', type=int, default=1000)
parser.add_argument('--enwik8_path', type=str, default='/home/scott/datasets/enwik8') # obviously change to your local path
parser.add_argument('--log_interval', type=int, default=200)
parser.add_argument('--profile', type=int, default=0) # exit early for nvprof profiling
parser.add_argument('--float16', type=int, default=0) # only sm >= 7.0 (tensorcores)
parser.add_argument('--recompute', type=int, default=0) # allow use of large contexts and/or lots of layers/params
# use some global vars for convenience
hps = parser.parse_args()
bst_cache = dict()
dropout_cache = dict()
comm = MPI.COMM_WORLD
mpi_size = comm.Get_size()
mpi_rank = comm.Get_rank()
n_train = int(90e6)
n_valid = int(5e6)
n_test = int(5e6)
trainX, validX, testX = enwik8(hps.enwik8_path, n_train, n_valid, n_test)
with tf.device("/gpu:0"):
X = tf.placeholder(tf.uint8, shape=[hps.n_batch, hps.n_timesteps])
Y = tf.placeholder(tf.uint8, shape=[hps.n_batch, hps.n_timesteps])
# loss_scale is a host side scalar
with tf.device("/cpu:0"):
loss_scale = tf.placeholder(tf.float32, shape=[])
# needed for bs.dropout()
np.random.seed(mpi_rank)
bs.set_entropy()
# initialize the loss_scale placeholder value
cur_loss_scale = hps.loss_scale
loss_count = 0
# build the models for training and testing/validation
train_loss, train_op, gn, ns = model(X, Y, loss_scale, train=True)
valid_loss = model(X, Y)
# Free up some python memory now that models are built
bst_cache = None
dropout_cache = None
bs.clear_bst_constants()
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(mpi_rank)
config.allow_soft_placement = True
iteration = 0
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
if mpi_size > 1:
# sync variables initialized on rank 0 to all other ranks
sess.run(bs.sync_variables_op(mpi_rank))
for i in range(hps.n_epochs):
print_rank0(f'Starting epoch {i}')
for x, y in iter_data(trainX, hps.n_timesteps, hps.n_batch, mpi_rank, mpi_size):
retry = True
while retry:
loss, global_norm, norm_scale, _ = sess.run([train_loss, gn, ns, train_op], feed_dict={X: x, Y: y, loss_scale: cur_loss_scale})
# auto loss scaling for fp16.
if hps.float16 and np.isfinite(loss):
# slowly increase loss scale but quickly drop it when inf or nan is detected in the gradients
# norm_scale will be zero when this happens
# You may also want to limit the change in loss_scale from any single minibatch, and throw the minibatch away when that limit is exceeded.
if norm_scale == 0.0:
cur_loss_scale *= 0.5
loss_count = 0
print_rank0("fp16 saturation detected (%f), changing loss_scale to: 2^%.0f" % (global_norm, np.log2(cur_loss_scale)))
else:
retry = False
if loss_count >= hps.loss_count:
cur_loss_scale *= 2.0
loss_count = 0
print_rank0("No fp16 saturation detected after %d iterations, changing loss_scale to: 2^%.0f" % (hps.loss_count, np.log2(cur_loss_scale)))
else:
loss_count += 1
else:
# if forward pass is not finite skip any further auto loss scaling.
retry = False
if iteration % hps.log_interval == 0:
print_rank0('train iteration: %7d, loss: %.5f, bits per byte: %.5f ns:%.5f gn:%.5f' % (iteration, loss, loss/np.log(2), norm_scale, global_norm))
iteration += 1
if hps.profile and iteration >= hps.profile:
exit()
print_rank0('Calculating validation loss')
valid_losses = []
for x, y in iter_data(validX, hps.n_timesteps, hps.n_batch, mpi_rank, mpi_size):
valid_losses.append(sess.run(valid_loss, feed_dict={X: x, Y: y}))
avg_valid = sum(valid_losses) / len(valid_losses)
print_rank0('Average validation loss: %.5f, bits per byte: %.5f' % (avg_valid, avg_valid/np.log(2)))
print_rank0('Calculating test loss')
test_losses = []
for x, y in iter_data(testX, hps.n_timesteps, hps.n_batch, mpi_rank, mpi_size):
test_losses.append(sess.run(valid_loss, feed_dict={X: x, Y: y}))
avg_test = sum(test_losses) / len(test_losses)
print_rank0('Average test loss: %.5f, bits per byte: %.5f' % (avg_test, avg_test/np.log(2)))
|
#!/usr/bin/env python
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from mpi4py import MPI
from tensorflow.examples.tutorials.mnist import input_data
from blocksparse.transformer import transpose_0213, masked_softmax
from blocksparse.norms import layer_norm
from blocksparse.optimize import Adam
from blocksparse.embed import embedding_lookup
from blocksparse.quantize import QuantizeSpec, quantize, set_entropy
from blocksparse.ewops import bias_relu
from blocksparse.nccl import allreduce, group_allreduce, sync_variables_op
qspec_e4f3 = QuantizeSpec(
ebits = 4,
fbits = 3,
denorm = True,
frequency = 512,
bias_pad = 1,
)
qspec_e5f2 = QuantizeSpec(
ebits = 5,
fbits = 2,
stochastic = 2,
denorm = True,
frequency = 512,
bias_pad = 8,
)
qspec_e6f7 = QuantizeSpec(
ebits = 6,
fbits = 7,
stochastic = 0,
denorm = True,
frequency = 512,
bias_pad = 8,
)
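# Rough reading of the specs above (our interpretation): each QuantizeSpec
# emulates a float with `ebits` exponent and `fbits` mantissa bits, e.g. e4f3
# is presumably an 8-bit float (1 sign + 4 exponent + 3 mantissa). As wired up
# below, pre-matmul tensors are quantized to e4f3 forward / e6f7 backward,
# and post-matmul outputs to e6f7 forward / stochastic e5f2 backward.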
def quantize_pre(x, name, tag):
if tag != "none":
if mpi_rank == 0:
qspec_f = QuantizeSpec(copy=qspec_e4f3, logfile="qspec_e4f03.f.%s.txt" % tag)
qspec_b = QuantizeSpec(copy=qspec_e6f7, logfile="qspec_e6f07.b.%s.txt" % tag)
else:
qspec_f = qspec_e4f3
qspec_b = qspec_e6f7
return quantize(x, qspec_f, qspec_b, name=name)
return x
def quantize_post(x, name, tag):
if tag != "none":
if mpi_rank == 0:
qspec_f = QuantizeSpec(copy=qspec_e6f7, logfile="qspec_e6f07.f.%s.txt" % tag)
qspec_b = QuantizeSpec(copy=qspec_e5f2, logfile="qspec_e5f02.b.%s.txt" % tag)
else:
qspec_f = qspec_e6f7
qspec_b = qspec_e5f2
return quantize(x, qspec_f, qspec_b, name=name)
return x
def layernorm(x, scope, epsilon=1e-5, relu=False):
"""
normalize state vector to be zero mean / unit variance + learned scale/shift
"""
n_state = shape_list(x)[-1]
with tf.variable_scope(scope):
gain = tf.get_variable('gain', [n_state], initializer=tf.constant_initializer(1))
bias = tf.get_variable('bias', [n_state], initializer=tf.constant_initializer(0))
return layer_norm(x, gain, bias, axis=-1, epsilon=epsilon, relu=relu)
def conv1d(x, scope, nf, hps, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), relu=False):
with tf.variable_scope(scope):
nx = x.shape[-1].value
ndims = x.shape.ndims
w = tf.get_variable("w", [nx, nf], initializer=w_init)
b = tf.get_variable("b", [ nf], initializer=b_init)
if ndims > 2:
y_shape = tf.concat([tf.shape(x)[ : ndims-1], [nf]], axis=0)
x = tf.reshape(x, [-1, nx])
scope = tf.get_variable_scope().name
w = quantize_pre(w, name=scope+"/pre_w", tag=hps.tag)
x = quantize_pre(x, name=scope+"/pre_x", tag=hps.tag)
y = tf.matmul(x, w)
y = quantize_post(y, name=scope+"/post_x", tag=hps.tag)
y = bias_relu(y, b, relu=relu)
if ndims > 2:
y = tf.reshape(y, y_shape)
return y
def shape_list(x):
"""
deal with dynamic shape in tensorflow cleanly
"""
ps = x.get_shape().as_list()
ts = tf.shape(x)
return [ts[i] if ps[i] is None else ps[i] for i in range(len(ps))]
def discretize(x, n_bin):
"""
    discretize inputs for embedding - maps values in [0, 1] to n_bin integer bins (0 .. n_bin-1)
"""
return np.digitize(x, np.linspace(0, 1, n_bin), right=True).astype(np.int32)
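# Example: with n_bin=8, np.linspace(0, 1, 8) gives 8 bin edges, and right=True
# puts x == 0 in bin 0 and x == 1 in bin 7, so outputs span [0, n_bin-1] and
# line up with the n_bin-row embedding table in embed_input:
#   discretize(np.array([0.0, 0.5, 1.0]), 8)  # -> array([0, 4, 7], dtype=int32)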
def subsample(x):
"""
attention is n^2 - subsample 28x28 mnist images to 14x14 to speed things up
"""
return x.reshape(-1, 28, 28)[:, ::2, ::2].reshape(-1, 14*14)
def preprocess(x, n_bin, sub_sample=True):
"""
subsample and discretize image
"""
if sub_sample:
x = subsample(x)
x = discretize(x, n_bin)
return x
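# Pipeline sketch: for a float batch of flattened 28x28 images with values in [0, 1],
#   x = np.random.rand(2, 784)
#   preprocess(x, n_bin=8).shape                    # (2, 196): 14x14 tokens
#   preprocess(x, n_bin=8, sub_sample=False).shape  # (2, 784): 28x28 tokens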
def split_states(x, n):
"""
reshape (batch, pixel, state) -> (batch, pixel, head, head_state)
"""
x_shape = shape_list(x)
m = x_shape[-1]
new_x_shape = x_shape[:-1]+[n, m//n]
return tf.reshape(x, new_x_shape)
def merge_states(x):
"""
reshape (batch, pixel, head, head_state) -> (batch, pixel, state)
"""
x_shape = shape_list(x)
new_x_shape = x_shape[:-2]+[np.prod(x_shape[-2:])]
return tf.reshape(x, new_x_shape)
def split_heads(x, n, scope):
"""
(batch, pixel, state) -> (batch, head, pixel, head_state)
"""
with tf.name_scope(scope):
return transpose_0213(split_states(x, n))
def merge_heads(x, scope):
"""
(batch, head, pixel, head_state) -> (batch, pixel, state)
"""
with tf.name_scope(scope):
return merge_states(transpose_0213(x))
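# Shape round trip: split_heads and merge_heads are exact inverses, e.g. with
# (batch=128, pixel=196, state=256) and n_head=4:
#   h = split_heads(x, 4, "split")   # (128, 4, 196, 64)
#   y = merge_heads(h, "merge")      # (128, 196, 256), same shape as x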
def attention(x, scope, n_head, hps):
"""
perform multi-head qkv dot-product attention and linear project result
"""
n_state = shape_list(x)[-1]
with tf.variable_scope(scope):
q = conv1d(x, 'q', n_state, hps) #project inputs to q,k,v
k = conv1d(x, 'k', n_state, hps)
v = conv1d(x, 'v', n_state, hps)
# c = conv1d(x, 'qkv', n_state*3, hps)
# q, k, v = tf.split(c, 3, 2)
q = split_heads(q, n_head, "split_q") #reshape for multi-head attention
k = split_heads(k, n_head, "split_k")
v = split_heads(v, n_head, "split_v")
scope = tf.get_variable_scope().name
q = quantize_pre(q, name=scope+"/pre_q", tag=hps.tag)
k = quantize_pre(k, name=scope+"/pre_k", tag=hps.tag)
with tf.name_scope("qk"):
w = tf.matmul(q, k, transpose_b=True) #dot product query with key
w = quantize_post(w, name=scope+"/post_w", tag=hps.tag)
w = masked_softmax(w, scale=tf.rsqrt(n_state/n_head)) #normalized attention distribution, rescale by head dim
w = quantize_pre(w, name=scope+"/pre_w", tag=hps.tag)
v = quantize_pre(v, name=scope+"/pre_v", tag=hps.tag)
with tf.name_scope("wv"):
a = tf.matmul(w, v) #reweighted attention value
a = quantize_post(a, name=scope+"/post_a", tag=hps.tag)
a = merge_heads(a, "merge") #combine result
a = conv1d(a, 'proj', n_state, hps) #project result
return a
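# In equation form each head computes softmax(Q K^T * s) V with s = 1/sqrt(d_head).
# With the default hyper-parameters d_head = n_state/n_head = 256/4 = 64, so
# masked_softmax is passed scale = rsqrt(64) = 0.125, the standard scaled
# dot-product attention rescaling.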
def mlp(x, scope, hps, ratio=4):
"""
2 layer relu residual mlp with wider first layer
"""
n_state = shape_list(x)[-1]
with tf.variable_scope(scope):
hidden = conv1d(x, 'hidden', n_state*ratio, hps, relu=True) # relu fc layer
residual = conv1d(hidden, 'residual', n_state, hps) #project back to state size
return tf.add(x, residual)
def transformer_block(x, scope, n_head, hps):
"""
core component of transformer
performs attention + residual mlp + layer normalization
"""
with tf.variable_scope(scope):
a = attention(x, 'attention', n_head, hps)
a = layernorm(tf.add(a, x, name="Add_x"), 'norm_a')
m = mlp(a, 'mlp', hps)
m = layernorm(tf.add(m, a, name="Add_a"), 'norm_m')
return m
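# Note: this uses the "post-norm" ordering of the original Transformer paper,
# i.e. layernorm is applied after each residual add rather than before the sublayer.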
def embed_input(x, hps):
"""
    embed discrete inputs to continuous space and add learned position embeddings
"""
x_embed = tf.get_variable('x_embed', [hps.n_bin, hps.n_state], initializer=tf.random_normal_initializer(stddev=0.02))
pos_embed = tf.get_variable('pos_embed', [hps.n_x, hps.n_state], initializer=tf.random_normal_initializer(stddev=0.01))
h = tf.add(embedding_lookup(x_embed, x), pos_embed)
return h
def output(x, hps):
"""
average pool transformer features and apply linear classifier
"""
x = tf.reduce_mean(x, axis=1, keepdims=True) #avg pooling features for classifier
logits = conv1d(x, 'classifier', hps.n_y, hps)[:, 0, :] #squeeze spatial dimension
return logits
def model(X, Y, hps):
# tf Variable of random ints of size (3 * GPU_SMs * 1024)
# tf doesn't support int32 variables? Hack with float32 view.
entropy_init = np.random.randint(-(1<<31), (1<<31), size=80*3*1024, dtype=np.int32).view(np.float32)
if hps.tag != "none":
qspec_e4f11 = QuantizeSpec(
ebits = 4,
fbits = 11,
stochastic = 2,
denorm = True,
frequency = 512,
bias_pad = 1,
logfile="qspec_e4f11.%s.b.txt" % hps.tag,
)
qspec_e5f10 = QuantizeSpec(
ebits = 5,
fbits = 10,
stochastic = 2,
denorm = True,
frequency = 512,
bias_pad = 4,
logfile="qspec_e5f10.%s.b.txt" % hps.tag,
)
else:
qspec_e4f11 = None
qspec_e5f10 = None
xs = tf.split(X, mpi_size, 0)
ys = tf.split(Y, mpi_size, 0)
with tf.device("/gpu:0"), tf.variable_scope("model"):
entropy = tf.get_variable("entropy", initializer=entropy_init, trainable=False)
set_entropy(entropy)
h = embed_input(xs[mpi_rank], hps)
for l in range(hps.n_layer):
            h = transformer_block(h, 'layer_%d' % l, hps.n_head, hps)
logits = output(h, hps)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ys[mpi_rank])
loss = tf.reduce_mean(loss)
params = tf.trainable_variables()
grads = tf.gradients(loss * cost_scale, params)
for p in params:
print(p.op.name + "_" + "_".join(str(x) for x in p.shape.as_list()))
test = tf.reduce_sum(tf.cast(tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), ys[mpi_rank]), tf.float32))
grad_scale = 1.0 / mpi_size
# all reduce grads
if mpi_size > 1:
group_allreduce(grads, params, search_strings=["classifier"] + ["layer_%d" % l for l in range(hps.n_layer-1, -1, -1)], prereduce=8, num_comms=2)
loss = allreduce(loss) * grad_scale
train = Adam(grads, params, grad_scale=grad_scale/cost_scale, param_qspec=qspec_e4f11, mean_qspec=qspec_e5f10, var_qspec=qspec_e5f10)
return loss, train, test
def accuracy(xs, ys, hps, tf_correct):
"""
compute accuracy over dataset
"""
n = len(xs)
correct = 0
for i in range(0, n, hps.n_batch): #tqdm(, total=n//hps.n_batch, ncols=80, leave=False):
correct += sess.run(tf_correct, { X: xs[i:i+hps.n_batch], Y: ys[i:i+hps.n_batch] })
return correct/n
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
if __name__ == '__main__':
np.random.seed(0)
parser = argparse.ArgumentParser()
# model hyper-parameters
parser.add_argument('--n_batch', type=int, default=512) # combined batch size across all gpus
parser.add_argument('--n_iter', type=int, default=100)
parser.add_argument('--n_bin', type=int, default=8)
parser.add_argument('--n_y', type=int, default=10)
parser.add_argument('--n_state', type=int, default=256)
parser.add_argument('--n_head', type=int, default=4)
parser.add_argument('--n_layer', type=int, default=3)
parser.add_argument('--profile', type=int, default=0)
parser.add_argument('--subsample', type=int, default=1) #2x2 subsampled MNIST
    parser.add_argument('--tag', type=str, default="") # experiment label, set to "none" to disable quantization
    hps = parser.parse_args()
    # NOTE (assumption): cost_scale is referenced in model() but never defined in
    # this script; a static scale of 1.0 keeps it a no-op (the gradients are
    # rescaled by grad_scale/cost_scale inside Adam).
    cost_scale = 1.0
comm = MPI.COMM_WORLD
mpi_size = comm.Get_size()
mpi_rank = comm.Get_rank()
hps.n_x = 14*14 if hps.subsample else 28*28
mnist = input_data.read_data_sets("/home/scott/datasets/mnist")
n_train = len(mnist.train.labels)
n_test = len(mnist.test.labels)
ys_train = mnist.train.labels[:n_test]
xs_train = preprocess(mnist.train.images[:n_test], hps.n_bin, hps.subsample)
xs_test = preprocess(mnist.test.images, hps.n_bin, hps.subsample)
ys_test = mnist.test.labels
n_updates = hps.n_iter*(n_train//hps.n_batch)
X = tf.placeholder(tf.int32, [None, hps.n_x])
Y = tf.placeholder(tf.int32, [None])
loss, train, tf_correct = model(X, Y, hps)
config = tf.ConfigProto()
config.inter_op_parallelism_threads = 1
config.gpu_options.visible_device_list = str(mpi_rank)
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
if mpi_size > 1:
sess.run(sync_variables_op(mpi_rank))
def run(i):
if hps.profile and i >= hps.profile:
exit()
x, y = mnist.train.next_batch(hps.n_batch)
cost, _ = sess.run([loss, train], { X: preprocess(x, hps.n_bin, hps.subsample), Y: y })
if i % (n_train//hps.n_batch) == 0 and i > 0:
train_accuracy = accuracy(xs_train, ys_train, hps, tf_correct)
test_accuracy = accuracy(xs_test, ys_test, hps, tf_correct)
if mpi_rank == 0:
print("\nupdates %d train accuracy: %.4f test accuracy: %.4f" % (i, train_accuracy, test_accuracy), flush=True)
print("", flush=True)
if mpi_rank == 0 and hps.profile == 0:
for i in tqdm(range(n_updates), total=n_updates, ncols=80, leave=False):
run(i)
else:
for i in range(n_updates):
run(i)
# m, n, k
# 128*196, 64*4, 64*4
# 128*196, 64*4, 64*4
# 128*196, 64*4, 64*4
# 196, 196, 64 x 128*4
# 196, 64, 196 x 128*4
# 128*196, 64*4, 64*4
# 128*196,256*4, 64*4
# 128*196, 64*4,256*4
# 128*1, 10, 64*4
# 00 q: 256,256 [128 196 256]
# 01 k: 256,256 [128 196 256]
# 02 v: 256,256 [128 196 256]
# 03 attention:qk 2x [128 4 196 64]
# 04 attention:v [128 4 196 196]
# 05 proj: 256,256 [128 196 256]
# 06 hidden: 256,1024 [128 196 256]
# 07 residual: 1024,256 [128 196 1024]
# 08 q: 256,256 [128 196 256]
# 09 k: 256,256 [128 196 256]
# 10 v: 256,256 [128 196 256]
# 11 attention:qk 2x [128 4 196 64]
# 12 attention:v [128 4 196 196]
# 13 proj: 256,256 [128 196 256]
# 14 hidden: 256,1024 [128 196 256]
# 15 residual: 1024,256 [128 196 1024]
# 16 q: 256,256 [128 196 256]
# 17 k: 256,256 [128 196 256]
# 18 v: 256,256 [128 196 256]
# 19 attention:qk 2x [128 4 196 64]
# 20 attention:v [128 4 196 196]
# 21 proj: 256,256 [128 196 256]
# 22 hidden: 256,1024 [128 196 256]
# 23 residual: 1024,256 [128 196 1024]
# 24 classifier: 256,10 [128 1 256]
# q . k.t = a
# QC . KC.T = QK    16x64 . 16x64.T = 16x16    16x16x64_NT    72,72,16
# QK . KC   = QC    16x16 . 16x64   = 16x64    16x64x16_NN    16,80,80
# QK.T . QC = KC    16x16.T . 16x64 = 16x64    16x64x16_TN    16,80,80
# w . v = q
# QK . VC   = QC    16x16 . 16x64   = 16x64    16x64x16_NN
# QC . VC.T = QK    16x64 . 16x64.T = 16x16    16x16x64_NT
# QK.T . QC = VC    16x16.T . 16x64 = 16x64    16x64x16_TN
# sequence length = 196
# batch size = 128
# head state = 64
# heads = 4
# mlp mult = 4
#       m,     n,     k
# 128*196,  64*4,  64*4           # q
# 128*196,  64*4,  64*4           # k
# 128*196,  64*4,  64*4           # v
#     196,   196,    64  x 128*4  # qk (batched matmul)
#     196,    64,   196  x 128*4  # wv (batched matmul)
# 128*196,  64*4,  64*4           # projection
# 128*196, 256*4,  64*4           # mlp
# 128*196,  64*4, 256*4           # mlp
# NC   . CK   = NK
# NK   . CK.T = NC
# NC.T . NK   = CK
# 1 D
# B, C, S
# B, C/2, 2, S
# B, C/2, S, 2
# 2 D
# B, C, S
# B, H, W, S
# B, H/2, W/2, S, 2, 2
# B, C/4, S*4 |
import os
import os.path
import string
import json
import numpy as np
import tensorflow as tf
import random
def ceil_div(x, y):
return -(-x // y)
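# e.g. ceil_div(10, 3) == 4 and ceil_div(9, 3) == 3 (ceiling division written
# as negated floor division).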
def text8(path):
print("opening:", path)
text = open(path).read()
tr_text = text[:int(90e6)]
va_text = text[int(90e6):int(95e6)]
te_text = text[int(95e6):int(100e6)]
return tr_text, va_text, te_text
vocab = string.ascii_lowercase+' '
encoder = dict(zip(vocab, range(len(vocab))))
decoder = dict(zip(range(len(vocab)), vocab))
def text8_stream(text, nbatch, nsteps, maxbatches=None):
nbytes = len(text)-1
nperstripe = nbytes//nbatch
nbatches = nbytes//(nbatch*nsteps)
if maxbatches is not None and maxbatches > 0:
nbatches = min(nbatches, maxbatches)
xmb = np.zeros((nbatch, nsteps), dtype=np.int32)
ymb = np.zeros((nbatch, nsteps), dtype=np.int32)
for batch in range(nbatches):
for n in range(nbatch):
sidx = n*nperstripe + batch*nsteps
xmb[n] = [encoder[byte] for byte in text[sidx:sidx+nsteps]]
ymb[n] = [encoder[byte] for byte in text[sidx+1:sidx+nsteps+1]]
# Transpose outputs to get more efficient time step split/concat on axis 0
yield xmb.T, ymb.T
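# Usage sketch (assuming the standard 100MB text8 file on disk): each yielded
# pair is (inputs, targets), both (nsteps, nbatch) after the transpose, with
# targets shifted one character ahead of inputs:
#   tr, va, te = text8("text8")
#   for xmb, ymb in text8_stream(va, nbatch=32, nsteps=64):
#       assert xmb.shape == (64, 32)
#       break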
def text_to_npy(path, nbytes=-1):
text = np.fromfile(path, dtype=np.uint8, count=nbytes)
return text
def wiki3(path):
print("opening:", path)
tr_text = text_to_npy(os.path.join(path, "wiki.train.raw"))
va_text = text_to_npy(os.path.join(path, "wiki.valid.raw"))
te_text = text_to_npy(os.path.join(path, "wiki.test.raw"))
# the valid/test sets are too small and produce too much variance in the results
    # create new partitions of 10MB
text = np.concatenate((tr_text, va_text, te_text))
te_text = text[:int(10e6)]
va_text = text[int(10e6):int(20e6)]
tr_text = text[int(20e6):]
return tr_text, va_text, te_text
def wiki3_stream(text, nbatch, nsteps, maxbatches=None):
"""
    breaks text into nbatch separate streams
yields contiguous nstep sized sequences from each stream until depleted
"""
nbytes = len(text)-nbatch
nbatches = nbytes//(nbatch*nsteps)
if maxbatches is not None:
nbatches = min([nbatches, maxbatches])
text = text[:nbatch*nbatches*nsteps+nbatch].reshape(nbatch, -1)
nperstripe = text.shape[-1]
xmb = np.zeros((nbatch, nsteps), dtype=np.int32)
ymb = np.zeros((nbatch, nsteps), dtype=np.int32)
for start in range(0, nperstripe-1, nsteps):
# Transpose outputs to get more efficient time step split/concat on axis 0
yield text[:, start:start+nsteps].T, text[:, start+1:start+nsteps+1].T
# Old stream for amazon
# def text_stream(path, nbatch, nsteps, maxbatches=None):
# """
# breaks text into nbatch seperate streams
# yields contiguous nstep sized sequences from each stream until depleted
# """
# text = np.fromstring(open(path).read().encode(), dtype=np.uint8)
# nbytes = len(text)-1
# nperstripe = nbytes//nbatch
# nbatches = nbytes//(nbatch*nsteps)
# if maxbatches is not None:
# nbatches = min(nbatches, maxbatches)
# xmb = np.zeros((nbatch, nsteps), dtype=np.int32)
# ymb = np.zeros((nbatch, nsteps), dtype=np.int32)
# for batch in range(nbatches):
# for n in range(nbatch):
# sidx = n*nperstripe + batch*nsteps
# xmb[n] = text[sidx:sidx+nsteps]
# ymb[n] = text[sidx+1:sidx+nsteps+1]
# # Transpose outputs to get more efficient time step split/concat on axis 0
# yield xmb.T, ymb.T
class JsonLogger(object):
def __init__(self, path, **kwargs):
make_path(path)
self.path = path
self.log(**kwargs)
    def log(self, **kwargs):
        with open(self.path, 'a') as f:
            f.write(json.dumps(kwargs) + '\n')
def ones_initializer(c=1.):
def _initializer(shape, dtype=tf.float32, partition_info=None):
return np.ones(shape, dtype=np.float32) * c
return _initializer
def zeros_initializer():
def _initializer(shape, dtype=tf.float32, partition_info=None):
return np.zeros(shape, dtype=np.float32)
return _initializer
def normal_initializer(mean=0.0, std=0.02):
def _initializer(shape, dtype=tf.float32, partition_info=None):
return np.random.normal(mean, std, shape).astype(np.float32)
return _initializer
def ortho_initializer(scale=1.0):
def _initializer(shape, dtype=tf.float32, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
print('SHAPE', shape)
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
# return (scale * q[:shape[0], :shape[1]]).astype(np.float32) # indexing seems pointless here
return (scale * q).astype(np.float32)
return _initializer
def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
def _assign(op):
node_def = op if isinstance(op, tf.NodeDef) else op.node_def
if node_def.op == "Variable":
return ps_dev
else:
return "/gpu:%d" % gpu
return _assign
def average_grads(tower_grads):
def average_dense(grad_and_vars):
if len(grad_and_vars) == 1:
return grad_and_vars[0][0]
grad = grad_and_vars[0][0]
for g, _ in grad_and_vars[1:]:
grad += g
return grad / len(grad_and_vars)
def average_sparse(grad_and_vars):
if len(grad_and_vars) == 1:
return grad_and_vars[0][0]
indices = []
values = []
for g, _ in grad_and_vars:
indices += [g.indices]
values += [g.values]
        indices = tf.concat(indices, 0)
        values = tf.concat(values, 0)
return tf.IndexedSlices(values, indices, grad_and_vars[0][0].dense_shape)
average_grads = []
for grad_and_vars in zip(*tower_grads):
if grad_and_vars[0][0] is None:
grad = None
elif isinstance(grad_and_vars[0][0], tf.IndexedSlices):
grad = average_sparse(grad_and_vars)
else:
grad = average_dense(grad_and_vars)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def num_trainable_params(scope):
    return np.sum([np.prod(var.get_shape().as_list()) for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)])
def print_trainable_params(scope):
print('Variable name, shape, size')
model = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
for var in model:
        print(var.name, var.get_shape(), np.prod(var.get_shape().as_list()))
    print('Number of parameters:', np.sum([np.prod(var.get_shape().as_list()) for var in model]))
def make_path(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
import math
def constant(p):
return 1
def linear(p):
return 1-p
def square(p):
return (1-p)**2
def sqrt(p):
return math.sqrt(1-p)
def cube(p):
return (1-p)**3
def cubert(p):
return (1-p)**(1/3.)
def fourth(p):
return (1-p)**4
def fourrt(p):
return (1-p)**(1/4.)
def cos(p):
return (math.cos(p*math.pi)+1.)/2.
def sigmoid(p):
p = p*20-10
return 1-1/(1+math.exp(-p))
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.v = v
self.nvalues = nvalues
self.schedule = globals()[schedule]
def value(self, n):
current_value = self.v*self.schedule(n/self.nvalues)
return current_value
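# Usage sketch: anneal a base value over nvalues steps; `schedule` is looked up
# by name among the functions above via globals().
#   sched = Scheduler(v=1e-3, nvalues=10000, schedule='linear')
#   sched.value(0)     # 0.001
#   sched.value(5000)  # 0.0005 (halfway through the linear decay)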
|
#!/usr/bin/env python
# import memory_util as mu
# mu.vlog(1)
import os
import time
import argparse
import logging
import platform
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import layers
from layers import HParams, LSTM_Model
from utils import text8, text8_stream, wiki3, wiki3_stream, num_trainable_params, print_trainable_params, make_path, JsonLogger
from tensorflow.contrib import nccl
#from tensorflow.python.client import timeline
def model(X, S, Y, hps, train=False, ema=None):
xs = tf.split(X, hps.ngpu, 1)
ys = tf.split(Y, hps.ngpu, 1)
ss = tf.split(S, hps.ngpu, 2 - hps.axis)
losses = []
states = []
grads = []
for gpu in range(hps.ngpu):
with tf.device("/gpu:%d" % gpu), tf.variable_scope("model%d" % gpu, reuse=not train):
lstm_model = LSTM_Model(hps, train)
loss, state = lstm_model.forward(xs[gpu], ss[gpu], ys[gpu], ema=ema)
losses.append(loss)
states.append(state)
if train:
grads.append( lstm_model.backward() )
if train:
ngrads = len(grads[0])
if hps.ngpu > 1:
# all reduce grads
for i in range(ngrads):
sum_grads = nccl.all_sum( [ grads[gpu][i][0] for gpu in range(hps.ngpu) ] )
for gpu in range(hps.ngpu):
grads[gpu][i] = ( sum_grads[gpu], grads[gpu][i][1] )
train = list()
for gpu, gpu_grads in enumerate(grads):
with tf.device("/gpu:%d" % gpu), tf.variable_scope("opt%d" % gpu):
# compute average from sum
if hps.ngpu > 1:
for i in range(ngrads):
                        # Note: the scalar division must appear in a device context, otherwise
                        # it will do a whole lot of unnecessary gpu to gpu copying.
                        # Also rebuild the tuple.
gpu_grads[i] = ( gpu_grads[i][0]/float(hps.ngpu), gpu_grads[i][1] )
if hps.optimizer == 'adam_old':
trainer = tf.train.AdamOptimizer(learning_rate=hps.lr, beta2=hps.beta2)
train.append(trainer.apply_gradients(gpu_grads))
else:
param_grads = [gpu_grads[i][0] for i in range(ngrads)]
param_names = [gpu_grads[i][1] for i in range(ngrads)]
if hps.optimizer == 'adam':
train.append(layers.adam_updates(param_names, param_grads, lr=hps.lr, mom2=hps.beta2, gamma=hps.gamma))
if hps.optimizer == 'adamax':
train.append(layers.adamax_updates(param_names, param_grads, lr=hps.lr, mom2=hps.beta2))
train = tf.group(*train)
else:
train = None
states = tf.concat(states, 2 - hps.axis)
return train, tf.add_n(losses)/hps.ngpu, states
def score(text, hps):
smb = np.zeros(hps.state_shape)
costs = []
for xmb, ymb in tqdm(text_stream(text, hps.nbatch, hps.nsteps),
total=len(text)//(hps.nbatch*hps.nsteps),
ncols=125, leave=False):
cost, smb = sess.run(
[ema_loss, ema_states],
{X:xmb, S:smb, Y:ymb}
)
costs.append(cost)
nats = float(np.mean(costs))
bpc = nats/np.log(2)
return bpc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# model hyper-parameters
parser.add_argument('--lstm_type', type=str, default='scottbrain', choices=['lstm','scottbrain','rnn'])
parser.add_argument('--nembd', type=int, default=64)
parser.add_argument('--nhidden', type=int, default=1120) # 8192
parser.add_argument('--nproj_in', type=int, default=2, help='Sparse input projection size or stride') # int(round(np.sqrt(blocks)))
parser.add_argument('--nproj_out', type=int, default=2, help='Sparse output projection size or stride') # int(round(np.sqrt(blocks)))
parser.add_argument('--nsteps', type=int, default=64)
parser.add_argument('--isteps', type=int, default=5)
parser.add_argument('--lsteps', type=int, default=1)
parser.add_argument('--share_isteps', type=int, default=0)
parser.add_argument('--share_masks', type=int, default=0)
parser.add_argument('--block_size', type=int, default=32, choices=[8,16,32])
parser.add_argument('--axis', type=int, default=0, choices=[0,1])
parser.add_argument('--sparsity', type=str, default='dense', help='dense | ba_X | bae_X_X')
parser.add_argument('--dropout', type=float, default=0.0, help='Whether to add dropout to both internal steps and updates. 0.2 seems to be a good value.')
parser.add_argument('--dropout_input', type=int, default=0, help='Whether to use input dropout.')
parser.add_argument('--dtype', type=int, default=32)
parser.add_argument('--dx_dtype', type=int, default=32)
parser.add_argument('--dw_dtype', type=int, default=32)
# optimization hyper-parameters
parser.add_argument('--nepochs', type=int, default=70)
parser.add_argument('--batch_size', type=int, default=128, help='Per-GPU batch size')
parser.add_argument('--ngpu', type=int, default=4)
parser.add_argument('--optimizer', type=str, default='adam', choices=['adam_old', 'adam', 'adamax'])
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lr_warmup_epochs',type=int, default=5)
parser.add_argument('--beta2', type=float, default=.999, help='Adam hyperparameter')
parser.add_argument('--gamma', type=float, default=0., help='Adam hyperparameter')
parser.add_argument('--recompute', type=int, default=0, help='Memory efficient backprop: Should be 0 or greater than 1')
parser.add_argument('--x_group_size', type=int, default=16, help='Concat small input and output projection matmuls together')
parser.add_argument('--forget_bias', type=float, default=1., help='Forget gate bias')
# other hyper-parameters
parser.add_argument('--name', type=str, default='', help='experiment label')
parser.add_argument('--logdir', type=str, default='logs')
parser.add_argument('--save_path', type=str, default='params/params.jl')
parser.add_argument('--data_file', type=str, default='/home/scott/datasets/wiki3')
parser.add_argument('--profile', type=int, default=0)
parser.add_argument('--restore', type=str, default="")
parser.add_argument('--debug', type=int, default=0)
parser.add_argument('--tiny', type=int, default=0, help='Whether to use tiny dataset')
dtype_map = {
7 : tf.bfloat16,
16: tf.float16,
32: tf.float32
}
args = parser.parse_args()
args.node = platform.node()
args.dtype = dtype_map[args.dtype]
args.dx_dtype = dtype_map[args.dx_dtype]
args.dw_dtype = dtype_map[args.dw_dtype]
args.nbatch = args.batch_size * args.ngpu
    # axis 1 is not memory efficient with small block sizes so it's not implemented yet
if args.block_size < 32:
assert args.axis == 0
    # sparse projection not supported on axis 1 yet... always use full projection for dense
if args.axis == 1 or args.sparsity == "dense":
args.nproj_in = args.nhidden
args.nproj_out = args.nhidden
if args.sparsity == "dense" or args.share_isteps:
args.share_masks = True
args.nproj_in = min(args.nproj_in, args.nhidden)
args.nproj_out = min(args.nproj_out, args.nhidden)
assert args.recompute == 0 or args.recompute > 1
if args.recompute > 0:
# these need to be the same if recompute is enabled
args.x_group_size = args.recompute
assert args.x_group_size > 0
if args.data_file[-5:] == "text8":
trX, vaX, teX = text8(path=args.data_file)
text_stream = text8_stream
args.nvocab = 27
else:
trX, vaX, teX = wiki3(path=args.data_file)
text_stream = wiki3_stream
args.nvocab = 256
hps = HParams(args)
hps.state_shape = (2, hps.nhidden, hps.nbatch) if hps.axis == 0 else (2, hps.nbatch, hps.nhidden)
if hps.tiny == 1:
vaX = trX[1000000:1100000]
teX = trX[1100000:1200000]
trX = trX[ 0:1000000]
ntrain = len(trX)
nval = len(vaX)
ntest = len(teX)
hps.its_per_epoch = (ntrain-1)//(hps.nbatch*hps.nsteps)
print("Number of iterations per epoch:", hps.its_per_epoch)
X = tf.placeholder(tf.int32, [ hps.nsteps, hps.nbatch ])
Y = tf.placeholder(tf.int32, [ hps.nsteps, hps.nbatch ])
S = tf.placeholder(tf.float32, hps.state_shape)
# Create model
train, loss, states = model(X, S, Y, hps, train=True)
ema = tf.train.ExponentialMovingAverage(decay=args.beta2)
avg_params = ema.apply(tf.trainable_variables())
train = tf.group(train, avg_params)
if not hps.profile:
_, ema_loss, ema_states = model(X, S, Y, hps, train=False, ema=ema)
# Logging
timestamp = time.strftime('r%Y_%m_%d_%H_%M_%S')
log_file = os.path.join(hps.logdir, 'lm', timestamp, "log.txt")
json_file = os.path.join(hps.logdir, 'lm', timestamp, "json.txt")
if os.path.exists(log_file):
# avoid 2 jobs sharing log (quick and dirty fix)
print(log_file, "already exists, exiting.")
exit()
make_path(log_file)
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(message)s',
filename=log_file, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler()) # Print logs to stderr as well
hps.num_params = str(num_trainable_params("model0"))
print_trainable_params("model0")
json_header = {}
for key in sorted(hps.__dict__.keys()):
if type(hps.__dict__[key]) in (str, int, float, type, tf.DType):
logging.info(str(key) + ': ' + str(hps.__dict__[key]))
json_header[str(key)] = str(hps.__dict__[key])
json = JsonLogger(json_file, **json_header)
# config = tf.ConfigProto(#allow_soft_placement=True,
# intra_op_parallelism_threads=hps.ngpu,
# inter_op_parallelism_threads=hps.ngpu)
#config.gpu_options.allow_growth = True
# run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# run_metadata = tf.RunMetadata()
if not args.restore:
hps.epoch = 0
hps.updates = 0
hps.chars = 0
hps.seconds = 0.
#numerics = 0 #tf.add_check_numerics_ops()
with tf.Session() as sess: #mu.capture_stderr() as stderr, #config=config
# We use numpy to init a lot of variables.
# Rather than use the variable initializers we use placeholders.
# This trims down the graph_def significantly (keeping it well under 2GB)
sess.run(tf.global_variables_initializer(), feed_dict=hps.feed_dict)
# free up memory
hps.finish_init()
for i in range(hps.epoch, hps.nepochs):
smb = np.zeros(hps.state_shape, dtype=np.float32)
epoch_start = time.time()
epoch_chars = 0
it = 0
for xmb, ymb in tqdm(text_stream(trX, hps.nbatch, hps.nsteps), #, maxbatches=64
total=hps.its_per_epoch,
ncols=125, leave=False):
if hps.debug and it % hps.debug == 0:
smb, _, loss_ = sess.run(
[states, train, loss],
{X: xmb, S: smb, Y: ymb},
)
print(it, loss_, loss_/np.log(2))
#print("state:", smb)
else:
smb, _ = sess.run(
[states, train], #, numerics
{X: xmb, S: smb, Y: ymb},
#options=run_options, run_metadata=run_metadata
)
# np.savetxt("../test.txt", test, fmt='%6.3f') #, fmt='%6.3f'
# exit()
# mu.print_memory_timeline(stderr, gpu_only=True, ignore_less_than_bytes=1000)
# exit()
hps.updates += 1
hps.chars += xmb.size
epoch_chars += xmb.size
if hps.profile and hps.updates > hps.profile:
exit()
# tl = timeline.Timeline(run_metadata.step_stats)
# ctf = tl.generate_chrome_trace_format()
# with open('timeline.json', 'w') as f:
# f.write(ctf)
it += 1
epoch_time = time.time() - epoch_start
hps.epoch += 1
hps.seconds += epoch_time
#hps.save(sess, ema)
train_bpc = score(trX[:nval], hps)
valid_bpc = score(vaX, hps)
test_bpc = score(teX, hps)
cps = epoch_chars // epoch_time
#print('=' * 125)
logging.info("nepochs: %3d, train_bpc: %0.6f, valid_bpc: %0.6f, test_bpc: %0.6f, nupdates: %7d, cps: %7d, nseconds: %6d" % (
hps.epoch, train_bpc, valid_bpc, test_bpc, hps.updates, cps, hps.seconds ))
#print('=' * 125)
json.log(epoch=hps.epoch, train_bpc=train_bpc, valid_bpc=valid_bpc, test_bpc=test_bpc, updates=hps.updates, cps=cps, seconds=hps.seconds)
# 6.61it/s] 100 2.23159
# 6.59it/s] 200 2.09856
# 6.56it/s] 300 2.0249
# 6.48it/s] 400 1.89606
# 6.50it/s] 500 1.92836
# 6.48it/s] 600 1.84487
# 6.44it/s] 700 1.80276
# 6.45it/s] 800 1.80252
# 6.46it/s] 900 1.7833
# 6.40it/s]1000 1.7205
# 6.89it/s]100 2.39161
# 6.83it/s]200 2.19594
# 6.79it/s]300 2.09597
# 6.78it/s]400 1.98943
# 6.82it/s]500 1.93037
# 6.79it/s]600 1.89484
# 6.77it/s]700 1.88045
# 6.88it/s]100 2.34194
# 6.89it/s]200 2.19156
# 6.84it/s]300 2.08489
# 6.82it/s]400 1.99491
# 6.83it/s]500 1.91741
# 6.81it/s]600 1.88355
# 6.80it/s]700 1.85657
# 6.76it/s]800 1.85184
# 6.75it/s]900 1.82508
# 6.74it/s]100 2.32168
# 6.73it/s]200 2.17876
# 6.70it/s]300 2.10247
# 6.70it/s]400 2.00016
# 6.66it/s]500 1.94257
# 6.64it/s]600 1.90198
# 6.68it/s]700 1.86387
# 6.62it/s]800 1.88786
# 6.62it/s]900 1.85535
|
import numpy as np
import networkx
from random import shuffle, randint
def make_mask(n, kind, axis=0):
if kind == 'dense':
a = np.ones((n, n), dtype=np.int32)
elif kind.startswith('old_ba_'):
_, _, m = kind.split('_')
a = old_barabasi_albert(n, int(m))
elif kind.startswith('ba_'):
_, m = kind.split('_')
a = barabasi_albert(n, int(m))
elif kind.startswith('bae_'): #barabasi-albert with extra nodes
_, m, e = kind.split('_')
a = barabasi_albert(n, int(m))
a = extra(a, n, int(e))
    elif kind.startswith('rande_'): #fully rewired (p=1) 1D watts-strogatz with extra nodes
_, m, e = kind.split('_')
a = watts_strogatz_1d(n, int(m), p=1.)
a = extra(a, n, int(e))
    elif kind.startswith('ws_'): #1D watts-strogatz
_, m, pct = kind.split('_')
a = watts_strogatz_1d(n, int(m)*2, p=float(pct)/100.0)
elif kind.startswith('br_'): # balanced_random
_, m = kind.split('_')
a = balanced_random(n, int(m)*2)
else:
raise ValueError('Unknown mask kind: ' + str(kind))
return a
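# Usage sketch: build a Barabasi-Albert connectivity pattern over the blocks of
# a hidden state (e.g. nhidden=1120, block_size=32 -> 35 blocks) and check how
# fast information mixes across internal steps with mix_factor (defined below):
#   a = make_mask(35, 'ba_2')     # (35, 35) 0/1 adjacency incl. self-loops
#   print(mix_factor([a, a, a]))  # e.g. "14.0 58.3 100.0" (random, values vary)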
def barabasi_albert(n, m):
#print("barabasi_albert", n, m)
g = networkx.generators.barabasi_albert_graph(n=n, m=m)
a = networkx.adjacency_matrix(g).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
a[0:m,0:m] = 1
return a
def watts_strogatz_1d(n, k, p):
assert k % 2 == 0
g = networkx.generators.random_graphs.watts_strogatz_graph(n, k, p)
return networkx.adjacency_matrix(g).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
def extra(a, n, extra):
    # Add `extra` random edges, always attaching to the currently least-connected node
    for _ in range(extra):
counts = list(zip(np.sum(a, axis=0), range(n)))
shuffle(counts)
counts.sort()
i = counts[0][1]
while True:
j = randint(0, n-1)
if a[i,j] == 0:
a[i,j] = 1
a[j,i] = 1
break
return a
# Legacy functions:
def old_barabasi_albert(n, m):
g = networkx.generators.barabasi_albert_graph(n=n, m=m)
a = networkx.adjacency_matrix(g).toarray().astype(np.int32) + np.eye(n, dtype=np.int32)
a[0:m,0:m] = 1
# add a few more random blocks to match size with watts_strogatz
target = n * (m*2 + 1)
while np.sum(a) < target:
counts = list(zip(np.sum(a, axis=0), range(n)))
shuffle(counts)
counts.sort()
i = counts[0][1]
while True:
j = randint(0, n-1)
if a[i,j] == 0:
a[i,j] = 1
a[j,i] = 1
break
return a
def watts_strogatz_2d(n, m, p, wrap=True):
# construct Watts-Strogatz random network on a 2d lattice, having approximately n*m/2 connections
# get size of lattice & init adjacency matrix
n0 = int(np.ceil(np.sqrt(n)))
n1 = n // n0
assert n0 * n1 == n # can't construct 2d lattice otherwise
adjacency_matrix = np.zeros((n0, n1, n0, n1), dtype=np.int8)
# make nearest neighbor connections
d = np.square(np.arange(int(np.ceil(np.sqrt(m)))))
distance_matrix = d.reshape((1, -1)) + d.reshape((-1, 1))
cutoff = np.sort(distance_matrix.flatten())[m // 2]
local_connectivity_matrix = distance_matrix <= cutoff
for i in range(local_connectivity_matrix.shape[0]):
for j in range(local_connectivity_matrix.shape[1]):
if local_connectivity_matrix[i, j]:
if i == 0 and j == 0:
pass
else:
if wrap: # should we connect both lattice dimensions end to start?
submat0 = adjacency_matrix[:, :, np.mod(np.arange(i, n0+i), n0)]
submat1 = submat0[:, :, :, np.mod(np.arange(j, n1+j), n1)]
submat1 += np.eye(n0*n1, dtype=np.int8).reshape(n0,n1,n0,n1)
submat0[:, :, :, np.mod(np.arange(j, n1 + j), n1)] = submat1
adjacency_matrix[:, :, np.mod(np.arange(i, n0 + i), n0)] = submat0
else:
submat0 = adjacency_matrix[np.arange(n0 - i)]
submat1 = submat0[:, np.arange(n1 - j)]
submat2 = submat1[:, :, np.arange(i, n0)]
submat3 = submat2[:, :, :, np.arange(j, n1)]
submat3 += np.eye((n0 - i) * (n1 - j), dtype=np.int8).reshape((n0 - i, n1 - j, n0 - i, n1 - j))
submat2[:, :, :, np.arange(j, n1)] = submat3
submat1[:, :, np.arange(i, n0)] = submat2
submat0[:, np.arange(n1 - j)] = submat1
adjacency_matrix[np.arange(n0 - i)] = submat0
# with probability p rewire each connection to another random end-point
rewire = np.random.binomial(n=1, p=p, size=(n0, n1, n0, n1))
rewire_inds = np.nonzero(rewire * adjacency_matrix)
# remove the rewired connections
adjacency_matrix[rewire_inds] = 0
# put back random connections, taking care not to duplicate existing connections
while True:
new_end0 = np.random.randint(n0, size=len(rewire_inds[0]))
new_end1 = np.random.randint(n1, size=len(rewire_inds[0]))
do_again = [[], []]
for i in range(len(rewire_inds[0])):
if adjacency_matrix[rewire_inds[0][i], rewire_inds[1][i], new_end0[i], new_end1[i]] \
or rewire_inds[0][i]==new_end0[i] and rewire_inds[1][i]==new_end1[i]:
do_again[0].append(rewire_inds[0][i])
do_again[1].append(rewire_inds[1][i])
else:
adjacency_matrix[rewire_inds[0][i], rewire_inds[1][i], new_end0[i], new_end1[i]] = 1
if len(do_again[0]) > 0:
rewire_inds = do_again
else:
break
# reshape the adjacency matrix back into 2d
adjacency_matrix = adjacency_matrix.reshape((n, n))
return adjacency_matrix
def balanced_random(n, m):
a = np.eye(n, dtype=np.int32)
cs = list(range(n))
shuffle(cs)
# keep track of how many c's are assigned to each k
kc = [0 for k in range(n)]
for c in cs:
        # find m eligible k's but prioritize by low count
ks = [k for k in range(n) if c != k and kc[k] < m]
shuffle(ks)
ks.sort(key=lambda k: kc[k])
for k in ks[0:m]:
a[c,k] = 1
kc[k] += 1
# ensure each k has m c's
for k in range(n):
while kc[k] < m:
if len(cs) == 0:
cs = list(range(n))
shuffle(cs)
while len(cs) > 0:
c = cs.pop()
if a[c,k] == 0:
a[c,k] = 1
kc[k] += 1
break
return a
# show how much mixing occurs after each internal step
def mix_factor(masks, nsamples=None):
n = masks[0].shape[0]
if nsamples is None:
nsamples = n
nsamples = min(n, nsamples)
samples = list(range(n))
shuffle(samples)
masks = [ mask.astype(np.float32) for mask in masks ]
factors = []
for steps in range(1, len(masks)+1):
total = 0
for i in samples[0:nsamples]:
b = np.zeros(n, dtype=np.float32)
b[i] = 1.0
for step in range(steps):
b = (np.dot(b, masks[step]) > 0.0).astype(np.float32)
total += np.sum(b)
pct = 100.0 * total / (nsamples * n)
factors.append("%.1f" % pct)
if pct >= 99.99:
break
return " ".join(factors) |
import os
import re
import sys
import tempfile
import tensorflow as tf
debug_messages = False
def vlog(level):
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = str(level)
# this helper is here in case we later want to capture huge stderr that doesn't fit in RAM
class TemporaryFileHelper:
"""Provides a way to fetch contents of temporary file."""
def __init__(self, temporary_file):
self.temporary_file = temporary_file
def getvalue(self):
return open(self.temporary_file.name).read()
STDOUT=1
STDERR=2
class capture_stderr:
"""Utility to capture output, use as follows
with util.capture_stderr() as stderr:
sess = tf.Session()
print("Captured:", stderr.getvalue()).
"""
def __init__(self, fd=STDERR):
self.fd = fd
self.prevfd = None
def __enter__(self):
t = tempfile.NamedTemporaryFile()
self.prevfd = os.dup(self.fd)
os.dup2(t.fileno(), self.fd)
return TemporaryFileHelper(t)
def __exit__(self, exc_type, exc_value, traceback):
os.dup2(self.prevfd, self.fd)
################################################################################
# LOG_MEMORY_PARSING
################################################################################
# Until https://github.com/tensorflow/tensorflow/issues/6716 is resolved, the
# reliable way to get access to tensor deallocation information is to parse
# __LOG_MEMORY__ from VLOG print statements. This is sensitive to print order
# run unbuffered to prevent interleaving:
# python -u script.py
# Regex'es to parse __LOG_MEMORY__ statements
# Each regex is preceded by an example of line it's meant to pass
# I 5143420588.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogTensorAllocation { step_id: -6 kernel_name: "Unknown (from Proto)" tensor { dtype: DT_INT32 shape { dim { size: 3 } } allocation_description { requested_bytes: 12 allocated_bytes: 12 allocator_name: "cpu" allocation_id: 3 has_single_reference: true ptr: 29496256 } } }
tensor_allocation_regex = re.compile("""MemoryLogTensorAllocation.*?step_id: (?P<step_id>[-0123456789]+).*kernel_name: \"(?P<kernel_name>[^"]+)\".*?allocated_bytes: (?P<allocated_bytes>\d+).*allocator_name: \"(?P<allocator_name>[^"]+)\".*allocation_id: (?P<allocation_id>\d+).*""")
# I 6795349363.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogRawAllocation { step_id: -3 operation: "TF_AllocateTensor" num_bytes: 1000000 ptr: 80910752 allocation_id: 99 allocator_name: "cpu" }
raw_allocation_regex = re.compile("""MemoryLogRawAllocation.*?step_id: (?P<step_id>[-0123456789]+).*operation: \"(?P<kernel_name>[^"]+)\".*?num_bytes: (?P<allocated_bytes>\d+).*allocation_id: (?P<allocation_id>\d+).*allocator_name: "(?P<allocator_name>[^"]+)".*""")
# I 5143420588.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogTensorOutput { step_id: 1 kernel_name: "Const" tensor { dtype: DT_INT32 shape { dim { size: 3 } } allocation_description { requested_bytes: 12 allocated_bytes: 12 allocator_name: "cpu" allocation_id: 3 ptr: 29496256 } } }
# 2017-01-26 10:13:30: I tensorflow/core/framework/log_memory.cc:35] __LOG_MEMORY__ MemoryLogTensorOutput { step_id: 2 kernel_name: "a0" tensor { dtype: DT_FLOAT shape { dim { size: 250000 } } allocation_description { requested_bytes: 1000000 allocated_bytes: 1000192 allocator_name: "gpu_bfc" allocation_id: 3 ptr: 30076651520 } } }
#tensor_output_regex = re.compile("""MemoryLogTensorOutput.* step_id: (?P<step_id>[-0123456789]+) kernel_name: \"(?P<kernel_name>[^"]+).*allocated_bytes: (?P<allocated_bytes>\d+).*allocation_id: (?P<allocation_id>\d+).*""")
tensor_output_regex = re.compile("""MemoryLogTensorOutput.* step_id: (?P<step_id>[-0123456789]+) kernel_name: \"(?P<kernel_name>[^"]+).*allocated_bytes: (?P<allocated_bytes>\d+).*allocator_name: \"(?P<allocator_name>[^"]+)\".*allocation_id: (?P<allocation_id>\d+).*""")
# some Shape lines are missing bytes info so have separate regex for them
# I 5162643141.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogTensorOutput { step_id: 5 kernel_name: "gradients/Shape" tensor { dtype: DT_INT32 shape { dim { } } } }
tensor_output_regex_no_bytes = re.compile("""MemoryLogTensorOutput.* step_id: (?P<step_id>[-0123456789]+) kernel_name: \"(?P<kernel_name>[^"]+).*""")
# 5143420588.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogTensorDeallocation { allocation_id: 2 allocator_name: "cpu" }
tensor_deallocation_regex = re.compile("""allocation_id: (?P<allocation_id>\d+).*allocator_name: \"(?P<allocator_name>[^"]+)\".*""")
# I 6796000229.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogRawDeallocation { step_id: -3 operation: "TensorFlow C Api" allocation_id: 177 allocator_name: "cpu" }
raw_deallocation_regex = re.compile("""allocation_id: (?P<allocation_id>\d+).*allocator_name: \"(?P<allocator_name>[^"]+)\".*""")
# I 5143420588.000000 file tensorflow/core/framework/log_memory.cc:41] __LOG_MEMORY__ MemoryLogStep { step_id: 1 handle: "->Print:0//0/;0" }
tensor_logstep_regex = re.compile("""MemoryLogStep.*?step_id: (?P<step_id>[-0123456789]+).*""")
def _parse_logline(l):
if 'MemoryLogTensorOutput' in l:
m = tensor_output_regex.search(l)
if not m:
m = tensor_output_regex_no_bytes.search(l)
assert m, l
d = m.groupdict()
d["type"] = "MemoryLogTensorOutput"
elif 'MemoryLogTensorAllocation' in l:
m = tensor_allocation_regex.search(l)
# Broadcast args give weird allocation messages without size, ignore
# I tensorflow/core/framework/log_memory.cc:35] __LOG_MEMORY__ MemoryLogTensorAllocation { step_id: 2 kernel_name: "gradients/node_5_grad/BroadcastGradientArgs" tensor { dtype: DT_INT32 shape { dim { } } } }
if not m:
return {"type": "MemoryLogTensorAllocation", "line": l,
"allocation_id": "-1"}
assert m, l
d = m.groupdict()
d["type"] = "MemoryLogTensorAllocation"
if debug_messages:
print("Got allocation for %s, %s"%(d["allocation_id"], d["kernel_name"]))
elif 'MemoryLogTensorDeallocation' in l:
m = tensor_deallocation_regex.search(l)
assert m, l
d = m.groupdict()
d["type"] = "MemoryLogTensorDeallocation"
if debug_messages:
print("Got deallocation for %s"%(d["allocation_id"]))
elif 'MemoryLogStep' in l:
m = tensor_logstep_regex.search(l)
assert m, l
d = m.groupdict()
d["type"] = "MemoryLogStep"
elif 'MemoryLogRawAllocation' in l:
m = raw_allocation_regex.search(l)
assert m, l
d = m.groupdict()
d["type"] = "MemoryLogRawAllocation"
elif 'MemoryLogRawDeallocation' in l:
m = raw_deallocation_regex.search(l)
assert m, l
d = m.groupdict()
d["type"] = "MemoryLogRawDeallocation"
else:
assert False, "Unknown log line: "+l
if not "allocation_id" in d:
d["allocation_id"] = "-1"
d["line"] = l
return d
def memory_timeline(log):
if hasattr(log, 'getvalue'):
log = log.getvalue()
def unique_alloc_id(line):
if line["allocation_id"] == "-1":
return "-1"
return line["allocation_id"]+"-"+line["allocator_name"]
def get_alloc_names(line):
alloc_id = unique_alloc_id(line)
for entry in reversed(allocation_map.get(alloc_id, [])):
kernel_name = entry.get("kernel_name", "unknown")
if not "unknown" in kernel_name:
return kernel_name+"("+unique_alloc_id(line)+")"
# couldn't find an allocation message with name of kernel
return "("+alloc_id+")"
def get_alloc_bytes(line):
for entry in allocation_map.get(unique_alloc_id(line), []):
if "allocated_bytes" in entry:
return entry["allocated_bytes"]
return "0"
def get_alloc_type(line):
for entry in allocation_map.get(unique_alloc_id(line), []):
if "allocator_name" in entry:
return entry["allocator_name"]
return "0"
parsed_lines = []
for l in log.split("\n"):
if 'LOG_MEMORY' in l: # and not 'step_id: -6' in l:
parsed_lines.append(_parse_logline(l))
allocation_map = {} # map of <allocation_id>-<allocator_name>->parsed_logline of allocation
for line in parsed_lines:
if (line["type"] == "MemoryLogTensorAllocation" or line["type"] == "MemoryLogRawAllocation" or
line["type"] == "MemoryLogTensorOutput"):
allocation_map.setdefault(unique_alloc_id(line), []).append(line)
if debug_messages:
print(allocation_map)
result = []
for i, line in enumerate(parsed_lines):
# skip lines without allocation_id, ie lines like
# I tensorflow/core/framework/log_memory.cc:35] __LOG_MEMORY__ MemoryLogStep { step_id: 2 handle: "->/gradients/a1_grad/TanhGrad/0/;1" }
if int(line["allocation_id"]) == -1:
continue
alloc_names = get_alloc_names(line)
# if line doesn't specify bytes, look in history if there was corresponding TensorOutput or TensorAllocation msg
if int(line.get('allocated_bytes', -1)) < 0:
alloc_bytes = get_alloc_bytes(line)
else:
alloc_bytes = line.get('allocated_bytes', -1)
alloc_type = get_alloc_type(line)
if line["type"] == "MemoryLogTensorOutput":
continue
if line["type"] == "MemoryLogTensorDeallocation" or line["type"]=="MemoryLogRawDeallocation":
alloc_bytes = "-" + alloc_bytes
result.append((i, alloc_names, alloc_bytes, alloc_type))
return result
def peak_memory(log, gpu_only=False):
"""Peak memory used across all devices."""
peak_memory = -123456789 # to catch bugs
total_memory = 0
for record in memory_timeline(log):
i, kernel_name, allocated_bytes, allocator_type = record
allocated_bytes = int(allocated_bytes)
if gpu_only:
if not allocator_type.startswith("gpu"):
continue
total_memory += allocated_bytes
peak_memory = max(total_memory, peak_memory)
return peak_memory
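# Usage sketch (hedged): __LOG_MEMORY__ lines only appear when TF is run with
# VLOG enabled (see vlog above) and unbuffered output, e.g. `python -u train.py`:
#   with capture_stderr() as stderr:
#       sess.run(train_op)  # train_op is a stand-in for your training step
#   print(peak_memory(stderr, gpu_only=True))  # bytes at the high-water mark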
def print_memory_timeline(log, gpu_only=False, ignore_less_than_bytes=0):
total_memory = 0
for record in memory_timeline(log):
i, kernel_name, allocated_bytes, allocator_type = record
allocated_bytes = int(allocated_bytes)
if gpu_only:
if not allocator_type.startswith("gpu"):
continue
if abs(allocated_bytes)<ignore_less_than_bytes:
continue # ignore small allocations
total_memory += allocated_bytes
print("%9d\t%42s\t%11d\t%11d\t%s"%(i, kernel_name, allocated_bytes, total_memory, allocator_type))
import matplotlib.pyplot as plt
def plot_memory_timeline(log, gpu_only=False, ignore_less_than_bytes=1000):
total_memory = 0
timestamps = []
data = []
current_time = 0
for record in memory_timeline(log):
timestamp, kernel_name, allocated_bytes, allocator_type = record
allocated_bytes = int(allocated_bytes)
if abs(allocated_bytes)<ignore_less_than_bytes:
continue # ignore small allocations
if gpu_only:
if not record[3].startswith("gpu"):
continue
timestamps.append(current_time-.00000001)
data.append(total_memory)
total_memory += int(record[2])
timestamps.append(current_time)
data.append(total_memory)
current_time+=1
plt.plot(timestamps, data)
################################################################################
# smart initialize
################################################################################
def smart_initialize(variables=None, sess=None):
"""Initializes all uninitialized variables in correct order. Initializers
are only run for uninitialized variables, so it's safe to run this multiple
times.
Args:
sess: session to use. Use default session if None.
"""
from tensorflow.contrib import graph_editor as ge
def make_initializer(var):
def f():
return tf.assign(var, var.initial_value).op
return f
def make_noop(): return tf.no_op()
def make_safe_initializer(var):
"""Returns initializer op that only runs for uninitialized ops."""
return tf.cond(tf.is_variable_initialized(var), make_noop,
make_initializer(var), name="safe_init_"+var.op.name).op
if not sess:
sess = tf.get_default_session()
g = tf.get_default_graph()
if not variables:
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
safe_initializers = {}
for v in variables:
safe_initializers[v.op.name] = make_safe_initializer(v)
    # initializers access variable value through the read-only value cached in
# <varname>/read, so add control dependency to trigger safe_initializer
# on read access
for v in variables:
var_name = v.op.name
var_cache = g.get_operation_by_name(var_name+"/read")
ge.reroute.add_control_inputs(var_cache, [safe_initializers[var_name]])
sess.run(tf.group(*safe_initializers.values()))
# remove initializer dependencies to avoid slowing down future variable reads
for v in variables:
var_name = v.op.name
var_cache = g.get_operation_by_name(var_name+"/read")
ge.reroute.remove_control_inputs(var_cache, [safe_initializers[var_name]])
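# Usage sketch: unlike tf.global_variables_initializer(), this is safe to call
# again after adding variables to an already-initialized graph:
#   with tf.Session() as sess:
#       smart_initialize(sess=sess)  # only uninitialized variables get their init ops run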
|
import numpy as np
import tensorflow as tf
from sklearn.externals import joblib
from blocksparse.matmul import BlocksparseMatMul, SparseProj, group_param_grads, get_parents, add_control_input, largest_block
from blocksparse.norms import layer_norm
import blocksparse.ewops as ew
import masks
from utils import ones_initializer, zeros_initializer, normal_initializer, ortho_initializer, make_path, ceil_div
agg_method=0 # set to 3 when in bfloat16 mode
# Debugging function
def print_act_stats(x, _str="", flatten=False):
if False:
return x
_x = ew.float_cast(x, dtype=tf.float32)
if flatten:
_x = tf.reshape(_x, [-1])
if len(_x.get_shape()) == 1:
x_mean, x_var = tf.nn.moments(_x, [0], keep_dims=True)
if len(_x.get_shape()) == 2:
x_mean, x_var = tf.nn.moments(_x, [0], keep_dims=True)
if len(_x.get_shape()) == 4:
x_mean, x_var = tf.nn.moments(_x, [0,2,3], keep_dims=True)
stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),\
tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
__str = "["+_str+"] "+x.name
print(__str)
return tf.Print(x, stats, __str)
class HParams(object):
no_serialize = set(["feed_dict","params","initializers","restore","state_shape"])
def __init__(self, args):
for k, v in args.__dict__.items():
if type(k) is str and k[0] != '_':
setattr(self, k, v)
self.feed_dict = dict()
self.params = dict()
if self.restore:
state = joblib.load(self.restore)
for k, v in state.items():
setattr(self, k, v)
print("Restore:")
for name in sorted(list(self.initializers.keys())):
val = self.initializers[name]
print(name, val.shape, val.size)
else:
self.initializers = dict()
def get_variable(self, name, shape, initializer):
scope = tf.get_variable_scope()
if scope.reuse:
return tf.get_variable(name)
ph = tf.placeholder(tf.float32, shape)
p = tf.get_variable(name, initializer=ph)
# add last part of scope to name to allow non-unique names
name = scope.name.split("/")[-1] + "/" + name
if name not in self.params:
self.params[name] = p
if name not in self.initializers:
self.initializers[name] = initializer(shape)
self.feed_dict[ph] = self.initializers[name]
return p
def save(self, sess, ema):
make_path(self.save_path)
state = dict()
params = sess.run([ema.average(p) for p in self.params.values()])
state["initializers"] = dict(zip(self.params.keys(), params))
for k, v in self.__dict__.items():
if k not in HParams.no_serialize:
state[k] = v
joblib.dump(state, self.save_path)
def finish_init(self):
# free up potentially large amount of memory used by these
self.initializers = None
self.feed_dict = None
class LSTM_Model(object):
def __init__(self, hps, train):
self.hps = hps
self.train = train
self.embd = Embedding(hps, train)
if hps.lstm_type == 'lstm':
self.lstm = LSTM_vanilla(hps, train)
if hps.lstm_type == 'scottbrain':
self.lstm = LSTM_scott(hps, train)
if hps.lstm_type == 'rnn':
self.lstm = RNN(hps, train)
# do this once for all gpus
if "bsmm" not in hps.__dict__:
self.gen_masks()
self.fc = FullyConnected(hps)
def forward(self, X, S, Y, ema=None):
inputs = self.embd.forward(X, ema=ema)
outputs, states = self.lstm.forward(inputs, S, ema=ema)
logits = self.fc.forward(outputs, ema=ema)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.reshape(Y, [-1]))
loss = tf.reduce_mean(loss)
# save for layerwise custom gradients
self.logits = logits
self.loss = loss
print("LSTM_Model::forward")
return loss, states
def backward(self):
# Compute gradients 1 layer at a time.
# This enables memory efficient mode to be implemented.
fc_grads = tf.gradients(self.loss, self.logits, aggregation_method=agg_method)
fc_grads, lstm_grads = self.fc.backward(fc_grads)
lstm_grads, embd_grads = self.lstm.backward(lstm_grads)
embd_grads = self.embd.backward(embd_grads)
print("LSTM_Model::backward")
return fc_grads + lstm_grads + embd_grads
def gen_masks(self):
hps = self.hps
hps.bsmm = bsmm = dict()
assert hps.nhidden % hps.block_size == 0
assert hps.nembd % 32 == 0
# Create block-sparse matmul ops (to be shared by all instances of the model)
# We only need 1 instance of the lut constants
with tf.name_scope("BlocksparseMatMul"):
if hps.nproj_in != hps.nhidden:
                # assume small projection values are actually strides
if hps.nproj_in <= hps.block_size * 4:
hps.sproj_mul = SparseProj(hps.nhidden, proj_stride=hps.nproj_in)
hps.sproj_add = SparseProj(hps.nhidden, proj_stride=hps.nproj_in)
hps.nproj_in = hps.sproj_mul.nproj
else:
hps.sproj_mul = SparseProj(hps.nhidden, nproj=hps.nproj_in)
hps.sproj_add = SparseProj(hps.nhidden, nproj=hps.nproj_in)
else:
hps.sproj_mul = None
hps.sproj_add = None
if hps.nproj_out != hps.nhidden:
                # assume small projection values are actually strides
if hps.nproj_out <= hps.block_size * 4:
hps.sproj_out = SparseProj(hps.nhidden, proj_stride=hps.nproj_out, block_size=32)
hps.nproj_out = hps.sproj_out.nproj
else:
hps.sproj_out = SparseProj(hps.nhidden, nproj=hps.nproj_out)
else:
hps.sproj_out = None
# for the input and output projections, use the largest block size that fits
blk_in, nproj_in = largest_block(hps.nproj_in)
blk_out, nproj_out = largest_block(hps.nproj_out)
nhidden = hps.nhidden // hps.block_size
nembd = hps.nembd // blk_in
nvocab = ceil_div(hps.nvocab, blk_out)
# the dense input mask
mask = np.ones( (nembd, nproj_in), dtype=np.int32)
bsmm["x"] = BlocksparseMatMul(mask, block_size=blk_in, feature_axis=hps.axis, name="lstm_x")
istep_masks = []
if hps.share_masks:
# all gates and internal steps get the same mask
mask = masks.make_mask(n=nhidden, kind=hps.sparsity)
bsmm_p = BlocksparseMatMul(mask, block_size=hps.block_size, feature_axis=hps.axis, name="lstm_h")
for p in list("ifou") + ["h%d" % i for i in range(hps.isteps)]:
bsmm[p] = bsmm_p
istep_masks = [ mask for i in range(hps.isteps + 1)]
else:
# internal steps get different masks
for p in ["h%d" % i for i in range(hps.isteps)]:
mask = masks.make_mask(n=nhidden, kind=hps.sparsity)
bsmm[p] = BlocksparseMatMul(mask, block_size=hps.block_size, feature_axis=hps.axis, name="lstm_%s" % p)
istep_masks.append(mask)
                # gates get the same mask (TODO: experiment here with different masks)
mask = masks.make_mask(n=nhidden, kind=hps.sparsity)
bsmm_p = BlocksparseMatMul(mask, block_size=hps.block_size, feature_axis=hps.axis, name="lstm_g")
for p in list("ifou"):
bsmm[p] = bsmm_p
istep_masks.append(mask)
# the output mask
mask = np.ones( (nproj_out, nvocab), dtype=np.int32)
bsmm["y"] = BlocksparseMatMul(mask, block_size=blk_out, feature_axis=hps.axis, name="lstm_o")
hps.mix_factor = masks.mix_factor(istep_masks)
hps.sparsity += " (%.4f%%)" % (100.0 * bsmm["u"].sparsity)
class Embedding(object):
def __init__(self, hps, train, scope='embedding'):
self.hps = hps
self.train = train
self.scope = scope
def forward(self, x, ema=None):
hps = self.hps
assert hps.nsteps % hps.x_group_size == 0
xgroups = hps.nsteps // hps.x_group_size
with tf.variable_scope(self.scope):
w = hps.get_variable("w", [hps.nvocab, hps.nembd], ortho_initializer())
g = hps.get_variable("g", [hps.nvocab, 1], ones_initializer())
self.params = [w, g]
if ema is not None:
w = ema.average(w)
g = ema.average(g)
w = tf.nn.l2_normalize(w, dim=1) * g
# x (nsteps, nbatch)
# w (nvocab, nembd)
# o (nsteps, nbatch, nembd)
words = tf.nn.embedding_lookup(w, x)
if self.train and hps.dropout > 0 and hps.dropout_input > 0:
words = tf.nn.dropout(words, 1.-hps.dropout, [hps.nsteps, hps.batch_size, 1])
# potentially down cast to fp16 to save memory and speed things up
#words = ew.float_cast(words, dtype=hps.dtype)
# (x_group_size x nbatch, nembd) * xgroups
outputs = [tf.reshape(x, [-1, hps.nembd]) for x in tf.split(words, xgroups, 0)]
if hps.axis == 0:
outputs = [tf.transpose(x) for x in outputs]
self.outputs = [ew.float_cast(x, dtype=hps.dtype) for x in outputs]
outputs = [tf.stop_gradient(x) for x in self.outputs]
return outputs
def backward(self, grad_ys):
param_grads = tf.gradients(self.outputs, self.params, grad_ys, aggregation_method=agg_method)
return list(zip(param_grads, self.params))
class FullyConnected(object):
def __init__(self, hps, scope='fc'):
self.hps = hps
self.scope = scope
def forward(self, inputs, ema=None):
hps = self.hps
bsmm = hps.bsmm
xgroup = hps.x_group_size
xgroups = len(inputs) // xgroup
sproj = hps.sproj_out
self.inputs = inputs
if sproj is not None:
inputs = [ sproj.gather(h) for h in inputs ]
with tf.variable_scope(self.scope):
w = hps.get_variable("w", bsmm["y"].w_shape, normal_initializer())
g = hps.get_variable("g", [hps.nvocab], ones_initializer())
b = hps.get_variable("b", [hps.nvocab], zeros_initializer())
self.params = [w, g, b]
if ema is not None:
w = ema.average(w)
g = ema.average(g)
b = ema.average(b)
#w = ew.float_cast(w, dtype=hps.dtype)
w = bsmm["y"].l2_normalize(w, dtype=hps.dtype)
# compute the fc matmul in groups for better memory efficiency.
ygroups = []
for i in range(xgroups):
x = tf.concat(inputs[i*xgroup:(i+1)*xgroup], 1 - hps.axis)
# (nsteps x nbatch, nvocab) = (nsteps x nbatch, hidden) . (nhidden, nvocab)
ygroups.append(bsmm["y"](x, w, dw_dtype=hps.dw_dtype))
y = tf.concat(ygroups, 1 - hps.axis)
# cast to float32 before entering cost function
y = ew.float_cast(y, dtype=tf.float32, dx_dtype=hps.dx_dtype)
if hps.axis == 0:
y = tf.transpose(y)
if (hps.nvocab % 32) != 0:
y = tf.slice(y, [0,0], [-1, hps.nvocab])
self.outputs = y*g + b
outputs = tf.stop_gradient(self.outputs)
return outputs
def backward(self, grad_ys):
nparams = len(self.params)
grads = tf.gradients(self.outputs, self.params + self.inputs, grad_ys, aggregation_method=agg_method)
param_grads = grads[0:nparams]
input_grads = grads[nparams:]
grads = list(zip(param_grads, self.params))
return grads, input_grads
class LSTM_vanilla(object):
# this model is currently broken due to the way masks are initialized
def __init__(self, hps, train, scope='lstm'):
self.hps = hps
self.train = train
self.scope = scope
def forward(self, inputs, states, ema=None):
hps = self.hps
bsmm = hps.bsmm
with tf.variable_scope(self.scope) as scope:
self.param_names = ['xi','xf','xo','xu','hi','hf','ho','hu']
self.params = dict()
for p in self.param_names:
if 'x' in p:
bsmm_p, size = (bsmm.x, hps.nproj_in)
elif 'h' in p:
bsmm_p, size = (bsmm.h, hps.nhidden)
b_init = ones_initializer(hps.forget_bias) if p == 'hf' else zeros_initializer()
w = hps.get_variable("w_" + p, bsmm_p.w_shape, bsmm_p.identity_init())
g = hps.get_variable("g_" + p, [size], ones_initializer())
b = hps.get_variable("b_" + p, [size], b_init)
if ema is not None:
w = ema.average(w)
g = ema.average(g)
b = ema.average(b)
wc = ew.float_cast(w, dtype=hps.dtype)
self.params[p] = (wc, g, b, w)
c, h = tf.unstack(states, num=2)
c = ew.float_cast(c, dtype=hps.dtype)
h = ew.float_cast(h, dtype=hps.dtype)
xi_w, xi_g, xi_b = self.params["xi"][0:3]
xf_w, xf_g, xf_b = self.params["xf"][0:3]
xo_w, xo_g, xo_b = self.params["xo"][0:3]
xu_w, xu_g, xu_b = self.params["xu"][0:3]
self.inputs = inputs
self.outputs = []
self.segments = []
for xgroup in inputs:
if hps.recompute and self.train:
# We compute the gradient one segment at a time, so prevent tf.gradients from going too far.
# We also want to add control inputs to the start of the segment so having wrappers
# around the segment inputs is handy.
seg = [(tf.stop_gradient(c),tf.stop_gradient(h))]
self.segments.append(seg)
# delay input expansion to just prior to use (saves memory)
with tf.control_dependencies([h]):
xwi = bsmm.x(xgroup, xi_w, dw_dtype=hps.dw_dtype)
xwf = bsmm.x(xgroup, xf_w, dw_dtype=hps.dw_dtype)
xwo = bsmm.x(xgroup, xo_w, dw_dtype=hps.dw_dtype)
xwu = bsmm.x(xgroup, xu_w, dw_dtype=hps.dw_dtype)
xwi = tf.split(xwi, hps.x_group_size, 1 - hps.axis)
xwf = tf.split(xwf, hps.x_group_size, 1 - hps.axis)
xwo = tf.split(xwo, hps.x_group_size, 1 - hps.axis)
xwu = tf.split(xwu, hps.x_group_size, 1 - hps.axis)
masks = []
for xi, xf, xo, xu in zip(xwi, xwf, xwo, xwu):
xi = layer_norm(xi, xi_g, xi_b, axis=hps.axis)
xf = layer_norm(xf, xf_g, xf_b, axis=hps.axis)
xo = layer_norm(xo, xo_g, xo_b, axis=hps.axis)
xu = layer_norm(xu, xu_g, xu_b, axis=hps.axis)
c, h, mask = self.cell(c, h, xi, xf, xo, xu)
_masks = [mask]
for _ in range(1, hps.lsteps):
c, h, mask = self.cell(c, h, None, None, None, None)
_masks.append(mask)
masks.append(_masks)
self.outputs.append(h)
if hps.recompute and self.train:
with tf.name_scope("f_seg_%04d_%d" % (len(self.segments)-1, len(seg)-1)):
c_seg, h_seg = seg[0]
with tf.control_dependencies([ h_seg ]):
xwi = bsmm.x(xgroup, xi_w, dw_dtype=hps.dw_dtype)
xwf = bsmm.x(xgroup, xf_w, dw_dtype=hps.dw_dtype)
xwo = bsmm.x(xgroup, xo_w, dw_dtype=hps.dw_dtype)
xwu = bsmm.x(xgroup, xu_w, dw_dtype=hps.dw_dtype)
xwi = tf.split(xwi, hps.x_group_size, 1 - hps.axis)
xwf = tf.split(xwf, hps.x_group_size, 1 - hps.axis)
xwo = tf.split(xwo, hps.x_group_size, 1 - hps.axis)
xwu = tf.split(xwu, hps.x_group_size, 1 - hps.axis)
for xi, xf, xo, xu, mask in zip(xwi, xwf, xwo, xwu, masks):
xi = layer_norm(xi, xi_g, xi_b, axis=hps.axis)
xf = layer_norm(xf, xf_g, xf_b, axis=hps.axis)
xo = layer_norm(xo, xo_g, xo_b, axis=hps.axis)
xu = layer_norm(xu, xu_g, xu_b, axis=hps.axis)
c_seg, h_seg, _ = self.cell(c_seg, h_seg, xi, xf, xo, xu, mask[0])
for i in range(1, hps.lsteps):
c_seg, h_seg, _ = self.cell(c_seg, h_seg, None, None, None, None, mask[i])
seg.append((c_seg, h_seg))
c = ew.float_cast(c, dtype=tf.float32)
h = ew.float_cast(h, dtype=tf.float32)
states = tf.stack([c, h], 0)
# We calculate the gradient internally.
# Don't let other layer's gradients flow into here.
# This is possible because the last cell has free c and h
# params that are populated with zeros in the gradient pass.
outputs = [tf.stop_gradient(x) for x in self.outputs]
return outputs, states
def linear(self, p, h, relu=False):
hps = self.hps
w, g, b = self.params[p][0:3]
h = hps.bsmm.h(h, w, dw_dtype=hps.dw_dtype)
return layer_norm(h, g, b, relu=relu, axis=hps.axis)
def cell(self, c, h, xi, xf, xo, xu, mask=None):
hps = self.hps
assert hps.isteps >= 2, "multiply and add steps of mLSTM require 2 internal steps"
'''
for step in range(hps.isteps):
# we can share one set of params for all isteps
p = "h%d" % (0 if hps.share_isteps else step)
if step == 0:
h = self.linear(p, h)
if hps.sproj_add is None:
h = ew.multiply(h, m)
else:
h = hps.sproj_add.scatter_mul(h, m)
elif step == 1:
h = self.linear(p, h)
if hps.sproj_mul is None:
h = ew.add(h, a)
else:
h = hps.sproj_mul.scatter_add(h, a)
h = ew.relu(h)
else:
h = self.linear(p, h, relu=True)
'''
i = self.linear("hi", h)
f = self.linear("hf", h)
o = self.linear("ho", h)
u = self.linear("hu", h)
# apply update dropout, saving mask if we need to recompute forward pass
if self.train and hps.dropout > 0:
if mask is None:
u, mask = ew.dropout(u, keep_prob=1.0-hps.dropout)
else:
u = ew.dropout(u, mask=mask)
else:
mask = None
if xi is not None:
i = ew.add(i, xi)
f = ew.add(f, xf)
o = ew.add(o, xo)
u = ew.add(u, xu)
c, h = ew.fused_lstm_gates(c, i, f, o, u)
return c, h, mask
# i = ew.sigmoid(i)
# f = ew.sigmoid(f)
# o = ew.sigmoid(o)
# u = ew.tanh(u)
# c = ew.add(ew.multiply(f, c), ew.multiply(i, u))
# h = ew.multiply(o, ew.tanh(c))
# return (c, h)
def backward(self, grad_ys):
hps = self.hps
w_params = []
g_params = []
b_params = []
for p in self.param_names:
g, b, w = self.params[p][1:4]
w_params.append(w)
g_params.append(g)
b_params.append(b)
params = w_params + g_params + b_params
nparams = len(params)
nsegments = len(self.segments)
# memory efficient gradients by recomputing forward pass
if nsegments > 0:
param_steps = []
input_grads = []
for i in range(nsegments-1,-1,-1):
with tf.name_scope("b_seg_%04d" % i):
h_grads = grad_ys[i*hps.recompute : (i+1)*hps.recompute]
if i == nsegments-1:
c_grad = tf.zeros(h_grads[0].get_shape())
else:
fc_matmul_op = get_parents(h_grads[0], "BlocksparseMatmulDX")[0]
# delay matmul to avoid memory expansion till just prior to use
add_control_input(fc_matmul_op, h_grad.op)
h_grads[-1] = ew.add(h_grads[-1], h_grad)
s = self.segments[i]
x = self.inputs.pop()
c_prev, h_prev = s[0]
c_next = s[-1][0]
h_next = [ seg[1] for seg in s[1:] ]
# ensure the forward segments are computed in the backward pass only.
add_control_input(c_prev.op, h_grads[-1].op)
add_control_input(h_prev.op, h_grads[-1].op)
grads = tf.gradients( [c_next] + h_next, params + [c_prev, h_prev, x], [c_grad] + h_grads, aggregation_method=agg_method)
param_steps.append(grads[0:nparams])
c_grad = grads[nparams+0]
h_grad = grads[nparams+1]
input_grads.insert(0, grads[nparams+2])
#h_grad = tf.check_numerics(h_grad, "h_grad "+str(i)+"/"+str(nsegments))
#c_grad = tf.check_numerics(c_grad, "c_grad "+str(i)+"/"+str(nsegments))
#input_grads[0] = tf.check_numerics(input_grads[0], "input_grad "+str(i))
param_grads = []
for i in range(nparams):
param_grads.append(tf.add_n([ g[i] for g in param_steps]))
# Normal gradients for small models
else:
grads = tf.gradients(self.outputs, params + self.inputs, grad_ys, aggregation_method=agg_method)
param_grads = grads[0:nparams]
input_grads = grads[nparams:]
# group param grad matmuls to efficiently accumulate
if False:
for i, p in enumerate(self.param_names):
# a and m are already grouped
if 'x' not in p:
param_grads[i] = group_param_grads(param_grads[i])
# debug
if False:
for i, p in enumerate(self.param_names):
n = len(self.param_names)
param_grads[i+0*n] = tf.check_numerics(param_grads[i+0*n], p+" w")
param_grads[i+1*n] = tf.check_numerics(param_grads[i+1*n], p+" g")
param_grads[i+2*n] = tf.check_numerics(param_grads[i+2*n], p+" b")
for i, p in enumerate(input_grads):
input_grads[i] = tf.check_numerics(input_grads[i], "input_grads "+str(i))
grads = list(zip(param_grads, params))
return grads, input_grads
class LSTM_scott(object):
def __init__(self, hps, train, scope='lstm'):
self.hps = hps
self.train = train
self.scope = scope
def forward(self, inputs, states, ema=None):
hps = self.hps
bsmm = hps.bsmm
with tf.variable_scope(self.scope) as scope:
self.param_names = list("amifou")
for i in range(1 if hps.share_isteps else hps.isteps):
self.param_names.append("h%d" % i)
self.params = dict()
for p in self.param_names:
bsmm_p, size = (bsmm["x"], hps.nproj_in) if p in "am" else (bsmm[p], hps.nhidden)
b_init = ones_initializer() if p == 'f' else zeros_initializer()
w = hps.get_variable("w_" + p, bsmm_p.w_shape, bsmm_p.identity_init())
g = hps.get_variable("g_" + p, [size], ones_initializer())
b = hps.get_variable("b_" + p, [size], b_init)
if ema is not None:
w = ema.average(w)
g = ema.average(g)
b = ema.average(b)
wc = ew.float_cast(w, dtype=hps.dtype)
self.params[p] = (wc, g, b, w)
c, h = tf.unstack(states, num=2)
c = ew.float_cast(c, dtype=hps.dtype)
h = ew.float_cast(h, dtype=hps.dtype)
wm, gm, bm = self.params["m"][0:3]
wa, ga, ba = self.params["a"][0:3]
self.inputs = inputs
self.outputs = []
self.segments = []
for xgroup in inputs:
if hps.recompute and self.train:
# We compute the gradient one segment at a time, so prevent tf.gradients from going too far.
# We also want to add control inputs to the start of the segment so having wrappers
# around the segment inputs is handy.
seg = [(tf.stop_gradient(c),tf.stop_gradient(h))]
self.segments.append(seg)
# delay input expansion to just prior to use (saves memory)
with tf.control_dependencies([h]):
xwm = bsmm["x"](xgroup, wm, dw_dtype=hps.dw_dtype)
xwa = bsmm["x"](xgroup, wa, dw_dtype=hps.dw_dtype)
xwm = tf.split(xwm, hps.x_group_size, 1 - hps.axis)
xwa = tf.split(xwa, hps.x_group_size, 1 - hps.axis)
masks = []
for m, a in zip(xwm, xwa):
m = layer_norm(m, gm, bm, axis=hps.axis)
a = layer_norm(a, ga, ba, axis=hps.axis)
c, h, mask = self.cell(c, h, m, a)
_masks = [mask]
for _ in range(1, hps.lsteps):
c, h, mask = self.cell(c, h, None, None)
_masks.append(mask)
masks.append(_masks)
self.outputs.append(h)
if hps.recompute and self.train:
with tf.name_scope("f_seg_%04d_%d" % (len(self.segments)-1, len(seg)-1)):
c_seg, h_seg = seg[0]
with tf.control_dependencies([ h_seg ]):
xwm = bsmm["x"](xgroup, wm, dw_dtype=hps.dw_dtype)
xwa = bsmm["x"](xgroup, wa, dw_dtype=hps.dw_dtype)
xwm = tf.split(xwm, hps.x_group_size, 1 - hps.axis)
xwa = tf.split(xwa, hps.x_group_size, 1 - hps.axis)
for m, a, mask in zip(xwm, xwa, masks):
m = layer_norm(m, gm, bm, axis=hps.axis)
a = layer_norm(a, ga, ba, axis=hps.axis)
c_seg, h_seg, _ = self.cell(c_seg, h_seg, m, a, mask[0])
for i in range(1, hps.lsteps):
c_seg, h_seg, _ = self.cell(c_seg, h_seg, None, None, mask[i])
seg.append((c_seg, h_seg))
c = ew.float_cast(c, dtype=tf.float32)
h = ew.float_cast(h, dtype=tf.float32)
states = tf.stack([c, h], 0)
# We calculate the gradient internally.
# Don't let other layer's gradients flow into here.
# This is possible because the last cell has free c and h
# params that are populated with zeros in the gradient pass.
outputs = [tf.stop_gradient(x) for x in self.outputs]
return outputs, states
def linear(self, p, h, relu=False):
hps = self.hps
w, g, b = self.params[p][0:3]
h = hps.bsmm[p](h, w, dw_dtype=hps.dw_dtype)
return layer_norm(h, g, b, relu=relu, axis=hps.axis)
def cell(self, c, h, m, a, mask=None):
hps = self.hps
assert hps.isteps >= 2, "multiply and add steps of mLSTM require 2 internal steps"
for step in range(hps.isteps):
# we can share one set of params for all isteps
p = "h%d" % (0 if hps.share_isteps else step)
if step == 0:
h = self.linear(p, h)
if m is not None:
if hps.sproj_add is None:
h = ew.multiply(h, m)
else:
h = hps.sproj_add.scatter_mul(h, m)
elif step == 1:
h = self.linear(p, h)
if a is not None:
if hps.sproj_mul is None:
h = ew.add(h, a)
else:
h = hps.sproj_mul.scatter_add(h, a)
h = ew.relu(h)
else:
h = self.linear(p, h, relu=True)
i = self.linear("i", h)
f = self.linear("f", h)
o = self.linear("o", h)
u = self.linear("u", h)
# apply update dropout, saving mask if we need to recompute forward pass
if self.train and hps.dropout > 0:
if mask is None:
u, mask = ew.dropout(u, keep_prob=1.0-hps.dropout)
else:
u = ew.dropout(u, mask=mask)
else:
mask = None
c, h = ew.fused_lstm_gates(c, i, f, o, u)
return c, h, mask
# i = ew.sigmoid(i)
# f = ew.sigmoid(f)
# o = ew.sigmoid(o)
# u = ew.tanh(u)
# c = ew.add(ew.multiply(f, c), ew.multiply(i, u))
# h = ew.multiply(o, ew.tanh(c))
# return (c, h)
def backward(self, grad_ys):
hps = self.hps
w_params = []
g_params = []
b_params = []
for p in self.param_names:
g, b, w = self.params[p][1:4]
w_params.append(w)
g_params.append(g)
b_params.append(b)
params = w_params + g_params + b_params
nparams = len(params)
nsegments = len(self.segments)
# memory efficient gradients by recomputing forward pass
if nsegments > 0:
param_steps = []
input_grads = []
for i in range(nsegments-1,-1,-1):
with tf.name_scope("b_seg_%04d" % i):
h_grads = grad_ys[i*hps.recompute : (i+1)*hps.recompute]
if i == nsegments-1:
c_grad = tf.zeros(h_grads[0].get_shape())
else:
fc_matmul_op = get_parents(h_grads[0], "BlocksparseMatmulDX")[0]
# delay matmul to avoid memory expansion till just prior to use
add_control_input(fc_matmul_op, h_grad.op)
h_grads[-1] = ew.add(h_grads[-1], h_grad)
s = self.segments[i]
x = self.inputs.pop()
c_prev, h_prev = s[0]
c_next = s[-1][0]
h_next = [ seg[1] for seg in s[1:] ]
# ensure the forward segments are computed in the backward pass only.
add_control_input(c_prev.op, h_grads[-1].op)
add_control_input(h_prev.op, h_grads[-1].op)
grads = tf.gradients( [c_next] + h_next, params + [c_prev, h_prev, x], [c_grad] + h_grads, aggregation_method=agg_method)
param_steps.append(grads[0:nparams])
c_grad = grads[nparams+0]
h_grad = grads[nparams+1]
input_grads.insert(0, grads[nparams+2])
param_grads = []
for i in range(nparams):
param_grads.append(tf.add_n([ g[i] for g in param_steps]))
# Normal gradients for small models
else:
grads = tf.gradients(self.outputs, params + self.inputs, grad_ys, aggregation_method=agg_method)
param_grads = grads[0:nparams]
input_grads = grads[nparams:]
# group param grad matmuls to efficiently accumulate
for i, p in enumerate(self.param_names):
# a and m are already grouped
if p not in 'am':
param_grads[i] = group_param_grads(param_grads[i])
grads = list(zip(param_grads, params))
return grads, input_grads
class RNN(object):
def __init__(self, hps, train, scope='rnn'):
self.hps = hps
self.train = train
self.scope = scope
def forward(self, inputs, states, ema=None):
hps = self.hps
bsmm = hps.bsmm
with tf.variable_scope(self.scope) as scope:
self.param_names = list("am")
for i in range(1 if hps.share_isteps else hps.isteps):
self.param_names.append("h%d" % i)
self.params = dict()
for p in self.param_names:
bsmm_p, size = (bsmm["x"], hps.nproj_in) if p in "am" else (bsmm[p], hps.nhidden)
w = hps.get_variable("w_" + p, bsmm_p.w_shape, bsmm_p.identity_init())
g = hps.get_variable("g_" + p, [size], ones_initializer())
b = hps.get_variable("b_" + p, [size], zeros_initializer())
if ema is not None:
w = ema.average(w)
g = ema.average(g)
b = ema.average(b)
wc = ew.float_cast(w, dtype=hps.dtype)
self.params[p] = (wc, g, b, w)
c, h = tf.unstack(states, num=2)
h = ew.float_cast(h, dtype=hps.dtype)
wm, gm, bm = self.params["m"][0:3]
wa, ga, ba = self.params["a"][0:3]
self.inputs = inputs
self.outputs = []
self.segments = []
for xgroup in inputs:
# delay input expansion to just prior to use (saves memory)
with tf.control_dependencies([h]):
xwm = bsmm["x"](xgroup, wm, dw_dtype=hps.dw_dtype)
xwa = bsmm["x"](xgroup, wa, dw_dtype=hps.dw_dtype)
xwm = tf.split(xwm, hps.x_group_size, 1 - hps.axis)
xwa = tf.split(xwa, hps.x_group_size, 1 - hps.axis)
masks = []
for m, a in zip(xwm, xwa):
m = layer_norm(m, gm, bm, axis=hps.axis)
a = layer_norm(a, ga, ba, axis=hps.axis)
h = self.cell(h, m, a)
self.outputs.append(h)
h = ew.float_cast(h, dtype=tf.float32)
states = tf.stack([c, h], 0)
# We calculate the gradient internally.
# Don't let other layer's gradients flow into here.
# This is possible because the last cell has free c and h
# params that are populated with zeros in the gradient pass.
outputs = [tf.stop_gradient(x) for x in self.outputs]
return outputs, states
def linear(self, p, h, relu=False):
hps = self.hps
w, g, b = self.params[p][0:3]
h = hps.bsmm[p](h, w, dw_dtype=hps.dw_dtype)
return layer_norm(h, g, b, relu=relu, axis=hps.axis)
def cell(self, h, m, a):
hps = self.hps
assert hps.isteps >= 2, "multiply and add steps of mLSTM require 2 internal steps"
for step in range(hps.isteps):
# we can share one set of params for all isteps
p = "h%d" % (0 if hps.share_isteps else step)
if step == 0:
h = self.linear(p, h)
if hps.sproj_add is None:
h = ew.multiply(h, m)
else:
h = hps.sproj_add.scatter_mul(h, m)
elif step == 1:
h = self.linear(p, h)
if hps.sproj_mul is None:
h = ew.add(h, a)
else:
h = hps.sproj_mul.scatter_add(h, a)
h = ew.relu(h)
else:
h = self.linear(p, h, relu=True)
return h
def backward(self, grad_ys):
hps = self.hps
w_params = []
g_params = []
b_params = []
for p in self.param_names:
g, b, w = self.params[p][1:4]
w_params.append(w)
g_params.append(g)
b_params.append(b)
params = w_params + g_params + b_params
nparams = len(params)
# Normal gradients for small models
grads = tf.gradients(self.outputs, params + self.inputs, grad_ys, aggregation_method=agg_method)
param_grads = grads[0:nparams]
input_grads = grads[nparams:]
# group param grad matmuls to efficiently accumulate
for i, p in enumerate(self.param_names):
# a and m are already grouped
if p not in 'am':
param_grads[i] = group_param_grads(param_grads[i])
grads = list(zip(param_grads, params))
return grads, input_grads
def nodesort(ops):
return sorted(ops, key=lambda op: op.name)
def print_graph(tsort=True):
g = tf.get_default_graph()
if tsort:
from toposort import toposort
control_outputs = dict()
for op in g.get_operations():
for control_input in op._control_inputs:
if control_input in control_outputs:
control_outputs[control_input].append(op)
else:
control_outputs[control_input] = [op]
def children(op):
result = set(op for out in op.outputs for op in out.consumers())
if op in control_outputs:
result.update(control_outputs[op])
return result
deps = dict()
for op in g.get_operations():
deps[op] = children(op)
graph = toposort(deps)
for ops in graph:
for op in nodesort(ops):
print(op.name)
if "Add" in op.name:
for i in op.inputs:
print(" " + i.name)
print("")
else:
for op in g.get_operations():
print(op.name)
if "Add" in op.name:
for i in op.inputs:
print(" " + i.name)
print("")
'''
Adam optimizer
'''
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999, epsilon=1e-8, gamma=0.):
updates = []
if type(cost_or_grads) is not list:
grads = tf.gradients(cost_or_grads, params, aggregation_method=agg_method)
else:
grads = cost_or_grads
# optimizer state: name explicitly and keep out of the trainable collection
t = tf.Variable(1., name='adam_t', trainable=False)
lr_t = lr * tf.sqrt((1. - tf.pow(mom2, t))) / (1. - tf.pow(mom1, t))
updates.append(t.assign_add(1))
for p, g in zip(params, grads):
mg = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adam_mg', trainable=False)
if mom1 > 0:
v = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adam_v', trainable=False)
v_t = mom1 * v + (1. - mom1) * g
updates.append(v.assign(v_t))
else:
v_t = g
mg_t = mom2 * mg + (1. - mom2) * tf.square(g)
delta_t = v_t / (tf.sqrt(mg_t) + epsilon)
if gamma > 0:
if gamma == 1:
delta_t *= tf.maximum(1., abs(p))
else:
delta_t *= tf.maximum(gamma, abs(p))/gamma
p_t = p - lr_t * delta_t
updates.append(mg.assign(mg_t))
updates.append(p.assign(p_t))
return tf.group(*updates)
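# Usage sketch (hypothetical; assumes a TF1-style graph, session, and a scalar cost):
#
#   params = tf.trainable_variables()
#   train_op = adam_updates(params, cost, lr=1e-3, mom1=0.9, mom2=0.999)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_op)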
'''
Adamax optimizer
'''
def adamax_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
updates = []
if type(cost_or_grads) is not list:
grads = tf.gradients(cost_or_grads, params, aggregation_method=agg_method)
else:
grads = cost_or_grads
for p, g in zip(params, grads):
mg = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adamax_mg', trainable=False)
if mom1 > 0:
v = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adamax_v', trainable=False)
v_t = mom1 * v + (1. - mom1) * g
updates.append(v.assign(v_t))
else:
v_t = g
mg_t = tf.maximum(mom2 * mg, abs(g))
p_t = p - lr * v_t / (mg_t + 1e-8)
updates.append(mg.assign(mg_t))
updates.append(p.assign(p_t))
return tf.group(*updates)
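# Note: adamax replaces Adam's second-moment EMA with an exponentially weighted
# infinity norm, u_t = max(mom2 * u_{t-1}, |g_t|), which is why no bias-correction
# term appears in the denominator above.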
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.training import slot_creator
from tensorflow.python.training import optimizer
from blocksparse.utils import _op_module, scalar_constant, is_param_casted
from blocksparse.ewops import float_cast
from blocksparse.quantize import quantize
############################## AdamOptimizer #####################################
adam_op = _op_module.adam
blocksparse_adam_op = _op_module.blocksparse_adam
class AdamOptimizer(optimizer.Optimizer):
def __init__(self,
learning_rate=3e-4, beta1=0.9, beta2=0.999, epsilon=1e-8, clip_sigmas=0.0,
norm_scale=None, grad_scale=1.0, saturate=0.0, zero_infs=False, zero_nans=False,
gated=False, param_qspec=None, mean_qspec=None, var_qspec=None,
fp16=False, zero_init_variables=False, name="Adam"):
super().__init__(False, name)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.saturate = saturate
self.zero_infs = zero_infs
self.zero_nans = zero_nans
self.gated = gated
self.param_qspec = param_qspec
self.mean_qspec = mean_qspec
self.var_qspec = var_qspec
self.name = name
self.norm_scale = [] if norm_scale is None else [norm_scale]
self.fp16 = fp16
beta1_init = 0.0 if zero_init_variables else beta1
beta2_init = 0.0 if zero_init_variables else beta2
with tf.device("/cpu:0"), tf.variable_scope("adam_beta"):
one = scalar_constant(1.0, dtype=tf.float32)
self.beta1_power = tf.Variable(initial_value=beta1_init, name="beta1_power", trainable=False)
self.beta2_power = tf.Variable(initial_value=beta2_init, name="beta2_power", trainable=False)
self.beta1_t = scalar_constant(beta1, dtype=tf.float32)
self.beta2_t = scalar_constant(beta2, dtype=tf.float32)
self.clip_sigma = scalar_constant(clip_sigmas, dtype=tf.float32)
self.grad_scale = scalar_constant(grad_scale, dtype=tf.float32)
self.lr = scalar_constant(learning_rate, dtype=tf.float32) * tf.sqrt(one - self.beta2_power) / (one - self.beta1_power)
def _get_beta_accumulators(self):
return self.beta1_power, self.beta2_power
def _non_slot_variables(self):
return self._get_beta_accumulators()
def _create_slots(self, params):
# Create slots for the first and second moments.
with tf.device("/gpu:0"), tf.control_dependencies(None):
for param in params:
# only use fp16 for larger params that benefit from memory savings
dtype = tf.float16 if self.fp16 and param.shape.num_elements() >= 8*1024 else tf.float32 #is_param_casted(param)
self._get_or_make_slot(param, tf.zeros(param.shape, dtype=dtype), "Mean", self.name)
self._get_or_make_slot(param, tf.zeros(param.shape, dtype=dtype), "Var", self.name)
def _apply_dense(self, grad, param):
m = self.get_slot(param, "Mean")
v = self.get_slot(param, "Var")
gate = getattr(param, "gate", None)
gate = [gate] if self.gated and gate is not None else []
op = adam_op(grad, param, m, v, self.lr, self.grad_scale, self.clip_sigma, self.norm_scale, gate,
decay_mean=self.beta1, decay_var=self.beta2, epsilon=self.epsilon,
saturate=self.saturate, zero_infs=self.zero_infs, zero_nans=self.zero_nans, lazy_emb=hasattr(grad, "lazy"))
updates = list()
if self.param_qspec is not None:
updates.append(param.assign(quantize(op.out_param, self.param_qspec, name="param_" + param.op.name)))
else:
updates.append(op.out_param)
if self.mean_qspec is not None:
updates.append(m.assign(quantize(op.out_mean, self.mean_qspec, name="mean_" + param.op.name)))
if self.var_qspec is not None:
updates.append(v.assign(quantize(op.out_var, self.var_qspec, name="var_" + param.op.name)))
return tf.group(*updates) if len(updates) > 1 else updates[0]
def _apply_sparse(self, grad, param):
raise NotImplementedError("Sparse gradient updates are not supported.")
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies([ self.lr ]), tf.device("/cpu:0"):
update_beta1 = self.beta1_power.assign(self.beta1_power * self.beta1_t)
update_beta2 = self.beta2_power.assign(self.beta2_power * self.beta2_t)
return tf.group(*update_ops + [update_beta1, update_beta2], name=name_scope)
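# Usage sketch (hypothetical; assumes the fused CUDA adam kernel is built and a GPU is visible):
#
#   opt = AdamOptimizer(learning_rate=3e-4, fp16=True)
#   train_op = opt.minimize(loss, var_list=tf.trainable_variables())
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_op)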
############################## AdafactorOptimizer #####################################
adafactor1d_op = _op_module.adafactor1d
adafactor2d_op = _op_module.adafactor2d
class AdafactorOptimizer(optimizer.Optimizer):
def __init__(self, learning_rate=5e-4, beta2=0.999, epsilon=1e-30, clip_thresh=1.0,
norm_scale=None, grad_scale=1.0, saturate=0.0, zero_infs=False, zero_nans=False,
name="Adafactor", zero_init_variables=False):
super().__init__(False, name)
self.epsilon = epsilon
self.saturate = saturate
self.zero_infs = zero_infs
self.zero_nans = zero_nans
self.name = name
self.norm_scale = [] if norm_scale is None else [norm_scale]
beta2_init = 0.0 if zero_init_variables else beta2
with tf.device("/cpu:0"), tf.variable_scope("adafactor_decay"):
one = scalar_constant(1.0, dtype=tf.float32)
self.decay1_power = tf.Variable(initial_value=beta2_init, name="decay1_power", trainable=False)
self.decay2_power = tf.Variable(initial_value=beta2_init*beta2_init, name="decay2_power", trainable=False)
self.learn_rate = scalar_constant(learning_rate, dtype=tf.float32)
self.clip_thresh = scalar_constant(clip_thresh, dtype=tf.float32)
self.grad_scale = scalar_constant(grad_scale, dtype=tf.float32)
self.decay_t = scalar_constant(beta2, dtype=tf.float32)
self.decay = self.decay_t * (one - self.decay1_power) / (one - self.decay2_power)
def _get_beta_accumulators(self):
return self.decay1_power, self.decay2_power
def _non_slot_variables(self):
return self._get_beta_accumulators()
def _create_slots(self, params):
# Create slots for the first and second moments.
for param in params:
if param.shape.ndims == 2 and param.shape[0].value > 1:
self._get_or_make_slot(param, tf.zeros(param.shape[1].value), "cv", self.name + "CV")
self._get_or_make_slot(param, tf.zeros(param.shape[0].value), "rv", self.name + "RV")
elif param.shape.ndims == 1 or (param.shape.ndims == 2 and param.shape[0].value == 1):
self._get_or_make_slot(param, tf.zeros(param.shape.num_elements()), "cv", self.name + "CV")
else:
raise ValueError("only 1 or 2d params are supported")
def _apply_dense(self, grad, param):
if param.shape.ndims == 2 and param.shape[0].value > 1:
cv = self.get_slot(param, "cv")
rv = self.get_slot(param, "rv")
return adafactor2d_op(param, cv, rv, grad,
self.decay, self.learn_rate, self.grad_scale, self.clip_thresh, self.norm_scale, epsilon=self.epsilon,
saturate=self.saturate, zero_infs=self.zero_infs, zero_nans=self.zero_nans).out_param
elif param.shape.ndims == 1 or (param.shape.ndims == 2 and param.shape[0].value == 1):
cv = self.get_slot(param, "cv")
return adafactor1d_op(param, cv, grad,
self.decay, self.learn_rate, self.grad_scale, self.clip_thresh, self.norm_scale, epsilon=self.epsilon,
saturate=self.saturate, zero_infs=self.zero_infs, zero_nans=self.zero_nans).out_param
else:
raise ValueError("only 1 or 2d params are supported")
def _apply_sparse(self, grad, param):
raise NotImplementedError("Sparse gradient updates are not supported.")
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies([ self.decay ]), tf.device("/cpu:0"):
update_decay1 = self.decay1_power.assign(self.decay1_power * self.decay_t)
update_decay2 = self.decay2_power.assign(self.decay2_power * self.decay_t)
return tf.group(*update_ops + [update_decay1, update_decay2], name=name_scope)
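# Note: for a 2d param of shape (C, K), Adafactor stores only a row vector "rv"
# (length C) and a column vector "cv" (length K) of second-moment statistics and
# approximates the per-element variance as v[c,k] ~= rv[c] * cv[k] / sum(rv),
# cutting optimizer memory from O(C*K) to O(C+K) (Shazeer & Stern, 2018).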
############################## ClipGlobalNorm #####################################
clip_global_norm_op = _op_module.clip_global_norm
def clip_by_global_norm(grads, clip_norm=1.0, grad_scale=1.0, saturate=0.0, zero_infs=False, zero_nans=False):
grad_float = list()
grad_ehalf = list()
grad_bhalf = list()
for grad in grads:
if grad.dtype is tf.float32:
grad_float.append(grad)
elif grad.dtype is tf.float16:
grad_ehalf.append(grad)
elif grad.dtype is tf.bfloat16:
grad_bhalf.append(grad)
else:
raise ValueError("unsupported grad dtype")
with tf.device("/gpu:0"):
global_norm, norm_scale, _ = clip_global_norm_op(
scalar_constant(grad_scale, dtype=tf.float32),
scalar_constant(clip_norm, dtype=tf.float32),
grad_float, grad_ehalf, grad_bhalf,
saturate=saturate, zero_infs=zero_infs, zero_nans=zero_nans)
return global_norm, norm_scale
def global_norm(grads, grad_scale=1.0, saturate=0.0, zero_infs=False, zero_nans=False):
gn, _ = clip_by_global_norm(grads, clip_norm=9e9, grad_scale=grad_scale, saturate=saturate, zero_infs=zero_infs, zero_nans=zero_nans)
return gn
# old function name
def ClipGlobalNorm(grads, clip_norm=1.0, grad_scale=1.0, saturate=0.0, zero_infs=False, zero_nans=False):
return clip_by_global_norm(grads, clip_norm=clip_norm, grad_scale=grad_scale, saturate=saturate, zero_infs=zero_infs, zero_nans=zero_nans)
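# Usage sketch (hypothetical; grads may mix fp32/fp16/bf16 tensors):
#
#   grads = tf.gradients(loss, params)
#   gn, norm_scale = clip_by_global_norm(grads, clip_norm=1.0)
#   # norm_scale can be passed to AdamOptimizer(norm_scale=norm_scale, ...) so the
#   # fused update rescales gradients in place instead of materializing clipped copies.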
############################## Exponential Moving Average #####################################
ema_op = _op_module.ema
class Ema(object):
def __init__(self, decay=0.999, gated=False, fp16=False, name="Ema"):
self.decay = decay
self.gated = gated
self.fp16 = fp16
self.name = name
self.averages = dict()
def apply(self, params, qspec=None):
with tf.device("/gpu:0"), tf.control_dependencies(None):
for param in params:
if self.fp16 == 2 or (self.fp16 and is_param_casted(param)):
# only use fp16 for params that are explicitly cast to fp16 before use
init = float_cast(param.initialized_value(), dtype=tf.float16)
dtype = tf.float16
else:
init = param.initialized_value()
dtype = tf.float32
with tf.variable_scope(None, param.op.name + "/" + self.name):
# use the Identity read op output as the key
# this lets us look up ema vars by Cast op outputs
self.averages[param.value()] = tf.get_variable("ema", dtype=dtype, initializer=init, trainable=False)
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, param)
ema_ops = []
for param in params:
ema = self.averages[param.value()]
gate = getattr(param, "gate", None)
gate = [gate] if self.gated and gate is not None else []
op = ema_op(ema, param, gate, decay=self.decay)
if qspec is not None:
ema_ops.append(ema.assign(quantize(op, qspec, name="ema_" + param.op.name)))
else:
ema_ops.append(op)
return tf.group(*ema_ops)
def average(self, param):
if isinstance(param, tf.Variable):
# this is just a raw param
key = param.value()
elif isinstance(param, tf.Tensor):
# we're given a Cast op output
# TODO: maybe traverse deeper?
key = param.op.inputs[0]
else:
raise TypeError("bad param type")
return self.averages.get(key, None)
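# Usage sketch (hypothetical):
#
#   ema = Ema(decay=0.999)
#   ema_op = ema.apply(tf.trainable_variables())
#   train_op = tf.group(train_op, ema_op)   # update the shadow vars after each step
#   w_avg = ema.average(w)                  # shadow variable, or None if untracked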
############################## Group LASSO / Blocksparse L2 decay #####################################
l2_decay_op = _op_module.blocksparse_l2_decay
def _check_param_shape(param, gate=None):
assert len(param.shape) == 3 and param.shape[1].value == param.shape[2].value and param.shape[1].value in (8,16,32,64)
if gate is not None:
assert gate.shape.num_elements() == param.shape[0].value
def blocksparse_l2_decay(param, gate=None, rate=0.05, epsilon=1e-12):
_check_param_shape(param, gate)
gate = [gate] if gate is not None else []
return l2_decay_op(param, scalar_constant(rate, dtype=tf.float32), gate, epsilon=epsilon)
############################## Blocksparse Pruning #####################################
blocksparse_norm_op = _op_module.blocksparse_norm
blocksparse_prune_op = _op_module.blocksparse_prune
blocksparse_threshold_prune_op = _op_module.blocksparse_threshold_prune
def blocksparse_norm(param, norm="max"):
_check_param_shape(param)
return blocksparse_norm_op(param, norm_type=1 if norm.lower() == "l2" else 0)
def blocksparse_prune(param, gate, step, sparsity=None, threshold=None, norm="max", frequency=1):
_check_param_shape(param, gate)
# exactly one of sparsity or threshold must be set
assert (sparsity is None) ^ (threshold is None)
if sparsity is not None:
# apply pruning to the moving average
norms = blocksparse_norm(param, norm=norm)
k = scalar_constant(param.shape[0].value, dtype=tf.int32)
_, idx = tf.nn.top_k(norms, k=k, sorted=True)
return blocksparse_prune_op(gate, idx, scalar_constant(sparsity, dtype=tf.float32), step, frequency=frequency)
elif threshold is not None:
norm = 1 if norm.lower() == "l2" else 0
return blocksparse_threshold_prune_op(gate, param, scalar_constant(threshold, dtype=tf.float32), step, frequency=frequency, norm_type=norm)
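# Usage sketch (hypothetical; `gate` is a per-block variable of shape (blocks,)):
#
#   step = tf.train.get_or_create_global_step()
#   prune_op = blocksparse_prune(w, gate, step, sparsity=0.9, norm="max", frequency=100)
#   # blocks whose gate is driven to zero can later be compacted on the host
#   # with BlocksparseMatMul.prune(param, gate).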
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import scipy.sparse as sparse
from tensorflow.python.framework import ops
from tensorflow.python.ops.init_ops import Initializer
from blocksparse.utils import _op_module, z_order_2d, ceil_div, scalar_constant
import blocksparse.ewops as ew
blocksparse_matmul = _op_module.blocksparse_matmul
blocksparse_matmul_dx = _op_module.blocksparse_matmul_dx
blocksparse_matmul_dw = _op_module.blocksparse_matmul_dw
blocksparse_matmul_dwa = _op_module.blocksparse_matmul_dwa
blocksparse_matmul_dg = _op_module.blocksparse_matmul_dg
blocksparse_reduced_dw = _op_module.blocksparse_reduced_dw
l2_normalize_ck = _op_module.l2_normalize_ck
l2_normalize_grad_ck = _op_module.l2_normalize_grad_ck
l2_normalize_gain_ck = _op_module.l2_normalize_gain_ck
l2_normalize_gain_grad_ck = _op_module.l2_normalize_gain_grad_ck
identity_init = _op_module.blocksparse_matmul_identity_init
# save a bit of gpu memory by only creating one copy of identical constant lookup tables
g_lookup_cache = dict()
g_lut_idx = 0
def get_constant(lut, name):
global g_lookup_cache
global g_lut_idx
default_graph = tf.get_default_graph()
if name not in g_lookup_cache:
g_lookup_cache[name] = list()
for np_entry, tf_entry in g_lookup_cache[name]:
if np_entry.dtype == lut.dtype and np_entry.shape == lut.shape and tf_entry.graph is default_graph:
if np.abs(np_entry.astype(np.int64) - lut.astype(np.int64)).sum() == 0:
# found an exact match
return tf_entry
#print(name, lut.size)
#tf_entry = tf.constant(lut, name=name+"_lut")
with tf.control_dependencies(None):
tf_entry = tf.get_variable(f"{name}_lut_{g_lut_idx}", initializer=lut.view(np.int64), trainable=False)
g_lut_idx += 1
g_lookup_cache[name].append( (lut, tf_entry) )
return tf_entry
class IdentityInit(Initializer):
def __init__(self, lut, CB, KB, blocks, bsize, scale=1.0):
self.lut = lut
self.CB = CB
self.KB = KB
self.blocks = blocks
self.bsize = bsize
self.scale = scale
def __call__(self, shape, dtype=None, partition_info=None):
assert shape[0] == self.blocks
#lut = get_constant(self.lut, name="updat")
with tf.control_dependencies(None):
lut = tf.constant(self.lut, name="identity_lut")
return identity_init(lut, CB=self.CB, KB=self.KB, blocks=self.blocks, bsize=self.bsize, scale=self.scale)
SEG_MAX = (1<<63)-1
class BlocksparseMatMul(object):
def __getstate__(self):
return (self.layout, self.bsize, self.axis, self.z_order, self.name)
def __setstate__(self, state):
self.__init__(*state)
def __init__(self, layout, block_size=32, feature_axis=0, z_order=True, name=None):
if (feature_axis == 0 and block_size in (8,16,32)) or \
(feature_axis == 1 and block_size in (32,64)):
self.axis = feature_axis
self.bsize = block_size
else:
raise ValueError("Unsupported block size with this feature axis")
assert len(layout.shape) == 2
CB, KB = layout.shape
group_sizes = layout.sum(axis=0) # assume symmetrical transpose
max_group = group_sizes.max()
min_group = group_sizes[np.nonzero(group_sizes)].min()
if max_group / min_group > 2.0:
segment_size = max(ceil_div(max_group,4), min_group*2)
else:
segment_size = SEG_MAX # not worth segmenting
#print(max_group, min_group, segment_size, KB)
#segment_size = SEG_MAX
# don't create any segments smaller than this
seg_min = max(ceil_div(segment_size, 4), 4)
# segment_size = seg_min = 2
if layout.dtype != np.int32:
layout = layout.astype(np.int32)
# convert to csr for vastly more efficient python iteration on large matrices
csr = sparse.csr_matrix(layout)
cs, ks, vs = sparse.find(csr) # ks is in sorted order by default
blocks = len(vs)
idx = list(range(blocks))
idxT = sorted(idx, key=lambda i: cs[i]) # transpose view
# morton order (z-order) the blocks for efficient L2 cache utilization across all 3 ops
updat_list = list()
if z_order:
blk = 0
for _, i in sorted( [ (z_order_2d(cs[i], ks[i]), i) for i in range(blocks) ] ):
vs[i] = blk
updat_list.append((cs[i], ks[i]))
blk += 1
else:
# row contiguous
updat_list = list( zip(cs, ks) )
vs = list(range(blocks))
# cs = [b[0] for b in updat_list]
# ks = [b[1] for b in updat_list]
self.updat_list = updat_list
self.updat_lut = np.array(updat_list, dtype=np.int32)
fsetup = self.xprop_lut(KB, cs, ks, vs, idx, segment_size, seg_min)
bsetup = self.xprop_lut(CB, ks, cs, vs, idxT, segment_size, seg_min)
self.fprop_list, self.fprop_lut, self.l2_lut, self.fprop_shared, self.l2_shared, self.fprop_segments, self.fprop_locks = fsetup
self.bprop_list, self.bprop_lut, _, self.bprop_shared, _, self.bprop_segments, self.bprop_locks = bsetup
if name is None:
name = "BlocksparseMatMul"
self.z_order = z_order
self.name = name
self.flops = blocks * block_size * block_size * 2
self.blocks = blocks
self.w_shape = (blocks, block_size, block_size)
self.g_shape = (blocks,)
self.count = 0
self.CB = CB
self.KB = KB
self.C = CB * block_size
self.K = KB * block_size
self.sparsity = round(float(blocks) / float(CB * KB), 3)
# save boolean version for serialization purposes, TODO save csr version
self.layout = layout > 0
def i_shape(self, N): return (N, self.C) if self.axis else (self.C, N)
def o_shape(self, N): return (N, self.K) if self.axis else (self.K, N)
# return the coordinate in the layout that corresponds to a given block id
def block_coord(self, block): return self.updat_list[block]
# TODO: write a kernel to do this on the gpu to allow dynamic sparsity
def xprop_lut(self, KB, cs, ks, vs, idx, max_seg, min_seg):
locks = 0
lockids = dict()
seg = list()
segs = list()
col = list()
cols = list()
kset = set()
# get a count of channels for each k
channels = [0 for k in range(KB)]
for i in idx:
channels[ks[i]] += 1
K = ks[idx[0]]
seg_count = 0
for i in idx:
c, k, v = cs[i], ks[i], vs[i]
kset.add(k)
# check for new value of k
if k != K:
# keep track of unsegmented columns (for l2norm and testing)
cols.append( (K, col) )
col = list()
# append segment for previous K and start a new one
if len(seg):
segs.append( (K, seg) )
seg = list()
seg_count += 1
# for more than one segment we need to use spin locks to sync accumulation
if seg_count > 1:
locks += 1
lockids[K] = locks
seg_count = 0
K = k
col.append( (c, v) )
seg.append( (c, v) )
channels[k] -= 1
# split columns up into segments, but don't let them be too small for efficiency's sake
if len(seg) >= max_seg and channels[k] >= min_seg:
segs.append( (k, seg) )
seg = list()
seg_count += 1
# append last value of k
cols.append( (k, col) )
if len(seg):
segs.append( (k, seg) )
seg_count += 1
if seg_count > 1:
locks += 1
lockids[k] = locks
# add in any empty k blocks at the end
for k in range(KB):
if k not in kset:
segs.append( (k, []) )
cols.append( (k, []) )
#else:
# raise ValueError("sparsity mask has empty mappings. Not yet supported with feature_axis=0")
#segs.sort(key=lambda x: len(x[1]), reverse=True)
# bsmm lut
offset = len(segs) * 4
xp_lut = np.empty(offset + len(vs)*2, dtype=np.int32)
xp_max = 0
for i, (k, lut) in enumerate(segs):
# build the lut header: int2 offset, lut_size, K, lock_id
xp_lut[i*4:(i+1)*4] = offset//2, len(lut), k, lockids.get(k, 0)
xp_max = max(xp_max, len(lut))
for entry in lut:
xp_lut[offset:offset+2] = entry
offset += 2
# l2 norm lut (columns not broken up into segments)
offset = len(cols) * 4
l2_siz = offset + len(vs)
# we use int64 views into the lut for tf compatibility reasons.
if l2_siz & 1:
l2_siz += 1
l2_lut = np.zeros(l2_siz, dtype=np.int32)
l2_max = 0
for i, (k, lut) in enumerate(cols):
# build the lut header: int offset, lut_size, K, unused pad
l2_lut[i*4:(i+1)*4] = offset, len(lut), k, 0
l2_max = max(l2_max, len(lut))
for entry in lut:
l2_lut[offset] = entry[1]
offset += 1
return cols, xp_lut, l2_lut, xp_max*8, l2_max*4, len(segs), locks
def prune(self, param, gate):
new_blocks = np.sum(gate != 0.0)
if new_blocks != self.blocks:
new_param = np.empty((new_blocks, self.bsize, self.bsize), dtype=param.dtype)
new_w = 0
layout = self.layout
for w, (c, k) in enumerate(self.updat_list):
if gate[w] == 0.0:
layout[c,k] = False
else:
new_param[new_w,:,:] = param[w,:,:]
new_w += 1
else:
new_param = param
sparsity = round(100 * float(new_blocks) / float(self.CB * self.KB), 1)
print("prune: ", self.blocks, new_blocks, sparsity)
return new_param, np.ones((new_blocks,), dtype=gate.dtype)
def ortho_init(self):
def _initializer(shape, dtype=np.float32, partition_info=None):
W = np.empty(self.w_shape, dtype=dtype)
bsize = self.bsize
if self.sparsity < 1.0:
print("%s ortho_init sparsity(%.2f)" % (self.name, self.sparsity))
# different block columns are already mostly orthogonal due to sparsity
# So just make columns within each block of block_size orthogonal
for k, lut, _ in self.fprop_list:
shape = (len(lut) * bsize, bsize)
a = np.random.normal(0.0, 1.0, shape).astype(dtype)
u, _, v = np.linalg.svd(a, full_matrices=False)
if u.shape != shape:
u = v
for i, (c, w) in enumerate(lut):
W[w,:,:] = u[i*bsize:(i+1)*bsize,:]
else:
print("%s ortho_init dense" % (self.name,))
shape = (self.C, self.K)
a = np.random.normal(0.0, 1.0, shape).astype(dtype)
u, _, v = np.linalg.svd(a, full_matrices=False)
if u.shape != shape:
u = v
for w, (c, k) in enumerate(self.updat_list):
W[w,:,:] = u[c*bsize:(c+1)*bsize, k*bsize:(k+1)*bsize]
return W
return _initializer
def identity_init(self, scale=1.0):
return IdentityInit(self.updat_lut, self.CB, self.KB, self.blocks, self.bsize, scale=scale)
# def _initializer(shape, dtype=np.float32, partition_info=None):
# print("%s identity_init sparsity(%.2f)" % (self.name, self.sparsity))
# W = np.zeros(self.w_shape, dtype=dtype)
# for w in range(self.blocks):
# cb, kb = self.updat_list[w]
# if (cb % self.KB) == (kb % self.CB):
# W[w] = np.eye(self.bsize, dtype=dtype)
# return W
# return _initializer
def checker_init(self):
def _initializer(shape, dtype=np.float32, partition_info=None):
gate = np.empty(self.blocks, dtype=dtype)
for w, (c, k) in enumerate(self.updat_list):
gate[w] = (c & 1) ^ (k & 1) ^ 1
return gate
return _initializer
# grid = []
# for c in range(5):
# row = []
# for k in range(5):
# row.append((c & 1) ^ (k & 1) ^ 1)
# grid.append(row)
# for row in grid:
# print(row)
def fprop_test(self, I, W, gate=None):
bsize = self.bsize
if self.axis:
O = np.zeros((I.shape[0], self.KB, bsize))
I = I.reshape((-1, self.CB, bsize))
for k, lut in self.fprop_list:
for c, w in lut:
O[:,k,:] += np.dot( I[:,c,:], W[w,:,:] ) # NC x CK = NK
return O.reshape(I.shape[0], -1)
else:
N = I[0].size
O = np.zeros((self.KB, bsize, N))
I = I.reshape((self.CB, bsize, N))
for k, lut in self.fprop_list:
if gate is None:
for c, w in lut:
O[k,:,:] += np.dot( W[w,:,:].T, I[c,:,:] ) # CK.T x CN = KN
else:
for c, w in lut:
if gate[w] != 0.0:
O[k,:,:] += np.dot( W[w,:,:].T, I[c,:,:] ) * gate[w] # CK.T x CN = KN
return O.reshape(-1, N)
def bprop_test(self, E, W, gate=None):
bsize = self.bsize
if self.axis:
B = np.zeros((E.shape[0], self.CB, bsize))
E = E.reshape((-1, self.KB, bsize))
for c, lut in self.bprop_list:
for k, w in lut:
B[:,c,:] += np.dot( E[:,k,:], W[w,:,:].T ) # NK x CK.T = NC
return B.reshape(E.shape[0], -1)
else:
N = E[0].size
B = np.zeros((self.CB, bsize, N))
E = E.reshape((self.KB, bsize, N))
for c, lut in self.bprop_list:
if gate is None:
for k, w in lut:
B[c,:,:] += np.dot( W[w,:,:], E[k,:,:] ) # CK x KN = CN
else:
for k, w in lut:
if gate[w] != 0.0:
B[c,:,:] += np.dot( W[w,:,:], E[k,:,:] ) * gate[w] # CK x KN = CN
return B.reshape(-1, N)
def updat_test(self, I, E, gate=None, dw_gated=False):
U = np.zeros(self.w_shape)
bsize = self.bsize
if self.axis:
I = I.reshape((-1, self.CB, bsize))
E = E.reshape((-1, self.KB, bsize))
for w, (c, k) in enumerate(self.updat_list):
U[w,:,:] = np.dot( I[:,c,:].T, E[:,k,:] ) # NC.T x NK = CK
else:
I = I.reshape((self.CB, bsize, -1))
E = E.reshape((self.KB, bsize, -1))
if not dw_gated or gate is None:
for w, (c, k) in enumerate(self.updat_list):
U[w,:,:] = np.dot( I[c,:,:], E[k,:,:].T ) # CN x KN.T = CK
else:
for w, (c, k) in enumerate(self.updat_list):
if gate[w] != 0.0:
U[w,:,:] = np.dot( I[c,:,:], E[k,:,:].T ) * gate[w] # CN x KN.T = CK
return U
def l2_normalize_test(self, W, epsilon=1e-12):
W = W.copy()
for k, lut in self.fprop_list:
ws = [w for c, w in lut]
W2 = W[ws,:,:].reshape(-1, self.bsize)
norm = np.sqrt(np.maximum(np.sum(np.square(W2), axis=0, keepdims=True), epsilon))
for w in ws:
W[w,:,:] /= norm
return W
def l2_normalize_grad_test(self, W, U, epsilon=1e-12):
for k, lut in self.fprop_list:
ws = [w for c, w in lut]
W2 = W[ws,:,:].reshape(-1, self.bsize)
U2 = U[ws,:,:].reshape(-1, self.bsize)
sum_sqr_w = np.sum(np.square(W2), axis=0, keepdims=True)
max_w = np.maximum(sum_sqr_w, epsilon)
norm_grad = ( U2 + W2 * (sum_sqr_w >= epsilon) * np.sum(-U2 * W2 / max_w, axis=0, keepdims=True) ) / np.sqrt(max_w)
norm_grad = norm_grad.reshape(-1, self.bsize, self.bsize)
for i, w in enumerate(ws):
U[w,:,:] = norm_grad[i]
return U
def l2_normalize(self, W, gain=None, epsilon=1e-12, dtype=tf.float32):
l2_lut = get_constant(self.l2_lut, name="l2")
if gain is None:
W, _ = l2_normalize_ck(W, l2_lut, TY=dtype, epsilon=epsilon, K=self.K, shared=self.l2_shared, bsize=self.bsize )
else:
W, _ = l2_normalize_gain_ck(W, gain, l2_lut, TY=dtype, epsilon=epsilon, K=self.K, shared=self.l2_shared, bsize=self.bsize )
return W
def matmul(self, I, W, gate=None, gate_grad=False, dw_gated=False, name=None, bench=0):
return self.__call__(I, W, gate=gate, gate_grad=gate_grad, dw_gated=dw_gated, name=name, bench=bench)
def __call__(self, I, W, gate=None, gate_grad=False, dw_gated=False, name=None, bench=0):
if name is None:
name = self.name + ("_%06d" % self.count)
self.count += 1
if gate is None:
gate = []
else:
gate = [gate]
#assert self.bsize == 8 and self.axis == 0, "blocksparse gating only implemented for block_size 8 on axis 0"
fprop_lut = get_constant(self.fprop_lut, name="fprop")
bprop_lut = get_constant(self.bprop_lut, name="bprop")
updat_lut = get_constant(self.updat_lut, name="updat")
O, _ = blocksparse_matmul(
I, W, fprop_lut, bprop_lut, updat_lut, gate,
gated_dw=bool(dw_gated), gate_grad=bool(gate_grad),
blocks=self.blocks, bsize=self.bsize, axis=self.axis, C=self.C, K=self.K,
segments=self.fprop_segments, segments_dx=self.bprop_segments,
locks=self.fprop_locks, locks_dx=self.bprop_locks,
shared=self.fprop_shared, shared_dx=self.bprop_shared, bench=bench, name=name
)
#print(O.op.name, O.op.device)
return O
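# Usage sketch (hypothetical shapes; feature_axis=0, so activations are (features, N)):
#
#   layout = (np.random.rand(64, 64) < 0.1).astype(np.int32)  # 64x64 grid of 32x32 blocks
#   bsmm = BlocksparseMatMul(layout, block_size=32, feature_axis=0)
#   x = tf.placeholder(tf.float32, shape=bsmm.i_shape(128))   # (C, N) = (2048, 128)
#   w = tf.get_variable("w", bsmm.w_shape, initializer=bsmm.identity_init())
#   y = bsmm(x, w)                                            # (K, N) = (2048, 128)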
@ops.RegisterGradient("BlocksparseMatmul")
def blocksparse_matmul_grad(op, dy, temp):
blocks = op.get_attr("blocks")
bsize = op.get_attr("bsize")
axis = op.get_attr("axis")
C = op.get_attr("C")
K = op.get_attr("K")
segments = op.get_attr("segments_dx")
shared = op.get_attr("shared_dx")
locks = op.get_attr("locks_dx")
gated_dw = op.get_attr("gated_dw")
gate_grad = op.get_attr("gate_grad")
bench = op.get_attr("bench")
x = op.inputs[0]
w = op.inputs[1]
lut_dx = op.inputs[3]
lut_dw = op.inputs[4]
gate = [op.inputs[5]] if len(op.inputs) > 5 else []
name = op.name.split('/')[-1]
dx, _ = blocksparse_matmul_dx(
dy, w, lut_dx, gate, gated_dw=gated_dw, gate_grad=gate_grad,
blocks=blocks, bsize=bsize, axis=axis, C=K, K=C, # swap C,K
segments=segments, locks=locks, shared=shared,
bench=bench, name=name+"_bprop")
dw = blocksparse_matmul_dw(
[x], [dy], lut_dw, gate, gated_dw=gated_dw, gate_grad=gate_grad,
blocks=blocks, bsize=bsize, axis=axis, C=C, K=K,
bench=bench, name=name+"_updat")
# print(dx.op.name, dx.op.device)
# print(dw.op.name, dw.op.device)
if len(gate) == 0:
return (dx, dw, None, None, None)
elif gate_grad:
dw, dg = blocksparse_matmul_dg(dw, w, gate[0])
else:
dg = None
return (dx, dw, None, None, None, dg)
@ops.RegisterGradient("L2NormalizeCK")
def blocksparse_l2_normalize_grad_ck(op, dy, sum_sqr_x):
epsilon = op.get_attr("epsilon")
K = op.get_attr("K")
shared = op.get_attr("shared")
bsize = op.get_attr("bsize")
grad_x = l2_normalize_grad_ck(dy, op.inputs[0], op.outputs[1], op.inputs[1], epsilon=epsilon, K=K, shared=shared, bsize=bsize)
return (grad_x, None)
@ops.RegisterGradient("L2NormalizeGainCK")
def blocksparse_l2_normalize_grad_ck(op, dy, sum_sqr_x):
epsilon = op.get_attr("epsilon")
K = op.get_attr("K")
shared = op.get_attr("shared")
bsize = op.get_attr("bsize")
grad_x, grad_g = l2_normalize_gain_grad_ck(
dy, op.inputs[0], op.inputs[1], op.outputs[1], op.inputs[2], epsilon=epsilon, K=K, shared=shared, bsize=bsize)
return (grad_x, grad_g, None)
# Utils for graph re-writing
def block_reduced_full_dw(param_grad, scale=1.0, norm="max", group_size=8):
# max(abs()) or l2_norm()
norm = 0 if norm.lower() == "max" else 1
# host side scalar, if zero will cause compute for this op to be skipped.
scale = scalar_constant(scale, dtype=tf.float32)
assert group_size <= 8
# backward walk param grad to find BlocksparseMatmulDW ops
# this should only hit BlocksparseMatmulDWs, BlocksparseMatmulDGs, AddNs or FloatCasts
ops = get_parents(param_grad, "BlocksparseMatmulDW")
if len(ops) < 1:
raise ValueError("BlocksparseMatmulDW op not found")
# this sorting is dependent on the op names being correctly ordered.
ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)
# use the parent scope for the new ops
scope = ops[-1].name.split('/')
scope = '/'.join(scope[0:-1])
# we're going to be using absolute names, so clear name_scope
with tf.name_scope(None):
dw_full = None
offset = 0
while offset < len(ops):
xs = [op.inputs[0] for op in ops[offset:offset+group_size] ]
gs = [op.inputs[1] for op in ops[offset:offset+group_size] ]
# Get the corresponding activation grad op for the last param grad op in the group
bprop = None
for consumer in gs[-1].consumers():
if consumer.type == "BlocksparseMatmulDX":
bprop = consumer
break
assert bprop is not None
# get attributes of first op in group
up = ops[offset]
bsize = up.get_attr("bsize")
axis = up.get_attr("axis")
name = "%s/block_reduced_full_dw_%03d" % (scope, offset)
dw_full = [] if dw_full is None else [dw_full]
dw_full, _, _ = blocksparse_reduced_dw(xs, gs, scale, dw_full, bsize=bsize, norm=norm, axis=axis, name=name)
# force the dw op before any more time steps are processed
bprop._add_control_input(dw_full.op)
offset += group_size
return dw_full
def group_param_grads(param_grad, group_size=8):
assert group_size <= 8
# backward walk param grad to find BlocksparseMatmulDW ops
# this should only hit BlocksparseMatmulDWs, BlocksparseMatmulDGs, AddNs or FloatCasts
ops = get_parents(param_grad, "BlocksparseMatmulDW")
if len(ops) <= 1:
return param_grad
# this sorting is dependent on the op names being correctly ordered.
ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)
# for x in ops:
# print(x.name)
# print("")
# exit()
segment_size = len(ops)
if ops[0].get_attr("gate_grad") and len(ops[0].inputs) == 4:
gate_count = dict()
max_count = 0
for op in ops:
gate = op.inputs[3]
count = gate_count.get(gate, 0) + 1
gate_count[gate] = count
max_count = max(max_count, count)
for count in gate_count.values():
if count != max_count:
raise ValueError("Non-uniform gate broadcasting detected.")
segment_size = max_count
if group_size > segment_size:
group_size = segment_size
else:
assert segment_size % group_size == 0
# nothing to rewrite here.
if segment_size == 1:
return param_grad
# use the parent scope for the new ops
scope = ops[-1].name.split('/')
scope = '/'.join(scope[0:-1])
# we're going to be using absolute names, so clear name_scope
with tf.name_scope(None):
dw = None
dws = list()
offset = 0
seg_cnt = 0
while offset < len(ops):
xs = [op.inputs[0] for op in ops[offset:offset+group_size] ]
gs = [op.inputs[1] for op in ops[offset:offset+group_size] ]
# Get the corresponding activation grad op for the last param grad op in the group
bprop = None
for consumer in gs[-1].consumers():
if consumer.type == "BlocksparseMatmulDX":
bprop = consumer
break
assert bprop is not None
# get attributes of first op in group
up = ops[offset]
blocks = up.get_attr("blocks")
bsize = up.get_attr("bsize")
axis = up.get_attr("axis")
gated_dw = up.get_attr("gated_dw")
gate_grad = up.get_attr("gate_grad")
C = up.get_attr("C")
K = up.get_attr("K")
bench = up.get_attr("bench") // len(xs)
lut = up.inputs[2]
name = "%s/matmul_concat_updat_%03d" % (scope, offset)
gate = [up.inputs[3]] if len(up.inputs) > 3 else []
# The first op needs to allocate a new dw tensor
if dw is None:
dw = blocksparse_matmul_dw(
xs, gs, lut, gate, gated_dw=gated_dw,
gate_grad=gate_grad, blocks=blocks, bsize=bsize, axis=axis,
C=C, K=K, bench=bench, name=name)
# subsequent ops can just accumulate in place
else:
dw = blocksparse_matmul_dwa(
xs, gs, lut, dw, gate, gated_dw=gated_dw,
gate_grad=gate_grad, blocks=blocks, bsize=bsize, axis=axis,
C=C, K=K, bench=bench, name=name)
# force the dw op before any more time steps are processed
bprop._add_control_input(dw.op)
seg_cnt += group_size
offset += group_size
if gate_grad and seg_cnt >= segment_size:
seg_cnt = 0
dws.append(dw)
dw = None
if gate_grad:
for i, dw in enumerate(dws):
# for op in ops[i*group_size:(i+1)*group_size]:
# print(op.name)
# print()
dw_op = ops[i*segment_size:(i+1)*segment_size][-1]
dws[i] = group_dg_grads(dw_op, dw, scope)
# add up final dw values in groups of 4 for a good mix of performance and memory use
dw = ew.add_n8_op(dws[0:4]) if len(dws) > 1 else dws[0]
for i in range(4, len(dws), 4):
dw = ew.add_n8_op(dws[i:i+4] + [dw])
# splice in these grad op types sitting on top of the param
if param_grad.op.type in ("Cast", "FloatCast", "L2NormalizeGradCK", "L2NormalizeGainGradCK"):
param_grad.op._update_input(0, dw)
dw = param_grad
elif param_grad.op.type not in ("AddN", "AddN8", "BlocksparseMatmulDW","BlocksparseMatmulDG"):
raise ValueError("Unexpected grad op type:", param_grad.op.type, param_grad.op.name)
return dw
def group_dg_grads(bsmm_dw_op, dw, scope):
# splice the dg + addn ops out of the graph and replace with a single dg op
# that takes in the final accumulated dw value
dg_op = bsmm_dw_op.outputs[0].consumers()[0]
assert dg_op.type == "BlocksparseMatmulDG"
dw, dg = blocksparse_matmul_dg(dw, *dg_op.inputs[1:], name=f"{scope}/BlocksparseMatmulDG")
# splice old add_n op out of graph
addn_op = dg_op.outputs[1].consumers()[0]
addn_ops = list()
addn_ops.append(addn_op)
if addn_op.type[0:3] != "Add":
raise ValueError(f"bad type: {addn_ops[0].type} Cause: this segment does not share a broadcasted gate.")
elif addn_op.type == "AddN8":
while True:
addn_op = addn_op.outputs[0].consumers()[0]
if addn_op.type == "AddN8":
addn_ops.append(addn_op)
else:
break
# print(addn_op.name)
# for i in addn_op.inputs:
# print(i.name)
# print()
addn = addn_ops[-1].outputs[0]
dg_consumers = addn.consumers()
#for op in dg_consumers:
assert len(dg_consumers) > 0, "raw dg grad not supported"
#print(addn.name)
for dg_consumer in dg_consumers:
found = False
#print(dg_consumer.name)
for i, t in enumerate(dg_consumer.inputs):
#print(i, t.name)
if t is addn:
#print(f"splicing dg into: {dg_consumer.name} at {i}")
dg_consumer._update_input(i, dg)
found = True
break
if not found:
print(f"splice failed for {dg_consumer.name}")
return dw
def get_bsmm_dx_ops(param_grad):
dw_ops = get_parents(param_grad, "BlocksparseMatmulDW")
dx_ops = list()
# this sorting is dependent on the op names being correctly ordered.
dw_ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)
for dw_op in dw_ops:
# Get the corresponding activation grad op
dx_op = None
for op in dw_op.inputs[1].consumers():
if op.type=="BlocksparseMatmulDX":
dx_op = op
break
assert dx_op is not None
dx_ops.append(dx_op)
return dx_ops
def get_parents(grad, op_type):
if grad.op.type == op_type:
return [grad.op]
ops = list()
wave = set([grad.op])
while wave:
new_wave = set()
for op in wave:
# print(op.name)
# for i in op.inputs:
# print(" ", i.name)
# print()
for parent in (t.op for t in op.inputs):
if parent.type == op_type:
ops.append(parent)
else:
new_wave.add(parent)
wave = new_wave
return ops
def largest_block(dim):
for blk in (32,16,8):
if dim % blk == 0:
return (blk, dim // blk)
raise ValueError("dimension not multiple of 8, 16, or 32")
############################## Sparse Projection Ops #####################################
gather_scatter_op = _op_module.gather_scatter
scatter_add_mul_op = _op_module.scatter_add_mul
scatter_mul_grad_op = _op_module.scatter_mul_grad
OP_GAT = 0
OP_SCT = 1
OP_ADD = 2
OP_MUL = 3
class SparseProj(object):
def __getstate__(self):
return (self.nhidden, self.nproj, self.gather_lut, self.name)
def __setstate__(self, state):
self.__init__(state[0], nproj=state[1], gather_lut=state[2], name=state[3])
def __init__(self, nhidden, nproj=None, proj_stride=None, block_size=32, gather_lut=None, name=None):
if gather_lut is None:
gather_lut = np.arange(nhidden, dtype=np.int32)
if nproj is not None:
assert nproj <= nhidden
np.random.shuffle(gather_lut)
gather_lut = np.sort(gather_lut[0:nproj])
elif proj_stride is not None:
assert proj_stride <= nhidden
# trim to multiple of block_size
gather_max = ((nhidden // proj_stride) // block_size) * block_size * proj_stride
gather_lut = gather_lut[:gather_max:proj_stride].copy()
nproj = gather_lut.size
else:
raise ValueError("missing nproj, proj_stride or gather_lut")
if name is None:
name = "SparseProj"
# build reverse mapping
scatter_lut = np.empty(nhidden, dtype=np.int32)
scatter_lut[:] = -1
scatter_lut[gather_lut] = np.arange(nproj, dtype=np.int32)
self.name = name
self.gather_lut = gather_lut
self.scatter_lut = scatter_lut
self.nhidden = nhidden
self.nproj = nproj
def gather(self, x):
assert x.get_shape()[0].value == self.nhidden
gather_lut = get_constant(self.gather_lut, name="gather")
scatter_lut = get_constant(self.scatter_lut, name="scatter")
return gather_scatter_op(x, gather_lut, scatter_lut, C=self.nhidden, K=self.nproj, op=OP_GAT)
def scatter(self, x):
assert x.get_shape()[0].value == self.nproj
gather_lut = get_constant(self.gather_lut, name="gather")
scatter_lut = get_constant(self.scatter_lut, name="scatter")
return gather_scatter_op(x, scatter_lut, gather_lut, C=self.nproj, K=self.nhidden, op=OP_SCT)
def scatter_add(self, x, y):
assert x.get_shape()[0].value == self.nhidden
assert y.get_shape()[0].value == self.nproj
gather_lut = get_constant(self.gather_lut, name="gather")
scatter_lut = get_constant(self.scatter_lut, name="scatter")
return scatter_add_mul_op(x, y, gather_lut, scatter_lut, C=self.nproj, K=self.nhidden, op=OP_ADD)
def scatter_mul(self, x, y):
assert x.get_shape()[0].value == self.nhidden
assert y.get_shape()[0].value == self.nproj
gather_lut = get_constant(self.gather_lut, name="gather")
scatter_lut = get_constant(self.scatter_lut, name="scatter")
return scatter_add_mul_op(x, y, gather_lut, scatter_lut, C=self.nproj, K=self.nhidden, op=OP_MUL)
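# Usage sketch (hedged; shapes and names are illustrative only):
#   proj  = SparseProj(nhidden=4096, proj_stride=4)    # gathers every 4th unit
#   h_sub = proj.gather(h)               # [nhidden, ...] -> [nproj, ...]
#   h_big = proj.scatter(h_sub)          # back to [nhidden, ...], zeros elsewhere
#   h_mix = proj.scatter_add(h, h_sub)   # h with h_sub added at the projected rows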
@ops.RegisterGradient("GatherScatter")
def gather_scatter_grad(op, dy):
dx = gather_scatter_op(dy, op.inputs[2], op.inputs[1], C=op.get_attr("K"), K=op.get_attr("C"), op=1-op.get_attr("op"))
return dx, None, None
@ops.RegisterGradient("ScatterAddMul")
def scatter_add_mul_grad(op, dz):
if op.get_attr("op") == OP_ADD:
dx = dz
dy = gather_scatter_op(dz, op.inputs[2], op.inputs[3], C=op.get_attr("K"), K=op.get_attr("C"), op=OP_GAT)
else:
dx, dy = scatter_mul_grad_op(dz, *op.inputs[0:3], C=op.get_attr("C"), K=op.get_attr("K"))
return dx, dy, None, None
# REGISTER_OP("GatherScatter")
# .Input("x: T")
# .Input("gather: int32")
# .Input("scatter: int32")
# .Output("y: T")
# .Attr("T: {half, float, bfloat16}")
# .Attr("C: int")
# .Attr("K: int")
# .Attr("op: int")
# REGISTER_OP("ScatterAddMul")
# .Input("x: T")
# .Input("y: T")
# .Input("gather: int32")
# .Input("scatter: int32")
# .Output("z: T")
# .Attr("T: {half, float, bfloat16}")
# .Attr("C: int")
# .Attr("K: int")
# .Attr("op: int")
# REGISTER_OP("ScatterMulGrad")
# .Input("dz: T")
# .Input("x: T")
# .Input("y: T")
# .Input("gather: int32")
# .Output("dx: T")
# .Output("dy: T")
# .Attr("T: {half, float, bfloat16}")
# .Attr("C: int")
# .Attr("K: int")
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops, function
from blocksparse.utils import _op_module, scalar_constant
embedding_lookup_op = _op_module.embedding_lookup
embedding_lookup_grad_op = _op_module.embedding_lookup_grad
float_cast_op = _op_module.float_cast
def embedding_lookup(emb, idx, sort_grad=True, bench=0, use_tf=False):
dev = emb.op.device.lower()
if use_tf or not dev or "cpu" in dev:
#print("######################### Using TF embeding:", dev)
y = tf.nn.embedding_lookup(convert_gradient_to_tensor(emb), idx)
else:
y = embedding_lookup_op(emb, idx, scalar_constant(emb.shape[0].value, dtype=tf.int32), sorted=sort_grad, bench=bench)
return y
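# Usage sketch (hedged): emb is a [vocab_size, width] parameter and idx holds
# integer token ids; sort_grad=True sorts ids in the gradient pass, presumably
# for deterministic gradient accumulation:
#   y = embedding_lookup(emb, idx)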
@ops.RegisterGradient("EmbeddingLookup")
def embedding_lookup_grad(op, dy):
sort = op.get_attr("sorted")
bench = op.get_attr("bench")
dw = embedding_lookup_grad_op(dy, op.inputs[1], op.inputs[2], sorted=sort, bench=bench)
if dy.dtype is not tf.float32:
dw = float_cast_op(dw, TY=dy.dtype, dx_dtype=dy.dtype)
return dw, None, None
@function.Defun(
python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
shape_func=lambda op: [op.inputs[0].shape])
def convert_gradient_to_tensor(x):
return x |
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, get_entropy
############################## Quantization #####################################
quantize_op = _op_module.quantize
log_stats_op = _op_module.log_stats
class QuantizeSpec(object):
def __init__(self, ebits=4, fbits=3, emax=None, stochastic=0, denorm=True, frequency=1024, mode=0, bias_pad=2, stdv_mul=4.0, logfile="", copy=None):
if copy is None:
if emax is None:
emax = (1 << (ebits-1)) - 1 # default symmetric
self.ebits = ebits
self.fbits = fbits
self.emax = emax
self.stoch = stochastic
self.denorm = denorm
self.freq = frequency
self.mode = mode
self.bias_pad = bias_pad
self.stdv_mul = stdv_mul
self.logfile = logfile
else:
self.ebits = copy.ebits
self.fbits = copy.fbits
self.emax = copy.emax
self.stoch = copy.stoch
self.denorm = copy.denorm
self.freq = copy.freq
self.mode = copy.mode
self.bias_pad = copy.bias_pad
self.stdv_mul = copy.stdv_mul
self.logfile = copy.logfile or logfile
log_init = set()
quant_headers = [
"sat_pct",
"ftz_pct",
"exp_max",
"exp_min",
"max",
"mean",
"stdv",
"mean+stdv5",
"max_stat_lo",
"max_stat_hi",
"count",
"name",
]
log_timestamp = None
def get_timestamp():
global log_timestamp
if log_timestamp is None:
log_timestamp = time.strftime('%Y_%m_%d_%H_%M_%S')
return log_timestamp
def quantize(x, qspec, b_qspec=None, name=None):
if name is None:
name = "quantize"
if b_qspec is None:
b_qspec = qspec
if x.dtype.base_dtype == tf.bfloat16:
for spec in (qspec, b_qspec):
assert spec.fbits <= 7, "bfloat only supports up to 7 fractional bits"
global log_init
for spec in (qspec, b_qspec):
if spec.logfile and spec.logfile not in log_init:
with open(spec.logfile, 'w') as log:
log.write("\t".join(quant_headers) + "\n")
log_init.add(spec.logfile)
e = [get_entropy()] if qspec.stoch == 2 else []
reuse = tf.get_variable_scope().reuse
with tf.device("/cpu:0"), tf.variable_scope("quantize"):
exp_f = tf.get_variable(name + "_exp_f", dtype=tf.int64, initializer=np.int64(qspec.emax), trainable=False)
exp_b = tf.get_variable(name + "_exp_b", dtype=tf.int64, initializer=np.int64(b_qspec.emax), trainable=False)
return quantize_op(x, exp_f, exp_b, e,
ebits = qspec.ebits,
fbits = qspec.fbits,
stoch = qspec.stoch,
denorm = qspec.denorm,
freq = (not reuse and qspec.freq),
mode = qspec.mode,
bias_pad = qspec.bias_pad,
stdv_mul = qspec.stdv_mul,
logfile = qspec.logfile,
b_ebits = b_qspec.ebits,
b_fbits = b_qspec.fbits,
b_stoch = b_qspec.stoch,
b_denorm = b_qspec.denorm,
b_freq = (not reuse and b_qspec.freq),
b_mode = b_qspec.mode,
b_bias_pad = b_qspec.bias_pad,
b_stdv_mul = b_qspec.stdv_mul,
b_logfile = b_qspec.logfile,
name = name,
)
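# Usage sketch (hedged; specs are illustrative): quantize with separate
# forward and backward quantization specs:
#   fwd = QuantizeSpec(ebits=4, fbits=3)                # fp8-like e4m3
#   bwd = QuantizeSpec(ebits=5, fbits=2, stochastic=2)  # stochastic rounding
#   y = quantize(x, fwd, b_qspec=bwd, name="h1")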
@ops.RegisterGradient("Quantize")
def quantize_grad(op, dy):
e = [get_entropy()] if op.get_attr("b_stoch") == 2 else []
dx = quantize_op(dy, op.inputs[2], op.inputs[1], e,
ebits = op.get_attr("b_ebits"),
fbits = op.get_attr("b_fbits"),
stoch = op.get_attr("b_stoch"),
denorm = op.get_attr("b_denorm"),
freq = op.get_attr("b_freq"),
mode = op.get_attr("b_mode"),
bias_pad = op.get_attr("b_bias_pad"),
stdv_mul = op.get_attr("b_stdv_mul"),
logfile = op.get_attr("b_logfile"),
)
return (dx, None, None) if len(op.inputs) == 3 else (dx, None, None, None)
stat_headers = [
"sat_pct",
"ftz_pct",
"max",
"mean",
"stdv",
"mean+stdv5",
"max_stat_lo",
"max_stat_hi",
"count",
"name",
]
def log_stats(x, step, sat_val=65504.0, ftz_val=2.0**-24, freq=512, bfreq=512, logfile="", name=None):
assert freq == 0 or round(np.log2( freq)) == np.log2( freq)
assert bfreq == 0 or round(np.log2(bfreq)) == np.log2(bfreq)
# tack on timestamp if desired
logfile = logfile % { "timestamp" : get_timestamp() }
global log_init
if logfile and logfile not in log_init:
with open(logfile, 'w') as log:
log.write("\t".join(stat_headers) + "\n")
log_init.add(logfile)
pow2 = int(np.log2(freq or bfreq))
first_steps = [1 << p for p in range(pow2)]
return log_stats_op(x, step,
sat_val = sat_val,
ftz_val = ftz_val,
freq = freq,
bfreq = bfreq,
logfile = logfile,
first_steps = first_steps,
name = name or "log_stats")
@ops.RegisterGradient("LogStats")
def log_stats_grad(op, dy):
dx = log_stats_op(dy, op.inputs[1],
sat_val = op.get_attr("sat_val"),
ftz_val = op.get_attr("ftz_val"),
freq = op.get_attr("bfreq"),
bfreq = op.get_attr("bfreq"),
logfile = op.get_attr("logfile"),
first_steps = op.get_attr("first_steps"))
return (dx, None)
# if mpi_rank == 0:
# with tf.device("/gpu:0"), tf.name_scope("LogStats"):
# for i, (grad, param) in enumerate(zip(grads, params)):
# name = param.op.name + "_" + "_".join(str(x) for x in param.shape.as_list())
# grads[i] = ew.log_stats(grad, step, logfile="scale_14.txt", name=name)
# ebits = 4
# fbits = 3
# ebias = 8
# for exp in range(1 << ebits):
# for frac in range(1 << fbits):
# frac /= 1 << fbits
# f8 = (1 + frac) * 2**(exp - ebias)
# l8 = 2**(exp + frac - ebias)
# print("%2d %.3f %9.5f %9.5f" % (exp-ebias, frac, f8, l8))
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, get_entropy, scalar_constant
ew_z_xy_op = _op_module.ew_z_xy
ew_z_xa_op = _op_module.ew_z_xa
ew_dxdy_dzxy_op = _op_module.ew_dxdy_dzxy
ew_dx_dzxa_op = _op_module.ew_dx_dzxa
ew_dx_dzza_op = _op_module.ew_dx_dzza
ew_z_xb_op = _op_module.ew_z_xb
ew_db_dzb_op = _op_module.ew_db_dzb
ew_dxdg_dzxg_op = _op_module.ew_dxdg_dzxg
ADD_OP = 0
SUB_OP = 1
MUL_OP = 2
DIV_OP = 3
MAXIMUM_OP = 4
MINIMUM_OP = 5
NEG_OP = 6
RCP_OP = 7
SQR_OP = 8
SQRT_OP = 9
EXP_OP = 10
LOG_OP = 11
SIG_OP = 12
TANH_OP = 13
RELU_OP = 14
ELU_OP = 15
GELU_OP = 16
SWISH_OP = 17
BIASADD_OP = 18
GAINMUL_OP = 19
ew_names = [
"Add",
"Sub",
"Mul",
"Div",
"Maximum",
"Minimum",
"Neg",
"Rcp",
"Sqr",
"Sqrt",
"Exp",
"Log",
"Sig",
"Tanh",
"Relu",
"Elu",
"Gelu",
"Swish",
"Biasadd",
"Gainmul",
]
def broadcast_check(x, y, ew_op, bc_op, tf_op, name):
xshape = x.shape.as_list()
yshape = y.shape.as_list()
if xshape == yshape:
if name is None: name = ew_names[ew_op]
return ew_z_xy_op(x, y, op=ew_op, name=name)
if bc_op is not None:
if xshape[-1] == yshape[-1]:
if yshape[-1] == y.get_shape().num_elements():
if name is None: name = ew_names[bc_op]
return ew_z_xb_op(x, y, op=bc_op, name=name)
if xshape[-1] == x.get_shape().num_elements():
if name is None: name = ew_names[bc_op]
return ew_z_xb_op(y, x, op=bc_op, name=name)
# fall back to tf for everything else for now..
return tf_op(x, y, name=name)
def add(x, y, name=None): return broadcast_check(x, y, ADD_OP, BIASADD_OP, tf.add, name)
def multiply(x, y, name=None): return broadcast_check(x, y, MUL_OP, GAINMUL_OP, tf.multiply, name)
def subtract(x, y, name=None): return broadcast_check(x, y, SUB_OP, None, tf.subtract, name)
def divide(x, y, name=None): return broadcast_check(x, y, DIV_OP, None, tf.divide, name)
def maximum(x, y, name=None): return broadcast_check(x, y, MAXIMUM_OP, None, tf.maximum, name)
def minimum(x, y, name=None): return broadcast_check(x, y, MINIMUM_OP, None, tf.minimum, name)
def negative(x, name=None): return ew_z_xa_op(x, op= NEG_OP, name=ew_names[ NEG_OP] if name is None else name)
def reciprocal(x, name=None): return ew_z_xa_op(x, op= RCP_OP, name=ew_names[ RCP_OP] if name is None else name)
def square(x, name=None): return ew_z_xa_op(x, op= SQR_OP, name=ew_names[ SQR_OP] if name is None else name)
def sqrt(x, name=None): return ew_z_xa_op(x, op=SQRT_OP, name=ew_names[SQRT_OP] if name is None else name)
def exp(x, name=None): return ew_z_xa_op(x, op= EXP_OP, name=ew_names[ EXP_OP] if name is None else name)
def log(x, name=None): return ew_z_xa_op(x, op= LOG_OP, name=ew_names[ LOG_OP] if name is None else name)
def sigmoid(x, name=None): return ew_z_xa_op(x, op= SIG_OP, name=ew_names[ SIG_OP] if name is None else name)
def tanh(x, name=None): return ew_z_xa_op(x, op=TANH_OP, name=ew_names[TANH_OP] if name is None else name)
def relu(x, name=None): return ew_z_xa_op(x, op=RELU_OP, name=ew_names[RELU_OP] if name is None else name)
# WARNING: the gelu op is not numerically stable; needs more investigation. Use fast_gelu for now.
def elu (x, alpha=1.0, name=None): return ew_z_xa_op(x, op=ELU_OP, alpha=alpha, name=ew_names[ELU_OP] if name is None else name)
def gelu (x, alpha=0.044715, name=None): return ew_z_xa_op(x, op=GELU_OP, alpha=alpha, name=ew_names[GELU_OP] if name is None else name)
def swish (x, alpha=1.0, name=None): return ew_z_xa_op(x, op=SWISH_OP, alpha=alpha, name=ew_names[SWISH_OP] if name is None else name)
def fast_gelu(x, name=None):
return swish(x, alpha=1.702, name=name)
@ops.RegisterGradient("EwZXy")
def ew_z_xy_grad(op, dz):
op_code = op.get_attr("op")
name = ew_names[op_code] + "_grad"
if op_code == ADD_OP:
return (dz, dz)
if op_code == SUB_OP:
return (dz, ew_z_xa_op(dz, op=NEG_OP, name=name))
return ew_dxdy_dzxy_op(dz, op.inputs[0], op.inputs[1], op=op_code, name=name)
@ops.RegisterGradient("EwZXa")
def ew_z_xa_grad(op, dz):
op_code = op.get_attr("op")
alpha = op.get_attr("alpha")
name = ew_names[op_code] + "_grad"
if op_code == NEG_OP:
return ew_z_xa_op(dz, op=NEG_OP, name=name)
# use the z values to compute the grad for these ops;
# this likely saves memory in their typical use
if op_code in (RELU_OP, SIG_OP, TANH_OP):
return ew_dx_dzza_op(dz, op.outputs[0], op=op_code, alpha=alpha, name=name)
return ew_dx_dzxa_op(dz, op.inputs[0], op=op_code, alpha=alpha, name=name)
@ops.RegisterGradient("EwZXb")
def ew_z_xb_grad(op, dz):
op_code = op.get_attr("op")
name = ew_names[op_code] + "_grad"
if op_code == BIASADD_OP:
return (dz, ew_db_dzb_op(dz, op.inputs[1], op=op_code, name=name))
if op_code == GAINMUL_OP:
return ew_dxdg_dzxg_op(dz, op.inputs[0], op.inputs[1], op=op_code, name=name)
raise ValueError("bad op code")
############################## Filter Infinity/Nans + scale #####################################
filter_tensor_op = _op_module.filter_tensor
# set saturate to 65504.0 to saturate fp16 infinities
def filter_tensor(x, scale=1.0, saturate=0.0, zero_infs=False, zero_nans=False):
return filter_tensor_op(x, scalar_constant(scale, dtype=tf.float32), saturate=float(saturate), zero_infs=zero_infs, zero_nans=zero_nans)
# alias to filter_tensor that just does scaling by host side scalar value
def scale_tensor(x, scale=1.0):
return filter_tensor_op(x, scale)
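# Usage sketch (hedged): clean up and rescale fp16 gradients in one pass:
#   g = filter_tensor(g, scale=1.0 / loss_scale, saturate=65504.0,
#                     zero_infs=True, zero_nans=True)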
@ops.RegisterGradient("FilterTensor")
def filter_tensor_grad(op, dy):
return filter_tensor_op(dy, op.inputs[1], saturate=op.get_attr("saturate"), zero_infs=op.get_attr("zero_infs"), zero_nans=op.get_attr("zero_nans")), None
############################## Float Cast #####################################
float_cast_op = _op_module.float_cast
def float_cast(x, dtype, dx_dtype=None, name=None):
dev = x.op.device.lower()
if not dev or "cpu" in dev:
return tf.cast(x, dtype)
dtype = tf.as_dtype(dtype)
if dtype not in (tf.float32, tf.float16, tf.bfloat16):
raise ValueError("Only float32 and float16 dtypes supported.")
# no-op if dtype already matches
if dtype == x.dtype.base_dtype:
return x
# default x dtype for dx
if dx_dtype is None:
dx_dtype = x.dtype.base_dtype
return float_cast_op(x, TY=dtype, dx_dtype=dx_dtype, name=name)
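# Usage sketch (hedged): keep master weights in fp32 but compute in fp16;
# dx_dtype=tf.float32 routes the gradient back at full precision:
#   w16 = float_cast(w, dtype=tf.float16, dx_dtype=tf.float32)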
@ops.RegisterGradient("FloatCast")
def float_cast_grad(op, dz):
dx_dtype = op.get_attr("dx_dtype")
# passthrough
if dz.dtype == dx_dtype:
return dz
return float_cast_op(dz, TY=dx_dtype, dx_dtype=dx_dtype)
############################## Dropout #####################################
gen_dropout_mask_op = _op_module.gen_dropout_mask
apply_dropout_mask_op = _op_module.apply_dropout_mask
def dropout(x, keep_prob, mask=None, mask_shape=None):
keep_prob = scalar_constant(keep_prob)
if mask is None:
if mask_shape is not None and len(mask_shape) > 0:
size = 1
for m_dim, x_dim in zip(mask_shape, x.shape.as_list()):
# we don't currently support placeholder dims when broadcasting the dropout mask
assert m_dim == 1 or m_dim == x_dim, f"incompatible mask_shape: {mask_shape} x.shape: {x.shape}"
size *= m_dim
else:
size = 0
mask = gen_dropout_mask_op(x, get_entropy(), keep_prob, size=size)
if mask_shape is None:
mask_shape = []
return apply_dropout_mask_op(x, mask, keep_prob, mask_shape=mask_shape), mask
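# Usage sketch (hedged; "hidden" is illustrative): broadcast one mask over the
# batch dim (mask_shape dims of 1 are broadcast) and reuse it across calls:
#   y1, mask = dropout(x1, keep_prob=0.9, mask_shape=[1, hidden])
#   y2, _    = dropout(x2, keep_prob=0.9, mask=mask)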
@ops.RegisterGradient("ApplyDropoutMask")
def dropout_grad(op, dy):
mask_shape = op.get_attr("mask_shape")
dx = apply_dropout_mask_op(dy, op.inputs[1], op.inputs[2], mask_shape=mask_shape)
return dx, None, None
############################## Concrete Gate for L0 Norm Pruning #####################################
concrete_gate_op = _op_module.concrete_gate
concrete_gate_grad_op = _op_module.concrete_gate_grad
concrete_gate_infer_op = _op_module.concrete_gate_infer
def concrete_gate(loga, temperature=2.0/3.0, limit_a=-0.1, limit_b=1.1, epsilon=1e-6):
gate, _ = concrete_gate_op(loga, get_entropy(), scalar_constant(temperature, dtype=tf.float32), limit_a=limit_a, limit_b=limit_b, epsilon=epsilon)
return gate
def concrete_gate_infer(loga, limit_a=-0.1, limit_b=1.1):
return concrete_gate_infer_op(loga, limit_a=limit_a, limit_b=limit_b)
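# Usage sketch (hedged): hard-concrete gates for L0-style pruning; loga is a
# trainable logit per gated unit:
#   gate = concrete_gate(loga)        # stochastic, for training
#   gate = concrete_gate_infer(loga)  # deterministic, for inference
#   w_gated = w * gate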
@ops.RegisterGradient("ConcreteGate")
def concrete_gate_grad(op, dg, _):
limit_a = op.get_attr("limit_a")
limit_b = op.get_attr("limit_b")
dloga = concrete_gate_grad_op(dg, op.outputs[1], op.inputs[2], limit_a=limit_a, limit_b=limit_b)
return dloga, None, None
############################## add_n8 #####################################
add_n8_op = _op_module.add_n8
def add_n8(xs, name="AddN"):
if name is None: name = "AddN"
return add_n8_op(xs, name=name)
def add_n(xs, name="AddN"):
if len(xs) == 1:
return xs[0]
if name is None: name = "AddN"
if len(xs) == 2:
return ew_z_xy_op(xs[0], xs[1], op=0, name=name)
total = None
while len(xs):
xs8 = [] if total is None else [total]
while len(xs) and len(xs8) < 8:
xs8.append(xs.pop())
total = add_n8_op(xs8, name=name)
return total
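# Example (hedged): add_n reduces the list in chunks of up to 8 tensors per
# add_n8 kernel call, feeding the running total into the next chunk, so 20
# tensors take 3 kernel launches instead of 19 pairwise adds.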
old_add_n = None
def replace_add_n():
from tensorflow.python.ops import math_ops
global old_add_n
old_add_n = math_ops.add_n
math_ops.add_n = add_n
def restore_add_n():
from tensorflow.python.ops import math_ops
global old_add_n
math_ops.add_n = old_add_n
############################## BiasRelu #####################################
bias_relu_op = _op_module.bias_relu
bias_relu_grad_op = _op_module.bias_relu_grad
bias_grad_op = _op_module.bias_grad
def bias_relu(x, b, axis=-1, relu=False, fast_gelu=False, atomics=True, bench=0, use_tf=False):
if relu and fast_gelu:
raise ValueError("relu and fast_gelu can not both be enabled.")
dev = x.op.device.lower()
if use_tf or not dev or "cpu" in dev:
if b.shape.ndims > 1:
y = x + b
else:
y = tf.nn.bias_add(x, b)
if relu:
y = tf.nn.relu(y)
elif fast_gelu:
y = y * tf.nn.sigmoid(1.702 * y)
return y
relu = 1 if relu else (2 if fast_gelu else 0)
return bias_relu_op(x, b, axis=axis, relu=relu, bench=bench, atomics=atomics)
@ops.RegisterGradient("BiasRelu")
def bias_relu_grad(op, dy):
axis = op.get_attr("axis")
relu = op.get_attr("relu")
atomics = op.get_attr("atomics")
bench = op.get_attr("bench")
if relu:
x_or_y = op.outputs[0] if relu == 1 else op.inputs[0]
dx, db, _ = bias_relu_grad_op(dy, x_or_y, op.inputs[1], axis=axis, relu=relu, atomics=atomics, bench=bench)
return dx, db
db, _ = bias_grad_op(dy, op.inputs[1], axis=axis, atomics=atomics, bench=bench)
return (dy, db)
############################## FancyGather #####################################
fancy_gather_op = _op_module.fancy_gather
fancy_gather_grad_op = _op_module.fancy_gather_grad
def fancy_gather(x, idx, use_tf=False):
x_rank = len(x.shape)
i_rank = len(idx.shape)
assert x_rank > i_rank
dev = x.device.lower()
if use_tf or not dev or "cpu" in dev:
idx = tf.maximum(idx, 0)
flat_shape = tf.concat([[-1], tf.shape(x)[i_rank + 1:]], axis=0)
xx = tf.reshape(x, flat_shape)
ii = tf.expand_dims(
tf.range(0, tf.reduce_prod(tf.shape(x)[:i_rank])) * tf.shape(x)[i_rank] + tf.reshape(idx, [-1]),
1)
return tf.reshape(
tf.gather_nd(xx, ii),
tf.concat([tf.shape(idx), tf.shape(x)[i_rank + 1:]], axis=0),
)
if x_rank > i_rank + 1:
# temp restriction for now... easily fixed
assert x.shape[i_rank + 1:].num_elements() <= 1024
return fancy_gather_op(x, idx, idx_dim=x.shape[i_rank].value)
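# Semantics (worked example): for x of shape [A, B, C] and idx of shape [A],
# fancy_gather(x, idx) returns y of shape [A, C] with y[a] = x[a, idx[a]].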
@ops.RegisterGradient("FancyGather")
def fancy_gather_grad(op, dy):
dx = fancy_gather_grad_op(dy, op.inputs[1], idx_dim=op.get_attr("idx_dim"))
return (dx, None)
############################## ReduceMax #####################################
reduce_max_op = _op_module.reduce_max
reduce_max_grad_op = _op_module.reduce_max_grad
def reduce_max(x, axis, keepdims=False, use_tf=False):
shape = x.shape.as_list()
assert type(axis) is int, "reshape prior to op to support contiguous index ranges"
assert shape[axis] is not None, "reduction over unknown dimension size not supported"
if axis < 0:
axis += len(shape)
dev = x.op.device.lower()
if use_tf or not dev or "cpu" in dev or axis == len(shape)-1:
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
idx_dtype = tf.uint16 if shape[axis] > 256 else tf.uint8
y, a = reduce_max_op(x, axis=axis, keepdims=keepdims, TA=idx_dtype)
return y
@ops.RegisterGradient("ReduceMax")
def reduce_max_grad(op, dy, a):
axis = op.get_attr("axis")
keepdims = op.get_attr("keepdims")
axis_size = op.inputs[0].shape[axis].value
return reduce_max_grad_op(dy, op.outputs[1], axis=axis, axis_size=axis_size, keepdims=keepdims)
############################## AssignAdd #####################################
def assign_add(y, x, name=None):
return _op_module.assign_add_op(y, x, name=name)
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from mpi4py import MPI
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module
from blocksparse.ewops import float_cast
from collections import deque
############################## Nccl Ops #####################################
op_counter = 0
init_num_comms = None
init_prereduce = None
# FIXME(taehoon): remove this
def mpi_size():
return MPI.COMM_WORLD.Get_size()
def allreduce(x, sync_size=0, num_comms=2, logfile="", rank=0, prereduce=0, name=None,
mpi_ranks=None, comm_id=0, debug_str=''):
if mpi_ranks is None:
mpi_ranks = list(range(0, mpi_size()))
assert not x.device or x.device[
-2:] == ":0", "Only one gpu per process currently supported by allreduce: " + x.device
global op_counter
global init_num_comms
global init_prereduce
if init_num_comms is None:
init_num_comms = num_comms
elif init_num_comms != num_comms:
print(
"Warning: only the first value of num_comms (%d) that was passed in will be used. num_comms=%d value ignored." % (
init_num_comms, num_comms))
if init_prereduce is None:
init_prereduce = prereduce
elif init_prereduce != prereduce:
print(
"Warning: only the first value of prereduce (%d) that was passed in will be used. prereduce=%d value ignored." % (
init_prereduce, prereduce))
if logfile and rank == 0:
print("%03d %s" % (op_counter, x.name))
ret = _op_module.allreduce_nccl(x, op_num=op_counter, sync_size=sync_size, num_comms=num_comms, prereduce=prereduce, logfile=logfile, name=name, mpi_ranks=mpi_ranks, comm_id=comm_id, debug_str=debug_str)
op_counter += 1
return ret
# @ops.RegisterGradient("AllreduceNccl")
# def allreduce_grad(op, dy):
# global op_counter
# dx = _op_module.allreduce_nccl(dy,
# op_num = op_counter,
# sync_size = op.get_attr('sync_size'),
# num_comms = op.get_attr('num_comms'),
# prereduce = op.get_attr('prereduce'),
# logfile = op.get_attr('logfile'))
@ops.RegisterGradient("AllreduceNccl")
def allreduce_grad(op, dy):
global op_counter
global init_num_comms
global init_prereduce
sync_size = op.get_attr('sync_size')
num_comms = op.get_attr('num_comms')
prereduce = op.get_attr('prereduce')
logfile = op.get_attr('logfile')
mpi_ranks = op.get_attr('mpi_ranks')
mpi_rank = op.get_attr('mpi_rank')
comm_id = op.get_attr('comm_id')
debug_str = op.get_attr('debug_str')
dx = _op_module.allreduce_nccl(
dy, op_num=op_counter,
sync_size=sync_size,
num_comms=num_comms,
prereduce=prereduce,
logfile=logfile,
mpi_ranks=mpi_ranks,
mpi_rank=mpi_rank,
comm_id=comm_id,
debug_str=debug_str,
)
op_counter += 1
return dx
def group_allreduce(
grads, parms, search_strings=None, cast_map=None, cast_all=None, allreduce_op=allreduce, **allreduce_kwargs):
# if no grouping specified, create one group to reduce at the end (no overlap with compute)
if search_strings is None:
search_strings = ["group_allreduce_all"]
groups = [(names, list(), list()) for names in search_strings]
last_group_idx = len(groups) - 1
for i, (grad, param) in enumerate(zip(grads, parms)):
for j, (names, group16, group32) in enumerate(groups):
# each group can be a single string, or a list of strings
# TODO: support regex's
if isinstance(names, str):
names = (names,)
matched = next((n for n in names if n in param.name), None)
if j == last_group_idx or matched is not None:
if cast_all is not None:
grad = float_cast(grad, dtype=cast_all)
elif cast_map is not None and matched in cast_map:
grad = float_cast(grad, dtype=cast_map[matched])
if grad.dtype.base_dtype is tf.float16:
group16.append((i, grad, param))
else:
group32.append((i, grad, param))
break
for name, group16, group32 in groups:
count = 0
if isinstance(name, str):
str_name = name
else:
str_name = "_".join(name)
str_name = str_name.replace('/', '_')
for group in (group16, group32):
count += len(group)
if len(group) > 0:
if len(group) == 1:
concated = group[0][1]
else:
concated = tf.concat([tf.reshape(grad, [-1]) for _, grad, _ in group], 0, name="concat_"+str_name)
reduced = allreduce_op(concated, **allreduce_kwargs)
if len(group) == 1:
grads[group[0][0]] = reduced
else:
offset = 0
for i, grad, param in group:
size = param.shape.num_elements()
grads[i] = tf.reshape(reduced[offset: offset + size], param.shape)
offset += size
if count == 0:
print("Warning: no grads found for all_reduce group: ", name)
# grads modified in place, but return anyway
return grads
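# Usage sketch (hedged; names illustrative): group grads by scope substring so
# reductions of late layers overlap with backprop of earlier ones:
#   grads = group_allreduce(grads, params,
#                           search_strings=["layer_23", "layer_11", "embed"])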
def sync_variables_op(mpi_rank, num_comms=2, prereduce=0):
ops = list()
prev = []
with tf.device("/gpu:0"):
for var in tf.trainable_variables():
with tf.control_dependencies(prev):
op = tf.assign(var,
allreduce(var if mpi_rank == 0 else var * 0.0, num_comms=num_comms,
prereduce=prereduce))
prev = [op]
ops.append(op)
return tf.group(*ops)
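# Usage sketch: run once after variable init so every rank starts from rank
# 0's weights (non-root inputs are zeroed, so the allreduce sum is rank 0's
# value):
#   sess.run(sync_variables_op(mpi_rank))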
def sync_globals_zero_init_op(num_comms=2, prereduce=0):
ops = list()
prev = []
with tf.device("/gpu:0"):
for var in tf.global_variables():
if var.dtype.base_dtype not in [tf.float32, tf.float16]:
cast_back = True
to_reduce = tf.cast(var, tf.float32)
else:
to_reduce = var
cast_back = False
with tf.control_dependencies(prev):
reduced = allreduce(to_reduce, num_comms=num_comms, prereduce=prereduce)
if cast_back:
reduced = tf.cast(reduced, var.dtype.base_dtype)
op = tf.assign(var, reduced)
prev = [op]
ops.append(op)
return tf.group(*ops)
# These ops are always at the end of the input (parent) chains.
# We don't particularly care about their ordering.
def _skip_op(op):
if op.type in ("Const", "VariableV2"):
return True
if op.type == "Identity" and op.name[-4:] == "read":
return True
return False
# The input and control_input ops are the parents of this op (output as a set)
def _get_parents_set(op):
parents = set(i.op for i in op.inputs if not _skip_op(i.op))
parents.update(ci for ci in op.control_inputs if not _skip_op(ci ))
return parents
# The input and control_input ops are the parents of this op (output as a list)
def _get_parents_list(op):
parents = [i.op for i in op.inputs if not _skip_op(i.op)]
for ci in op.control_inputs:
if not _skip_op(ci):
parents.append(ci)
return parents
# The output consumer and control_output ops are the children of this op
def _get_children_list(op):
children = list(op._control_outputs)
for output in op.outputs:
children.extend(output.consumers())
return children
# Prevent deadlocks caused by nccl ops not being scheduled in a consistent ordering across ranks.
def serialize_allreduce_ops(graph_targets, serialize_inputs=True, print_dag=False):
# Traverse all graph_targets through their inputs and:
# Build a mutable dag of dicts with ops as keys and their input ops as values (as set() elements)
# For ops with no inputs, add to the ready-to-schedule list.
dag = dict()
ready = list()
queue = deque([t.op for t in graph_targets])
visited = set()
while queue:
op = queue.popleft()
if op not in visited:
visited.add(op)
inputs = _get_parents_set(op)
if len(inputs):
dag[op] = inputs
# add parents to queue in deterministic order (not python set ordering)
queue.extend(_get_parents_list(op))
else:
ready.append(op)
# Implement topological sorting found here:
# https://en.wikipedia.org/wiki/Topological_sorting
# Pick out AllreduceNccl ops and append them to a list in the order we'd like them scheduled.
waves = list()
nccl_ops = list()
while len(ready):
ready_new = list()
for ready_op in ready:
for child_op in _get_children_list(ready_op):
child_inputs = dag.get(child_op)
if child_inputs is not None:
if ready_op in child_inputs:
child_inputs.remove(ready_op)
if len(child_inputs) == 0:
ready_new.append(child_op)
dag.pop(child_op)
if child_op.type == "AllreduceNccl":
nccl_ops.append(child_op)
waves.append(ready)
ready = ready_new
if len(dag):
raise ValueError("Error: graph_targets have at least one cycle")
# We could serialize all ops within each wave.
# Instead, just serialize the ops that are the inputs to the nccl ops.
# Don't serialize the nccl ops themselves since they are async.
# We just need them to be scheduled in a consistent order.
prev_op = None
for nccl_op in nccl_ops:
if serialize_inputs:
input_op = nccl_op.inputs[0].op
if prev_op is not None:
input_op._add_control_input(prev_op)
prev_op = input_op
else:
if prev_op is not None:
nccl_op._add_control_input(prev_op)
prev_op = nccl_op
if print_dag:
f = open(print_dag, 'w') if type(print_dag) is str else sys.stdout
for wave in waves:
for op in sorted(wave, key=lambda op: (op.type, op.name)):
print(op.type, op.name, op.outputs[0].dtype, op.outputs[0].shape, file=f)
print("", file=f)
if f is not sys.stdout:
f.close()
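# Usage sketch (hedged): call after graph construction, before the first
# Session.run, so nccl ops launch in the same order on every rank; the
# graph_targets are tensors (e.g. the loss), not ops:
#   serialize_allreduce_ops([loss])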
def identity_sync(*xs, sync_fwd=False, sync_bwd=True, name=None):
ys = _op_module.identity_synchronize(xs, sync=sync_fwd, sync_bwd=sync_bwd, name=name)
if len(ys) == 1:
return ys[0]
return ys
@ops.RegisterGradient("IdentitySynchronize")
def identity_sync_grad(op, *dys):
if op.get_attr('sync_bwd'):
return _op_module.identity_synchronize(dys, sync=True)
return dys
##################### Simple nccl ops for sharding models accross gpus ##################################
# Uses a single comm
# Each MPI worker / Gpu can only be part of one fixed grouping.
init_group_size = None
init_group_indx = None
init_group_rank = None
def check_group_params(group_size, group_indx, group_rank):
global init_group_size
global init_group_indx
global init_group_rank
if init_group_size is None:
init_group_size = group_size
elif init_group_size != group_size:
print(f"Warning: only the first value of group_size ({init_group_size}) that was passed in will be used. group_size={group_size} value ignored.")
if init_group_indx is None:
init_group_indx = group_indx
elif init_group_indx != group_indx:
print(f"Warning: only the first value of group_indx ({init_group_indx}) that was passed in will be used. group_indx={group_indx} value ignored.")
if init_group_rank is None:
init_group_rank = group_rank
elif init_group_rank != group_rank:
print(f"Warning: only the first value of group_rank ({init_group_rank}) that was passed in will be used. group_rank={group_rank} value ignored.")
reduce_scatter_counter = 0
def reduce_scatter(x, group_size=1, group_indx=0, group_rank=0, transpose=True, name=None, debug_str=''):
check_group_params(group_size, group_indx, group_rank)
assert not x.device or x.device[-2:] == ":0", "Only one gpu per process currently supported by reduce_scatter: " + x.device
global reduce_scatter_counter
if transpose:
assert x.shape.ndims == 2, "input must be of dim 2 prior to reduce_scatter with transpose"
x = _op_module.transpose2d(x)
assert x.shape[0].value % group_size == 0, "leading dim must be multiple of group_size"
y = _op_module.reduce_scatter_nccl(x,
group_size = group_size,
group_indx = group_indx,
group_rank = group_rank,
op_num = reduce_scatter_counter,
name = name,
debug_str = debug_str)
reduce_scatter_counter += 1
if transpose:
y = _op_module.transpose2d(y)
return y
all_gather_counter = 0
def all_gather(x, group_size=1, group_indx=0, group_rank=0, transpose=True, name=None, debug_str=''):
global all_gather_counter
check_group_params(group_size, group_indx, group_rank)
assert not x.device or x.device[-2:] == ":0", "Only one gpu per process currently supported by all_gather: " + x.device
if transpose:
assert x.shape.ndims == 2, "input must be of dim 2 prior to all_gather with transpose"
x = _op_module.transpose2d(x)
y = _op_module.all_gather_nccl(x,
group_size = group_size,
group_indx = group_indx,
group_rank = group_rank,
op_num = all_gather_counter,
name = name,
debug_str = debug_str)
all_gather_counter += 1
if transpose:
y = _op_module.transpose2d(y)
return y
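# Usage sketch (hedged): shard a large activation across a group of gpus;
# reduce_scatter leaves each rank with 1/group_size of the (transposed) rows,
# and all_gather is its inverse (and its registered gradient):
#   part = reduce_scatter(x, group_size=8, group_rank=rank % 8)
#   full = all_gather(part, group_size=8, group_rank=rank % 8)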
@ops.RegisterGradient("ReduceScatterNccl")
def reduce_scatter_grad(op, dy):
global all_gather_counter
dx = _op_module.all_gather_nccl(dy,
group_size = op.get_attr('group_size'),
group_indx = op.get_attr('group_indx'),
group_rank = op.get_attr('group_rank'),
debug_str = op.get_attr('debug_str'),
op_num = all_gather_counter)
all_gather_counter += 1
return dx
@ops.RegisterGradient("AllGatherNccl")
def all_gather_grad(op, dy):
global reduce_scatter_counter
dx = _op_module.reduce_scatter_nccl(dy,
group_size = op.get_attr('group_size'),
group_indx = op.get_attr('group_indx'),
group_rank = op.get_attr('group_rank'),
debug_str=op.get_attr('debug_str'),
op_num = reduce_scatter_counter)
reduce_scatter_counter += 1
return dx
|
__version__ = '1.13.1_master'
from blocksparse.utils import (
_op_module,
entropy_size,
get_entropy,
set_entropy,
reset_scalar_constants,
scalar_constant,
ceil_div,
reduce_mul,
bst_conv_layout,
bst_deconv_layout,
)
dw_matmul_large_n = _op_module.dw_matmul_large_n
from blocksparse.conv import (
ConvEdgeBias,
conv_edge_bias_init,
deconv_edge_bias_init,
cwise_linear,
)
from blocksparse.embed import (
embedding_lookup,
)
from blocksparse.ewops import (
add,
multiply,
subtract,
divide,
maximum,
minimum,
negative,
reciprocal,
square,
sqrt,
exp,
log,
sigmoid,
tanh,
relu,
elu,
gelu,
swish,
fast_gelu,
filter_tensor,
filter_tensor_op,
scale_tensor,
float_cast,
dropout,
concrete_gate,
concrete_gate_infer,
add_n8,
add_n,
replace_add_n,
restore_add_n,
bias_relu,
fancy_gather,
reduce_max,
assign_add,
)
from blocksparse.grads import (
gradients,
recomputable,
)
from blocksparse.lstm import (
fused_lstm_gates,
split4,
concat4,
sparse_relu,
FusedBasicLSTMCell,
grouped_lstm,
group_lstm_grads,
)
from blocksparse.matmul import(
BlocksparseMatMul,
SparseProj,
block_reduced_full_dw,
group_param_grads,
get_bsmm_dx_ops,
)
# from blocksparse.nccl import (
# allreduce,
# group_allreduce,
# sync_variables_op,
# sync_globals_zero_init_op,
# serialize_nccl_ops,
# reduce_scatter,
# all_gather,
# )
from blocksparse.norms import (
layer_norm,
batch_norm,
)
from blocksparse.optimize import (
Ema,
AdamOptimizer,
AdafactorOptimizer,
blocksparse_l2_decay,
blocksparse_norm,
blocksparse_prune,
clip_by_global_norm,
global_norm,
adafactor2d_op,
adafactor1d_op,
adam_op,
blocksparse_adam_op,
)
from blocksparse.quantize import (
QuantizeSpec,
quantize,
log_stats,
)
from blocksparse.transformer import (
BlocksparseTransformer,
softmax,
masked_softmax,
softmax_cross_entropy,
transpose_2d,
transpose_0213,
top_k,
rectified_top_k,
clear_bst_constants,
)
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os.path
import numpy as np
import tensorflow as tf
from operator import mul
if sys.version_info >= (3, 0):
from functools import reduce
data_files_path = tf.resource_loader.get_data_files_path()
_op_module = tf.load_op_library(os.path.join(data_files_path, 'blocksparse_ops.so'))
# for x in dir(_op_module):
# print(x)
# exit()
g_entropy = None
# max of 80 SMs currently for V100
# 3 lfsr's, 1024 max threads per SM
entropy_size = 80*3*1024
def set_entropy(init=None):
global g_entropy
if init is None:
init = np.random.randint(-(1<<31), (1<<31), size=entropy_size, dtype=np.int32).view(np.float32)
with tf.device("/gpu:0"):
g_entropy = tf.get_variable("Entropy", initializer=init, trainable=False)
def get_entropy():
global g_entropy
if g_entropy is None:
# we could create it here for you, but if the Session is created more than
# once then g_entropy needs to be created again.
raise ValueError("Call bs.set_entropy() after creating Session, then init global variables.")
return g_entropy
g_scalar_const_cache = dict()
def scalar_constant(value, dtype=None, name=None):
if isinstance(value, tf.Tensor):
return value
if not isinstance(value, (int, float, np.integer, np.floating)):
raise ValueError("Not a scalar value.")
# normalize numpy scalars to plain python types so they hash consistently
if isinstance(value, np.floating):
value = float(value)
elif isinstance(value, np.integer):
value = int(value)
global g_scalar_const_cache
if value not in g_scalar_const_cache:
g_scalar_const_cache[value] = list()
default_graph = tf.get_default_graph()
for tf_const in g_scalar_const_cache[value]:
# constants are tied to the (sub)graph
if tf_const.graph is default_graph:
return tf_const
with tf.device("/cpu:0"), tf.control_dependencies(None):
tf_const = tf.constant(value, dtype=dtype, name=name)
g_scalar_const_cache[value].append(tf_const)
return tf_const
def reset_scalar_constants():
global g_scalar_const_cache
g_scalar_const_cache = dict()
def is_param_casted(param):
for c in param.op.outputs[0].consumers():
if c.type == "Identity" and c.name[-4:] == "read":
consumers = c.outputs[0].consumers()
# should just be 1 cast op, but maybe allow for more creative uses
if len(consumers) <= 5:
for op in consumers:
if "Cast" in op.type:
return True
return False
def reduce_mul(vals, init=1):
return reduce(mul, vals, init)
def ceil_div(x, y):
return -(-x // y)
def z_order_2d(x, y):
answer = 0
bits = max(len(bin(x)), len(bin(y))) - 2
for i in range(bits):
mshifted = 1 << i
shift = i
answer |= ((x & mshifted) << shift) | ((y & mshifted) << (shift + 1))
#print mshifted, shift, answer, bin(answer)
return answer
# Morton ordering (z-order) of 3D coords
def z_order_3d(z, y, x):
answer = 0
bits = max(len(bin(x)), len(bin(y)), len(bin(z))) - 2
for i in range(bits):
mshifted = 1 << i
shift = i << 1
answer |= ((x & mshifted) << shift) | ((y & mshifted) << (shift + 1)) | ((z & mshifted) << (shift + 2))
#print mshifted, shift, answer, bin(answer)
return answer
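# Worked example: z_order_2d(x=3, y=5) interleaves the bits of y=0b101 and
# x=0b011 as y2 x2 y1 x1 y0 x0 = 0b100111 = 39.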
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 32 bits
# Shamelessly pulled directly from:
# http://www.hackersdelight.org/hdcodetxt/magicgu.py.txt
def magic32u(nmax, d):
nc = ((nmax + 1) // d) * d - 1
nbits = len(bin(nmax)) - 2
for p in range(0, 2 * nbits + 1):
if 2 ** p > nc * (d - 1 - (2 ** p - 1) % d):
m = (2 ** p + d - 1 - (2 ** p - 1) % d) // d
return (m, p)
raise ValueError("Can't find magic number for division")
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 64 bits and the shift
# lops off the lower 32 bits
def magic64u(d):
# 3 is a special case that only ends up in the high bits
# if the nmax is 0xffffffff
# we can't use 0xffffffff for all cases as some return a 33 bit
# magic number
nmax = 0xffffffff if d == 3 else 0x7fffffff
magic, shift = magic32u(nmax, d)
if magic != 1:
shift -= 32
return (magic, shift)
def dilation_size(S, dil=1):
return S * dil - dil + 1
def out_dim(S, W, pad, std=1, dil=1):
return ceil_div(W - dilation_size(S, dil) + 1 + 2*pad, std)
#return ((W - dilation_size(S, dil) + 2 * pad) // std) + 1
def same_pad(S, dil=1):
return dilation_size(S, dil) // 2
def backward_pad(S, pad, dil=1):
return dilation_size(S, dil) - pad - 1
def conv_slice(q, W, S, pad, std=1, dil=1):
qs = q * std - pad
ws = list()
for s in range(S):
w = qs + s * dil
if w >= 0 and w < W:
ws.append(w)
return ws
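# Worked example: conv_slice(q=0, W=8, S=3, pad=1) -> [0, 1]; the left edge
# clips the first filter tap, so output pixel 0 reads input pixels 0 and 1.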
def deconv_slice(x, Q, S, bpad, std=1, dil=1):
xs = x - bpad
e = list()
for s in range(S):
q = xs + s * dil
if q % std == 0:
q //= std
if q >= 0 and q < Q:
e.append(q)
return e
def bst_conv_layout(input_h=1, input_w=1, filter_h=1, filter_w=1, stride=1, blk_size=32, autoregressive=True):
H = input_h
W = input_w
R = filter_h
S = filter_w
assert H % stride == 0 or H == 1
assert W % stride == 0
P = H // stride or 1
Q = W // stride
if H == 1:
R = 1
pad_r = 0
else:
pad_r = -1
for r in range(R):
if P == out_dim(R, H, r, stride):
pad_r = r
break
assert pad_r >= 0, "Even size filters only work with stride 2."
pad_s = -1
for s in range(S):
if Q == out_dim(S, W, s, stride):
pad_s = s
break
assert pad_s >= 0, "Even size filters only work with stride 2."
print(f"P:{P} Q:{Q} H:{H} W:{W} R:{R} S:{S} std:{stride} pad_r:{pad_r} pad_s:{pad_s}")
assert P*Q % blk_size == 0, f"P:{P} Q:{Q}"
assert H*W % blk_size == 0, f"H:{H} W:{W}"
mask_set = set()
layout = np.zeros((P*Q//blk_size, H*W//blk_size), dtype=bool)
# just compute the output pixels within the tile
for p, q in np.ndindex(P, Q):
for h in conv_slice(p, H, R, pad_r, stride):
for w in conv_slice(q, W, S, pad_s, stride):
x = h*W + w
y = p*Q + q
if not autoregressive or p*stride*Q*stride + q*stride >= x:
layout[y//blk_size, x//blk_size] = 1
mask_set.add((y, x))
def cb(blk_shape, head_idx, qry_idx, key_idx, blk_idx):
mask = np.zeros(blk_shape, dtype=bool)
q0 = qry_idx*blk_shape[0]
k0 = key_idx*blk_shape[1]
for q, k in np.ndindex(blk_shape):
if (q0 + q, k0 + k) in mask_set:
mask[q, k] = 1
return mask
return layout, cb
# layout, cb = bst_conv_layout(input_h=64, input_w=64, filter_h=15, filter_w=15, stride=1, blk_size=8)
# layout, cb = bst_conv_layout(input_h=64, input_w=64, filter_h=15, filter_w=15, stride=2, blk_size=8)
# layout, cb = bst_conv_layout(input_h=64, input_w=64, filter_h= 8, filter_w= 8, stride=2, blk_size=8)
# layout, cb = bst_conv_layout(input_w=1024, filter_w=225, stride=1, blk_size=8)
# layout, cb = bst_conv_layout(input_w=1024, filter_w=225, stride=2, blk_size=8)
# layout, cb = bst_conv_layout(input_w=1024, filter_w=256, stride=2, blk_size=8)
# np.savetxt("layout.txt", layout, fmt="%d")
# exit()
def bst_deconv_layout(output_h=1, output_w=1, filter_h=1, filter_w=1, stride=1, blk_size=32, autoregressive=True):
H = output_h
W = output_w
R = filter_h
S = filter_w
assert H % stride == 0 or H == 1
assert W % stride == 0
P = H // stride or 1
Q = W // stride
if H == 1:
R = 1
pad_r = 0
else:
pad_r = -1
for r in range(R):
if P == out_dim(R, H, r, stride):
pad_r = backward_pad(R,r)
break
assert pad_r >= 0, "Even size filters only work with stride 2."
pad_s = -1
for s in range(S):
if Q == out_dim(S, W, s, stride):
pad_s = backward_pad(S,s)
break
assert pad_s >= 0, "Even size filters only work with stride 2."
print(f"P:{P} Q:{Q} H:{H} W:{W} R:{R} S:{S} std:{stride} pad_r:{pad_r} pad_s:{pad_s}")
assert P*Q % blk_size == 0, f"P:{P} Q:{Q}"
assert H*W % blk_size == 0, f"H:{H} W:{W}"
mask_set = set()
layout = np.zeros((H*W//blk_size, P*Q//blk_size), dtype=bool)
# just compute the output pixels within the tile
for h, w in np.ndindex(H, W):
for p in deconv_slice(h, P, R, pad_r, stride):
for q in deconv_slice(w, Q, S, pad_s, stride):
y = h*W + w
x = p*Q + q
if not autoregressive or y >= p*stride*Q*stride + q*stride:
layout[y//blk_size, x//blk_size] = 1
mask_set.add((y, x))
def cb(blk_shape, head_idx, qry_idx, key_idx, blk_idx):
mask = np.zeros(blk_shape, dtype=bool)
q0 = qry_idx*blk_shape[0]
k0 = key_idx*blk_shape[1]
for q, k in np.ndindex(blk_shape):
if (q0 + q, k0 + k) in mask_set:
mask[q, k] = 1
return mask
return layout, cb
# layout, cb = bst_deconv_layout(output_h=64, output_w=64, filter_h=15, filter_w=15, stride=1, blk_size=8)
# layout, cb = bst_deconv_layout(output_h=64, output_w=64, filter_h=15, filter_w=15, stride=2, blk_size=8)
# layout, cb = bst_deconv_layout(output_h=64, output_w=64, filter_h= 8, filter_w= 8, stride=2, blk_size=8)
# layout, cb = bst_deconv_layout(output_w=1024, filter_w=225, stride=1, blk_size=8)
# layout, cb = bst_deconv_layout(output_w=1024, filter_w=225, stride=2, blk_size=8)
# layout, cb = bst_deconv_layout(output_w=1024, filter_w=256, stride=2, blk_size=8)
# np.savetxt("layout.txt", cb((8,8), 0, 0, 0, 0), fmt="%d")
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, scalar_constant
############################## Blocksparse Transformer #####################################
import scipy.sparse as sparse
blocksparse_transformer_nt = _op_module.blocksparse_transformer_nt
blocksparse_transformer_nn = _op_module.blocksparse_transformer_nn
blocksparse_transformer_tn = _op_module.blocksparse_transformer_tn
blocksparse_masked_softmax = _op_module.blocksparse_masked_softmax
blocksparse_softmax = _op_module.blocksparse_softmax
blocksparse_softmax_grad = _op_module.blocksparse_softmax_grad
bst_partial_autoregressive_mask = _op_module.bst_partial_autoregressive_mask
# save a bit of gpu memory by only creating one copy of identical constant lookup tables
g_lookup_cache = dict(nt=list(), nn=list(), tn=list(), sm=list())
def get_constant(lut, name):
global g_lookup_cache
default_graph = tf.get_default_graph()
for np_entry, tf_entry in g_lookup_cache[name]:
if np_entry.dtype == lut.dtype and np_entry.shape == lut.shape and tf_entry.graph is default_graph:
if np.abs(np_entry.astype(np.int64) - lut.astype(np.int64)).sum() == 0:
# found an exact match
return tf_entry
with tf.control_dependencies(None):
tf_entry = tf.constant(lut, name=name+"_lut")
g_lookup_cache[name].append( (lut, tf_entry) )
return tf_entry
def clear_bst_constants():
global g_lookup_cache
g_lookup_cache = dict(nt=list(), nn=list(), tn=list(), sm=list())
class BlocksparseTransformer(object):
# TODO: support save restore of this object..
# but for now just rely on hyperparameter regeneration of the object state
# def __getstate__(self):
# return (self.layout, self.blk_size, self.softmax_mask, self.name)
# def __setstate__(self, state):
# self.__init__(*state)
def __init__(self, layout, block_size=64, heads=None, mask_callback=None, name=None):
if len(layout.shape) == 2:
assert heads is not None, "heads must be explicitly specified when using shared layouts per head"
# broadcast same layout over all heads
layout = np.expand_dims(layout, 0)
if heads is None:
heads = layout.shape[0]
assert block_size in (8,16,32,64), "Block sizes of 8, 16, 32 and 64 currently supported"
assert len(layout.shape) == 3, "bad layout shape: " + str(layout.shape)
#self.layout = layout > 0 # save boolean version for serialization purposes, TODO: save packbits or csr version
self.blk_size = block_size
self.name = name
self.heads = heads
self.lut_heads = layout.shape[0]
self.ctx_blks_q = layout.shape[1]
self.ctx_blks_k = layout.shape[2]
self.blk_shape = (block_size, block_size)
self.nn_max = 0
self.tn_max = 0
self.softmax_dtype = None
if layout.dtype != np.int32:
layout = layout.astype(np.int32)
self.nt_lut = list()
self.nn_lut = list()
self.tn_lut = list()
self.nt_list = list()
self.nn_list = list()
self.tn_list = list()
blocks = None
for head in range(layout.shape[0]):
# convert to csr for vastly more efficient python iteration on large sparse layouts
csr = sparse.csr_matrix(layout[head,:,:])
ys, xs, bs = sparse.find(csr) # xs is in sorted order by default
if blocks is None:
blocks = len(bs)
else:
assert len(bs) == blocks, "number of layout blocks must be equal across heads"
# make blocks contiguous along the rows (softmax code leverages this for increased performance)
nt_list = sorted( zip(ys, xs) )
ys = [b[0] for b in nt_list]
xs = [b[1] for b in nt_list]
nt_lut = np.array(nt_list, dtype=np.int32)
nn_lut, nn_list, nn_max = self.xn_lut(ys, xs, blocks, self.ctx_blks_q)
tn_lut, tn_list, tn_max = self.xn_lut(xs, ys, blocks, self.ctx_blks_k)
self.nt_lut.append(nt_lut)
self.nn_lut.append(nn_lut)
self.tn_lut.append(tn_lut)
self.nt_list.append(nt_list)
self.nn_list.append(nn_list)
self.tn_list.append(tn_list)
self.nn_max = max(self.nn_max, nn_max)
self.tn_max = max(self.tn_max, tn_max)
self.blocks = blocks
self.nt_lut = np.array(self.nt_lut, dtype=np.int32)
self.nn_lut = np.array(self.nn_lut, dtype=np.int32)
self.tn_lut = np.array(self.tn_lut, dtype=np.int32)
if mask_callback is not None:
self.init_softmax_mask(mask_callback)
else:
self.softmax_mask = None
self.softmax_mask_np = None
def init_softmax_mask(self, mask_callback):
if self.blk_size == 64:
dtype = np.uint64
elif self.blk_size == 32:
dtype = np.uint32
elif self.blk_size == 16:
dtype = np.uint16
else:
dtype = np.uint8
masks = []
# for now assume one softmax mask per sparsity specification
for h in range(self.lut_heads):
head_mask = []
for b, (q, k) in enumerate(self.nt_list[h]):
mask = mask_callback(self.blk_shape, h, q, k, b)
bits = np.packbits(mask.reshape(-1,8)[:,::-1]).view(dtype)
head_mask.append(bits)
masks.append(head_mask)
# numpy mask for test code
self.softmax_mask_np = np.array(masks, dtype=dtype) # heads, blocks, blk_size
# tf mask for kernels. Transpose to: heads, blk_size, blocks
self.softmax_mask = np.transpose(self.softmax_mask_np, [0, 2, 1]).copy()
def xn_lut(self, ys, xs, blocks, ctx_blks):
# build list of y's connected to each x and map to block id
py_lut = [list() for y in range(ctx_blks)]
for b in range(blocks):
py_lut[ ys[b] ].append(( b, xs[b] ))
# build header into variable length lookup tables (luts)
# the header contains the offset and size of the lut for that output block
max_lut = 0
offset = ctx_blks
np_lut = np.empty((offset + blocks, 2), dtype=np.int32)
for i, lut in enumerate(py_lut):
np_lut[i] = offset, len(lut)
max_lut = max(max_lut, len(lut))
for entry in lut:
np_lut[offset] = entry
offset += 1
return np_lut, py_lut, max_lut
# return the coordinate (q, k) in the layout that corresponds to a given block id
def block_coord(self, block, head=0): return self.nt_list[head][block]
def nt_test(self, A, B):
# A and B have shape (batch, ctx_size, state_size)
# reshape to (batch, ctx_blks, blk_size, heads, head_state)
shapeA = list(A.shape)
shapeB = list(B.shape)
shapeA[1:] = [self.ctx_blks_q, self.blk_size, self.heads, shapeA[2]//self.heads]
shapeB[1:] = [self.ctx_blks_k, self.blk_size, self.heads, shapeB[2]//self.heads]
batch_size = shapeA[0]
A = A.reshape(shapeA)
B = B.reshape(shapeB)
C = np.empty([batch_size, self.heads, self.blocks, self.blk_size, self.blk_size], dtype=np.float32)
for n in range(batch_size):
for h in range(self.heads):
lut_head = h if self.lut_heads > 1 else 0
for b, (y, x) in enumerate(self.nt_list[lut_head]):
C[n,h,b,:,:] = np.dot( A[n,y,:,h,:], B[n,x,:,h,:].T )
return C
def nn_test(self, A, B):
# B and C have shape (batch, ctx_size, state_size)
# reshape to (batch, ctx_blks, blk_size, heads, head_state)
shapeB = list(B.shape)
state_size = shapeB[2]
shapeB[1:] = [self.ctx_blks_k, self.blk_size, self.heads, state_size//self.heads]
shapeC = list(shapeB)
shapeC[1:] = [self.ctx_blks_q, self.blk_size, self.heads, state_size//self.heads]
batch_size = shapeC[0]
B = B.reshape(shapeB)
C = np.zeros(shapeC, dtype=np.float32)
for n in range(batch_size):
for h in range(self.heads):
lut_head = h if self.lut_heads > 1 else 0
for x, lut in enumerate(self.nn_list[lut_head]):
for b, y in lut:
C[n,x,:,h,:] += np.dot( A[n,h,b,:,:], B[n,y,:,h,:] )
return C.reshape([batch_size, self.ctx_blks_q * self.blk_size, state_size])
def tn_test(self, A, B):
# B and C have shape (batch, ctx_size, state_size)
# reshape to (batch, ctx_blks, blk_size, heads, head_state)
shapeB = list(B.shape)
state_size = shapeB[2]
shapeB[1:] = [self.ctx_blks_q, self.blk_size, self.heads, state_size//self.heads]
shapeC = list(shapeB)
shapeC[1:] = [self.ctx_blks_k, self.blk_size, self.heads, state_size//self.heads]
batch_size = shapeC[0]
B = B.reshape(shapeB)
C = np.zeros(shapeC, dtype=np.float32)
for n in range(batch_size):
for h in range(self.heads):
lut_head = h if self.lut_heads > 1 else 0
for x, lut in enumerate(self.tn_list[lut_head]):
for b, y in lut:
C[n,x,:,h,:] += np.dot( A[n,h,b,:,:].T, B[n,y,:,h,:] )
return C.reshape([batch_size, self.ctx_blks_k * self.blk_size, state_size])
def masked_softmax_test(self, x, scale=1.0, autoregress_at_key=None):
y = np.empty_like(x)
m = self.softmax_mask_np # heads, blocks, blk_size
bsize = self.blk_size
ones = (1 << bsize) - 1
for n in range(x.shape[0]):
for h in range(x.shape[1]):
hl = h if self.lut_heads > 1 else 0
for lut in self.nn_list[hl]:
xm = np.full((len(lut), bsize * bsize), -np.finfo(np.float32).max, dtype=np.float32)
for i, (b, k) in enumerate(lut):
xb = x[n,h,b,:,:].reshape(-1)
if m is None:
# apply scale
xm[i,:] = xb * scale
else:
mask = m[hl,b,:]
if autoregress_at_key is not None:
Q = self.nt_list[hl][b][0] * bsize
K = k * bsize
new_mask = np.empty(bsize, dtype=mask.dtype)
for q in range(bsize):
shift_a = bsize - min(max(autoregress_at_key - K, 0), bsize)
shift_b = min(max(bsize-1 + K - (Q + q), 0), bsize)
shift_c = int(min(shift_a, shift_b))
#print(ones, shift_c, type(shift_c))
new_mask[q] = int(mask[q]) & (ones >> shift_c)
mask = new_mask
# apply mask and scale to x block
mask = np.unpackbits(mask.view(np.uint8)).reshape(-1,8)[:,::-1].reshape(-1)
nzIdx = np.nonzero(mask)
xm[i,nzIdx] = xb[nzIdx] * scale
# compute softmax for collection of k blocks
xm = xm.reshape((len(lut), bsize, bsize))
xm = np.exp(xm - np.max(xm, axis=(0,2), keepdims=True))
ym = xm / np.sum(xm, axis=(0,2), keepdims=True)
for i, (b, k) in enumerate(lut):
y[n,h,b,:,:] = ym[i]
return y
def masked_softmax_grad_test(self, dy, y, scale=1.0):
dx = np.empty_like(dy)
for n in range(dy.shape[0]):
for h in range(dy.shape[1]):
hl = h if self.lut_heads > 1 else 0
for lut in self.nn_list[hl]:
bs = [ b for b, k in lut ]
dyb = dy[n,h,bs,:,:]
yb = y[n,h,bs,:,:]
dxb = (dyb - np.sum(dyb * yb, axis=(0,2), keepdims=True)) * yb * scale
for i, (b, k) in enumerate(lut):
dx[n,h,b,:,:] = dxb[i,:,:]
return dx
def get_lut_constants(self):
return get_constant(self.nt_lut, name="nt"), get_constant(self.nn_lut, name="nn"), get_constant(self.tn_lut, name="tn")
def nt_op(self, a, b, name=None, bench=0):
nt_lut, nn_lut, tn_lut = self.get_lut_constants()
return blocksparse_transformer_nt(
a, b, nt_lut, nn_lut, tn_lut, CT=tf.bfloat16,
heads=self.heads, blocks=self.blocks, blk_size=self.blk_size, ctx_blks_a=self.ctx_blks_q, ctx_blks_b=self.ctx_blks_k,
nn_max=self.nn_max, tn_max=self.tn_max, bench=bench, name=name
)
def nn_op(self, a, b, name=None, bench=0):
nt_lut, nn_lut, tn_lut = self.get_lut_constants()
return blocksparse_transformer_nn(
a, b, nt_lut, nn_lut, tn_lut,
heads=self.heads, blocks=self.blocks, blk_size=self.blk_size, ctx_blks_b=self.ctx_blks_k, ctx_blks_c=self.ctx_blks_q,
nn_max=self.nn_max, tn_max=self.tn_max, bench=bench, name=name
)
def tn_op(self, a, b, name=None, bench=0):
nt_lut, nn_lut, tn_lut = self.get_lut_constants()
return blocksparse_transformer_tn(
a, b, nt_lut, nn_lut, tn_lut,
heads=self.heads, blocks=self.blocks, blk_size=self.blk_size, ctx_blks_b=self.ctx_blks_q, ctx_blks_c=self.ctx_blks_k,
nn_max=self.nn_max, tn_max=self.tn_max, bench=bench, name=name
)
def query_key_op(self, q, k, name=None, bench=0):
nt_lut, nn_lut, tn_lut = self.get_lut_constants()
self.softmax_dtype = tf.bfloat16 if q.dtype.base_dtype == tf.float32 else tf.float16
return blocksparse_transformer_nt(
q, k, nt_lut, nn_lut, tn_lut, CT=tf.bfloat16,
heads=self.heads, blocks=self.blocks, blk_size=self.blk_size, ctx_blks_a=self.ctx_blks_q, ctx_blks_b=self.ctx_blks_k,
nn_max=self.nn_max, tn_max=self.tn_max, bench=bench, name=name
)
def weight_value_op(self, w, v, name=None, bench=0):
nt_lut, nn_lut, tn_lut = self.get_lut_constants()
return blocksparse_transformer_nn(
w, v, nt_lut, nn_lut, tn_lut,
heads=self.heads, blocks=self.blocks, blk_size=self.blk_size, ctx_blks_b=self.ctx_blks_k, ctx_blks_c=self.ctx_blks_q,
nn_max=self.nn_max, tn_max=self.tn_max, bench=bench, name=name
)
def masked_softmax(self, x, scale=1.0, autoregress_at_key=None, dtype=None):
if self.softmax_mask is None:
if autoregress_at_key is not None:
raise ValueError("autoregress_at_key only applies to ops with mask_callback defined.")
return self.softmax(x, scale)
nn_lut = get_constant(self.nn_lut, name="nn")
sm_mask = get_constant(self.softmax_mask, name="sm")
if autoregress_at_key is not None:
lut = get_constant(self.nt_lut, name="nt")
key = scalar_constant(autoregress_at_key, dtype=tf.int32)
with tf.control_dependencies([x.op]):
sm_mask = bst_partial_autoregressive_mask(sm_mask, lut, key, blocks=self.blocks, blk_size=self.blk_size, ctx_blks_k=self.ctx_blks_k)
if dtype is None:
dtype = self.softmax_dtype
return blocksparse_masked_softmax(x, scalar_constant(scale, dtype=tf.float32), nn_lut, sm_mask, blocks=self.blocks, blk_size=self.blk_size, ctx_blks=self.ctx_blks_q, lut_max=self.nn_max, T=dtype)
def softmax(self, x, scale=1.0, dtype=None):
nn_lut = get_constant(self.nn_lut, name="nn")
if dtype is None:
dtype = self.softmax_dtype
return blocksparse_softmax(x, scalar_constant(scale, dtype=tf.float32), nn_lut, blocks=self.blocks, blk_size=self.blk_size, ctx_blks=self.ctx_blks_q, lut_max=self.nn_max, T=dtype)
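# Example usage (a sketch, not part of this file): full block-sparse attention
# with a BlocksparseTransformer instance `bst`; q, k, v tensors of shape
# (batch, ctx_size, state_size) and head_state = state_size // heads are assumed.
#   w = bst.query_key_op(q, k)                        # block-sparse w = q . k.T
#   w = bst.masked_softmax(w, scale=1.0 / np.sqrt(head_state))
#   y = bst.weight_value_op(w, v)                     # block-sparse y = w . v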
# w = q . k.T
# QK = QC . KC.T 16x16 = 16x64 . 16x64.T
# QC = QK . KC 16x64 = 16x16 . 16x64
# KC = QK.T . QC 16x64 = 16x16.T . 16x64
@ops.RegisterGradient("BlocksparseTransformerNT")
def blocksparse_transformer_nt_grad(op, dw):
heads = op.get_attr("heads")
blocks = op.get_attr("blocks")
blk_size = op.get_attr("blk_size")
ctx_blks_q = op.get_attr("ctx_blks_a")
ctx_blks_k = op.get_attr("ctx_blks_b")
nn_max = op.get_attr("nn_max")
tn_max = op.get_attr("tn_max")
bench = op.get_attr("bench")
q, k, nt_lut, nn_lut, tn_lut = op.inputs
dk = blocksparse_transformer_tn(
dw, q, nt_lut, nn_lut, tn_lut,
heads=heads, blocks=blocks, blk_size=blk_size, ctx_blks_b=ctx_blks_q, ctx_blks_c=ctx_blks_k,
nn_max=nn_max, tn_max=tn_max, bench=bench)
with tf.control_dependencies([dk.op]):
dq = blocksparse_transformer_nn(
dw, k, nt_lut, nn_lut, tn_lut,
heads=heads, blocks=blocks, blk_size=blk_size, ctx_blks_b=ctx_blks_k, ctx_blks_c=ctx_blks_q,
nn_max=nn_max, tn_max=tn_max, bench=bench)
return (dq, dk, None, None, None)
# y = w . v
# QC = QK . VC 16x64 = 16x16 . 16x64
# QK = QC . VC.T 16x16 = 16x64 . 16x64.T
# VC = QK.T . QC 16x64 = 16x16.T . 16x64
@ops.RegisterGradient("BlocksparseTransformerNN")
def blocksparse_transformer_nn_grad(op, dy):
heads = op.get_attr("heads")
blocks = op.get_attr("blocks")
blk_size = op.get_attr("blk_size")
ctx_blks_k = op.get_attr("ctx_blks_b")
ctx_blks_q = op.get_attr("ctx_blks_c")
nn_max = op.get_attr("nn_max")
tn_max = op.get_attr("tn_max")
bench = op.get_attr("bench")
w, v, nt_lut, nn_lut, tn_lut = op.inputs
dv = blocksparse_transformer_tn(
w, dy, nt_lut, nn_lut, tn_lut,
heads=heads, blocks=blocks, blk_size=blk_size, ctx_blks_b=ctx_blks_q, ctx_blks_c=ctx_blks_k,
nn_max=nn_max, tn_max=tn_max, bench=bench)
with tf.control_dependencies([dv.op]):
c_dtype = tf.bfloat16 if dy.dtype.base_dtype == tf.float32 else tf.float16
dw = blocksparse_transformer_nt(
dy, v, nt_lut, nn_lut, tn_lut, CT=c_dtype,
heads=heads, blocks=blocks, blk_size=blk_size, ctx_blks_a=ctx_blks_q, ctx_blks_b=ctx_blks_k,
nn_max=nn_max, tn_max=tn_max, bench=bench)
return (dw, dv, None, None, None)
@ops.RegisterGradient("BlocksparseMaskedSoftmax")
def blocksparse_masked_softmax_op_grad(op, dy):
blocks = op.get_attr("blocks")
blk_size = op.get_attr("blk_size")
ctx_blks = op.get_attr("ctx_blks")
lut_max = op.get_attr("lut_max")
y = op.outputs[0]
scale = op.inputs[1]
lut = op.inputs[2]
dx = blocksparse_softmax_grad(dy, y, scale, lut, blocks=blocks, blk_size=blk_size, ctx_blks=ctx_blks, lut_max=lut_max)
return (dx, None, None, None)
@ops.RegisterGradient("BlocksparseSoftmax")
def blocksparse_softmax_op_grad(op, dy):
blocks = op.get_attr("blocks")
blk_size = op.get_attr("blk_size")
ctx_blks = op.get_attr("ctx_blks")
lut_max = op.get_attr("lut_max")
y = op.outputs[0]
scale = op.inputs[1]
lut = op.inputs[2]
dx = blocksparse_softmax_grad(dy, y, scale, lut, blocks=blocks, blk_size=blk_size, ctx_blks=ctx_blks, lut_max=lut_max)
return (dx, None, None)
############################## Top-K #####################################
top_k_op = _op_module.topk
rectified_top_k_op = _op_module.rectified_top_k
masked_softmax_op = _op_module.masked_softmax
masked_top_k_softmax_op = _op_module.masked_top_k_softmax
masked_softmax_grad_op = _op_module.masked_softmax_grad
ew_dx_dzza_op = _op_module.ew_dx_dzza
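# top_k_op returns (values, indices) like tf.nn.top_k (see the Topk gradient
# below); the last dimension is limited to 1024.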
def top_k(x, k):
assert k <= x.shape[-1].value <= 1024
return top_k_op(x, k)
def rectified_top_k(x, k, rebase=True):
assert k <= x.shape[-1].value <= 1024
return rectified_top_k_op(x, k, rebase=rebase)
@ops.RegisterGradient("RectifiedTopK")
def rectified_top_k_grad(op, dz):
# same grad as relu
return ew_dx_dzza_op(dz, op.outputs[0], op=RELU_OP)
@ops.RegisterGradient("Topk")
def top_k_grad(op, grad, _):
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [
array_ops.reshape(
sparse_ops.sparse_to_dense(
ind,
array_ops.reshape(math_ops.reduce_prod(in_shape), [1]),
array_ops.reshape(grad, [-1]),
validate_indices=False), in_shape),
array_ops.zeros([], dtype=dtypes.int32)
]
def rectified_top_k_test(x, k, rebase=True):
a = np.argsort(x)[:,::-1]
y = np.zeros(x.shape, dtype=np.float32)
for i in range(x.shape[0]):
# get min value among topk
base = max(x[i,a[i,k-1]], 0.0) if rebase else 0.0
#print(base, a[i,k-1])
# write just the topk values from x to y
y[i,a[i,:k]] = np.maximum(x[i,a[i,:k]], base) - base
return y
def masked_top_k_softmax(x, k, mask=None, scale=1.0):
assert k <= x.shape[-1].value <= 1024
if mask is not None:
x_shape = x.shape.as_list()
m_shape = mask.shape.as_list()
assert len(x_shape) == len(m_shape)
for i in range(len(m_shape)):
assert m_shape[i] in (1, x_shape[i])
mask = [ mask ]
else:
mask = []
return masked_top_k_softmax_op(x, k, scalar_constant(scale, dtype=tf.float32), mask)
def softmax(x, scale=1.0, bench=0):
return masked_softmax_op(x, scalar_constant(scale, dtype=tf.float32), [], bench=bench)
def masked_softmax(x, mask=None, scale=1.0, bench=0):
if mask is not None:
x_shape = x.shape.as_list()
m_shape = mask.shape.as_list()
assert len(x_shape) == len(m_shape)
for i in range(len(m_shape)):
assert m_shape[i] in (1, x_shape[i])
mask = [ mask ]
else:
mask = []
return masked_softmax_op(x, scalar_constant(scale, dtype=tf.float32), mask, bench=bench)
@ops.RegisterGradient("MaskedTopKSoftmax")
def masked_top_k_softmax_grad(op, dy):
n_mask = op.get_attr("n_mask")
mask = [ op.inputs[3] ] if n_mask else []
dx = masked_softmax_grad_op(dy, op.outputs[0], op.inputs[2], mask)
if n_mask:
return (dx, None, None, None)
return (dx, None, None)
@ops.RegisterGradient("MaskedSoftmax")
def masked_softmax_grad(op, dy):
bench = op.get_attr("bench")
n_mask = op.get_attr("n_mask")
mask = [ op.inputs[2] ] if n_mask else []
dx = masked_softmax_grad_op(dy, op.outputs[0], op.inputs[1], mask, bench=bench)
if n_mask:
return (dx, None, None)
return (dx, None)
def masked_softmax_test(x, mask=None, scale=1.0):
x_shape = x.shape
if mask is not None:
x = x.reshape(-1, mask.size)
y = np.empty(x.shape, dtype=np.float32)
y.fill(-np.finfo(np.float32).max)
nz = np.nonzero(mask.reshape(-1))
y[:,nz] = x[:,nz] * mask.reshape(1,-1)[:,nz] * scale
else:
y = x * scale
y = y.reshape(-1, x_shape[-1])
m = np.max(y, axis=1, keepdims=True)
z = np.exp(y - m) / np.sum(np.exp(y - m), axis=1, keepdims=True)
return z.reshape(x_shape)
def masked_top_k_softmax_test(x, k, mask=None, scale=1.0):
x_shape = x.shape
if mask is not None:
x = x.reshape(-1, mask.size)
y = np.empty(x.shape, dtype=np.float32)
y.fill(-np.finfo(np.float32).max)
nz = np.nonzero(mask.reshape(-1))
y[:,nz] = x[:,nz] * mask.reshape(1,-1)[:,nz] * scale
else:
y = x * scale
y = y.reshape(-1, x_shape[-1])
a = np.argsort(y)[:,::-1]
z = np.zeros(y.shape, dtype=np.float32)
for i in range(y.shape[0]):
# get max value among top_k
max_val = y[i,a[i,0]]
# compute softmax on just the top_k values
z[i,a[i,:k]] = np.exp(y[i,a[i,:k]] - max_val) / np.sum(np.exp(y[i,a[i,:k]] - max_val))
return z.reshape(x_shape)
def masked_softmax_grad_test(dy, y, mask=None, scale=1.0):
if mask is None:
mask = 1.0
return (dy - np.sum(dy * y, axis=-1, keepdims=True)) * y * mask * scale
# m = np.zeros((10,10), dtype=np.float32)
# for y, x in np.ndindex(m.shape):
# if x <= y: m[y,x] = 1.0
# x = np.arange(1,101, dtype=np.float32).reshape(1,10,10)
# y = masked_top_k_softmax_test(x, 5, mask=m)
############################## Transpose #####################################
transpose_0213_op = _op_module.transpose0213
transpose_2d_op = _op_module.transpose2d
def transpose_2d(x):
return transpose_2d_op(x)
@ops.RegisterGradient("Transpose2D")
def transpose_2d_grad(op, dy):
return transpose_2d_op(dy)
def transpose_0213(x):
return transpose_0213_op(x)
@ops.RegisterGradient("Transpose0213")
def transpose_0213_grad(op, dy):
return transpose_0213_op(dy)
############################## Softmax Cross Entropy #####################################
softmax_cross_entropy_op = _op_module.softmax_cross_entropy
softmax_cross_entropy_grad_op = _op_module.softmax_cross_entropy_grad
def softmax_cross_entropy(logits=None, labels=None):
assert logits is not None and labels is not None
assert logits.shape[-1].value <= 65536, "use tf.sparse_softmax_cross_entropy_with_logits if feature dim is greater than 64k"
loss, _ = softmax_cross_entropy_op(logits, labels)
return loss
@ops.RegisterGradient("SoftmaxCrossEntropy")
def softmax_cross_entropy_grad(op, dy, _):
return softmax_cross_entropy_grad_op(op.outputs[1], dy), None
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import collections
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module
import blocksparse.ewops as ew
recompute_op = _op_module.recompute
# Recompute Decorator
class recomputable(object):
def __init__(self, func):
functools.update_wrapper(self, func)
self.func = func
def __call__(self, *args, **kwargs):
# toggle recompute on and off with the recompute keyword arg
recompute = kwargs.pop("recompute", False)
# generate the forward pass portion of graph
fwd = self.func(*args, **kwargs)
if not recompute:
return fwd
# create a temp op to be a control input to the recomputed graph
with tf.device("/cpu:0"):
ctrl_op = tf.constant(0.0, name="temp_ctrl_op").op
# Enable variable reuse in the current variable_scope.
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
# Distinguish these ops in a new name_scope
with tf.name_scope("recompute"):
# Use the temp ctrl_op to track needed control input targets
with tf.control_dependencies([ctrl_op]):
# Generate the recomputed ops that we want to run in the backward pass.
bwd = self.func(*args, **kwargs)
# the recompute op is a passthrough op for the fwd inputs.
# the bwd inputs allow our custom grad function to redirect gradient flow over these
# bwd inputs are disconnected after gradients are generated
y = recompute_op(_AsList(fwd), _AsList(bwd), name=self.func.__name__)
# hold on to the temp for setting up control dependencies in the grad op.
y[0].op.ctrl_op = ctrl_op
return y[0] if len(y) == 1 else y
def __get__(self, instance, owner):
# Necessary for the decorator to work on instance methods.
# See https://stackoverflow.com/questions/30104047/how-can-i-decorate-an-instance-method-with-a-decorator-class
return functools.partial(self.__call__, instance)
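# Example (sketch): opting a hypothetical graph-building function into
# recomputation; with recompute=True the forward ops are rebuilt during the
# backward pass (via the custom gradients() below) to save activation memory.
#   @recomputable
#   def ffn(x, w1, w2):
#       return tf.matmul(tf.nn.relu(tf.matmul(x, w1)), w2)
#   y = ffn(x, w1, w2, recompute=True)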
@ops.RegisterGradient("Recompute")
def recompute_grad(op, *dys):
# Ensure recompute portion of graph is only executed in the backward pass just prior to use.
dy_ops = [dy.op for dy in dys]
# our temp ctrl_op points to exactly the ops that need to be executed after dys ops
for recompute_op in op.ctrl_op._control_outputs:
# rebuild the control_inputs list for this op, filtering out the temp ctrl_op
ctrl_inputs = [x for x in recompute_op.control_inputs if x != op.ctrl_op]
# rebuild control_inputs from scratch
recompute_op._remove_all_control_inputs()
# no need to hold up simple scalar/vector constants
if recompute_op.type == "Const" and len(recompute_op.outputs[0].shape) < 2:
if len(ctrl_inputs):
recompute_op._add_control_inputs(ctrl_inputs)
else:
# tack on dy ops
recompute_op._add_control_inputs(ctrl_inputs + dy_ops)
# done with temp ctrl_op
op.ctrl_op = None
# direct the gradient flow over the recomputed ops (skipping the forward graph)
return [None]*len(op.outputs) + list(dys)
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _SetGrad(grads, x, dx):
op = x.op
op_grads = grads.get(op)
if op_grads is None:
# for each op output, maintain a list of gradient inputs
grads[op] = op_grads = [[] for _ in op.outputs]
# add this grad to the appropriate list
op_grads[x.value_index].append(dx)
def _GetGrad(grads, x):
op_grads = grads.get(x.op)
if op_grads is None:
return None
# this should always return the _AggregatedGrads value instead of the list
return op_grads[x.value_index]
def _AggregatedGrads(grads, op, agg_size):
# convert lists of gradient inputs to tensors
dys = grads.get(op)
if dys is not None:
for i, dy in enumerate(dys):
if len(dy):
if len(dy) == 1:
dys[i] = dy[0] # no op
else:
# tf.add_n has poor accuracy in fp16
# also, we want to group accumulations so we can start freeing up memory early
# dys[i] = tf.add_n(dy)
agg = ew.add_n8_op(dy[0:agg_size]) if agg_size > 1 else dy[0]
for j in range(agg_size, len(dy), agg_size):
agg = ew.add_n8_op(dy[j:j+agg_size] + [agg])
dys[i] = agg
else:
dys[i] = None
return dys
else:
return [None] * len(op.outputs)
def _PendingCount(ys_ops, xs_ops):
grad_dtypes = set((tf.float32, tf.float16, tf.bfloat16))
# Ascend tree from the params and/or inputs (xs) to the losses (ys).
# Create set of each unique node along the way.
reached_ops = set()
queue = collections.deque(xs_ops)
while queue:
op = queue.popleft()
if op not in reached_ops:
reached_ops.add(op)
for output in op.outputs:
if output.dtype.base_dtype in grad_dtypes:
queue.extend(output.consumers())
# Get the subset of ys that are reachable from xs.
reachable_ys_ops = set(op for op in ys_ops if op in reached_ops)
# Descend tree from ys along the reachable path.
# Mark unique ops along the way (between_ops).
# Handle gradient rerouting for recompute nodes.
recompute_ops = list()
between_ops = set()
queue = collections.deque(reachable_ys_ops)
while queue:
op = queue.popleft()
if op in reached_ops:
between_ops.add(op)
# don't add the inputs again.
reached_ops.remove(op)
# For recompute ops only traverse the second graph copy
# We don't want the forward pass ops contributing to the pending_count.
if op.type == "Recompute":
recompute_ops.append(op)
n_outs = len(op.outputs)
for x in op.inputs[n_outs:n_outs*2]:
queue.append(x.op)
else:
for x in op.inputs:
queue.append(x.op)
# Build a mapping from operation to the number of grad inputs to that op
# ops not in this dict should no longer be traversed (excepting the initial ys ops with no dependencies).
pending_count = dict()
for op in between_ops:
for x in op.inputs:
if x.op in between_ops:
pending_count[x.op] = pending_count.get(x.op, 0) + 1
return pending_count, reachable_ys_ops, recompute_ops
def _MatMulGradNN(op, dy):
# Custom Gradient for MatMul (NN)
# Force param gradient first so all-reduce can happen quicker.
x = op.inputs[0]
w = op.inputs[1]
dw = tf.matmul(x, dy, transpose_a=True)
with tf.control_dependencies([dw.op]):
dx = tf.matmul(dy, w, transpose_b=True)
return dx, dw
def gradients(ys, xs, grad_ys=None, stop_grads=None, group_aggregations=8, custom_matmul_grad=True):
if group_aggregations > 8 or group_aggregations < 1:
raise ValueError("gradients: group_aggregation sizes of 1-8 supported.")
ys = _AsList(ys)
xs = [x.value() if isinstance(x, tf.Variable) else x for x in _AsList(xs)]
stop_grads = [] if stop_grads is None else _AsList(stop_grads)
grad_ys = [None] * len(ys) if grad_ys is None else _AsList(grad_ys)
assert len(ys) == len(grad_ys)
with ops.name_scope("gradients"):
for i, dy in enumerate(grad_ys):
if dy is None:
# float grads start at ones by default
grad_ys[i] = tf.fill(tf.shape(ys[i]), tf.constant(1.0, dtype=ys[i].dtype, name=f"grad_ys_{i}"))
ys_ops = [t.op for t in ys]
xs_ops = [t.op for t in xs]
pending_count, reachable_ys_ops, recompute_ops = _PendingCount(ys_ops, xs_ops)
# The set of ops that terminate the gradient computation.
# Confirm that our xs tensors are just endpoints in the graph.
# Also set any externally provided stop grad ops.
stop_ops = set(t.op for t in stop_grads)
for op in xs_ops:
is_stop_op = True
for x in op.inputs:
if x.op in pending_count:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op)
# Each op output has an associated list of gradient inputs
# If more than one, these need to be accumulated.
# Add the initial gradients for the ys.
grads = dict()
for y, dy in zip(ys, grad_ys):
_SetGrad(grads, y, dy)
# Add the unique ys ops that are ready into the queue.
queue = collections.deque()
for op in reachable_ys_ops:
# an op is ready if it has no dependencies
if op not in pending_count:
queue.append(op)
while queue:
op = queue.popleft()
# only pending_count==0 ops are in the queue so all grad input lists are fully populated
# go ahead and apply any needed add_n ops to these lists.
dys = _AggregatedGrads(grads, op, group_aggregations)
# confirm that we have at least one tensor to compute and that this isn't a stop grad op
if any(dy is not None for dy in dys) and op not in stop_ops:
# get the grad function for this op
try:
if custom_matmul_grad and op.type == "MatMul" and not op.get_attr("transpose_a") and not op.get_attr("transpose_b"):
grad_fn = _MatMulGradNN
else:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(f"No gradient defined for operation '{op.name}' (op type: {op.type})")
# for any missing input grads, build a zero input of the right dtype/shape
for i, dy in enumerate(dys):
if dy is None:
dys[i] = tf.zeros_like(op.outputs[i])
# call the grad function with the forward op node and list of grad inputs
with ops.name_scope(op.name + "_grad"):
dxs = _AsList(grad_fn(op, *dys))
if len(dxs) != len(op.inputs):
raise ValueError(f"Num gradients {len(dxs)} generated for op {op.node_def} do not match num inputs {len(op.inputs)}")
#_LogOpGradients(op, dys, dxs)
else:
dxs = [None] * len(op.inputs)
for i, (x, dx) in enumerate(zip(op.inputs, dxs)):
if dx is not None:
# force unsorted_segment_sum call
if isinstance(dx, ops.IndexedSlices):
dx = tf.convert_to_tensor(dx)
#dx = emb.embedding_lookup_grad_op(dx.values, dx.indices, dx.dense_shape[0])
# do some shape sanity checking
try:
dx.set_shape(x.shape)
except ValueError:
raise ValueError("Incompatible shapes between op input {x.shape} and calculated input gradient {dx.shape} for {op.name} (idx:{i})")
# update the input grad list for the consumer of this gradient
_SetGrad(grads, x, dx)
# Update pending count for the inputs of op and enqueue any ready ops
for x in op.inputs:
# only traverse nodes that are in the reachable gradient path (and hence have a pending entry)
count = pending_count.get(x.op)
if count is not None:
if count == 1:
# when count is 1 this should be the last time we reach this node
queue.append(x.op)
pending_count[x.op] = count - 1
# Disconnect the recomputed portion of the graph from the forward pass.
# This was only needed to direct the gradient flow.
# Leaving these connections in place would create a circular dependency (from added control inputs).
for op in recompute_ops:
# Just overwrite the backward inputs with a copy of the forward inputs.
n_out = len(op.outputs)
for i, x in enumerate(op.inputs[:n_out]):
op._update_input(i+n_out, x)
return [_GetGrad(grads, x) for x in xs]
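# Example (sketch): drop-in replacement for tf.gradients; loss, params and
# optimizer are illustrative. group_aggregations (1-8) controls how many
# gradient tensors are summed per add_n8 call, so memory is freed earlier.
#   grads = gradients(loss, params, group_aggregations=8)
#   train_op = optimizer.apply_gradients(zip(grads, params))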
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from operator import lt
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, reduce_mul, ceil_div, z_order_3d, magic32u, magic64u
blocksparse_conv_op = _op_module.blocksparse_conv
blocksparse_deconv_op = _op_module.blocksparse_deconv
edge_bias_op = _op_module.edge_bias
edge_bias_grad_op = _op_module.edge_bias_grad
l2_normalize_kctrs = _op_module.l2_normalize_kctrs
l2_normalize_cktrs = _op_module.l2_normalize_cktrs
l2_normalize_grad_kctrs = _op_module.l2_normalize_grad_kctrs
l2_normalize_grad_cktrs = _op_module.l2_normalize_grad_cktrs
l2_normalize_gain_kctrs = _op_module.l2_normalize_gain_kctrs
l2_normalize_gain_cktrs = _op_module.l2_normalize_gain_cktrs
l2_normalize_gain_grad_kctrs = _op_module.l2_normalize_gain_grad_kctrs
l2_normalize_gain_grad_cktrs = _op_module.l2_normalize_gain_grad_cktrs
# float_cast_op = _op_module.float_cast
# convenience wrappers:
# from blocksparse.conv import conv_edge_bias_init
# y = tf.nn.conv2d(x, w, stride_shape, pad, data_format="NHWC")
# edge_bias = conv_edge_bias_init(y, x, w, stride_shape, pad, data_format="NHWC")
# eg = tf.get_variable("EG", edge_bias.shape, tf.float32, initializer=tf.ones_initializer())
# eb = tf.get_variable("EB", edge_bias.shape, tf.float32, initializer=tf.zeros_initializer())
# y = edge_bias(y, eg, eb)
def conv_edge_bias_init(y, x, w, strides=None, padding="SAME", data_format="NHWC", dilations=None):
return ConvEdgeBias(y.shape.as_list(), x.shape.as_list(), w.shape.as_list(), strides, padding, data_format, dilations)
def deconv_edge_bias_init(y, x, w, strides=None, padding="SAME", data_format="NHWC", dilations=None):
# swap x and y
return ConvEdgeBias(x.shape.as_list(), y.shape.as_list(), w.shape.as_list(), strides, padding, data_format, dilations, deconv=True)
class ConvEdgeBias(object):
Cache = dict()
def __init__(self, y_shape, x_shape, w_shape, strides=None, padding="SAME", data_format="NHWC", dilations=None, deconv=False):
if data_format in ("NCW","NCHW","NCDHW"):
self.layout = 0
sdim = slice(2,None) # NCHW
#fdim = slice(2,None) # KCRS
# tf keeps its own format for params and does transpose ops..
fdim = slice(0,-2) # RSCK
cdim = 1
else:
self.layout = 1
sdim = slice(1,-1) # NHWC
fdim = slice(0,-2) # RSCK
cdim = -1
C = x_shape[cdim]
K = y_shape[cdim]
MPQ = expand_dims(y_shape[sdim])
DHW = expand_dims(x_shape[sdim])
TRS = expand_dims(w_shape[fdim])
strides = (1,1,1) if strides is None else expand_dims(strides[sdim])
dilates = (1,1,1) if dilations is None else expand_dims(dilations[sdim])
if padding.upper() == "VALID":
padding = (0,0,0)
else:
padding = list()
for S, Q, W, stride, dilate in zip(TRS, MPQ, DHW, strides, dilates):
# match padding formula used in tensorflow
padding.append(max((Q - 1) * stride + S - W, 0) // 2)
if deconv:
lut_func = bprop_lut
MPQ, DHW = DHW, MPQ
C, K = K, C
else:
lut_func = fprop_lut
key = tuple(tuple(a) for a in (MPQ, DHW, TRS, padding, strides, dilates))
entry = ConvEdgeBias.Cache.get(key, None)
if entry is None:
mpqLut = list()
fdata = list(zip(TRS, padding, strides, dilates))
for i in range(3):
mpqLut.append( [ lut_func( dim, DHW[i], *fdata[i]) for dim in range(MPQ[i]) ] )
self._build_edge_lut(MPQ, mpqLut)
ConvEdgeBias.Cache[key] = (self.edgeBiasMap, self.edgeBiasLut, self.edgeEntries)
else:
self.edgeBiasMap, self.edgeBiasLut, self.edgeEntries = entry
self.edgeBiasDim = len(self.edgeBiasMap)
self.shape = (self.edgeBiasDim, K) if self.layout else (K, self.edgeBiasDim)
def _build_edge_lut(self, MPQ, mpqLut):
# Hash the mpq coordinates on unique edge overlap patterns
# The hash key is the list of lut indices where the offset is -1
PQ = MPQ[1] * MPQ[2]
Q = MPQ[2]
edge_map = dict()
mLut, pLut, qLut = mpqLut
for m,p,q in np.ndindex(*MPQ):
key = list()
for di, d in enumerate(mLut[m]):
for hi, h in enumerate(pLut[p]):
for wi, w in enumerate(qLut[q]):
if any(x == -1 for x in (d,h,w)):
key.append((di,hi,wi))
if len(key):
key = tuple(key)
mpqOffset = m*PQ + p*Q + q
edge_list = edge_map.get(key)
if edge_list is None:
edge_map[key] = [mpqOffset]
else:
edge_list.append(mpqOffset)
self.edgeBiasDim = len(edge_map)
if self.edgeBiasDim:
# so K x len(edge_map) is the size of the bias vector
# we need a lut of bias index => mpqOffset mappings
biasHead = list()
biasData = list()
biasMap = sorted(edge_map.values(), key=lambda x: x[0])
offset = len(biasMap) * 2
# the lut contains a header with 2 entries per unique bias: offset, size
for mpqList in biasMap:
biasHead.extend((offset, len(mpqList)))
biasData.extend(mpqList)
offset += len(mpqList)
pad4 = 4 - (len(biasData) & 3) if (len(biasData) & 3) else 0
biasLut = biasHead + biasData + ( [0] * pad4 )
self.edgeEntries = len(biasData)
self.edgeBiasMap = biasMap
self.edgeBiasLut = tf.constant(np.array(biasLut, dtype=np.int32), name="edge_bias_lut")
def edge_bias_test(self, x, g, b):
if self.edgeBiasDim:
if self.layout:
N = x.shape[0]
K = x.shape[-1]
y = np.array(x.reshape(N, -1, K))
for i in range(self.edgeBiasDim):
y[:,self.edgeBiasMap[i],:] = y[:,self.edgeBiasMap[i],:] * g[i,:].reshape(1, 1, K) + b[i, :].reshape(1, 1, K)
return y.reshape(x.shape)
else:
N, K = x.shape[0:2]
y = np.array(x.reshape(N, K, -1))
for i in range(self.edgeBiasDim):
y[:,:,self.edgeBiasMap[i]] = y[:,:,self.edgeBiasMap[i]] * g[:,i].reshape(1, K, 1) + b[:,i].reshape(1, K, 1)
return y.reshape(x.shape)
else:
return x
# dx = g * dy
# dg = sum(dy * x)
# db = sum(dy)
def edge_bias_grad_test(self, dy, x, g):
if self.edgeBiasDim:
lut = self.edgeBiasMap
dy_shape = dy.shape
if self.layout:
N = dy_shape[0]
K = dy_shape[-1]
x = x.reshape(N, -1, K)
dy = dy.reshape(N, -1, K)
dx = np.array(dy)
dg = np.empty(self.shape, dtype=np.float32)
db = np.empty(self.shape, dtype=np.float32)
for i in range(self.edgeBiasDim):
dx[:,lut[i],:] *= g[i,:].reshape(1, 1, K)
dg[i,:] = (dy[:,lut[i],:] * x[:,lut[i],:]).sum(axis=(0,1))
db[i,:] = dy[:,lut[i],:].sum(axis=(0,1))
else:
N, K = dy_shape[0:2]
x = x.reshape(N, K, -1)
dy = dy.reshape(N, K, -1)
dx = np.array(dy)
dg = np.empty(self.shape, dtype=np.float32)
db = np.empty(self.shape, dtype=np.float32)
for i in range(self.edgeBiasDim):
dx[:,:,lut[i]] *= g[:,i].reshape(1, K, 1)
dg[:,i] = (dy[:,:,lut[i]] * x[:,:,lut[i]]).sum(axis=(0,2))
db[:,i] = dy[:,:,lut[i]].sum(axis=(0,2))
return dx.reshape(dy_shape), dg, db
else:
return dy, None, None
def __call__(self, x, g, b, inference=False, bench=0, name=None):
if self.edgeBiasDim:
return edge_bias_op(x, g, b, self.edgeBiasLut, layout=self.layout, entries=self.edgeEntries, inference=inference, bench=bench, name=name)
return x
@ops.RegisterGradient("EdgeBias")
def edge_bias_grad(op, dy):
dx, dg, db = edge_bias_grad_op(dy, op.inputs[0], op.inputs[1], op.inputs[3], layout=op.get_attr("layout"), entries=op.get_attr("entries"), bench=op.get_attr("bench"))
return (dx, dg, db, None)
class BlocksparseConv(object):
"""
BCK: ( # block(B)/input(C)/output(K) feature dims
( (c0, c1, c2, ...), (k0, k1, k2, ...) ), # block 0
( (c0, c1, c2, ...), (k0, k1, k2, ...) ), # block 1
( (c0, c1, c2, ...), (k0, k1, k2, ...) ), # block 2 ...
)
TRS: (T,R,S) or (R,S) or (S,) - filter spatial size dims
DHW: (D,H,W) or (H,W) or (W,) - input image spatial size dims
MPQ: (M,P,Q) or (P,Q) or (Q,) or None - output image spatial size dims (used for ambiguous dims in strided transpose conv)
strides: (1,1,1) or (1,1) or (1,)
dilates: (1,1,1) or (1,1) or (1,)
padding: (1,1,1) or (1,1) or (1,) or "SAME" or "VALID"
"""
def __init__(self, BCK, TRS, DHW, MPQ=None, strides=(1,1,1), dilates=(1,1,1), padding="SAME", debug=False, deconv=False):
# save this so we know the user's preferred number of dims (before we pad 1's out to 3 dims)
self.userTRS = list(TRS)
# support 1-3 dims (additional dimensions are possible by purely extending this python code)
for a in (TRS, DHW, MPQ, strides, dilates, padding):
if type(a) in (tuple, list):
assert 1 <= len(a) <= 3
assert len(TRS) == len(DHW)
# Process the spatial dimensions
# pad sizes and strides out to 3 dimensions
TRS = expand_dims(TRS)
DHW = expand_dims(DHW)
strides = expand_dims(strides)
dilates = expand_dims(dilates)
padding = get_padding(padding, TRS, dilates)
if MPQ is None:
MPQ = [ out_dim(*dims) for dims in zip(TRS, DHW, padding, strides, dilates) ]
else:
MPQ = expand_dims(MPQ)
trs = reduce_mul(TRS)
dhw = reduce_mul(DHW)
mpq = reduce_mul(MPQ)
# construct feature portion of the grid data loaded to each cuda block
cMax = kMax = sizeF = 0
overlapC = overlapK = False
cSet, kSet = set(), set()
ckLut = list()
fpropGridF = list()
bpropGridF = list()
updatGridF = list()
normList = list()
blkSizes = set()
for listC, listK in BCK:
offset_C = list()
for c in listC:
offset_C.append(c * dhw)
if c in cSet:
overlapC = True
else:
cSet.add(c)
offset_K = list()
for k in listK:
offset_K.append(k * mpq)
if k in kSet:
overlapK = True
else:
kSet.add(k)
block_C = len(listC)
block_K = len(listK)
offset_CK = len(ckLut)
cMax = max(cMax, block_C)
kMax = max(kMax, block_K)
CTRS = block_C*trs
KTRS = block_K*trs
blkSizes.add((block_K, block_C))
# fprop: K is the outer product dim
fpropGridF.append( [ ceil_div(block_K, 32), block_C, block_K, offset_CK, sizeF ] )
# bprop: C is the outer product dim
bpropGridF.append( [ ceil_div(block_C, 32), block_C, block_K, offset_CK, sizeF ] )
# update: K and CTRS are the outer dims (KCRS = KPQ x CHW.T)
updatGridF.append( [ ceil_div(CTRS, 32), ceil_div(block_K, 32), block_C, block_K, offset_CK, sizeF ] )
# setup luts for weight norm
if deconv:
# for deconv, C and K were swapped coming in, so we need to unswap them
for c in range(block_C):
normList.append((c, KTRS, CTRS, sizeF))
else:
for k in range(block_K):
normList.append((sizeF + k * CTRS, CTRS))
# total filter size (and current filter block offset)
sizeF += block_K * block_C * trs
ckLut.extend(offset_C)
ckLut.extend(offset_K)
ckLut = np.array(ckLut, dtype=np.int32)
# Assume no missing mappings.
self.C = len(cSet)
self.K = len(kSet)
self.fixed_block_size = len(blkSizes) == 1
# Process the spatial component of the grid
self.mpqLut = list()
self.dhwLut = list()
self.mpqSlice = None
fdata = list(zip(TRS, padding, strides, dilates))
for i in range(3):
self.mpqLut.append( [ fprop_lut( x, DHW[i], *fdata[i]) for x in range(MPQ[i]) ] )
self.dhwLut.append( [ bprop_lut( x, MPQ[i], *fdata[i]) for x in range(DHW[i]) ] )
mpq_lut = self.spatial_grid(DHW, MPQ, self.mpqLut, mpq, trs)
dhw_lut = self.spatial_grid(MPQ, DHW, self.dhwLut, dhw, trs)
# get the super block dimension
dim_O = mpq_lut.shape[0]
dim_I = dhw_lut.shape[0]
# merge the spatial and feature outer product grid info
fpropGrid = list()
for dim_K, block_C, block_K, offset_CK, offset_F in fpropGridF:
for order, idx_MPQ, idx_K in sorted([ (z_order_3d(0,o,k), o,k) for o,k in np.ndindex(dim_O, dim_K) ]):
# idx_K/idx_MPQ, block_K/block_C, offset_CK, offset_F
fpropGrid.append( [
idx_MPQ + (idx_K << 16),
block_C + (block_K << 16),
offset_CK, offset_F ] )
bpropGrid = list()
for dim_C, block_C, block_K, offset_CK, offset_F in bpropGridF:
for order, idx_DHW, idx_C in sorted([ (z_order_3d(0,i,c), i,c) for i,c in np.ndindex(dim_I, dim_C) ]):
# idx_C/idx_DHW, block_K/block_C, offset_CK, offset_F
bpropGrid.append( [
idx_DHW + (idx_C << 16),
block_C + (block_K << 16),
offset_CK, offset_F ] )
updatGrid = list()
for dim_CTRS, dim_K, block_C, block_K, offset_CK, offset_F in updatGridF:
for order, idx_MPQ, idx_K, idx_CTRS in sorted([ (z_order_3d(o,k,c), o,k,c) for o,k,c in np.ndindex(dim_O, dim_K, dim_CTRS) ]):
# idx_MPQ, idx_CTRS/idx_K, block_C, block_K, offset_CK, offset_F
updatGrid.append( [
idx_MPQ, idx_CTRS + (idx_K << 16),
block_C, block_K,
offset_CK, offset_F ] )
fpropGrid = np.array(fpropGrid, dtype=np.int32)
bpropGrid = np.array(bpropGrid, dtype=np.int32)
updatGrid = np.array(updatGrid, dtype=np.int32)
normLut = np.array(normList, dtype=np.int32)
self.fshared = (trs*32 + 32 + ceil_div(cMax,4)*4 + min(kMax,32)) * 4
self.bshared = (trs*32 + 32 + ceil_div(kMax,4)*4 + min(cMax,32)) * 4
# flops per image of minibatch
self.flops = sizeF * mpq * 2
self.blocks = len(BCK)
self.debug = bool(debug)
self.BCK = BCK
self.TRS = TRS
self.DHW = DHW
self.MPQ = MPQ
self.sizeF = sizeF
self.strides = strides
self.dilates = dilates
self.padding = padding
# For integer division we'd like to do this in a single XMAD sass instruction (plus shift).
# We need to be inside of 16 bits for this to work.
# An additional XMAD could be added at a slight performance loss to support larger dimensions.
# But I'm not sure these larger dimensions are needed in practice.
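# For example, magic32u(cktrsMax, trs) yields (magic, shift) such that
# (x * magic) >> shift == x // trs for all 0 <= x < cktrsMax, replacing the
# division with one multiply-and-shift.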
cktrsMax = ceil_div(max(cMax, kMax)*trs, 32) * 32
cktrsMagic = magic32u(cktrsMax, trs)
assert cktrsMax < 2**16 and cktrsMagic[0] < 2**16, \
"Use cuDNN for large single blocks, but email me if you think there is a use case for this: [email protected]"
# kernel params
self.trs = trs
self.magic_trs = cktrsMagic
self.overlapC = overlapC
self.overlapK = overlapK
self.normSize = len(normList)
self.ck_lut = tf.constant(ckLut, name="ck_lut")
self.mpq_lut = tf.constant(mpq_lut, name="mpq_lut")
self.dhw_lut = tf.constant(dhw_lut, name="dhw_lut")
self.fprop_grid = tf.constant(fpropGrid, name="fprop_grid")
self.bprop_grid = tf.constant(bpropGrid, name="bprop_grid")
self.updat_grid = tf.constant(updatGrid, name="updat_grid")
self.norm_lut = tf.constant(normLut, name="norm_lut")
def spatial_grid(self, DHW, MPQ, mpqLut, mpq, trs):
# Find the most efficient super-block using a tile of size 32
# For ties, pick the larger tile in the W dim (more contiguous memory access)
# TODO: allow a mixture of superblock shapes, or maybe odd shapes to get better utilization
utilization = list()
# xxxxx yxxxx yyxxx zyyxx
for sb in ((1,1,32),(1,2,16),(1,4,8),(2,4,4)):
util = float(mpq) / reduce_mul( [ ceil_div(*dims) for dims in zip(MPQ, sb) ], 32)
utilization.append((1.0 - util, 32 - sb[2], sb))
sb = sorted(utilization)[0][2]
# Map the 32 positions in the superblock to MPQ coordinates
# superblock mask: zyyxx : (1,3,3), yxxxx : (0,1,15)
sb_mask = [ x - 1 for x in sb ]
# superblock cumulative right-shift: zyyxx : (4,2,0), yxxxx : (5,4,0)
shifts = [ len(bin(x)) - 3 for x in sb ]
sb_shift = [ shifts[1]+shifts[2], shifts[2], 0 ]
HW = DHW[1] * DHW[2]
W = DHW[2]
PQ = MPQ[1] * MPQ[2]
Q = MPQ[2]
# Get the dimension in super blocks
mpqDim = [ ceil_div(MPQ[i], sb[i]) for i in range(3) ]
mpq_lut = list()
# Iterate over superblocks to build the lut
for order, sb_mpq in sorted([ (z_order_3d(*mpq), mpq) for mpq in np.ndindex(*mpqDim) ]):
lut32 = [ list() for i in range(trs+1) ]
for i32 in range(32):
# get the mpq coord for each of the 32 positions in the superblock
m = sb_mpq[0] * sb[0] + ((i32 >> sb_shift[0]) & sb_mask[0])
p = sb_mpq[1] * sb[1] + ((i32 >> sb_shift[1]) & sb_mask[1])
q = sb_mpq[2] * sb[2] + ((i32 >> sb_shift[2]) & sb_mask[2])
# make sure we didn't fall off the edge
if all(lt(*mM) for mM in zip((m,p,q), MPQ)):
# add in all the input image offsets for each filter position
lut = [ d*HW + h*W + w if all(x >= 0 for x in (d,h,w)) else -1
for d in mpqLut[0][m]
for h in mpqLut[1][p]
for w in mpqLut[2][q] ]
# add the output image offset
lut.append( m*PQ + p*Q + q )
else:
# -1 offsets get zero padded
lut = [-1] * (trs+1)
# transpose lut data so contiguous rows are for 32 mpq coords of the same trs value
for i in range(trs+1):
lut32[i].append(lut[i])
mpq_lut.append(lut32)
return np.array(mpq_lut, dtype=np.int32)
def i_shape(self, N): return [N, self.C] + self.DHW
def o_shape(self, N): return [N, self.K] + self.MPQ
def f_shape(self, block=None):
if block is None:
if self.fixed_block_size:
lutC, lutK = self.BCK[0]
return [self.blocks, len(lutK), len(lutC)] + self.userTRS
return [self.sizeF,]
lutC, lutK = self.BCK[block]
return [len(lutK), len(lutC)] + self.userTRS
def __call__(self, F, I):
assert I.get_shape()[1] == self.C
output = blocksparse_conv_op(
self.fprop_grid, self.bprop_grid, self.updat_grid,
self.mpq_lut, self.dhw_lut, self.ck_lut,
F, I, c_type=I.dtype,
mode=0, overlapC=self.overlapC, overlapK=self.overlapK,
C=self.C, K=self.K, DHW=self.DHW, MPQ=self.MPQ, trs=self.trs,
magic_trs=self.magic_trs[0], shift_trs=self.magic_trs[1],
dimF=F.get_shape().as_list(), fshare=self.fshared, bshare=self.bshared, debug=self.debug
)
return output
def l2_normalize(self, F, gain=None, epsilon=1e-12, dtype=np.float32):
if gain is None:
F, _ = l2_normalize_kctrs(F, self.norm_lut, TY=dtype, epsilon=epsilon, K=self.normSize )
else:
assert self.overlapK is False, "no gain support for overlapping output blocks"
F, _ = l2_normalize_gain_kctrs(F, gain, self.norm_lut, TY=dtype, epsilon=epsilon, K=self.normSize )
return F
def collapse_filter(self, F, dtype=None):
flatF = np.empty(self.sizeF, dtype=dtype)
offset = 0
for f in F:
flatF[offset:offset+f.size] = f.reshape(f.size).astype(dtype)
offset += f.size
return flatF
def init_slices(self):
if self.mpqSlice is None:
self.mpqSlice = list()
self.dhwSlice = list()
fdata = list(zip(self.TRS, self.padding, self.strides, self.dilates))
for i in range(3):
self.mpqSlice.append( [ fprop_slice(x, self.DHW[i], *fdata[i]) for x in range(self.MPQ[i]) ] )
self.dhwSlice.append( [ bprop_slice(x, self.MPQ[i], *fdata[i]) for x in range(self.DHW[i]) ] )
def fprop_test(self, F, I, alpha=1.0):
self.init_slices()
N = I.shape[0]
O = np.zeros([N, self.K] + self.MPQ)
mSlice, pSlice, qSlice = self.mpqSlice
for block in range(self.blocks):
blockF = F[block]
blockK = blockF.shape[0]
lutC, lutK = self.BCK[block]
for m,p,q in np.ndindex(*self.MPQ):
sliceT, sliceD, _ = mSlice[m]
sliceR, sliceH, _ = pSlice[p]
sliceS, sliceW, _ = qSlice[q]
# KxCTRS
slicedF = blockF[:,:,sliceT,sliceR,sliceS].reshape((blockK, -1))
# NxCDHW
slicedI = I[:,lutC,sliceD,sliceH,sliceW].reshape((N, -1))
# NxKMPQ
O[:,lutK,m,p,q] += np.dot( slicedI, slicedF.T ) * alpha
return O
def bprop_test(self, F, I, alpha=1.0):
self.init_slices()
N = I.shape[0]
O = np.zeros([N, self.C] + self.DHW)
dSlice, hSlice, wSlice = self.dhwSlice
for block in range(self.blocks):
# KC => CK, invert TRS
blockF = np.transpose(F[block][:,:,::-1,::-1,::-1], (1,0,2,3,4)).copy()
blockC = blockF.shape[0]
lutC, lutK = self.BCK[block]
for d,h,w in np.ndindex(*self.DHW):
sliceT, sliceM = dSlice[d]
sliceR, sliceP = hSlice[h]
sliceS, sliceQ = wSlice[w]
# CxKTRS
slicedF = blockF[:,:,sliceT,sliceR,sliceS].reshape((blockC, -1))
# NxKMPQ
slicedI = I[:,lutK,sliceM,sliceP,sliceQ].reshape((N, -1))
# NxCDHW
O[:,lutC,d,h,w] += np.dot( slicedI, slicedF.T ) * alpha
return O
def updat_test(self, E, I, alpha=1.0, transpose=False):
self.init_slices()
U = list()
N = I.shape[0]
mSlice, pSlice, qSlice = self.mpqSlice
for block in range(self.blocks):
lutC, lutK = self.BCK[block]
dimF = self.f_shape(block)
blockU = np.zeros(dimF)
U.append(blockU)
for m,p,q in np.ndindex(*self.MPQ):
sliceT, sliceD, tlen = mSlice[m]
sliceR, sliceH, rlen = pSlice[p]
sliceS, sliceW, slen = qSlice[q]
# NxCDHW
slicedI = I[:,lutC,sliceD,sliceH,sliceW].reshape(N,-1)
# NxKMPQ
slicedE = E[:,lutK,m,p,q]
# CxKTRS
blockU[:,:,sliceT,sliceR,sliceS] += np.dot(slicedE.T, slicedI).reshape((dimF[0], dimF[1], tlen, rlen, slen)) * alpha
return self.collapse_filter(U, dtype=np.float32)
def l2_normalize_test(self, F, gain=None, epsilon=1e-12):
normF = list()
if gain is None:
for blockF in F:
norm = np.sqrt(np.maximum(np.sum(np.square(blockF), axis=(1,2,3,4), keepdims=True), epsilon))
normF.append((blockF / norm))
else:
offsetK = 0
for blockF in F:
blockK = blockF.shape[0]
g = gain[offsetK:offsetK+blockK].reshape((blockK,1,1,1,1))
norm = np.sqrt(np.maximum(np.sum(np.square(blockF), axis=(1,2,3,4), keepdims=True), epsilon))
normF.append((g * blockF / norm))
offsetK += blockK
return self.collapse_filter(normF, dtype=np.float32)
def l2_normalize_grad_test(self, F, U, gain=None, epsilon=1e-12):
D = list()
if gain is None:
grad_g = None
for blockF, blockU in zip(F, U):
sum_sqr_w = np.sum(np.square(blockF), axis=(1,2,3,4), keepdims=True)
max_w = np.maximum(sum_sqr_w, epsilon)
d = ( blockU + blockF * (sum_sqr_w >= epsilon) * np.sum(-blockU * blockF / max_w, axis=(1,2,3,4), keepdims=True) ) / np.sqrt(max_w)
D.append(d)
else:
grad_g = np.empty(self.K)
offsetK = 0
for blockF, blockU in zip(F, U):
blockK = blockF.shape[0]
g = gain[offsetK:offsetK+blockK].reshape((blockK,1,1,1,1))
sum_sqr_w = np.sum(np.square(blockF), axis=(1,2,3,4), keepdims=True)
max_w = np.maximum(sum_sqr_w, epsilon)
norm_w = 1.0 / np.sqrt(max_w)
grad_g[offsetK:offsetK+blockK] = np.sum(blockU * blockF * norm_w, axis=(1,2,3,4))
d = ( blockU * g + blockF * (sum_sqr_w >= epsilon) * np.sum(-blockU * blockF * g / max_w, axis=(1,2,3,4), keepdims=True) ) * norm_w
D.append(d)
offsetK += blockK
return self.collapse_filter(D, dtype=np.float32), grad_g
@ops.RegisterGradient("BlocksparseConv")
def blocksparse_conv_grad(op, grad):
overlapC = op.get_attr("overlapC")
overlapK = op.get_attr("overlapK")
C = op.get_attr("C")
K = op.get_attr("K")
DHW = op.get_attr("DHW")
MPQ = op.get_attr("MPQ")
dimF = op.get_attr("dimF")
trs = op.get_attr("trs")
magic_trs = op.get_attr("magic_trs")
shift_trs = op.get_attr("shift_trs")
fshare = op.get_attr("fshare")
bshare = op.get_attr("bshare")
debug = op.get_attr("debug")
assert grad.get_shape()[1] == K
grad_I = blocksparse_conv_op(
op.inputs[0], op.inputs[1], op.inputs[2],
op.inputs[3], op.inputs[4], op.inputs[5],
op.inputs[6], grad, c_type=grad.dtype,
mode=1, overlapC=overlapC, overlapK=overlapK,
C=C, K=K, DHW=DHW, MPQ=MPQ, trs=trs,
magic_trs=magic_trs, shift_trs=shift_trs,
dimF=dimF, fshare=fshare, bshare=bshare, debug=debug )
grad_F = blocksparse_conv_op(
op.inputs[0], op.inputs[1], op.inputs[2],
op.inputs[3], op.inputs[4], op.inputs[5],
grad, op.inputs[7], c_type=grad.dtype,
mode=2, overlapC=overlapC, overlapK=overlapK,
C=C, K=K, DHW=DHW, MPQ=MPQ, trs=trs,
magic_trs=magic_trs, shift_trs=shift_trs,
dimF=dimF, fshare=fshare, bshare=bshare, debug=debug )
return (None, None, None, None, None, None, grad_F, grad_I)
@ops.RegisterGradient("L2NormalizeKCTRS")
def blocksparse_l2_normalize_grad_kctrs(op, grad_y, sum_sqr_x):
epsilon = op.get_attr("epsilon")
K = op.get_attr("K")
grad_x = l2_normalize_grad_kctrs(
grad_y, op.inputs[0], op.outputs[1], op.inputs[1], epsilon=epsilon, K=K)
return (grad_x, None)
@ops.RegisterGradient("L2NormalizeGainKCTRS")
def blocksparse_l2_normalize_grad_kctrs(op, grad_y, sum_sqr_x):
epsilon = op.get_attr("epsilon")
K = op.get_attr("K")
grad_x, grad_g = l2_normalize_gain_grad_kctrs(
grad_y, op.inputs[0], op.inputs[1], op.outputs[1], op.inputs[2], epsilon=epsilon, K=K)
return (grad_x, grad_g, None)
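# Example (sketch): a two-block sparse conv; block 0 maps input channels 0-3 to
# output channels 0-7, block 1 maps channels 4-7 to outputs 8-15, 3x3 filters
# over a 32x32 input (all names below are illustrative):
#   BCK = ( ((0,1,2,3), tuple(range(0, 8))),
#           ((4,5,6,7), tuple(range(8,16))) )
#   conv = BlocksparseConv(BCK, TRS=(3,3), DHW=(32,32))
#   F = tf.get_variable("F", conv.f_shape())
#   I = tf.placeholder(tf.float32, conv.i_shape(N=16))
#   O = conv(F, I)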
############################## Blocksparse Deconv #####################################
class BlocksparseDeconv(BlocksparseConv):
def __init__(self, BCK, TRS, DHW, MPQ=None, strides=(1,1,1), dilates=(1,1,1), padding="SAME", debug=False):
# C<=>K, DHW<=>MPQ, fprop<=>bprop, update args (EI <=> IE), filter layout: KCTRS <=> CKTRS
BKC = list()
for listC, listK in BCK:
BKC.append([listK, listC])
if MPQ is None:
padding = get_padding(padding, TRS, dilates)
MPQ = [ in_dim(*dims) for dims in zip(TRS, DHW, padding, strides, dilates) ]
super(BlocksparseDeconv, self).__init__(BKC, TRS, MPQ, DHW, strides, dilates, padding, debug, True)
def i_shape(self, N): return [N, self.K] + self.MPQ
def o_shape(self, N): return [N, self.C] + self.DHW
def fprop_test(self, F, I, alpha=1.0):
return super(BlocksparseDeconv, self).bprop_test(F, I, alpha)
def bprop_test(self, F, I, alpha=1.0):
return super(BlocksparseDeconv, self).fprop_test(F, I, alpha)
def updat_test(self, E, I, alpha=1.0):
return super(BlocksparseDeconv, self).updat_test(I, E, alpha, transpose=True)
def l2_normalize_test(self, F, gain=None, epsilon=1e-12):
normF = list()
if gain is None:
for blockF in F:
norm = np.sqrt(np.maximum(np.sum(np.square(blockF), axis=(0,2,3,4), keepdims=True), epsilon))
normF.append((blockF / norm))
else:
offsetK = 0
for blockF in F:
blockK = blockF.shape[1]
g = gain[offsetK:offsetK+blockK].reshape((1,blockK,1,1,1))
norm = np.sqrt(np.maximum(np.sum(np.square(blockF), axis=(0,2,3,4), keepdims=True), epsilon))
normF.append((g * blockF / norm))
offsetK += blockK
return self.collapse_filter(normF, dtype=np.float32)
def l2_normalize_grad_test(self, F, U, gain=None, epsilon=1e-12):
D = list()
if gain is None:
grad_g = None
for blockF, blockU in zip(F, U):
sum_sqr_w = np.sum(np.square(blockF), axis=(0,2,3,4), keepdims=True)
max_w = np.maximum(sum_sqr_w, epsilon)
d = ( blockU + blockF * (sum_sqr_w >= epsilon) * np.sum(-blockU * blockF / max_w, axis=(0,2,3,4), keepdims=True) ) / np.sqrt(max_w)
D.append(d)
else:
grad_g = np.empty(self.C)
offsetK = 0
for blockF, blockU in zip(F, U):
blockK = blockF.shape[1]
g = gain[offsetK:offsetK+blockK].reshape((1,blockK,1,1,1))
sum_sqr_w = np.sum(np.square(blockF), axis=(0,2,3,4), keepdims=True)
max_w = np.maximum(sum_sqr_w, epsilon)
norm_w = 1.0 / np.sqrt(max_w)
grad_g[offsetK:offsetK+blockK] = np.sum(blockU * blockF * norm_w, axis=(0,2,3,4))
d = ( blockU * g + blockF * (sum_sqr_w >= epsilon) * np.sum(-blockU * blockF * g / max_w, axis=(0,2,3,4), keepdims=True) ) * norm_w
D.append(d)
offsetK += blockK
return self.collapse_filter(D, dtype=np.float32), grad_g
def __call__(self, F, I):
assert I.get_shape()[1] == self.K
# mode 0 => 1
output = blocksparse_deconv_op(
self.fprop_grid, self.bprop_grid, self.updat_grid,
self.mpq_lut, self.dhw_lut, self.ck_lut,
F, I, c_type=I.dtype,
mode=1, overlapC=self.overlapC, overlapK=self.overlapK,
C=self.C, K=self.K, DHW=self.DHW, MPQ=self.MPQ, trs=self.trs,
magic_trs=self.magic_trs[0], shift_trs=self.magic_trs[1],
dimF=F.get_shape().as_list(), fshare=self.fshared, bshare=self.bshared, debug=self.debug
)
return output
def l2_normalize(self, F, gain=None, epsilon=1e-12, dtype=np.float32):
if gain is None:
F, _ = l2_normalize_cktrs(
F, self.norm_lut, TY=dtype, epsilon=epsilon, K=self.normSize,
TRS=self.trs, magic_TRS=self.magic_trs[0], shift_TRS=self.magic_trs[1] )
else:
assert self.overlapC is False
F, _ = l2_normalize_gain_cktrs(
F, gain, self.norm_lut, TY=dtype, epsilon=epsilon, K=self.normSize,
TRS=self.trs, magic_TRS=self.magic_trs[0], shift_TRS=self.magic_trs[1] )
return F
@ops.RegisterGradient("BlocksparseDeconv")
def blocksparse_deconv_grad(op, grad):
overlapC = op.get_attr("overlapC")
overlapK = op.get_attr("overlapK")
C = op.get_attr("C")
K = op.get_attr("K")
DHW = op.get_attr("DHW")
MPQ = op.get_attr("MPQ")
dimF = op.get_attr("dimF")
trs = op.get_attr("trs")
magic_trs = op.get_attr("magic_trs")
shift_trs = op.get_attr("shift_trs")
fshare = op.get_attr("fshare")
bshare = op.get_attr("bshare")
debug = op.get_attr("debug")
# mode 1 => 0
grad_I = blocksparse_deconv_op(
op.inputs[0], op.inputs[1], op.inputs[2],
op.inputs[3], op.inputs[4], op.inputs[5],
op.inputs[6], grad, c_type=grad.dtype,
mode=0, overlapC=overlapC, overlapK=overlapK,
C=C, K=K, DHW=DHW, MPQ=MPQ, trs=trs,
magic_trs=magic_trs, shift_trs=shift_trs,
dimF=dimF, fshare=fshare, bshare=bshare, debug=debug )
# E,I => I,E
grad_F = blocksparse_deconv_op(
op.inputs[0], op.inputs[1], op.inputs[2],
op.inputs[3], op.inputs[4], op.inputs[5],
op.inputs[7], grad, c_type=grad.dtype,
mode=2, overlapC=overlapC, overlapK=overlapK,
C=C, K=K, DHW=DHW, MPQ=MPQ, trs=trs,
magic_trs=magic_trs, shift_trs=shift_trs,
dimF=dimF, fshare=fshare, bshare=bshare, debug=debug )
return (None, None, None, None, None, None, grad_F, grad_I)
@ops.RegisterGradient("L2NormalizeCKTRS")
def blocksparse_l2_normalize_grad_cktrs(op, grad_y, sum_sqr_x):
epsilon = op.get_attr("epsilon")
K = op.get_attr("K")
TRS = op.get_attr("TRS")
magic_TRS = op.get_attr("magic_TRS")
shift_TRS = op.get_attr("shift_TRS")
grad_x = l2_normalize_grad_cktrs(
grad_y, op.inputs[0], op.outputs[1], op.inputs[1], epsilon=epsilon,
K=K, TRS=TRS, magic_TRS=magic_TRS, shift_TRS=shift_TRS)
return (grad_x, None)
@ops.RegisterGradient("L2NormalizeGainCKTRS")
def blocksparse_l2_normalize_gain_grad_cktrs(op, grad_y, sum_sqr_x):
epsilon = op.get_attr("epsilon")
K = op.get_attr("K")
TRS = op.get_attr("TRS")
magic_TRS = op.get_attr("magic_TRS")
shift_TRS = op.get_attr("shift_TRS")
grad_x, grad_g = l2_normalize_gain_grad_cktrs(
grad_y, op.inputs[0], op.inputs[1], op.outputs[1], op.inputs[2], epsilon=epsilon,
K=K, TRS=TRS, magic_TRS=magic_TRS, shift_TRS=shift_TRS)
return (grad_x, grad_g, None)
############################## ChannelWise Linear #####################################
cwise_linear_op = _op_module.c_wise_linear
cwise_linear_grad_op = _op_module.c_wise_linear_grad
def cwise_linear(x, gain=None, bias=None, relu=False, bias_first=False, use_tf=False):
assert gain is not None or bias is not None
dev = x.op.device.lower()
if use_tf or not dev or "cpu" in dev:
if bias_first:
if bias is not None:
x += bias
if gain is not None:
x *= gain
else:
if gain is not None:
x *= gain
if bias is not None:
x += bias
return tf.nn.relu(x) if relu else x
gain = [] if gain is None else [gain]
bias = [] if bias is None else [bias]
return cwise_linear_op(x, gain, bias, relu=relu, swap=bias_first)
@ops.RegisterGradient("CWiseLinear")
def cwise_linear_axpb_grad(op, dy):
relu = op.get_attr("relu")
swap = op.get_attr("swap")
n_a = op.get_attr("n_a")
n_b = op.get_attr("n_b")
if n_a:
# anything with a scale factor we need to save the input
xy = [ op.inputs[0] ]
elif relu:
# with relu(x + b) we save the outputs
xy = [ op.outputs[0] ]
else:
# x + b requires no saved tensors
xy = []
a = [ op.inputs[1 ] ] if n_a else []
b = [ op.inputs[1+n_a] ] if n_b else []
dx, da, db = cwise_linear_grad_op(dy, xy, a, b, relu=relu, swap=swap)
if n_a and n_b:
return dx, da, db
if n_a:
return dx, da
return dx, db
def cwise_linear_test(x, a=1, b=0, relu=False):
# create broadcastable shapes for a and b
bcast = list(x.shape)
for i in range(len(bcast)):
if i != 1: bcast[i] = 1
# avoid identity comparison with int literals; reshape only real array args
if isinstance(a, np.ndarray):
a = a.reshape(bcast)
if isinstance(b, np.ndarray):
b = b.reshape(bcast)
y = a*x + b
if relu:
y = np.maximum(y, 0.)
return y
def cwise_linear_grad_test(dy, x, a=1, b=0, relu=False):
bcast = list(dy.shape)
axis = list()
for i in range(len(bcast)):
if i != 1:
bcast[i] = 1
axis.append(i)
axis = tuple(axis)
# avoid identity comparison with int literals; reshape only real array args
if isinstance(a, np.ndarray):
a = a.reshape(bcast)
if isinstance(b, np.ndarray):
b = b.reshape(bcast)
if relu:
dy = dy * (a*x + b > 0.0)
dx = a * dy
da = np.sum(dy * x, axis=axis)
db = np.sum(dy, axis=axis)
return dx, da, db
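# Example (sketch): fused per-channel scale/shift over dim 1 (NCHW), with an
# optional fused relu; g and b are rank-1 tensors of length C:
#   y = cwise_linear(x, gain=g, bias=b, relu=True)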
############################## Helpers #####################################
def dilation_size(S, dilate):
return S * dilate - dilate + 1
def tf_out_dim_pad(S, W, padding, stride, dilate):
S = dilation_size(S, dilate)
if padding.upper() == "SAME":
Q = ceil_div(W, stride)
p = max((Q - 1) * stride + S - W, 0) // 2
else:
Q = ceil_div(W - S + 1, stride)
p = 0
return Q, p
def out_dim(S, W, padding, stride, dilate):
return ceil_div(W - dilation_size(S, dilate) + 1 + 2*padding, stride)
def in_dim(S, W, padding, stride, dilate):
# Note: inverting ceil_div is ambiguous, assume orig numerator was even multiple of stride
# It's safer to just manually specify the output_dim
return W*stride + S - 2*padding - (S & 1)
def expand_dims(dim, pad_val=1):
return [pad_val] * (3 - len(dim)) + list(dim)
def get_padding(padding, TRS, dilates):
if type(padding) is str:
if padding.upper() == "SAME":
padding = [ dilation_size(*dims) // 2 for dims in zip(TRS, dilates) ]
else:
padding = [0,0,0]
else:
padding = expand_dims(padding, 0)
return padding
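# e.g. get_padding("SAME", TRS=(1,3,3), dilates=(1,1,1)) -> [0, 1, 1]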
def fprop_lut(q, X, S, padding, stride, dilate):
qs = q * stride - padding
image = list()
for s in range(S):
x = qs + s * dilate
image.append(x if x >= 0 and x < X else -1)
return image
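# e.g. fprop_lut(q=0, X=8, S=3, padding=1, stride=1, dilate=1) -> [-1, 0, 1]
# (the first filter tap falls in the zero-padded region)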
def bprop_lut(x, Q, S, padding, stride, dilate):
pad_eff = dilation_size(S, dilate) - padding - 1
xs = x - pad_eff
image = list()
# invert the filter to image mapping
for s in range(S-1, -1, -1):
q = xs + s * dilate
if q % stride == 0:
q //= stride
if q >= 0 and q < Q:
image.append(q)
else:
image.append(-1)
else:
# we need to be able to distinguish between a hole in striding and edge padding
image.append(-2)
return image
def fprop_slice(q, X, S, padding, stride, dilate):
qs = q * stride - padding
x1 = None
for s in range(S):
x = qs + s * dilate
if x1 is None and x >= 0:
x1 = x
f1 = s
if x < X:
x2 = x
f2 = s
return (slice(f1, f2 + 1), slice(x1, x2 + 1, dilate), f2 - f1 + 1)
def bprop_slice(x, Q, S, padding, stride, dilate):
pad_eff = dilation_size(S, dilate) - padding - 1
xs = x - pad_eff
f, e = list(), list()
for s in range(S):
q = xs + s * dilate
if q % stride == 0:
q //= stride
if q >= 0 and q < Q:
f.append(s)
e.append(q)
if len(f) == 0:
return (slice(0, 0, 1), slice(0, 0, 1))
if len(f) == 1:
fstride = estride = 1
else:
fstride = f[1] - f[0]
estride = e[1] - e[0]
return (slice(f[0], f[-1]+1, fstride), slice(e[0], e[-1]+1, estride))
|
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module
############################## fused_lstm_gates #####################################
lstm_gates_op = _op_module.lstm_gates
lstm_gates_grad_op = _op_module.lstm_gates_grad
lstm_gates4_op = _op_module.lstm_gates4
lstm_gates4_grad_op = _op_module.lstm_gates4_grad
bias_grad_op = _op_module.bias_grad
def fused_lstm_gates(c, *args, bias=None, forget_bias=1.0, name=None):
# returns c_next, h_next
dev = args[0].op.device.lower()
if not dev or "cpu" in dev:
h = args[0]
if bias is not None:
h = tf.nn.bias_add(h, bias)
i, j, f, o = tf.split(h, 4, axis=1)
fb = tf.constant(forget_bias, dtype=f.dtype)
new_c = tf.add(tf.multiply(c, tf.sigmoid(tf.add(f, fb))), tf.multiply(tf.sigmoid(i), tf.tanh(j)))
new_h = tf.multiply(tf.tanh(new_c), tf.sigmoid(o))
return new_c, new_h
# args is h (all four gates fused in a single tensor)
if len(args) == 1:
bias = [] if bias is None else [ bias ]
return lstm_gates_op(c, args[0], bias, forget_bias=forget_bias, name=name)
assert len(args) == 4, "args are i, u, f, o"
assert bias is None, "bias not enabled in this mode"
return lstm_gates4_op(c, *args, forget_bias=forget_bias, name=name)
@ops.RegisterGradient("LSTMGates")
def fused_lstm_gates_grad(op, ec, eh):
bias = [] if len(op.inputs) == 2 else [ op.inputs[2] ]
# in our kernels we just conditionally load zero instead of reading the constant tensor
grads = [eh] if ec is None or ec.op.type == "Fill" else [eh, ec]
dc, dh = lstm_gates_grad_op(op.inputs[0], op.inputs[1], bias, grads, forget_bias=op.get_attr("forget_bias") )
if len(op.inputs) == 2:
return dc, dh
# compute bias grad
#db = ew_db_dzb_op(dh, op.inputs[2], op=BIASADD_OP)
# db = bias_grad_op(dh, op.inputs[2])
db, _ = bias_grad_op(dh, op.inputs[2], axis=1)
return dc, dh, db
@ops.RegisterGradient("LSTMGates4")
def fused_lstm_gates4_grad(op, ec, eh):
# in our kernels we just conditionally load zero instead of reading the constant tensor
grads = [eh] if ec is None or ec.op.type == "Fill" else [eh, ec]
return lstm_gates4_grad_op(op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[3], op.inputs[4], grads, forget_bias=op.get_attr("forget_bias") )
############################## Split4 #####################################
split4_op = _op_module.split4
concat4_op = _op_module.concat4
def split4(x):
return split4_op(x)
def concat4(x0, x1, x2, x3):
return concat4_op(x0, x1, x2, x3)
@ops.RegisterGradient("Split4")
def split4_grad(op, dz0, dz1, dz2, dz3):
return concat4_op(dz0, dz1, dz2, dz3)
@ops.RegisterGradient("Concat4")
def concat4_grad(op, dz):
return split4_op(dz)
############################## Sparse Relu #####################################
sparse_relu_op = _op_module.sparse_relu
ew_dx_dzza_op = _op_module.ew_dx_dzza
def sparse_relu(x, alpha=1.0):
return sparse_relu_op(x, alpha)
@ops.RegisterGradient("SparseRelu")
def sparse_relu_grad(op, dz):
# same grad as relu; RELU_OP is assumed to live in blocksparse.ewops, matching the other local imports in this file
from blocksparse.ewops import RELU_OP
return ew_dx_dzza_op(dz, op.outputs[0], op=RELU_OP)
def sparse_relu_test(x, alpha=1.0):
axis = len(x.shape)-1
mean = np.mean(x, axis=axis, keepdims=True)
std = np.std(x, axis=axis, keepdims=True)
cutoff = mean + alpha*std
return np.maximum(np.maximum(x, cutoff) - cutoff, 0.0)
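# Minimal numpy illustration of sparse_relu_test (a sketch): values at or
# below mean + alpha*std along the last axis are zeroed; outliers keep only
# the amount by which they exceed the cutoff.
def _sparse_relu_demo():
    x = np.array([[0., 1., 2., 10.]], dtype=np.float32)
    return sparse_relu_test(x, alpha=1.0)  # only the 10. entry is non-zero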
############################## Fused BasicLSTMCell #####################################
from tensorflow.python.ops.rnn_cell import BasicLSTMCell, LSTMStateTuple
class FusedBasicLSTMCell(BasicLSTMCell):
def __init__(self, *args, **kwargs):
super(FusedBasicLSTMCell, self).__init__(*args, **kwargs)
def call(self, inputs, state):
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = tf.split(value=state, num_or_size_splits=2, axis=1)
h = tf.matmul( tf.concat([inputs, h], 1), self._kernel )
c, h = fused_lstm_gates(c, h, bias=self._bias, forget_bias=self._forget_bias)
if self._state_is_tuple:
state = LSTMStateTuple(c, h)
else:
state = tf.concat([c, h], 1)
return h, state
############################## Simple Fused LSTM with optional layernorm #####################################
def grouped_lstm(inputs, width, timesteps, initial_state, scope="grouped_lstm", reuse=None, lstm_id=0, layernorm=True):
fp16 = inputs.dtype == tf.float16
if layernorm:
from blocksparse.norms import layer_norm
if fp16:
from blocksparse.ewops import float_cast
in_width = inputs.shape[-1].value
with tf.variable_scope(scope, reuse=reuse):
w = tf.get_variable('kernel', shape=[in_width + width, 4 * width])
b = tf.get_variable('bias', shape=[4 * width])
if layernorm:
g = tf.get_variable('gain', shape=[4 * width])
c, h = initial_state
if fp16:
w = float_cast(w, dtype=tf.float16, dx_dtype=tf.float16)
if timesteps > 1:
inputs = [ tf.squeeze(x, axis=1) for x in tf.split(inputs, timesteps, axis=1) ]
else:
inputs = [ tf.reshape(inputs, [-1, inputs.shape[-1].value]) ]
outputs = []
for t, x in enumerate(inputs):
h = tf.matmul( tf.concat([x, h], 1), w, name="lstm_%02d/step_%04d" % (lstm_id, t))
if layernorm:
h = layer_norm(h, g, b, axis=1, segments=4)
c, h = fused_lstm_gates(c, h, forget_bias=1.0)
else:
c, h = fused_lstm_gates(c, h, bias=b, forget_bias=1.0)
outputs.append(h)
output = tf.stack(outputs, axis=1)
return output, [c, h]
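# Hedged usage sketch for grouped_lstm (TF1 graph mode; shapes illustrative).
def _grouped_lstm_demo(batch=4, timesteps=3, in_width=16, width=32):
    x = tf.random_normal([batch, timesteps, in_width])
    initial = [tf.zeros([batch, width]), tf.zeros([batch, width])]
    output, (c, h) = grouped_lstm(x, width, timesteps, initial, layernorm=False)
    return output  # [batch, timesteps, width]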
def group_lstm_grads(grads, params, scope="grouped_lstm", group_size=None):
grad = None
grad_idx = None
for i, (g, p) in enumerate(zip(grads, params)):
if scope in p.name and "kernel" in p.name:
grad = g
grad_idx = i
break
assert grad is not None
# backward walk param grad to find dw MatMul ops
# walk should terminate with each MatMul op
ops = list()
wave = set([grad.op])
while wave:
new_wave = set()
for op in wave:
for in_op in (t.op for t in op.inputs):
# TN MatMul ops
if in_op.type == "MatMul" and in_op.get_attr("transpose_a") and not in_op.get_attr("transpose_b"):
ops.append(in_op)
else:
new_wave.add(in_op)
wave = new_wave
# sort op names descending and split out the lstms (if weights are shared)
last_lstm = None
lstms = list()
ops.sort(key=lambda op: op.name, reverse=True)
for op in ops:
# gradients/grouped_lstm/lstm_2/step_00_grad/MatMul_1 => lstm_2
lstm = op.name.split("/")[-3]
if last_lstm != lstm:
lstms.insert(0, list())
last_lstm = lstm
lstms[0].append(op)
# we're going to be using absolute names, so clear name_scope
with tf.name_scope(None):
lstm_grads = list()
for lstm_ops in lstms:
# default dw op to one big matmul per lstm
if group_size is None:
group_size = len(lstm_ops)
# use the lstm scope for the new ops
# gradients/grouped_lstm/lstm_2/step_00_grad/MatMul_1 => gradients/grouped_lstm/lstm_2
scope = lstm_ops[-1].name.split('/')
scope = '/'.join(scope[0:-2])
offset = 0
while offset < len(lstm_ops):
xs = tf.concat([op.inputs[0] for op in lstm_ops[offset:offset+group_size] ], axis=0)
gs = tf.concat([op.inputs[1] for op in lstm_ops[offset:offset+group_size] ], axis=0)
mmop = tf.matmul(xs, gs, transpose_a=True, transpose_b=False, name="%s/dw_%04d" % (scope, offset))
# note: using tf.add here; the original "ew.add" referenced an elementwise-ops module that is never imported in this file
grad = mmop if offset == 0 else tf.add(grad, mmop, name="%s/add_%04d" % (scope, offset))
offset += group_size
lstm_grads.append(grad)
if len(lstms) > 1:
# gradients/grouped_lstm/lstm_2/step_00_grad/MatMul_1 => gradients/grouped_lstm
scope = lstms[0][-1].name.split('/')
scope = '/'.join(scope[0:-3])
grads[grad_idx] = tf.add_n(lstm_grads, name="%s/add_n" % scope)
else:
grads[grad_idx] = lstm_grads[0]
#grads modified in place
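# Hedged sketch of the intended call pattern (inferred from the code above,
# not a documented API): compute grads normally, then rewrite the kernel
# gradient in place with grouped dw matmuls before applying them.
def _group_lstm_grads_demo(loss, params):
    grads = tf.gradients(loss, params)
    group_lstm_grads(grads, params, scope="grouped_lstm")
    return list(zip(grads, params))  # e.g. optimizer.apply_gradients(...)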
# lstm_scopes = dict()
# # ridiculous amount of code just to be able to re-enter a variable scope without its name being re-numbered.
# # https://github.com/tensorflow/tensorflow/pull/14390
# global lstm_scopes
# if scope not in lstm_scopes:
# with tf.variable_scope(scope) as lstm_scope:
# lstm_scopes[scope] = lstm_scope
# lstm_scope = lstm_scopes[scope]
# with tf.variable_scope(lstm_scope, auxiliary_name_scope=False), tf.name_scope(lstm_scope.original_name_scope):
# with tf.variable_scope(weights_scope, reuse=weights_reuse):
# w = tf.get_variable('kernel', shape=[in_width + width, 4 * width])
# if bias_scope is None:
# b = tf.get_variable('bias', shape=[4 * width])
# if layernorm:
# g = tf.get_variable('gain', shape=[4 * width])
# if bias_scope is not None:
# with tf.variable_scope(bias_scope, reuse=bias_reuse):
# b = tf.get_variable('bias', shape=[4 * width])
# if layernorm:
# g = tf.get_variable('gain', shape=[4 * width])
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, reduce_mul
layer_norm_op = _op_module.layer_norm
layer_norm_grad_op = _op_module.layer_norm_grad
batch_norm_inf_ncdhw_op = _op_module.batch_norm_inference_ncdhw
batch_norm_ncdhw_op = _op_module.batch_norm_ncdhw
batch_norm_grad_ncdhw_op = _op_module.batch_norm_grad_ncdhw
def layer_norm(x, g, b, axis=1, segments=1, epsilon=1e-6, relu=False, atomics=True, bench=0, use_tf=False):
dev = g.op.device.lower()
if use_tf or not dev or "cpu" in dev:
if axis < 0:
axis += len(x.shape)
K = x.shape[axis].value
assert g.shape.num_elements() == K
assert b.shape.num_elements() == K
assert K % segments == 0
assert axis != 0 or segments == 1, "Segments only implemented on axis=1 for now"
K //= segments
ys = list()
for s in range(segments):
segK = slice(s*K, s*K+K)
segX = [segK if d == axis else slice(None) for d in range(x.shape.ndims)]
mean, var = tf.nn.moments(x[segX], [axis], keep_dims=True)
# mean = tf.reduce_mean(x[segX], axis=[axis], keepdims=True)
# var = tf.reduce_mean(tf.square(x[segX] - mean), axis=[axis], keepdims=True)
norm = (x[segX] - mean) * tf.rsqrt(var + epsilon)
ys.append(norm * g[segK] + b[segK])
y = tf.concat(ys, axis) if segments > 1 else ys[0]
if relu:
y = tf.nn.relu(y)
else:
y, m, v, _, _ = layer_norm_op(x, g, b, S=segments, axis=axis, epsilon=epsilon, relu=relu, atomics=atomics, bench=bench)
return y
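# Hedged cross-check sketch (assumes TF1 CPU): the pure-TF fallback above
# should agree with the numpy reference layer_norm_test defined below.
def _layer_norm_demo():
    x = np.random.randn(4, 32).astype(np.float32)
    g = np.ones(32, dtype=np.float32)
    b = np.zeros(32, dtype=np.float32)
    with tf.Session() as sess:
        y = layer_norm(tf.constant(x), tf.constant(g), tf.constant(b), use_tf=True)
        return np.abs(sess.run(y) - layer_norm_test(x, g, b)).max()  # ~1e-6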
@ops.RegisterGradient("LayerNorm")
def layer_norm_grad(op, dy, mean, rstd, p1, p2):
S = op.get_attr("S")
epsilon = op.get_attr("epsilon")
relu = op.get_attr("relu")
axis = op.get_attr("axis")
atomics = op.get_attr("atomics")
bench = op.get_attr("bench")
dx, dg, db, _, _ = layer_norm_grad_op(dy, op.inputs[0], op.inputs[1], op.inputs[2], op.outputs[1], op.outputs[2], S=S, axis=axis, epsilon=epsilon, relu=relu, atomics=atomics, bench=bench)
return dx, dg, db
def batch_norm_inference(x, g, b, m, v, epsilon=1e-6):
shape = x.shape
C = int(shape[1])
DHW = int(shape[2:].num_elements())
assert g.get_shape().num_elements() == C
assert b.get_shape().num_elements() == C
assert m.get_shape().num_elements() == C
assert v.get_shape().num_elements() == C
return batch_norm_inf_ncdhw_op(x, g, b, m, v, DHW=DHW, eps=epsilon)
@ops.RegisterGradient("BatchNormInferenceNCDHW")
def batch_norm_inf_grad(op, dy):
return (dy, None, None, None, None)
def batch_norm(x, g, b, epsilon=1e-6):
shape = x.shape
C = int(shape[1])
DHW = int(shape[2:].num_elements())
magic = _magic64u(DHW)
assert g.get_shape().num_elements() == C
assert b.get_shape().num_elements() == C
return batch_norm_ncdhw_op(x, g, b, DHW=DHW, magic_DHW=magic[0], shift_DHW=magic[1], eps=epsilon)
@ops.RegisterGradient("BatchNormNCDHW")
def batch_norm_grad(op, dy, mean, var):
eps = op.get_attr("eps")
DHW = op.get_attr("DHW")
magic_DHW = op.get_attr("magic_DHW")
shift_DHW = op.get_attr("shift_DHW")
return batch_norm_grad_ncdhw_op(dy, op.inputs[0], op.inputs[1], op.outputs[1], op.outputs[2], DHW=DHW, magic_DHW=magic_DHW, shift_DHW=shift_DHW, eps=eps)
def layer_norm_test(x, g, b, axis=1, segments=1, epsilon=1e-6, relu=False):
x_shape = x.shape
K = x_shape[axis]
if axis == 0:
x = x.reshape(K,-1)
g = g.reshape(K, 1)
b = b.reshape(K, 1)
else:
axis = 1
x = x.reshape(-1, K)
g = g.reshape( 1, K)
b = b.reshape( 1, K)
K //= segments
y = np.empty_like(x)
for s in range(segments):
segK = slice(s*K, s*K+K)
seg = (segK, slice(None)) if axis == 0 else (slice(None), segK)
mean = np.mean(x[seg], axis=axis, keepdims=True)
var = np.var(x[seg], axis=axis, keepdims=True)
rstd = np.reciprocal(np.sqrt(var + epsilon))
xhat = (x[seg] - mean) * rstd
y[seg] = xhat*g[seg] + b[seg]
if relu:
y[seg] = np.maximum(y[seg], 0.0)
return y.reshape(x_shape)
def layer_norm_grad_test(dy, x, g, b, axis=1, segments=1, epsilon=1e-6, relu=False):
x_shape = x.shape
K = x_shape[axis]
if axis == 0:
dy = dy.reshape(K,-1)
x = x.reshape(K,-1)
g = g.reshape(K, 1)
b = b.reshape(K, 1)
else:
axis = 1
dy = dy.reshape(-1, K)
x = x.reshape(-1, K)
g = g.reshape( 1, K)
b = b.reshape( 1, K)
K //= segments
dy = dy.copy()
dx = np.empty_like(dy)
dg = np.empty_like(g)
db = np.empty_like(b)
for s in range(segments):
segK = slice(s*K, s*K+K)
seg = (segK, slice(None)) if axis == 0 else (slice(None), segK)
mean = np.mean(x[seg], axis=axis, keepdims=True)
xmean = x[seg] - mean
xvar = np.var(x[seg], axis=axis, keepdims=True)
xstdr = np.reciprocal(np.sqrt(xvar + epsilon))
xhat = xmean * xstdr
if relu:
dy[seg] = dy[seg] * ((xhat*g[seg] + b[seg]) > 0.0)
#print("x:%.2f, mean:%.2f, rstd:%.2f, xhat:%.2f, dy:%.2f\n" % (x[0,0], mean[0,0], xstdr[0,0], xhat[0,0], dy[0,0]));
dg[seg] = np.sum(dy[seg] * xhat, axis=1-axis, keepdims=True)
db[seg] = np.sum(dy[seg], axis=1-axis, keepdims=True)
dy[seg] = dy[seg] * g[seg]
sum1 = np.sum(xhat * dy[seg], axis=axis, keepdims=True)
sum2 = np.sum(dy[seg], axis=axis, keepdims=True)
dx[seg] = (dy[seg] - ((xhat * sum1 + sum2) / float(K))) * xstdr
return dx.reshape(x_shape), dg, db
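# Hedged finite-difference spot check of dx from layer_norm_grad_test against
# the forward pass above (a sketch; single element, loose tolerance).
def _layer_norm_grad_check(eps=1e-4):
    x = np.random.randn(2, 8)
    g = np.random.randn(8)
    b = np.random.randn(8)
    dy = np.random.randn(2, 8)
    dx, _, _ = layer_norm_grad_test(dy, x, g, b)
    xp = x.copy(); xp[0, 0] += eps
    xm = x.copy(); xm[0, 0] -= eps
    num = ((layer_norm_test(xp, g, b) - layer_norm_test(xm, g, b)) * dy).sum() / (2 * eps)
    return dx[0, 0], num  # should agree to roughly 1e-6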
def batch_norm_inf_test(x, g, b, m, v, epsilon=1e-6):
xshape = x.shape
N = xshape[0]
C = xshape[1]
x = x.reshape(N, C,-1)
g = g.reshape(1, C, 1)
b = b.reshape(1, C, 1)
m = m.reshape(1, C, 1)
v = v.reshape(1, C, 1)
rstd = np.reciprocal(np.sqrt(v + epsilon))
xhat = (x - m) * rstd
return (xhat*g + b).reshape(xshape)
def batch_norm_test(x, g, b, epsilon=1e-6):
xshape = x.shape
N = xshape[0]
C = xshape[1]
x = x.reshape(N, C,-1)
g = g.reshape(1, C, 1)
b = b.reshape(1, C, 1)
mean = np.mean(x, axis=(0,2), keepdims=True)
var = np.var (x, axis=(0,2), keepdims=True)
rstd = np.reciprocal(np.sqrt(var + epsilon))
xhat = (x - mean) * rstd
return (xhat*g + b).reshape(xshape), mean.reshape(C), var.reshape(C)
def batch_norm_grad_test(dy, x, g, m, v, epsilon=1e-6):
xshape = x.shape
N = xshape[0]
C = xshape[1]
rNDHW = 1.0 / reduce_mul(xshape[2:], N)
dy = dy.reshape(N, C,-1)
x = x.reshape(N, C,-1)
g = g.reshape(1, C, 1)
m = m.reshape(1, C, 1)
v = v.reshape(1, C, 1)
rstd = np.reciprocal(np.sqrt(v + epsilon))
xhat = (x - m) * rstd
dg = np.sum(dy * xhat, axis=(0,2), keepdims=True)
db = np.sum(dy, axis=(0,2), keepdims=True)
z = (xhat * dg + db) * rNDHW
dx = (dy - z) * rstd * g
return dx.reshape(xshape), dg.reshape(C), db.reshape(C)
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 32 bits
# Shamelessly pulled directly from:
# http://www.hackersdelight.org/hdcodetxt/magicgu.py.txt
def _magic32u(nmax, d):
nc = ((nmax + 1) // d) * d - 1
nbits = len(bin(nmax)) - 2
for p in range(0, 2 * nbits + 1):
if 2 ** p > nc * (d - 1 - (2 ** p - 1) % d):
m = (2 ** p + d - 1 - (2 ** p - 1) % d) // d
return (m, p)
raise ValueError("Can't find magic number for division")
# Magic numbers and shift amounts for integer division
# Suitable for when nmax*magic fits in 64 bits and the shift
# lops off the lower 32 bits
def _magic64u(d):
# 3 is a special case that only ends up in the high bits
# if the nmax is 0xffffffff
# we can't use 0xffffffff for all cases as some return a 33 bit
# magic number
nmax = 0xffffffff if d == 3 else 0x7fffffff
magic, shift = _magic32u(nmax, d)
if magic != 1:
shift -= 32
return (magic, shift)
# for d in range(1,1000000):
# magic, shift = _magic32u(0x7fffffff, d)
# if shift < 32 or len(hex(magic)) > 10:
# if magic != 1:
# print(d, magic, shift)
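# Quick verification sketch for the 64-bit magic path (an assumption about
# intended usage): for non-power-of-two d, (n*magic) >> (32+shift) == n // d
# for 32-bit n up to the nmax chosen above.
def _magic64u_check():
    for d in (3, 5, 7, 100, 641):
        magic, shift = _magic64u(d)
        for n in (0, 1, d - 1, d, 12345678, 0x7fffffff):
            assert (n * magic) >> (32 + shift) == n // d, (d, n, magic, shift)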
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
grad_ys[i] = array_ops.fill(
array_ops.shape(y), constant_op.constant(
1, dtype=y.dtype))
continue
# if y.dtype.is_floating or y.dtype.is_integer:
# if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
# raise TypeError("Gradient type %s generated for real or "
# "integer-valued tensor %s with type %s must be "
# "real or integer" %
# (dtypes.as_dtype(grad_y.dtype).name, y,
# dtypes.as_dtype(y.dtype).name))
# elif y.dtype.is_complex:
# if not grad_y.dtype.is_complex:
# raise TypeError("Gradient type %s generated for complex-valued "
# "tensor %s with type %s must be real" %
# (dtypes.as_dtype(grad_y.dtype).name, y,
# dtypes.as_dtype(y.dtype).name))
# else:
# raise TypeError("Tensor %s with type %s must be numeric "
# "to obtain a default gradient" %
# (y, dtypes.as_dtype(y.dtype).name))
return grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
stop_ops.update(op._id for op in stop_gradient_ops) # pylint: disable=protected-access
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# pylint: disable=protected-access
in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
# pylint: enable=protected-access
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_separate_compiled_gradients = func.definition.attr[
"_XlaSeparateCompiledGradients"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_separate_compiled_gradients = op.get_attr(
"_XlaSeparateCompiledGradients")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
return grad_fn() # Exit early
if not xla_compile:
return grad_fn() # Exit early
# If the gradients are supposed to be compiled separately, we give them a
# _XlaScope name that is based on the name_scope of the gradients. Otherwise
# they just inherit the existing _XlaScope name, which lets them be merged
# together with the non-gradient computation.
if xla_separate_compiled_gradients:
xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
else:
xla_grad_scope = xla_scope
attrs = {
"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None):
"""Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the derivatives of `ys` with
respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where
each tensor is the `sum(dy/dx)` for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant
with respect to all `xs`. These tensors will not be backpropagated through,
as though they had been explicitly disconnected using `stop_gradient`. Among
other things, this allows computation of partial derivatives as opposed to
total derivatives. For example:
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is
equivalent to:
a = tf.stop_gradient(tf.constant(0.))
b = tf.stop_gradient(2 * a)
g = tf.gradients(a + b, [a, b])
`stop_gradients` provides a way of stopping gradient after the graph has
already been constructed, as compared to `tf.stop_gradient` which is used
during graph construction. When the two approaches are combined,
backpropagation stops at both `tf.stop_gradient` nodes and nodes in
`stop_gradients`, whichever is encountered first.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
through.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
RuntimeError: if called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError("tf.gradients not supported in EAGER mode. Use "
"functions in tf.contrib.eager.backprop instead.")
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(
name, "gradients",
list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
else x
for x in xs]
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(xs, name="x",
as_ref=True)
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
if len(ys) > 1:
ys = [array_ops.identity(y) if y.consumers() else y for y in ys]
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
from_ops,
colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# pylint: enable=protected-access
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
func_call = None
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
func_call = ops.get_default_graph()._get_function(op.type)
grad_fn = func_call.python_grad_func
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and
not out_grad) and _IsTrainable(op.outputs[i]):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(
grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(
grad_scope, op, func_call, lambda: _SymGrad(op, out_grads))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
try:
in_grad.set_shape(t_in.get_shape())
except ValueError:
raise ValueError(
"Incompatible shapes between op input and calculated "
"input gradient. Forward operation: %s. Input index: %d. "
"Original input shape: %s. "
"Calculated input gradient shape: %s"
% (op.name, i, t_in.shape, in_grad.shape))
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in op.inputs:
# pylint: disable=protected-access
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
# pylint: enable=protected-access
if ready:
if control_flow_ops.IsLoopExit(x.op):
# If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_real_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_real_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_real_grad:
# For an unused exit, if it has floating-point outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_ops.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values,
array_ops.gather(grad.indices, g.indices),
g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
EXPERIMENTAL_CUSTOM = 3
DEFAULT = ADD_N
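# Hedged usage sketch: the experimental tree method aggregates pairwise, which
# can lower peak memory when many gradient terms flow into one tensor.
def _tree_aggregation_demo(loss, xs):
    return gradients(loss, xs, aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)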
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of gradients, one per each output of `op`. If the gradients
for a particular output is a list, this function aggregates it
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N,
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N,
AggregationMethod.EXPERIMENTAL_CUSTOM
]:
raise ValueError("Invalid aggregation_method specified %s." %
aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_ops.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
elif aggregation_method == AggregationMethod.EXPERIMENTAL_CUSTOM:
used = "add_n"
from blocksparse import bs_module as _bs_module
if len(out_grad) == 2:
out_grads[i] = _bs_module.ew_z_xy(out_grad[0], out_grad[1], op=0, name="Add2")
else:
total = None
while len(out_grad):
xs = [] if total is None else [total]
while len(out_grad) and len(xs) < 8:
xs.append(out_grad.pop())
total = _bs_module.add_n8(xs, name="AddN")
out_grads[i] = total
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s",
len(out_grad), tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list(
[g for g in out_grad if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat([x.values for x in out_grad], 0),
array_ops.concat([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
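# Hedged usage sketch (TF1 graph mode, module-level imports above): for
# y = sum(x*x) the Hessian is 2*I, so the product should evaluate to 2*v.
def _hessian_vector_product_demo():
    x = array_ops.placeholder(dtypes.float32, [3])
    v = array_ops.placeholder(dtypes.float32, [3])
    y = math_ops.reduce_sum(x * x)
    return _hessian_vector_product(y, [x], [v])  # evaluates to [2*v]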
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
gate_gradients=False, aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
"""
xs = _AsList(xs)
kwargs = {
'colocate_gradients_with_ops': colocate_gradients_with_ops,
'gate_gradients': gate_gradients,
'aggregation_method': aggregation_method
}
# Compute first-order derivatives and iterate for each x in xs.
hessians = []
_gradients = gradients(ys, xs, **kwargs)
for i, _gradient, x in zip(range(len(xs)), _gradients, xs):
# Ensure that x is a vector.
check_rank = check_ops.assert_rank(
x, 1, message='Cannot compute Hessian because element %d of `xs` does '
'not have rank one.' % i
)
with ops.control_dependencies([check_rank]):
# Declare an iterator and tensor array loop variables for the gradients.
n = array_ops.size(x)
loop_vars = [
array_ops.constant(0, dtypes.int32),
tensor_array_ops.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, hessian = control_flow_ops.while_loop(
lambda j, _: j < n,
lambda j, result: (j + 1,
result.write(j, gradients(_gradient[j], x)[0])),
loop_vars
)
hessians.append(hessian.stack())
return hessians
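# Hedged usage sketch for hessians (TF1 graph mode, rank-1 xs only): for
# y = sum(x**3) the Hessian is diag(6*x).
def _hessians_demo():
    x = array_ops.placeholder(dtypes.float32, [3])
    y = math_ops.reduce_sum(x * x * x)
    return hessians(y, x)  # list with one [3, 3] Hessian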
#!/usr/bin/env python
# Experimental depthwise separable convolution kernels (just the spatial components) that run on tensor cores.
# (C,H,W,N) format is used, but if remapped to (N, heads, H, W, head_state) this can be reused in self-attention-style convolution.
# The filters can then no longer be broadcast and relative attention would need to be added, but those are minor changes.
# It should also be possible to fuse in softmax.
# Stand-Alone Self-Attention in Vision Models: https://arxiv.org/abs/1906.05909
import os
import numpy as np
import pycuda.driver as drv
from pycuda.compiler import SourceModule
from pycuda.autoinit import context, device
file_path = os.path.dirname(os.path.realpath(__file__))
attributes = drv.Context.get_device().get_attributes()
SMs = attributes[drv.device_attribute.MULTIPROCESSOR_COUNT]
print(device.name(), SMs)
# for name in sorted(list(attributes.keys())):
# print(name, attributes[name])
# exit()
def ceil_div(x, y):
return -(-x // y)
def out_dim(S, W, padding, stride=1):
return ceil_div(W - S + 1 + 2*padding, stride)
def fprop_slice(q, X, S, padding, stride):
qs = q * stride - padding
x1 = None
for s in range(S):
x = qs + s
if x1 is None and x >= 0:
x1 = x
f1 = s
if x < X:
x2 = x
f2 = s
return (slice(f1, f2 + 1), slice(x1, x2 + 1), f2 - f1 + 1)
def bprop_slice(x, Q, S, padding, stride):
# pad_eff = S - padding - 1; for odd S with SAME padding this equals padding, so plain padding is used below
xs = x - padding
f, e = list(), list()
for s in range(S):
q = xs + s
if q % stride == 0:
q //= stride
if q >= 0 and q < Q:
f.append(s)
e.append(q)
if len(f) == 0:
return (slice(0, 0, 1), slice(0, 0, 1), None)
if len(f) == 1:
fstride = estride = 1
else:
fstride = f[1] - f[0]
estride = e[1] - e[0]
return (slice(f[0], f[-1]+1, fstride), slice(e[0], e[-1]+1, estride), None)
def conv_spatial_xprop(buffers, params, fprop=True):
C, H, W, N, P, Q, R, S, pad_r, pad_s, std = params
F, I = buffers
O = np.empty((C, P, Q, N), dtype=np.float32)
if fprop:
xprop_slice = fprop_slice
else:
xprop_slice = bprop_slice
F = F[:,::-1,::-1].copy() # invert RS
pSlice = [ xprop_slice(p, H, R, pad_r, std) for p in range(P) ]
qSlice = [ xprop_slice(q, W, S, pad_s, std) for q in range(Q) ]
for c, p, q in np.ndindex(C, P, Q):
sliceR, sliceH, _ = pSlice[p]
sliceS, sliceW, _ = qSlice[q]
slicedF = F[c,sliceR,sliceS].reshape(-1)
slicedI = I[c,sliceH,sliceW].reshape(-1, N)
O[c,p,q,:] = np.dot(slicedF, slicedI)
return O
def conv_spatial_updat(buffers, params):
C, H, W, N, P, Q, R, S, pad_r, pad_s, std = params
E, I = buffers
U = np.zeros((C, R, S), dtype=np.float32)
pSlice = [ fprop_slice(p, H, R, pad_r, std) for p in range(P) ]
qSlice = [ fprop_slice(q, W, S, pad_s, std) for q in range(Q) ]
for c, p, q in np.ndindex(C, P, Q):
sliceR, sliceH, rlen = pSlice[p]
sliceS, sliceW, slen = qSlice[q]
slicedI = I[c,sliceH,sliceW,:].reshape(-1, N)
slicedE = E[c,p,q,:]
U[c,sliceR,sliceS] += np.dot(slicedI, slicedE).reshape(rlen, slen)
return U
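# Minimal numpy usage sketch for the reference convs above (tiny sizes, SAME
# padding); checks shapes only, no performance claims.
def _conv_spatial_demo():
    C, H, W, N, R, S, pad, std = 2, 8, 8, 4, 3, 3, 1, 1
    P, Q = out_dim(R, H, pad, std), out_dim(S, W, pad, std)
    params = (C, H, W, N, P, Q, R, S, pad, pad, std)
    F = np.random.randn(C, R, S).astype(np.float32)
    I = np.random.randn(C, H, W, N).astype(np.float32)
    O = conv_spatial_xprop((F, I), params, fprop=True)   # (C, P, Q, N)
    U = conv_spatial_updat((O, I), params)               # (C, R, S)
    return O.shape, U.shape
# xprop_config: R -> (k_dim, warp_y, warp_x, blocks), unpacked in xprop_kernel below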
xprop_config = {
3 : (8, 2, 1, 16),
4 : (8, 2, 1, 15),
5 : (8, 2, 1, 12),
6 : (8, 2, 1, 12),
7 : (8, 2, 1, 8),
8 : (8, 2, 1, 8),
9 : (8, 2, 1, 6),
10 : (4, 1, 2, 5),
11 : (4, 1, 2, 2),
12 : (4, 1, 3, 5),
13 : (4, 1, 3, 3),
14 : (8, 2, 1, 5),
15 : (8, 2, 1, 4),
16 : (8, 2, 1, 4),
17 : (8, 2, 1, 2),
18 : (4, 1, 4, 3),
19 : (4, 1, 4, 2),
20 : (4, 1, 4, 2),
21 : (4, 1, 4, 2),
22 : (8, 2, 3, 2),
23 : (8, 2, 2, 2),
24 : (8, 2, 2, 2),
25 : (8, 2, 2, 2),
26 : (4, 2, 2, 2),
27 : (4, 2, 2, 1),
28 : (4, 2, 2, 1),
}
def xprop_kernel(buffers, params, fprop=True, config=None):
devF, devI, devO = buffers
C, H, W, N, P, Q, R, S, pad_r, pad_s, std = params
k_dim, warp_y, warp_x, blocks = xprop_config[R] if config is None else config
cpuO = np.empty((C, P, Q, N), dtype=np.float16)
blk_p = ceil_div(P, 8 // (std if fprop else 1))
blk_q = ceil_div(Q, 8 // (std if fprop else 1))
blk_n = ceil_div(N, 16)
N16 = 1 if N % 16 == 0 else 0
parms = (devF, devI, devO, C, H, W, N, P, Q, W*N, Q*N, H*W*N, P*Q*N, pad_r, pad_s, blk_q)
block = (warp_y*warp_x*32, 1, 1)
grid = (blk_p*blk_q, blk_n, C)
name = "conv_spatial_chwn_p8q8n16_k%d_xprop" % k_dim
sig = "PPPIIIIIIIIIIIII"
kwargs = dict(R=R, S=S, STRIDE=std, N16=N16, WARP_Y=warp_y, WARP_X=warp_x, BLOCKS=blocks, FPROP=int(fprop))
if k_dim == 8:
code = "".join("#define %s %d\n" % i for i in kwargs.items()) + r"""
#include "../ew_op_gpu.h"
#include "../gpu_hmma.h"
#define MAX(a,b) ( ( (a) > (b) ) ? (a) : (b) )
extern "C"
__global__ void __launch_bounds__(WARP_X*WARP_Y*32, BLOCKS) conv_spatial_chwn_p8q8n16_k8_xprop(
const float* __restrict__ F,
const ehalf* __restrict__ I,
ehalf* O,
int C, int H, int W, int N, int P, int Q, int WN, int QN, int HWN, int PQN, int pad_r, int pad_s, int blk_q)
{
const int TILE_P = 8;
const int TILE_Q = 8;
const int TILE_N = 16;
const int THREADS = WARP_X * WARP_Y * 32;
const int TILE_H = TILE_P + R - 1;
const int TILE_W1 = TILE_Q + S - 1;
const int TILE_W = CEIL_DIV(TILE_W1, 8)*8;
const int TILE_PQ = TILE_P * TILE_Q;
const int TILE_HW = TILE_H * TILE_W;
const int TILE_RW = R * TILE_W;
const int TILE_X = CEIL_DIV(TILE_RW, WARP_X*8)*8;
const int TILE_Y = TILE_P / WARP_Y;
const int TILE_RW4 = TILE_X * WARP_X;
const int STRD_RW = TILE_RW4 & (16|8|4) ? TILE_RW4 : TILE_RW4 + 4;
const int STRD_HW = (TILE_HW + TILE_RW4 - TILE_RW) | 4;
const int SIZE_F = STRD_RW*4 + 8; // pad 4 on either end to account for the shifted filter copies
const int SIZE_I = STRD_HW * TILE_N;
const int SIZE_O = WARP_X == 1 ? 0 : WARP_X*TILE_PQ*TILE_N;
const int F_LOOPS = CEIL_DIV(STRD_RW, THREADS);
const int I_LOOPS = CEIL_DIV(STRD_HW, THREADS);
__shared__ ehalf hShare[MAX(SIZE_F + SIZE_I, SIZE_O*2)];
float* fShare = (float*)&hShare[0];
int tid = threadIdx.x;
for (int i = 0; i < CEIL_DIV(SIZE_F, THREADS*4); i++)
*(ehalf4*)&hShare[(i*THREADS + tid)*4] = ehalf4(0);
int idx_c = blockIdx.z;
int offsetF = idx_c*R*S;
float filter[F_LOOPS];
for (int f = 0; f < F_LOOPS; f++)
{
int idx = f*THREADS + tid;
int r = (uint)idx / TILE_W;
int s = (uint)idx % TILE_W;
bool f_in = s < S && r < R;
if (!FPROP)
{
r = R - r - 1;
s = S - s - 1;
}
const float* Fp = F + (offsetF + r*S + s);
asm("mov.b64 %0, %0;" : "+l"(Fp) : );
filter[f] = 0.0f;
if (f_in)
filter[f] = __ldg(Fp);
}
int idx_pq = blockIdx.x;
int idx_n = blockIdx.y;
int idx_p = (uint)idx_pq / blk_q;
int idx_q = (uint)idx_pq % blk_q;
if (idx_p & 1)
idx_q = blk_q - idx_q - 1;
int p0 = idx_p*TILE_P;
int q0 = idx_q*TILE_Q;
int h0 = p0 - pad_r;
int w0 = q0 - pad_s;
int n0 = idx_n*TILE_N;
int offsetI = idx_c*HWN + n0;
asm("mov.b32 %0, %0;" : "+r"(p0) : );
asm("mov.b32 %0, %0;" : "+r"(q0) : );
ehalf4 image[I_LOOPS][4];
for (int i = 0; i < I_LOOPS; i++)
{
int idx = i*THREADS + tid;
int y = (uint)idx / TILE_W;
int x = (uint)idx % TILE_W;
int h = h0 + y;
int w = w0 + x;
for (int j = 0; j < 4; j++)
ew_zero(image[i][j]);
if (STRIDE == 1 || FPROP)
{
const ehalf4* pI = (const ehalf4*)(I + (offsetI + h*WN + w*N));
asm("mov.b64 %0, %0;" : "+l"(pI) : );
if ((TILE_W1 == TILE_W || x < TILE_W1) &&
(i+1 < I_LOOPS || y < TILE_H) &&
h >= 0 && h < H && w >= 0 && w < W)
{
image[i][0] = __ldg(pI);
for (int j = 1; j < 4; j++)
if (N16 || n0 + j*4 < N) image[i][j] = __ldg(pI + j);
}
}
else
{
const ehalf4* pI = (const ehalf4*)(I + (offsetI + h*WN/STRIDE + w*N/STRIDE));
asm("mov.b64 %0, %0;" : "+l"(pI) : );
if ((TILE_W1 == TILE_W || x < TILE_W1) &&
(i+1 < I_LOOPS || y < TILE_H) &&
h % STRIDE == 0 && w % STRIDE == 0 &&
h >= 0 && h/STRIDE < H && w >= 0 && w/STRIDE < W)
{
image[i][0] = __ldg(pI);
for (int j = 1; j < 4; j++)
if (N16 || n0 + j*4 < N) image[i][j] = __ldg(pI + j);
}
}
}
__syncthreads();
for (int f = 0; f < F_LOOPS; f++)
{
int idx = f*THREADS + tid;
ehalf h_filter = to_ehalf(filter[f]);
if (f+1 < F_LOOPS || idx < TILE_RW)
for (int i = 0; i < 4; i++)
hShare[STRD_RW*i + idx + i + 4] = h_filter;
}
for (int i = 0; i < I_LOOPS; i++)
{
int idx = i*THREADS + tid;
if (i+1 < I_LOOPS || idx < STRD_HW)
for (int j = 0; j < 4; j++)
*(ehalf4*)&hShare[idx*4 + STRD_HW*j*4 + SIZE_F] = image[i][j];
}
__syncthreads();
int tid16_4 = (tid & 16)/4;
int warp = tid / 32;
int warp_x = warp / WARP_Y;
int warp_y = warp % WARP_Y;
int f_shr = warp_x*TILE_X + (tid & 3)*STRD_RW + (tid & 4) + 4 - tid16_4;
int i_shr = (warp_x*TILE_X + warp_y*TILE_Y*TILE_W + (tid & 7))*4 + ((tid & 8) + tid16_4)*STRD_HW;
asm("mov.b32 %0, %0;" : "+r"(f_shr) : );
asm("mov.b32 %0, %0;" : "+r"(i_shr) : );
float acc[TILE_Y][8] = {0};
for (int x = 0; x < TILE_X; x += 8)
{
for (int y = 0; y < TILE_Y; y++)
{
ehalf4 f4 = *(ehalf4*)&hShare[f_shr + x];
ehalf4 i4 = *(ehalf4*)&hShare[i_shr + (y*TILE_W + x)*4 + SIZE_F];
mma_m8n8k4_nn(acc[y], f4, i4);
}
}
tid = threadIdx.x;
idx_n = blockIdx.y;
idx_c = blockIdx.z;
warp = tid / 32;
warp_x = warp / WARP_Y;
warp_y = warp % WARP_Y;
bool t4 = (tid & 4) != 0;
float sum[TILE_Y][4];
for (int y = 0; y < TILE_Y; y++)
{
for (int i = 0; i < 4; i++)
{
float swap = t4 ? acc[y][i + 0] : acc[y][i + 4];
sum[y][i] = t4 ? acc[y][i + 4] : acc[y][i + 0];
sum[y][i] += shfl_xor(swap, 4);
}
}
if (WARP_X == 1)
{
int p = p0 + warp_y * TILE_Y;
int q = q0 + (tid & 1) + (tid & 16)/4;
int n = idx_n*TILE_N + (tid & (2|4|8));
bool bn = N16 || n < N;
int offsetO = idx_c*PQN + n;
if (STRIDE > 1 && TILE_Y >= STRIDE && FPROP)
{
for (int y = 0; y < TILE_Y/STRIDE; y++)
for (int x = 0; x < 4/STRIDE; x++)
if (q%STRIDE == 0 && p/STRIDE + y < P && q/STRIDE + x < Q && bn)
store_half2(O + (offsetO + (p/STRIDE + y)*QN + (q/STRIDE + x)*N), to_half2(&sum[y*STRIDE][x*2]));
}
else
{
for (int y = 0; y < TILE_Y; y++)
for (int x = 0; x < 2; x++)
if (p + y < P && q + x*2 < Q && bn)
store_half2(O + (offsetO + (p + y)*QN + (q + x*2)*N), to_half2(&sum[y][x*2]));
}
}
else
{
int ox = (tid & (2|4|8));
int oy = warp_x*TILE_PQ + warp_y*TILE_Q*TILE_Y + (tid & 1) + (tid & 16)/4;
int o_shr = oy*TILE_N + ox;
__syncthreads();
for (int y = 0; y < TILE_Y; y++)
for (int j = 0; j < 2; j++)
*(float2*)&fShare[o_shr + (y*TILE_Q + j*2)*TILE_N] = *(float2*)&sum[y][j*2];
__syncthreads();
int tx = tid % 4;
int ty = tid / 4;
int tn = tx * 4;
int n = idx_n*TILE_N + tn;
if (N16 || n < N)
{
int offsetO = idx_c*PQN + n;
const int O_LINES = THREADS/4;
const int O_LOOPS = CEIL_DIV(TILE_PQ, O_LINES);
for (int i = 0; i < O_LOOPS; i++)
{
int idx = i*O_LINES + ty;
int pi = (uint)idx / TILE_Q;
int qi = (uint)idx % TILE_Q;
int p = p0 + pi;
int q = q0 + qi;
if ((i+1 < O_LOOPS || idx < TILE_PQ) && p < P && q < Q)
{
float4 out[WARP_X];
for (int x = 0; x < WARP_X; x++)
out[x] = *(float4*)&fShare[(x*TILE_PQ + pi*TILE_Q + qi)*TILE_N + tn];
for (int x = 1; x < WARP_X; x++)
out[0] = ew_add(out[0], out[x]);
store_half4(O + (offsetO + p*QN + q*N), to_half4(out[0]));
}
}
}
}
}
"""
if k_dim == 4:
code = "".join("#define %s %d\n" % i for i in kwargs.items()) + r"""
#include "../ew_op_gpu.h"
#include "../gpu_hmma.h"
#define MAX(a,b) ( ( (a) > (b) ) ? (a) : (b) )
extern "C"
__global__ void __launch_bounds__(WARP_X*WARP_Y*32, BLOCKS) conv_spatial_chwn_p8q8n16_k4_xprop(
const float* __restrict__ F,
const ehalf* __restrict__ I,
ehalf* O,
int C, int H, int W, int N, int P, int Q, int WN, int QN, int HWN, int PQN, int pad_r, int pad_s, int blk_q)
{
const int TILE_P = 8;
const int TILE_Q = 8;
const int TILE_N = 16;
const int THREADS = WARP_X * WARP_Y * 32;
const int TILE_H = TILE_P + R - 1;
const int TILE_W1 = TILE_Q + S - 1;
const int TILE_W = CEIL_DIV(TILE_W1, 4)*4;
const int TILE_PQ = TILE_P * TILE_Q;
const int TILE_HW = TILE_H * TILE_W;
const int TILE_RW = R * TILE_W;
const int TILE_X = CEIL_DIV(TILE_RW, WARP_X*4)*4;
const int TILE_Y = 4 / WARP_Y;
const int TILE_RW4 = TILE_X * WARP_X;
const int STRD_RW = TILE_RW4 | 4;
const int STRD_HW = (TILE_HW + TILE_RW4 - TILE_RW) | 4;
    const int SIZE_F = STRD_RW*4 + 8; // pad 4 on either end to account for the shifted filter copies
const int SIZE_I = STRD_HW * TILE_N;
const int SIZE_O = WARP_X*TILE_PQ*TILE_N;
const int F_LOOPS = CEIL_DIV(STRD_RW, THREADS);
const int I_LOOPS = CEIL_DIV(STRD_HW, THREADS);
__shared__ ehalf hShare[MAX(SIZE_F + SIZE_I, SIZE_O*2)];
float* fShare = (float*)&hShare[0];
int tid = threadIdx.x;
for (int i = 0; i < CEIL_DIV(SIZE_F, THREADS*4); i++)
*(ehalf4*)&hShare[(i*THREADS + tid)*4] = ehalf4(0);
int idx_c = blockIdx.z;
int offsetF = idx_c*R*S;
float filter[F_LOOPS];
for (int f = 0; f < F_LOOPS; f++)
{
int idx = f*THREADS + tid;
int r = (uint)idx / TILE_W;
int s = (uint)idx % TILE_W;
bool f_in = s < S && r < R;
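        // Bprop again uses the 180-degree rotated filter, as in the k=8 kernel above.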
if (!FPROP)
{
r = R - r - 1;
s = S - s - 1;
}
const float* Fp = F + (offsetF + r*S + s);
asm("mov.b64 %0, %0;" : "+l"(Fp) : );
filter[f] = 0.0f;
if (f_in)
filter[f] = __ldg(Fp);
}
int idx_pq = blockIdx.x;
int idx_n = blockIdx.y;
int idx_p = (uint)idx_pq / blk_q;
int idx_q = (uint)idx_pq % blk_q;
if (idx_p & 1)
idx_q = blk_q - idx_q - 1;
int p0 = idx_p*TILE_P;
int q0 = idx_q*TILE_Q;
int h0 = p0 - pad_r;
int w0 = q0 - pad_s;
int n0 = idx_n*TILE_N;
int offsetI = idx_c*HWN + n0;
asm("mov.b32 %0, %0;" : "+r"(p0) : );
asm("mov.b32 %0, %0;" : "+r"(q0) : );
ehalf4 image[I_LOOPS][4];
for (int i = 0; i < I_LOOPS; i++)
{
int idx = i*THREADS + tid;
int y = (uint)idx / TILE_W;
int x = (uint)idx % TILE_W;
int h = h0 + y;
int w = w0 + x;
const ehalf4* pI = (const ehalf4*)(I + (offsetI + h*WN + w*N));
asm("mov.b64 %0, %0;" : "+l"(pI) : );
for (int j = 0; j < 4; j++)
ew_zero(image[i][j]);
if ((TILE_W1 == TILE_W || x < TILE_W1) &&
(i+1 < I_LOOPS || y < TILE_H) &&
h >= 0 && h < H && w >= 0 && w < W)
{
image[i][0] = __ldg(pI);
for (int j = 1; j < 4; j++)
if (N16 || n0 + j*4 < N) image[i][j] = __ldg(pI + j);
}
}
__syncthreads();
for (int f = 0; f < F_LOOPS; f++)
{
int idx = f*THREADS + tid;
ehalf h_filter = to_ehalf(filter[f]);
if (f+1 < F_LOOPS || idx < TILE_RW)
for (int i = 0; i < 4; i++)
hShare[STRD_RW*i + idx + i + 4] = h_filter;
}
for (int i = 0; i < I_LOOPS; i++)
{
int idx = i*THREADS + tid;
if (i+1 < I_LOOPS || idx < STRD_HW)
for (int j = 0; j < 4; j++)
*(ehalf4*)&hShare[idx*4 + STRD_HW*j*4 + SIZE_F] = image[i][j];
}
__syncthreads();
int tid3 = tid & 3;
int tid16_4 = (tid & 16)/4;
int warp = tid / 32;
int warp_x = warp / WARP_Y;
int warp_y = warp % WARP_Y;
int f_shr = warp_x*TILE_X + tid3*STRD_RW + 4 - tid16_4;
int i_shr = (warp_x*TILE_X + warp_y*TILE_Y*TILE_W + tid16_4*TILE_W + tid3)*4 + (tid & (4|8))*STRD_HW;
asm("mov.b32 %0, %0;" : "+r"(f_shr) : );
asm("mov.b32 %0, %0;" : "+r"(i_shr) : );
float acc[TILE_Y][8] = {0};
for (int x = 0; x < TILE_X; x += 4)
{
for (int y = 0; y < TILE_Y; y++)
{
ehalf4 f4 = *(ehalf4*)&hShare[f_shr + x];
ehalf4 i4 = *(ehalf4*)&hShare[i_shr + (y*TILE_W + x)*4 + SIZE_F];
mma_m8n8k4_nn(acc[y], f4, i4);
}
}
tid = threadIdx.x;
tid16_4 = (tid & 16)/4;
warp = tid / 32;
warp_x = warp / WARP_Y;
warp_y = warp % WARP_Y;
int ox = (tid & (2|4|8));
int oy = warp_x*TILE_PQ + warp_y*TILE_Q*TILE_Y + (tid & 1) + tid16_4;
int o_shr = oy*TILE_N + ox;
__syncthreads();
for (int y = 0; y < TILE_Y; y++)
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
*(float2*)&fShare[o_shr + ((y + i*4)*TILE_Q + j*2)*TILE_N] = *(float2*)&acc[y][i*4 + j*2];
__syncthreads();
idx_n = blockIdx.y;
idx_c = blockIdx.z;
int tx = tid % 4;
int ty = tid / 4;
int tn = tx * 4;
int n = idx_n*TILE_N + tn;
if (N16 || n < N)
{
int offsetO = idx_c*PQN + n;
const int O_LINES = THREADS/4;
const int O_LOOPS = CEIL_DIV(TILE_PQ, O_LINES);
for (int i = 0; i < O_LOOPS; i++)
{
int idx = i*O_LINES + ty;
int pi = (uint)idx / TILE_Q;
int qi = (uint)idx % TILE_Q;
int p = p0 + pi;
int q = q0 + qi;
if ((i+1 < O_LOOPS || idx < TILE_PQ) && p < P && q < Q)
{
float4 out[WARP_X];
for (int x = 0; x < WARP_X; x++)
out[x] = *(float4*)&fShare[(x*TILE_PQ + pi*TILE_Q + qi)*TILE_N + tn];
for (int x = 1; x < WARP_X; x++)
out[0] = ew_add(out[0], out[x]);
store_half4(O + (offsetO + p*QN + q*N), to_half4(out[0]));
}
}
}
}
"""
module = SourceModule(code, options=["--use_fast_math"], include_dirs=[file_path], no_extern_c=True, arch="compute_70", code="sm_70")
kernel = module.get_function(name)
kernel.prepare(sig)
def func():
kernel.prepared_call(grid, block, *parms)
return func, devO, cpuO
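# Tuned launch configurations for the update (weight-gradient) kernel, keyed by filter size R:
# R -> (tile_y, tile_x, threads, blocks)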
updat_config = {
3 : (4, 1, 64, 16),
4 : (4, 2, 64, 16),
5 : (4, 2, 64, 14),
6 : (4, 4, 64, 8),
7 : (4, 4, 64, 4),
8 : (4, 4, 64, 4),
9 : (8, 2, 96, 8),
10 : (8, 2, 96, 8),
11 : (8, 4, 64, 6),
12 : (8, 4, 64, 4),
13 : (8, 2, 160, 6),
14 : (8, 2, 160, 6),
15 : (8, 4, 96, 4),
16 : (8, 4, 96, 3),
17 : (8, 4, 128, 1),
18 : (8, 4, 128, 2),
19 : (8, 4, 128, 3),
20 : (8, 4, 160, 1),
21 : (8, 4, 160, 2),
22 : (8, 4, 160, 3),
23 : (8, 4, 192, 1),
24 : (8, 4, 192, 2),
25 : (8, 4, 224, 1),
26 : (8, 4, 224, 2),
27 : (8, 4, 256, 1),
28 : (8, 4, 256, 1),
29 : (8, 4, 288, 1),
}
def updat_kernel(buffers, params, config=None):
devE, devI, devU = buffers
C, H, W, N, P, Q, R, S, pad_r, pad_s, std = params
tile_y, tile_x, threads, blocks = updat_config[R] if config is None else config
cpuU = np.empty((C, R, S), dtype=np.float32)
blk_p = ceil_div(P, 8 // std)
blk_q = ceil_div(Q, 8 // std)
blk_n = ceil_div(N, 16)
N16 = 1 if N % 16 == 0 else 0
parms = (devE, devI, devU, C, H, W, N, P, Q, W*N, Q*N, H*W*N, P*Q*N, pad_r, pad_s, blk_q)
block = (threads, 1, 1)
grid = (blk_p*blk_q, blk_n, C)
name = "conv_spatial_chwn_p8q8n16_updat"
sig = "PPPIIIIIIIIIIIII"
conf = dict(R=R, S=S, STRIDE=std, N16=N16, TILE_Y=tile_y, TILE_X=tile_x, THREADS=threads, BLOCKS=blocks)
code = "".join("#define %s %d\n" % i for i in conf.items()) + r"""
#include "../ew_op_gpu.h"
#include "../gpu_hmma.h"
#define MAX(a,b) ( ( (a) > (b) ) ? (a) : (b) )
extern "C"
__global__ void __launch_bounds__(THREADS,BLOCKS) conv_spatial_chwn_p8q8n16_updat(
const ehalf* __restrict__ E,
const ehalf* __restrict__ I,
float* U,
int C, int H, int W, int N, int P, int Q, int WN, int QN, int HWN, int PQN, int pad_r, int pad_s, int blk_q)
{
const int TILE_P = 8;
const int TILE_Q = 8;
const int TILE_N = 16;
const int TILE_H = TILE_P + R - 1;
const int TILE_W = TILE_Q + S - 1;
const int TILE_HW = TILE_H * TILE_W;
const int TILE_RW = TILE_W * R;
const int TILE_PQ = TILE_P * TILE_Q;
const int WARP_X = CEIL_DIV(TILE_RW, TILE_X*32);
const int WARP_Y = TILE_P / TILE_Y;
const int STRD_U = (CEIL_DIV(TILE_RW, 16)|1)*16;
const int SIZE_I = TILE_HW * TILE_N; // ehalfs
const int SIZE_E = TILE_PQ * TILE_N; // ehalfs
const int SIZE_U = TILE_Q * WARP_Y * STRD_U; // floats
const int I_EXT = TILE_X*32 - TILE_RW % (TILE_X*32);
const int I_PAD = I_EXT > TILE_PQ ? (I_EXT - TILE_PQ)*TILE_N : 0;
const int E_LINES = THREADS/2;
const int E_LOOPS = CEIL_DIV(TILE_PQ, E_LINES);
const int I_LOOPS = CEIL_DIV(TILE_HW, THREADS);
__shared__ ehalf hShare[MAX(SIZE_I + SIZE_E + I_PAD, SIZE_U*2)];
float* fShare = (float*)&hShare[0];
int tid = threadIdx.x;
int idx_pq = blockIdx.x;
int idx_n = blockIdx.y;
int idx_c = blockIdx.z;
int idx_p = (uint)idx_pq / blk_q;
int idx_q = (uint)idx_pq % blk_q;
if (idx_p & 1)
idx_q = blk_q - idx_q - 1;
int p0 = idx_p*TILE_P;
int q0 = idx_q*TILE_Q;
int h0 = p0 - pad_r;
int w0 = q0 - pad_s;
int n = idx_n*TILE_N;
int offsetI = idx_c*HWN + n;
ehalf8 image[I_LOOPS][2];
for (int i = 0; i < I_LOOPS; i++)
{
int idx = i*THREADS + tid;
int y = (uint)idx / TILE_W;
int x = (uint)idx % TILE_W;
int h = h0 + y;
int w = w0 + x;
const ehalf8* pI = (const ehalf8*)(I + (offsetI + h*WN + w*N));
asm("mov.b64 %0, %0;" : "+l"(pI) : );
ew_zero(image[i][0]);
ew_zero(image[i][1]);
if ((i+1 < I_LOOPS || idx < TILE_HW) &&
h >= 0 && h < H &&
w >= 0 && w < W)
{
image[i][0] = __ldg(pI);
if (N16 || n + 8 < N)
image[i][1] = __ldg(pI + 1);
}
}
int tx = tid % 2;
int ty = tid / 2;
int tn = tx * 8;
n += tn;
int offsetE = idx_c*PQN + n;
ehalf8 error[E_LOOPS];
for (int i = 0; i < E_LOOPS; i++)
{
int idx = i*E_LINES + ty;
int p = p0 + idx / TILE_Q;
int q = q0 + idx % TILE_Q;
ew_zero(error[i]);
if (STRIDE == 1)
{
const ehalf8* pE = (const ehalf8*)(E + (offsetE + p*QN + q*N));
asm("mov.b64 %0, %0;" : "+l"(pE) : );
if ((i+1 < E_LOOPS || idx < TILE_PQ) &&
p < P && q < Q &&
(N16 || n < N))
error[i] = __ldg(pE);
}
else
{
const ehalf8* pE = (const ehalf8*)(E + (offsetE + p*QN/STRIDE + q*N/STRIDE));
asm("mov.b64 %0, %0;" : "+l"(pE) : );
if ((i+1 < E_LOOPS || idx < TILE_PQ) &&
p % STRIDE == 0 && q % STRIDE == 0 &&
p/STRIDE < P && q/STRIDE < Q &&
(N16 || n < N))
error[i] = __ldg(pE);
}
}
for (int i = 0; i < I_LOOPS; i++)
{
int idx = i*THREADS + tid;
if (i+1 < I_LOOPS || idx < TILE_HW)
{
*(ehalf8*)&hShare[idx*8 + TILE_HW*0] = image[i][0];
*(ehalf8*)&hShare[idx*8 + TILE_HW*8] = image[i][1];
}
}
for (int i = 0; i < E_LOOPS; i++)
{
int idx = i*E_LINES + ty;
if (i+1 < E_LOOPS || idx < TILE_PQ)
*(ehalf8*)&hShare[idx*16 + tn + SIZE_I] = error[i];
}
__syncthreads();
int warp = tid / 32;
int warp_t = tid % 32;
int warp_y = warp / WARP_X;
int warp_x = warp % WARP_X;
int e_shr = (warp_y*TILE_Y*TILE_Q + (tid & 3) + (tid & 16)/4)*16;
int i_shr = (warp_y*TILE_Y*TILE_W + warp_x*TILE_X*32 + warp_t)*8;
asm("mov.b32 %0, %0;" : "+r"(e_shr) : );
asm("mov.b32 %0, %0;" : "+r"(i_shr) : );
float acc[TILE_X][8] = {0};
for (int n = 0; n < 2; n++)
{
for (int y = 0; y < TILE_Y; y++)
{
for (int x = 0; x < TILE_X; x++)
{
ehalf8 e8 = *(ehalf8*)&hShare[e_shr + (y*TILE_Q*16 + n*8 + SIZE_I)];
ehalf8 i8 = *(ehalf8*)&hShare[i_shr + (y*TILE_W + x*32 + n*TILE_HW)*8];
mma_m8n8k8_nt(acc[x], e8, i8);
}
}
}
int ux = warp_x*TILE_X*32 + (tid & (2|4|8));
int uy = warp_y*TILE_Q + (tid & 1) + (tid & 16)/4;
int u_shr = uy*STRD_U + ux;
__syncthreads();
for (int x = 0; x < TILE_X; x++)
for (int i = 0; i < 2; i++)
if (ux + x*32 + i*16 < TILE_RW)
for (int j = 0; j < 2; j++)
*(float2*)&fShare[u_shr + x*32 + i*16 + j*2*STRD_U] = *(float2*)&acc[x][i*4 + j*2];
__syncthreads();
int offsetF = idx_c*R*S;
const int F_LOOPS = CEIL_DIV(TILE_RW, THREADS);
for (uint f = 0; f < F_LOOPS; f++)
{
int idx = f*THREADS + tid;
int r = (uint)idx / TILE_W;
int s = (uint)idx % TILE_W;
if (r < R && s < S)
{
float update = 0.0f;
for (int y = 0; y < WARP_Y; y++)
for (int q = 0; q < TILE_Q; q++)
update += fShare[(y*TILE_Q + q)*STRD_U + q + idx];
atomicRed(U, update, offsetF + r*S + s);
}
}
}
"""
module = SourceModule(code, options=["--use_fast_math"], include_dirs=[file_path], no_extern_c=True, arch="compute_70", code="sm_70")
kernel = module.get_function(name)
kernel.prepare(sig)
def func():
drv.memset_d8(devU, 0, cpuU.nbytes)
kernel.prepared_call(grid, block, *parms)
return func, devU, cpuU
class Conv(object):
def __init__(self, C, R, S, H, W, N, pad="SAME", std=1, repeat=1, ones=0):
if pad.upper() == "SAME":
pad_r = max((R-1) // 2, 1)
pad_s = max((S-1) // 2, 1)
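            # NB: pad is floored at 1, which is a no-op for the R,S >= 3 shapes benchmarked below.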
P = ceil_div(H, std)
Q = ceil_div(W, std)
# VALID
else:
pad_r, pad_s = 0, 0
P = out_dim(R, H, pad_r, std)
Q = out_dim(S, W, pad_s, std)
self.dimF = (C, R, S)
self.dimI = (C, H, W, N)
self.dimO = (C, P, Q, N)
self.std = std
self.repeat = repeat
self.ones = ones
self.nflops = C * P * Q * R * S * N * 2.0
self.fprop_params = (C, H, W, N, P, Q, R, S, pad_r, pad_s, std)
self.bprop_params = (C, P, Q, N, H, W, R, S, R-pad_r-1, S-pad_s-1, std)
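        # NB: bprop reuses the xprop kernel with FPROP=0; input/output dims swap (P,Q <-> H,W)
        # and the padding is transposed to R-pad_r-1, S-pad_s-1.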
self.param_str = f"C:{C:d} R:{R:2d} S:{S:2d} H:{H:d} W:{W:d} P:{P:d} Q:{Q:d} N:{N:d}"
def init(self):
if self.ones:
F = np.ones(self.dimF, dtype=np.float32)
I = np.ones(self.dimI, dtype=np.float32)
E = np.ones(self.dimO, dtype=np.float32)
#E[:,0:4,:,:] = 0
# print(E[0,0,:,:])
else:
# F = np.random.uniform(-1.0, 1.0, self.dimF).astype(np.float16).astype(np.float32)
# I = np.random.uniform(-1.0, 1.0, self.dimI).astype(np.float16).astype(np.float32)
# E = np.random.uniform(-1.0, 1.0, self.dimO).astype(np.float16).astype(np.float32)
F = np.random.normal(0.0, 1.0, self.dimF).astype(np.float16).astype(np.float32)
I = np.random.normal(0.0, 1.0, self.dimI).astype(np.float16).astype(np.float32)
E = np.random.normal(0.0, 1.0, self.dimO).astype(np.float16).astype(np.float32)
devF = drv.mem_alloc(F.size*4)
devU = drv.mem_alloc(F.size*4)
devI = drv.mem_alloc(I.size*2)
devB = drv.mem_alloc(I.size*2)
devO = drv.mem_alloc(E.size*2)
devE = drv.mem_alloc(E.size*2)
drv.memcpy_htod(devF, F)
drv.memcpy_htod(devI, I.astype(np.float16))
drv.memcpy_htod(devE, E.astype(np.float16))
self.gpu_fprop_params = (devF, devI, devO)
self.gpu_bprop_params = (devF, devE, devB)
self.gpu_updat_params = (devE, devI, devU)
self.cpu_fprop_params = (F, I)
self.cpu_bprop_params = (F, E)
self.cpu_updat_params = (E, I)
self.events = (drv.Event(), drv.Event())
self.nbytes = F.size*4 + I.size*2 + E.size*2
def cleanup(self):
self.gpu_fprop_params = None
self.gpu_bprop_params = None
self.gpu_updat_params = None
self.cpu_fprop_params = None
self.cpu_bprop_params = None
self.cpu_updat_params = None
self.events = None
def execute(self, func, devO, cpuO, op):
# warmup
for r in range(self.repeat - 1):
func()
start, end = self.events
start.record()
for r in range(self.repeat):
func()
end.record()
end.synchronize()
msecs = end.time_since(start) / self.repeat
if self.repeat > 1:
gflops = self.nflops / (msecs * 1e6)
gbps = self.nbytes / (msecs * 2**30 * 0.001)
res = f"{op} GFlops:{gflops:5.0f} GBps:{gbps:4.0f} ms:{msecs:7.3f} ({self.param_str})"
print(res, flush=True)
return (gflops, res)
drv.memcpy_dtoh(cpuO, devO)
if cpuO.dtype != np.float32:
cpuO = cpuO.astype(np.float32)
return cpuO
def cpu_fprop(self):
return conv_spatial_xprop(self.cpu_fprop_params, self.fprop_params, fprop=1)
def cpu_bprop(self):
return conv_spatial_xprop(self.cpu_bprop_params, self.bprop_params, fprop=0)
def cpu_updat(self):
return conv_spatial_updat(self.cpu_updat_params, self.fprop_params)
def gpu_fprop(self):
kernel, devO, cpuO = xprop_kernel(self.gpu_fprop_params, self.fprop_params, fprop=1)
return self.execute(kernel, devO, cpuO, "F")
def gpu_bprop(self):
kernel, devB, cpuB = xprop_kernel(self.gpu_bprop_params, self.bprop_params, fprop=0)
return self.execute(kernel, devB, cpuB, "B")
def gpu_updat(self):
kernel, devU, cpuU = updat_kernel(self.gpu_updat_params, self.fprop_params)
return self.execute(kernel, devU, cpuU, "U")
def gpu_fprop_tune(self):
C, H, W, N, P, Q, R, S, pad_r, pad_s, std = self.fprop_params
results = list()
TILE_P = 8;
TILE_Q = 8;
TILE_N = 16;
TILE_H = TILE_P + R - 1;
TILE_W1 = TILE_Q + S - 1;
TILE_W = ceil_div(TILE_W1, 8)*8;
TILE_PQ = TILE_P * TILE_Q;
TILE_HW = TILE_H * TILE_W;
TILE_RW = R * TILE_W;
#wys = (4,2,1) if R <= 10 else (2,1)
for WARP_Y in (2,):
for WARP_X in range(1,5):
THREADS = WARP_X * WARP_Y * 32;
TILE_X = ceil_div(TILE_RW, WARP_X*8)*8
TILE_Y = TILE_P // WARP_Y
TILE_RW4 = TILE_X * WARP_X
STRD_RW = TILE_RW4 if TILE_RW4 & (16|8|4) else TILE_RW4 + 4
STRD_HW = (TILE_HW + TILE_RW4 - TILE_RW) | 4
SIZE_F = STRD_RW*4 + 8;
SIZE_I = STRD_HW * TILE_N;
SIZE_O = WARP_X*TILE_PQ*TILE_N if WARP_X > 1 else 0
F_LOOPS = ceil_div(STRD_RW, THREADS)
I_LOOPS = ceil_div(STRD_HW, THREADS)
SHARE = max((SIZE_F + SIZE_I)*2, SIZE_O*4)
BLOCKS = min(49152*2 // SHARE, 1024 // THREADS)
if 64 <= THREADS <= 1024 and SHARE <= 49152:
for b in range(1, BLOCKS+1):
op = f"k:8 t:{THREADS:4d} wy:{WARP_Y:d} wx:{WARP_X:d} b:{b:2d} ma:{TILE_X//2:4d} fl:{F_LOOPS:2d} il:{I_LOOPS:2d} sh:{SHARE:5d}"
#print(op)
kernel, devO, cpuO = xprop_kernel(self.gpu_fprop_params, self.fprop_params, fprop=1, config=(8,WARP_Y,WARP_X,b))
results.append(self.execute(kernel, devO, cpuO, op))
TILE_W = ceil_div(TILE_W1, 4)*4;
TILE_PQ = TILE_P * TILE_Q;
TILE_HW = TILE_H * TILE_W;
TILE_RW = R * TILE_W;
#wys = (4,2,1) if R <= 10 else (2,1)
for WARP_Y in (2,1):
for WARP_X in range(1,9):
THREADS = WARP_X * WARP_Y * 32;
TILE_X = ceil_div(TILE_RW, WARP_X*4)*4
TILE_Y = 4 // WARP_Y
TILE_RW4 = TILE_X * WARP_X
STRD_RW = TILE_RW4 | 4
STRD_HW = (TILE_HW + TILE_RW4 - TILE_RW) | 4
SIZE_F = STRD_RW*4 + 8;
SIZE_I = STRD_HW * TILE_N;
SIZE_O = WARP_X*TILE_PQ*TILE_N
F_LOOPS = ceil_div(STRD_RW, THREADS)
I_LOOPS = ceil_div(STRD_HW, THREADS)
SHARE = max((SIZE_F + SIZE_I)*2, SIZE_O*4)
BLOCKS = min(49152*2 // SHARE, 1024 // THREADS)
if 64 <= THREADS <= 1024 and SHARE <= 49152:
for b in range(1, BLOCKS+1):
op = f"k:4 t:{THREADS:4d} wy:{WARP_Y:d} wx:{WARP_X:d} b:{b:2d} ma:{TILE_X:4d} fl:{F_LOOPS:2d} il:{I_LOOPS:2d} sh:{SHARE:5d}"
#print(op)
kernel, devO, cpuO = xprop_kernel(self.gpu_fprop_params, self.fprop_params, fprop=1, config=(4, WARP_Y, WARP_X, b))
results.append(self.execute(kernel, devO, cpuO, op))
print(R, S)
for g, s in sorted(results):
print(s)
print("", flush=True)
def gpu_updat_tune(self):
C, H, W, N, P, Q, R, S, pad_r, pad_s, std = self.fprop_params
TILE_P = 8
TILE_Q = 8
TILE_N = 16
TILE_PQ = 64
TILE_HW = (7 + S)*(7 + S)
TILE_RW = (7 + S)*S
results = list()
for TILE_Y in (4,8):
for TILE_X in (1,2,4):
WARPS_X = ceil_div(TILE_RW, TILE_X*32)
WARPS_Y = TILE_P // TILE_Y
THREADS = WARPS_X * WARPS_Y * 32;
STRIDE_U = (ceil_div(TILE_RW, 16)|1)*16;
I_SIZE = TILE_HW * TILE_N
E_SIZE = TILE_P * TILE_Q * TILE_N
U_SIZE = TILE_Q * WARPS_Y * STRIDE_U
I_EXT = TILE_X*32 - TILE_RW % (TILE_X*32)
I_PAD = (I_EXT - TILE_PQ)*TILE_N if I_EXT > TILE_PQ else 0
SHARE = max((I_SIZE+E_SIZE+I_PAD)*2, U_SIZE*4)
BLOCKS = min(49152*2 // SHARE, 2048 // THREADS)
if 64 <= THREADS <= 1024 and SHARE <= 49152 and I_EXT / TILE_HW < 0.3:
for b in range(1, BLOCKS+1):
op = f"t:{THREADS:4d} ty:{TILE_Y:d} tx:{TILE_X:d} b:{b:2d} sh:{SHARE:5d}"
kernel, devU, cpuU = updat_kernel(self.gpu_updat_params, self.fprop_params, config=(TILE_Y, TILE_X, THREADS, b))
results.append( self.execute(kernel, devU, cpuU, op) )
print(R, S)
for g, s in sorted(results):
print(s)
print("", flush=True)
fbu = (1, 1, 1)
ones = 0
out = 0
repeat = 200
for S in range(3,26):
for conv in (
Conv(C=SMs*2, R=S, S=S, H=64, W=64, N=16, pad="SAME", std=1, repeat=repeat, ones=ones),
Conv(C=SMs*2, R=S, S=S, H=64, W=64, N=16, pad="SAME", std=2, repeat=repeat, ones=ones),
):
conv.init()
if fbu[0]: devO = conv.gpu_fprop()
if fbu[1]: devB = conv.gpu_bprop()
if fbu[2]: devU = conv.gpu_updat()
if repeat == 1:
tests = list()
if fbu[0]: tests.append(("F", devO, conv.cpu_fprop()))
if fbu[1]: tests.append(("B", devB, conv.cpu_bprop()))
if fbu[2]: tests.append(("U", devU, conv.cpu_updat()))
for op, devO, cpuO in tests:
difO = np.abs(cpuO - devO)
l2_err = np.sqrt(np.square(difO).sum()) / np.sqrt(np.square(cpuO).sum())
print(f"{op} max_err: {difO.max():8.3f} max_val: {devO.max():8.3f} l2_err: {l2_err:7.5f} {conv.param_str}")
if out:
fmt = "%2.0f" if ones else "%5.2f"
np.savetxt("out_dif.txt", difO.reshape(-1, cpuO.shape[-1]), fmt=fmt)
np.savetxt("out_cpu.txt", cpuO.reshape(-1, cpuO.shape[-1]), fmt=fmt)
np.savetxt("out_gpu.txt", devO.reshape(-1, cpuO.shape[-1]), fmt=fmt)
exit()
conv.cleanup()
# nsight-cli --section MemoryWorkloadAnalysis_Tables --details-all python spatial_conv.py | grep shared
# print(c[0,:,:,0])
# print()
# print(g[0,:,:,0])
# F GFlops: 2575 GBps: 533 ms: 0.073 (C:160 R: 3 S: 3 H:64 W:64 P:64 Q:64 N:16)
# U GFlops: 2660 GBps: 550 ms: 0.071 (C:160 R: 3 S: 3 H:64 W:64 P:64 Q:64 N:16)
# F GFlops: 4565 GBps: 532 ms: 0.074 (C:160 R: 4 S: 4 H:64 W:64 P:64 Q:64 N:16)
# U GFlops: 4711 GBps: 549 ms: 0.071 (C:160 R: 4 S: 4 H:64 W:64 P:64 Q:64 N:16)
# F GFlops: 7105 GBps: 530 ms: 0.074 (C:160 R: 5 S: 5 H:64 W:64 P:64 Q:64 N:16)
# U GFlops: 7349 GBps: 548 ms: 0.071 (C:160 R: 5 S: 5 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:10113 GBps: 524 ms: 0.075 (C:160 R: 6 S: 6 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:10125 GBps: 524 ms: 0.075 (C:160 R: 6 S: 6 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:13627 GBps: 518 ms: 0.075 (C:160 R: 7 S: 7 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:12927 GBps: 492 ms: 0.079 (C:160 R: 7 S: 7 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:17435 GBps: 508 ms: 0.077 (C:160 R: 8 S: 8 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:16277 GBps: 474 ms: 0.082 (C:160 R: 8 S: 8 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:22004 GBps: 507 ms: 0.077 (C:160 R: 9 S: 9 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:18627 GBps: 429 ms: 0.091 (C:160 R: 9 S: 9 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:20335 GBps: 379 ms: 0.103 (C:160 R:10 S:10 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:19008 GBps: 355 ms: 0.110 (C:160 R:10 S:10 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:22466 GBps: 346 ms: 0.113 (C:160 R:11 S:11 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:20461 GBps: 316 ms: 0.124 (C:160 R:11 S:11 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:25035 GBps: 325 ms: 0.121 (C:160 R:12 S:12 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:23536 GBps: 305 ms: 0.128 (C:160 R:12 S:12 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:27661 GBps: 306 ms: 0.128 (C:160 R:13 S:13 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:21994 GBps: 243 ms: 0.161 (C:160 R:13 S:13 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:28962 GBps: 276 ms: 0.142 (C:160 R:14 S:14 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:24807 GBps: 236 ms: 0.166 (C:160 R:14 S:14 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:30022 GBps: 249 ms: 0.157 (C:160 R:15 S:15 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:26382 GBps: 219 ms: 0.179 (C:160 R:15 S:15 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:30238 GBps: 221 ms: 0.178 (C:160 R:16 S:16 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:29406 GBps: 215 ms: 0.183 (C:160 R:16 S:16 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:35480 GBps: 230 ms: 0.171 (C:160 R:17 S:17 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:30019 GBps: 194 ms: 0.202 (C:160 R:17 S:17 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:31339 GBps: 181 ms: 0.217 (C:160 R:18 S:18 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:30584 GBps: 177 ms: 0.222 (C:160 R:18 S:18 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:32796 GBps: 170 ms: 0.231 (C:160 R:19 S:19 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:32548 GBps: 169 ms: 0.233 (C:160 R:19 S:19 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:35651 GBps: 167 ms: 0.235 (C:160 R:20 S:20 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:29178 GBps: 137 ms: 0.287 (C:160 R:20 S:20 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:37231 GBps: 158 ms: 0.248 (C:160 R:21 S:21 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:31712 GBps: 135 ms: 0.292 (C:160 R:21 S:21 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:32057 GBps: 124 ms: 0.317 (C:160 R:22 S:22 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:33891 GBps: 131 ms: 0.299 (C:160 R:22 S:22 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:35301 GBps: 125 ms: 0.314 (C:160 R:23 S:23 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:32931 GBps: 117 ms: 0.337 (C:160 R:23 S:23 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:35695 GBps: 116 ms: 0.338 (C:160 R:24 S:24 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:32320 GBps: 105 ms: 0.374 (C:160 R:24 S:24 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:39745 GBps: 120 ms: 0.330 (C:160 R:25 S:25 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:33094 GBps: 100 ms: 0.396 (C:160 R:25 S:25 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:33897 GBps: 94 ms: 0.418 (C:160 R:26 S:26 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:33407 GBps: 93 ms: 0.424 (C:160 R:26 S:26 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:34470 GBps: 89 ms: 0.444 (C:160 R:27 S:27 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:32564 GBps: 84 ms: 0.469 (C:160 R:27 S:27 H:64 W:64 P:64 Q:64 N:16)
# F GFlops:35377 GBps: 85 ms: 0.465 (C:160 R:28 S:28 H:64 W:64 P:64 Q:64 N:16)
# U GFlops:34760 GBps: 84 ms: 0.473 (C:160 R:28 S:28 H:64 W:64 P:64 Q:64 N:16)
|
#!/usr/bin/env python
#
# Author: Hans Chris Jones <[email protected]>
# Copyright 2018, LambdaStack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NB: This file is used as an RGW WebService that can be used with Apache or NGINX or anything that supports uwsgi.
# The following projects implement this service. Chef-BCS was modeled after Cepheus' implementation, which uses NGINX:
# 1. https://github.com/bloomberg/chef-bcs
# 2. https://github.com/cepheus-io/cepheus
import logging
import logging.handlers
import subprocess
import json
import os
import datetime
import flask
from flask import request
# NB: Setup Logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# handler = logging.handlers.SysLogHandler(address = '/dev/log')
handler = logging.handlers.TimedRotatingFileHandler('/var/log/rgw_webservice/rgw_webservice.log', when='midnight', backupCount=5)
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
app = flask.Flask(__name__)
# NB: Use flask.jsonify in the methods/functions that return json and not globally
class RGWWebServiceAPI(object):
def __init__(self):
# Setup admin user info here
pass
def user_create(self, user, display_name=None, remote_addr=None, region=None, zone=None, access_key=None, secret_key=None, email=None, zone_region_prefix="client.radosgw"):
# Set the display_name equal to the user id if display_name not passed in!
if display_name is None:
display_name = user
if region is not None and zone is not None:
cmd = ["sudo", "/bin/radosgw-admin", "user", "create", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--display-name", "%s" % display_name]
else:
cmd = ["/usr/bin/radosgw-admin", "user", "create", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--display-name", "%s" % display_name]
if region is not None and zone is not None:
cmd.append("-n")
# NB: This should match '[client.radosgw...]' or something similar found in ceph.conf for the RGW section
cmd.append("%s.%s-%s" % (zone_region_prefix, region, zone))
if email is not None:
cmd.append("--email")
cmd.append("%s" % email)
if access_key is not None:
cmd.append("--access-key")
cmd.append("%s" % access_key)
if secret_key is not None:
# Newer versions of radosgw-admin support --secret-key too
cmd.append("--secret")
cmd.append("%s" % secret_key)
return call(cmd, remote_addr)
def user_get(self, user, region=None, zone=None, zone_region_prefix="client.radosgw"):
if region is not None and zone is not None:
cmd = ["sudo", "/bin/radosgw-admin", "user", "info", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user]
else:
cmd = ["/usr/bin/radosgw-admin", "user", "info", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user]
if region is not None and zone is not None:
cmd.append("-n")
# NB: This should match '[client.radosgw...]' or something similar found in ceph.conf for the RGW section
cmd.append("%s.%s-%s" % (zone_region_prefix, region, zone))
return call(cmd)
def user_keys_add(self, user, access_key=None, secret_key=None, region=None, zone=None, zone_region_prefix="client.radosgw"):
if region is not None and zone is not None:
cmd = ["sudo", "/usr/bin/radosgw-admin", "key", "create", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--key-type", "s3"]
else:
cmd = ["/usr/bin/radosgw-admin", "key", "create", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--key-type", "s3"]
if access_key is not None:
cmd.append("--access_key")
cmd.append("%s" % access_key)
else:
cmd.append("--gen-access-key")
if secret_key is not None:
cmd.append("--secret")
cmd.append("%s" % secret)
else:
cmd.append("--gen-secret")
if region is not None and zone is not None:
cmd.append("-n")
# NB: This should match '[client.radosgw...]' or something similar found in ceph.conf for the RGW section
cmd.append("%s.%s-%s" % (zone_region_prefix, region, zone))
return call(cmd)
def user_quota_enable(self, user, region=None, zone=None, zone_region_prefix="client.radosgw"):
if region is not None and zone is not None:
cmd = ["sudo", "/bin/radosgw-admin", "quota", "enable", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--quota-scope", "user"]
else:
cmd = ["/usr/bin/radosgw-admin", "quota", "enable", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--quota-scope", "user"]
if region is not None and zone is not None:
cmd.append("-n")
# NB: This should match '[client.radosgw...]' or something similar found in ceph.conf for the RGW section
cmd.append("%s.%s-%s" % (zone_region_prefix, region, zone))
return call(cmd)
def user_quota_disable(self, user, region=None, zone=None, zone_region_prefix="client.radosgw"):
if region is not None and zone is not None:
cmd = ["sudo", "/bin/radosgw-admin", "quota", "disable", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--quota-scope", "user"]
else:
cmd = ["/usr/bin/radosgw-admin", "quota", "disable", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--quota-scope", "user"]
if region is not None and zone is not None:
cmd.append("-n")
# NB: This should match '[client.radosgw...]' or something similar found in ceph.conf for the RGW section
cmd.append("%s.%s-%s" % (zone_region_prefix, region, zone))
return call(cmd)
def user_quota_set(self, user, num, scope="user", qtype="size", region=None, zone=None, zone_region_prefix="client.radosgw"):
if region is not None and zone is not None:
cmd = ["sudo", "/bin/radosgw-admin", "quota", "set", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--quota-scope", "%s" % scope]
else:
cmd = ["/usr/bin/radosgw-admin", "quota", "set", "--conf", "/etc/ceph/ceph.conf", "--uid", "%s" % user, "--quota-scope", "%s" % scope]
if qtype == "objects":
cmd.append("--max-objects")
else:
cmd.append("--max-size")
cmd.append("%s" % num)
if region is not None and zone is not None:
cmd.append("-n")
# NB: This should match '[client.radosgw...]' or something similar found in ceph.conf for the RGW section
cmd.append("%s.%s-%s" % (zone_region_prefix, region, zone))
return call(cmd)
# NB: Expects JSON returned
def call(cmd, remote_addr=None):
if remote_addr is None:
remote_addr = ''
log.debug(str(datetime.datetime.utcnow()) + ' ' + remote_addr + ' ' + ' '.join([str(x) for x in cmd]))
    process = subprocess.Popen(cmd, env=os.environ.copy(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
json_output, err = process.communicate()
if err:
log.error(err)
return None
# log.debug(json_output)
return json_output
def flaskify(func, *args, **kwargs):
    """
    Wraps Flask response generation so that the underlying worker
    functions can be invoked without a Flask application context.
    :param func: function to invoke
    :param *args: any arguments to pass to the func
    :param **kwargs: any keyword arguments to pass to func
    :returns: Flask response object
    """
    result = ''
try:
result = func(*args, **kwargs)
    except Exception as e:
log.error(e)
return result
@app.route('/')
def help():
return flask.render_template('rgw_webservice_help.html')
@app.route('/v1/users/create/<user>', methods=['PUT'])
def rgw_users_create(user):
api = RGWWebServiceAPI()
# Getting parameters
# NB: Display Name is required
display_name = request.args.get('display_name')
region = request.args.get('region')
zone = request.args.get('zone')
access_key = request.args.get('access_key')
secret_key = request.args.get('secret_key')
email = request.args.get('email')
remote_addr = request.headers.get('X-Forwarded-For')
if remote_addr is None:
remote_addr = request.remote_addr
# Json example
# flask.jsonify(data_dict)
return flaskify(api.user_create, user, display_name=display_name, remote_addr=remote_addr, region=region, zone=zone, access_key=access_key, secret_key=secret_key, email=email)
@app.route('/v1/users/get/<user>', methods=['GET'])
def rgw_users_get(user):
api = RGWWebServiceAPI()
# Getting parameters
region = request.args.get('region')
zone = request.args.get('zone')
return flaskify(api.user_get, user, region=region, zone=zone)
@app.route('/v1/users/keys/add/<user>', methods=['PUT'])
def rgw_users_keys_add(user):
api = RGWWebServiceAPI()
# Getting parameters
access_key = request.args.get('access_key')
secret_key = request.args.get('secret_key')
region = request.args.get('region')
zone = request.args.get('zone')
# Json example
# flask.jsonify(data_dict)
return flaskify(api.user_keys_add, user, access_key=access_key, secret_key=secret_key, region=region, zone=zone)
@app.route('/v1/users/quota/enable/<user>', methods=['PUT'])
def rgw_users_quota_enable(user):
api = RGWWebServiceAPI()
# Getting parameters
region = request.args.get('region')
zone = request.args.get('zone')
# Json example
# flask.jsonify(data_dict)
return flaskify(api.user_quota_enable, user, region=region, zone=zone)
@app.route('/v1/users/quota/disable/<user>', methods=['PUT'])
def rgw_users_quota_disable(user):
api = RGWWebServiceAPI()
# Getting parameters
region = request.args.get('region')
zone = request.args.get('zone')
# Json example
# flask.jsonify(data_dict)
return flaskify(api.user_quota_disable, user, region=region, zone=zone)
# NB: scope can be 'user' or 'bucket'
# NB: qtype can be 'objects' or 'size'
@app.route('/v1/users/quota/set/<user>/<scope>/<qtype>', methods=['PUT'])
def rgw_users_quota_set(user, scope, qtype):
api = RGWWebServiceAPI()
# Getting parameters
num = request.args.get('num')
region = request.args.get('region')
zone = request.args.get('zone')
# Json example
# flask.jsonify(data_dict)
return flaskify(api.user_quota_set, user, num, scope, qtype, region=region, zone=zone)
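# NB: Hypothetical client sketch (not part of the service above) showing how the routes
# could be exercised with the 'requests' library. The base_url, user id, and quota value
# are assumptions for illustration only.
def example_client(base_url='http://127.0.0.1:5000'):
    import requests
    # Create a user; display_name falls back to the user id on the server side
    resp = requests.put('%s/v1/users/create/testuser' % base_url, params={'display_name': 'Test User'})
    print(resp.text)
    # Fetch the user record back
    resp = requests.get('%s/v1/users/get/testuser' % base_url)
    print(resp.text)
    # Cap the user quota at 1 GiB of data (scope 'user', qtype 'size')
    resp = requests.put('%s/v1/users/quota/set/testuser/user/size' % base_url, params={'num': 1024 ** 3})
    print(resp.text)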
if __name__ == '__main__':
app.run()
|
#!/usr/bin/env python
#
# Author: Hans Chris Jones <[email protected]>
#
# Copyright 2017, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import glob
import os
from os import listdir
from os.path import isfile, join
import boto
import boto.s3.connection
from boto.s3.connection import Location
from boto.s3.bucket import Bucket
# NOTE: Modify these values with your key, secret and url/ip for s3 endpoint
access_key = "<whatever your access_key is>"
secret_key = "<whatever your secret_key is>"
endpoint = "<whatever your s3 url or IP is>"
admin_user = "<whatever your RGW admin user is>"
# NOTE: Add the proxy if you need one.
def connect(key, secret, host, proxy=None, user_agent=None, port=80,
proxy_port=8080, is_secure=False, debug=0, verbose=False):
conn = None
try:
conn = boto.connect_s3(
aws_access_key_id=key,
aws_secret_access_key=secret,
port=port,
host=host,
proxy=proxy,
proxy_port=proxy_port,
is_secure=is_secure,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
debug=debug)
if conn and verbose:
print('RGW Connected.')
    except BaseException as e:
print(e.message)
return conn
def bucket_handle(conn, bucket_name, validate=True, headers=None, create=False, make_public=False, verbose=False):
if not conn or not bucket_name:
if verbose:
print('Connection and/or bucket name not valid - unable to get handle.')
return None
bucket = None
try:
bucket = conn.lookup(bucket_name, validate=validate, headers=headers)
if not bucket and create:
bucket = bucket_create(conn, bucket_name, headers=headers, make_public=make_public, verbose=verbose) # add policy for acls
if bucket:
if verbose:
print('Bucket %s created.' % bucket_name)
else:
if verbose:
print('Bucket %s not created.' % bucket_name)
else:
if verbose:
print('Bucket %s found.' % bucket_name)
if bucket and make_public: # temporary...
bucket.make_public(recursive=False, headers=headers)
if verbose:
print('Bucket %s made public.' % bucket_name)
    except BaseException as e:
print(e.message)
return bucket
def bucket_create(conn, bucket_name, location=Location.DEFAULT, policy=None, headers=None, make_public=False, verbose=False):
if not conn or not bucket_name:
if verbose:
print('Connection and/or bucket name not valid - unable to create.')
return None
bucket = None
try:
bucket = conn.create_bucket(bucket_name, location=location, policy=policy, headers=headers)
if make_public:
bucket.make_public(recursive=True, headers=headers)
if bucket and verbose:
print('Bucket %s created.' % bucket_name)
    except BaseException as e:
print(e.message)
return bucket
def bucket_list(bucket, verbose=False):
if not bucket:
if verbose:
print('Bucket handle not valid.')
return None
try:
for i in bucket.list():
obj_name = i.key.split("/")[-1] # i.name # i.key.split("/")[-1]
size = i.size # i.get_contents_to_filename(obj_name)
print('%s: %d' % (obj_name, size))
    except BaseException as e:
print(e.message)
return None # This is only a debug test function...
def object_create(bucket, name, string_value=None, file_name_path=None, make_public=False, headers=None, verbose=False):
if not bucket or not name or (string_value is None and file_name_path is None):
if verbose:
print('Bucket handle not valid OR string_value or file_name is empty.')
return None
key = None
try:
key = bucket.get_key(name)
if not key:
key = bucket.new_key(name)
if key:
if string_value:
key.set_contents_from_string(string_value)
if file_name_path:
# Check the size of the file. If it's larger than xxx then do a multipart else do a normal set_content
key.set_contents_from_filename(file_name_path)
if key and make_public:
key.make_public(headers=headers)
if verbose:
print('Object %s created/updated.' % name)
else:
if verbose:
print('Object %s was not created/updated.' % name)
    except BaseException as e:
print(e.message)
return key
def object_delete(bucket, name, headers=None, version_id=None, mfa_token=None, verbose=False):
if not bucket or not name:
if verbose:
print('Bucket and/or object name is not valid.')
return None
key_object = None
try:
key_object = bucket.delete_key(name, headers=headers, version_id=version_id, mfa_token=mfa_token)
if verbose:
print('Object %s deleted.' % name)
    except BaseException as e:
print(e.message)
return key_object
def object_get(bucket, name, file_name_path, headers=None, version_id=None, response_headers=None, verbose=False):
if not bucket or not name:
if verbose:
print('Bucket and/or object name is not valid.')
return None
if not file_name_path:
file_name_path = name
key_object = None
try:
key_object = bucket.get_key(name, headers=headers, version_id=version_id, response_headers=response_headers)
key_object.get_contents_to_filename(file_name_path)
if verbose:
print('Retrieved object %s.' % name)
    except BaseException as e:
print(e.message)
return key_object
def object_url(bucket, name, signed_duration=0, query_auth=False, force_http=False, verbose=False):
"""
:param bucket:
:param name:
:param signed_duration:
:param query_auth:
:param force_http: default is False so that the port is included if the port is not 80
:param verbose:
:return: url
"""
if not bucket or not name:
if verbose:
print('Bucket and/or object name is not valid.')
return None
url = None
try:
if signed_duration < 0:
signed_duration = 0
if signed_duration > 0 and query_auth is False:
query_auth = True
key_object = bucket.get_key(name)
        # If signed_duration > 0, generate a signed url that is valid for that many seconds.
url = key_object.generate_url(signed_duration, query_auth=query_auth, force_http=force_http)
if url and verbose:
print('Generated %s' % url)
    except BaseException as e:
print(e.message)
return url
def upload_directory(bucket, directory, pattern='*', include_dir_prefix=False, make_public=False, verbose=False):
if not bucket or not directory:
if verbose:
print('Bucket and/or directory name is not valid.')
return None
# One way
# files = [f for f in listdir(directory) if isfile(join(directory, f))]
# Using glob allows for simple patterns and no hidden files...
files = glob.glob(os.path.join(directory, pattern))
files = [f for f in files if isfile(f)] # Scrub the list for only files
file_names = [f.split('/')[-1] for f in files]
if verbose:
print('Directory list obtained and scrubbed.')
key_objects = []
count = 0
for file_name in file_names:
key_objects.append(object_create(bucket, file_name, file_name_path=files[count], make_public=make_public, verbose=verbose))
if verbose:
print('File: %s uploaded.' % file_name)
count += 1
return key_objects
# NB: Create a Tenancy (user) using the RGW API which is part of RGW on the same port(s).
# NB: *MUST USE* user with admin caps such as the default 'radosgw' user ceph-chef creates by default.
def user_create(conn, admin_user, user_name, display_name, caps=None, verbose=False):
if not conn or not user_name:
if verbose:
print('Connection and/or user name not valid - unable to create.')
return None
user = None
try:
print "/%s/user?format=json&uid=%s&display-name='%s'" % (admin_user, user_name, display_name)
resp = conn.make_request("PUT", query_args="/%s/user?format=json&uid=%s&display-name='%s'" % (admin_user, user_name, display_name))
if resp:
print resp.status
print resp.read()
if user and verbose:
print('User %s created.' % user_name)
    except BaseException as e:
print(e.message)
return user
def main():
    # NOTE: This is just a number of samples that can be called via the cli. However, the primary purpose of this
    # file is the set of functions defined above, which are imported into rgw-admin.py
    conn = connect(access_key, secret_key, endpoint, is_secure=False, verbose=True)  # debug=2 Add more options later...
# Sample header for object_get
# headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'en-US,en;q=0.8'}
bucket = bucket_handle(conn, 'bcs-test', make_public=True, validate=True, create=True, verbose=True)
object_create(bucket, 'hello1.txt', string_value='Hello everyone :)', make_public=True, verbose=True)
# file_name_path = 'eabbcab9-1d30-4218-ad78-640f234463d0_2400.mp4'
# file_name = file_name_path.split('/')[-1]
# object_create(bucket,file_name, file_name_path=file_name_path, make_public=True, verbose=True)
bucket_list(bucket, verbose=True)
# object_delete(bucket, 'hello.txt', verbose=True)
# Example usage:
key_object = object_get(bucket, 'hello1.txt', 'hello1.txt', verbose=True)
# upload_directory(bucket, '<directory>', make_public=True, verbose=True)
# print object_url(bucket, 'test_video.mp4')
    print(object_url(bucket, 'hello1.txt', signed_duration=60))
if __name__ == "__main__":
main()
|
import shutil
from typing import Dict, List
import pytest
import random
from datastore.providers.chroma_datastore import ChromaDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryWithEmbedding,
Source,
)
TEST_PERSISTENCE_DIR = "chroma_test_datastore"
COLLECTION_NAME = "documents"
def ephemeral_chroma_datastore() -> ChromaDataStore:
# Initialize an ephemeral in-memory ChromaDB instance
return ChromaDataStore(
collection_name=COLLECTION_NAME, in_memory=True, persistence_dir=None
)
def persisted_chroma_datastore() -> ChromaDataStore:
# Initialize an in-memory ChromaDB instance with persistence
return ChromaDataStore(
collection_name=COLLECTION_NAME,
in_memory=True,
persistence_dir=TEST_PERSISTENCE_DIR,
)
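# Yield both variants so each test below runs against the ephemeral and the persisted store.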
def get_chroma_datastore() -> ChromaDataStore:
yield ephemeral_chroma_datastore()
yield persisted_chroma_datastore()
# Delete the persistence directory after the test
@pytest.fixture(autouse=True)
def cleanup():
yield
shutil.rmtree(TEST_PERSISTENCE_DIR, ignore_errors=True)
# Seed for deterministic testing
random.seed(0)
def create_embedding(dim: int) -> List[float]:
return [random.random() for _ in range(dim)]
# Data fixtures
TEST_EMBEDDING_DIM = 5
N_TEST_CHUNKS = 5
@pytest.fixture
def initial_document_chunks() -> Dict[str, List[DocumentChunk]]:
first_doc_chunks = [
DocumentChunk(
id=f"first-doc-{i}",
text=f"Lorem ipsum {i}",
metadata=DocumentChunkMetadata(),
embedding=create_embedding(TEST_EMBEDDING_DIM),
)
for i in range(N_TEST_CHUNKS)
]
return {
"first-doc": first_doc_chunks,
}
@pytest.fixture
def document_chunks(initial_document_chunks) -> Dict[str, List[DocumentChunk]]:
doc_chunks = initial_document_chunks
for k, v in doc_chunks.items():
for chunk in v:
chunk.metadata = DocumentChunkMetadata(
source=Source.email, created_at="2023-04-03", document_id="first-doc"
)
chunk.embedding = create_embedding(TEST_EMBEDDING_DIM)
doc_chunks["second-doc"] = [
DocumentChunk(
id=f"second-doc-{i}",
text=f"Dolor sit amet {i}",
metadata=DocumentChunkMetadata(
created_at="2023-04-04", document_id="second-doc"
),
embedding=create_embedding(TEST_EMBEDDING_DIM),
)
for i in range(N_TEST_CHUNKS)
]
return doc_chunks
@pytest.mark.asyncio
async def test_add_chunks(document_chunks: Dict[str, List[DocumentChunk]]):
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
assert datastore._collection.count() == 0
print(document_chunks)
assert await datastore._upsert(document_chunks) == list(document_chunks.keys())
assert datastore._collection.count() == sum(
len(v) for v in document_chunks.values()
)
@pytest.mark.asyncio
async def test_upsert(
initial_document_chunks: Dict[str, List[DocumentChunk]],
document_chunks: Dict[str, List[DocumentChunk]],
):
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
assert await datastore._upsert(initial_document_chunks) == list(
initial_document_chunks.keys()
)
assert datastore._collection.count() == sum(
len(v) for v in initial_document_chunks.values()
)
assert await datastore._upsert(document_chunks) == list(document_chunks.keys())
assert datastore._collection.count() == sum(
len(v) for v in document_chunks.values()
)
@pytest.mark.asyncio
async def test_add_and_query_all(document_chunks):
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
        assert await datastore._upsert(document_chunks) == list(document_chunks.keys())
query = QueryWithEmbedding(
query="",
embedding=create_embedding(TEST_EMBEDDING_DIM),
top_k=10,
)
query_results = await datastore._query(queries=[query])
assert 1 == len(query_results)
assert 10 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_query_accuracy(document_chunks):
for _, v in document_chunks.items():
for chunk in v:
print(f"id: {chunk.id} emb: {chunk.embedding}")
def add_noise_to_embedding(embedding: List[float], eps: float = 0) -> List[float]:
return [x + eps * (1.0 - 2 * random.random()) for x in embedding]
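    # With the default eps=0 the query embedding equals the chunk's embedding exactly,
    # so the top-1 result must be the chunk itself.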
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
print(datastore._collection.get(include=["embeddings"]))
res = await datastore._upsert(document_chunks)
res = datastore._collection.get(include=["embeddings"])
for id, emb in zip(res["ids"], res["embeddings"]):
print(f"id: {id} emb: {emb}")
for _, v in document_chunks.items():
for chunk in v:
print(f"chunk: {chunk}")
query = QueryWithEmbedding(
query="",
embedding=add_noise_to_embedding(chunk.embedding),
top_k=1,
)
query_results = await datastore._query(queries=[query])
print(query_results)
assert query_results[0].results[0].id == chunk.id
@pytest.mark.asyncio
async def test_query_filter_by_id(document_chunks):
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
await datastore._upsert(document_chunks)
for doc_id, chunks in document_chunks.items():
query = QueryWithEmbedding(
query="",
embedding=chunks[0].embedding,
top_k=N_TEST_CHUNKS,
filter=DocumentMetadataFilter(document_id=doc_id),
)
query_results = await datastore._query(queries=[query])
# Assert that all document chunks are returned
assert len(query_results[0].results) == len(chunks)
assert all(
[
result.id in [chunk.id for chunk in chunks]
for result in query_results[0].results
]
)
@pytest.mark.asyncio
async def test_query_filter_by_date(document_chunks):
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
await datastore._upsert(document_chunks)
# Filter by dates for only the first document
query = QueryWithEmbedding(
query="",
embedding=document_chunks["first-doc"][0].embedding,
top_k=N_TEST_CHUNKS,
filter=DocumentMetadataFilter(
start_date="2023-04-03", end_date="2023-04-03"
),
)
query_results = await datastore._query(queries=[query])
# Assert that only the first document is returned
assert len(query_results[0].results) == len(document_chunks["first-doc"])
assert all(
[
result.id in [chunk.id for chunk in document_chunks["first-doc"]]
for result in query_results[0].results
]
)
# Filter for the entire date span
query = QueryWithEmbedding(
query="",
embedding=document_chunks["first-doc"][0].embedding,
top_k=N_TEST_CHUNKS * len(document_chunks),
filter=DocumentMetadataFilter(
start_date="2023-04-03", end_date="2023-04-04"
),
)
query_results = await datastore._query(queries=[query])
# Assert that both documents are returned
assert len(query_results[0].results) == len(document_chunks["first-doc"]) + len(
document_chunks["second-doc"]
)
assert all(
[
result.id
in [chunk.id for chunk in document_chunks["first-doc"]]
+ [chunk.id for chunk in document_chunks["second-doc"]]
for result in query_results[0].results
]
)
@pytest.mark.asyncio
async def test_delete_by_id(document_chunks):
for datastore in get_chroma_datastore():
await datastore.delete(delete_all=True)
await datastore._upsert(document_chunks)
# Delete the first document
await datastore.delete(ids=["first-doc"])
# Assert that the first document is deleted
query = QueryWithEmbedding(
query="",
embedding=document_chunks["first-doc"][0].embedding,
top_k=N_TEST_CHUNKS,
)
query_results = await datastore._query(queries=[query])
# Assert that only the second document is still there
query_results = await datastore._query(queries=[query])
assert len(query_results[0].results) == len(document_chunks["second-doc"])
assert all(
[
result.id in [chunk.id for chunk in document_chunks["second-doc"]]
for result in query_results[0].results
]
)
|
from datastore.providers.redis_datastore import RedisDataStore
from models.models import DocumentChunk, DocumentChunkMetadata, QueryWithEmbedding, Source, DocumentMetadataFilter
import pytest
import redis.asyncio as redis
import numpy as np
NUM_TEST_DOCS = 10
@pytest.fixture
async def redis_datastore():
return await RedisDataStore.init(dim=5)
def create_embedding(i, dim):
    # Base vector of 0.1s; the last component encodes i so chunks are ordered by distance.
    vec = np.array([0.1] * dim).astype(np.float64).tolist()
    vec[dim - 1] = i + 0.1
    return vec
def create_document_chunk(i, dim):
return DocumentChunk(
id=f"first-doc_{i}",
text=f"Lorem ipsum {i}",
embedding=create_embedding(i, dim),
metadata=DocumentChunkMetadata(
source=Source.file, created_at="1970-01-01", document_id="docs"
),
)
def create_document_chunks(n, dim):
docs = [create_document_chunk(i, dim) for i in range(n)]
return {"docs": docs}
@pytest.mark.asyncio
async def test_redis_upsert_query(redis_datastore):
docs = create_document_chunks(NUM_TEST_DOCS, 5)
await redis_datastore._upsert(docs)
query = QueryWithEmbedding(
query="Lorem ipsum 0",
top_k=5,
embedding= create_embedding(0, 5),
)
query_results = await redis_datastore._query(queries=[query])
assert 1 == len(query_results)
for i in range(5):
assert f"Lorem ipsum {i}" == query_results[0].results[i].text
assert "docs" == query_results[0].results[i].id
@pytest.mark.asyncio
async def test_redis_filter_query(redis_datastore):
query = QueryWithEmbedding(
query="Lorem ipsum 0",
filter=DocumentMetadataFilter(document_id="docs"),
top_k=5,
embedding= create_embedding(0, 5),
)
query_results = await redis_datastore._query(queries=[query])
print(query_results)
assert 1 == len(query_results)
assert "docs" == query_results[0].results[0].id
@pytest.mark.asyncio
async def test_redis_delete_docs(redis_datastore):
res = await redis_datastore.delete(ids=["docs"])
assert res
|
import pytest
import os
import time
from typing import Union
from azure.search.documents.indexes import SearchIndexClient
from models.models import DocumentMetadataFilter, Query, Source, Document, DocumentMetadata
AZURESEARCH_TEST_INDEX = "testindex"
os.environ["AZURESEARCH_INDEX"] = AZURESEARCH_TEST_INDEX
if os.environ.get("AZURESEARCH_SERVICE") == None:
os.environ["AZURESEARCH_SERVICE"] = "invalid service name" # Will fail anyway if not set to a real service, but allows tests to be discovered
import datastore.providers.azuresearch_datastore
from datastore.providers.azuresearch_datastore import AzureSearchDataStore
@pytest.fixture(scope="module")
def azuresearch_mgmt_client():
service = os.environ["AZURESEARCH_SERVICE"]
return SearchIndexClient(
endpoint=f"https://{service}.search.windows.net",
credential=AzureSearchDataStore._create_credentials(False)
)
def test_translate_filter():
    assert AzureSearchDataStore._translate_filter(
        DocumentMetadataFilter()
    ) is None
for field in ["document_id", "source", "source_id", "author"]:
value = Source.file if field == "source" else f"test_{field}"
needs_escaping_value = None if field == "source" else f"test'_{field}"
assert AzureSearchDataStore._translate_filter(
DocumentMetadataFilter(**{field: value})
) == f"{field} eq '{value}'"
        if needs_escaping_value is not None:
assert AzureSearchDataStore._translate_filter(
DocumentMetadataFilter(**{field: needs_escaping_value})
) == f"{field} eq 'test''_{field}'"
assert AzureSearchDataStore._translate_filter(
DocumentMetadataFilter(
document_id = "test_document_id",
source = Source.file,
source_id = "test_source_id",
author = "test_author"
)
) == "document_id eq 'test_document_id' and source eq 'file' and source_id eq 'test_source_id' and author eq 'test_author'"
with pytest.raises(ValueError):
assert AzureSearchDataStore._translate_filter(
DocumentMetadataFilter(start_date="2023-01-01")
)
with pytest.raises(ValueError):
assert AzureSearchDataStore._translate_filter(
DocumentMetadataFilter(end_date="2023-01-01")
)
assert AzureSearchDataStore._translate_filter(
DocumentMetadataFilter(start_date="2023-01-01T00:00:00Z", end_date="2023-01-02T00:00:00Z", document_id = "test_document_id")
) == "document_id eq 'test_document_id' and created_at ge 2023-01-01T00:00:00Z and created_at le 2023-01-02T00:00:00Z"
@pytest.mark.asyncio
async def test_lifecycle_hybrid(azuresearch_mgmt_client: SearchIndexClient):
datastore.providers.azuresearch_datastore.AZURESEARCH_DISABLE_HYBRID = None
datastore.providers.azuresearch_datastore.AZURESEARCH_SEMANTIC_CONFIG = None
await lifecycle(azuresearch_mgmt_client)
@pytest.mark.asyncio
async def test_lifecycle_vectors_only(azuresearch_mgmt_client: SearchIndexClient):
datastore.providers.azuresearch_datastore.AZURESEARCH_DISABLE_HYBRID = "1"
datastore.providers.azuresearch_datastore.AZURESEARCH_SEMANTIC_CONFIG = None
await lifecycle(azuresearch_mgmt_client)
@pytest.mark.asyncio
async def test_lifecycle_semantic(azuresearch_mgmt_client: SearchIndexClient):
datastore.providers.azuresearch_datastore.AZURESEARCH_DISABLE_HYBRID = None
datastore.providers.azuresearch_datastore.AZURESEARCH_SEMANTIC_CONFIG = "testsemconfig"
await lifecycle(azuresearch_mgmt_client)
async def lifecycle(azuresearch_mgmt_client: SearchIndexClient):
if AZURESEARCH_TEST_INDEX in azuresearch_mgmt_client.list_index_names():
azuresearch_mgmt_client.delete_index(AZURESEARCH_TEST_INDEX)
assert AZURESEARCH_TEST_INDEX not in azuresearch_mgmt_client.list_index_names()
try:
store = AzureSearchDataStore()
index = azuresearch_mgmt_client.get_index(AZURESEARCH_TEST_INDEX)
assert index is not None
result = await store.upsert([
Document(
id="test_id_1",
text="test text",
metadata=DocumentMetadata(source=Source.file, source_id="test_source_id", author="test_author", created_at="2023-01-01T00:00:00Z", url="http://some-test-url/path")),
Document(
id="test_id_2+",
text="different",
metadata=DocumentMetadata(source=Source.file, source_id="test_source_id", author="test_author", created_at="2023-01-01T00:00:00Z", url="http://some-test-url/path"))])
assert len(result) == 2 and result[0] == "test_id_1" and result[1] == "test_id_2+"
# query in a loop in case we need to retry since documents aren't searchable synchronously after updates
for _ in range(4):
time.sleep(0.25)
result = await store.query([Query(query="text")])
if len(result) > 0 and len(result[0].results) > 0:
break
assert len(result) == 1 and len(result[0].results) == 2
assert result[0].results[0].metadata.document_id == "test_id_1" and result[0].results[1].metadata.document_id == "test_id_2+"
result = await store.query([Query(query="text", filter=DocumentMetadataFilter(source_id="test_source_id"))])
assert len(result) == 1 and len(result[0].results) == 2
assert result[0].results[0].metadata.document_id == "test_id_1" and result[0].results[1].metadata.document_id == "test_id_2+"
result = await store.query([Query(query="text", filter=DocumentMetadataFilter(source_id="nonexisting_id"))])
assert len(result) == 1 and len(result[0].results) == 0
result = await store.query([Query(query="text", filter=DocumentMetadataFilter(start_date="2023-01-02T00:00:00Z"))])
assert len(result) == 1 and len(result[0].results) == 0
result = await store.query([Query(query="text", filter=DocumentMetadataFilter(start_date="2023-01-01T00:00:00Z"))])
assert len(result) == 1 and len(result[0].results) == 2
assert result[0].results[0].metadata.document_id == "test_id_1" and result[0].results[1].metadata.document_id == "test_id_2+"
result = await store.query([Query(query="text", filter=DocumentMetadataFilter(end_date="2022-12-31T00:00:00Z"))])
assert len(result) == 1 and len(result[0].results) == 0
result = await store.query([Query(query="text", filter=DocumentMetadataFilter(end_date="2023-01-02T00:00:00Z"))])
assert len(result) == 1 and len(result[0].results) == 2
assert result[0].results[0].metadata.document_id == "test_id_1" and result[0].results[1].metadata.document_id == "test_id_2+"
# query in a loop in case we need to retry since documents aren't searchable synchronously after deletes
assert await store.delete(["test_id_1", "test_id_2+"])
for _ in range(4):
time.sleep(0.25)
result = await store.query([Query(query="text")])
if len(result) > 0 and len(result[0].results) == 0:
break
assert len(result) == 1 and len(result[0].results) == 0
finally:
azuresearch_mgmt_client.delete_index(AZURESEARCH_TEST_INDEX)
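# The retry loops above poll because Azure Cognitive Search indexes and
# deletes documents asynchronously. A hedged sketch of that pattern as a
# reusable helper (hypothetical, shown for illustration only):
async def _poll_for_results(run_query, attempts: int = 4, delay: float = 0.25):
    # run_query is an async callable returning the query results; we return
    # the last observed value so callers can still assert on it after timeout
    results = None
    for _ in range(attempts):
        time.sleep(delay)
        results = await run_query()
        if results:
            break
    return results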
|
from typing import Dict, List
import pytest
from datastore.providers.supabase_datastore import SupabaseDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryWithEmbedding,
)
def create_embedding(non_zero_pos: int) -> List[float]:
# create a vector with a single non-zero value of dimension 1536
vector = [0.0] * 1536
vector[non_zero_pos - 1] = 1.0
return vector
@pytest.fixture
def initial_document_chunks() -> Dict[str, List[DocumentChunk]]:
first_doc_chunks = [
DocumentChunk(
id=f"first-doc-{i}",
text=f"Lorem ipsum {i}",
metadata=DocumentChunkMetadata(),
embedding=create_embedding(i),
)
for i in range(4, 7)
]
return {
"first-doc": first_doc_chunks,
}
@pytest.fixture
def queries() -> List[QueryWithEmbedding]:
queries = [
QueryWithEmbedding(
query="Query 1",
top_k=1,
embedding=create_embedding(4),
),
QueryWithEmbedding(
query="Query 2",
top_k=2,
embedding=create_embedding(5),
),
]
return queries
@pytest.fixture
def supabase_datastore() -> SupabaseDataStore:
return SupabaseDataStore()
@pytest.mark.asyncio
async def test_upsert(
supabase_datastore: SupabaseDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
) -> None:
"""Test basic upsert."""
doc_ids = await supabase_datastore._upsert(initial_document_chunks)
assert doc_ids == [doc_id for doc_id in initial_document_chunks]
@pytest.mark.asyncio
async def test_query(
supabase_datastore: SupabaseDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
queries: List[QueryWithEmbedding],
) -> None:
"""Test basic query."""
# insert to prepare for test
await supabase_datastore._upsert(initial_document_chunks)
query_results = await supabase_datastore._query(queries)
assert len(query_results) == len(queries)
query_0_results = query_results[0].results
query_1_results = query_results[1].results
assert len(query_0_results) == 1
assert len(query_1_results) == 2
# NOTE: this ordering is the correct behavior; results are ranked by similarity, so the exact embedding match comes first
assert query_0_results[0].id == "first-doc-4"
assert query_1_results[0].id == "first-doc-5"
assert query_1_results[1].id == "first-doc-4"
@pytest.mark.asyncio
async def test_delete_basic(
supabase_datastore: SupabaseDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
) -> None:
# insert to prepare for test
await supabase_datastore._upsert(initial_document_chunks)
is_success = await supabase_datastore.delete(["first-doc"])
assert is_success
@pytest.mark.asyncio
async def test_upsert_new_chunk(supabase_datastore):
await supabase_datastore.delete(delete_all=True)
chunk = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
ids = await supabase_datastore._upsert({"doc1": [chunk]})
assert len(ids) == 1
@pytest.mark.asyncio
async def test_upsert_existing_chunk(supabase_datastore):
await supabase_datastore.delete(delete_all=True)
chunk = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
ids = await supabase_datastore._upsert({"doc1": [chunk]})
chunk = DocumentChunk(
id="chunk1",
text="New text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
ids = await supabase_datastore._upsert({"doc1": [chunk]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
top_k=1,
)
results = await supabase_datastore._query([query])
assert len(ids) == 1
assert len(results[0].results) == 1
assert results[0].results[0].id == "chunk1"
assert results[0].results[0].text == "New text"
@pytest.mark.asyncio
async def test_query_score(supabase_datastore):
await supabase_datastore.delete(delete_all=True)
chunk1 = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
chunk2 = DocumentChunk(
id="chunk2",
text="Another text",
embedding=[-1 if i % 2 == 0 else 1 for i in range(1536)],
metadata=DocumentChunkMetadata(),
)
await supabase_datastore._upsert({"doc1": [chunk1], "doc2": [chunk2]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
)
results = await supabase_datastore._query([query])
assert results[0].results[0].id == "chunk1"
assert int(results[0].results[0].score) == 1536
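# Sanity check on the score assertion above, assuming the store reports an
# inner-product similarity: the all-ones query dotted with chunk1's all-ones
# embedding gives 1 * 1536 = 1536, while chunk2's alternating +/-1 embedding
# is orthogonal to the query and scores ~0.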
@pytest.mark.asyncio
async def test_query_filter(supabase_datastore):
await supabase_datastore.delete(delete_all=True)
chunk1 = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(
source="email", created_at="2021-01-01", author="John"
),
)
chunk2 = DocumentChunk(
id="chunk2",
text="Another text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(
source="chat", created_at="2022-02-02", author="Mike"
),
)
await supabase_datastore._upsert({"doc1": [chunk1], "doc2": [chunk2]})
# Test author filter -- string
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
filter=DocumentMetadataFilter(author="John"),
)
results = await supabase_datastore._query([query])
assert results[0].results[0].id == "chunk1"
# Test source filter -- enum
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
filter=DocumentMetadataFilter(source="chat"),
)
results = await supabase_datastore._query([query])
assert results[0].results[0].id == "chunk2"
# Test created_at filter -- date
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
filter=DocumentMetadataFilter(start_date="2022-01-01"),
)
results = await supabase_datastore._query([query])
assert results[0].results[0].id == "chunk2"
@pytest.mark.asyncio
async def test_delete(supabase_datastore):
await supabase_datastore.delete(delete_all=True)
chunk1 = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
chunk2 = DocumentChunk(
id="chunk2",
text="Another text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
await supabase_datastore._upsert({"doc1": [chunk1], "doc2": [chunk2]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Another query",
embedding=query_embedding,
)
results = await supabase_datastore._query([query])
assert len(results[0].results) == 2
assert results[0].results[0].id == "chunk1"
assert results[0].results[1].id == "chunk2"
await supabase_datastore.delete(ids=["doc1"])
results_after_delete = await supabase_datastore._query([query])
assert len(results_after_delete[0].results) == 1
assert results_after_delete[0].results[0].id == "chunk2"
@pytest.mark.asyncio
async def test_delete_all(supabase_datastore):
await supabase_datastore.delete(delete_all=True)
chunk = DocumentChunk(
id="chunk",
text="Another text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
await supabase_datastore._upsert({"doc": [chunk]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Another query",
embedding=query_embedding,
top_k=1,
)
results = await supabase_datastore._query([query])
assert len(results) == 1
assert len(results[0].results) == 1
assert results[0].results[0].id == "chunk"
await supabase_datastore.delete(delete_all=True)
results_after_delete = await supabase_datastore._query([query])
assert len(results_after_delete[0].results) == 0
|
from typing import Dict, List
import pytest
from datastore.providers.postgres_datastore import PostgresDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryWithEmbedding,
)
def create_embedding(non_zero_pos: int) -> List[float]:
# create a vector with a single non-zero value of dimension 1536
vector = [0.0] * 1536
vector[non_zero_pos - 1] = 1.0
return vector
@pytest.fixture
def initial_document_chunks() -> Dict[str, List[DocumentChunk]]:
first_doc_chunks = [
DocumentChunk(
id=f"first-doc-{i}",
text=f"Lorem ipsum {i}",
metadata=DocumentChunkMetadata(),
embedding=create_embedding(i),
)
for i in range(4, 7)
]
return {
"first-doc": first_doc_chunks,
}
@pytest.fixture
def queries() -> List[QueryWithEmbedding]:
queries = [
QueryWithEmbedding(
query="Query 1",
top_k=1,
embedding=create_embedding(4),
),
QueryWithEmbedding(
query="Query 2",
top_k=2,
embedding=create_embedding(5),
),
]
return queries
@pytest.fixture
def postgres_datastore() -> PostgresDataStore:
return PostgresDataStore()
@pytest.mark.asyncio
async def test_upsert(
postgres_datastore: PostgresDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
) -> None:
"""Test basic upsert."""
doc_ids = await postgres_datastore._upsert(initial_document_chunks)
assert doc_ids == [doc_id for doc_id in initial_document_chunks]
@pytest.mark.asyncio
async def test_query(
postgres_datastore: PostgresDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
queries: List[QueryWithEmbedding],
) -> None:
"""Test basic query."""
# insert to prepare for test
await postgres_datastore._upsert(initial_document_chunks)
query_results = await postgres_datastore._query(queries)
assert len(query_results) == len(queries)
query_0_results = query_results[0].results
query_1_results = query_results[1].results
assert len(query_0_results) == 1
assert len(query_1_results) == 2
# NOTE: this ordering is the correct behavior; results are ranked by similarity, so the exact embedding match comes first
assert query_0_results[0].id == "first-doc-4"
assert query_1_results[0].id == "first-doc-5"
assert query_1_results[1].id == "first-doc-4"
@pytest.mark.asyncio
async def test_delete_basic(
postgres_datastore: PostgresDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
) -> None:
# insert to prepare for test
await postgres_datastore._upsert(initial_document_chunks)
is_success = await postgres_datastore.delete(["first-doc"])
assert is_success
@pytest.mark.asyncio
async def test_upsert_new_chunk(postgres_datastore):
await postgres_datastore.delete(delete_all=True)
chunk = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
ids = await postgres_datastore._upsert({"doc1": [chunk]})
assert len(ids) == 1
@pytest.mark.asyncio
async def test_upsert_existing_chunk(postgres_datastore):
await postgres_datastore.delete(delete_all=True)
chunk = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
ids = await postgres_datastore._upsert({"doc1": [chunk]})
chunk = DocumentChunk(
id="chunk1",
text="New text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
ids = await postgres_datastore._upsert({"doc1": [chunk]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
top_k=1,
)
results = await postgres_datastore._query([query])
assert len(ids) == 1
assert len(results[0].results) == 1
assert results[0].results[0].id == "chunk1"
assert results[0].results[0].text == "New text"
@pytest.mark.asyncio
async def test_query_score(postgres_datastore):
await postgres_datastore.delete(delete_all=True)
chunk1 = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
chunk2 = DocumentChunk(
id="chunk2",
text="Another text",
embedding=[-1 if i % 2 == 0 else 1 for i in range(1536)],
metadata=DocumentChunkMetadata(),
)
await postgres_datastore._upsert({"doc1": [chunk1], "doc2": [chunk2]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
)
results = await postgres_datastore._query([query])
assert results[0].results[0].id == "chunk1"
assert int(results[0].results[0].score) == 1536
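# Sanity check, assuming an inner-product score: the all-ones query dotted
# with chunk1's all-ones embedding yields 1 * 1536 = 1536, matching the
# integer-cast assertion above; chunk2's alternating +/-1 vector scores ~0.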
@pytest.mark.asyncio
async def test_query_filter(postgres_datastore):
await postgres_datastore.delete(delete_all=True)
chunk1 = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(
source="email", created_at="2021-01-01", author="John"
),
)
chunk2 = DocumentChunk(
id="chunk2",
text="Another text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(
source="chat", created_at="2022-02-02", author="Mike"
),
)
await postgres_datastore._upsert({"doc1": [chunk1], "doc2": [chunk2]})
# Test author filter -- string
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
filter=DocumentMetadataFilter(author="John"),
)
results = await postgres_datastore._query([query])
assert results[0].results[0].id == "chunk1"
# Test source filter -- enum
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
filter=DocumentMetadataFilter(source="chat"),
)
results = await postgres_datastore._query([query])
assert results[0].results[0].id == "chunk2"
# Test created_at filter -- date
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Query",
embedding=query_embedding,
filter=DocumentMetadataFilter(start_date="2022-01-01"),
)
results = await postgres_datastore._query([query])
assert results[0].results[0].id == "chunk2"
@pytest.mark.asyncio
async def test_delete(postgres_datastore):
await postgres_datastore.delete(delete_all=True)
chunk1 = DocumentChunk(
id="chunk1",
text="Sample text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
chunk2 = DocumentChunk(
id="chunk2",
text="Another text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
await postgres_datastore._upsert({"doc1": [chunk1], "doc2": [chunk2]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Another query",
embedding=query_embedding,
)
results = await postgres_datastore._query([query])
assert len(results[0].results) == 2
assert results[0].results[0].id == "chunk1"
assert results[0].results[1].id == "chunk2"
await postgres_datastore.delete(ids=["doc1"])
results_after_delete = await postgres_datastore._query([query])
assert len(results_after_delete[0].results) == 1
assert results_after_delete[0].results[0].id == "chunk2"
@pytest.mark.asyncio
async def test_delete_all(postgres_datastore):
await postgres_datastore.delete(delete_all=True)
chunk = DocumentChunk(
id="chunk",
text="Another text",
embedding=[1] * 1536,
metadata=DocumentChunkMetadata(),
)
await postgres_datastore._upsert({"doc": [chunk]})
query_embedding = [1] * 1536
query = QueryWithEmbedding(
query="Another query",
embedding=query_embedding,
top_k=1,
)
results = await postgres_datastore._query([query])
assert len(results) == 1
assert len(results[0].results) == 1
assert results[0].results[0].id == "chunk"
await postgres_datastore.delete(delete_all=True)
results_after_delete = await postgres_datastore._query([query])
assert len(results_after_delete[0].results) == 0
|
from typing import Dict, List
import pytest
import qdrant_client
from qdrant_client.http.models import PayloadSchemaType
from datastore.providers.qdrant_datastore import QdrantDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
QueryWithEmbedding,
DocumentMetadataFilter,
Source,
)
def create_embedding(non_zero_pos: int, size: int) -> List[float]:
vector = [0.0] * size
vector[non_zero_pos % size] = 1.0
return vector
@pytest.fixture
def qdrant_datastore() -> QdrantDataStore:
return QdrantDataStore(
collection_name="documents", vector_size=5, recreate_collection=True
)
@pytest.fixture
def client() -> qdrant_client.QdrantClient:
return qdrant_client.QdrantClient()
@pytest.fixture
def initial_document_chunks() -> Dict[str, List[DocumentChunk]]:
first_doc_chunks = [
DocumentChunk(
id=f"first-doc-{i}",
text=f"Lorem ipsum {i}",
metadata=DocumentChunkMetadata(),
embedding=create_embedding(i, 5),
)
for i in range(4, 7)
]
return {
"first-doc": first_doc_chunks,
}
@pytest.fixture
def document_chunks() -> Dict[str, List[DocumentChunk]]:
first_doc_chunks = [
DocumentChunk(
id=f"first-doc_{i}",
text=f"Lorem ipsum {i}",
metadata=DocumentChunkMetadata(
source=Source.email, created_at="2023-03-05", document_id="first-doc"
),
embedding=create_embedding(i, 5),
)
for i in range(3)
]
second_doc_chunks = [
DocumentChunk(
id=f"second-doc_{i}",
text=f"Dolor sit amet {i}",
metadata=DocumentChunkMetadata(
created_at="2023-03-04", document_id="second-doc"
),
embedding=create_embedding(i + len(first_doc_chunks), 5),
)
for i in range(2)
]
return {
"first-doc": first_doc_chunks,
"second-doc": second_doc_chunks,
}
@pytest.mark.asyncio
async def test_datastore_creates_payload_indexes(
qdrant_datastore,
client,
):
collection_info = client.get_collection(collection_name="documents")
assert 2 == len(collection_info.payload_schema)
assert "created_at" in collection_info.payload_schema
created_at = collection_info.payload_schema["created_at"]
assert PayloadSchemaType.INTEGER == created_at.data_type
assert "metadata.document_id" in collection_info.payload_schema
document_id = collection_info.payload_schema["metadata.document_id"]
assert PayloadSchemaType.KEYWORD == document_id.data_type
@pytest.mark.asyncio
async def test_upsert_creates_all_points(
qdrant_datastore,
client,
document_chunks,
):
document_ids = await qdrant_datastore._upsert(document_chunks)
assert 2 == len(document_ids)
assert 5 == client.count(collection_name="documents").count
@pytest.mark.asyncio
async def test_upsert_does_not_remove_existing_documents_but_store_new(
qdrant_datastore,
client,
initial_document_chunks,
document_chunks,
):
"""
This test ensures calling ._upsert no longer removes the existing document chunks,
as they are currently removed in the .upsert method directly.
"""
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(initial_document_chunks)
await qdrant_datastore._upsert(document_chunks)
assert 8 == client.count(collection_name="documents").count
@pytest.mark.asyncio
async def test_query_returns_all_on_single_query(qdrant_datastore, document_chunks):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
query = QueryWithEmbedding(
query="lorem",
top_k=5,
embedding=[0.5, 0.5, 0.5, 0.5, 0.5],
)
query_results = await qdrant_datastore._query(queries=[query])
assert 1 == len(query_results)
assert "lorem" == query_results[0].query
assert 5 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_query_returns_closest_entry(qdrant_datastore, document_chunks):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
query = QueryWithEmbedding(
query="ipsum",
top_k=1,
embedding=[0.0, 0.0, 0.5, 0.0, 0.0],
)
query_results = await qdrant_datastore._query(queries=[query])
assert 1 == len(query_results)
assert "ipsum" == query_results[0].query
assert 1 == len(query_results[0].results)
first_document_chunk = query_results[0].results[0]
assert 0.0 <= first_document_chunk.score <= 1.0
assert Source.email == first_document_chunk.metadata.source
assert "2023-03-05" == first_document_chunk.metadata.created_at
assert "first-doc" == first_document_chunk.metadata.document_id
@pytest.mark.asyncio
async def test_query_filter_by_document_id_returns_this_document_chunks(
qdrant_datastore, document_chunks
):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
first_query = QueryWithEmbedding(
query="dolor",
filter=DocumentMetadataFilter(document_id="first-doc"),
top_k=5,
embedding=[0.0, 0.0, 0.5, 0.0, 0.0],
)
second_query = QueryWithEmbedding(
query="dolor",
filter=DocumentMetadataFilter(document_id="second-doc"),
top_k=5,
embedding=[0.0, 0.0, 0.5, 0.0, 0.0],
)
query_results = await qdrant_datastore._query(queries=[first_query, second_query])
assert 2 == len(query_results)
assert "dolor" == query_results[0].query
assert "dolor" == query_results[1].query
assert 3 == len(query_results[0].results)
assert 2 == len(query_results[1].results)
@pytest.mark.asyncio
@pytest.mark.parametrize("start_date", ["2023-03-05T00:00:00", "2023-03-05"])
async def test_query_start_date_converts_datestring(
qdrant_datastore,
document_chunks,
start_date,
):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
query = QueryWithEmbedding(
query="sit amet",
filter=DocumentMetadataFilter(start_date=start_date),
top_k=5,
embedding=[0.0, 0.0, 0.5, 0.0, 0.0],
)
query_results = await qdrant_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 3 == len(query_results[0].results)
@pytest.mark.asyncio
@pytest.mark.parametrize("end_date", ["2023-03-04T00:00:00", "2023-03-04"])
async def test_query_end_date_converts_datestring(
qdrant_datastore,
document_chunks,
end_date,
):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
query = QueryWithEmbedding(
query="sit amet",
filter=DocumentMetadataFilter(end_date=end_date),
top_k=5,
embedding=[0.0, 0.0, 0.5, 0.0, 0.0],
)
query_results = await qdrant_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 2 == len(query_results[0].results)
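# The two parametrized tests above assume the datastore normalises both bare
# dates and full ISO timestamps to an integer epoch before filtering on the
# created_at payload index (see the INTEGER schema assertion earlier in this
# file). A hedged sketch of that conversion (hypothetical helper):
from datetime import datetime

def _to_epoch_sketch(date_string: str) -> int:
    # pad bare dates to midnight so both parametrized forms parse the same way
    if "T" not in date_string:
        date_string += "T00:00:00"
    return int(datetime.fromisoformat(date_string).timestamp())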
@pytest.mark.asyncio
async def test_delete_removes_by_ids(
qdrant_datastore,
client,
document_chunks,
):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
await qdrant_datastore.delete(ids=["first-doc"])
assert 2 == client.count(collection_name="documents").count
@pytest.mark.asyncio
async def test_delete_removes_by_document_id_filter(
qdrant_datastore,
client,
document_chunks,
):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
await qdrant_datastore.delete(
filter=DocumentMetadataFilter(document_id="first-doc")
)
assert 2 == client.count(collection_name="documents").count
@pytest.mark.asyncio
async def test_delete_removes_all(
qdrant_datastore,
client,
document_chunks,
):
# Fill the database with document chunks before running the actual test
await qdrant_datastore._upsert(document_chunks)
await qdrant_datastore.delete(delete_all=True)
assert 0 == client.count(collection_name="documents").count
|
# from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
# env_path = Path(".") / "zilliz.env"
# load_dotenv(dotenv_path=env_path, verbose=True)
import pytest
from datastore.providers.zilliz_datastore import (
ZillizDataStore,
)
from datastore.providers.milvus_datastore import (
EMBEDDING_FIELD,
)
# Note: Only do basic test here, the ZillizDataStore is derived from MilvusDataStore.
@pytest.fixture
def zilliz_datastore():
return ZillizDataStore()
@pytest.mark.asyncio
async def test_zilliz(zilliz_datastore):
assert zilliz_datastore.col.has_index()
index_list = [x.to_dict() for x in zilliz_datastore.col.indexes]
for index in index_list:
if index['index_name'] == EMBEDDING_FIELD:
assert 'AUTOINDEX' == index['index_param']['index_type']
|
import logging
import os
import pytest
import weaviate
from _pytest.logging import LogCaptureFixture
from fastapi.testclient import TestClient
from loguru import logger
from weaviate import Client
from datastore.providers.weaviate_datastore import (
SCHEMA,
WeaviateDataStore,
extract_schema_properties,
)
from models.models import DocumentMetadataFilter, Source
from server.main import app
BEARER_TOKEN = os.getenv("BEARER_TOKEN")
client = TestClient(app)
client.headers["Authorization"] = f"Bearer {BEARER_TOKEN}"
@pytest.fixture
def weaviate_client():
host = os.getenv("WEAVIATE_HOST", "http://localhost")
port = os.getenv("WEAVIATE_PORT", "8080")
client = Client(f"{host}:{port}")
yield client
client.schema.delete_all()
@pytest.fixture
def test_db(weaviate_client, documents):
weaviate_client.schema.delete_all()
weaviate_client.schema.create_class(SCHEMA)
response = client.post("/upsert", json={"documents": documents})
if response.status_code != 200:
raise Exception(
f"Could not upsert to test client.\nStatus Code: {response.status_code}\nResponse:\n{response.json()}"
)
yield client
@pytest.fixture
def documents():
documents = []
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
texts = [
"lorem ipsum dolor sit amet",
"consectetur adipiscing elit",
"sed do eiusmod tempor incididunt",
]
ids = ["abc_123", "def_456", "ghi_789"]
sources = ["chat", "email", "email"]
created_at = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"2021-01-21T10:00:00-02:00",
]
for i in range(3):
documents.append(
{
"id": ids[i],
"text": texts[i],
"metadata": {
"source": sources[i],
"source_id": "5325",
"url": "http://example.com",
"created_at": created_at[i],
"author": authors[i],
},
}
)
no_metadata_doc = {
"id": "jkl_012",
"text": "no metadata",
}
documents.append(no_metadata_doc)
partial_metadata_doc = {
"id": "mno_345",
"text": "partial metadata",
"metadata": {
"source": "file",
},
}
documents.append(partial_metadata_doc)
yield documents
@pytest.fixture
def caplog(caplog: LogCaptureFixture):
handler_id = logger.add(caplog.handler, format="{message}")
yield caplog
logger.remove(handler_id)
@pytest.mark.parametrize(
"document_id", [("abc_123"), ("9a253e0b-d2df-5c2e-be6d-8e9b1f4ae345")]
)
def test_upsert(weaviate_client, document_id):
weaviate_client.schema.delete_all()
weaviate_client.schema.create_class(SCHEMA)
text = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Fusce in ipsum eget dolor malesuada fermentum at ac massa.
Aliquam erat volutpat. Sed eu velit est. Morbi semper quam id urna fringilla lacinia. Vivamus sit amet velit id lorem
pretium molestie. Nulla tincidunt sapien eu nulla consequat, a lacinia justo facilisis. Maecenas euismod urna sapien,
sit amet tincidunt est dapibus ac. Sed in lorem in nunc tincidunt bibendum. Nullam vel urna vitae nulla iaculis rutrum.
Suspendisse varius, massa a dignissim vehicula, urna ligula tincidunt orci, id fringilla velit tellus eu metus. Sed
vestibulum, nisl in malesuada tempor, nisi turpis facilisis nibh, nec dictum velit velit vel ex. Donec euismod,
leo ut sollicitudin tempor, dolor augue blandit nunc, eu lacinia ipsum turpis vitae nulla. Aenean bibendum
tincidunt magna in pulvinar. Sed tincidunt vel nisi ac maximus.
"""
source = "email"
source_id = "5325"
url = "http://example.com"
created_at = "2022-12-16T08:00:00+01:00"
author = "Max Mustermann"
documents = {
"documents": [
{
"id": document_id,
"text": text,
"metadata": {
"source": source,
"source_id": source_id,
"url": url,
"created_at": created_at,
"author": author,
},
}
]
}
response = client.post("/upsert", json=documents)
assert response.status_code == 200
assert response.json() == {"ids": [document_id]}
properties = [
"chunk_id",
"document_id",
"source",
"source_id",
"url",
"created_at",
"author",
]
where_filter = {
"path": ["document_id"],
"operator": "Equal",
"valueString": document_id,
}
weaviate_doc = (
weaviate_client.query.get("OpenAIDocument", properties)
.with_additional("vector")
.with_where(where_filter)
.with_sort({"path": ["chunk_id"], "order": "asc"})
.do()
)
weaviate_docs = weaviate_doc["data"]["Get"]["OpenAIDocument"]
assert len(weaviate_docs) == 2
for i, weaviate_doc in enumerate(weaviate_docs):
assert weaviate_doc["chunk_id"] == f"{document_id}_{i}"
assert weaviate_doc["document_id"] == document_id
assert weaviate_doc["source"] == source
assert weaviate_doc["source_id"] == source_id
assert weaviate_doc["url"] == url
assert weaviate_doc["created_at"] == created_at
assert weaviate_doc["author"] == author
assert weaviate_doc["_additional"]["vector"]
def test_upsert_no_metadata(weaviate_client):
weaviate_client.schema.delete_all()
weaviate_client.schema.create_class(SCHEMA)
no_metadata_doc = {
"id": "jkl_012",
"text": "no metadata",
}
metadata_properties = [
"source",
"source_id",
"url",
"created_at",
"author",
]
response = client.post("/upsert", json={"documents": [no_metadata_doc]})
assert response.status_code == 200
weaviate_doc = weaviate_client.query.get("OpenAIDocument", metadata_properties).do()
weaviate_doc = weaviate_doc["data"]["Get"]["OpenAIDocument"][0]
for _, metadata_value in weaviate_doc.items():
assert metadata_value is None
@pytest.mark.parametrize(
"test_document, expected_status_code",
[
({"id": "abc_123", "text": "some text"}, 200),
({"id": "abc_123"}, 422),
({"text": "some text"}, 200),
],
)
def test_upsert_invalid_documents(weaviate_client, test_document, expected_status_code):
weaviate_client.schema.delete_all()
weaviate_client.schema.create_class(SCHEMA)
response = client.post("/upsert", json={"documents": [test_document]})
assert response.status_code == expected_status_code
@pytest.mark.parametrize(
"query, expected_num_results",
[
({"query": "consectetur adipiscing", "top_k": 3}, 3),
({"query": "consectetur adipiscing elit", "filter": {"source": "email"}}, 2),
(
{
"query": "sed do eiusmod tempor",
"filter": {
"start_date": "2020-01-01T00:00:00Z",
"end_date": "2022-12-31T00:00:00Z",
},
},
1,
),
(
{
"query": "some random query",
"filter": {"start_date": "2009-01-01T00:00:00Z"},
"top_k": 3,
},
2,
),
(
{
"query": "another random query",
"filter": {"end_date": "1929-12-31T00:00:00Z"},
"top_k": 3,
},
1,
),
],
)
def test_query(test_db, query, expected_num_results):
queries = {"queries": [query]}
response = client.post("/query", json=queries)
assert response.status_code == 200
num_docs = response.json()["results"][0]["results"]
assert len(num_docs) == expected_num_results
def test_delete(test_db, weaviate_client, caplog):
caplog.set_level(logging.DEBUG)
delete_request = {"ids": ["def_456"]}
response = client.request(method="delete", url="/delete", json=delete_request)
assert response.status_code == 200
assert response.json()["success"]
assert weaviate_client.data_object.get()["totalResults"] == 4
client.request(method="delete", url="/delete", json=delete_request)
assert "Failed to delete" in caplog.text
caplog.clear()
delete_request = {"filter": {"source": "email"}}
response = client.request(method="delete", url="/delete", json=delete_request)
assert response.status_code == 200
assert response.json()["success"]
assert weaviate_client.data_object.get()["totalResults"] == 3
client.request(method="delete", url="/delete", json=delete_request)
assert "Failed to delete" in caplog.text
delete_request = {"delete_all": True}
response = client.request(method="delete", url="/delete", json=delete_request)
assert response.status_code == 200
assert response.json()["success"]
assert not weaviate_client.data_object.get()["objects"]
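# Count arithmetic for the assertions above: the fixture upserts 5 documents;
# deleting "def_456" leaves 4, deleting the remaining source == "email"
# document leaves 3, and delete_all empties the class.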
def test_build_auth_credentials(monkeypatch):
# Test when WEAVIATE_URL ends with weaviate.network and WEAVIATE_API_KEY is set
with monkeypatch.context() as m:
m.setenv("WEAVIATE_URL", "https://example.weaviate.network")
m.setenv("WEAVIATE_API_KEY", "your_api_key")
auth_credentials = WeaviateDataStore._build_auth_credentials()
assert auth_credentials is not None
assert isinstance(auth_credentials, weaviate.auth.AuthApiKey)
assert auth_credentials.api_key == "your_api_key"
# Test when WEAVIATE_URL ends with weaviate.network and WEAVIATE_API_KEY is not set
with monkeypatch.context() as m:
m.setenv("WEAVIATE_URL", "https://example.weaviate.network")
m.delenv("WEAVIATE_API_KEY", raising=False)
with pytest.raises(
ValueError, match="WEAVIATE_API_KEY environment variable is not set"
):
WeaviateDataStore._build_auth_credentials()
# Test when WEAVIATE_URL does not end with weaviate.network
with monkeypatch.context() as m:
m.setenv("WEAVIATE_URL", "https://example.notweaviate.network")
m.setenv("WEAVIATE_API_KEY", "your_api_key")
auth_credentials = WeaviateDataStore._build_auth_credentials()
assert auth_credentials is None
# Test when WEAVIATE_URL is not set
with monkeypatch.context() as m:
m.delenv("WEAVIATE_URL", raising=False)
m.setenv("WEAVIATE_API_KEY", "your_api_key")
auth_credentials = WeaviateDataStore._build_auth_credentials()
assert auth_credentials is None
def test_extract_schema_properties():
class_schema = {
"class": "Question",
"description": "Information from a Jeopardy! question",
"properties": [
{
"dataType": ["text"],
"description": "The question",
"name": "question",
},
{
"dataType": ["text"],
"description": "The answer",
"name": "answer",
},
{
"dataType": ["text"],
"description": "The category",
"name": "category",
},
],
"vectorizer": "text2vec-openai",
}
results = extract_schema_properties(class_schema)
assert results == {"question", "answer", "category"}
def test_reuse_schema(weaviate_client, caplog):
caplog.set_level(logging.DEBUG)
weaviate_client.schema.delete_all()
WeaviateDataStore()
assert "Creating index" in caplog.text
WeaviateDataStore()
assert "Will reuse this schema" in caplog.text
def test_build_date_filters():
filter = DocumentMetadataFilter(
document_id=None,
source=None,
source_id=None,
author=None,
start_date="2020-01-01T00:00:00Z",
end_date="2022-12-31T00:00:00Z",
)
actual_result = WeaviateDataStore.build_filters(filter)
expected_result = {
"operator": "And",
"operands": [
{
"path": ["created_at"],
"operator": "GreaterThanEqual",
"valueDate": "2020-01-01T00:00:00Z",
},
{
"path": ["created_at"],
"operator": "LessThanEqual",
"valueDate": "2022-12-31T00:00:00Z",
},
],
}
assert actual_result == expected_result
@pytest.mark.parametrize(
"test_input, expected_result",
[
("abc_123", False),
("b2e4133c-c956-5684-bbf5-584e50ec3647", True), # version 5
("f6179953-11d8-4ee0-9af8-e51e00dbf727", True), # version 4
("16fe8165-3c08-348f-a015-a8bb31e26b5c", True), # version 3
("bda85f97-be72-11ed-9291-00000000000a", False), # version 1
],
)
def test_is_valid_weaviate_id(test_input, expected_result):
actual_result = WeaviateDataStore._is_valid_weaviate_id(test_input)
assert actual_result == expected_result
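# A hedged sketch of the UUID check exercised above, assuming Weaviate only
# accepts RFC 4122 UUIDs of versions 3, 4, or 5 (hypothetical
# reimplementation, not the datastore's actual code):
import uuid

def _is_valid_weaviate_id_sketch(candidate: str) -> bool:
    try:
        return uuid.UUID(candidate).version in (3, 4, 5)
    except ValueError:
        # non-UUID strings like "abc_123" are rejected
        return False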
def test_upsert_same_docid(test_db, weaviate_client):
def get_doc_by_document_id(document_id):
properties = [
"chunk_id",
"document_id",
"source",
"source_id",
"url",
"created_at",
"author",
]
where_filter = {
"path": ["document_id"],
"operator": "Equal",
"valueString": document_id,
}
results = (
weaviate_client.query.get("OpenAIDocument", properties)
.with_additional("id")
.with_where(where_filter)
.with_sort({"path": ["chunk_id"], "order": "asc"})
.do()
)
return results["data"]["Get"]["OpenAIDocument"]
def build_upsert_payload(document):
return {"documents": [document]}
# upsert a new document
# this is a document that has 2 chunks and
# the source is email
doc_id = "abc_123"
text = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Fusce in ipsum eget dolor malesuada fermentum at ac massa.
Aliquam erat volutpat. Sed eu velit est. Morbi semper quam id urna fringilla lacinia. Vivamus sit amet velit id lorem
pretium molestie. Nulla tincidunt sapien eu nulla consequat, a lacinia justo facilisis. Maecenas euismod urna sapien,
sit amet tincidunt est dapibus ac. Sed in lorem in nunc tincidunt bibendum. Nullam vel urna vitae nulla iaculis rutrum.
Suspendisse varius, massa a dignissim vehicula, urna ligula tincidunt orci, id fringilla velit tellus eu metus. Sed
vestibulum, nisl in malesuada tempor, nisi turpis facilisis nibh, nec dictum velit velit vel ex. Donec euismod,
leo ut sollicitudin tempor, dolor augue blandit nunc, eu lacinia ipsum turpis vitae nulla. Aenean bibendum
tincidunt magna in pulvinar. Sed tincidunt vel nisi ac maximus.
"""
document = {
"id": doc_id,
"text": text,
"metadata": {"source": Source.email},
}
response = client.post("/upsert", json=build_upsert_payload(document))
assert response.status_code == 200
weaviate_doc = get_doc_by_document_id(doc_id)
assert len(weaviate_doc) == 2
for chunk in weaviate_doc:
assert chunk["source"] == Source.email
# now update the source to file
# user still has to specify the text
# because text is a required field
document["metadata"]["source"] = Source.file
response = client.post("/upsert", json=build_upsert_payload(document))
assert response.status_code == 200
weaviate_doc = get_doc_by_document_id(doc_id)
assert len(weaviate_doc) == 2
for chunk in weaviate_doc:
assert chunk["source"] == "file"
# now update the text so that it is only 1 chunk
# user does not need to specify metadata
# since it is optional
document["text"] = "This is a short text"
document.pop("metadata")
response = client.post("/upsert", json=build_upsert_payload(document))
assert response.status_code == 200
weaviate_doc = get_doc_by_document_id(doc_id)
assert len(weaviate_doc) == 1
# TODO: implement an update function.
# The source should still be "file", but it is None right now
# because an update function is out of scope.
assert weaviate_doc[0]["source"] is None
@pytest.mark.parametrize(
"url, expected_result",
[
("https://example.weaviate.network", True),
("https://example.weaviate.network/", True),
("https://example.weaviate.cloud", True),
("https://example.weaviate.cloud/", True),
("https://example.notweaviate.network", False),
("https://weaviate.network.example.com", False),
("https://example.weaviate.network/somepage", False),
("", False),
],
)
def test_is_wcs_domain(url, expected_result):
assert WeaviateDataStore._is_wcs_domain(url) == expected_result
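# A hedged sketch of a WCS-domain check consistent with the parametrization
# above (hypothetical, not WeaviateDataStore's actual implementation): only
# hosts under weaviate.network or weaviate.cloud count, with no path beyond
# an optional trailing slash.
from urllib.parse import urlparse

def _is_wcs_domain_sketch(url: str) -> bool:
    parsed = urlparse(url)
    # the leading dot rejects look-alikes such as example.notweaviate.network
    host_ok = parsed.netloc.endswith((".weaviate.network", ".weaviate.cloud"))
    return host_ok and parsed.path in ("", "/")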
|
import pytest
from models.models import (
DocumentChunkMetadata,
DocumentMetadataFilter,
DocumentChunk,
QueryWithEmbedding,
Source,
)
from datastore.providers.elasticsearch_datastore import (
ElasticsearchDataStore,
)
import time
DIM_SIZE = 1536
@pytest.fixture
def elasticsearch_datastore():
return ElasticsearchDataStore()
def sample_embedding(one_element_poz: int):
embedding = [0] * DIM_SIZE
embedding[one_element_poz % DIM_SIZE] = 1
return embedding
def sample_embeddings(num: int, one_element_start: int = 0):
embeddings = []
for x in range(num):
embedding = [0] * DIM_SIZE
embedding[(x + one_element_start) % DIM_SIZE] = 1
embeddings.append(embedding)
return embeddings
@pytest.fixture
def document_chunk_one():
doc_id = "abc"
doc_chunks = []
ids = ["123", "456", "789"]
texts = [
"Aenean euismod bibendum laoreet",
"Vivamus non enim vitae tortor",
"Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae",
]
sources = [Source.email, Source.file, Source.chat]
created_ats = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"2021-01-21T10:00:00-02:00",
]
authors = ["Fred Smith", "Bob Doe", "Appleton Doe"]
embeddings = sample_embeddings(len(texts))
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id,
source=sources[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks.append(chunk)
return {doc_id: doc_chunks}
@pytest.mark.asyncio
async def test_upsert(elasticsearch_datastore, document_chunk_one):
await elasticsearch_datastore.delete(delete_all=True)
res = await elasticsearch_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
time.sleep(1)
results = elasticsearch_datastore.client.search(
index=elasticsearch_datastore.index_name, query={"match_all": {}}
)
assert results["hits"]["total"]["value"] == 3
elasticsearch_datastore.client.indices.delete(
index=elasticsearch_datastore.index_name
)
@pytest.mark.asyncio
async def test_upsert_query_all(elasticsearch_datastore, document_chunk_one):
await elasticsearch_datastore.delete(delete_all=True)
res = await elasticsearch_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
time.sleep(1)
query = QueryWithEmbedding(
query="Aenean",
top_k=10,
embedding=sample_embedding(0), # type: ignore
)
query_results = await elasticsearch_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 3 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_delete_with_document_id(elasticsearch_datastore, document_chunk_one):
await elasticsearch_datastore.delete(delete_all=True)
res = await elasticsearch_datastore._upsert(document_chunk_one)
time.sleep(1)
assert res == list(document_chunk_one.keys())
await elasticsearch_datastore.delete([res[0]])
time.sleep(1)
query = QueryWithEmbedding(
query="Aenean",
top_k=9,
embedding=sample_embedding(0), # type: ignore
)
query_results = await elasticsearch_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 0 == len(query_results[0].results)
elasticsearch_datastore.client.indices.delete(
index=elasticsearch_datastore.index_name
)
@pytest.mark.asyncio
async def test_delete_with_source_filter(elasticsearch_datastore, document_chunk_one):
await elasticsearch_datastore.delete(delete_all=True)
res = await elasticsearch_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
time.sleep(1)
await elasticsearch_datastore.delete(
filter=DocumentMetadataFilter(
source=Source.email,
)
)
time.sleep(1)
query = QueryWithEmbedding(
query="Aenean",
top_k=9,
embedding=sample_embedding(0), # type: ignore
)
query_results = await elasticsearch_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 2 == len(query_results[0].results)
assert "456" == query_results[0].results[0].id
elasticsearch_datastore.client.indices.delete(
index=elasticsearch_datastore.index_name
)
|
# from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
# env_path = Path(".") / "milvus.env"
# load_dotenv(dotenv_path=env_path, verbose=True)
import pytest
from models.models import (
DocumentChunkMetadata,
DocumentMetadataFilter,
DocumentChunk,
QueryWithEmbedding,
Source,
)
from datastore.providers.milvus_datastore import (
OUTPUT_DIM,
MilvusDataStore,
)
@pytest.fixture
def milvus_datastore():
return MilvusDataStore(consistency_level = "Strong")
def sample_embedding(one_element_poz: int):
embedding = [0] * OUTPUT_DIM
embedding[one_element_poz % OUTPUT_DIM] = 1
return embedding
def sample_embeddings(num: int, one_element_start: int = 0):
# since the metric type is cosine, we create vectors that contain a single 1, with all other elements 0
embeddings = []
for x in range(num):
embedding = [0] * OUTPUT_DIM
embedding[(x + one_element_start) % OUTPUT_DIM] = 1
embeddings.append(embedding)
return embeddings
@pytest.fixture
def document_chunk_one():
doc_id = "zerp"
doc_chunks = []
ids = ["abc_123", "def_456", "ghi_789"]
texts = [
"lorem ipsum dolor sit amet",
"consectetur adipiscing elit",
"sed do eiusmod tempor incididunt",
]
sources = [Source.email, Source.file, Source.chat]
source_ids = ["foo", "bar", "baz"]
urls = ["foo.com", "bar.net", "baz.org"]
created_ats = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"2021-01-21T10:00:00-02:00",
]
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
embeddings = sample_embeddings(len(texts))
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id,
source=sources[i],
source_id=source_ids[i],
url=urls[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks.append(chunk)
return {doc_id: doc_chunks}
@pytest.fixture
def document_chunk_two():
doc_id_1 = "zerp"
doc_chunks_1 = []
ids = ["abc_123", "def_456", "ghi_789"]
texts = [
"1lorem ipsum dolor sit amet",
"2consectetur adipiscing elit",
"3sed do eiusmod tempor incididunt",
]
sources = [Source.email, Source.file, Source.chat]
source_ids = ["foo", "bar", "baz"]
urls = ["foo.com", "bar.net", "baz.org"]
created_ats = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"3021-01-21T10:00:00-02:00",
]
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
embeddings = sample_embeddings(len(texts))
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id_1,
source=sources[i],
source_id=source_ids[i],
url=urls[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks_1.append(chunk)
doc_id_2 = "merp"
doc_chunks_2 = []
ids = ["jkl_123", "lmn_456", "opq_789"]
texts = [
"3sdsc efac feas sit qweas",
"4wert sdfas fdsc",
"52dsc fdsf eiusmod asdasd incididunt",
]
sources = [Source.email, Source.file, Source.chat]
source_ids = ["foo", "bar", "baz"]
urls = ["foo.com", "bar.net", "baz.org"]
created_ats = [
"4929-10-28T09:30:00-05:00",
"5009-01-03T16:39:57-08:00",
"6021-01-21T10:00:00-02:00",
]
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
embeddings = sample_embeddings(len(texts), 3)
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id_2,
source=sources[i],
source_id=source_ids[i],
url=urls[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks_2.append(chunk)
return {doc_id_1: doc_chunks_1, doc_id_2: doc_chunks_2}
@pytest.mark.asyncio
async def test_upsert(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
assert 3 == milvus_datastore.col.num_entities
milvus_datastore.col.drop()
@pytest.mark.asyncio
async def test_reload(milvus_datastore, document_chunk_one, document_chunk_two):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
assert 3 == milvus_datastore.col.num_entities
new_store = MilvusDataStore()
another_in = {i: document_chunk_two[i] for i in document_chunk_two if i != res[0]}
res = await new_store._upsert(another_in)
new_store.col.flush()
assert 6 == new_store.col.num_entities
query = QueryWithEmbedding(
query="lorem",
top_k=10,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
new_store.col.drop()
@pytest.mark.asyncio
async def test_upsert_query_all(milvus_datastore, document_chunk_two):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_two)
assert res == list(document_chunk_two.keys())
milvus_datastore.col.flush()
# Num entities currently doesn't track deletes
query = QueryWithEmbedding(
query="lorem",
top_k=10,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 6 == len(query_results[0].results)
milvus_datastore.col.drop()
@pytest.mark.asyncio
async def test_query_accuracy(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
query = QueryWithEmbedding(
query="lorem",
top_k=1,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 1 == len(query_results[0].results)
assert 1.0 == query_results[0].results[0].score
assert "abc_123" == query_results[0].results[0].id
milvus_datastore.col.drop()
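# Under the cosine metric noted above, sample_embedding(0) coincides with
# the first chunk's one-hot embedding, so the top hit scores exactly 1.0
# while every other one-hot chunk is orthogonal and scores 0.0.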
@pytest.mark.asyncio
async def test_query_filter(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
query = QueryWithEmbedding(
query="lorem",
top_k=1,
embedding=sample_embedding(0),
filter=DocumentMetadataFilter(
start_date="2000-01-03T16:39:57-08:00", end_date="2010-01-03T16:39:57-08:00"
),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 1 == len(query_results[0].results)
assert 1.0 != query_results[0].results[0].score
assert "def_456" == query_results[0].results[0].id
milvus_datastore.col.drop()
@pytest.mark.asyncio
async def test_delete_with_date_filter(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
await milvus_datastore.delete(
filter=DocumentMetadataFilter(
end_date="2009-01-03T16:39:57-08:00",
)
)
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 1 == len(query_results[0].results)
assert "ghi_789" == query_results[0].results[0].id
milvus_datastore.col.drop()
@pytest.mark.asyncio
async def test_delete_with_source_filter(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
await milvus_datastore.delete(
filter=DocumentMetadataFilter(
source=Source.email,
)
)
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 2 == len(query_results[0].results)
assert "def_456" == query_results[0].results[0].id
milvus_datastore.col.drop()
@pytest.mark.asyncio
async def test_delete_with_document_id_filter(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
await milvus_datastore.delete(
filter=DocumentMetadataFilter(
document_id=res[0],
)
)
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 0 == len(query_results[0].results)
milvus_datastore.col.drop()
@pytest.mark.asyncio
async def test_delete_with_document_id(milvus_datastore, document_chunk_one):
await milvus_datastore.delete(delete_all=True)
res = await milvus_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
milvus_datastore.col.flush()
await milvus_datastore.delete([res[0]])
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=sample_embedding(0),
)
query_results = await milvus_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 0 == len(query_results[0].results)
milvus_datastore.col.drop()
# if __name__ == '__main__':
# import sys
# import pytest
# pytest.main(sys.argv)
|
import pytest
from models.models import (
DocumentChunkMetadata,
DocumentMetadataFilter,
DocumentChunk,
QueryWithEmbedding,
Source,
)
from datastore.providers.analyticdb_datastore import (
OUTPUT_DIM,
AnalyticDBDataStore,
)
@pytest.fixture
def analyticdb_datastore():
return AnalyticDBDataStore()
@pytest.fixture
def document_chunk_one():
doc_id = "zerp"
doc_chunks = []
ids = ["abc_123", "def_456", "ghi_789"]
texts = [
"lorem ipsum dolor sit amet",
"consectetur adipiscing elit",
"sed do eiusmod tempor incididunt",
]
sources = [Source.email, Source.file, Source.chat]
source_ids = ["foo", "bar", "baz"]
urls = ["foo.com", "bar.net", "baz.org"]
created_ats = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"2021-01-21T10:00:00-02:00",
]
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
embeddings = [[x] * OUTPUT_DIM for x in range(3)]
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id,
source=sources[i],
source_id=source_ids[i],
url=urls[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks.append(chunk)
return {doc_id: doc_chunks}
@pytest.fixture
def document_chunk_two():
doc_id_1 = "zerp"
doc_chunks_1 = []
ids = ["abc_123", "def_456", "ghi_789"]
texts = [
"1lorem ipsum dolor sit amet",
"2consectetur adipiscing elit",
"3sed do eiusmod tempor incididunt",
]
sources = [Source.email, Source.file, Source.chat]
source_ids = ["foo", "bar", "baz"]
urls = ["foo.com", "bar.net", "baz.org"]
created_ats = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"3021-01-21T10:00:00-02:00",
]
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
embeddings = [[x] * OUTPUT_DIM for x in range(3)]
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id_1,
source=sources[i],
source_id=source_ids[i],
url=urls[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks_1.append(chunk)
doc_id_2 = "merp"
doc_chunks_2 = []
ids = ["jkl_123", "lmn_456", "opq_789"]
texts = [
"3sdsc efac feas sit qweas",
"4wert sdfas fdsc",
"52dsc fdsf eiusmod asdasd incididunt",
]
sources = [Source.email, Source.file, Source.chat]
source_ids = ["foo", "bar", "baz"]
urls = ["foo.com", "bar.net", "baz.org"]
created_ats = [
"4929-10-28T09:30:00-05:00",
"5009-01-03T16:39:57-08:00",
"6021-01-21T10:00:00-02:00",
]
authors = ["Max Mustermann", "John Doe", "Jane Doe"]
embeddings = [[x] * OUTPUT_DIM for x in range(3, 6)]
for i in range(3):
chunk = DocumentChunk(
id=ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=doc_id_2,
source=sources[i],
source_id=source_ids[i],
url=urls[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks_2.append(chunk)
return {doc_id_1: doc_chunks_1, doc_id_2: doc_chunks_2}
@pytest.mark.asyncio
async def test_upsert(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
query = QueryWithEmbedding(
query="lorem",
top_k=10,
embedding=[0.5] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 3 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_reload(analyticdb_datastore, document_chunk_one, document_chunk_two):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
query = QueryWithEmbedding(
query="lorem",
top_k=10,
embedding=[0.5] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 3 == len(query_results[0].results)
new_store = AnalyticDBDataStore()
another_in = {i: document_chunk_two[i] for i in document_chunk_two if i != res[0]}
res = await new_store._upsert(another_in)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 6 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_upsert_query_all(analyticdb_datastore, document_chunk_two):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_two)
assert res == list(document_chunk_two.keys())
query = QueryWithEmbedding(
query="lorem",
top_k=10,
embedding=[0.5] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 6 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_query_accuracy(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
query = QueryWithEmbedding(
query="lorem",
top_k=1,
embedding=[0] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 1 == len(query_results[0].results)
assert 0 == query_results[0].results[0].score
assert "abc_123" == query_results[0].results[0].id
@pytest.mark.asyncio
async def test_query_filter(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
query = QueryWithEmbedding(
query="lorem",
top_k=1,
embedding=[0] * OUTPUT_DIM,
filter=DocumentMetadataFilter(
start_date="2000-01-03T16:39:57-08:00", end_date="2010-01-03T16:39:57-08:00"
),
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 1 == len(query_results[0].results)
assert 0 != query_results[0].results[0].score
assert "def_456" == query_results[0].results[0].id
@pytest.mark.asyncio
async def test_delete_with_date_filter(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
await analyticdb_datastore.delete(
filter=DocumentMetadataFilter(
end_date="2009-01-03T16:39:57-08:00",
)
)
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=[0] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 1 == len(query_results[0].results)
assert "ghi_789" == query_results[0].results[0].id
@pytest.mark.asyncio
async def test_delete_with_source_filter(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
await analyticdb_datastore.delete(
filter=DocumentMetadataFilter(
source=Source.email,
)
)
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=[0] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 2 == len(query_results[0].results)
assert "def_456" == query_results[0].results[0].id
@pytest.mark.asyncio
async def test_delete_with_document_id_filter(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
await analyticdb_datastore.delete(
filter=DocumentMetadataFilter(
document_id=res[0],
)
)
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=[0] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 0 == len(query_results[0].results)
@pytest.mark.asyncio
async def test_delete_with_document_id(analyticdb_datastore, document_chunk_one):
await analyticdb_datastore.delete(delete_all=True)
res = await analyticdb_datastore._upsert(document_chunk_one)
assert res == list(document_chunk_one.keys())
await analyticdb_datastore.delete([res[0]])
query = QueryWithEmbedding(
query="lorem",
top_k=9,
embedding=[0] * OUTPUT_DIM,
)
query_results = await analyticdb_datastore._query(queries=[query])
assert 1 == len(query_results)
assert 0 == len(query_results[0].results)
# if __name__ == '__main__':
# import sys
# import pytest
# pytest.main(sys.argv)
|
from typing import Dict, List
import pytest
from datastore.providers.llama_datastore import LlamaDataStore
from models.models import DocumentChunk, DocumentChunkMetadata, QueryWithEmbedding
def create_embedding(non_zero_pos: int, size: int) -> List[float]:
vector = [0.0] * size
vector[non_zero_pos % size] = 1.0
return vector
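# Note (added): create_embedding(4, 5) -> [0.0, 0.0, 0.0, 0.0, 1.0], while
# create_embedding(5, 5) wraps around to [1.0, 0.0, 0.0, 0.0, 0.0]. This is why
# the second query below, built from create_embedding(5, 5), exactly matches
# "first-doc-5", whose embedding also used non_zero_pos=5.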
@pytest.fixture
def initial_document_chunks() -> Dict[str, List[DocumentChunk]]:
first_doc_chunks = [
DocumentChunk(
id=f"first-doc-{i}",
text=f"Lorem ipsum {i}",
metadata=DocumentChunkMetadata(),
embedding=create_embedding(i, 5),
)
for i in range(4, 7)
]
return {
"first-doc": first_doc_chunks,
}
@pytest.fixture
def queries() -> List[QueryWithEmbedding]:
queries = [
QueryWithEmbedding(
query='Query 1',
top_k=1,
embedding=create_embedding(4, 5),
),
QueryWithEmbedding(
query='Query 2',
top_k=2,
embedding=create_embedding(5, 5),
),
]
return queries
@pytest.fixture
def llama_datastore() -> LlamaDataStore:
return LlamaDataStore()
@pytest.mark.asyncio
async def test_upsert(
llama_datastore: LlamaDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]]
) -> None:
"""Test basic upsert."""
doc_ids = await llama_datastore._upsert(initial_document_chunks)
assert doc_ids == [doc_id for doc_id in initial_document_chunks]
@pytest.mark.asyncio
async def test_query(
llama_datastore: LlamaDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
queries: List[QueryWithEmbedding],
) -> None:
"""Test basic query."""
# insert to prepare for test
await llama_datastore._upsert(initial_document_chunks)
query_results = await llama_datastore._query(queries)
assert len(query_results) == len(queries)
query_0_results = query_results[0].results
query_1_results = query_results[1].results
assert len(query_0_results) == 1
assert len(query_1_results) == 2
# NOTE: this is the correct behavior
assert query_0_results[0].id == 'first-doc-4'
assert query_1_results[0].id == 'first-doc-5'
assert query_1_results[1].id == 'first-doc-4'
@pytest.mark.asyncio
async def test_delete(
llama_datastore: LlamaDataStore,
initial_document_chunks: Dict[str, List[DocumentChunk]],
) -> None:
# insert to prepare for test
await llama_datastore._upsert(initial_document_chunks)
is_success = llama_datastore.delete(['first-doc'])
assert is_success
|
import os
from typing import Optional
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, Depends, Body, UploadFile
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.staticfiles import StaticFiles
from loguru import logger
from models.api import (
DeleteRequest,
DeleteResponse,
QueryRequest,
QueryResponse,
UpsertRequest,
UpsertResponse,
)
from datastore.factory import get_datastore
from services.file import get_document_from_file
from models.models import DocumentMetadata, Source
bearer_scheme = HTTPBearer()
BEARER_TOKEN = os.environ.get("BEARER_TOKEN")
assert BEARER_TOKEN is not None
def validate_token(credentials: HTTPAuthorizationCredentials = Depends(bearer_scheme)):
if credentials.scheme != "Bearer" or credentials.credentials != BEARER_TOKEN:
raise HTTPException(status_code=401, detail="Invalid or missing token")
return credentials
app = FastAPI(dependencies=[Depends(validate_token)])
app.mount("/.well-known", StaticFiles(directory=".well-known"), name="static")
# Create a sub-application, in order to access just the query endpoint in an OpenAPI schema, found at http://0.0.0.0:8000/sub/openapi.json when the app is running locally
sub_app = FastAPI(
title="Retrieval Plugin API",
description="A retrieval API for querying and filtering documents based on natural language queries and metadata",
version="1.0.0",
servers=[{"url": "https://your-app-url.com"}],
dependencies=[Depends(validate_token)],
)
app.mount("/sub", sub_app)
@app.post(
"/upsert-file",
response_model=UpsertResponse,
)
async def upsert_file(
file: UploadFile = File(...),
metadata: Optional[str] = Form(None),
):
try:
metadata_obj = (
DocumentMetadata.parse_raw(metadata)
if metadata
else DocumentMetadata(source=Source.file)
)
    except Exception:
        # fall back to default file metadata if the supplied metadata fails to parse
        metadata_obj = DocumentMetadata(source=Source.file)
document = await get_document_from_file(file, metadata_obj)
try:
ids = await datastore.upsert([document])
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
        raise HTTPException(status_code=500, detail=str(e))
@app.post(
"/upsert",
response_model=UpsertResponse,
)
async def upsert(
request: UpsertRequest = Body(...),
):
try:
ids = await datastore.upsert(request.documents)
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.post(
"/query",
response_model=QueryResponse,
)
async def query_main(
request: QueryRequest = Body(...),
):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@sub_app.post(
"/query",
response_model=QueryResponse,
# NOTE: We are describing the shape of the API endpoint input due to a current limitation in parsing arrays of objects from OpenAPI schemas. This will not be necessary in the future.
description="Accepts search query objects array each with query and optional filter. Break down complex questions into sub-questions. Refine results by criteria, e.g. time / source, don't do this often. Split queries if ResponseTooLargeError occurs.",
)
async def query(
request: QueryRequest = Body(...),
):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.delete(
"/delete",
response_model=DeleteResponse,
)
async def delete(
request: DeleteRequest = Body(...),
):
if not (request.ids or request.filter or request.delete_all):
raise HTTPException(
status_code=400,
detail="One of ids, filter, or delete_all is required",
)
try:
success = await datastore.delete(
ids=request.ids,
filter=request.filter,
delete_all=request.delete_all,
)
return DeleteResponse(success=success)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.on_event("startup")
async def startup():
global datastore
datastore = await get_datastore()
def start():
uvicorn.run("server.main:app", host="0.0.0.0", port=8000, reload=True)
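# Editorial sketch (not part of the original file): exercising the service with
# the bearer token it expects. The URL, token, and query text are placeholders.
#
#     import requests
#     resp = requests.post(
#         "http://0.0.0.0:8000/query",
#         headers={"Authorization": "Bearer <BEARER_TOKEN>"},
#         json={"queries": [{"query": "lorem", "top_k": 3}]},
#     )
#     resp.raise_for_status()
#     print(resp.json())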
|
from pydantic import BaseModel
from typing import List, Optional
from enum import Enum
class Source(str, Enum):
email = "email"
file = "file"
chat = "chat"
class DocumentMetadata(BaseModel):
source: Optional[Source] = None
source_id: Optional[str] = None
url: Optional[str] = None
created_at: Optional[str] = None
author: Optional[str] = None
class DocumentChunkMetadata(DocumentMetadata):
document_id: Optional[str] = None
class DocumentChunk(BaseModel):
id: Optional[str] = None
text: str
metadata: DocumentChunkMetadata
embedding: Optional[List[float]] = None
class DocumentChunkWithScore(DocumentChunk):
score: float
class Document(BaseModel):
id: Optional[str] = None
text: str
metadata: Optional[DocumentMetadata] = None
class DocumentWithChunks(Document):
chunks: List[DocumentChunk]
class DocumentMetadataFilter(BaseModel):
document_id: Optional[str] = None
source: Optional[Source] = None
source_id: Optional[str] = None
author: Optional[str] = None
start_date: Optional[str] = None # any date string format
end_date: Optional[str] = None # any date string format
class Query(BaseModel):
query: str
filter: Optional[DocumentMetadataFilter] = None
top_k: Optional[int] = 3
class QueryWithEmbedding(Query):
embedding: List[float]
class QueryResult(BaseModel):
query: str
results: List[DocumentChunkWithScore]
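# Illustrative usage (an editorial sketch, not part of the original module):
# build a Document with metadata and a date-range filter. All values below are
# invented for demonstration.
if __name__ == "__main__":
    example_doc = Document(
        id="doc-1",
        text="lorem ipsum dolor sit amet",
        metadata=DocumentMetadata(source=Source.file, author="Jane Doe"),
    )
    example_filter = DocumentMetadataFilter(
        start_date="2021-01-01T00:00:00Z",
        end_date="2021-12-31T23:59:59Z",
    )
    print(example_doc.json())
    print(example_filter.json())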
|
from models.models import (
Document,
DocumentMetadataFilter,
Query,
QueryResult,
)
from pydantic import BaseModel
from typing import List, Optional
class UpsertRequest(BaseModel):
documents: List[Document]
class UpsertResponse(BaseModel):
ids: List[str]
class QueryRequest(BaseModel):
queries: List[Query]
class QueryResponse(BaseModel):
results: List[QueryResult]
class DeleteRequest(BaseModel):
ids: Optional[List[str]] = None
filter: Optional[DocumentMetadataFilter] = None
delete_all: Optional[bool] = False
class DeleteResponse(BaseModel):
success: bool
|
# This is a version of the main.py file found in ../../../server/main.py for testing the plugin locally.
# Use the command `poetry run dev` to run this.
from typing import Optional
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, Body, UploadFile
from loguru import logger
from models.api import (
DeleteRequest,
DeleteResponse,
QueryRequest,
QueryResponse,
UpsertRequest,
UpsertResponse,
)
from datastore.factory import get_datastore
from services.file import get_document_from_file
from starlette.responses import FileResponse
from models.models import DocumentMetadata, Source
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
PORT = 3333
origins = [
f"http://localhost:{PORT}",
"https://chat.openai.com",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.route("/.well-known/ai-plugin.json")
async def get_manifest(request):
file_path = "./local_server/ai-plugin.json"
simple_headers = {}
simple_headers["Access-Control-Allow-Private-Network"] = "true"
return FileResponse(file_path, media_type="text/json", headers=simple_headers)
@app.route("/.well-known/logo.png")
async def get_logo(request):
file_path = "./local_server/logo.png"
    return FileResponse(file_path, media_type="image/png")
@app.route("/.well-known/openapi.yaml")
async def get_openapi(request):
file_path = "./local_server/openapi.yaml"
    return FileResponse(file_path, media_type="text/yaml")
@app.post(
"/upsert-file",
response_model=UpsertResponse,
)
async def upsert_file(
file: UploadFile = File(...),
metadata: Optional[str] = Form(None),
):
try:
metadata_obj = (
DocumentMetadata.parse_raw(metadata)
if metadata
else DocumentMetadata(source=Source.file)
)
    except Exception:
        # fall back to default file metadata if the supplied metadata fails to parse
        metadata_obj = DocumentMetadata(source=Source.file)
document = await get_document_from_file(file, metadata_obj)
try:
ids = await datastore.upsert([document])
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
        raise HTTPException(status_code=500, detail=str(e))
@app.post(
"/upsert",
response_model=UpsertResponse,
)
async def upsert(
request: UpsertRequest = Body(...),
):
try:
ids = await datastore.upsert(request.documents)
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.post("/query", response_model=QueryResponse)
async def query_main(request: QueryRequest = Body(...)):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.delete(
"/delete",
response_model=DeleteResponse,
)
async def delete(
request: DeleteRequest = Body(...),
):
if not (request.ids or request.filter or request.delete_all):
raise HTTPException(
status_code=400,
detail="One of ids, filter, or delete_all is required",
)
try:
success = await datastore.delete(
ids=request.ids,
filter=request.filter,
delete_all=request.delete_all,
)
return DeleteResponse(success=success)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.on_event("startup")
async def startup():
global datastore
datastore = await get_datastore()
def start():
uvicorn.run("local_server.main:app", host="localhost", port=PORT, reload=True)
|
from datastore.datastore import DataStore
import os
async def get_datastore() -> DataStore:
datastore = os.environ.get("DATASTORE")
assert datastore is not None
match datastore:
case "chroma":
from datastore.providers.chroma_datastore import ChromaDataStore
return ChromaDataStore()
case "llama":
from datastore.providers.llama_datastore import LlamaDataStore
return LlamaDataStore()
case "pinecone":
from datastore.providers.pinecone_datastore import PineconeDataStore
return PineconeDataStore()
case "weaviate":
from datastore.providers.weaviate_datastore import WeaviateDataStore
return WeaviateDataStore()
case "milvus":
from datastore.providers.milvus_datastore import MilvusDataStore
return MilvusDataStore()
case "zilliz":
from datastore.providers.zilliz_datastore import ZillizDataStore
return ZillizDataStore()
case "redis":
from datastore.providers.redis_datastore import RedisDataStore
return await RedisDataStore.init()
case "qdrant":
from datastore.providers.qdrant_datastore import QdrantDataStore
return QdrantDataStore()
case "azuresearch":
from datastore.providers.azuresearch_datastore import AzureSearchDataStore
return AzureSearchDataStore()
case "supabase":
from datastore.providers.supabase_datastore import SupabaseDataStore
return SupabaseDataStore()
case "postgres":
from datastore.providers.postgres_datastore import PostgresDataStore
return PostgresDataStore()
case "analyticdb":
from datastore.providers.analyticdb_datastore import AnalyticDBDataStore
return AnalyticDBDataStore()
case "elasticsearch":
from datastore.providers.elasticsearch_datastore import (
ElasticsearchDataStore,
)
return ElasticsearchDataStore()
case _:
            raise ValueError(
                f"Unsupported vector database: {datastore}. "
                "Try one of the following: chroma, llama, pinecone, weaviate, milvus, "
                "zilliz, redis, qdrant, azuresearch, supabase, postgres, analyticdb, or elasticsearch"
            )
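# Illustrative usage (an editorial sketch, not part of the original module):
# the factory dispatches on the DATASTORE environment variable, so set it
# before calling get_datastore(). "chroma" below is just one valid example.
if __name__ == "__main__":
    import asyncio

    os.environ.setdefault("DATASTORE", "chroma")
    print(asyncio.run(get_datastore()))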
|
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
import asyncio
from models.models import (
Document,
DocumentChunk,
DocumentMetadataFilter,
Query,
QueryResult,
QueryWithEmbedding,
)
from services.chunks import get_document_chunks
from services.openai import get_embeddings
class DataStore(ABC):
async def upsert(
self, documents: List[Document], chunk_token_size: Optional[int] = None
) -> List[str]:
"""
Takes in a list of documents and inserts them into the database.
First deletes all the existing vectors with the document id (if necessary, depends on the vector db), then inserts the new ones.
Return a list of document ids.
"""
# Delete any existing vectors for documents with the input document ids
await asyncio.gather(
*[
self.delete(
filter=DocumentMetadataFilter(
document_id=document.id,
),
delete_all=False,
)
for document in documents
if document.id
]
)
chunks = get_document_chunks(documents, chunk_token_size)
return await self._upsert(chunks)
@abstractmethod
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict of document ids mapped to lists of document chunks and inserts them into the database.
Return a list of document ids.
"""
raise NotImplementedError
async def query(self, queries: List[Query]) -> List[QueryResult]:
"""
Takes in a list of queries and filters and returns a list of query results with matching document chunks and scores.
"""
        # get a list of just the query strings from the Query list
query_texts = [query.query for query in queries]
query_embeddings = get_embeddings(query_texts)
# hydrate the queries with embeddings
queries_with_embeddings = [
QueryWithEmbedding(**query.dict(), embedding=embedding)
for query, embedding in zip(queries, query_embeddings)
]
return await self._query(queries_with_embeddings)
@abstractmethod
async def _query(self, queries: List[QueryWithEmbedding]) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
raise NotImplementedError
@abstractmethod
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Multiple parameters can be used at once.
Returns whether the operation was successful.
"""
raise NotImplementedError
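# Editorial sketch (not part of the original module): a minimal in-memory
# DataStore implementing the abstract hooks above. It ignores metadata filters
# in queries and scores every chunk identically; a real provider replaces these
# bodies with calls into its vector database.
from models.models import DocumentChunkWithScore


class InMemoryDataStore(DataStore):
    def __init__(self):
        self._chunks: Dict[str, List[DocumentChunk]] = {}

    async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
        self._chunks.update(chunks)
        return list(chunks.keys())

    async def _query(self, queries: List[QueryWithEmbedding]) -> List[QueryResult]:
        all_chunks = [c for chunk_list in self._chunks.values() for c in chunk_list]
        return [
            QueryResult(
                query=q.query,
                results=[
                    # a real provider would rank by vector similarity here
                    DocumentChunkWithScore(**c.dict(), score=0.0)
                    for c in all_chunks[: q.top_k]
                ],
            )
            for q in queries
        ]

    async def delete(
        self,
        ids: Optional[List[str]] = None,
        filter: Optional[DocumentMetadataFilter] = None,
        delete_all: Optional[bool] = None,
    ) -> bool:
        if delete_all:
            self._chunks.clear()
        elif ids:
            for id_ in ids:
                self._chunks.pop(id_, None)
        elif filter and filter.document_id:
            self._chunks.pop(filter.document_id, None)
        return True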
|
import os
from loguru import logger
from typing import Optional
from pymilvus import (
connections,
)
from uuid import uuid4
from datastore.providers.milvus_datastore import (
MilvusDataStore,
)
ZILLIZ_COLLECTION = os.environ.get("ZILLIZ_COLLECTION") or "c" + uuid4().hex
ZILLIZ_URI = os.environ.get("ZILLIZ_URI")
ZILLIZ_USER = os.environ.get("ZILLIZ_USER")
ZILLIZ_PASSWORD = os.environ.get("ZILLIZ_PASSWORD")
ZILLIZ_USE_SECURITY = False if ZILLIZ_PASSWORD is None else True
ZILLIZ_CONSISTENCY_LEVEL = os.environ.get("ZILLIZ_CONSISTENCY_LEVEL")
class ZillizDataStore(MilvusDataStore):
def __init__(self, create_new: Optional[bool] = False):
"""Create a Zilliz DataStore.
The Zilliz Datastore allows for storing your indexes and metadata within a Zilliz Cloud instance.
Args:
            create_new (Optional[bool], optional): Whether to overwrite if the collection already exists. Defaults to False.
"""
        # Overwrite the default consistency level with ZILLIZ_CONSISTENCY_LEVEL if set
self._consistency_level = ZILLIZ_CONSISTENCY_LEVEL or "Bounded"
self._create_connection()
self._create_collection(ZILLIZ_COLLECTION, create_new) # type: ignore
self._create_index()
def _create_connection(self):
# Check if the connection already exists
try:
i = [
connections.get_connection_addr(x[0])
for x in connections.list_connections()
].index({"address": ZILLIZ_URI, "user": ZILLIZ_USER})
self.alias = connections.list_connections()[i][0]
except ValueError:
# Connect to the Zilliz instance using the passed in Environment variables
self.alias = uuid4().hex
connections.connect(alias=self.alias, uri=ZILLIZ_URI, user=ZILLIZ_USER, password=ZILLIZ_PASSWORD, secure=ZILLIZ_USE_SECURITY) # type: ignore
logger.info("Connect to zilliz cloud server")
def _create_index(self):
try:
# If no index on the collection, create one
if len(self.col.indexes) == 0:
self.index_params = {"metric_type": "IP", "index_type": "AUTOINDEX", "params": {}}
self.col.create_index("embedding", index_params=self.index_params)
self.col.load()
self.search_params = {"metric_type": "IP", "params": {}}
except Exception as e:
logger.error("Failed to create index, error: {}".format(e))
|
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from datetime import datetime
from loguru import logger
from services.date import to_unix_timestamp
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
DocumentChunkWithScore,
)
# interface for Postgres client to implement pg based Datastore providers
class PGClient(ABC):
@abstractmethod
async def upsert(self, table: str, json: dict[str, Any]) -> None:
"""
        Takes in a json payload for a single row and upserts it into the table.
"""
raise NotImplementedError
@abstractmethod
async def rpc(self, function_name: str, params: dict[str, Any]) -> Any:
"""
Calls a stored procedure in the database with the given parameters.
"""
raise NotImplementedError
@abstractmethod
async def delete_like(self, table: str, column: str, pattern: str) -> None:
"""
Deletes rows in the table that match the pattern.
"""
raise NotImplementedError
@abstractmethod
async def delete_in(self, table: str, column: str, ids: List[str]) -> None:
"""
Deletes rows in the table that match the ids.
"""
raise NotImplementedError
@abstractmethod
async def delete_by_filters(
self, table: str, filter: DocumentMetadataFilter
) -> None:
"""
Deletes rows in the table that match the filter.
"""
raise NotImplementedError
# abstract class for Postgres based Datastore providers that implements DataStore interface
class PgVectorDataStore(DataStore):
def __init__(self):
self.client = self.create_db_client()
@abstractmethod
def create_db_client(self) -> PGClient:
"""
Create db client, can be accessing postgres database via different APIs.
Can be supabase client or psycopg2 based client.
Return a client for postgres DB.
"""
raise NotImplementedError
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
Takes in a dict of document_ids to list of document chunks and inserts them into the database.
Return a list of document ids.
"""
for document_id, document_chunks in chunks.items():
for chunk in document_chunks:
json = {
"id": chunk.id,
"content": chunk.text,
"embedding": chunk.embedding,
"document_id": document_id,
"source": chunk.metadata.source,
"source_id": chunk.metadata.source_id,
"url": chunk.metadata.url,
"author": chunk.metadata.author,
}
if chunk.metadata.created_at:
json["created_at"] = (
datetime.fromtimestamp(
to_unix_timestamp(chunk.metadata.created_at)
),
)
await self.client.upsert("documents", json)
return list(chunks.keys())
async def _query(self, queries: List[QueryWithEmbedding]) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
query_results: List[QueryResult] = []
for query in queries:
            # get the top matching documents by cosine similarity using the
            # "match_page_sections" stored procedure in the database
params = {
"in_embedding": query.embedding,
}
if query.top_k:
params["in_match_count"] = query.top_k
if query.filter:
if query.filter.document_id:
params["in_document_id"] = query.filter.document_id
if query.filter.source:
params["in_source"] = query.filter.source.value
if query.filter.source_id:
params["in_source_id"] = query.filter.source_id
if query.filter.author:
params["in_author"] = query.filter.author
if query.filter.start_date:
params["in_start_date"] = datetime.fromtimestamp(
to_unix_timestamp(query.filter.start_date)
)
if query.filter.end_date:
params["in_end_date"] = datetime.fromtimestamp(
to_unix_timestamp(query.filter.end_date)
)
try:
data = await self.client.rpc("match_page_sections", params=params)
results: List[DocumentChunkWithScore] = []
for row in data:
document_chunk = DocumentChunkWithScore(
id=row["id"],
text=row["content"],
# TODO: add embedding to the response ?
# embedding=row["embedding"],
score=float(row["similarity"]),
metadata=DocumentChunkMetadata(
source=row["source"],
source_id=row["source_id"],
document_id=row["document_id"],
url=row["url"],
created_at=row["created_at"],
author=row["author"],
),
)
results.append(document_chunk)
query_results.append(QueryResult(query=query.query, results=results))
except Exception as e:
logger.error(e)
query_results.append(QueryResult(query=query.query, results=[]))
return query_results
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Multiple parameters can be used at once.
Returns whether the operation was successful.
"""
if delete_all:
try:
await self.client.delete_like("documents", "document_id", "%")
            except Exception:
return False
elif ids:
try:
await self.client.delete_in("documents", "document_id", ids)
            except Exception:
return False
elif filter:
try:
await self.client.delete_by_filters("documents", filter)
            except Exception:
return False
return True
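# Editorial sketch (assumption-heavy, not part of the original module): one way
# to satisfy the PGClient interface with asyncpg. The "documents" table layout
# and the named-argument call into the stored procedure mirror the usage above,
# but the ON CONFLICT target ("id") and the delete_by_filters body are
# simplifying assumptions, not the plugin's actual implementation.
import asyncpg  # third-party dependency, assumed installed


class AsyncpgClient(PGClient):
    def __init__(self, dsn: str):
        self.dsn = dsn

    async def upsert(self, table: str, json: dict[str, Any]) -> None:
        cols = ", ".join(json)
        placeholders = ", ".join(f"${i + 1}" for i in range(len(json)))
        conn = await asyncpg.connect(self.dsn)
        try:
            await conn.execute(
                f"INSERT INTO {table} ({cols}) VALUES ({placeholders}) "
                "ON CONFLICT (id) DO UPDATE SET content = EXCLUDED.content",
                *json.values(),
            )
        finally:
            await conn.close()

    async def rpc(self, function_name: str, params: dict[str, Any]) -> Any:
        conn = await asyncpg.connect(self.dsn)
        try:
            # Postgres named-argument notation: f(arg_name => $1, ...)
            args = ", ".join(f"{k} => ${i + 1}" for i, k in enumerate(params))
            return await conn.fetch(
                f"SELECT * FROM {function_name}({args})", *params.values()
            )
        finally:
            await conn.close()

    async def delete_like(self, table: str, column: str, pattern: str) -> None:
        conn = await asyncpg.connect(self.dsn)
        try:
            await conn.execute(f"DELETE FROM {table} WHERE {column} LIKE $1", pattern)
        finally:
            await conn.close()

    async def delete_in(self, table: str, column: str, ids: List[str]) -> None:
        conn = await asyncpg.connect(self.dsn)
        try:
            await conn.execute(f"DELETE FROM {table} WHERE {column} = ANY($1)", ids)
        finally:
            await conn.close()

    async def delete_by_filters(
        self, table: str, filter: DocumentMetadataFilter
    ) -> None:
        # Simplification: only the document_id field is honored in this sketch.
        conn = await asyncpg.connect(self.dsn)
        try:
            await conn.execute(
                f"DELETE FROM {table} WHERE document_id = $1", filter.document_id
            )
        finally:
            await conn.close()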
|
"""
Chroma datastore support for the ChatGPT retrieval plugin.
Consult the Chroma docs and GitHub repo for more information:
- https://docs.trychroma.com/usage-guide?lang=py
- https://github.com/chroma-core/chroma
- https://www.trychroma.com/
"""
import os
from datetime import datetime
from typing import Dict, List, Optional
import chromadb
from datastore.datastore import DataStore
from models.models import (
Document,
DocumentChunk,
DocumentChunkMetadata,
DocumentChunkWithScore,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
Source,
)
from services.chunks import get_document_chunks
CHROMA_IN_MEMORY = os.environ.get("CHROMA_IN_MEMORY", "True") == "True"  # parse the env string into a real bool
CHROMA_PERSISTENCE_DIR = os.environ.get("CHROMA_PERSISTENCE_DIR", "openai")
CHROMA_HOST = os.environ.get("CHROMA_HOST", "http://127.0.0.1")
CHROMA_PORT = os.environ.get("CHROMA_PORT", "8000")
CHROMA_COLLECTION = os.environ.get("CHROMA_COLLECTION", "openaiembeddings")
class ChromaDataStore(DataStore):
def __init__(
self,
        in_memory: bool = CHROMA_IN_MEMORY,
persistence_dir: Optional[str] = CHROMA_PERSISTENCE_DIR,
collection_name: str = CHROMA_COLLECTION,
host: str = CHROMA_HOST,
port: str = CHROMA_PORT,
client: Optional[chromadb.Client] = None,
):
if client:
self._client = client
else:
if in_memory:
settings = (
chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persistence_dir,
)
if persistence_dir
else chromadb.config.Settings()
)
self._client = chromadb.Client(settings=settings)
else:
self._client = chromadb.Client(
settings=chromadb.config.Settings(
chroma_api_impl="rest",
chroma_server_host=host,
chroma_server_http_port=port,
)
)
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=None,
)
async def upsert(
self, documents: List[Document], chunk_token_size: Optional[int] = None
) -> List[str]:
"""
Takes in a list of documents and inserts them into the database. If an id already exists, the document is updated.
Return a list of document ids.
"""
chunks = get_document_chunks(documents, chunk_token_size)
# Chroma has a true upsert, so we don't need to delete first
return await self._upsert(chunks)
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict of document ids mapped to lists of document chunks and inserts them into the database.
Return a list of document ids.
"""
self._collection.upsert(
ids=[chunk.id for chunk_list in chunks.values() for chunk in chunk_list],
embeddings=[
chunk.embedding
for chunk_list in chunks.values()
for chunk in chunk_list
],
documents=[
chunk.text for chunk_list in chunks.values() for chunk in chunk_list
],
metadatas=[
self._process_metadata_for_storage(chunk.metadata)
for chunk_list in chunks.values()
for chunk in chunk_list
],
)
return list(chunks.keys())
def _where_from_query_filter(self, query_filter: DocumentMetadataFilter) -> Dict:
output = {
k: v
for (k, v) in query_filter.dict().items()
if v is not None and k != "start_date" and k != "end_date" and k != "source"
}
if query_filter.source:
output["source"] = query_filter.source.value
if query_filter.start_date and query_filter.end_date:
output["$and"] = [
{
"created_at": {
"$gte": int(
datetime.fromisoformat(query_filter.start_date).timestamp()
)
}
},
{
"created_at": {
"$lte": int(
datetime.fromisoformat(query_filter.end_date).timestamp()
)
}
},
]
elif query_filter.start_date:
output["created_at"] = {
"$gte": int(datetime.fromisoformat(query_filter.start_date).timestamp())
}
elif query_filter.end_date:
output["created_at"] = {
"$lte": int(datetime.fromisoformat(query_filter.end_date).timestamp())
}
return output
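    # Illustrative output (added note): a filter with source=Source.email plus
    # both dates produces a Chroma "where" clause shaped like
    #   {"source": "email",
    #    "$and": [{"created_at": {"$gte": 1609459200}},
    #             {"created_at": {"$lte": 1640995199}}]}
    # where the timestamps are unix seconds derived from the ISO date strings.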
def _process_metadata_for_storage(self, metadata: DocumentChunkMetadata) -> Dict:
stored_metadata = {}
if metadata.source:
stored_metadata["source"] = metadata.source.value
if metadata.source_id:
stored_metadata["source_id"] = metadata.source_id
if metadata.url:
stored_metadata["url"] = metadata.url
if metadata.created_at:
stored_metadata["created_at"] = int(
datetime.fromisoformat(metadata.created_at).timestamp()
)
if metadata.author:
stored_metadata["author"] = metadata.author
if metadata.document_id:
stored_metadata["document_id"] = metadata.document_id
return stored_metadata
def _process_metadata_from_storage(self, metadata: Dict) -> DocumentChunkMetadata:
return DocumentChunkMetadata(
source=Source(metadata["source"]) if "source" in metadata else None,
source_id=metadata.get("source_id", None),
url=metadata.get("url", None),
created_at=datetime.fromtimestamp(metadata["created_at"]).isoformat()
if "created_at" in metadata
else None,
author=metadata.get("author", None),
document_id=metadata.get("document_id", None),
)
async def _query(self, queries: List[QueryWithEmbedding]) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
results = [
self._collection.query(
query_embeddings=[query.embedding],
include=["documents", "distances", "metadatas"], # embeddings
n_results=min(query.top_k, self._collection.count()), # type: ignore
where=(
self._where_from_query_filter(query.filter) if query.filter else {}
),
)
for query in queries
]
output = []
for query, result in zip(queries, results):
inner_results = []
(ids,) = result["ids"]
# (embeddings,) = result["embeddings"]
(documents,) = result["documents"]
(metadatas,) = result["metadatas"]
(distances,) = result["distances"]
for id_, text, metadata, distance in zip(
ids,
documents,
metadatas,
distances, # embeddings (https://github.com/openai/chatgpt-retrieval-plugin/pull/59#discussion_r1154985153)
):
inner_results.append(
DocumentChunkWithScore(
id=id_,
text=text,
metadata=self._process_metadata_from_storage(metadata),
# embedding=embedding,
score=distance,
)
)
output.append(QueryResult(query=query.query, results=inner_results))
return output
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Multiple parameters can be used at once.
Returns whether the operation was successful.
"""
if delete_all:
self._collection.delete()
return True
if ids and len(ids) > 0:
if len(ids) > 1:
where_clause = {"$or": [{"document_id": id_} for id_ in ids]}
else:
(id_,) = ids
where_clause = {"document_id": id_}
if filter:
where_clause = {
"$and": [self._where_from_query_filter(filter), where_clause]
}
elif filter:
where_clause = self._where_from_query_filter(filter)
self._collection.delete(where=where_clause)
return True
|
import asyncio
import os
import re
import uuid
from typing import Dict, List, Optional
import weaviate
from loguru import logger
from weaviate import Client
from weaviate.util import generate_uuid5
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentChunkWithScore,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
Source,
)
WEAVIATE_URL_DEFAULT = "http://localhost:8080"
WEAVIATE_CLASS = os.environ.get("WEAVIATE_CLASS", "OpenAIDocument")
WEAVIATE_BATCH_SIZE = int(os.environ.get("WEAVIATE_BATCH_SIZE", 20))
WEAVIATE_BATCH_DYNAMIC = os.environ.get("WEAVIATE_BATCH_DYNAMIC", "False") == "True"  # parse the env string into a real bool
WEAVIATE_BATCH_TIMEOUT_RETRIES = int(os.environ.get("WEAVIATE_TIMEOUT_RETRIES", 3))
WEAVIATE_BATCH_NUM_WORKERS = int(os.environ.get("WEAVIATE_BATCH_NUM_WORKERS", 1))
SCHEMA = {
"class": WEAVIATE_CLASS,
"description": "The main class",
"properties": [
{
"name": "chunk_id",
"dataType": ["string"],
"description": "The chunk id",
},
{
"name": "document_id",
"dataType": ["string"],
"description": "The document id",
},
{
"name": "text",
"dataType": ["text"],
"description": "The chunk's text",
},
{
"name": "source",
"dataType": ["string"],
"description": "The source of the data",
},
{
"name": "source_id",
"dataType": ["string"],
"description": "The source id",
},
{
"name": "url",
"dataType": ["string"],
"description": "The source url",
},
{
"name": "created_at",
"dataType": ["date"],
"description": "Creation date of document",
},
{
"name": "author",
"dataType": ["string"],
"description": "Document author",
},
],
}
def extract_schema_properties(schema):
properties = schema["properties"]
return {property["name"] for property in properties}
class WeaviateDataStore(DataStore):
def handle_errors(self, results: Optional[List[dict]]) -> List[str]:
if not self or not results:
return []
error_messages = []
for result in results:
if (
"result" not in result
or "errors" not in result["result"]
or "error" not in result["result"]["errors"]
):
continue
for message in result["result"]["errors"]["error"]:
error_messages.append(message["message"])
logger.error(message["message"])
return error_messages
def __init__(self):
auth_credentials = self._build_auth_credentials()
url = os.environ.get("WEAVIATE_URL", WEAVIATE_URL_DEFAULT)
logger.debug(
f"Connecting to weaviate instance at {url} with credential type {type(auth_credentials).__name__}"
)
self.client = Client(url, auth_client_secret=auth_credentials)
self.client.batch.configure(
batch_size=WEAVIATE_BATCH_SIZE,
dynamic=WEAVIATE_BATCH_DYNAMIC, # type: ignore
callback=self.handle_errors, # type: ignore
timeout_retries=WEAVIATE_BATCH_TIMEOUT_RETRIES,
num_workers=WEAVIATE_BATCH_NUM_WORKERS,
)
if self.client.schema.contains(SCHEMA):
current_schema = self.client.schema.get(WEAVIATE_CLASS)
current_schema_properties = extract_schema_properties(current_schema)
logger.debug(
f"Found index {WEAVIATE_CLASS} with properties {current_schema_properties}"
)
logger.debug("Will reuse this schema")
else:
new_schema_properties = extract_schema_properties(SCHEMA)
logger.debug(
f"Creating collection {WEAVIATE_CLASS} with properties {new_schema_properties}"
)
self.client.schema.create_class(SCHEMA)
@staticmethod
def _build_auth_credentials():
url = os.environ.get("WEAVIATE_URL", WEAVIATE_URL_DEFAULT)
if WeaviateDataStore._is_wcs_domain(url):
api_key = os.environ.get("WEAVIATE_API_KEY")
if api_key is not None:
return weaviate.auth.AuthApiKey(api_key=api_key)
else:
raise ValueError("WEAVIATE_API_KEY environment variable is not set")
else:
return None
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict of document ids mapped to lists of document chunks and inserts them into the database.
Return a list of document ids.
"""
doc_ids = []
with self.client.batch as batch:
for doc_id, doc_chunks in chunks.items():
logger.debug(f"Upserting {doc_id} with {len(doc_chunks)} chunks")
for doc_chunk in doc_chunks:
                    # we generate a uuid regardless of the format of the document_id because
                    # weaviate needs a uuid to store each document chunk, and
                    # no two document chunks can share the same uuid
doc_uuid = generate_uuid5(doc_chunk, WEAVIATE_CLASS)
metadata = doc_chunk.metadata
doc_chunk_dict = doc_chunk.dict()
doc_chunk_dict.pop("metadata")
for key, value in metadata.dict().items():
doc_chunk_dict[key] = value
doc_chunk_dict["chunk_id"] = doc_chunk_dict.pop("id")
doc_chunk_dict["source"] = (
doc_chunk_dict.pop("source").value
if doc_chunk_dict["source"]
else None
)
embedding = doc_chunk_dict.pop("embedding")
batch.add_data_object(
uuid=doc_uuid,
data_object=doc_chunk_dict,
class_name=WEAVIATE_CLASS,
vector=embedding,
)
doc_ids.append(doc_id)
batch.flush()
return doc_ids
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
async def _single_query(query: QueryWithEmbedding) -> QueryResult:
logger.debug(f"Query: {query.query}")
if not hasattr(query, "filter") or not query.filter:
result = (
self.client.query.get(
WEAVIATE_CLASS,
[
"chunk_id",
"document_id",
"text",
"source",
"source_id",
"url",
"created_at",
"author",
],
)
.with_hybrid(query=query.query, alpha=0.5, vector=query.embedding)
.with_limit(query.top_k) # type: ignore
.with_additional(["score", "vector"])
.do()
)
else:
filters_ = self.build_filters(query.filter)
result = (
self.client.query.get(
WEAVIATE_CLASS,
[
"chunk_id",
"document_id",
"text",
"source",
"source_id",
"url",
"created_at",
"author",
],
)
.with_hybrid(query=query.query, alpha=0.5, vector=query.embedding)
.with_where(filters_)
.with_limit(query.top_k) # type: ignore
.with_additional(["score", "vector"])
.do()
)
query_results: List[DocumentChunkWithScore] = []
response = result["data"]["Get"][WEAVIATE_CLASS]
for resp in response:
result = DocumentChunkWithScore(
id=resp["chunk_id"],
text=resp["text"],
# embedding=resp["_additional"]["vector"],
score=resp["_additional"]["score"],
metadata=DocumentChunkMetadata(
document_id=resp["document_id"] if resp["document_id"] else "",
source=Source(resp["source"]) if resp["source"] else None,
source_id=resp["source_id"],
url=resp["url"],
created_at=resp["created_at"],
author=resp["author"],
),
)
query_results.append(result)
return QueryResult(query=query.query, results=query_results)
return await asyncio.gather(*[_single_query(query) for query in queries])
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
# TODO
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
if delete_all:
logger.debug(f"Deleting all vectors in index {WEAVIATE_CLASS}")
self.client.schema.delete_all()
return True
if ids:
operands = [
{"path": ["document_id"], "operator": "Equal", "valueString": id}
for id in ids
]
where_clause = {"operator": "Or", "operands": operands}
logger.debug(f"Deleting vectors from index {WEAVIATE_CLASS} with ids {ids}")
result = self.client.batch.delete_objects(
class_name=WEAVIATE_CLASS, where=where_clause, output="verbose"
)
if not bool(result["results"]["successful"]):
logger.debug(
f"Failed to delete the following objects: {result['results']['objects']}"
)
if filter:
where_clause = self.build_filters(filter)
logger.debug(
f"Deleting vectors from index {WEAVIATE_CLASS} with filter {where_clause}"
)
result = self.client.batch.delete_objects(
class_name=WEAVIATE_CLASS, where=where_clause
)
if not bool(result["results"]["successful"]):
logger.debug(
f"Failed to delete the following objects: {result['results']['objects']}"
)
return True
@staticmethod
def build_filters(filter):
if filter.source:
filter.source = filter.source.value
operands = []
filter_conditions = {
"source": {
"operator": "Equal",
"value": "query.filter.source.value",
"value_key": "valueString",
},
"start_date": {"operator": "GreaterThanEqual", "value_key": "valueDate"},
"end_date": {"operator": "LessThanEqual", "value_key": "valueDate"},
"default": {"operator": "Equal", "value_key": "valueString"},
}
for attr, value in filter.__dict__.items():
if value is not None:
filter_condition = filter_conditions.get(
attr, filter_conditions["default"]
)
value_key = filter_condition["value_key"]
operand = {
"path": [
attr
if not (attr == "start_date" or attr == "end_date")
else "created_at"
],
"operator": filter_condition["operator"],
value_key: value,
}
operands.append(operand)
return {"operator": "And", "operands": operands}
@staticmethod
def _is_valid_weaviate_id(candidate_id: str) -> bool:
"""
Check if candidate_id is a valid UUID for weaviate's use
Weaviate supports UUIDs of version 3, 4 and 5. This function checks if the candidate_id is a valid UUID of one of these versions.
See https://weaviate.io/developers/weaviate/more-resources/faq#q-are-there-restrictions-on-uuid-formatting-do-i-have-to-adhere-to-any-standards
for more information.
"""
acceptable_version = [3, 4, 5]
try:
result = uuid.UUID(candidate_id)
if result.version not in acceptable_version:
return False
else:
return True
except ValueError:
return False
@staticmethod
def _is_wcs_domain(url: str) -> bool:
"""
        Check if the given URL is a Weaviate Cloud domain, i.e. ends with ".weaviate.cloud" or ".weaviate.network" (optionally followed by "/").
Args:
url (str): The URL to check.
Returns:
bool: True if the URL ends with the specified strings, False otherwise.
"""
pattern = r"\.(weaviate\.cloud|weaviate\.network)(/)?$"
return bool(re.search(pattern, url))
|
import json
import os
from typing import Dict, List, Optional, Type
from loguru import logger
from datastore.datastore import DataStore
from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding
from llama_index.indices.base import BaseGPTIndex
from llama_index.indices.vector_store.base import GPTVectorStoreIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import Response
from llama_index.data_structs.node_v2 import Node, DocumentRelationship, NodeWithScore
from llama_index.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.data_structs.struct_type import IndexStructType
from llama_index.indices.response.builder import ResponseMode
INDEX_STRUCT_TYPE_STR = os.environ.get('LLAMA_INDEX_TYPE', IndexStructType.SIMPLE_DICT.value)
INDEX_JSON_PATH = os.environ.get('LLAMA_INDEX_JSON_PATH', None)
QUERY_KWARGS_JSON_PATH = os.environ.get('LLAMA_QUERY_KWARGS_JSON_PATH', None)
RESPONSE_MODE = os.environ.get('LLAMA_RESPONSE_MODE', ResponseMode.NO_TEXT.value)
EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES = [
IndexStructType.DICT,
IndexStructType.WEAVIATE,
IndexStructType.PINECONE,
IndexStructType.QDRANT,
IndexStructType.CHROMA,
IndexStructType.VECTOR_STORE,
]
def _create_or_load_index(
index_type_str: Optional[str] = None,
index_json_path: Optional[str] = None,
index_type_to_index_cls: Optional[dict[str, Type[BaseGPTIndex]]] = None,
) -> BaseGPTIndex:
"""Create or load index from json path."""
index_json_path = index_json_path or INDEX_JSON_PATH
index_type_to_index_cls = index_type_to_index_cls or INDEX_STRUCT_TYPE_TO_INDEX_CLASS
index_type_str = index_type_str or INDEX_STRUCT_TYPE_STR
index_type = IndexStructType(index_type_str)
if index_type not in index_type_to_index_cls:
raise ValueError(f'Unknown index type: {index_type}')
if index_type in EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES:
raise ValueError('Please use vector store directly.')
index_cls = index_type_to_index_cls[index_type]
if index_json_path is None:
return index_cls(nodes=[]) # Create empty index
else:
return index_cls.load_from_disk(index_json_path) # Load index from disk
def _create_or_load_query_kwargs(query_kwargs_json_path: Optional[str] = None) -> Optional[dict]:
"""Create or load query kwargs from json path."""
    query_kwargs_json_path = query_kwargs_json_path or QUERY_KWARGS_JSON_PATH
    query_kwargs: Optional[dict] = None
    if query_kwargs_json_path is not None:
        # read the kwargs from their own json path (not INDEX_JSON_PATH)
        with open(query_kwargs_json_path, 'r') as f:
            query_kwargs = json.load(f)
    return query_kwargs
def _doc_chunk_to_node(doc_chunk: DocumentChunk, source_doc_id: str) -> Node:
"""Convert document chunk to Node"""
return Node(
doc_id=doc_chunk.id,
text=doc_chunk.text,
embedding=doc_chunk.embedding,
extra_info=doc_chunk.metadata.dict(),
relationships={
DocumentRelationship.SOURCE: source_doc_id
}
)
def _query_with_embedding_to_query_bundle(query: QueryWithEmbedding) -> QueryBundle:
return QueryBundle(
query_str = query.query,
embedding=query.embedding,
)
def _source_node_to_doc_chunk_with_score(node_with_score: NodeWithScore) -> DocumentChunkWithScore:
node = node_with_score.node
if node.extra_info is not None:
metadata = DocumentChunkMetadata(**node.extra_info)
else:
metadata = DocumentChunkMetadata()
return DocumentChunkWithScore(
id=node.doc_id,
text=node.text,
score=node_with_score.score if node_with_score.score is not None else 1.,
metadata=metadata,
)
def _response_to_query_result(response: Response, query: QueryWithEmbedding) -> QueryResult:
results = [_source_node_to_doc_chunk_with_score(node) for node in response.source_nodes]
return QueryResult(query=query.query, results=results,)
class LlamaDataStore(DataStore):
def __init__(self, index: Optional[BaseGPTIndex] = None, query_kwargs: Optional[dict] = None):
self._index = index or _create_or_load_index()
self._query_kwargs = query_kwargs or _create_or_load_query_kwargs()
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict of document ids mapped to lists of document chunks and inserts them into the database.
Return a list of document ids.
"""
doc_ids = []
for doc_id, doc_chunks in chunks.items():
logger.debug(f"Upserting {doc_id} with {len(doc_chunks)} chunks")
nodes = [
_doc_chunk_to_node(doc_chunk=doc_chunk, source_doc_id=doc_id)
for doc_chunk in doc_chunks
]
self._index.insert_nodes(nodes)
doc_ids.append(doc_id)
return doc_ids
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and
returns a list of query results with matching document chunks and scores.
"""
query_result_all = []
for query in queries:
if query.filter is not None:
logger.warning('Filters are not supported yet, ignoring for now.')
query_bundle = _query_with_embedding_to_query_bundle(query)
# Setup query kwargs
if self._query_kwargs is not None:
query_kwargs = self._query_kwargs
else:
query_kwargs = {}
# TODO: support top_k for other indices
if isinstance(self._index, GPTVectorStoreIndex):
query_kwargs['similarity_top_k'] = query.top_k
response = await self._index.aquery(query_bundle, response_mode=RESPONSE_MODE, **query_kwargs)
query_result = _response_to_query_result(response, query)
query_result_all.append(query_result)
return query_result_all
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
if delete_all:
logger.warning('Delete all not supported yet.')
return False
if filter is not None:
logger.warning('Filters are not supported yet.')
return False
if ids is not None:
for id_ in ids:
try:
self._index.delete(id_)
except NotImplementedError:
                    # NOTE: some indices do not support delete yet.
logger.warning(f'{type(self._index)} does not support delete yet.')
return False
        return True
|
import asyncio
import os
import re
import json
import redis.asyncio as redis
import numpy as np
from redis.commands.search.query import Query as RediSearchQuery
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.field import (
TagField,
TextField,
NumericField,
VectorField,
)
from loguru import logger
from typing import Dict, List, Optional
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentMetadataFilter,
DocumentChunkWithScore,
QueryResult,
QueryWithEmbedding,
)
from services.date import to_unix_timestamp
# Read environment variables for Redis
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379))
REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD")
REDIS_INDEX_NAME = os.environ.get("REDIS_INDEX_NAME", "index")
REDIS_DOC_PREFIX = os.environ.get("REDIS_DOC_PREFIX", "doc")
REDIS_DISTANCE_METRIC = os.environ.get("REDIS_DISTANCE_METRIC", "COSINE")
REDIS_INDEX_TYPE = os.environ.get("REDIS_INDEX_TYPE", "FLAT")
assert REDIS_INDEX_TYPE in ("FLAT", "HNSW")
# OpenAI Ada Embeddings Dimension
VECTOR_DIMENSION = 1536
# RediSearch constants
REDIS_REQUIRED_MODULES = [
{"name": "search", "ver": 20600},
{"name": "ReJSON", "ver": 20404}
]
REDIS_DEFAULT_ESCAPED_CHARS = re.compile(r"[,.<>{}\[\]\\\"\':;!@#$%^&()\-+=~\/ ]")
# Helper functions
def unpack_schema(d: dict):
for v in d.values():
if isinstance(v, dict):
yield from unpack_schema(v)
else:
yield v
async def _check_redis_module_exist(client: redis.Redis, modules: List[dict]):
installed_modules = (await client.info()).get("modules", [])
installed_modules = {module["name"]: module for module in installed_modules}
for module in modules:
if module["name"] not in installed_modules or int(installed_modules[module["name"]]["ver"]) < int(module["ver"]):
error_message = "You must add the RediSearch (>= 2.6) and ReJSON (>= 2.4) modules from Redis Stack. " \
"Please refer to Redis Stack docs: https://redis.io/docs/stack/"
logger.error(error_message)
raise AttributeError(error_message)
class RedisDataStore(DataStore):
def __init__(self, client: redis.Redis, redisearch_schema: dict):
self.client = client
self._schema = redisearch_schema
# Init default metadata with sentinel values in case the document written has no metadata
self._default_metadata = {
field: (0 if field == "created_at" else "_null_") for field in redisearch_schema["metadata"]
}
### Redis Helper Methods ###
@classmethod
async def init(cls, **kwargs):
"""
Setup the index if it does not exist.
"""
try:
# Connect to the Redis Client
logger.info("Connecting to Redis")
client = redis.Redis(
host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD
)
except Exception as e:
logger.error(f"Error setting up Redis: {e}")
raise e
await _check_redis_module_exist(client, modules=REDIS_REQUIRED_MODULES)
dim = kwargs.get("dim", VECTOR_DIMENSION)
redisearch_schema = {
"metadata": {
"document_id": TagField("$.metadata.document_id", as_name="document_id"),
"source_id": TagField("$.metadata.source_id", as_name="source_id"),
"source": TagField("$.metadata.source", as_name="source"),
"author": TextField("$.metadata.author", as_name="author"),
"created_at": NumericField("$.metadata.created_at", as_name="created_at"),
},
"embedding": VectorField(
"$.embedding",
REDIS_INDEX_TYPE,
{
"TYPE": "FLOAT64",
"DIM": dim,
"DISTANCE_METRIC": REDIS_DISTANCE_METRIC,
},
as_name="embedding",
),
}
try:
# Check for existence of RediSearch Index
await client.ft(REDIS_INDEX_NAME).info()
logger.info(f"RediSearch index {REDIS_INDEX_NAME} already exists")
        except Exception:
# Create the RediSearch Index
logger.info(f"Creating new RediSearch index {REDIS_INDEX_NAME}")
definition = IndexDefinition(
prefix=[REDIS_DOC_PREFIX], index_type=IndexType.JSON
)
fields = list(unpack_schema(redisearch_schema))
logger.info(f"Creating index with fields: {fields}")
await client.ft(REDIS_INDEX_NAME).create_index(
fields=fields, definition=definition
)
return cls(client, redisearch_schema)
@staticmethod
def _redis_key(document_id: str, chunk_id: str) -> str:
"""
Create the JSON key for document chunks in Redis.
Args:
document_id (str): Document Identifier
chunk_id (str): Chunk Identifier
Returns:
str: JSON key string.
"""
return f"doc:{document_id}:chunk:{chunk_id}"
@staticmethod
def _escape(value: str) -> str:
"""
Escape filter value.
Args:
value (str): Value to escape.
Returns:
str: Escaped filter value for RediSearch.
"""
def escape_symbol(match) -> str:
value = match.group(0)
return f"\\{value}"
return REDIS_DEFAULT_ESCAPED_CHARS.sub(escape_symbol, value)
def _get_redis_chunk(self, chunk: DocumentChunk) -> dict:
"""
Convert DocumentChunk into a JSON object for storage
in Redis.
Args:
chunk (DocumentChunk): Chunk of a Document.
Returns:
dict: JSON object for storage in Redis.
"""
# Convert chunk -> dict
data = chunk.__dict__
metadata = chunk.metadata.__dict__
data["chunk_id"] = data.pop("id")
# Prep Redis Metadata
redis_metadata = dict(self._default_metadata)
if metadata:
for field, value in metadata.items():
if value:
if field == "created_at":
redis_metadata[field] = to_unix_timestamp(value) # type: ignore
else:
redis_metadata[field] = value
data["metadata"] = redis_metadata
return data
def _get_redis_query(self, query: QueryWithEmbedding) -> RediSearchQuery:
"""
Convert a QueryWithEmbedding into a RediSearchQuery.
Args:
query (QueryWithEmbedding): Search query.
Returns:
RediSearchQuery: Query for RediSearch.
"""
filter_str: str = ""
# RediSearch field type to query string
def _typ_to_str(typ, field, value) -> str: # type: ignore
if isinstance(typ, TagField):
return f"@{field}:{{{self._escape(value)}}} "
elif isinstance(typ, TextField):
return f"@{field}:{value} "
elif isinstance(typ, NumericField):
num = to_unix_timestamp(value)
match field:
case "start_date":
return f"@{field}:[{num} +inf] "
case "end_date":
return f"@{field}:[-inf {num}] "
# Build filter
if query.filter:
redisearch_schema = self._schema
for field, value in query.filter.__dict__.items():
if not value:
continue
if field in redisearch_schema:
filter_str += _typ_to_str(redisearch_schema[field], field, value)
elif field in redisearch_schema["metadata"]:
if field == "source": # handle the enum
value = value.value
filter_str += _typ_to_str(
redisearch_schema["metadata"][field], field, value
)
elif field in ["start_date", "end_date"]:
filter_str += _typ_to_str(
redisearch_schema["metadata"]["created_at"], field, value
)
# Postprocess filter string
filter_str = filter_str.strip()
filter_str = filter_str if filter_str else "*"
# Prepare query string
query_str = (
f"({filter_str})=>[KNN {query.top_k} @embedding $embedding as score]"
)
return (
RediSearchQuery(query_str)
.sort_by("score")
.paging(0, query.top_k)
.dialect(2)
)
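    # Illustrative result (added note): with top_k=3 and a document_id filter,
    # the string handed to RediSearch looks like
    #   (@document_id:{zerp})=>[KNN 3 @embedding $embedding as score]
    # and with no filter at all it degenerates to
    #   (*)=>[KNN 3 @embedding $embedding as score]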
async def _redis_delete(self, keys: List[str]):
"""
Delete a list of keys from Redis.
Args:
keys (List[str]): List of keys to delete.
"""
# Delete the keys
await asyncio.gather(*[self.client.delete(key) for key in keys])
#######
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict mapping document ids to lists of document chunks and inserts them into the database.
Return a list of document ids.
"""
# Initialize a list of ids to return
doc_ids: List[str] = []
# Loop through the dict items
for doc_id, chunk_list in chunks.items():
# Append the id to the ids list
doc_ids.append(doc_id)
            # Write all chunks for this document in a single pipeline round trip
async with self.client.pipeline(transaction=False) as pipe:
for chunk in chunk_list:
key = self._redis_key(doc_id, chunk.id)
data = self._get_redis_chunk(chunk)
await pipe.json().set(key, "$", data)
await pipe.execute()
return doc_ids
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and
returns a list of query results with matching document chunks and scores.
"""
# Prepare query responses and results object
results: List[QueryResult] = []
# Gather query results in a pipeline
logger.info(f"Gathering {len(queries)} query results")
for query in queries:
logger.debug(f"Query: {query.query}")
query_results: List[DocumentChunkWithScore] = []
# Extract Redis query
redis_query: RediSearchQuery = self._get_redis_query(query)
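            # Serialize the query vector as raw bytes. The dtype must match the
            # index schema above (TYPE=FLOAT64); a float32 buffer here would be
            # misread by RediSearch and yield meaningless distances.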
embedding = np.array(query.embedding, dtype=np.float64).tobytes()
# Perform vector search
query_response = await self.client.ft(REDIS_INDEX_NAME).search(
redis_query, {"embedding": embedding}
)
# Iterate through the most similar documents
for doc in query_response.docs:
# Load JSON data
doc_json = json.loads(doc.json)
# Create document chunk object with score
result = DocumentChunkWithScore(
id=doc_json["metadata"]["document_id"],
score=doc.score,
text=doc_json["text"],
metadata=doc_json["metadata"]
)
query_results.append(result)
# Add to overall results
results.append(QueryResult(query=query.query, results=query_results))
return results
async def _find_keys(self, pattern: str) -> List[str]:
return [key async for key in self.client.scan_iter(pattern)]
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
# Delete all vectors from the index if delete_all is True
if delete_all:
try:
logger.info(f"Deleting all documents from index")
await self.client.ft(REDIS_INDEX_NAME).dropindex(True)
logger.info(f"Deleted all documents successfully")
return True
except Exception as e:
logger.error(f"Error deleting all documents: {e}")
raise e
# Delete by filter
if filter:
# TODO - extend this to work with other metadata filters?
if filter.document_id:
try:
keys = await self._find_keys(
f"{REDIS_DOC_PREFIX}:{filter.document_id}:*"
)
await self._redis_delete(keys)
logger.info(f"Deleted document {filter.document_id} successfully")
except Exception as e:
logger.error(f"Error deleting document {filter.document_id}: {e}")
raise e
# Delete by explicit ids (Redis keys)
if ids:
try:
logger.info(f"Deleting document ids {ids}")
keys = []
# find all keys associated with the document ids
for document_id in ids:
doc_keys = await self._find_keys(
pattern=f"{REDIS_DOC_PREFIX}:{document_id}:*"
)
keys.extend(doc_keys)
# delete all keys
logger.info(f"Deleting {len(keys)} keys from Redis")
await self._redis_delete(keys)
except Exception as e:
logger.error(f"Error deleting ids: {e}")
raise e
return True
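# Minimal usage sketch (hypothetical: the class name and the create() signature
# are assumed from the surrounding plugin code, and a Redis Stack instance with
# the RediSearch and RedisJSON modules must be reachable):
#
#   datastore = await RedisDataStore.create(dim=1536)
#   doc_ids = await datastore._upsert({"doc-1": chunks})
#   results = await datastore._query(
#       [QueryWithEmbedding(query="q", embedding=vec, top_k=5)]
#   )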
|
import os
import asyncio
from typing import Dict, List, Optional, Tuple, Any
from datetime import datetime
from loguru import logger
from psycopg2cffi import compat
compat.register()
import psycopg2
from psycopg2.extras import DictCursor
from psycopg2.pool import SimpleConnectionPool
from services.date import to_unix_timestamp
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
DocumentChunkWithScore,
)
PG_CONFIG = {
"collection": os.environ.get("PG_COLLECTION", "document_chunks"),
"database": os.environ.get("PG_DATABASE", "postgres"),
"user": os.environ.get("PG_USER", "user"),
"password": os.environ.get("PG_PASSWORD", "password"),
"host": os.environ.get("PG_HOST", "localhost"),
"port": int(os.environ.get("PG_PORT", "5432")),
}
OUTPUT_DIM = 1536
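# 1536 matches the output dimensionality of OpenAI's text-embedding-ada-002
# model; if a different embedding model is used, this constant (and the ANN
# index it parameterizes below) must be changed to match.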
class AnalyticDBDataStore(DataStore):
def __init__(self, config: Dict[str, str] = PG_CONFIG):
self.collection_name = config["collection"]
self.user = config["user"]
self.password = config["password"]
self.database = config["database"]
self.host = config["host"]
self.port = config["port"]
self.connection_pool = SimpleConnectionPool(
minconn=1,
maxconn=100,
dbname=self.database,
user=self.user,
password=self.password,
host=self.host,
port=self.port,
)
self._initialize_db()
def _initialize_db(self):
conn = self.connection_pool.getconn()
try:
with conn.cursor() as cur:
self._create_table(cur)
self._create_embedding_index(cur)
conn.commit()
finally:
self.connection_pool.putconn(conn)
def _create_table(self, cur: psycopg2.extensions.cursor):
cur.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.collection_name} (
id TEXT PRIMARY KEY DEFAULT uuid_generate_v4()::TEXT,
source TEXT,
source_id TEXT,
content TEXT,
document_id TEXT,
author TEXT,
url TEXT,
created_at TIMESTAMPTZ DEFAULT NOW(),
embedding real[]
);
"""
)
def _create_embedding_index(self, cur: psycopg2.extensions.cursor):
cur.execute(
f"""
SELECT * FROM pg_indexes WHERE tablename='{self.collection_name}';
"""
)
index_exists = any(
index[2] == f"{self.collection_name}_embedding_idx"
for index in cur.fetchall()
)
if not index_exists:
cur.execute(
f"""
CREATE INDEX {self.collection_name}_embedding_idx
ON {self.collection_name}
USING ann(embedding)
WITH (
distancemeasure=L2,
                    dim={OUTPUT_DIM},
pq_segments=64,
hnsw_m=100,
pq_centers=2048
);
"""
)
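    # Note: the "ann" access method and its pq_segments / hnsw_m / pq_centers
    # options above are specific to AnalyticDB for PostgreSQL's vector index;
    # on stock Postgres this CREATE INDEX statement would fail.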
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
Takes in a dict of document_ids to list of document chunks and inserts them into the database.
Return a list of document ids.
"""
        loop = asyncio.get_running_loop()
tasks = [
loop.run_in_executor(None, self._upsert_chunk, chunk)
for document_chunks in chunks.values()
for chunk in document_chunks
]
await asyncio.gather(*tasks)
return list(chunks.keys())
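    # psycopg2 is a blocking driver, so each chunk insert above is pushed onto
    # the default thread-pool executor; asyncio.gather then awaits all of the
    # wrapped futures concurrently without blocking the event loop.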
def _upsert_chunk(self, chunk: DocumentChunk):
created_at = (
datetime.fromtimestamp(to_unix_timestamp(chunk.metadata.created_at))
if chunk.metadata.created_at
else None
)
data = (
chunk.id,
chunk.text,
chunk.embedding,
chunk.metadata.document_id,
chunk.metadata.source,
chunk.metadata.source_id,
chunk.metadata.url,
chunk.metadata.author,
created_at,
)
conn = self.connection_pool.getconn()
try:
with conn.cursor() as cur:
# Construct the SQL query and data
query = f"""
INSERT INTO {self.collection_name} (id, content, embedding, document_id, source, source_id, url, author, created_at)
VALUES (%s::text, %s::text, %s::real[], %s::text, %s::text, %s::text, %s::text, %s::text, %s::timestamp with time zone)
ON CONFLICT (id) DO UPDATE SET
content = EXCLUDED.content,
embedding = EXCLUDED.embedding,
document_id = EXCLUDED.document_id,
source = EXCLUDED.source,
source_id = EXCLUDED.source_id,
url = EXCLUDED.url,
author = EXCLUDED.author,
created_at = EXCLUDED.created_at;
"""
# Execute the query
cur.execute(query, data)
# Commit the transaction
conn.commit()
finally:
self.connection_pool.putconn(conn)
async def _query(self, queries: List[QueryWithEmbedding]) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
query_results: List[QueryResult] = []
def generate_query(query: QueryWithEmbedding) -> Tuple[str, List[Any]]:
embedding = "[" + ", ".join(str(x) for x in query.embedding) + "]"
q = f"""
SELECT
id,
content,
source,
source_id,
document_id,
url,
created_at,
author,
embedding,
l2_distance(embedding,array{embedding}::real[]) AS similarity
FROM
{self.collection_name}
"""
where_clause, params = generate_where_clause(query.filter)
q += where_clause
q += f"ORDER BY embedding <-> array{embedding}::real[] LIMIT {query.top_k};"
return q, params
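        # As a rough illustration, with the default collection name and a
        # document_id filter, top_k=3 produces a statement shaped like
        # (embedding literal elided):
        #   SELECT id, content, ..., l2_distance(embedding, array[...]::real[]) AS similarity
        #   FROM document_chunks WHERE document_id=%s
        #   ORDER BY embedding <-> array[...]::real[] LIMIT 3;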
def generate_where_clause(
query_filter: Optional[DocumentMetadataFilter],
) -> Tuple[str, List[Any]]:
if query_filter is None:
return "", []
conditions = [
("document_id=%s", query_filter.document_id),
("source_id=%s", query_filter.source_id),
("source LIKE %s", query_filter.source),
("author LIKE %s", query_filter.author),
("created_at >= %s", query_filter.start_date),
("created_at <= %s", query_filter.end_date),
]
where_clause = "WHERE " + " AND ".join(
[cond[0] for cond in conditions if cond[1] is not None]
)
values = [cond[1] for cond in conditions if cond[1] is not None]
return where_clause, values
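        # E.g. a filter with document_id="doc-1" and everything else None yields
        # ("WHERE document_id=%s", ["doc-1"]); keeping the values as separate
        # parameters lets psycopg2 handle quoting and injection escaping.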
def fetch_data(cur, q: str, params: List[Any]):
cur.execute(q, params)
return cur.fetchall()
def create_results(data):
results = []
for row in data:
document_chunk = DocumentChunkWithScore(
id=row["id"],
text=row["content"],
score=float(row["similarity"]),
metadata=DocumentChunkMetadata(
source=row["source"],
source_id=row["source_id"],
document_id=row["document_id"],
url=row["url"],
created_at=str(row["created_at"]),
author=row["author"],
),
)
results.append(document_chunk)
return results
conn = self.connection_pool.getconn()
try:
            cur = conn.cursor(cursor_factory=DictCursor)
            for query in queries:
                try:
                    q, params = generate_query(query)
                    data = fetch_data(cur, q, params)
                    results = create_results(data)
                    query_results.append(
                        QueryResult(query=query.query, results=results)
                    )
                except Exception as e:
                    logger.error(e)
                    query_results.append(QueryResult(query=query.query, results=[]))
return query_results
finally:
self.connection_pool.putconn(conn)
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
async def execute_delete(query: str, params: Optional[List] = None) -> bool:
conn = self.connection_pool.getconn()
try:
with conn.cursor() as cur:
if params:
cur.execute(query, params)
else:
cur.execute(query)
                    conn.commit()
return True
except Exception as e:
logger.error(e)
return False
finally:
self.connection_pool.putconn(conn)
if delete_all:
query = f"DELETE FROM {self.collection_name} WHERE document_id LIKE %s;"
return await execute_delete(query, ["%"])
elif ids:
query = f"DELETE FROM {self.collection_name} WHERE document_id IN ({','.join(['%s'] * len(ids))});"
return await execute_delete(query, ids)
elif filter is not None:
query, params = self._generate_delete_query(filter)
return await execute_delete(query, params)
else:
return True
def _generate_delete_query(
self, filter: DocumentMetadataFilter
) -> Tuple[str, List]:
conditions = [
(filter.document_id, "document_id = %s"),
(filter.source, "source = %s"),
(filter.source_id, "source_id = %s"),
(filter.author, "author = %s"),
(filter.start_date, "created_at >= %s"),
(filter.end_date, "created_at <= %s"),
]
where_conditions = [f for value, f in conditions if value]
where_values = [value for value, _ in conditions if value]
query = f"DELETE FROM {self.collection_name} WHERE {' AND '.join(where_conditions)};"
return query, where_values
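    # As an illustration, a filter with document_id="doc-1" and source="email"
    # yields ("DELETE FROM document_chunks WHERE document_id = %s AND source = %s;",
    # ["doc-1", "email"]), again leaving value quoting to the driver. Caveat: a
    # filter object with no fields set would produce a dangling "WHERE ;", so
    # callers should pass at least one populated field.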
|