# imports
import os
import argparse
import multiprocessing
from pathlib import Path

from PIL import Image

import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset

import pytorch_lightning as pl
from byol_pytorch import BYOL

# test model, a resnet 50
resnet = models.resnet50(pretrained=True)
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
parser.add_argument('--image_folder', type=str, required = True,
help='path to your folder of images for self-supervised learning')
args = parser.parse_args()
# constants
BATCH_SIZE = 32
EPOCHS = 1000
LR = 3e-4
NUM_GPUS = 2
IMAGE_SIZE = 256
IMAGE_EXTS = ['.jpg', '.png', '.jpeg']
NUM_WORKERS = multiprocessing.cpu_count()
# pytorch lightning module
class SelfSupervisedLearner(pl.LightningModule):
def __init__(self, net, **kwargs):
super().__init__()
self.learner = BYOL(net, **kwargs)
def forward(self, images):
return self.learner(images)
def training_step(self, images, _):
loss = self.forward(images)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=LR)
def on_before_zero_grad(self, _):
if self.learner.use_momentum:
self.learner.update_moving_average()
# images dataset
def expand_greyscale(t):
return t.expand(3, -1, -1)
class ImagesDataset(Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.paths = []
for path in Path(f'{folder}').glob('**/*'):
_, ext = os.path.splitext(path)
if ext.lower() in IMAGE_EXTS:
self.paths.append(path)
print(f'{len(self.paths)} images found')
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Lambda(expand_greyscale)
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
img = img.convert('RGB')
return self.transform(img)
# main
if __name__ == '__main__':
ds = ImagesDataset(args.image_folder, IMAGE_SIZE)
train_loader = DataLoader(ds, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True)
model = SelfSupervisedLearner(
resnet,
image_size = IMAGE_SIZE,
hidden_layer = 'avgpool',
projection_size = 256,
projection_hidden_size = 4096,
moving_average_decay = 0.99
)
trainer = pl.Trainer(
gpus = NUM_GPUS,
max_epochs = EPOCHS,
accumulate_grad_batches = 1,
sync_batchnorm = True
)
trainer.fit(model, train_loader)
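    # usage (sketch, not part of the original script): assuming this file is saved as train.py,
    #   $ python train.py --image_folder /path/to/your/images
    # BYOL needs no labels - the online network is trained to predict the EMA target network's
    # projection of a differently augmented view, and `resnet` can then be reused as a
    # pretrained feature extractor.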
|
# imports (the TransformerLM and AutoregressiveWrapper used below are defined in the companion model files)
import gzip
import random

import numpy as np
import torch
import tqdm
from torch.utils.data import DataLoader, Dataset

# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 3e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = TransformerLM(
num_tokens = 256,
dim = 512,
depth = 12,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
only_norm = True,
shared_kv = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:SEQ_LEN]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
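        # note (added sketch): the cross entropy above is in nats per byte; to compare against
        # published enwik8 bits-per-character numbers, convert with
        #   bpc = loss.item() / math.log(2)
        # (requires `import math`), since enwik8 is modeled at the byte level here.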
|
from functools import partial

import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

def default(value, default):
return value if value is not None else default
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > 1.0 - thres
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = None, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = default(ignore_index, pad_value)
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('src_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, src_mask=input_mask, **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
out = torch.cat((out, sample[:, None]), dim=-1)
input_mask = F.pad(input_mask, (1, 0), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, *args, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
m = kwargs.pop('input_mask', None)
xi, xo = x[:, :-1], x[:, 1:]
if m is not None:
assert m.shape == x.shape[0:2], 'input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle'
kwargs.update(input_mask = m[:, :-1])
out = self.net(xi, *args, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
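# minimal usage sketch (added, hypothetical): any net that maps token ids (b, n) to logits
# (b, n, num_tokens) and exposes `max_seq_len` can be wrapped; a tiny stand-in model is used here.
if __name__ == '__main__':
    class TinyLM(nn.Module):
        def __init__(self, num_tokens = 256, dim = 64, max_seq_len = 512):
            super().__init__()
            self.max_seq_len = max_seq_len
            self.emb = nn.Embedding(num_tokens, dim)
            self.to_logits = nn.Linear(dim, num_tokens)
        def forward(self, x, **kwargs):
            return self.to_logits(self.emb(x))

    wrapper = AutoregressiveWrapper(TinyLM())
    seq = torch.randint(0, 256, (1, 512))
    loss = wrapper(seq)                          # scalar next-token cross entropy
    loss.backward()
    sample = wrapper.generate(seq[0, :10], 50)   # continue a 10 token prime by 50 tokens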
|
import torch
from torch import nn
from einops import rearrange

# helpers
def cum_mean(t):
device = t.device
running_num = torch.arange(t.shape[-1], device=t.device) + 1
return t.cumsum(dim=-1) / running_num
def normalize(t, eps=1e-8):
t -= t.mean(dim=-1, keepdim=True)
s = (t ** 2).mean(dim=-1, keepdim=True)
return t * torch.rsqrt(s + eps)
def causal_normalize(t, eps=1e-8):
t -= cum_mean(t).diagonal(dim1=-2, dim2=-1)[..., None]
s = cum_mean(t ** 2).diagonal(dim1=-2, dim2=-1)[..., None]
return t * torch.rsqrt(s + eps)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class PostNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.fn(x)
return self.norm(x)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
self.net = nn.Sequential(
            nn.Linear(dim, dim * mult),
            nn.GELU(),
            nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, causal = False, shared_kv = False):
super().__init__()
self.causal = causal
self.heads = heads
self.scale = dim ** -0.5
self.shared_kv = shared_kv
self.num_qkv = 3 if not shared_kv else 2
self.to_qkv = nn.Linear(dim, dim * self.num_qkv, bias = False)
self.to_out = nn.Linear(dim, dim)
self.norm_g = nn.Parameter(torch.ones(1, heads, 1, 1))
self.norm_b = nn.Parameter(torch.zeros(1, heads, 1, 1))
def forward(self, x):
b, n, _, h, device = *x.shape, self.heads, x.device
qkv = self.to_qkv(x)
qkv = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv = self.num_qkv, h = h)
if self.shared_kv:
q, k = qkv
v = k
else:
q, k, v = qkv
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
if self.causal:
mask = torch.ones(n, n, device = device).triu_(1).bool()
dots.masked_fill_(mask, 0.)
normalize_fn = causal_normalize if self.causal else normalize
normed_attn = normalize_fn(dots)
attn = normed_attn * self.norm_g + self.norm_b
if self.causal:
attn.masked_fill_(mask, 0.)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads = 8, causal = False, only_norm = False, shared_kv = False):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PostNorm(dim, Attention(dim, heads, causal = causal, shared_kv = shared_kv))),
Residual(PreNorm(dim, FeedForward(dim))) if not only_norm else nn.Identity(),
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
class TransformerLM(nn.Module):
def __init__(self, *, num_tokens, dim, depth, max_seq_len, heads = 8, causal = False, only_norm = False, shared_kv = False):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.transformer = Transformer(dim, depth, heads, causal = causal, only_norm = only_norm, shared_kv = shared_kv)
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, x, **kwargs):
_, n = x.shape
x = self.token_emb(x)
x += self.pos_emb(torch.arange(n, device=x.device))
x = self.transformer(x)
x = self.to_logits(x)
return x
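# shape-check sketch (added): byte-level token ids in, per-position logits over the vocabulary out.
if __name__ == '__main__':
    lm = TransformerLM(num_tokens = 256, dim = 512, depth = 2, max_seq_len = 512, causal = True, only_norm = True, shared_kv = True)
    tokens = torch.randint(0, 256, (2, 128))
    logits = lm(tokens)    # (2, 128, 256)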
|
__version__ = '1.4.1'
|
from typing import List, Union

import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config

from beartype import beartype

# fewer warning messages, since only the T5 encoder is used
transformers.logging.set_verbosity_error()
# helper functions
def exists(val):
return val is not None
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config = config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
raise ValueError(f'unknown t5 name {name}')
return config.d_model
# encoding text
@beartype
def t5_encode_text(
texts: Union[str, List[str]],
name = DEFAULT_T5_NAME,
output_device = None
):
if isinstance(texts, str):
texts = [texts]
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = 'pt',
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.inference_mode():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask[..., None].bool()
if not exists(output_device):
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
    encoded_text = encoded_text.to(output_device)
    attn_mask = attn_mask.to(output_device)
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
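# usage sketch (added): returns padded token embeddings from the frozen, eval-mode T5 encoder,
# with padded positions zeroed out; running this will download the default T5 checkpoint.
if __name__ == '__main__':
    embeds = t5_encode_text(['a dog barking', 'rain on a tin roof'])
    print(embeds.shape)   # (2, padded_seq_len, 768) for the default google/t5-v1_1-base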
|
import logging
import warnings
from pathlib import Path

import torch
from torch import nn
from einops import rearrange, repeat

from torchaudio.functional import resample

from audiolm_pytorch.utils import curtail_to_multiple  # module path assumed

# suppress a few warnings
def noop(*args, **kwargs):
pass
logging.root.setLevel(logging.ERROR)
warnings.warn = noop
# import fairseq and joblib for hubert
import fairseq
import joblib
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class HubertWithKmeans(nn.Module):
"""
checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
or you can train your own
"""
def __init__(
self,
checkpoint_path,
kmeans_path,
target_sample_hz = 16000,
seq_len_multiple_of = None,
output_layer = 9
):
super().__init__()
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = seq_len_multiple_of
self.output_layer = output_layer
model_path = Path(checkpoint_path)
kmeans_path = Path(kmeans_path)
assert model_path.exists(), f'path {checkpoint_path} does not exist'
assert kmeans_path.exists(), f'path {kmeans_path} does not exist'
checkpoint = torch.load(checkpoint_path)
load_model_input = {checkpoint_path: checkpoint}
model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
self.model = model[0]
self.model.eval()
kmeans = joblib.load(kmeans_path)
self.kmeans = kmeans
self.register_buffer(
'cluster_centers',
torch.from_numpy(kmeans.cluster_centers_)
)
@property
def groups(self):
return 1
@property
def codebook_size(self):
return self.kmeans.n_clusters
@property
def downsample_factor(self):
# todo: double check
return 320
@torch.inference_mode()
def forward(
self,
wav_input,
flatten = True,
input_sample_hz = None
):
batch, device = wav_input.shape[0], wav_input.device
if exists(input_sample_hz):
wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
if exists(self.seq_len_multiple_of):
wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
embed = self.model(
wav_input,
features_only = True,
mask = False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
output_layer = self.output_layer
)['x']
batched_cluster_centers = repeat(self.cluster_centers, 'c d -> b c d', b = embed.shape[0])
dists = -torch.cdist(embed, batched_cluster_centers, p = 2)
clusters = dists.argmax(dim = -1)
if flatten:
return clusters
return rearrange(clusters, 'b ... -> b (...)')
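# usage sketch (added): the paths below are placeholders for the fairseq HuBERT checkpoint and a
# fitted kmeans model (see the class docstring); the output ids serve as semantic tokens.
if __name__ == '__main__':
    wav2vec = HubertWithKmeans(
        checkpoint_path = './hubert_base_ls960.pt',
        kmeans_path = './hubert_base_ls960_L9_km500.bin'
    )
    wav = torch.randn(1, 16000)       # one second of audio at the 16 kHz target rate
    semantic_ids = wav2vec(wav)       # (1, n) cluster indices, one per ~20 ms frame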
|
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
|
import functools
import pickle
from functools import partial
from itertools import cycle, zip_longest
from pathlib import Path
from typing import Optional

import torch
from torch import nn
from torch.autograd import grad as torch_grad
from torch.linalg import vector_norm
import torch.nn.functional as F

import torchaudio.transforms as T
from torchaudio.functional import resample

from einops import rearrange, reduce, pack, unpack

from vector_quantize_pytorch import GroupedResidualVQ

from local_attention import LocalMHA
from local_attention.transformer import FeedForward, DynamicPositionBias  # module path assumed

from packaging import version

# intra-package imports; module paths assumed from the surrounding codebase
from audiolm_pytorch.utils import curtail_to_multiple
from audiolm_pytorch.version import __version__

parsed_version = version.parse(__version__)
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(t, l = 1):
return ((t,) * l) if not isinstance(t, tuple) else t
def filter_by_keys(fn, d):
return {k: v for k, v in d.items() if fn(k)}
def map_keys(fn, d):
return {fn(k): v for k, v in d.items()}
# gan losses
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
def gradient_penalty(wave, output, weight = 10):
batch_size, device = wave.shape[0], wave.device
gradients = torch_grad(
outputs = output,
inputs = wave,
grad_outputs = torch.ones_like(output),
create_graph = True,
retain_graph = True,
only_inputs = True
)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((vector_norm(gradients, dim = 1) - 1) ** 2).mean()
# better sequential
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
# discriminators
class MultiScaleDiscriminator(nn.Module):
def __init__(
self,
channels = 16,
layers = 4,
groups = (4, 16, 64, 256),
chan_max = 1024,
input_channels = 1
):
super().__init__()
self.init_conv = nn.Conv1d(input_channels, channels, 15, padding = 7)
self.conv_layers = nn.ModuleList([])
curr_channels = channels
for _, group in zip(range(layers), groups):
chan_out = min(curr_channels * 4, chan_max)
self.conv_layers.append(nn.Sequential(
nn.Conv1d(curr_channels, chan_out, 41, stride = 4, padding = 20, groups = group),
leaky_relu()
))
curr_channels = chan_out
self.final_conv = nn.Sequential(
nn.Conv1d(curr_channels, curr_channels, 5, padding = 2),
leaky_relu(),
nn.Conv1d(curr_channels, 1, 3, padding = 1),
)
def forward(
self,
x,
return_intermediates = False
):
x = self.init_conv(x)
intermediates = []
for layer in self.conv_layers:
x = layer(x)
intermediates.append(x)
out = self.final_conv(x)
if not return_intermediates:
return out
return out, intermediates
# autoregressive squeeze excitation
# https://arxiv.org/abs/1709.01507
class SqueezeExcite(nn.Module):
def __init__(self, dim, reduction_factor = 4, dim_minimum = 8):
super().__init__()
dim_inner = max(dim_minimum, dim // reduction_factor)
self.net = nn.Sequential(
nn.Conv1d(dim, dim_inner, 1),
nn.SiLU(),
nn.Conv1d(dim_inner, dim, 1),
nn.Sigmoid()
)
def forward(self, x):
seq, device = x.shape[-2], x.device
# cumulative mean - since it is autoregressive
cum_sum = x.cumsum(dim = -2)
denom = torch.arange(1, seq + 1, device = device).float()
cum_mean = cum_sum / rearrange(denom, 'n -> n 1')
# glu gate
gate = self.net(cum_mean)
return x * gate
# complex stft discriminator
class ModReLU(nn.Module):
"""
https://arxiv.org/abs/1705.09792
https://github.com/pytorch/pytorch/issues/47052#issuecomment-718948801
"""
def __init__(self):
super().__init__()
self.b = nn.Parameter(torch.tensor(0.))
def forward(self, x):
return F.relu(torch.abs(x) + self.b) * torch.exp(1.j * torch.angle(x))
class ComplexConv2d(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel_size,
stride = 1,
padding = 0
):
super().__init__()
conv = nn.Conv2d(dim, dim_out, kernel_size, dtype = torch.complex64)
self.weight = nn.Parameter(torch.view_as_real(conv.weight))
self.bias = nn.Parameter(torch.view_as_real(conv.bias))
self.stride = stride
self.padding = padding
def forward(self, x):
weight, bias = map(torch.view_as_complex, (self.weight, self.bias))
x = x.to(weight.dtype)
return F.conv2d(x, weight, bias, stride = self.stride, padding = self.padding)
def ComplexSTFTResidualUnit(chan_in, chan_out, strides):
kernel_sizes = tuple(map(lambda t: t + 2, strides))
paddings = tuple(map(lambda t: t // 2, kernel_sizes))
return nn.Sequential(
Residual(Sequential(
ComplexConv2d(chan_in, chan_in, 3, padding = 1),
ModReLU(),
ComplexConv2d(chan_in, chan_in, 3, padding = 1)
)),
ComplexConv2d(chan_in, chan_out, kernel_sizes, stride = strides, padding = paddings)
)
class ComplexSTFTDiscriminator(nn.Module):
def __init__(
self,
*,
channels = 32,
strides = ((1, 2), (2, 2), (1, 2), (2, 2), (1, 2), (2, 2)),
chan_mults = (1, 2, 4, 4, 8, 8),
input_channels = 1,
n_fft = 1024,
hop_length = 256,
win_length = 1024,
stft_normalized = False,
logits_abs = True
):
super().__init__()
self.init_conv = ComplexConv2d(input_channels, channels, 7, padding = 3)
layer_channels = tuple(map(lambda mult: mult * channels, chan_mults))
layer_channels = (channels, *layer_channels)
layer_channels_pairs = tuple(zip(layer_channels[:-1], layer_channels[1:]))
curr_channels = channels
self.layers = nn.ModuleList([])
for layer_stride, (chan_in, chan_out) in zip(strides, layer_channels_pairs):
self.layers.append(ComplexSTFTResidualUnit(chan_in, chan_out, layer_stride))
self.final_conv = ComplexConv2d(layer_channels[-1], 1, (16, 1)) # todo: remove hardcoded 16
# stft settings
self.stft_normalized = stft_normalized
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
# how to output the logits into real space
self.logits_abs = logits_abs
def forward(self, x, return_intermediates = False):
x = rearrange(x, 'b 1 n -> b n')
        '''
        reference: per the SoundStream paper (https://arxiv.org/pdf/2107.03312.pdf):
        "The STFT-based discriminator is illustrated in Figure 4 and operates on a single scale,
        computing the STFT with a window length of W = 1024 samples and a hop length of
        H = 256 samples"
        '''
x = torch.stft(
x,
self.n_fft,
hop_length = self.hop_length,
win_length = self.win_length,
normalized = self.stft_normalized,
return_complex = True
)
x = rearrange(x, 'b ... -> b 1 ...')
intermediates = []
x = self.init_conv(x)
intermediates.append(x)
for layer in self.layers:
x = layer(x)
intermediates.append(x)
complex_logits = self.final_conv(x)
if self.logits_abs:
complex_logits = complex_logits.abs()
else:
complex_logits = torch.view_as_real(complex_logits)
if not return_intermediates:
return complex_logits
return complex_logits, intermediates
# sound stream
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class CausalConv1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, pad_mode = 'reflect', **kwargs):
super().__init__()
kernel_size = kernel_size
dilation = kwargs.get('dilation', 1)
stride = kwargs.get('stride', 1)
self.pad_mode = pad_mode
self.causal_padding = dilation * (kernel_size - 1) + (1 - stride)
self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, **kwargs)
def forward(self, x):
x = F.pad(x, (self.causal_padding, 0), mode = self.pad_mode)
return self.conv(x)
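# worked example (added): with kernel_size = 7, dilation = 1, stride = 1 the padding above is
# 1 * (7 - 1) + (1 - 1) = 6 samples on the left only, so every output frame depends solely on
# current and past inputs and the sequence length is preserved.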
class CausalConvTranspose1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, stride, **kwargs):
super().__init__()
self.upsample_factor = stride
self.padding = kernel_size - 1
self.conv = nn.ConvTranspose1d(chan_in, chan_out, kernel_size, stride, **kwargs)
def forward(self, x):
n = x.shape[-1]
out = self.conv(x)
out = out[..., :(n * self.upsample_factor)]
return out
def ResidualUnit(chan_in, chan_out, dilation, kernel_size = 7, squeeze_excite = False, pad_mode = 'reflect'):
return Residual(Sequential(
CausalConv1d(chan_in, chan_out, kernel_size, dilation = dilation, pad_mode = pad_mode),
nn.ELU(),
CausalConv1d(chan_out, chan_out, 1, pad_mode = pad_mode),
nn.ELU(),
SqueezeExcite(chan_out) if squeeze_excite else None
))
def EncoderBlock(chan_in, chan_out, stride, cycle_dilations = (1, 3, 9), squeeze_excite = False, pad_mode = 'reflect'):
it = cycle(cycle_dilations)
residual_unit = partial(ResidualUnit, squeeze_excite = squeeze_excite, pad_mode = pad_mode)
return nn.Sequential(
residual_unit(chan_in, chan_in, next(it)),
residual_unit(chan_in, chan_in, next(it)),
residual_unit(chan_in, chan_in, next(it)),
CausalConv1d(chan_in, chan_out, 2 * stride, stride = stride)
)
def DecoderBlock(chan_in, chan_out, stride, cycle_dilations = (1, 3, 9), squeeze_excite = False, pad_mode = 'reflect'):
even_stride = (stride % 2 == 0)
padding = (stride + (0 if even_stride else 1)) // 2
output_padding = 0 if even_stride else 1
residual_unit = partial(ResidualUnit, squeeze_excite = squeeze_excite, pad_mode = pad_mode)
it = cycle(cycle_dilations)
return nn.Sequential(
CausalConvTranspose1d(chan_in, chan_out, 2 * stride, stride = stride),
residual_unit(chan_out, chan_out, next(it)),
residual_unit(chan_out, chan_out, next(it)),
residual_unit(chan_out, chan_out, next(it)),
)
class LocalTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
window_size,
dynamic_pos_bias = False,
**kwargs
):
super().__init__()
self.window_size = window_size
self.layers = nn.ModuleList([])
self.pos_bias = None
if dynamic_pos_bias:
self.pos_bias = DynamicPositionBias(dim = dim // 2, heads = heads)
for _ in range(depth):
self.layers.append(nn.ModuleList([
LocalMHA(dim = dim, heads = heads, qk_rmsnorm = True, window_size = window_size, use_rotary_pos_emb = not dynamic_pos_bias, use_xpos = True, **kwargs),
FeedForward(dim = dim)
]))
def forward(self, x):
w = self.window_size
attn_bias = self.pos_bias(w, w * 2) if exists(self.pos_bias) else None
for attn, ff in self.layers:
x = attn(x, attn_bias = attn_bias) + x
x = ff(x) + x
return x
class FiLM(nn.Module):
def __init__(self, dim, dim_cond):
super().__init__()
self.to_cond = nn.Linear(dim_cond, dim * 2)
def forward(self, x, cond):
gamma, beta = self.to_cond(cond).chunk(2, dim = -1)
return x * gamma + beta
class SoundStream(nn.Module):
def __init__(
self,
*,
channels = 32,
strides = (2, 4, 5, 8),
channel_mults = (2, 4, 8, 16),
codebook_dim = 512,
codebook_size = 1024,
rq_num_quantizers = 8,
rq_commitment_weight = 1.,
rq_ema_decay = 0.95,
rq_quantize_dropout_multiple_of = 1,
rq_groups = 1,
rq_stochastic_sample_codes = False,
rq_kwargs: dict = {},
input_channels = 1,
discr_multi_scales = (1, 0.5, 0.25),
stft_normalized = False,
enc_cycle_dilations = (1, 3, 9),
dec_cycle_dilations = (1, 3, 9),
multi_spectral_window_powers_of_two = tuple(range(6, 12)),
multi_spectral_n_ffts = 512,
multi_spectral_n_mels = 64,
recon_loss_weight = 1.,
multi_spectral_recon_loss_weight = 1e-5,
adversarial_loss_weight = 1.,
feature_loss_weight = 100,
quantize_dropout_cutoff_index = 1,
target_sample_hz = 16000,
use_local_attn = True,
attn_window_size = 128,
attn_dim_head = 64,
attn_heads = 8,
attn_depth = 1,
attn_xpos_scale_base = None,
attn_dynamic_pos_bias = False,
squeeze_excite = False,
complex_stft_discr_logits_abs = True,
pad_mode = 'reflect',
stft_discriminator: Optional[nn.Module] = None # can pass in own stft discriminator
):
super().__init__()
# for autosaving the config
_locals = locals()
_locals.pop('self', None)
_locals.pop('__class__', None)
self._configs = pickle.dumps(_locals)
# rest of the class
self.target_sample_hz = target_sample_hz # for resampling on the fly
self.single_channel = input_channels == 1
self.strides = strides
layer_channels = tuple(map(lambda t: t * channels, channel_mults))
layer_channels = (channels, *layer_channels)
chan_in_out_pairs = tuple(zip(layer_channels[:-1], layer_channels[1:]))
encoder_blocks = []
for ((chan_in, chan_out), layer_stride) in zip(chan_in_out_pairs, strides):
encoder_blocks.append(EncoderBlock(chan_in, chan_out, layer_stride, enc_cycle_dilations, squeeze_excite, pad_mode))
self.encoder = nn.Sequential(
CausalConv1d(input_channels, channels, 7, pad_mode = pad_mode),
*encoder_blocks,
CausalConv1d(layer_channels[-1], codebook_dim, 3, pad_mode = pad_mode)
)
attn_kwargs = dict(
dim = codebook_dim,
dim_head = attn_dim_head,
heads = attn_heads,
depth = attn_depth,
window_size = attn_window_size,
xpos_scale_base = attn_xpos_scale_base,
dynamic_pos_bias = attn_dynamic_pos_bias,
prenorm = True,
causal = True
)
self.encoder_attn = LocalTransformer(**attn_kwargs) if use_local_attn else None
self.encoder_film = FiLM(codebook_dim, dim_cond = 2)
self.num_quantizers = rq_num_quantizers
self.codebook_dim = codebook_dim
self.codebook_size = codebook_size
self.rq_groups = rq_groups
self.rq = GroupedResidualVQ(
dim = codebook_dim,
num_quantizers = rq_num_quantizers,
codebook_size = codebook_size,
groups = rq_groups,
decay = rq_ema_decay,
commitment_weight = rq_commitment_weight,
quantize_dropout_multiple_of = rq_quantize_dropout_multiple_of,
kmeans_init = True,
threshold_ema_dead_code = 2,
quantize_dropout = True,
quantize_dropout_cutoff_index = quantize_dropout_cutoff_index,
stochastic_sample_codes = rq_stochastic_sample_codes,
**rq_kwargs
)
self.decoder_film = FiLM(codebook_dim, dim_cond = 2)
self.decoder_attn = LocalTransformer(**attn_kwargs) if use_local_attn else None
decoder_blocks = []
for ((chan_in, chan_out), layer_stride) in zip(reversed(chan_in_out_pairs), reversed(strides)):
decoder_blocks.append(DecoderBlock(chan_out, chan_in, layer_stride, dec_cycle_dilations, squeeze_excite, pad_mode))
self.decoder = nn.Sequential(
CausalConv1d(codebook_dim, layer_channels[-1], 7, pad_mode = pad_mode),
*decoder_blocks,
CausalConv1d(channels, input_channels, 7, pad_mode = pad_mode)
)
# discriminators
self.discr_multi_scales = discr_multi_scales
self.discriminators = nn.ModuleList([MultiScaleDiscriminator() for _ in range(len(discr_multi_scales))])
discr_rel_factors = [int(s1 / s2) for s1, s2 in zip(discr_multi_scales[:-1], discr_multi_scales[1:])]
self.downsamples = nn.ModuleList([nn.Identity()] + [nn.AvgPool1d(2 * factor, stride = factor, padding = factor) for factor in discr_rel_factors])
self.stft_discriminator = stft_discriminator
if not exists(self.stft_discriminator):
self.stft_discriminator = ComplexSTFTDiscriminator(
stft_normalized = stft_normalized,
logits_abs = complex_stft_discr_logits_abs # whether to output as abs() or use view_as_real
)
# multi spectral reconstruction
self.mel_spec_transforms = nn.ModuleList([])
self.mel_spec_recon_alphas = []
num_transforms = len(multi_spectral_window_powers_of_two)
multi_spectral_n_ffts = cast_tuple(multi_spectral_n_ffts, num_transforms)
multi_spectral_n_mels = cast_tuple(multi_spectral_n_mels, num_transforms)
for powers, n_fft, n_mels in zip_longest(multi_spectral_window_powers_of_two, multi_spectral_n_ffts, multi_spectral_n_mels):
win_length = 2 ** powers
alpha = (win_length / 2) ** 0.5
calculated_n_fft = default(max(n_fft, win_length), win_length) # @AndreyBocharnikov said this is usually win length, but overridable
# if any audio experts have an opinion about these settings, please submit a PR
melspec_transform = T.MelSpectrogram(
sample_rate = target_sample_hz,
n_fft = calculated_n_fft,
win_length = win_length,
hop_length = win_length // 4,
n_mels = n_mels,
normalized = stft_normalized
)
self.mel_spec_transforms.append(melspec_transform)
self.mel_spec_recon_alphas.append(alpha)
# loss weights
self.recon_loss_weight = recon_loss_weight
self.multi_spectral_recon_loss_weight = multi_spectral_recon_loss_weight
self.adversarial_loss_weight = adversarial_loss_weight
self.feature_loss_weight = feature_loss_weight
self.register_buffer('zero', torch.tensor([0.]), persistent = False)
@property
def device(self):
return next(self.parameters()).device
@property
def configs(self):
return pickle.loads(self._configs)
def decode_from_codebook_indices(self, quantized_indices):
quantized_indices = rearrange(quantized_indices, 'b n (g q) -> g b n q', g = self.rq_groups)
codes = self.rq.get_codes_from_indices(quantized_indices)
x = reduce(codes, 'g q b n d -> b n (g d)', 'sum')
return self.decode(x)
def decode(self, x, quantize = False):
if quantize:
x, *_ = self.rq(x)
        if exists(self.decoder_attn):
            x = self.decoder_attn(x)
x = rearrange(x, 'b n c -> b c n')
return self.decoder(x)
def save(self, path):
path = Path(path)
pkg = dict(
model = self.state_dict(),
config = self._configs,
version = __version__
)
torch.save(pkg, str(path))
@classmethod
def init_and_load_from(cls, path, strict = True):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
assert 'config' in pkg, 'model configs were not found in this saved checkpoint'
config = pickle.loads(pkg['config'])
soundstream = cls(**config)
soundstream.load(path, strict = strict)
return soundstream
def load(self, path, strict = True):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
# check version
if 'version' in pkg and version.parse(pkg['version']) < parsed_version:
print(f'soundstream model being loaded was trained on an older version of audiolm-pytorch ({pkg["version"]})')
has_ema = 'ema_model' in pkg
model_pkg = pkg['ema_model'] if has_ema else pkg['model']
if has_ema:
model_pkg = filter_by_keys(lambda k: k.startswith('ema_model.'), model_pkg)
model_pkg = map_keys(lambda k: k[len('ema_model.'):], model_pkg)
self.load_state_dict(model_pkg, strict = strict)
def load_from_trainer_saved_obj(self, path):
path = Path(path)
assert path.exists()
obj = torch.load(str(path))
self.load_state_dict(obj['model'])
def non_discr_parameters(self):
return [
*self.encoder.parameters(),
*self.decoder.parameters(),
*(self.encoder_attn.parameters() if exists(self.encoder_attn) else []),
*(self.decoder_attn.parameters() if exists(self.decoder_attn) else []),
*self.encoder_film.parameters(),
*self.decoder_film.parameters()
]
@property
def seq_len_multiple_of(self):
return functools.reduce(lambda x, y: x * y, self.strides)
@property
def downsample_factor(self):
return self.seq_len_multiple_of
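    # worked example (added): with the default strides (2, 4, 5, 8) the encoder downsamples by
    # 2 * 4 * 5 * 8 = 320, so at target_sample_hz = 16000 each latent frame covers 20 ms and the
    # residual VQ emits 50 frames of codes per second of audio.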
def process_input(
self,
x,
input_sample_hz = None,
curtail_from_left = False
):
x, ps = pack([x], '* n')
if exists(input_sample_hz):
x = resample(x, input_sample_hz, self.target_sample_hz)
x = curtail_to_multiple(x, self.seq_len_multiple_of, from_left = curtail_from_left)
if x.ndim == 2:
x = rearrange(x, 'b n -> b 1 n')
return x, ps
def forward(
self,
x,
target = None,
is_denoising = None, # if you want to learn film conditioners that teach the soundstream to denoise - target would need to be passed in above
return_encoded = False,
return_discr_loss = False,
return_discr_losses_separately = False,
return_loss_breakdown = False,
return_recons_only = False,
input_sample_hz = None,
apply_grad_penalty = False,
curtail_from_left = False
):
assert not (exists(is_denoising) and not exists(target))
process_input = partial(self.process_input, input_sample_hz = input_sample_hz, curtail_from_left = curtail_from_left)
x, ps = process_input(x)
if exists(target):
target, _ = process_input(target)
orig_x = x.clone()
x = self.encoder(x)
x = rearrange(x, 'b c n -> b n c')
if exists(self.encoder_attn):
x = self.encoder_attn(x)
if exists(is_denoising):
denoise_input = torch.tensor([is_denoising, not is_denoising], dtype = x.dtype, device = self.device) # [1, 0] for denoise, [0, 1] for not denoising
x = self.encoder_film(x, denoise_input)
x, indices, commit_loss = self.rq(x)
if return_encoded:
indices = rearrange(indices, 'g b n q -> b n (g q)')
return x, indices, commit_loss
if exists(is_denoising):
x = self.decoder_film(x, denoise_input)
if exists(self.decoder_attn):
x = self.decoder_attn(x)
x = rearrange(x, 'b n c -> b c n')
recon_x = self.decoder(x)
if return_recons_only:
recon_x, = unpack(recon_x, ps, '* c n')
return recon_x
# multi-scale discriminator loss
if return_discr_loss:
real, fake = orig_x, recon_x.detach()
stft_discr_loss = None
stft_grad_penalty = None
discr_losses = []
discr_grad_penalties = []
if self.single_channel:
real, fake = orig_x.clone(), recon_x.detach()
stft_real_logits, stft_fake_logits = map(self.stft_discriminator, (real.requires_grad_(), fake))
stft_discr_loss = hinge_discr_loss(stft_fake_logits, stft_real_logits)
if apply_grad_penalty:
stft_grad_penalty = gradient_penalty(real, stft_discr_loss)
scaled_real, scaled_fake = real, fake
for discr, downsample in zip(self.discriminators, self.downsamples):
scaled_real, scaled_fake = map(downsample, (scaled_real, scaled_fake))
real_logits, fake_logits = map(discr, (scaled_real.requires_grad_(), scaled_fake))
one_discr_loss = hinge_discr_loss(fake_logits, real_logits)
discr_losses.append(one_discr_loss)
if apply_grad_penalty:
discr_grad_penalties.append(gradient_penalty(scaled_real, one_discr_loss))
if not return_discr_losses_separately:
all_discr_losses = torch.stack(discr_losses).mean()
if exists(stft_discr_loss):
all_discr_losses = all_discr_losses + stft_discr_loss
if exists(stft_grad_penalty):
all_discr_losses = all_discr_losses + stft_grad_penalty
return all_discr_losses
# return a list of discriminator losses with List[Tuple[str, Tensor]]
discr_losses_pkg = []
discr_losses_pkg.extend([(f'scale:{scale}', multi_scale_loss) for scale, multi_scale_loss in zip(self.discr_multi_scales, discr_losses)])
discr_losses_pkg.extend([(f'scale_grad_penalty:{scale}', discr_grad_penalty) for scale, discr_grad_penalty in zip(self.discr_multi_scales, discr_grad_penalties)])
if exists(stft_discr_loss):
discr_losses_pkg.append(('stft', stft_discr_loss))
if exists(stft_grad_penalty):
discr_losses_pkg.append(('stft_grad_penalty', stft_grad_penalty))
return discr_losses_pkg
# recon loss
target = default(target, orig_x) # target can also be passed in, in the case of denoising
recon_loss = F.mse_loss(target, recon_x)
# multispectral recon loss - eq (4) and (5) in https://arxiv.org/abs/2107.03312
multi_spectral_recon_loss = self.zero
if self.multi_spectral_recon_loss_weight > 0:
for mel_transform, alpha in zip(self.mel_spec_transforms, self.mel_spec_recon_alphas):
orig_mel, recon_mel = map(mel_transform, (orig_x, recon_x))
log_orig_mel, log_recon_mel = map(log, (orig_mel, recon_mel))
l1_mel_loss = (orig_mel - recon_mel).abs().sum(dim = -2).mean()
l2_log_mel_loss = alpha * vector_norm(log_orig_mel - log_recon_mel, dim = -2).mean()
multi_spectral_recon_loss = multi_spectral_recon_loss + l1_mel_loss + l2_log_mel_loss
# adversarial loss
adversarial_losses = []
discr_intermediates = []
# adversarial loss for multi-scale discriminators
real, fake = orig_x, recon_x
# features from stft
(stft_real_logits, stft_real_intermediates), (stft_fake_logits, stft_fake_intermediates) = map(partial(self.stft_discriminator, return_intermediates=True), (real, fake))
discr_intermediates.append((stft_real_intermediates, stft_fake_intermediates))
scaled_real, scaled_fake = real, fake
for discr, downsample in zip(self.discriminators, self.downsamples):
scaled_real, scaled_fake = map(downsample, (scaled_real, scaled_fake))
(real_logits, real_intermediates), (fake_logits, fake_intermediates) = map(partial(discr, return_intermediates = True), (scaled_real, scaled_fake))
discr_intermediates.append((real_intermediates, fake_intermediates))
one_adversarial_loss = hinge_gen_loss(fake_logits)
adversarial_losses.append(one_adversarial_loss)
feature_losses = []
for real_intermediates, fake_intermediates in discr_intermediates:
losses = [F.l1_loss(real_intermediate, fake_intermediate) for real_intermediate, fake_intermediate in zip(real_intermediates, fake_intermediates)]
feature_losses.extend(losses)
feature_loss = torch.stack(feature_losses).mean()
# adversarial loss for stft discriminator
adversarial_losses.append(hinge_gen_loss(stft_fake_logits))
adversarial_loss = torch.stack(adversarial_losses).mean()
# sum commitment loss
all_commitment_loss = commit_loss.sum()
total_loss = recon_loss * self.recon_loss_weight + multi_spectral_recon_loss * self.multi_spectral_recon_loss_weight + adversarial_loss * self.adversarial_loss_weight + feature_loss * self.feature_loss_weight + all_commitment_loss
if return_loss_breakdown:
return total_loss, (recon_loss, multi_spectral_recon_loss, adversarial_loss, feature_loss, all_commitment_loss)
return total_loss
# some default soundstreams
def AudioLMSoundStream(
strides = (2, 4, 5, 8),
target_sample_hz = 16000,
rq_num_quantizers = 12,
**kwargs
):
return SoundStream(
strides = strides,
target_sample_hz = target_sample_hz,
rq_num_quantizers = rq_num_quantizers,
**kwargs
)
def MusicLMSoundStream(
strides = (3, 4, 5, 8),
target_sample_hz = 24000,
rq_num_quantizers = 12,
**kwargs
):
return SoundStream(
strides = strides,
target_sample_hz = target_sample_hz,
rq_num_quantizers = rq_num_quantizers,
**kwargs
)
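# end-to-end usage sketch (added): random audio stands in for real waveforms; the same call with
# return_encoded = True would instead yield the quantized latents and codebook indices.
if __name__ == '__main__':
    soundstream = AudioLMSoundStream()            # 16 kHz preset defined above
    audio = torch.randn(2, 320 * 256)             # two clips of ~5.1 s at 16 kHz (a multiple of the 320x downsample)
    loss = soundstream(audio)                     # total generator-side loss (recon + spectral + adversarial + commit)
    loss.backward()
    recons = soundstream(audio, return_recons_only = True)   # (2, 1, 320 * 256) reconstructed waveform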
|
from collections import namedtuple
from functools import wraps
from packaging import version

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange

# constants
Config = namedtuple('Config', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
causal = self.causal
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
if causal:
causal_mask = torch.ones((q_len, k_len), device = q.device, dtype = torch.bool).triu(k_len - q_len + 1)
mask = mask & ~causal_mask
causal = False
config = self.cuda_config if is_cuda else self.cpu_config
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out
def forward(self, q, k, v, mask = None, attn_bias = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
assert not exists(attn_bias), 'attention bias not supported for flash attention'
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k) * scale
# attention bias
if exists(attn_bias):
sim = sim + attn_bias
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = sim.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
return out
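# shape sketch (added): queries are multi-headed while keys / values are single-headed
# (note the 'b j d' in the einsums above), matching how they are projected elsewhere in this codebase.
if __name__ == '__main__':
    attend = Attend(causal = True)
    q = torch.randn(2, 8, 128, 64)    # (batch, heads, seq, dim_head)
    k = torch.randn(2, 128, 64)       # (batch, seq, dim_head)
    v = torch.randn(2, 128, 64)
    out = attend(q, k, v)             # (2, 8, 128, 64)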
|
from torch import nn

# functions
def round_down_nearest_multiple(num, divisor):
return num // divisor * divisor
def curtail_to_multiple(t, mult, from_left = False):
data_len = t.shape[-1]
rounded_seq_len = round_down_nearest_multiple(data_len, mult)
seq_slice = slice(None, rounded_seq_len) if not from_left else slice(-rounded_seq_len, None)
return t[..., seq_slice]
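# worked example (added): data_len = 16695 and mult = 320 give rounded_seq_len = 52 * 320 = 16640,
# so the 55 trailing samples are dropped (or the 55 leading samples when from_left = True).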
# base class
class AudioConditionerBase(nn.Module):
pass
|
import logging
from pathlib import Path

import torch
from torch import nn
from einops import rearrange

import fairseq
from torchaudio.functional import resample

from audiolm_pytorch.utils import curtail_to_multiple  # module path assumed

logging.root.setLevel(logging.ERROR)
def exists(val):
return val is not None
class FairseqVQWav2Vec(nn.Module):
"""
checkpoint path can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/wav2vec/README.md#vq-wav2vec
specifically download the kmeans model for now
$ wget https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt
"""
def __init__(
self,
checkpoint_path,
target_sample_hz = 24000,
seq_len_multiple_of = None
):
super().__init__()
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = seq_len_multiple_of
path = Path(checkpoint_path)
assert path.exists(), f'path {checkpoint_path} does not exist'
checkpoint = torch.load(checkpoint_path)
load_model_input = {checkpoint_path: checkpoint}
model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
self.model = model[0]
self.model.eval()
assert hasattr(self.model, 'vector_quantizer') and hasattr(self.model.vector_quantizer, 'embedding'), 'the vq wav2vec model does not seem to be valid'
@property
def groups(self):
return self.model.vector_quantizer.groups
@property
def downsample_factor(self):
# todo: double check architecture
return 80
@property
def codebook_size(self):
return self.model.vector_quantizer.embedding.shape[0]
@torch.inference_mode()
def forward(
self,
wav_input,
flatten = True,
input_sample_hz = None
):
if exists(input_sample_hz):
wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
if exists(self.seq_len_multiple_of):
wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
embed = self.model.feature_extractor(wav_input)
_, codebook_indices = self.model.vector_quantizer.forward_idx(embed)
if not flatten:
return codebook_indices
return rearrange(codebook_indices, 'b ... -> b (...)')
|
from torch.optim import Adam, AdamW
from lion_pytorch import Lion

def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
use_lion = False,
**kwargs
):
has_wd = wd > 0
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and has_wd:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if use_lion:
return Lion(params, lr = lr, betas = betas, weight_decay = wd)
if not has_wd:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
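# usage sketch (added): parameters with ndim < 2 (biases, norm scales) are grouped out of weight decay.
if __name__ == '__main__':
    import torch.nn as nn
    net = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    optim = get_optimizer(net.parameters(), lr = 3e-4, wd = 1e-2)   # AdamW with two param groups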
|
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def maybe(fn):
if not exists(fn):
return always(None)
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
def ceil_div(numer, denom):
return (numer + denom - 1) // denom
def remainder_needed_until_multiple(n, mult):
return (ceil_div(n, mult) * mult) - n
def round_down_nearest_multiple(val, mult):
return (val // mult) * mult
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# tensor helpers
def generate_mask_with_prob(shape, mask_prob, device):
seq = shape[-1]
rand = torch.randn(shape, device = device)
rand[:, 0] = -torch.finfo(rand.dtype).max
num_mask = min(int(seq * mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros(shape, device = device).scatter(1, indices, 1.).bool()
return mask
# attention related utils
def grad_shrink(t, alpha = 0.1):
return t * alpha + t.detach() * (1 - alpha)
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t + eps)
def l2norm(t):
return F.normalize(t, dim = -1)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def mask_out_after_eos_id(t, eos_id, mask_value = -1, keep_eos = True):
eos_mask = (t == eos_id).float()
if keep_eos:
eos_mask = F.pad(eos_mask, (1, -1))
after_eos_mask = eos_mask.cumsum(dim = -1) > 0
return t.masked_fill(after_eos_mask, mask_value)
def all_rows_have_eos_id(t, eos_id):
eos_mask = (t == eos_id)
return torch.any(eos_mask, dim = -1).all()
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# removing unique consecutives in the semantic token ids
# important detail noted by @eonglints
def append_eos_id(ids, eos_id):
b, device = ids.shape[0], ids.device
eos_ids = torch.ones(1, device = device).long() * eos_id
eos_ids = repeat(eos_ids, '1 -> b 1', b = b)
ids = torch.cat((ids, eos_ids), dim = -1)
return ids
def batch_unique_consecutive(t, pad_value = 0.):
unique_arr = [torch.unique_consecutive(el) for el in t.unbind(dim = 0)]
return pad_sequence(unique_arr, batch_first = True, padding_value = pad_value)
# function for getting embeds from nn.Embedding but with padding as some designated value (-1) outside the range of the embed table
@beartype
def get_embeds(
embeddings: nn.Embedding,
codes: torch.Tensor,
pad_id = -1,
return_mask = False,
mask_pad_pos_to = 0
):
pad_mask = codes == pad_id
codes_without_pad = codes.masked_fill(pad_mask, 0) # just retrieve first code as dummy
embeds = embeddings(codes_without_pad)
if exists(mask_pad_pos_to):
embeds = embeds.masked_fill(rearrange(pad_mask, '... -> ... 1'), mask_pad_pos_to)
if return_mask:
return embeds, ~pad_mask
return embeds
# bias-less layernorm, as used in more recent T5s and PaLM, and in @borisdayma's experiments shared with me
# greater stability
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# relative positional bias
class RelativePositionBias(nn.Module):
""" from https://arxiv.org/abs/2111.09883 """
def __init__(
self,
*,
dim,
heads,
layers = 3
):
super().__init__()
self.net = nn.ModuleList([])
self.net.append(nn.Sequential(nn.Linear(1, dim), nn.SiLU()))
for _ in range(layers - 1):
self.net.append(nn.Sequential(nn.Linear(dim, dim), nn.SiLU()))
self.net.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, n):
device = self.device
pos = torch.arange(n, device = device)
rel_pos = (rearrange(pos, 'i -> i 1') - rearrange(pos, 'j -> 1 j'))
rel_pos += (n - 1)
x = torch.arange(-n + 1, n, device = device).float()
x = rearrange(x, '... -> ... 1')
for layer in self.net:
x = layer(x)
x = x[rel_pos]
return rearrange(x, 'i j h -> h i j')
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, dropout = 0.1):
inner_dim = int(dim * 2 * mult / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
LayerNorm(inner_dim),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
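# note (added): inner_dim uses a 2/3 factor so the gated (GEGLU) feedforward keeps roughly the same
# parameter count as a standard non-gated feedforward with the same `mult`, since the first
# projection is doubled to produce both the value and the gate.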
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False,
num_null_kv = 0,
dropout = 0.1,
scale = 8,
flash = False
):
super().__init__()
self.heads = heads
self.causal = causal
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity()
self.attn_dropout = nn.Dropout(dropout)
self.num_null_kv = num_null_kv
self.null_kv = nn.Parameter(torch.randn(2, num_null_kv, dim_head)) if num_null_kv > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.attend = Attend(
flash = flash,
dropout = dropout,
causal = causal
)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
prefix_context = None,
prefix_context_mask = None
):
b, n, _, device = *x.shape, x.device
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
        # take care of prefix-based self attention conditioning
        # make sure to either concat to the self attention mask or lengthen it accordingly
if exists(prefix_context):
kv_input = torch.cat((prefix_context, kv_input), dim = -2)
prefix_seq_len = prefix_context.shape[-2]
if not exists(mask):
mask = torch.ones((b, n), device = device, dtype = torch.bool)
if exists(prefix_context_mask):
mask = torch.cat((prefix_context_mask, mask), dim = -1)
else:
mask = F.pad(mask, (prefix_seq_len, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (prefix_seq_len, 0), value = 0.)
# prenorm
x = self.norm(x)
# project for queries, keys, values
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
# null key / values
if self.num_null_kv > 0:
null_k, null_v = repeat(self.null_kv, 'kv n d -> kv b n d', b = b).unbind(dim = 0)
k = torch.cat((null_k, k), dim = -2)
v = torch.cat((null_v, v), dim = -2)
# split for multi-headed attention
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# handle mask and null key / value
if exists(mask):
mask = F.pad(mask, (self.num_null_kv, 0), value = True)
# attention
out = self.attend(q, k, v, attn_bias = attn_bias, mask = mask)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# transformer
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
dim_context = None,
cross_attend = False,
attn_dropout = 0.,
ff_dropout = 0.,
grad_shrink_alpha = 0.1,
cond_as_self_attn_prefix = False,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
assert not (cross_attend and cond_as_self_attn_prefix)
self.dim_context = default(dim_context, dim)
self.cond_as_self_attn_prefix = cond_as_self_attn_prefix
self.grad_shrink = partial(grad_shrink, alpha = grad_shrink_alpha)
self.layers = nn.ModuleList([])
self.rel_pos_bias = RelativePositionBias(dim = dim // 2, heads = heads) if rel_pos_bias else None
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dropout = attn_dropout, flash = flash_attn, causal = True, **kwargs),
Attention(dim = dim, heads = heads, dropout = attn_dropout, dim_context = dim_context, flash = flash_attn, num_null_kv = 1, norm_context = True, **kwargs) if cross_attend else None,
FeedForward(dim = dim, dropout = ff_dropout)
]))
self.norm = LayerNorm(dim)
def forward(
self,
x,
self_attn_mask = None,
context = None,
context_mask = None,
attn_bias = None
):
assert not (self.cond_as_self_attn_prefix and not exists(context))
assert not (exists(context) and context.shape[-1] != self.dim_context), f'you had specified a conditioning dimension of {self.dim_context}, yet what was received by the transformer has dimension of {context.shape[-1]}'
n, device = x.shape[1], x.device
x = self.grad_shrink(x) # from cogview paper, adopted by GLM 130B LLM, decreases likelihood of attention net instability
if exists(attn_bias):
rel_pos_bias = attn_bias
else:
rel_pos_bias = maybe(self.rel_pos_bias)(n)
self_attn_kwargs = dict()
if self.cond_as_self_attn_prefix:
self_attn_kwargs = dict(
prefix_context = context,
prefix_context_mask = context_mask
)
for attn, cross_attn, ff in self.layers:
x = attn(x, attn_bias = rel_pos_bias, mask = self_attn_mask, **self_attn_kwargs) + x
if exists(cross_attn):
assert exists(context)
x = cross_attn(x, context = context, mask = context_mask) + x
x = ff(x) + x
return self.norm(x)
# the three hierarchical transformers
class SemanticTransformer(nn.Module):
@beartype
def __init__(
self,
*,
dim,
depth,
num_semantic_tokens,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
cond_dim = None,
has_condition = False,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
self.num_semantic_tokens = num_semantic_tokens
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.start_token = nn.Parameter(torch.randn(dim))
self.semantic_embedding = nn.Embedding(num_semantic_tokens + 1, dim)
self.eos_id = num_semantic_tokens
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
grad_shrink_alpha = grad_shrink_alpha,
rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn,
**kwargs
)
self.to_logits = nn.Linear(dim, num_semantic_tokens + 1)
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
@beartype
def forward(
self,
*,
ids = None,
return_loss = False,
text: Optional[List[str]] = None,
text_embeds = None,
self_attn_mask = None,
cond_drop_prob = None,
unique_consecutive = None
):
device = self.device
b = ids.shape[0]
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
text_mask = None
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
if exists(text_embeds):
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
if return_loss:
labels, ids = ids.clone(), ids[:, :-1]
tokens = get_embeds(self.semantic_embedding, ids)
start_tokens = repeat(self.start_token, 'd -> b 1 d', b = ids.shape[0])
tokens = torch.cat((start_tokens, tokens), dim = 1)
if exists(self_attn_mask):
self_attn_mask = F.pad(self_attn_mask, (1, 0), value = True)
tokens = self.transformer(tokens, context = text_embeds, self_attn_mask = self_attn_mask, context_mask = text_mask)
return self.to_logits(tokens)
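# a minimal sketch of the classifier free guidance performed by forward_with_cond_scale above: the
# model is run once with conditioning kept and once with it dropped, and the conditional logits are
# pushed away from the unconditional ones by cond_scale. names here are illustrative only.
def _cfg_sketch(cond_logits, null_logits, cond_scale = 3.):
    # cond_scale = 1. recovers the purely conditional logits
    return null_logits + (cond_logits - null_logits) * cond_scale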
class CoarseTransformer(nn.Module):
@beartype
def __init__(
self,
*,
codebook_size,
num_coarse_quantizers,
dim,
depth,
num_semantic_tokens,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
has_condition = False,
cond_dim = None,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
project_semantic_logits = True,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
self.num_semantic_tokens = num_semantic_tokens
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.semantic_start_token = nn.Parameter(torch.randn(dim))
self.coarse_start_token = nn.Parameter(torch.randn(dim))
self.semantic_eos_id = num_semantic_tokens
self.semantic_embedding = nn.Embedding(num_semantic_tokens + 1, dim)
self.coarse_eos_id = codebook_size
codebook_size_with_eos = codebook_size + 1
self.coarse_embedding = nn.Embedding(num_coarse_quantizers * codebook_size_with_eos, dim)
self.coarse_quantize_embedding = nn.Embedding(num_coarse_quantizers, dim)
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.cross_attn_bias = nn.Parameter(torch.zeros(heads, 1, 1)) if rel_pos_bias else None
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
grad_shrink_alpha = grad_shrink_alpha,
rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn,
**kwargs
)
self.codebook_size = codebook_size
self.num_coarse_quantizers = num_coarse_quantizers
self.to_semantic_logits = nn.Linear(dim, num_semantic_tokens + 1) if project_semantic_logits else None
self.coarse_logit_weights = nn.Parameter(torch.randn(num_coarse_quantizers, codebook_size_with_eos, dim))
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
semantic_logits, coarse_logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return semantic_logits, coarse_logits
null_semantic_logits, null_coarse_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_semantic_logits = None
if exists(null_semantic_logits):
scaled_semantic_logits = null_semantic_logits + (semantic_logits - null_semantic_logits) * cond_scale
scaled_coarse_logits = null_coarse_logits + (coarse_logits - null_coarse_logits) * cond_scale
return scaled_semantic_logits, scaled_coarse_logits
@beartype
def forward(
self,
*,
semantic_token_ids,
coarse_token_ids,
self_attn_mask = None,
text: Optional[List[str]] = None,
text_embeds = None,
cond_drop_prob = None,
return_only_coarse_logits = False
):
b, device = semantic_token_ids.shape[0], semantic_token_ids.device
arange = partial(torch.arange, device = device)
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = None
if exists(text_embeds):
text_mask = torch.any(text_embeds != 0, dim = -1)
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
coarse_token_ids, semantic_token_ids = map(lambda t: rearrange(t, 'b ... -> b (...)'), (coarse_token_ids, semantic_token_ids))
offsets = self.codebook_size * arange(self.num_coarse_quantizers)
offsets = repeat(offsets, 'q -> 1 (n q)', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
offsets = offsets[:, :coarse_token_ids.shape[-1]]
coarse_token_ids = coarse_token_ids + offsets
semantic_tokens = get_embeds(self.semantic_embedding, semantic_token_ids)
coarse_tokens = self.coarse_embedding(coarse_token_ids)
coarse_quantize_tokens = repeat(self.coarse_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
coarse_quantize_tokens = coarse_quantize_tokens[:coarse_token_ids.shape[-1], ...]
coarse_tokens = coarse_tokens + coarse_quantize_tokens
semantic_seq_len = semantic_tokens.shape[1]
semantic_start_tokens = repeat(self.semantic_start_token, 'd -> b 1 d', b = b)
coarse_start_tokens = repeat(self.coarse_start_token, 'd -> b 1 d', b = b)
tokens = torch.cat((
semantic_start_tokens,
semantic_tokens,
coarse_start_tokens,
coarse_tokens
), dim = 1)
# engineer the attention bias so that cross attention is not dominated by relative positions
seq_len = tokens.shape[-2]
attn_bias = None
if exists(self.transformer.rel_pos_bias):
attn_bias = self.transformer.rel_pos_bias(seq_len)
is_semantic = arange(seq_len) < (semantic_seq_len + 1) # semantic seq len + start token
is_cross_attn = rearrange(is_semantic, 'i -> i 1') ^ rearrange(is_semantic, 'j -> 1 j')
attn_bias = torch.where(
is_cross_attn,
self.cross_attn_bias,
attn_bias
)
# attend
tokens = self.transformer(
tokens,
context = text_embeds,
attn_bias = attn_bias,
self_attn_mask = self_attn_mask,
context_mask = text_mask
)
pred_semantic_tokens, pred_coarse_tokens = tokens[:, :semantic_seq_len], tokens[:, (semantic_seq_len + 1):]
# semantic logits
semantic_logits = self.to_semantic_logits(pred_semantic_tokens) if not return_only_coarse_logits and exists(self.to_semantic_logits) else None
# get coarse logits
n = pred_coarse_tokens.shape[1]
nq = round_down_nearest_multiple(n, self.num_coarse_quantizers)
pred_coarse_tokens_groupable, pred_coarse_tokens_remainder = pred_coarse_tokens[:, :nq], pred_coarse_tokens[:, nq:]
pred_coarse_tokens_groupable = rearrange(pred_coarse_tokens_groupable, 'b (n q) d -> b n q d', q = self.num_coarse_quantizers)
coarse_logits_groupable = einsum('q c d, b n q d -> b n q c', self.coarse_logit_weights, pred_coarse_tokens_groupable)
coarse_logits_groupable = rearrange(coarse_logits_groupable, 'b n q c -> b (n q) c')
remainder_num_quantizers = pred_coarse_tokens_remainder.shape[1]
if remainder_num_quantizers > 0:
coarse_logits_remainder = einsum('q c d, b q d -> b q c', self.coarse_logit_weights[:remainder_num_quantizers], pred_coarse_tokens_remainder)
coarse_logits = torch.cat((coarse_logits_groupable, coarse_logits_remainder), dim = 1)
else:
coarse_logits = coarse_logits_groupable
return semantic_logits, coarse_logits
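# illustrative sketch of the per-quantizer offset trick used in CoarseTransformer.forward above:
# flattened acoustic token ids from several residual quantizers share a single nn.Embedding, so each
# quantizer's ids are shifted into its own slice of size codebook_size. the values below are made up.
def _quantizer_offset_sketch():
    import torch
    codebook_size, num_quantizers, timesteps = 1024, 3, 2
    token_ids = torch.randint(0, codebook_size, (1, timesteps * num_quantizers))
    offsets = codebook_size * torch.arange(num_quantizers)      # (0, 1024, 2048)
    offsets = offsets.repeat(timesteps)[:token_ids.shape[-1]]   # tiled per timestep: (0, 1024, 2048, 0, ...)
    return token_ids + offsets                                  # now indexes a (num_quantizers * codebook_size) embedding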
class FineTransformer(nn.Module):
def __init__(
self,
*,
num_coarse_quantizers,
num_fine_quantizers,
codebook_size,
dim,
depth,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
has_condition = False,
cond_dim = None,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
project_coarse_logits = True,
pad_id = -1,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.num_coarse_quantizers = num_coarse_quantizers
self.coarse_start_token = nn.Parameter(torch.randn(dim))
self.fine_start_token = nn.Parameter(torch.randn(dim))
self.coarse_embedding = nn.Embedding(num_coarse_quantizers * codebook_size, dim)
self.fine_embedding = nn.Embedding(num_fine_quantizers * codebook_size, dim)
self.coarse_quantize_embedding = nn.Embedding(num_coarse_quantizers, dim)
self.fine_quantize_embedding = nn.Embedding(num_fine_quantizers, dim)
self.pad_id = pad_id
self.eos_id = codebook_size
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
rel_pos_bias = False,
grad_shrink_alpha = grad_shrink_alpha,
flash_attn = flash_attn,
**kwargs
)
# doing a specialized attn bias so that corresponding time steps at fine and coarse sequences attend to each other better
self.null_pos_bias = nn.Parameter(torch.randn(heads, 1, 1)) if rel_pos_bias else None
pos_bias_mlp_dim = dim // 2
self.pos_bias_mlp = nn.Sequential(
nn.Linear(2, pos_bias_mlp_dim),
nn.SiLU(),
nn.Linear(pos_bias_mlp_dim, pos_bias_mlp_dim),
nn.SiLU(),
nn.Linear(pos_bias_mlp_dim, heads)
) if rel_pos_bias else None
self.codebook_size = codebook_size
self.num_coarse_quantizers = num_coarse_quantizers
self.num_fine_quantizers = num_fine_quantizers
self.coarse_logit_weights = nn.Parameter(torch.randn(num_coarse_quantizers, codebook_size, dim)) if project_coarse_logits else None
self.fine_logit_weights = nn.Parameter(torch.randn(num_fine_quantizers, codebook_size, dim))
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
coarse_logits, fine_logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return coarse_logits, fine_logits
null_coarse_logits, null_fine_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_coarse_logits = None
if exists(null_coarse_logits):
scaled_coarse_logits = null_coarse_logits + (coarse_logits - null_coarse_logits) * cond_scale
scaled_fine_logits = null_fine_logits + (fine_logits - null_fine_logits) * cond_scale
return scaled_coarse_logits, scaled_fine_logits
def forward(
self,
coarse_token_ids,
fine_token_ids,
text: Optional[List[str]] = None,
text_embeds = None,
cond_drop_prob = None,
self_attn_mask = None,
return_only_fine_logits = False
):
b, device = coarse_token_ids.shape[0], coarse_token_ids.device
# handle text conditioning
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
text_mask = None
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
if exists(text_embeds):
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
coarse_token_ids, fine_token_ids = map(lambda t: rearrange(t, 'b ... -> b (...)'), (coarse_token_ids, fine_token_ids))
# do not attend to any of the coarse padding tokens or coarse end token either
coarse_self_attn_mask = (coarse_token_ids != self.pad_id) & (coarse_token_ids != self.eos_id)
coarse_token_ids = coarse_token_ids.masked_fill(~coarse_self_attn_mask, 0)
fine_token_seq_len = fine_token_ids.shape[-1]
coarse_self_attn_mask = F.pad(coarse_self_attn_mask, (1, fine_token_seq_len + 1), value = True)
if exists(self_attn_mask):
self_attn_mask &= coarse_self_attn_mask
else:
self_attn_mask = coarse_self_attn_mask
# prepare coarse and fine token embeddings
b, n = coarse_token_ids.shape
coarse_length = coarse_token_ids.shape[-1]
coarse_offsets = torch.arange(self.num_coarse_quantizers, device = device)
coarse_seq_length = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers)
coarse_offsets = repeat(coarse_offsets, 'q -> (n q)', n = coarse_seq_length)
coarse_offsets = coarse_offsets[:coarse_length]
coarse_token_ids = coarse_token_ids + rearrange(coarse_offsets, '... -> 1 ...') * self.codebook_size
fine_length = fine_token_ids.shape[-1]
fine_offsets = torch.arange(self.num_fine_quantizers, device = device)
fine_seq_length = ceil_div(fine_token_ids.shape[-1], self.num_fine_quantizers)
fine_offsets = repeat(fine_offsets, 'q -> (n q)', n = fine_seq_length)
fine_offsets = fine_offsets[:fine_length]
fine_token_ids = fine_token_ids + rearrange(fine_offsets, '... -> 1 ...') * self.codebook_size
coarse_tokens = self.coarse_embedding(coarse_token_ids)
fine_tokens = self.fine_embedding(fine_token_ids)
coarse_quantize_tokens = repeat(self.coarse_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
coarse_quantize_tokens = coarse_quantize_tokens[:coarse_token_ids.shape[-1], ...]
coarse_tokens = coarse_tokens + coarse_quantize_tokens
fine_quantize_tokens = repeat(self.fine_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(fine_token_ids.shape[-1], self.num_fine_quantizers))
fine_quantize_tokens = fine_quantize_tokens[:fine_token_ids.shape[-1], ...]
fine_tokens = fine_tokens + fine_quantize_tokens
coarse_start_tokens = repeat(self.coarse_start_token, 'd -> b 1 d', b = b)
fine_start_tokens = repeat(self.fine_start_token, 'd -> b 1 d', b = b)
tokens = torch.cat((
coarse_start_tokens,
coarse_tokens,
fine_start_tokens,
fine_tokens
), dim = 1)
# an engineered attention bias so coarse and fine sequences attend to each other better
attn_bias = None
if exists(self.pos_bias_mlp):
max_seq_len = max(coarse_seq_length, fine_seq_length)
coarse_pos = torch.arange(coarse_seq_length, device = device)
fine_pos = torch.arange(fine_seq_length, device = device)
coarse_pos = repeat(coarse_pos, 'n -> (n q)', q = self.num_coarse_quantizers)[:coarse_length]
fine_pos = repeat(fine_pos, 'n -> (n q)', q = self.num_fine_quantizers)[:fine_length]
coarse_pos = F.pad(coarse_pos, (1, 0), value = -1)
fine_pos = F.pad(fine_pos, (1, 0), value = -1)
seq_positions = torch.cat((coarse_pos, fine_pos), dim = -1)
coarse_offsets = F.pad(coarse_offsets, (1, 0), value = 0)
fine_offsets = fine_offsets + self.num_coarse_quantizers
fine_offsets = F.pad(fine_offsets, (1, 0), value = 0)
seq_offsets = torch.cat((coarse_offsets, fine_offsets), dim = -1)
pos_mlp_input = torch.stack((seq_positions.clamp(min = 0), seq_offsets), dim = -1)
num_offsets = self.num_fine_quantizers + self.num_coarse_quantizers
# relative positions are always (2 * N - 1), where N is the length of the dimension
rel_seq_len, rel_offsets = map(lambda n: 2 * n - 1, (max_seq_len, num_offsets))
# get all relative distances
rel_dist = (rearrange(pos_mlp_input, 'i c -> i 1 c') - rearrange(pos_mlp_input, 'j c -> 1 j c'))
# get all possible relative distances for the attention bias to be computed from the mlp
# which would be - (2 * N - 1) * (2 * Q - 1) - where N = sequence length and Q = total quantizers
rel_seq_len_range = repeat(torch.arange(rel_seq_len, device = device), 'n -> (n q)', q = rel_offsets)
rel_offset_range = repeat(torch.arange(rel_offsets, device = device), 'q -> (n q)', n = rel_seq_len)
mlp_inputs = torch.stack((rel_seq_len_range, rel_offset_range), dim = -1)
# implicitly parameterized relative distances, by sequence and quantizer positions
attn_bias = self.pos_bias_mlp(mlp_inputs.float())
# translate coordinates of (rel_seq_pos, rel_quantizer_offset) -> positive index to select from attn bias
rel_dist_seq_pos, rel_dist_seq_offset = rel_dist.unbind(dim = -1)
rel_dist_seq_pos += max_seq_len - 1
rel_dist_seq_offset += num_offsets - 1
rel_dist_indices = rel_dist_seq_pos * rel_offsets + rel_dist_seq_offset
# select the relative positional attention bias outputted by the MLP
# savings go from (N * Q) ^ 2 -> ~ (4 * N * Q)
attn_bias = attn_bias[rel_dist_indices]
attn_bias = rearrange(attn_bias, '... h -> h ...')
# need to make sure start token has a custom positional bias
is_start_token_seq = seq_positions == -1
start_token_mask = rearrange(is_start_token_seq, 'i -> i 1') | rearrange(is_start_token_seq, 'j -> 1 j')
attn_bias = torch.where(
start_token_mask,
self.null_pos_bias,
attn_bias,
)
# attention
tokens = self.transformer(
tokens,
context = text_embeds,
self_attn_mask = self_attn_mask,
context_mask = text_mask,
attn_bias = attn_bias
)
pred_coarse_tokens, pred_fine_tokens = tokens[:, :n], tokens[:, (n + 1):]
# get coarse logits
pred_coarse_seq_len = pred_coarse_tokens.shape[1]
padding = remainder_needed_until_multiple(pred_coarse_seq_len, self.num_coarse_quantizers)
if padding != 0:
pred_coarse_tokens = F.pad(pred_coarse_tokens, (0, 0, 0, padding), value = 0.)
pred_coarse_tokens = rearrange(pred_coarse_tokens, 'b (n q) d -> b n q d', q = self.num_coarse_quantizers)
coarse_logits = None
if not return_only_fine_logits and exists(self.coarse_logit_weights):
coarse_logits = einsum('q c d, b n q d -> b n q c', self.coarse_logit_weights, pred_coarse_tokens)
coarse_logits = rearrange(coarse_logits, 'b n q c -> b (n q) c')
coarse_logits = coarse_logits[:, :pred_coarse_seq_len]
# get fine logits
pred_fine_seq_len = pred_fine_tokens.shape[1]
nq = round_down_nearest_multiple(pred_fine_seq_len, self.num_fine_quantizers)
pred_fine_tokens_groupable, pred_fine_tokens_remainder = pred_fine_tokens[:, :nq], pred_fine_tokens[:, nq:]
pred_fine_tokens_groupable = rearrange(pred_fine_tokens_groupable, 'b (n q) d -> b n q d', q = self.num_fine_quantizers)
fine_logits_groupable = einsum('q c d, b n q d -> b n q c', self.fine_logit_weights, pred_fine_tokens_groupable)
fine_logits_groupable = rearrange(fine_logits_groupable, 'b n q c -> b (n q) c')
remainder_num_quantizers = pred_fine_tokens_remainder.shape[1]
if remainder_num_quantizers > 0:
fine_logits_remainder = einsum('q c d, b q d -> b q c', self.fine_logit_weights[:remainder_num_quantizers], pred_fine_tokens_remainder)
fine_logits = torch.cat((fine_logits_groupable, fine_logits_remainder), dim = 1)
else:
fine_logits = fine_logits_groupable
return coarse_logits, fine_logits
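# sketch of the continuous relative position bias lookup used by FineTransformer above: the MLP is
# evaluated once over every possible (relative timestep, relative quantizer) coordinate, roughly
# (2N - 1) * (2Q - 1) inputs instead of (N * Q)^2, and each query/key pair then just gathers its row
# by translating its pairwise difference into a flat index. shapes and names here are illustrative.
def _rel_pos_index_sketch(seq_positions, quantizer_offsets, max_seq_len, num_quantizers):
    import torch
    from einops import rearrange
    coords = torch.stack((seq_positions, quantizer_offsets), dim = -1)          # (n, 2)
    rel = rearrange(coords, 'i c -> i 1 c') - rearrange(coords, 'j c -> 1 j c') # all pairwise differences
    rel_time, rel_quant = rel.unbind(dim = -1)
    rel_time = rel_time + (max_seq_len - 1)                                     # shift into [0, 2N - 2]
    rel_quant = rel_quant + (num_quantizers - 1)                                # shift into [0, 2Q - 2]
    return rel_time * (2 * num_quantizers - 1) + rel_quant                      # flat index into the precomputed bias table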
# training wrappers
class SemanticTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: SemanticTransformer,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
pad_id = -1,
unique_consecutive = True,
mask_prob = 0.15
):
super().__init__()
self.wav2vec = wav2vec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
assert not exists(self.wav2vec) or self.wav2vec.codebook_size == transformer.num_semantic_tokens, f'num_semantic_tokens on SemanticTransformer must be set to {self.wav2vec.codebook_size}'
self.unique_consecutive = unique_consecutive
self.pad_id = pad_id
self.eos_id = transformer.eos_id
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
def embed_text(self, text):
return self.transformer.embed_text(text, output_device = self.device)
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
max_length,
text: Optional[List[str]] = None,
text_embeds = None,
prime_wave = None,
prime_wave_input_sample_hz = None,
prime_ids = None,
batch_size = 1,
cond_scale = 3,
filter_thres = 0.9,
temperature = 1.,
        include_eos_in_output = True, # if doing hierarchical sampling, the eos must be kept in the output so downstream stages have an easier time
**kwargs
):
device = self.device
# derive wav2vec ids from the input wave
if exists(prime_wave):
assert not exists(prime_ids)
assert exists(self.wav2vec)
ids = self.wav2vec(
prime_wave,
flatten = False,
input_sample_hz = prime_wave_input_sample_hz
)
elif exists(prime_ids):
ids = prime_ids
else:
ids = torch.empty((batch_size, 0), dtype = torch.long, device = device)
if self.unique_consecutive:
ids = batch_unique_consecutive(ids, pad_value = self.pad_id)
# derive joint audio-text embeddings if needed
if exists(self.audio_conditioner) and exists(prime_wave):
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = prime_wave, namespace = 'semantic')
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
# start length and get running id output
batch = ids.shape[0]
start_length = ids.shape[-1]
sample_semantic_ids = ids.clone()
last_logit_indices = (ids != self.pad_id).sum(dim = -1).long()
# sample from transformer
for ind in tqdm(range(start_length, max_length), desc = 'generating semantic'):
logits = self.transformer.forward_with_cond_scale(
ids = sample_semantic_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
**kwargs
)
last_logit_indices_expanded = repeat(last_logit_indices, 'b -> b 1 c', b = batch, c = logits.shape[-1])
last_logits = logits.gather(1, last_logit_indices_expanded)
last_logits = rearrange(last_logits, 'b 1 c -> b c')
filtered_logits = top_k(last_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sample_semantic_ids = torch.cat((sample_semantic_ids, sampled), dim = -1)
if all_rows_have_eos_id(sample_semantic_ids, self.eos_id):
break
last_logit_indices += 1
        sample_semantic_ids = mask_out_after_eos_id(sample_semantic_ids, self.eos_id, keep_eos = include_eos_in_output)
return sample_semantic_ids
def forward(
self,
*,
semantic_token_ids = None,
raw_wave = None,
text = None,
text_embeds = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) or exists(semantic_token_ids), 'either raw waveform (raw_wave) is given or semantic token ids are given (semantic_token_ids)'
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'semantic')
if not exists(semantic_token_ids):
            assert exists(self.wav2vec), 'VQWav2Vec must be provided if given raw wave for training'
semantic_token_ids = self.wav2vec(raw_wave, flatten = False)
semantic_token_ids = rearrange(semantic_token_ids, 'b ... -> b (...)')
if self.training:
semantic_token_ids = append_eos_id(semantic_token_ids, self.transformer.eos_id)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value = self.pad_id)
input_ids = semantic_token_ids
if return_loss:
input_ids = semantic_token_ids[:, :-1]
self_attn_mask = None
if self.mask_prob > 0. and self.training:
self_attn_mask = generate_mask_with_prob(input_ids.shape, self.mask_prob, input_ids.device)
logits = self.transformer(
ids = input_ids,
text = text,
text_embeds = text_embeds,
self_attn_mask = self_attn_mask,
**kwargs
)
if not return_loss:
return logits
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
semantic_token_ids,
ignore_index = self.pad_id
)
return loss
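# simplified sketch of the "forgetful causal masking" controlled by mask_prob in the wrapper above:
# a random subset of positions is hidden from self attention during training, acting as structured
# dropout on the context. this is a stand-in, not the library's generate_mask_with_prob helper.
def _forgetful_mask_sketch(shape, mask_prob, device = None):
    import torch
    # True = position can be attended to this step, False = temporarily dropped
    return torch.zeros(shape, device = device).float().uniform_(0, 1) > mask_prob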
class CoarseTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: CoarseTransformer,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
pad_id = -1,
unique_consecutive = True,
semantic_cross_entropy_loss_weight = 1.,
mask_prob = 0.15
):
super().__init__()
self.codec = codec
self.wav2vec = wav2vec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
self.unique_consecutive = unique_consecutive
self.pad_id = pad_id
self.semantic_cross_entropy_loss_weight = semantic_cross_entropy_loss_weight
        self.num_coarse_quantizers = transformer.num_coarse_quantizers * (codec.rq_groups if exists(codec) else 1) # codec is optional, so guard the rq_groups access
self.semantic_eos_id = transformer.semantic_eos_id
self.coarse_eos_id = transformer.coarse_eos_id
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
semantic_token_ids,
prime_wave: Optional[Tensor] = None,
prime_wave_input_sample_hz = None,
prime_coarse_token_ids: Optional[Tensor] = None,
text: Optional[List[str]] = None,
text_embeds = None,
max_time_steps = 512,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
reconstruct_wave = False,
**kwargs
):
batch, device = semantic_token_ids.shape[0], self.device
semantic_token_ids = semantic_token_ids.to(device)
# initialize coarse token ids
# if a prime audio wave was supplied, then start off with appropriate acoustic tokens
assert not (exists(prime_wave) and exists(prime_coarse_token_ids)), 'you can either pass in the prime as a raw wave (codec required) or as preprocessed acoustic token ids'
if exists(prime_coarse_token_ids):
coarse_token_ids = prime_coarse_token_ids
elif exists(prime_wave):
assert exists(self.codec)
with torch.inference_mode():
self.codec.eval()
_, indices, _ = self.codec(
prime_wave,
return_encoded = True,
input_sample_hz = prime_wave_input_sample_hz
)
coarse_token_ids = indices[..., :self.num_coarse_quantizers]
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
else:
coarse_token_ids = torch.empty((batch, 0), device = device, dtype = torch.long)
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value=self.pad_id)
# initialize
init_coarse_time_step = 0
sampled_coarse_token_ids = coarse_token_ids.clone()
for time_step in tqdm(range(init_coarse_time_step, max_time_steps), desc = 'generating coarse'):
for ind in range(self.num_coarse_quantizers):
just_finished_quantizer_step = (ind == 0 and time_step > 0)
_, coarse_logits = self.transformer.forward_with_cond_scale(
coarse_token_ids = sampled_coarse_token_ids,
semantic_token_ids = semantic_token_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
return_only_coarse_logits = True,
**kwargs
)
last_coarse_logits = coarse_logits[:, -1]
if not just_finished_quantizer_step:
                    last_coarse_logits[:, -1] = float('-inf') # prevent eos from being sampled in the middle of a time step
filtered_logits = top_k(last_coarse_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sampled_coarse_token_ids = torch.cat((sampled_coarse_token_ids, sampled), dim = -1)
sampled_coarse_token_ids = mask_out_after_eos_id(sampled_coarse_token_ids, self.coarse_eos_id, keep_eos = False)
sampled_coarse_token_ids = rearrange(sampled_coarse_token_ids, 'b (n q) -> b n q', q = self.num_coarse_quantizers)
if not reconstruct_wave:
return sampled_coarse_token_ids
assert exists(self.codec)
wav = self.codec.decode_from_codebook_indices(sampled_coarse_token_ids)
return rearrange(wav, 'b 1 n -> b n')
def forward(
self,
*,
semantic_token_ids = None,
raw_wave = None,
raw_wave_for_codec = None,
text = None,
text_embeds = None,
coarse_token_ids = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) or exists(semantic_token_ids), 'either raw waveform (raw_wave) is given or semantic token ids are given (semantic_token_ids)'
raw_wave_for_codec = default(raw_wave_for_codec, raw_wave)
        assert exists(raw_wave_for_codec) or exists(coarse_token_ids), 'either raw waveform (raw_wave_for_codec) is given, or coarse token ids (coarse_token_ids)'
assert not all(map(exists, (raw_wave, raw_wave_for_codec, semantic_token_ids, coarse_token_ids)))
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'coarse') # technically audio embeds, but shared text-audio joint embedding space for mulan
if not exists(semantic_token_ids):
            assert exists(self.wav2vec), 'VQWav2Vec must be provided if given raw wave for training'
semantic_token_ids = self.wav2vec(raw_wave, flatten = False)
if not exists(coarse_token_ids):
assert exists(self.codec), 'Codec must be provided if given raw wave for training'
with torch.inference_mode():
self.codec.eval()
_, indices, _ = self.codec(raw_wave_for_codec, return_encoded = True)
batch, num_timesteps = raw_wave_for_codec.shape
num_frames = int(num_timesteps / self.codec.seq_len_multiple_of)
assert indices.shape[0] == batch and indices.shape[1] == num_frames, \
f'Expected indices to have shape (batch, num_frames, num_coarse_quantizers + num_fine_quantizers), but got {indices.shape}'
coarse_token_ids = indices[..., :self.num_coarse_quantizers]
semantic_token_ids = rearrange(semantic_token_ids, 'b ... -> b (...)')
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
if self.training:
semantic_token_ids = append_eos_id(semantic_token_ids, self.transformer.semantic_eos_id)
coarse_token_ids = append_eos_id(coarse_token_ids, self.transformer.coarse_eos_id)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value = self.pad_id)
if return_loss:
semantic_labels, coarse_labels = semantic_token_ids, coarse_token_ids.clone()
coarse_token_ids = coarse_token_ids[:, :-1]
# self attention mask would omit any padding and eos tokens in the semantic prime
self_attn_mask = (semantic_token_ids != self.pad_id) & (semantic_token_ids != self.semantic_eos_id)
semantic_token_ids = semantic_token_ids.masked_fill(~self_attn_mask, 0)
coarse_token_len = coarse_token_ids.shape[-1]
self_attn_mask = F.pad(self_attn_mask, (1, coarse_token_len + 1), value = True) # attend to semantic bos and all coarse tokens
# forgetful causal mask - structured dropout
if self.mask_prob > 0 and self.training:
self_attn_mask &= generate_mask_with_prob(self_attn_mask.shape, self.mask_prob, device = self_attn_mask.device)
semantic_logits, coarse_logits = self.transformer(
semantic_token_ids = semantic_token_ids,
coarse_token_ids = coarse_token_ids,
self_attn_mask = self_attn_mask,
text = text,
text_embeds = text_embeds,
**kwargs
)
# whether to early return the logits
if not return_loss:
return semantic_logits, coarse_logits
coarse_logits, semantic_logits = map(lambda t: maybe(rearrange)(t, 'b n c -> b c n'), (coarse_logits, semantic_logits))
if self.unique_consecutive:
num_coarse_logits, _num_semantic_logits = coarse_labels.numel(), (semantic_labels != self.pad_id).sum()
else:
num_coarse_logits, _num_semantic_logits = coarse_logits.shape[-1], semantic_logits.shape[-1]
semantic_loss = 0.
num_semantic_logits = 0
if self.semantic_cross_entropy_loss_weight > 0 and exists(semantic_logits):
num_semantic_logits = _num_semantic_logits
semantic_loss = F.cross_entropy(
semantic_logits,
semantic_labels,
ignore_index = self.pad_id
)
coarse_loss = F.cross_entropy(
coarse_logits,
coarse_labels,
ignore_index = self.pad_id
)
return (
semantic_loss * num_semantic_logits * self.semantic_cross_entropy_loss_weight +
coarse_loss * num_coarse_logits
) / (num_semantic_logits + num_coarse_logits)
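# sketch of how the wrapper above combines its two cross entropy terms: each loss is weighted by the
# number of logits that produced it, so the result remains an average per-token loss.
def _weighted_loss_sketch(semantic_loss, num_semantic, coarse_loss, num_coarse, semantic_weight = 1.):
    return (semantic_loss * num_semantic * semantic_weight + coarse_loss * num_coarse) / (num_semantic + num_coarse)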
class FineTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: FineTransformer,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
coarse_cross_entropy_loss_weight = 1.,
pad_id = -1,
mask_prob = 0.15
):
super().__init__()
self.codec = codec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
        rq_groups = codec.rq_groups if exists(codec) else 1 # codec is optional, so guard the rq_groups access
        self.num_fine_quantizers = transformer.num_fine_quantizers * rq_groups
        self.num_coarse_quantizers = transformer.num_coarse_quantizers * rq_groups
if exists(codec):
assert (self.num_fine_quantizers + self.num_coarse_quantizers) == (codec.num_quantizers * codec.rq_groups), 'number of fine and coarse quantizers on fine transformer must add up to total number of quantizers on codec'
self.eos_id = transformer.eos_id
assert self.num_coarse_quantizers > 0
self.pad_id = pad_id
self.coarse_cross_entropy_loss_weight = coarse_cross_entropy_loss_weight
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
coarse_token_ids,
prime_wave: Optional[Tensor] = None,
prime_wave_input_sample_hz = None,
prime_fine_token_ids: Optional[Tensor] = None,
text: Optional[List[str]] = None,
text_embeds = None,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
reconstruct_wave = False,
mask_out_generated_fine_tokens = False,
**kwargs
):
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
batch, device = coarse_token_ids.shape[0], self.device
coarse_token_ids = coarse_token_ids.to(device)
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
# initialize fine token ids
# if a prime wave was supplied, start off with fine acoustic tokens
assert not (exists(prime_wave) and exists(prime_fine_token_ids)), 'you can either pass in the prime as a raw wave (codec required) or as preprocessed acoustic token ids'
if exists(prime_fine_token_ids):
fine_token_ids = prime_fine_token_ids
elif exists(prime_wave):
assert exists(self.codec)
with torch.inference_mode():
self.codec.eval()
_, token_ids, _ = self.codec(
prime_wave,
return_encoded = True,
input_sample_hz = prime_wave_input_sample_hz
)
fine_token_ids = token_ids[..., self.num_coarse_quantizers:]
fine_token_ids = rearrange(fine_token_ids, 'b ... -> b (...)')
else:
fine_token_ids = torch.empty((batch, 0), device = device, dtype = torch.long)
# calculate number of sampling steps
init_fine_time_step = fine_token_ids.shape[-1] // self.num_fine_quantizers
max_time_steps = coarse_token_ids.shape[1] // self.num_coarse_quantizers
sampled_fine_token_ids = fine_token_ids.clone()
for time_step in tqdm(range(init_fine_time_step, max_time_steps), desc = 'generating fine'):
for ind in range(self.num_fine_quantizers):
just_finished_quantizer_step = (ind == 0 and time_step > 0)
_, fine_logits = self.transformer.forward_with_cond_scale(
coarse_token_ids = coarse_token_ids,
fine_token_ids = sampled_fine_token_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
return_only_fine_logits = True,
**kwargs
)
last_fine_logits = fine_logits[:, -1]
if not just_finished_quantizer_step:
                    last_fine_logits[:, -1] = float('-inf') # prevent eos from being sampled in the middle of a time step
filtered_logits = top_k(last_fine_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sampled_fine_token_ids = torch.cat((sampled_fine_token_ids, sampled), dim = -1)
sampled_fine_token_ids = mask_out_after_eos_id(sampled_fine_token_ids, self.eos_id, keep_eos = False)
# reshape coarse and fine tokens for quantization dimension
sampled_fine_token_ids = rearrange(sampled_fine_token_ids, 'b (n q) -> b n q', q = self.num_fine_quantizers)
coarse_token_ids = rearrange(coarse_token_ids, 'b (n q) -> b n q', q = self.num_coarse_quantizers)
# whether to mask out fine token positions where the coarse token ids are all padding (variable lengthed training)
if mask_out_generated_fine_tokens:
pos_is_all_padding = (coarse_token_ids == self.pad_id).all(dim = -1, keepdim = True)
sampled_fine_token_ids = sampled_fine_token_ids.masked_fill(pos_is_all_padding, self.pad_id)
# if not reconstructing wave, return just the fine token ids
if not reconstruct_wave:
return sampled_fine_token_ids
# reconstruct the wave using codec, concatting the fine and coarse token ids together first across quantization dimension
assert exists(self.codec)
coarse_and_fine_ids = torch.cat((coarse_token_ids, sampled_fine_token_ids), dim = -1)
wav = self.codec.decode_from_codebook_indices(coarse_and_fine_ids)
return rearrange(wav, 'b 1 n -> b n')
def forward(
self,
*,
raw_wave = None,
text = None,
text_embeds = None,
token_ids = None,
coarse_token_ids = None,
fine_token_ids = None,
return_loss = False,
**kwargs
):
        assert exists(raw_wave) ^ (exists(token_ids) ^ (exists(coarse_token_ids) and exists(fine_token_ids))), 'either raw waveform (raw_wave) is given, or coarse and fine token ids (coarse_token_ids, fine_token_ids)'
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'fine') # technically audio embeds, but shared text-audio joint embedding space for mulan
if exists(raw_wave):
assert exists(self.codec), 'Codec must be provided if given raw wave for training'
with torch.inference_mode():
self.codec.eval()
_, token_ids, _ = self.codec(raw_wave, return_encoded = True)
batch, num_timesteps = raw_wave.shape
num_frames = int(num_timesteps / self.codec.seq_len_multiple_of)
assert token_ids.shape == torch.Size((batch, num_frames, self.num_coarse_quantizers + self.num_fine_quantizers)), \
f'Expected token ids to have shape (batch, num_frames, num_coarse_quantizers + num_fine_quantizers), but got {token_ids.shape}'
if exists(token_ids):
coarse_token_ids, fine_token_ids = token_ids[..., :self.num_coarse_quantizers], token_ids[..., self.num_coarse_quantizers:]
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
fine_token_ids = rearrange(fine_token_ids, 'b ... -> b (...)')
# if training, determine labels, should remove one from fine token ids
if return_loss:
coarse_labels = coarse_token_ids
fine_labels = fine_token_ids
fine_token_ids = fine_token_ids[:, :-1]
# forgetful causal mask - structured dropout
self_attn_mask = None
if self.mask_prob > 0 and self.training:
mask_shape = (
coarse_token_ids.shape[0],
coarse_token_ids.shape[-1] + fine_token_ids.shape[-1] + 2
)
self_attn_mask = generate_mask_with_prob(mask_shape, self.mask_prob, device = self.device)
coarse_logits, fine_logits = self.transformer(
coarse_token_ids = coarse_token_ids,
fine_token_ids = fine_token_ids,
self_attn_mask = self_attn_mask,
text = text,
text_embeds = text_embeds,
**kwargs
)
# early return the logits
if not return_loss:
return coarse_logits, fine_logits
coarse_logits, fine_logits = map(lambda t: maybe(rearrange)(t, 'b n c -> b c n'), (coarse_logits, fine_logits))
num_fine_logits = fine_logits.shape[-1]
num_coarse_logits = 0
coarse_loss = 0.
if self.coarse_cross_entropy_loss_weight > 0 and exists(coarse_logits):
num_coarse_logits = coarse_logits.shape[-1]
coarse_loss = F.cross_entropy(
coarse_logits,
coarse_labels,
ignore_index = self.pad_id
)
fine_loss = F.cross_entropy(
fine_logits,
fine_labels,
ignore_index = self.pad_id
)
return (
coarse_loss * num_coarse_logits * self.coarse_cross_entropy_loss_weight +
fine_loss * num_fine_logits
) / (num_coarse_logits + num_fine_logits)
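# simplified sketch of the sampling step used by the generate methods above: keep only the top
# fraction of logits (filter_thres = 0.9 keeps roughly the top 10%) and sample with gumbel noise at
# the given temperature. this stands in for the library's top_k / gumbel_sample helpers and is not
# their exact implementation.
def _filtered_gumbel_sample_sketch(logits, filter_thres = 0.9, temperature = 1.):
    import math
    import torch
    k = max(1, math.ceil((1 - filter_thres) * logits.shape[-1]))
    val, ind = logits.topk(k, dim = -1)
    filtered = torch.full_like(logits, float('-inf')).scatter_(-1, ind, val)
    gumbel = -torch.log(-torch.log(torch.rand_like(filtered).clamp(min = 1e-20)).clamp(min = 1e-20))
    return ((filtered / max(temperature, 1e-10)) + gumbel).argmax(dim = -1)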
# audio LM
class AudioLM(nn.Module):
@beartype
def __init__(
self,
*,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
codec: Union[SoundStream, EncodecWrapper],
semantic_transformer: SemanticTransformer,
coarse_transformer: CoarseTransformer,
fine_transformer: FineTransformer,
audio_conditioner: Optional[AudioConditionerBase] = None,
unique_consecutive = True
):
super().__init__()
self.audio_conditioner = audio_conditioner
assert semantic_transformer.num_semantic_tokens == coarse_transformer.num_semantic_tokens
assert coarse_transformer.codebook_size == fine_transformer.codebook_size
assert coarse_transformer.num_coarse_quantizers == fine_transformer.num_coarse_quantizers
assert (fine_transformer.num_coarse_quantizers + fine_transformer.num_fine_quantizers) == codec.num_quantizers
self.semantic_has_condition = semantic_transformer.has_condition
self.coarse_has_condition = coarse_transformer.has_condition
self.fine_has_condition = fine_transformer.has_condition
self.needs_text = any([self.semantic_has_condition, self.coarse_has_condition, self.fine_has_condition])
self.semantic = SemanticTransformerWrapper(
wav2vec = wav2vec,
transformer = semantic_transformer,
audio_conditioner = audio_conditioner,
unique_consecutive = unique_consecutive
)
self.coarse = CoarseTransformerWrapper(
wav2vec = wav2vec,
codec = codec,
transformer = coarse_transformer,
audio_conditioner = audio_conditioner,
unique_consecutive = unique_consecutive
)
self.fine = FineTransformerWrapper(
            codec = codec,
transformer = fine_transformer,
audio_conditioner = audio_conditioner
)
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
def forward(
self,
*,
batch_size = 1,
text: Optional[List[str]] = None,
text_embeds: Optional[Tensor] = None,
prime_wave = None,
prime_wave_input_sample_hz = None,
prime_wave_path = None,
max_length = 2048,
return_coarse_generated_wave = False,
mask_out_generated_fine_tokens = False
):
assert not (self.needs_text and (not exists(text) and not exists(text_embeds))), 'text needs to be passed in if one of the transformer requires conditioning'
if self.needs_text:
if exists(text):
text_embeds = self.semantic.embed_text(text)
assert not (exists(prime_wave) and exists(prime_wave_path)), 'prompt audio must be given as either `prime_wave: Tensor` or `prime_wave_path: str`'
if exists(prime_wave):
assert exists(prime_wave_input_sample_hz), 'the input sample frequency for the prompt audio must be given as `prime_wave_input_sample_hz: int`'
prime_wave = prime_wave.to(self.device)
elif exists(prime_wave_path):
prime_wave_path = Path(prime_wave_path)
            assert prime_wave_path.exists(), f'file does not exist at {str(prime_wave_path)}'
prime_wave, prime_wave_input_sample_hz = torchaudio.load(str(prime_wave_path))
prime_wave = prime_wave.to(self.device)
semantic_token_ids = self.semantic.generate(
text_embeds = text_embeds if self.semantic_has_condition else None,
batch_size = batch_size,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
max_length = max_length
)
coarse_token_ids_or_recon_wave = self.coarse.generate(
text_embeds = text_embeds if self.coarse_has_condition else None,
semantic_token_ids = semantic_token_ids,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
reconstruct_wave = return_coarse_generated_wave
)
if return_coarse_generated_wave:
return coarse_token_ids_or_recon_wave
generated_wave = self.fine.generate(
text_embeds = text_embeds if self.fine_has_condition else None,
coarse_token_ids = coarse_token_ids_or_recon_wave,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
reconstruct_wave = True,
mask_out_generated_fine_tokens = mask_out_generated_fine_tokens
)
return generated_wave
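# minimal usage sketch for the full AudioLM stack above; the text prompt and lengths are hypothetical,
# and the text is only used if the underlying transformers were built with has_condition = True
def _audiolm_usage_sketch(audiolm):
    # runs the semantic -> coarse -> fine cascade internally and returns raw waveforms
    return audiolm(text = ['dog barking in the distance'], batch_size = 1, max_length = 2048)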
|
# imports for the trainer code that follows (the source module path is assumed)
from audiolm_pytorch.audiolm_pytorch import (
    SemanticTransformer,
SemanticTransformerWrapper,
CoarseTransformer,
CoarseTransformerWrapper,
FineTransformer,
FineTransformerWrapper,
FairseqVQWav2Vec,
HubertWithKmeans
)
# constants
DEFAULT_SAMPLE_RATE = 16000
# make sure only one trainer is instantiated
ONE_TRAINER_INSTANTIATED = False
def check_one_trainer():
global ONE_TRAINER_INSTANTIATED
assert not ONE_TRAINER_INSTANTIATED, 'only one Trainer can be instantiated at a time for training'
ONE_TRAINER_INSTANTIATED = True
# for automatically routing data emitted from a dataset to keywords of the transformer wrappers
DATASET_FIELD_TYPE_CONFIG = dict(
raw_wave = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim in {2, 3}]
],
text = List[str],
text_embeds = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim == 3]
],
)
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
# auto data to module keyword argument routing functions
def has_duplicates(tup):
counts = dict(Counter(tup))
return any(filter(lambda count: count > 1, counts.values()))
def determine_types(data, config):
output = []
for el in data:
for name, data_type in config.items():
if is_bearable(el, data_type):
output.append(name)
break
else:
raise TypeError(f'unable to determine type of {data}')
return tuple(output)
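# illustrative only: a dataset row of (float waveform tensor, list of captions) is routed by
# determine_types above onto the ('raw_wave', 'text') keywords of the transformer wrappers
def _data_routing_sketch():
    import torch
    wave = torch.randn(1, 16000)           # float tensor with ndim 2 matches 'raw_wave'
    captions = ['a short test caption']    # List[str] matches 'text'
    return determine_types((wave, captions), DATASET_FIELD_TYPE_CONFIG)  # -> ('raw_wave', 'text')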
def checkpoint_num_steps(checkpoint_path):
"""Returns the number of steps trained from a checkpoint based on the filename.
Filename format assumed to be something like "/path/to/semantic.transformer.20000.pt" which is
for 20k train steps. Returns 20000 in that case.
"""
results = re.findall(r'\d+', str(checkpoint_path))
if len(results) == 0:
return 0
return int(results[-1])
# main trainer class
class SoundStreamTrainer(nn.Module):
@beartype
def __init__(
self,
soundstream: SoundStream,
*,
num_train_steps: int,
batch_size: int,
        data_max_length: Optional[int] = None,
        data_max_length_seconds: Optional[Union[int, float]] = None,
        folder: Optional[str] = None,
        train_dataloader: Optional[DataLoader] = None,
        val_dataloader: Optional[DataLoader] = None,
lr: float = 2e-4,
grad_accum_every: int = 4,
wd: float = 0.,
max_grad_norm: float = 0.5,
        discr_max_grad_norm: Optional[float] = None,
        save_results_every: int = 100,
        save_model_every: int = 1000,
        log_losses_every: int = 1,
results_folder: str = './results',
valid_frac: float = 0.05,
random_split_seed: int = 42,
use_ema: bool = True,
ema_beta: float = 0.995,
ema_update_after_step: int = 500,
ema_update_every: int = 10,
apply_grad_penalty_every: int = 4,
dl_num_workers: int = 0,
        accelerator: Optional[Accelerator] = None,
accelerate_kwargs: dict = dict(),
dataloader_drop_last = True,
split_batches = False,
use_lion: bool = False,
        force_clear_prev_results: Optional[bool] = None # set to True | False to skip the prompt
):
"""
Initialize with a SoundStream instance and either a folder containing audio data or
train/val DataLoader instances.
"""
super().__init__()
check_one_trainer()
if accelerator:
self.accelerator = accelerator
assert len(accelerate_kwargs) == 0
else:
kwargs = DistributedDataParallelKwargs(find_unused_parameters = True)
self.accelerator = Accelerator(
kwargs_handlers = [kwargs],
split_batches = split_batches,
**accelerate_kwargs
)
self.soundstream = soundstream
self.use_ema = use_ema
if self.use_ema:
self.ema_soundstream = EMA(soundstream, beta = ema_beta, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
hyperparameters = {
"num_train_steps": num_train_steps,
"batch_size": batch_size,
"gradient_accum_every": grad_accum_every,
"learning_rate": lr,
"target_sample_hz": soundstream.target_sample_hz,
}
# optimizers
self.optim = get_optimizer(soundstream.non_discr_parameters(), lr = lr, wd = wd)
for discr_optimizer_key, discr in self.multiscale_discriminator_iter():
one_multiscale_discr_optimizer = get_optimizer(discr.parameters(), lr = lr, wd = wd)
setattr(self, discr_optimizer_key, one_multiscale_discr_optimizer)
self.discr_optim = get_optimizer(soundstream.stft_discriminator.parameters(), lr = lr, wd = wd, use_lion = use_lion)
# max grad norm
self.max_grad_norm = max_grad_norm
self.discr_max_grad_norm = discr_max_grad_norm
if folder is None:
assert train_dataloader is not None
assert val_dataloader is not None
self.dl = train_dataloader
self.valid_dl = val_dataloader
else:
assert train_dataloader is None
assert val_dataloader is None
# create dataset
if exists(data_max_length_seconds):
assert not exists(data_max_length)
data_max_length = int(data_max_length_seconds * soundstream.target_sample_hz)
else:
assert exists(data_max_length)
hyperparameters['data_max_length'] = data_max_length
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = soundstream.target_sample_hz,
seq_len_multiple_of = soundstream.seq_len_multiple_of
)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, num_workers = dl_num_workers, shuffle = True, drop_last = dataloader_drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, num_workers = dl_num_workers, shuffle = True, drop_last = dataloader_drop_last)
# prepare with accelerator
(
self.soundstream,
self.optim,
self.discr_optim,
self.dl
) = self.accelerator.prepare(
self.soundstream,
self.optim,
self.discr_optim,
self.dl
)
# prepare the multiscale discriminators with accelerator
for name, _ in self.multiscale_discriminator_iter():
optimizer = getattr(self, name)
optimizer = self.accelerator.prepare(optimizer)
setattr(self, name, optimizer)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.log_losses_every = log_losses_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
        if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
# Initialize experiment trackers if an external Accelerator is not passed in
if not accelerator:
self.accelerator.init_trackers("soundstream", config=hyperparameters)
assert self.accelerator.distributed_type != DistributedType.FSDP, 'FSDP not supported for soundstream trainer due to complex-valued stft discriminator'
def set_model_as_ema_model_(self):
""" this will force the main 'online' model to have same parameters as the exponentially moving averaged model """
assert self.use_ema
self.ema_soundstream.ema_model.load_state_dict(self.soundstream.state_dict())
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.soundstream),
optim = self.optim.state_dict(),
config = self.unwrapped_soundstream._configs,
discr_optim = self.discr_optim.state_dict(),
version = __version__
)
if self.use_ema:
pkg['ema_model'] = self.ema_soundstream.state_dict()
for key, _ in self.multiscale_discriminator_iter():
discr_optim = getattr(self, key)
pkg[key] = discr_optim.state_dict()
torch.save(pkg, path)
@property
def unwrapped_soundstream(self):
return self.accelerator.unwrap_model(self.soundstream)
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
# if loading from old version, make a hacky guess
if len(pkg.keys()) > 20:
self.unwrapped_soundstream.load_state_dict(pkg)
if self.use_ema:
self.ema_soundstream.ema_model.load_state_dict(pkg)
return
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
# otherwise load things normally
self.unwrapped_soundstream.load_state_dict(pkg['model'])
if self.use_ema:
assert 'ema_model' in pkg
self.ema_soundstream.load_state_dict(pkg['ema_model'])
self.optim.load_state_dict(pkg['optim'])
self.discr_optim.load_state_dict(pkg['discr_optim'])
for key, _ in self.multiscale_discriminator_iter():
discr_optim = getattr(self, key)
discr_optim.load_state_dict(pkg[key])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def multiscale_discriminator_iter(self):
for ind, discr in enumerate(self.unwrapped_soundstream.discriminators):
yield f'multiscale_discr_optimizer_{ind}', discr
def multiscale_discriminator_optim_iter(self):
for name, _ in self.multiscale_discriminator_iter():
yield name, getattr(self, name)
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
apply_grad_penalty = self.apply_grad_penalty_every > 0 and not (steps % self.apply_grad_penalty_every)
log_losses = self.log_losses_every > 0 and not (steps % self.log_losses_every)
self.soundstream.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
wave, = next(self.dl_iter)
wave = wave.to(device)
loss, (recon_loss, multi_spectral_recon_loss, adversarial_loss, feature_loss, all_commitment_loss) = self.soundstream(wave, return_loss_breakdown = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, dict(
loss = loss.item() / self.grad_accum_every,
recon_loss = recon_loss.item() / self.grad_accum_every,
))
if log_losses:
accum_log(logs, dict(
multi_spectral_recon_loss = multi_spectral_recon_loss.item() / self.grad_accum_every,
adversarial_loss = adversarial_loss.item() / self.grad_accum_every,
feature_loss = feature_loss.item() / self.grad_accum_every,
all_commitment_loss = all_commitment_loss.item() / self.grad_accum_every,
))
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.soundstream.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# update discriminator
self.discr_optim.zero_grad()
for name, multiscale_discr_optim in self.multiscale_discriminator_optim_iter():
multiscale_discr_optim.zero_grad()
for _ in range(self.grad_accum_every):
wave, = next(self.dl_iter)
wave = wave.to(device)
discr_losses = self.soundstream(
wave,
apply_grad_penalty = apply_grad_penalty,
return_discr_loss = True,
return_discr_losses_separately = True
)
for name, discr_loss in discr_losses:
self.accelerator.backward(discr_loss / self.grad_accum_every, retain_graph = True)
accum_log(logs, {name: discr_loss.item() / self.grad_accum_every})
if exists(self.discr_max_grad_norm):
self.accelerator.clip_grad_norm_(self.soundstream.stft_discriminator.parameters(), self.discr_max_grad_norm)
# gradient step for all discriminators
self.discr_optim.step()
for name, multiscale_discr_optim in self.multiscale_discriminator_optim_iter():
multiscale_discr_optim.step()
# build pretty printed losses
losses_str = f"{steps}: soundstream total loss: {logs['loss']:.3f}, soundstream recon loss: {logs['recon_loss']:.3f}"
if log_losses:
self.accelerator.log({
"total_loss": logs['loss'],
"recon_loss": logs['recon_loss'],
"multi_spectral_recon_loss": logs['multi_spectral_recon_loss'],
"adversarial_loss": logs['adversarial_loss'],
"feature_loss": logs['feature_loss'],
"all_commitment_loss": logs['all_commitment_loss'],
"stft_discr_loss": logs['stft']
}, step=steps)
for key, loss in logs.items():
if not key.startswith('scale:'):
continue
_, scale_factor = key.split(':')
losses_str += f" | discr (scale {scale_factor}) loss: {loss:.3f}"
if log_losses:
self.accelerator.log({f"discr_loss (scale {scale_factor})": loss}, step=steps)
# log
self.print(losses_str)
# update exponential moving averaged generator
self.accelerator.wait_for_everyone()
if self.is_main and self.use_ema:
self.ema_soundstream.update()
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
models = [(self.unwrapped_soundstream, str(steps))]
if self.use_ema:
models.append((self.ema_soundstream.ema_model, f'{steps}.ema'))
wave, = next(self.valid_dl_iter)
wave = wave.to(device)
for model, label in models:
model.eval()
with torch.inference_mode():
recons = model(wave, return_recons_only = True)
for ind, recon in enumerate(recons.unbind(dim = 0)):
filename = str(self.results_folder / f'sample_{label}.flac')
torchaudio.save(filename, recon.cpu().detach(), self.unwrapped_soundstream.target_sample_hz)
self.print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'soundstream.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
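# Hedged usage sketch (illustration only, not part of the library): train() above accepts an
# optional log_fn callback, which is called with the logs dict returned by each train_step().
# A minimal way to keep the per-step losses around, assuming `trainer` is an already constructed
# SoundStreamTrainer, is to pass a list's append method as that callback.
def _example_collect_soundstream_logs(trainer):
    history = []
    trainer.train(log_fn = history.append)  # each step's logs dict gets appended to history
    return history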
# semantic transformer trainer
class SemanticTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
transformer: SemanticTransformer,
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
data_max_length = None,
data_max_length_seconds = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.wav2vec = wav2vec
self.transformer = transformer
self.audio_conditioner = audio_conditioner
self.train_wrapper = SemanticTransformerWrapper(
wav2vec = wav2vec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in, if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = data_max_length_seconds * wav2vec.target_sample_hz
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = wav2vec.target_sample_hz,
seq_len_multiple_of = wav2vec.seq_len_multiple_of
)
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.train_wrapper,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.train_wrapper,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("semantic", config=hps)
self.average_valid_loss_over_grad_accum_every = self.grad_accum_every if average_valid_loss_over_grad_accum_every else 1 # number of valid batches to average the loss over
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.ds_fields, data))
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update transformer
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.train_wrapper(**data_kwargs, return_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# compute validation loss every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for _ in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.valid_dl_iter))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(**data_kwargs, return_loss = True)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'semantic.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
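# Hedged usage sketch (illustration only, not part of the library): constructing the trainer from
# a folder of audio. `wav2vec` and `transformer` are assumed to already exist (e.g. a
# HubertWithKmeans and a SemanticTransformer); the folder path and hyperparameters below are
# placeholders. data_max_length_seconds is converted to samples via wav2vec.target_sample_hz.
def _example_semantic_trainer_usage(wav2vec, transformer):
    trainer = SemanticTransformerTrainer(
        wav2vec = wav2vec,
        transformer = transformer,
        folder = '/path/to/audio',        # placeholder folder of sound files
        batch_size = 4,
        grad_accum_every = 8,
        data_max_length_seconds = 30,
        num_train_steps = 10000
    )
    trainer.train()
    return trainer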
# coarse transformer trainer
class CoarseTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
transformer: CoarseTransformer,
codec: Union[SoundStream, EncodecWrapper],
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
ds_fields: Tuple[str, ...] = ('raw_wave', 'raw_wave_for_codec', 'text'),
data_max_length = None,
data_max_length_seconds = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.transformer = transformer
self.codec = codec
self.wav2vec = wav2vec
self.audio_conditioner = audio_conditioner
self.train_wrapper = CoarseTransformerWrapper(
codec = codec,
wav2vec = wav2vec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in, if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = tuple(data_max_length_seconds * hz for hz in (wav2vec.target_sample_hz, codec.target_sample_hz))
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = (
wav2vec.target_sample_hz,
codec.target_sample_hz
), # need 2 waves resampled differently here
seq_len_multiple_of = codec.seq_len_multiple_of
)
self.ds_fields = ds_fields
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.transformer,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.transformer,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("coarse", config=hps)
self.train_wrapper.to(self.device)
self.average_valid_loss_over_grad_accum_every = self.grad_accum_every if average_valid_loss_over_grad_accum_every else 1 # number of valid batches to average the loss over
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update transformer
for _ in range(self.grad_accum_every):
data_kwargs = dict(zip(self.ds_fields, next(self.dl_iter)))
loss = self.train_wrapper(
**data_kwargs,
return_loss = True
)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# compute validation loss every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for i in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = dict(zip(self.ds_fields, next(self.valid_dl_iter)))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(
**data_kwargs,
return_loss = True
)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'coarse.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
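# Hedged usage sketch (illustration only, not part of the library): the coarse trainer needs both
# the semantic feature extractor and the codec, and its dataset returns the same clip resampled at
# wav2vec.target_sample_hz and codec.target_sample_hz plus optional text (see the ds_fields
# default above). The objects passed in and the folder path are assumed placeholders.
def _example_coarse_trainer_usage(transformer, codec, wav2vec):
    trainer = CoarseTransformerTrainer(
        transformer = transformer,        # a CoarseTransformer
        codec = codec,                    # a SoundStream or EncodecWrapper
        wav2vec = wav2vec,
        folder = '/path/to/audio',        # placeholder
        batch_size = 4,
        data_max_length_seconds = 10,
        num_train_steps = 10000
    )
    trainer.train()
    return trainer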
# fine transformer trainer
class FineTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
transformer: FineTransformer,
codec: Union[SoundStream, EncodecWrapper],
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
data_max_length = None,
data_max_length_seconds = None,
dataset_normalize = False,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.transformer = transformer
self.codec = codec
self.audio_conditioner = audio_conditioner
self.train_wrapper = FineTransformerWrapper(
codec = codec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in, if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = data_max_length_seconds * codec.target_sample_hz
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = codec.target_sample_hz,
seq_len_multiple_of = codec.seq_len_multiple_of
)
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.transformer,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.transformer,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("fine", config=hps)
self.train_wrapper.to(self.device)
self.average_valid_loss_over_grad_accum_every = self.grad_accum_every if average_valid_loss_over_grad_accum_every else 1 # number of valid batches to average the loss over
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.ds_fields, data))
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update transformer
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.train_wrapper(**data_kwargs, return_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# compute validation loss every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for i in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.valid_dl_iter))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(**data_kwargs, return_loss = True)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'fine.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
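# Hedged usage sketch (illustration only, not part of the library): resuming fine transformer
# training from a checkpoint written by save() above. The checkpoint path is a placeholder, but it
# follows the 'fine.transformer.{steps}.pt' naming used in train_step(), which load() relies on
# (via checkpoint_num_steps) to restore the step counter.
def _example_fine_trainer_resume(transformer, codec):
    trainer = FineTransformerTrainer(
        transformer = transformer,
        codec = codec,
        folder = '/path/to/audio',        # placeholder
        batch_size = 2,
        data_max_length_seconds = 5,
        num_train_steps = 10000
    )
    trainer.load('./results/fine.transformer.1000.pt')  # placeholder checkpoint path
    trainer.train()
    return trainer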
|
# helper functions
def exists(val):
return val is not None
# hacky way to get num quantizers
def get_num_quantizers(model: EncodecModel, audio_length = 512):
out = model.encode(torch.randn(1, 1, audio_length))
return out[0][0].shape[1]
class EncodecWrapper(nn.Module):
"""
Support pretrained 24kHz Encodec by Meta AI, if you want to skip training SoundStream.
TODO:
- see if we need to keep the scaled version and somehow persist the scale factors for when we need to decode? Right
now I'm just setting self.model.normalize = False to sidestep all of that
- see if we can use the 48kHz model, which is specifically for music. Right now we're using the 24kHz model because
that's what was used in MusicLM and avoids any resampling issues.
-
"""
def __init__(
self,
target_sample_hz = 24000,
strides = (2, 4, 5, 8),
num_quantizers = 8,
bandwidth = 6.0
):
super().__init__()
# Instantiate a pretrained EnCodec model
self.model = EncodecModel.encodec_model_24khz()
self.model.normalize = False # this means we don't need to scale codes e.g. when running model.encode(wav)
# The number of codebooks used will be determined by the bandwidth selected.
# E.g. for a bandwidth of 6 kbps, `n_q = 8` codebooks are used.
# Supported bandwidths are 1.5 kbps (n_q = 2), 3 kbps (n_q = 4), 6 kbps (n_q = 8), 12 kbps (n_q = 16) and 24 kbps (n_q = 32).
# For the 48 kHz model, only 3, 6, 12, and 24 kbps are supported. The number
# of codebooks for each is half that of the 24 kHz model as the frame rate is twice as much.
# bandwidth affects num quantizers used: https://github.com/facebookresearch/encodec/pull/41
self.model.set_target_bandwidth(bandwidth)
num_quantizers = get_num_quantizers(self.model)
# Fields that SoundStream has that get used externally. We replicate them here.
self.target_sample_hz = target_sample_hz
assert self.target_sample_hz == 24000, "haven't done anything with non-24kHz yet"
self.codebook_dim = 128
self.rq_groups = 1
self.num_quantizers = num_quantizers
self.strides = strides # used in seq_len_multiple_of
# cross entropy loss to indices passed in on l2 distance logits introduced in vector-quantize-pytorch 1.2.2
self.rq = ResidualVQ(
dim = 128,
codebook_size = 1024,
num_quantizers = num_quantizers
)
# copy codebook over to ResidualVQ for cross entropy loss logic from naturalspeech2
# luckily, it seems Meta AI basically used my ResidualVQ code verbatim. makes porting it over easy
for encodec_rq_layer, rq_layer in zip(self.model.quantizer.vq.layers, self.rq.layers):
encodec_codebook = dict(encodec_rq_layer._codebook.named_buffers()).get('embed')
vq_codebook = dict(rq_layer._codebook.named_buffers()).get('embed')
encodec_codebook = rearrange(encodec_codebook, '... -> 1 ...')
vq_codebook.copy_(encodec_codebook)
@property
def seq_len_multiple_of(self):
return reduce(lambda x, y: x * y, self.strides)
@property
def downsample_factor(self):
return self.seq_len_multiple_of
def forward(
self,
x,
input_sample_hz = None,
return_encoded = False,
**kwargs
):
x, ps = pack([x], '* n')
if exists(input_sample_hz):
x = resample(x, input_sample_hz, self.target_sample_hz)
# kwargs for stuff like return_encoded=True, which SoundStream uses but Encodec doesn't
assert not self.model.training, "Encodec is pretrained and should never be called outside eval mode."
# Unlike in the Encodec sample code in its README, x has already been resampled so we don't need to call
# convert_audio and unsqueeze. The convert_audio function also doesn't play nicely with batches.
# b = batch, t = timesteps, 1 channel for the 24kHz model, 2 channels for the 48kHz model
wav = rearrange(x, f'b t -> b {self.model.channels} t')
# Extract discrete codes from EnCodec
with torch.inference_mode():
encoded_frames = self.model.encode(wav)
# encoded_frames is a list of (frame, scale) tuples. Scale is a scalar but we don't use it. Frame is a tensor
# of shape [batch, num_quantizers, num_samples_per_frame]. We want to concatenate the frames to get all the
# timesteps concatenated.
codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1) # [batch, num_quantizers, timesteps]
# transformer code that uses codec expects codes to be [batch, timesteps, num_quantizers]
codes = rearrange(codes, 'b q n -> b n q') # result: [batch, timesteps, num_quantizers]
# the original soundstream forward returns (x, indices, commit_loss), but we only use the indices in eval mode, so just keep those.
# allow for returning of sum of quantized embeddings
emb = None
if return_encoded:
emb = self.get_emb_from_indices(codes)
emb, = unpack(emb, ps, '* n c')
codes, = unpack(codes, ps, '* n q')
return emb, codes, None
def decode_from_codebook_indices(self, quantized_indices):
# Input: batch x num tokens x num quantizers
# Output: batch x 1 x num samples
assert self.model.sample_rate == 24000,\
"if changing to 48kHz, that model segments its audio into lengths of 1.0 second with 1% overlap, whereas " \
"the 24kHz doesn't segment at all. this means the frame decode logic might change; this is a reminder to " \
"double check that."
# Since 24kHz pretrained doesn't do any segmenting, we have all the frames already (1 frame = 1 token in quantized_indices)
# The following code is hacked in from self.model.decode() (Encodec version 0.1.1) where we skip the part about
# scaling.
# Shape: 1 x (num_frames * stride product). 1 because we have 1 frame (because no segmenting)
frames = self._decode_frame(quantized_indices)
result = _linear_overlap_add(frames, self.model.segment_stride or 1)
# TODO: I'm not overly pleased with this because when this function gets called, we just rearrange the result
# back to b n anyways, but we'll keep this as a temporary hack just to make things work for now
return rearrange(result, 'b n -> b 1 n')
def get_emb_from_indices(self, indices):
codes = rearrange(indices, 'b t q -> q b t')
emb = self.model.quantizer.decode(codes)
return rearrange(emb, 'b c n -> b n c')
def decode(self, emb):
emb = rearrange(emb, 'b n c -> b c n')
return self.model.decoder(emb)
def _decode_frame(self, quantized_indices):
# The following code is hacked in from self.model._decode_frame() (Encodec version 0.1.1) where we assume we've
# already unwrapped the EncodedFrame
# Input: batch x num tokens x num quantizers
# Output: batch x new_num_samples, where new_num_samples is num_frames * stride product (may be slightly
# larger than original num samples as a result, because the last frame might not be "fully filled" with samples
# if num_samples doesn't divide perfectly).
# num_frames == the number of acoustic tokens you have, one token per frame
codes = rearrange(quantized_indices, 'b t q -> q b t')
emb = self.model.quantizer.decode(codes)
# emb shape: batch x self.model.quantizer.dimension x T. Note self.model.quantizer.dimension is the embedding dimension
return self.model.decoder(emb)
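# Hedged usage sketch (illustration only, not part of the module): run random audio through the
# wrapper and decode it back. With return_encoded = True, forward() returns the summed quantized
# embeddings alongside codes of shape [batch, timesteps, num_quantizers]; decode() then maps those
# embeddings back to a waveform via the EnCodec decoder. The random tensor is a stand-in for real
# 24kHz audio.
def _example_encodec_roundtrip():
    codec = EncodecWrapper()
    codec.eval()                                             # forward() asserts the model is not in training mode
    wav = torch.randn(2, 24000)                              # two 1 second clips at 24kHz (placeholder data)
    with torch.inference_mode():
        emb, codes, _ = codec(wav, return_encoded = True)
        recon = codec.decode(emb)                            # [batch, 1, num_samples]
    return recon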
|
# helper functions
def exists(val):
return val is not None
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def is_unique(arr):
return len(set(arr)) == len(arr)
# dataset functions
class SoundDataset(Dataset):
@beartype
def __init__(
self,
folder,
target_sample_hz: Union[int, Tuple[int, ...]], # target sample hz must be specified, or a tuple of them if one wants to return multiple resampled
exts = ['flac', 'wav', 'mp3', 'webm'],
max_length: Optional[int] = None, # max length would apply to the highest target_sample_hz, if there are multiple
seq_len_multiple_of: Optional[Union[int, Tuple[Optional[int], ...]]] = None
):
super().__init__()
path = Path(folder)
assert path.exists(), 'folder does not exist'
files = [file for ext in exts for file in path.glob(f'**/*.{ext}')]
assert len(files) > 0, 'no sound files found'
self.files = files
self.max_length = max_length
self.target_sample_hz = cast_tuple(target_sample_hz)
num_outputs = len(self.target_sample_hz)
# strategy, if there are multiple target sample hz, would be to resample to the highest one first
# apply the max lengths, and then resample to all the others
self.max_target_sample_hz = max(self.target_sample_hz)
self.seq_len_multiple_of = cast_tuple(seq_len_multiple_of, num_outputs)
assert len(self.target_sample_hz) == len(self.seq_len_multiple_of)
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
file = self.files[idx]
data, sample_hz = torchaudio.load(file)
assert data.numel() > 0, f'one of your audio files ({file}) is empty. please remove it from your folder'
if data.shape[0] > 1:
# the audio has more than 1 channel, convert to mono
data = reduce(data, 'c ... -> 1 ...', 'mean')
# first resample data to the max target freq
data = resample(data, sample_hz, self.max_target_sample_hz)
sample_hz = self.max_target_sample_hz
# then curtail or pad the audio depending on the max length
max_length = self.max_length
audio_length = data.size(1)
if exists(max_length):
if audio_length > max_length:
max_start = audio_length - max_length
start = torch.randint(0, max_start, (1, ))
data = data[:, start:start + max_length]
else:
data = F.pad(data, (0, max_length - audio_length), 'constant')
data = rearrange(data, '1 ... -> ...')
# resample if target_sample_hz is not None in the tuple
num_outputs = len(self.target_sample_hz)
data = cast_tuple(data, num_outputs)
data_tuple = tuple(resample(d, sample_hz, target_sample_hz) for d, target_sample_hz in zip(data, self.target_sample_hz))
output = []
# process each of the data resample at different frequencies individually for curtailing to multiple
for data, seq_len_multiple_of in zip(data_tuple, self.seq_len_multiple_of):
if exists(seq_len_multiple_of):
data = curtail_to_multiple(data, seq_len_multiple_of)
output.append(data.float())
# cast from list to tuple
output = tuple(output)
# return only one audio, if only one target resample freq
if num_outputs == 1:
return output[0]
return output
# dataloader functions
def collate_one_or_multiple_tensors(fn):
@wraps(fn)
def inner(data):
is_one_data = not isinstance(data[0], tuple)
if is_one_data:
data = fn(data)
return (data,)
outputs = []
for datum in zip(*data):
if is_bearable(datum, Tuple[str, ...]):
output = list(datum)
else:
output = fn(datum)
outputs.append(output)
return tuple(outputs)
return inner
@collate_one_or_multiple_tensors
def curtail_to_shortest_collate(data):
min_len = min(datum.shape[0] for datum in data)
data = [datum[:min_len] for datum in data]
return torch.stack(data)
@collate_one_or_multiple_tensors
def pad_to_longest_fn(data):
return pad_sequence(data, batch_first = True)
def get_dataloader(ds, pad_to_longest = True, **kwargs):
collate_fn = pad_to_longest_fn if pad_to_longest else curtail_to_shortest_collate
return DataLoader(ds, collate_fn = collate_fn, **kwargs)
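# Hedged usage sketch (illustration only, not part of the module): a dataset that returns each clip
# resampled at two rates, wrapped in get_dataloader, which pads every batch to its longest clip by
# default. The folder path, sample rates and seq_len_multiple_of values are placeholders.
def _example_sound_dataloader():
    ds = SoundDataset(
        '/path/to/audio',                     # placeholder folder of flac / wav / mp3 / webm files
        target_sample_hz = (16000, 24000),    # e.g. one rate for wav2vec, one for the codec
        max_length = 24000 * 10,              # max length applies at the highest target rate
        seq_len_multiple_of = (None, 320)
    )
    dl = get_dataloader(ds, batch_size = 4, shuffle = True)
    wave_a, wave_b = next(iter(dl))           # one padded batch per target sample rate
    return wave_a.shape, wave_b.shape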
|
# standard imports
import os
import sys
import pickle
import sqlite3
# non-standard imports
import numpy as np
from sklearn import svm
# local imports (module name assumed from the Config / strip_version / safe_pickle_dump usage below)
from utils import Config, safe_pickle_dump, strip_version
num_recommendations = 500 # papers to recommend per user
# -----------------------------------------------------------------------------
if not os.path.isfile(Config.database_path):
print("the database file as.db should exist. You can create an empty database with sqlite3 as.db < schema.sql")
sys.exit()
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = sqldb.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
# -----------------------------------------------------------------------------
# fetch all users
users = query_db('''select * from user''')
print('number of users: ', len(users))
# load the tfidf matrix and meta
meta = pickle.load(open(Config.meta_path, 'rb'))
out = pickle.load(open(Config.tfidf_path, 'rb'))
X = out['X']
X = X.todense()
xtoi = { strip_version(x):i for x,i in meta['ptoi'].items() }
user_sim = {}
for ii,u in enumerate(users):
print("%d/%d building an SVM for %s" % (ii, len(users), u['username'].encode('utf-8')))
uid = u['user_id']
lib = query_db('''select * from library where user_id = ?''', [uid])
pids = [x['paper_id'] for x in lib] # raw pids without version
posix = [xtoi[p] for p in pids if p in xtoi]
if not posix:
continue # empty library for this user maybe?
print(pids)
y = np.zeros(X.shape[0])
for ix in posix: y[ix] = 1
clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000, tol=1e-6, C=0.1)
clf.fit(X,y)
s = clf.decision_function(X)
sortix = np.argsort(-s)
sortix = sortix[:min(num_recommendations, len(sortix))] # crop paper recommendations to save space
user_sim[uid] = [strip_version(meta['pids'][ix]) for ix in list(sortix)]
print('writing', Config.user_sim_path)
safe_pickle_dump(user_sim, Config.user_sim_path)
|
"""
Very simple script that iterates over all files data/pdf/f.pdf
and creates a file data/txt/f.pdf.txt that contains the raw text, extracted
using the "pdftotext" command. If a pdf cannot be converted, this
script will not produce the output file.
"""
# make sure pdftotext is installed
if not shutil.which('pdftotext'): # needs Python 3.3+
print('ERROR: you don\'t have pdftotext installed. Install it first before calling this script')
sys.exit()
if not os.path.exists(Config.txt_dir):
print('creating ', Config.txt_dir)
os.makedirs(Config.txt_dir)
have = set(os.listdir(Config.txt_dir))
files = os.listdir(Config.pdf_dir)
for i,f in enumerate(files): # there was a ,start=1 here that I removed, can't remember why it would be there. shouldn't be, i think.
txt_basename = f + '.txt'
if txt_basename in have:
print('%d/%d skipping %s, already exists.' % (i, len(files), txt_basename, ))
continue
pdf_path = os.path.join(Config.pdf_dir, f)
txt_path = os.path.join(Config.txt_dir, txt_basename)
cmd = "pdftotext %s %s" % (pdf_path, txt_path)
os.system(cmd)
print('%d/%d %s' % (i, len(files), cmd))
# check output was made
if not os.path.isfile(txt_path):
# there was an error with converting the pdf
print('there was a problem with parsing %s to text, creating an empty text file.' % (pdf_path, ))
os.system('touch ' + txt_path) # create empty file, but it's a record of having tried to convert
time.sleep(0.01) # silly way for allowing for ctrl+c termination
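# Hedged alternative sketch (illustration only, not used by the script above): a variant of the
# conversion step using subprocess.run, which avoids shell interpolation of the paths and allows a
# timeout. The timeout value is an arbitrary assumption.
def convert_pdf_to_txt(pdf_path, txt_path, timeout_sec = 120):
    import subprocess
    try:
        subprocess.run(['pdftotext', pdf_path, txt_path], check = True, timeout = timeout_sec)
        return True
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
        print('pdftotext failed for %s: %s' % (pdf_path, e))
        return False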
|
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
# various globals
# -----------------------------------------------------------------------------
# database configuration
if os.path.isfile('secret_key.txt'):
SECRET_KEY = open('secret_key.txt', 'r').read()
else:
SECRET_KEY = 'devkey, should be in a file'
app = Flask(__name__)
app.config.from_object(__name__)
limiter = Limiter(app, global_limits=["100 per hour", "20 per minute"])
SEARCH_DICT = {}
# -----------------------------------------------------------------------------
# utilities for database interactions
# -----------------------------------------------------------------------------
# to initialize the database: sqlite3 as.db < schema.sql
def connect_db():
sqlite_db = sqlite3.connect(Config.database_path)
sqlite_db.row_factory = sqlite3.Row # to return dicts rather than tuples
return sqlite_db
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = g.db.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = query_db('select user_id from user where username = ?',
[username], one=True)
return rv[0] if rv else None
def get_username(user_id):
"""Convenience method to look up the username for a user."""
rv = query_db('select username from user where user_id = ?',
[user_id], one=True)
return rv[0] if rv else None
# -----------------------------------------------------------------------------
# connection handlers
# -----------------------------------------------------------------------------
@app.before_request
def before_request():
# this will always request database connection, even if we dont end up using it ;\
g.db = connect_db()
# retrieve user object from the database if user_id is set
g.user = None
if 'user_id' in session:
g.user = query_db('select * from user where user_id = ?',
[session['user_id']], one=True)
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
# -----------------------------------------------------------------------------
# search/sort functionality
# -----------------------------------------------------------------------------
def date_sort():
scores = []
for pid,p in db.items():
timestruct = dateutil.parser.parse(p['updated'])
p['time_updated'] = int(timestruct.strftime("%s")) # store in struct for future convenience
timestruct = dateutil.parser.parse(p['published'])
p['time_published'] = int(timestruct.strftime("%s")) # store in struct for future convenience
scores.append((p['time_updated'], p))
scores.sort(reverse=True, key=lambda x: x[0])
out = [sp[1] for sp in scores]
return out
def papers_search(qraw):
qparts = qraw.lower().strip().split() # split by spaces
# use reverse index and accumulate scores
scores = []
for pid,p in db.items():
score = sum(SEARCH_DICT[pid].get(q,0) for q in qparts)
if score == 0:
continue # no match whatsoever, dont include
# give a small boost to more recent papers
score += 0.0001*p['tscore']
scores.append((score, p))
scores.sort(reverse=True, key=lambda x: x[0]) # descending
out = [x[1] for x in scores if x[0] > 0]
return out
def papers_similar(pid):
rawpid = strip_version(pid)
# check if we have this paper at all, otherwise return empty list
if not rawpid in db:
return []
# check if we have distances to this specific version of paper id (includes version)
if pid in sim_dict:
# good, simplest case: lets return the papers
return [db[strip_version(k)] for k in sim_dict[pid]]
else:
# ok we don't have this specific version. could be a stale URL that points to,
# e.g. v1 of a paper, but due to an updated version of it we only have v2 on file
# now. We want to use v2 in that case.
# lets try to retrieve the most recent version of this paper we do have
kok = [k for k in sim_dict if rawpid in k]
if kok:
# ok we have at least one different version of this paper, lets use it instead
id_use_instead = kok[0]
return [db[strip_version(k)] for k in sim_dict[id_use_instead]]
else:
# return just the paper. we dont have similarities for it for some reason
return [db[rawpid]]
def papers_from_library():
out = []
if g.user:
# user is logged in, lets fetch their saved library data
uid = session['user_id']
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = [strip_version(x['paper_id']) for x in user_library]
out = [db[x] for x in libids]
out = sorted(out, key=lambda k: k['updated'], reverse=True)
return out
def papers_from_svm(recent_days=None):
out = []
if g.user:
uid = session['user_id']
if not uid in user_sim:
return []
# we want to exclude papers that are already in user library from the result, so fetch them.
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = {strip_version(x['paper_id']) for x in user_library}
plist = user_sim[uid]
out = [db[x] for x in plist if not x in libids]
if recent_days is not None:
# filter as well to only most recent papers
curtime = int(time.time()) # in seconds
out = [x for x in out if curtime - x['time_published'] < recent_days*24*60*60]
return out
def papers_filter_version(papers, v):
if v != '1':
return papers # noop
intv = int(v)
filtered = [p for p in papers if p['_version'] == intv]
return filtered
def encode_json(ps, n=10, send_images=True, send_abstracts=True):
libids = set()
if g.user:
# user is logged in, lets fetch their saved library data
uid = session['user_id']
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = {strip_version(x['paper_id']) for x in user_library}
ret = []
for i in range(min(len(ps),n)):
p = ps[i]
idvv = '%sv%d' % (p['_rawid'], p['_version'])
struct = {}
struct['title'] = p['title']
struct['pid'] = idvv
struct['category'] = p['arxiv_primary_category']['term']
struct['authors'] = [a['name'] for a in p['authors']]
struct['link'] = p['link']
struct['in_library'] = 1 if p['_rawid'] in libids else 0
if send_abstracts:
struct['abstract'] = p['summary']
if send_images:
struct['img'] = '/static/thumbs/' + idvv + '.pdf.jpg'
struct['tags'] = [t['term'] for t in p['tags']]
timestruct = dateutil.parser.parse(p['updated'])
struct['published_time'] = '%s/%s/%s' % (timestruct.month, timestruct.day, timestruct.year)
timestruct = dateutil.parser.parse(p['published'])
struct['originally_published_time'] = '%s/%s/%s' % (timestruct.month, timestruct.day, timestruct.year)
cc = p.get('arxiv_comment', '')
if len(cc) > 100:
cc = cc[:100] + '...' # crop very long comments
struct['comment'] = cc
ret.append(struct)
return ret
# -----------------------------------------------------------------------------
# flask request handling
# -----------------------------------------------------------------------------
def default_context(papers, **kws):
top_papers = encode_json(papers, args.num_results)
ans = dict(papers=top_papers, numresults=len(papers), totpapers=len(db), msg='')
ans.update(kws)
return ans
@app.route("/")
def intmain():
vstr = request.args.get('vfilter', 'all')
papers = DATE_SORTED_PAPERS # precomputed
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='recent',
msg='Showing most recent Arxiv papers:')
return render_template('main.html', **ctx)
@app.route("/<request_pid>")
def rank(request_pid=None):
if not isvalidid(request_pid):
return '' # these are requests for icons, things like robots.txt, etc
papers = papers_similar(request_pid)
ctx = default_context(papers, render_format='paper')
return render_template('main.html', **ctx)
@app.route("/search", methods=['GET'])
def search():
q = request.args.get('q', '') # get the search request
papers = papers_search(q) # perform the query and get sorted documents
ctx = default_context(papers, render_format="search")
return render_template('main.html', **ctx)
@app.route('/recommend', methods=['GET'])
def recommend():
""" return user's svm sorted list """
ttstr = request.args.get('timefilter', 'week') # default is week
vstr = request.args.get('vfilter', 'all') # default is all (no filter)
legend = {'day':1, '3days':3, 'week':7, 'month':30, 'year':365}
tt = legend.get(ttstr, None)
papers = papers_from_svm(recent_days=tt)
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='recommend',
msg='Recommended papers: (based on SVM trained on tfidf of papers in your library, refreshed every day or so)' if g.user else 'You must be logged in and have some papers saved in your library.')
return render_template('main.html', **ctx)
@app.route('/top', methods=['GET'])
def top():
""" return top papers """
ttstr = request.args.get('timefilter', 'week') # default is week
vstr = request.args.get('vfilter', 'all') # default is all (no filter)
legend = {'day':1, '3days':3, 'week':7, 'month':30, 'year':365, 'alltime':10000}
tt = legend.get(ttstr, 7)
curtime = int(time.time()) # in seconds
papers = [p for p in TOP_SORTED_PAPERS if curtime - p['time_published'] < tt*24*60*60]
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='top',
msg='Top papers based on people\'s libraries:')
return render_template('main.html', **ctx)
@app.route('/toptwtr', methods=['GET'])
def toptwtr():
""" return top papers """
papers = TWITTER_TOP
ctx = default_context(papers, render_format='toptwtr',
msg='Top papers mentioned on Twitter over last 5 days:')
return render_template('main.html', **ctx)
@app.route('/library')
def library():
""" render user's library """
papers = papers_from_library()
ret = encode_json(papers, 500) # cap at 500 papers in someone's library. that's a lot!
if g.user:
msg = '%d papers in your library:' % (len(ret), )
else:
msg = 'You must be logged in. Once you are, you can save papers to your library (with the save icon on the right of each paper) and they will show up here.'
ctx = default_context(papers, render_format='library', msg=msg)
return render_template('main.html', **ctx)
@app.route('/libtoggle', methods=['POST'])
def review():
""" user wants to toggle a paper in his library """
# make sure user is logged in
if not g.user:
return 'NO' # fail... (not logged in). JS should prevent us from getting here.
idvv = request.form['pid'] # includes version
if not isvalidid(idvv):
return 'NO' # fail, malformed id. weird.
pid = strip_version(idvv)
if not pid in db:
return 'NO' # we don't know this paper. wat
uid = session['user_id'] # id of logged in user
# check this user already has this paper in library
record = query_db('''select * from library where
user_id = ? and paper_id = ?''', [uid, pid], one=True)
print(record)
ret = 'NO'
if record:
# record exists, erase it.
g.db.execute('''delete from library where user_id = ? and paper_id = ?''', [uid, pid])
g.db.commit()
#print('removed %s for %s' % (pid, uid))
ret = 'OFF'
else:
# record does not exist, add it.
rawpid = strip_version(pid)
g.db.execute('''insert into library (paper_id, user_id, update_time) values (?, ?, ?)''',
[rawpid, uid, int(time.time())])
g.db.commit()
#print('added %s for %s' % (pid, uid))
ret = 'ON'
return ret
@app.route('/login', methods=['POST'])
def login():
""" logs in the user. if the username doesn't exist creates the account """
if not request.form['username']:
flash('You have to enter a username')
elif not request.form['password']:
flash('You have to enter a password')
elif get_user_id(request.form['username']) is not None:
# username already exists, fetch all of its attributes
user = query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if check_password_hash(user['pw_hash'], request.form['password']):
# password is correct, log in the user
session['user_id'] = get_user_id(request.form['username'])
flash('User ' + request.form['username'] + ' logged in.')
else:
# incorrect password
flash('User ' + request.form['username'] + ' already exists, wrong password.')
else:
# create account and log in
creation_time = int(time.time())
g.db.execute('''insert into user (username, pw_hash, creation_time) values (?, ?, ?)''',
[request.form['username'],
generate_password_hash(request.form['password']),
creation_time])
user_id = g.db.execute('select last_insert_rowid()').fetchall()[0][0]
g.db.commit()
session['user_id'] = user_id
flash('New account %s created' % (request.form['username'], ))
return redirect(url_for('intmain'))
@app.route('/logout')
def logout():
session.pop('user_id', None)
flash('You were logged out')
return redirect(url_for('intmain'))
# -----------------------------------------------------------------------------
# int main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--prod', dest='prod', action='store_true', help='run in prod?')
parser.add_argument('-r', '--num_results', dest='num_results', type=int, default=200, help='number of results to return per query')
parser.add_argument('--port', dest='port', type=int, default=5000, help='port to serve on')
args = parser.parse_args()
print(args)
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
print('loading tfidf_meta', Config.meta_path)
meta = pickle.load(open(Config.meta_path, "rb"))
vocab = meta['vocab']
idf = meta['idf']
print('loading paper similarities', Config.sim_path)
sim_dict = pickle.load(open(Config.sim_path, "rb"))
print('loading user recommendations', Config.user_sim_path)
if os.path.isfile(Config.user_sim_path):
user_sim = pickle.load(open(Config.user_sim_path, 'rb'))
else:
user_sim = {}
print('loading twitter top', Config.tweet_path)
if os.path.isfile(Config.tweet_path):
TWITTER_TOP = pickle.load(open(Config.tweet_path, 'rb'))
TWITTER_TOP = [db[pid] for count,pid in TWITTER_TOP]
else:
TWITTER_TOP = []
print('precomputing papers date sorted...')
DATE_SORTED_PAPERS = date_sort()
if not os.path.isfile(Config.database_path):
print('did not find as.db, trying to create an empty database from schema.sql...')
print('this needs sqlite3 to be installed!')
os.system('sqlite3 as.db < schema.sql')
# compute top papers in peoples' libraries
print('computing top papers...')
def get_popular():
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
libs = sqldb.execute('''select * from library''').fetchall()
counts = {}
for lib in libs:
pid = lib['paper_id']
counts[pid] = counts.get(pid, 0) + 1
return counts
top_counts = get_popular()
top_paper_counts = sorted([(v,k) for k,v in top_counts.items() if v > 0], reverse=True)
print(top_paper_counts[:min(30, len(top_paper_counts))])
TOP_SORTED_PAPERS = [db[q[1]] for q in top_paper_counts]
# compute min and max time for all papers
tts = [time.mktime(dateutil.parser.parse(p['updated']).timetuple()) for pid,p in db.items()]
ttmin = min(tts)*1.0
ttmax = max(tts)*1.0
for pid,p in db.items():
tt = time.mktime(dateutil.parser.parse(p['updated']).timetuple())
p['tscore'] = (tt-ttmin)/(ttmax-ttmin)
# some utilities for creating a search index for faster search
punc = "'!\"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'" # removed hyphen from string.punctuation
trans_table = {ord(c): None for c in punc}
def makedict(s, forceidf=None, scale=1.0):
words = set(s.lower().translate(trans_table).strip().split())
out = {}
for w in words: # todo: if we're using bigrams in vocab then this won't search over them
if forceidf is None:
if w in vocab:
# we have idf for this
idfval = idf[vocab[w]]*scale
else:
idfval = 1.0*scale # assume idf 1.0 (low)
else:
idfval = forceidf
out[w] = idfval
return out
def merge_dicts(dlist):
out = {}
for d in dlist:
for k,v in d.items():
out[k] = out.get(k,0) + v
return out
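# Hedged worked example (illustration only): how the per-field dictionaries below combine. Title,
# author and category terms all get a forced weight of 5 (note the scale argument has no effect
# once forceidf is set), while summary words get their idf, or 1.0 if the word is not in vocab.
# merge_dicts then sums contributions per word, so a query term appearing in both the title and an
# out-of-vocab summary scores 6 for that paper.
def _example_search_weights():
    d_title = {'attention': 5.0}      # as produced by makedict(title, forceidf=5, scale=3)
    d_summary = {'attention': 1.0}    # as produced by makedict(summary) for a word missing from vocab
    assert merge_dicts([d_title, d_summary]) == {'attention': 6.0}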
# caching: check if db.p is younger than search_dict.p
recompute_index = True
if os.path.isfile(Config.search_dict_path):
db_modified_time = os.path.getmtime(Config.db_path)
search_modified_time = os.path.getmtime(Config.search_dict_path)
if search_modified_time > db_modified_time:
# search index exists and is more recent, no need
recompute_index = False
if recompute_index:
print('building an index for faster search...')
for pid in db:
p = db[pid]
dict_title = makedict(p['title'], forceidf=5, scale=3)
dict_authors = makedict(' '.join(x['name'] for x in p['authors']), forceidf=5)
dict_categories = {x['term'].lower():5 for x in p['tags']}
if 'and' in dict_authors:
# special case for "and" handling in authors list
del dict_authors['and']
dict_summary = makedict(p['summary'])
SEARCH_DICT[pid] = merge_dicts([dict_title, dict_authors, dict_categories, dict_summary])
# and cache it in file
print('writing ', Config.search_dict_path, ' as cache...')
safe_pickle_dump(SEARCH_DICT, Config.search_dict_path)
else:
print('loading cached index for faster search from', Config.search_dict_path)
SEARCH_DICT = pickle.load(open(Config.search_dict_path, 'rb'))
# start
if args.prod:
# run on Tornado instead, since running raw Flask in prod is not recommended
print('starting tornado!')
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
enable_pretty_logging()
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(args.port)
IOLoop.instance().start()
else:
print('starting flask!')
app.debug = True
app.run(port=args.port)
|
"""
Queries arxiv API and downloads papers (the query is a parameter).
The script is intended to enrich an existing database pickle (by default db.p),
so this file will be loaded first, and then new results will be added to it.
"""
def encode_feedparser_dict(d):
"""
helper function to get rid of feedparser bs with a deep copy.
I hate when libs wrap simple things in their own classes.
"""
if isinstance(d, feedparser.FeedParserDict) or isinstance(d, dict):
j = {}
for k in d.keys():
j[k] = encode_feedparser_dict(d[k])
return j
elif isinstance(d, list):
l = []
for k in d:
l.append(encode_feedparser_dict(k))
return l
else:
return d
def parse_arxiv_url(url):
"""
an example is http://arxiv.org/abs/1512.08756v2
we want to extract the raw id and the version
"""
ix = url.rfind('/')
idversion = url[ix+1:] # extract just the id (and the version)
parts = idversion.split('v')
assert len(parts) == 2, 'error parsing url ' + url
return parts[0], int(parts[1])
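# Example (illustration only):
# parse_arxiv_url('http://arxiv.org/abs/1512.08756v2') returns ('1512.08756', 2)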
if __name__ == "__main__":
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--search-query', type=str,
default='cat:cs.CV+OR+cat:cs.AI+OR+cat:cs.LG+OR+cat:cs.CL+OR+cat:cs.NE+OR+cat:stat.ML',
help='query used for arxiv API. See http://arxiv.org/help/api/user-manual#detailed_examples')
parser.add_argument('--start-index', type=int, default=0, help='0 = most recent API result')
parser.add_argument('--max-index', type=int, default=10000, help='upper bound on paper index we will fetch')
parser.add_argument('--results-per-iteration', type=int, default=100, help='passed to arxiv API')
  parser.add_argument('--wait-time', type=float, default=5.0, help='be gentle to the arxiv API: number of seconds to wait between requests')
parser.add_argument('--break-on-no-added', type=int, default=1, help='break out early if all returned query papers are already in db? 1=yes, 0=no')
args = parser.parse_args()
# misc hardcoded variables
base_url = 'http://export.arxiv.org/api/query?' # base api query url
print('Searching arXiv for %s' % (args.search_query, ))
# lets load the existing database to memory
try:
db = pickle.load(open(Config.db_path, 'rb'))
except Exception as e:
print('error loading existing database:')
print(e)
print('starting from an empty database')
db = {}
# -----------------------------------------------------------------------------
# main loop where we fetch the new results
print('database has %d entries at start' % (len(db), ))
num_added_total = 0
for i in range(args.start_index, args.max_index, args.results_per_iteration):
print("Results %i - %i" % (i,i+args.results_per_iteration))
query = 'search_query=%s&sortBy=lastUpdatedDate&start=%i&max_results=%i' % (args.search_query,
i, args.results_per_iteration)
with urllib.request.urlopen(base_url+query) as url:
response = url.read()
parse = feedparser.parse(response)
num_added = 0
num_skipped = 0
for e in parse.entries:
j = encode_feedparser_dict(e)
# extract just the raw arxiv id and version for this paper
rawid, version = parse_arxiv_url(j['id'])
j['_rawid'] = rawid
j['_version'] = version
# add to our database if we didn't have it before, or if this is a new version
if not rawid in db or j['_version'] > db[rawid]['_version']:
db[rawid] = j
print('Updated %s added %s' % (j['updated'].encode('utf-8'), j['title'].encode('utf-8')))
num_added += 1
num_added_total += 1
else:
num_skipped += 1
# print some information
print('Added %d papers, already had %d.' % (num_added, num_skipped))
if len(parse.entries) == 0:
print('Received no results from arxiv. Rate limiting? Exiting. Restart later maybe.')
print(response)
break
if num_added == 0 and args.break_on_no_added == 1:
print('No new papers were added. Assuming no new papers exist. Exiting.')
break
print('Sleeping for %i seconds' % (args.wait_time , ))
time.sleep(args.wait_time + random.uniform(0, 3))
# save the database before we quit, if we found anything new
if num_added_total > 0:
print('Saving database with %d papers to %s' % (len(db), Config.db_path))
safe_pickle_dump(db, Config.db_path)
|
"""
Use imagemagick to convert all pdfs to a sequence of thumbnail images
requires: sudo apt-get install imagemagick
"""
# make sure imagemagick is installed
if not shutil.which('convert'): # shutil.which needs Python 3.3+
print("ERROR: you don\'t have imagemagick installed. Install it first before calling this script")
sys.exit()
# create if necessary the directories we're using for processing and output
pdf_dir = os.path.join('data', 'pdf')
if not os.path.exists(Config.thumbs_dir): os.makedirs(Config.thumbs_dir)
if not os.path.exists(Config.tmp_dir): os.makedirs(Config.tmp_dir)
# fetch all pdf filenames in the pdf directory
files_in_pdf_dir = os.listdir(pdf_dir)
pdf_files = [x for x in files_in_pdf_dir if x.endswith('.pdf')] # filter to just pdfs, just in case
# iterate over all pdf files and create the thumbnails
for i,p in enumerate(pdf_files):
pdf_path = os.path.join(pdf_dir, p)
thumb_path = os.path.join(Config.thumbs_dir, p + '.jpg')
if os.path.isfile(thumb_path):
print("skipping %s, thumbnail already exists." % (pdf_path, ))
continue
print("%d/%d processing %s" % (i, len(pdf_files), p))
  # take the first 8 pages of the pdf ([0-7]); from about the 9th page on it's usually just references
# tile them horizontally, use JPEG compression 80, trim the borders for each image
#cmd = "montage %s[0-7] -mode Concatenate -tile x1 -quality 80 -resize x230 -trim %s" % (pdf_path, "thumbs/" + f + ".jpg")
#print "EXEC: " + cmd
  # instead we use the more roundabout alternative below, which is uglier and needs temporary
  # files, but it was found to succeed more often; the one-liner above, while more elegant,
  # had problems on some pdfs.
# erase previous intermediate files thumb-*.png in the tmp directory
if os.path.isfile(os.path.join(Config.tmp_dir, 'thumb-0.png')):
for i in range(8):
f = os.path.join(Config.tmp_dir, 'thumb-%d.png' % (i,))
f2= os.path.join(Config.tmp_dir, 'thumbbuf-%d.png' % (i,))
if os.path.isfile(f):
cmd = 'mv %s %s' % (f, f2)
os.system(cmd)
        # originally this was going to be an rm call, but scripted rm is scary, so we
        # issue a "mv" to rename the files instead, which is a bit safer. We have to do
        # this because if a paper is shorter than 8 pages, intermediate files left over
        # from the previous paper would otherwise "leak" into this paper's thumbnail.
# spawn async. convert can unfortunately enter an infinite loop, have to handle this.
# this command will generate 8 independent images thumb-0.png ... thumb-7.png of the thumbnails
pp = Popen(['convert', '%s[0-7]' % (pdf_path, ), '-thumbnail', 'x156', os.path.join(Config.tmp_dir, 'thumb.png')])
t0 = time.time()
  while time.time() - t0 < 20: # give it a 20 second deadline
ret = pp.poll()
if not (ret is None):
# process terminated
break
time.sleep(0.1)
ret = pp.poll()
if ret is None:
print("convert command did not terminate in 20 seconds, terminating.")
pp.terminate() # give up
if not os.path.isfile(os.path.join(Config.tmp_dir, 'thumb-0.png')):
# failed to render pdf, replace with missing image
missing_thumb_path = os.path.join('static', 'missing.jpg')
os.system('cp %s %s' % (missing_thumb_path, thumb_path))
print("could not render pdf, creating a missing image placeholder")
else:
cmd = "montage -mode concatenate -quality 80 -tile x1 %s %s" % (os.path.join(Config.tmp_dir, 'thumb-*.png'), thumb_path)
print(cmd)
os.system(cmd)
  time.sleep(0.01) # silly way of allowing ctrl+c termination
|
# global settings
# -----------------------------------------------------------------------------
class Config(object):
# main paper information repo file
db_path = 'db.p'
# intermediate processing folders
pdf_dir = os.path.join('data', 'pdf')
txt_dir = os.path.join('data', 'txt')
thumbs_dir = os.path.join('static', 'thumbs')
# intermediate pickles
tfidf_path = 'tfidf.p'
meta_path = 'tfidf_meta.p'
sim_path = 'sim_dict.p'
user_sim_path = 'user_sim.p'
tweet_path = 'twitter.p' # written by twitter_daemon.py
# sql database file
database_path = 'as.db'
search_dict_path = 'search_dict.p'
tmp_dir = 'tmp'
# Context managers for atomic writes courtesy of
# http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python
@contextmanager
def _tempfile(*args, **kws):
""" Context for temporary file.
Will find a free temporary filename upon entering
and will try to delete the file on leaving
Parameters
----------
suffix : string
optional file suffix
"""
fd, name = tempfile.mkstemp(*args, **kws)
os.close(fd)
try:
yield name
finally:
try:
os.remove(name)
except OSError as e:
if e.errno == 2:
pass
else:
raise e
@contextmanager
def open_atomic(filepath, *args, **kwargs):
""" Open temporary file object that atomically moves to destination upon
exiting.
Allows reading and writing to and from the same filename.
Parameters
----------
filepath : string
the file path to be opened
fsync : bool
whether to force write the file to disk
kwargs : mixed
Any valid keyword arguments for :code:`open`
"""
fsync = kwargs.pop('fsync', False)
with _tempfile(dir=os.path.dirname(filepath)) as tmppath:
with open(tmppath, *args, **kwargs) as f:
yield f
if fsync:
f.flush()
        os.fsync(f.fileno())
os.rename(tmppath, filepath)
def safe_pickle_dump(obj, fname):
with open_atomic(fname, 'wb') as f:
pickle.dump(obj, f, -1)
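# usage sketch: safe_pickle_dump(db, Config.db_path) pickles into a temporary file in the
# same directory and then os.rename()s it over the destination, so readers never see a
# half-written pickle.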
# arxiv utils
# -----------------------------------------------------------------------------
def strip_version(idstr):
""" identity function if arxiv id has no version, otherwise strips it. """
parts = idstr.split('v')
return parts[0]
# "1511.08198v1" is an example of a valid arxiv id that we accept
def isvalidid(pid):
  return re.match(r'^\d+\.\d+(v\d+)?$', pid)
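# e.g. isvalidid('1511.08198') and isvalidid('1511.08198v1') match,
# while old-style ids such as 'hep-ph/9901001' do not.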
|
sleep_time = 60*10 # in seconds
max_days_keep = 5 # max number of days to keep a tweet in memory
def get_db_pids():
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
  # build a plain dict keyed by pid: membership tests on a dict are cheap and, unlike holding
  # on to dict_keys, it doesn't keep a reference to the whole db dict, so db can be
  # deallocated once this function returns.
pid_dict = {p:1 for p in db}
return pid_dict
def get_keys():
lines = open('twitter.txt', 'r').read().splitlines()
return lines
# authenticate
keys = get_keys()
api = twitter.Api(consumer_key=keys[0],
consumer_secret=keys[1],
access_token_key=keys[2],
access_token_secret=keys[3])
print(api.VerifyCredentials())
def extract_arxiv_pids(r):
pids = []
for u in r.urls:
m = re.search('arxiv.org/abs/(.+)', u.expanded_url)
if m:
rawid = m.group(1)
pids.append(rawid)
return pids
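# e.g. a tweet linking http://arxiv.org/abs/1512.08756 yields pid '1512.08756'; everything
# after '/abs/' is captured, so versioned links would yield '1512.08756v2' etc.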
db_pids = get_db_pids()
seen = {}
epochd = datetime.datetime(1970,1,1,tzinfo=pytz.utc) # time of epoch
while True:
try:
results = api.GetSearch(raw_query="q=arxiv.org&result_type=recent&count=100")
ok = True
except Exception as e:
print('there was some problem:')
print(e)
time.sleep(sleep_time)
continue
tnow = time.time()
num_processed = 0
parsed = []
for r in results:
arxiv_pids = extract_arxiv_pids(r)
arxiv_pids = [p for p in arxiv_pids if p in db_pids] # filter to those that are in our paper db
if not arxiv_pids: continue # nothing relevant here, lets move on
if r.id in seen: continue # skip, already saw and recorded
seen[r.id] = {'seen':tnow} # mark as seen at this time
num_processed += 1
# collect all arxiv paper ids from valid urls
seen[r.id]['pids'] = arxiv_pids
    # parse & record the time of this tweet
d = parser.parse(r.created_at)
time_posted = (d - epochd).total_seconds()
seen[r.id]['time_posted'] = time_posted
print('processed %d/%d new tweets. Currently maintaining total %d' % (num_processed, len(results), len(seen)))
# maintain state: if something was seen > few days ago, forget it
maxdt = 60*60*24*max_days_keep
seen_new = { tweetid:d for tweetid,d in seen.items() if tnow - d['time_posted'] < maxdt }
print('previous seen dict had %d tweets, pruning to %d' % (len(seen), len(seen_new)))
seen = seen_new # swap
# compile all votes and write output for serving
votes = {}
for tweetid,d in seen.items():
for pid in d['pids']:
votes[pid] = votes.get(pid, 0) + 1
votes = [(v,k) for k,v in votes.items()]
votes.sort(reverse=True, key=lambda x: x[0]) # descending
print('top votes', votes[:min(len(votes), 10)])
print('writing', Config.tweet_path)
safe_pickle_dump(votes, Config.tweet_path)
# and sleep for a while
print('sleeping', sleep_time)
time.sleep(sleep_time)
|
timeout_secs = 10 # after this many seconds we give up on a paper
if not os.path.exists(Config.pdf_dir): os.makedirs(Config.pdf_dir)
have = set(os.listdir(Config.pdf_dir)) # get list of all pdfs we already have
numok = 0
numtot = 0
db = pickle.load(open(Config.db_path, 'rb'))
for pid,j in db.items():
pdfs = [x['href'] for x in j['links'] if x['type'] == 'application/pdf']
assert len(pdfs) == 1
pdf_url = pdfs[0] + '.pdf'
basename = pdf_url.split('/')[-1]
fname = os.path.join(Config.pdf_dir, basename)
# try retrieve the pdf
numtot += 1
try:
if not basename in have:
print('fetching %s into %s' % (pdf_url, fname))
req = urlopen(pdf_url, None, timeout_secs)
with open(fname, 'wb') as fp:
shutil.copyfileobj(req, fp)
time.sleep(0.05 + random.uniform(0,0.1))
else:
print('%s exists, skipping' % (fname, ))
numok+=1
except Exception as e:
print('error downloading: ', pdf_url)
print(e)
print('%d/%d of %d downloaded ok.' % (numok, numtot, len(db)))
print('final number of papers downloaded okay: %d/%d' % (numok, len(db)))
|
"""
Reads txt files of all papers and computes tfidf vectors for all papers.
Dumps results to file tfidf.p
"""
seed(1337)
max_train = 10000 # max number of tfidf training documents (chosen randomly), for memory efficiency
# read database
db = pickle.load(open(Config.db_path, 'rb'))
# read all text files for all papers into memory
txt_paths, pids = [], []
n = 0
for pid,j in db.items():
n += 1
idvv = '%sv%d' % (j['_rawid'], j['_version'])
txt_path = os.path.join('data', 'txt', idvv) + '.pdf.txt'
if os.path.isfile(txt_path): # some pdfs dont translate to txt
with open(txt_path, 'r') as f:
txt = f.read()
if len(txt) > 1000 and len(txt) < 500000: # 500K is VERY conservative upper bound
txt_paths.append(txt_path) # todo later: maybe filter or something some of them
pids.append(idvv)
print("read %d/%d (%s) with %d chars" % (n, len(db), idvv, len(txt)))
else:
print("skipped %d/%d (%s) with %d chars: suspicious!" % (n, len(db), idvv, len(txt)))
else:
print("could not find %s in txt folder." % (txt_path, ))
print("in total read in %d text files out of %d db entries." % (len(txt_paths), len(db)))
# compute tfidf vectors with scikits
v = TfidfVectorizer(input='content',
encoding='utf-8', decode_error='replace', strip_accents='unicode',
lowercase=True, analyzer='word', stop_words='english',
token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
ngram_range=(1, 2), max_features = 10000,
norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True,
max_df=1.0, min_df=1)
# create an iterator object to conserve memory
def make_corpus(paths):
for p in paths:
with open(p, 'r') as f:
txt = f.read()
yield txt
# train
train_txt_paths = list(txt_paths) # duplicate
shuffle(train_txt_paths) # shuffle
train_txt_paths = train_txt_paths[:min(len(train_txt_paths), max_train)] # crop
print("training on %d documents..." % (len(train_txt_paths), ))
train_corpus = make_corpus(train_txt_paths)
v.fit(train_corpus)
# transform
print("transforming %d documents..." % (len(txt_paths), ))
corpus = make_corpus(txt_paths)
X = v.transform(corpus)
print(v.vocabulary_)
print(X.shape)
# write full matrix out
out = {}
out['X'] = X # this one is heavy!
print("writing", Config.tfidf_path)
safe_pickle_dump(out, Config.tfidf_path)
# writing lighter metadata information into a separate (smaller) file
out = {}
out['vocab'] = v.vocabulary_
out['idf'] = v._tfidf.idf_
out['pids'] = pids # a full idvv string (id and version number)
out['ptoi'] = { x:i for i,x in enumerate(pids) } # pid to ix in X mapping
print("writing", Config.meta_path)
safe_pickle_dump(out, Config.meta_path)
print("precomputing nearest neighbor queries in batches...")
X = X.todense() # originally it's a sparse matrix
sim_dict = {}
batch_size = 200
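# note: rows of X are L2-normalized by the vectorizer, so the dot products below are cosine
# similarities; negating them makes argsort put the most similar papers first, and queries
# are processed batch_size rows at a time to bound memory use.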
for i in range(0,len(pids),batch_size):
i1 = min(len(pids), i+batch_size)
xquery = X[i:i1] # BxD
ds = -np.asarray(np.dot(X, xquery.T)) #NxD * DxB => NxB
IX = np.argsort(ds, axis=0) # NxB
for j in range(i1-i):
sim_dict[pids[i+j]] = [pids[q] for q in list(IX[:50,j])]
print('%d/%d...' % (i, len(pids)))
print("writing", Config.sim_path)
safe_pickle_dump(sim_dict, Config.sim_path)
|
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md));
  a minimal example is shown just after this list
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
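For example, a minimal `[versioneer]` section in `setup.cfg` might look like the
following (paths and prefixes are illustrative, not prescriptive):
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-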
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import
the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
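For example, from `setup.py` (the values shown are illustrative):
    import versioneer
    print(versioneer.get_versions())
    # {'version': '0.11+2.g1076c97',
    #  'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
    #  'dirty': False, 'error': None, 'date': '2020-10-05T12:00:00+0000'}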
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
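As a concrete illustration, mapping `git describe` output to the "pep440" style:
    0.11-0-g1076c97        ->  0.11
    0.11-2-g1076c97        ->  0.11+2.g1076c97
    0.11-2-g1076c97-dirty  ->  0.11+2.g1076c97.dirty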
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [miniver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
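# usage sketch: the decorators below fill HANDLERS, e.g.
#     @register_vcs_handler("git", "get_keywords")
#     def git_get_keywords(versionfile_abs): ...
# makes the function reachable as HANDLERS["git"]["get_keywords"].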
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
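# illustrative example: for expanded keywords whose refnames string is
# " (HEAD -> master, tag: v1.0.0)" and tag_prefix "v", the "tag: " entry is kept,
# the prefix is stripped, and the reported version is "1.0.0".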
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
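# illustrative example: a describe output of "v1.2-4-gabc1234-dirty" with tag_prefix "v"
# parses into pieces containing {"closest-tag": "1.2", "distance": 4, "short": "abc1234",
# "dirty": True}, plus the full revision id and commit date.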
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
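# Illustrative sketch (not part of Versioneer itself): how a 'pieces' dict is
# rendered by a few of the styles above. All values below are made up.
def _example_render_styles():
    pieces = {
        "closest-tag": "1.2.3",
        "distance": 2,              # commits since the tag
        "short": "abc1234",         # short revision id
        "long": "abc1234" * 6,      # placeholder for the full revision id
        "dirty": True,              # uncommitted changes present
        "error": None,
        "date": "2021-01-01T00:00:00+0000",
    }
    return {
        "pep440": render(pieces, "pep440")["version"],              # 1.2.3+2.gabc1234.dirty
        "pep440-post": render(pieces, "pep440-post")["version"],    # 1.2.3.post2.dev0+gabc1234
        "git-describe": render(pieces, "git-describe")["version"],  # 1.2.3-2-gabc1234-dirty
    }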
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns a dict with 'version', 'full-revisionid', 'dirty', 'error' and 'date' keys.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
logger.remove()
logger.add(
sys.stderr, format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | {level} | {message}"
)
TFP_URL = "https://maayanlab.cloud/Enrichr/geneSetLibrary?mode=text&libraryName=TF_Perturbations_Followed_by_Expression"
TRRUST_URL = "https://www.grnpedia.org/trrust/data/trrust_rawdata.human.tsv"
MSIGDB_URL = "https://data.broadinstitute.org/gsea-msigdb/msigdb/release/7.4/c3.all.v7.4.symbols.gmt"
def download_trrust_reference(outfile):
edges = []
with urllib.request.urlopen(
TRRUST_URL,
) as f:
for line in f.readlines():
tf, target, regtype, pmid = line.decode().strip().split("\t")
# Just skip repression for now
if regtype in ["Activation", "Unknown"]:
edges.append([tf, target, 1])
edges = pd.DataFrame(edges, columns=["tf", "target", "interaction"])
edges.to_csv(outfile, sep="\t", index=False)
def download_msigdb_reference(outfile):
with urllib.request.urlopen(MSIGDB_URL) as gmt, open(outfile, "w") as fl1:
for line in gmt:
a = line.decode("utf-8").split()
tf = a[0].split("_")[0]
targets = a[2:]
for target in targets:
fl1.write(f"{tf}\t{target}\n")
def fix_columns(df):
"""Make sure network has a tf and a target column."""
df.columns = df.columns.str.lower()
df = df.rename(
columns={
"source": "tf",
"source_target": "tf_target",
"target_gene": "target",
}
)
if "tf_target" in df.columns:
df[["tf", "target"]] = df["tf_target"].str.split("_", expand=True).iloc[:, :2]
df = df.drop(columns=["tf_target"])
if "tf" not in df.columns:
raise ValueError("Expect a column named 'source' or 'tf'")
if "target" not in df.columns:
raise ValueError("Expect a column named 'target' or 'target_gene'")
return df
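# Illustrative sketch (not part of this module): fix_columns() normalizes the
# different column naming conventions to 'tf' and 'target'. The data is made up.
def _example_fix_columns():
    import pandas as pd

    df = pd.DataFrame(
        {"source_target": ["GATA1_KLF1", "TP53_MDM2"], "prob": [0.9, 0.8]}
    )
    # 'source_target' is split into 'tf' and 'target'; 'prob' is kept as-is.
    return fix_columns(df)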
def prepare_reference_network(network, filter_tfs=True):
"""Generate reference network.
This network contains all possible edges, based on the TFs
and the target genes in the input. TFs are optionally filtered
to contain only validated TFs.
Returns
-------
DataFrame with column `"interaction"` having 1 for a validated
edge and 0 otherwise.
"""
if isinstance(network, pd.DataFrame):
df = network.reset_index()
elif isinstance(network, str):
if network.endswith("feather"):
df = pd.read_feather(network)
else:
df = pd.read_table(network)
else:
raise ValueError("Unknown network type, need DataFrame or filename.")
df = fix_columns(df)
interaction_column = None
for col in df.columns:
if col in ["tf", "target"]:
continue
vals = df[col].unique()
if len(vals) in [1, 2] and 1 in vals:
interaction_column = col
break
tfs = set(df["tf"].unique())
if filter_tfs:
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
targets = df["target"].unique()
# logger.info(
# f"{os.path.split(network)[-1]} reference - {len(tfs)} TFs, {len(targets)} targets, {df.shape[0]} edges."
# )
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
if interaction_column is not None:
logger.info(f"Using '{interaction_column}' as interaction column.")
df = df.set_index(["tf", "target"])[[interaction_column]].rename(
columns={interaction_column: "interaction"}
)
else:
logger.info("No column with 1 found, assuming all lines are positive edges.")
df = df.set_index(["tf", "target"])
df["interaction"] = 1
return total.join(df[["interaction"]]).fillna(0)
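# Illustrative sketch (not part of this module): build a reference network from
# a tiny in-memory DataFrame. filter_tfs=False avoids the download in get_tfs().
def _example_prepare_reference_network():
    import pandas as pd

    edges = pd.DataFrame(
        {
            "tf": ["GATA1", "GATA1", "TAL1"],
            "target": ["KLF1", "TAL1", "KLF1"],
            "interaction": [1, 1, 1],
        }
    )
    # The result contains every tf/target combination, with 'interaction' set
    # to 1 for the edges above and 0 for the rest.
    return prepare_reference_network(edges, filter_tfs=False)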
def _read_dorothea_reference(fname):
dorothea = pd.read_table(fname)
cols = [
"is_evidence_chip_seq",
"is_evidence_curated",
"is_evidence_inferred",
"is_evidence_tfbs",
]
dorothea = dorothea.set_index(["tf", "target"])[cols]
for col in cols:
dorothea[col] = dorothea[col].astype(int)
dorothea["dorothea"] = np.any(dorothea[cols] == 1, 1).astype(int)
dorothea = dorothea.reset_index()
tfs = set(dorothea["tf"].unique())
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
targets = dorothea["target"].unique()
logger.info(
f"Dorothea reference - {len(tfs)} TFs, {len(targets)} targets, {dorothea.shape[0]} edges."
)
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
dorothea = dorothea.set_index(["tf", "target"])
dorothea = total.join(dorothea)
dorothea = dorothea.fillna(0)
return dorothea
def _read_enrichr_perturbation_reference(fname=None):
"""Uses the TF perturbations from Enrichr[1,2] to create reference edges.
Targets are defined by up- or down-regulated genes from the following sets:
Up: INDUCTION, ACTIVATION, OE.
Down: KD, KO, INACTIVATION, DEPLETION, SIRNA, SHRNA, KNOCKOUT, DELETION, INHIBITION.
The TFs and targets in the DataFrame consist of the Cartesian product of
all TFs and target genes that occur in the set.
Returns
-------
DataFrame with tf-target edges.
References
----------
.. [1] Chen EY, Tan CM, Kou Y, Duan Q, Wang Z, Meirelles GV, Clark NR, Ma'ayan A.
"Enrichr: interactive and collaborative HTML5 gene list enrichment analysis
tool." BMC Bioinformatics. 2013; 14:128.
.. [2] Kuleshov MV, Jones MR, Rouillard AD, Fernandez NF, Duan Q, Wang Z,
Koplev S, Jenkins SL, Jagodnik KM, Lachmann A, McDermott MG, Monteiro CD,
Gundersen GW, Ma'ayan A. "Enrichr: a comprehensive gene set enrichment
analysis web server 2016 update." Nucleic Acids Research. 2016; gkw377.
"""
use_online = False
if fname:
fopen = open(fname)
else:
logger.info(
"No filename provided for TF perturbations, downloading from Enrichr"
)
fopen = urllib.request.urlopen(TFP_URL)
use_online = True
p = re.compile(r"(\w+)\s+(\w+)\s+(.+)\s+(\w+)")
all_info = []
edges = []
with fopen as f:
for line in f:
if use_online:
line = line.decode("utf-8")
vals = line.strip().split("\t")
m = re.search(p, vals[0])
all_info.append(m.groups(0))
if (
m.group(2) in ["INDUCTION", "ACTIVATION", "OE"] and m.group(4) == "UP"
) or (
m.group(2)
in [
"KD",
"KO",
"INACTIVATION",
"DEPLETION",
"SIRNA",
"SHRNA",
"KNOCKOUT",
"DELETION",
"INHIBITION",
]
and m.group(4) == "DOWN"
):
tf = m.group(1)
for target in vals[2:]:
edges.append([tf, target])
all_info = pd.DataFrame(all_info, columns=["tf", "exp", "info", "up_down"])
perturb_df = pd.DataFrame(edges, columns=["tf", "target"])
tfs = set(perturb_df["tf"].unique())
targets = perturb_df["target"].unique()
logger.info(
f"TF perturbation reference - {len(tfs)} TFs, {len(targets)} targets, {perturb_df.shape[0]} edges."
)
perturb_df["experiments"] = 1
perturb_df = perturb_df.groupby(["tf", "target"]).count()
perturb_df["interaction"] = 1
perturb_df.columns = ["perturb_experiments", "perturb_interaction"]
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
perturb_df = total.join(perturb_df).fillna(0)
return perturb_df
def get_tfs():
valid_factors = pd.read_excel(
"https://www.biorxiv.org/content/biorxiv/early/2020/12/07/2020.10.28.359232/DC1/embed/media-1.xlsx",
engine="openpyxl",
sheet_name=1,
)
valid_factors = valid_factors.loc[
valid_factors["Pseudogene"].isnull(), "HGNC approved gene symbol"
].values
valid_factors = [f for f in valid_factors if f != "EP300"]
return valid_factors
def read_network(fname, name=None):
network = fname
if fname.endswith("feather"):
df = pd.read_feather(network)
else:
df = pd.read_table(network)
df = fix_columns(df)
df = df.set_index(["tf", "target"])
# Assuming last column is the edge weight
df = df.iloc[:, [-1]]
if name is not None:
df.columns = [name]
return df
def _read_correlation_reference(network, corCutoff=0.6):
tfs_name = f"{os.path.dirname(ananse.__file__)}/db/tfs.txt"
tfs = pd.read_csv(tfs_name, header=None)[0].tolist()
edb = pd.read_csv(network, sep="\t")
edb["iscorrelation"] = [1 if i > corCutoff else 0 for i in edb["correlationRank"]]
edb[["tf", "target"]] = edb["source_target"].str.split("_", expand=True).iloc[:, :2]
edb = edb.drop(
columns=["source_target", "ocorrelation", "correlation", "correlationRank"]
)
edb = edb[edb.tf.isin(tfs)]
edb = edb.set_index(["tf", "target"])
return edb
def _read_goterm_reference(network, goCutoff=0):
tfs_name = f"{os.path.dirname(ananse.__file__)}/db/tfs.txt"
tfs = pd.read_csv(tfs_name, header=None)[0].tolist()
gdb = pd.read_csv(network, sep="\t", header=None)
gdb["isgo"] = [1 if i > goCutoff else 0 for i in gdb[2]]
gdb = gdb.rename(columns={3: "tf", 1: "target"})
gdb = gdb[gdb.tf.isin(tfs)]
gdb = gdb.drop(columns=[0, 2])
gdb = gdb.set_index(["tf", "target"])
return gdb
def _read_msigdb_reference(network):
msidb = pd.read_csv(network, sep="\t", header=None)
msidb = msidb.rename(columns={0: "tf", 1: "target"})
msidb = msidb.set_index(["tf", "target"])
msidb["interaction"] = 1
return msidb
def _read_regnet_reference(network):
regnet = pd.read_csv(network)
regnet = regnet.rename(
columns={"regulator_symbol": "tf", "target_symbol": "target"}
)
regnet = regnet.set_index(["tf", "target"])
regnet["interaction"] = 1
return regnet[["interaction"]]
def read_reference(name, fname=None):
"""
Valid reference networks (name):
- dorothea
- perturbation
- correlation
- goterm
- msigdb
- regnet
- trrust
"""
if name.lower() == "dorothea":
return _read_dorothea_reference(fname)
if name.lower() == "perturbation":
return prepare_reference_network(_read_enrichr_perturbation_reference(fname))
if name.lower() == "correlation":
return prepare_reference_network(_read_correlation_reference(fname, 0.6))
if name.lower() == "goterm":
return prepare_reference_network(_read_goterm_reference(fname, 0))
if name.lower() == "msigdb":
return prepare_reference_network(_read_msigdb_reference(fname))
if name.lower() == "regnet":
return prepare_reference_network(_read_regnet_reference(fname))
if name.lower() == "trrust":
return prepare_reference_network(fname)
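# Illustrative sketch (not part of this module): typical calls to
# read_reference(). The file name is hypothetical; "perturbation" downloads
# from Enrichr when no file is given.
def _example_read_reference():
    dorothea = read_reference("dorothea", "dorothea.tsv")
    perturbation = read_reference("perturbation")
    return dorothea, perturbation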
def validate_files(fnames, ignore_missing=False):
file_error = False
for fname in fnames:
if not os.path.exists(fname):
logger.error(f"file {fname} does not exist")
file_error = True
if not ignore_missing and file_error:
raise ValueError("One or more files not found!")
def read_networks(network_dict, ignore_missing=False):
"""Read predicted networks.
Input is a dictionary with name as key and filename as value.
"""
# Validate files first
validate_files(network_dict.values(), ignore_missing=ignore_missing)
df = pd.DataFrame({"tf": [], "target": []}).set_index(["tf", "target"])
for name, fname in network_dict.items():
if os.path.exists(fname):
logger.info(f"Reading {name}")
tmp = read_network(fname, name=name)
logger.info(f"Merging {name}")
df = df.join(tmp, how="outer")
return df
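# Illustrative sketch (not part of this module): merge several predicted
# networks into one DataFrame indexed by (tf, target). File names are made up.
def _example_read_networks():
    networks = {
        "ananse": "ananse_network.tsv",
        "correlation": "correlation_network.feather",
    }
    # With ignore_missing=True, missing files are logged and skipped.
    return read_networks(networks, ignore_missing=True)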
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = " (HEAD -> master)"
git_full = "18995f01657db5e92d4558eff4c1e81d30ff088e"
git_date = "2021-09-28 10:06:03 +0200"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "ananse-"
cfg.versionfile_source = "ananse/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
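# Illustrative sketch (not part of Versioneer itself): run_command() tries each
# candidate executable in turn and returns (stdout, returncode), or (None, ...)
# when the command cannot be run.
def _example_run_command():
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--short", "HEAD"], hide_stderr=True)
    return out if rc == 0 else None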
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
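# Illustrative sketch (not part of Versioneer itself): expanded git-archive
# keywords as they might look in a tarball made from a tagged commit. The tag
# and hash below are made up; with tag_prefix "v" this yields version "0.3.0".
def _example_versions_from_keywords():
    keywords = {
        "refnames": " (HEAD -> master, tag: v0.3.0)",
        "full": "0123456789abcdef0123456789abcdef01234567",
        "date": "2021-01-01 00:00:00 +0000",
    }
    return git_versions_from_keywords(keywords, "v", verbose=False)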
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
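# Illustrative sketch (not part of Versioneer itself): how a 'git describe'
# string maps onto the 'pieces' dict built above. The output below is made up.
def _example_parse_describe():
    describe_out = "v0.3.0-2-gabc1234-dirty"
    dirty = describe_out.endswith("-dirty")
    stripped = describe_out[: describe_out.rindex("-dirty")] if dirty else describe_out
    mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", stripped)
    return {
        "closest-tag": mo.group(1)[len("v"):],  # "0.3.0" after stripping the tag prefix
        "distance": int(mo.group(2)),           # 2 commits since the tag
        "short": mo.group(3),                   # "abc1234"
        "dirty": dirty,                         # True: working tree had local changes
    }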
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
# This motif file is not created by default
# * f"{self.data_dir}/reference.factor.feather"
class PeakPredictor:
def __init__(
self,
reference=None,
atac_bams=None,
histone_bams=None,
regions=None,
genome="hg38",
pfmfile=None,
factors=None,
pfmscorefile=None,
ncpus=4,
):
self.data_dir = reference
if atac_bams is None and histone_bams is None:
raise ValueError("Need either ATAC-seq or H3K27ac BAM file(s).")
if genome is None:
logger.warning("Assuming genome is hg38")
genome = "hg38"
self.genome = genome
self.set_species(genome)
if pfmfile is None and self.species not in ["human", "mouse"]:
logger.warning(
f"The genome '{genome}' is not recognized as human or mouse."
)
logger.warning(
"If you do have another species, the motif file likely needs to be adapted."
)
logger.warning(
"Currently mouse and human gene names are used to link motif to TFs."
)
logger.warning(
"If your gene symbols are different, then you will need to create a new mapping"
)
logger.warning(
"and use the `-p` argument. For a possible method to do this, see here:"
)
logger.warning(
"https://gimmemotifs.readthedocs.io/en/stable/reference.html#command-gimme-motif2factors"
)
# Set basic information
self.ncpus = ncpus
self._atac_data = None
self._histone_data = None
self.factor_models = {}
self.pfmfile = pfmfile
self._load_motifs(factors=factors)
# if the reference regions are used, we can use existing data such
# as motif scores.
if regions is None:
self.region_type = "reference"
self._load_reference_data()
# If we have custom regions we have to scan for motifs.
else:
self.region_type = "custom"
self.regions = regions
if pfmscorefile is None:
self._scan_motifs(regions)
else:
self._load_prescanned_motifs(pfmscorefile)
# Load ATAC data
if atac_bams is not None:
self.load_atac(atac_bams, update_models=False)
# Load histone ChIP-seq data
if histone_bams is not None:
self.load_histone(histone_bams, update_models=False)
self._set_model_type()
def _scan_motifs(self, regions):
"""[summary]
Parameters
----------
regions : [type]
[description]
"""
logger.info("Scanning regions for motifs.")
with NamedTemporaryFile(mode="w") as f:
print("region", file=f)
for region in regions:
print(region, file=f)
f.flush()
# TODO: we're still scanning for *all* motifs, even if we only have
# a few factors
motif_df = scan_regionfile_to_table(
f.name, self.genome, "score", ncpus=self.ncpus
)
self._motifs = pd.DataFrame(index=motif_df.index)
for factor in self.f2m:
# if factor not in valid_factors:
# continue
self._motifs[factor] = motif_df[self.f2m[factor]].mean(1)
def _load_prescanned_motifs(self, pfmscorefile):
"""
Use pre-scanned gimmemotifs motif scores.
Parameters
----------
pfmscorefile : str/file
pre-scanned gimmemotifs scores file
"""
logger.info("loading pre-scanned motif scores.")
motif_df = pd.read_table(pfmscorefile, comment="#", index_col=0)
self._motifs = pd.DataFrame(index=motif_df.index)
for factor in self.f2m:
# if factor not in valid_factors:
# continue
self._motifs[factor] = motif_df[self.f2m[factor]].mean(1)
def _load_reference_data(self):
"""Load data for reference regions.
Will load three types of data:
* Motif scores.
* The average peak coverage (self._avg)
* The distance from the peak to nearest TSS. (self._dist)
All of these data are only used with the reference set of regions.
"""
# Read motifs
logger.info("loading motifs for reference")
self._motifs = pd.read_feather(f"{self.data_dir}/reference.factor.feather")
self._motifs.set_index(self._motifs.columns[0], inplace=True)
# Read average coverage
logger.info("loading average peak coverage for reference")
self._avg = pd.read_table(
f"{self.data_dir}/reference.coverage.txt",
sep="\t",
comment="#",
index_col=0,
)
self._avg.columns = ["average"]
self._avg["average"] = self._avg["average"] / self._avg["average"].max()
# Read distance to TSS
logger.info("loading distance for reference")
self._dist = pd.read_table(
f"{self.data_dir}/reference.dist_to_tss.txt",
sep="\t",
comment="#",
index_col=0,
)
# Set regions
self.regions = self._avg.index
def _load_human_factors(self):
package_dir = os.path.dirname(ananse.__file__)
tf_xlsx = os.path.join(package_dir, "db", "lovering.tfs.xlsx")
valid_factors = pd.read_excel(
tf_xlsx,
engine="openpyxl",
sheet_name=1,
)
valid_factors = valid_factors.loc[
valid_factors["Pseudogene"].isnull(), "HGNC approved gene symbol"
].values
valid_factors = list(set(valid_factors) - set(["EP300"]))
return valid_factors
def set_species(self, genome):
try:
# Try to get taxonomy id for genomepy managed genome.
# If there is a taxonomy id, we can be really sure about the species.
# If genome doesn't have a tax_id, then it will be 'na' and
# fail to convert to int.
genome = Genome(genome)
tax_id = int(genome.tax_id)
if tax_id == 9606:
self.species = "human"
elif tax_id == 10090:
self.species = "mouse"
else:
# tax_id converts to int, so it is valid, but it is not human or mouse
self.species = None
return
except Exception:
pass
mapping = {
"hg38": "human",
"hg19": "human",
"GRCh3": "human",
"mm10": "mouse",
"mm9": "mouse",
"GRCm3": "mouse",
}
base_genome = os.path.basename(self.genome.strip("/"))
for name, species in mapping.items():
if name in base_genome:
self.species = species
return
self.species = None
def factors(self):
if self.species == "human":
valid_factors = self._load_human_factors()
return [f for f in self.f2m if f in valid_factors]
if self.species == "mouse":
# Mouse mappings are included in the default motif db.
# Using the fact here that mouse names are not all upper-case.
# TODO: replace with a curated set of factors.
return [f for f in self.f2m if f[1:].islower()]
return list(self.f2m.keys())
def _load_factor2motifs(self, pfmfile=None, indirect=True, factors=None):
motifs = read_motifs(pfmfile, as_dict=True)
f2m = {}
if self.species == "human":
valid_factors = self._load_human_factors()
for name, motif in motifs.items():
for factor in get_motif_factors(motif, indirect=indirect):
if factors is not None and factor not in factors:
continue
# TODO: this is temporary, while the motif database we use is
# not very clean...
if self.species == "human":
factor = factor.upper()
if self.species == "human" and factor not in valid_factors:
continue
f2m.setdefault(factor, []).append(name)
return f2m
def _load_motifs(self, indirect=True, factors=None):
"""Load motif-associated data.
For now, only default motifs are supported.
Will read factors associated to motifs, and generates a graph of
related factors based on different factors binding to the same motif.
This information is used to select the most appropriate TF model.
Parameters
----------
indirect : bool, optional
Include TF-motif associations that are not curated, for instance
based on ChIP-seq motif prediction, or binding inference. This will
greatly increase TF coverage. By default True.
"""
if self.pfmfile is None:
logger.info("using default motif file")
else:
logger.debug(f"Motifs: {self.pfmfile}")
self.motifs = read_motifs(self.pfmfile, as_dict=True)
self.f2m = self._load_factor2motifs(
pfmfile=self.pfmfile, indirect=indirect, factors=factors
)
if len(self.f2m) == 1:
logger.info("using motifs for 1 factor")
else:
logger.info(f"using motifs for {len(self.f2m)} factors")
# Create a graph of TFs where edges are determined by the Jaccard index
# of the motifs that they bind to. For instance, when TF 1 binds motifs
# A and B and TF 2 binds motifs B and C, the Jaccard index is 0.33 and
# the edge is stored with weight 1 - 0.33 = 0.67.
tmp_f2m = {}
if self.pfmfile is not None:
logger.debug("reading default file")
tmp_f2m = self._load_factor2motifs(indirect=True)
for k, v in self.f2m.items():
if k in tmp_f2m:
tmp_f2m[k] += v
else:
tmp_f2m[k] = v
self.motif_graph = nx.Graph()
d = []
for f1 in tmp_f2m:
for f2 in tmp_f2m:
jaccard = len(set(tmp_f2m[f1]).intersection(set(tmp_f2m[f2]))) / len(
set(tmp_f2m[f1]).union(set(tmp_f2m[f2]))
)
d.append([f1, f2, jaccard])
if jaccard > 0:
self.motif_graph.add_edge(f1, f2, weight=1 - jaccard)
def _load_bams(self, bams, title, window=200):
tmp = pd.DataFrame(index=self.regions)
with NamedTemporaryFile(mode="w") as f_out:
for region in self.regions:
print("{}\t{}\t{}".format(*re.split("[:-]", region)), file=f_out)
f_out.flush()
for bam in bams:
result = load_heatmap_data(
f_out.name,
bam,
bins=1,
up=window // 2,
down=window // 2,
rmdup=True,
rmrepeats=True,
)
tmp[result[0]] = result[2].T[0]
fname = f"{self.data_dir}/{title}.qnorm.ref.txt.gz"
if os.path.exists(fname):
logger.debug(f"quantile normalization for {title}")
qnorm_ref = pd.read_table(fname, index_col=0)["qnorm_ref"].values
if len(self.regions) != len(qnorm_ref):
qnorm_ref = np.random.choice(
qnorm_ref, size=len(self.regions), replace=True
)
tmp = qnorm.quantile_normalize(tmp, target=qnorm_ref)
else:
tmp = np.log1p(tmp)
# Limit memory usage by using float16
tmp = tmp.mean(1).astype("float16").to_frame(title)
fname = f"{self.data_dir}/{title}.mean.ref.txt.gz"
if self.region_type == "reference" and os.path.exists(fname):
mean_ref = pd.read_table(fname, index_col=0)
if mean_ref.shape[0] == tmp.shape[0]:
mean_ref.index = tmp.index
tmp[f"{title}.relative"] = (
tmp[title] - mean_ref.loc[tmp.index]["mean_ref"].values
)
tmp[f"{title}.relative"] = scale(tmp[f"{title}.relative"])
else:
logger.debug(f"Regions of {fname} are not the same as input regions.")
logger.debug("Skipping calculation of relative values.")
tmp[title] = tmp[title] / tmp[title].max()
return tmp
def load_atac(self, bams, update_models=True):
"""Load ATAC-seq counts from BAM files.
Parameters
----------
bams : list
List of file names.
update_models : bool, optional
Update the model used if data is loaded, by default True.
"""
logger.info("loading ATAC data")
self._atac_data = self._load_bams(bams, title="ATAC", window=200)
if update_models:
self._set_model_type()
def load_histone(self, bams, update_models=True):
"""Load H3K27ac ChIP-seq counts from BAM files.
Parameters
----------
bams : list
List of file names.
update_models : bool, optional
Update the model used if data is loaded, by default True.
"""
logger.info("loading H3K27ac data")
self._histone_data = self._load_bams(bams, title="H3K27ac", window=2000)
if update_models:
self._set_model_type()
def _set_model_type(self):
"""Select the mode to use for binding prediction.
Basically, this will select the columns that are available,
based on the different types of data that are loaded.
Reference regions will have the most information.
"""
cols = ["motif"]
if self._atac_data is not None:
cols += ["ATAC"]
if self.region_type == "reference":
cols += ["ATAC.relative"]
if self._histone_data is not None:
cols += ["H3K27ac"]
if self.region_type == "reference":
cols += ["average", "dist"]
cols = sorted(cols)
self._X_columns = cols
self._model_type = "_".join(cols)
# Load models
logger.info("Loading models")
# print(os.path.join(self.data_dir, self._model_type))
for fname in glob(os.path.join(self.data_dir, self._model_type, "*.pkl")):
factor = fname.split("/")[-1].replace(".pkl", "")
self.factor_models[factor] = joblib.load(fname)
logger.info(f"{len(self.factor_models)} models found")
def predict_proba(self, factor=None, motifs=None):
"""Predict binding probability.
Predict binding probability for either a TF (factor) or a set of
motifs. Prediction will be based on the data that has been loaded,
either ATAC-seq or H3K27ac data or both.
Parameters
----------
factor : str, optional
Transcription factor name.
motifs : [type], optional
Motifs. Currently not implemented.
Returns
-------
pandas.DataFrame
DataFrame with binding probabilities
"""
if factor is None and motifs is None:
raise ValueError("Need either a TF name or one or more motifs.")
if motifs is not None:
raise NotImplementedError("Custom motifs not yet implemented!")
if factor not in self.f2m:
raise ValueError(f"Motif not known for {factor}")
model, factor = self._load_model(factor)
X = self._load_data(factor)
proba = model.predict_proba(X)[:, 1]
return pd.DataFrame(proba, index=self.regions)
def _load_data(self, factor):
# if self.region_type == "reference":
# logger.debug("Reading motif data")
tmp = pd.DataFrame(
{factor: self._motifs[factor]}, index=self.regions
) # pd.read_table(os.path.join(self.data_dir, f"{factor}.motif.txt.gz"), index_col=0)
# else:
tmp.columns = ["motif"]
if self._atac_data is not None:
tmp = tmp.join(self._atac_data)
if self._histone_data is not None:
tmp = tmp.join(self._histone_data)
if self.region_type == "reference":
tmp = tmp.join(self._avg)
tmp = tmp.join(self._dist)
tmp = tmp.dropna()
# logger.debug(str(self._X_columns))
return tmp[self._X_columns]
def _load_model(self, factor):
model = None
if factor in self.factor_models:
logger.info(f"Using {factor} model")
model = self.factor_models[factor]
elif factor in self.motif_graph:
paths = {
p: v
for p, v in nx.single_source_dijkstra_path_length(
self.motif_graph, factor
).items()
if p in self.factor_models
}
try:
sub_factor = list(paths.keys())[0]
logger.info(f"Using {factor} motif with {sub_factor} model weights")
model = self.factor_models[sub_factor]
# factor = sub_factor
except Exception:
logger.info(f"No match for {factor} based on motifs")
if model is None:
logger.info(f"No related TF found for {factor}, using general model")
model = self.factor_models["general"]
return model, factor
def predict_factor_activity(self, nregions=20_000):
"""Predict TF activity.
Predicted based on motif activity using ridge regression.
Parameters
----------
"""
# Run ridge regression using motif score to predict (relative) ATAC/H3K27ac signal
try:
nregions = int(nregions)
except ValueError:
logger.warning("nregions is not an integer, using default number of 20_000")
nregions = 20_000
activity = pd.DataFrame()
for df in (self._atac_data, self._histone_data):
if df is None:
continue
for col in df.columns:
with NamedTemporaryFile() as f:
# float16 will give NaN's
signal = df[col].astype("float32")
signal = pd.DataFrame({col: scale(signal)}, index=df.index)
if df.shape[0] < nregions:
signal.to_csv(f.name, sep="\t")
else:
signal.sample(nregions).to_csv(f.name, sep="\t")
try:
activity = activity.join(
moap(
f.name,
genome=self.genome,
method="bayesianridge",
pfmfile=self.pfmfile,
),
how="outer",
)
except Exception as e:
print(e)
# Rank aggregation
for col in activity:
activity[col] = rankdata(activity[col])
activity = activity.mean(1)
activity[:] = minmax_scale(activity)
# Take the maximum activity from the motifs of each factor
factor_activity = []
for factor, motifs in self.f2m.items():
act = activity.loc[motifs].max()
factor_activity.append([factor, act])
factor_activity = pd.DataFrame(factor_activity, columns=["factor", "activity"])
return factor_activity
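# Illustrative sketch (not part of the original module): how predict_proba()
# might be used once a PeakPredictor has been initialized with ATAC-seq and/or
# H3K27ac data. The `predictor` argument and the factor name "TP53" are
# hypothetical placeholders.
def _example_predict_proba(predictor, factor="TP53"):
    """Return the 10 regions with the highest predicted binding probability."""
    proba = predictor.predict_proba(factor)  # DataFrame with one probability column
    return proba.sort_values(proba.columns[0], ascending=False).head(10)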
def _check_input_regions(regionfiles, genome, outdir=".", verbose=True, force=False):
# Load regions from BED or region text file
if regionfiles is None:
# Keep regions to None, use reference regions.
return
infile = regionfiles[0]
if len(regionfiles) > 1:
# merge files, assumed to be all BED
peak_width = 200
cbed = CombineBedFiles(genome=genome, peakfiles=regionfiles, verbose=verbose)
combined_bed = os.path.join(outdir, "regions_combined.bed")
cbed.run(outfile=combined_bed, width=peak_width, force=force)
infile = combined_bed
df = pd.read_table(infile, header=None, sep="\t", comment="#", dtype=str)
    assert df.shape[0] > 2, "regions file must have more than 2 regions."
test = str(df.at[1, 0])
if bool(re.match(r"^.*:\d+-\d+$", test)):
# it's a regions list
# or it's a Seq2science counts table
regions = df.iloc[:, 0].tolist()
elif df.shape[1] >= 3:
# it's a BED file
regions = (
# For Ensembl genome names, make sure it's a string
df.iloc[:, 0].astype(str)
+ ":"
+ df.iloc[:, 1].astype(str)
+ "-"
+ df.iloc[:, 2].astype(str)
).tolist()
else:
raise TypeError("Cannot identify regions file(s) type.")
# remove the header, if any.
header = str(regions[0])
if not bool(re.match(r"^.*:\d+-\d+$", header)):
regions = regions[1:]
return regions
def _check_input_files(*args):
files = []
for arg in args:
if arg is None:
continue
if isinstance(arg, list):
files.extend(arg)
else:
files.append(arg)
all_files_found = True
for fname in files:
if not os.path.exists(fname):
logger.exception(f"Could not find {fname}!")
all_files_found = False
if not all_files_found:
exit(1)
def predict_peaks(
outdir,
atac_bams=None,
histone_bams=None,
regionfiles=None,
reference=None,
factors=None,
genome=None,
pfmfile=None,
pfmscorefile=None,
ncpus=4,
):
"""Predict binding in a set of genomic regions.
Binding is predicted based on ATAC-seq and/or H3K27ac ChIP-seq data in
combination with motif scores. The model that is used is flexible, based
on the input data. The most accurate model will be the one that uses the
    reference regions in combination with both ATAC-seq and H3K27ac ChIP-seq.
    The result will be saved to an output file called `binding.h5` in the
    output directory, specified by the `outdir` argument. This file contains
    the predicted binding probability for every combination of transcription
    factor and enhancer region.
To predict binding, `predict_peaks()` needs a set of input regions. For
human, you have two options. You can either use the reference set of
putative enhancer regions, as described in the ANANSE manuscript [1]. This
is specified by the `reference` argument.
Alternatively, you can specify one or more region files with the
`regionfiles` argument. These are files in BED or narrowPeak format, that
describe potential enhancers. For instance, a reference enhancer set, peaks
from your ATAC-seq experiments or any other collection of regions. For
accurate motif analysis, these should be as precise as possible. BroadPeaks
from histone ChIP-seq are not really suitable. NarrowPeaks from ATAC-seq,
DNase-seq or TF ChIP-seq will be fine.
Parameters
----------
outdir : str
Name of output directory.
atac_bams : list, optional
List of BAM files, by default None
histone_bams : list, optional
List of H3K27ac ChIP-seq BAM files, by default None
regionfiles : list, optional
BED file or text file with regions, or a list of BED, narrowPeak or
        broadPeak files. If None, then the reference regions are used.
reference : str, optional
Directory name to a reference.
factors : list, optional
List of TF names or file with TFs, one per line. If None (default),
then all TFs are used.
genome : str, optional
Genome name. The default is hg38.
pfmfile : str, optional
Motifs in PFM format, with associated motif2factors.txt file.
pfmscorefile : str, optional
Path to file with pre-scanned motif scores.
ncpus : int, optional
Number of threads to use. Default is 4.
"""
if reference is None and regionfiles is None:
logger.error("Need either input regions or location of a reference set!")
logger.error(
"For human, you can download the REMAP reference here: https://doi.org/10.5281/zenodo.4768075 "
"(please see the docs on how to install this)."
)
logger.error(
"Otherwise you need to specify one or more BED or narrowPeak files"
)
logger.error(
"with potential enhancer regions, for instance, all ATAC-seq peaks"
)
logger.error("from your combined experiments.")
sys.exit(1)
if reference is not None and regionfiles is not None:
logger.error("Need either a reference location *or* or a set of input regions")
sys.exit(1)
# Check if all specified BAM files exist
_check_input_files(atac_bams, histone_bams)
# Read the factors, from a file if needed
factors = check_input_factors(factors)
# Check genome, will fail if it is not a correct genome name or file
Genome(genome)
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
# If regions are specified, read them in, combining multiple files if
# necessary.
regions = _check_input_regions(regionfiles, genome, outdir=outdir)
if reference is None:
install_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
reference = os.path.join(install_dir, "db", "default_reference")
if reference is not None:
if not os.path.exists(reference):
logger.error(f"Reference directory {reference} does not exist!")
sys.exit(1)
p = PeakPredictor(
reference=reference,
atac_bams=atac_bams,
histone_bams=histone_bams,
regions=regions,
genome=genome,
pfmfile=pfmfile,
factors=factors,
pfmscorefile=pfmscorefile,
ncpus=ncpus,
)
outfile = os.path.join(outdir, "binding.h5")
# Make sure we create a new file
with open(outfile, "w"):
pass
with HDFStore(outfile, complib="lzo", complevel=9) as hdf:
if p._atac_data is not None:
hdf.put(key="_atac", value=p._atac_data, format="table")
if p._histone_data is not None:
hdf.put(key="_h3k27ac", value=p._histone_data, format="table")
logger.info("Predicting TF activity")
factor_activity = p.predict_factor_activity()
hdf.put(key="_factor_activity", value=factor_activity, format="table")
for factor in p.factors():
try:
proba = p.predict_proba(factor)
hdf.put(
key=f"{factor}",
value=proba.iloc[:, -1].reset_index(drop=True).astype(np.float16),
format="table",
)
except ValueError as e:
logger.debug(str(e))
hdf.put(key="_index", value=proba.index.to_series(), format="table")
|
# Remove default logger
logger.remove()
# Add logger
logger.add(sys.stderr, format="{time} | {level} | {message}", level="INFO")
# This is here to prevent very high memory usage on numpy import.
# On a machine with many cores, just importing numpy can result in up to
# 8GB of (virtual) memory. This wreaks havoc on management of the dask
# workers.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
__version__ = get_versions()["version"]
del get_versions
|
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Predict TF influence score"""
# Python imports
warnings.filterwarnings("ignore")
# Here because of multiprocessing and pickling
Expression = namedtuple("Expression", ["score", "absfc", "realfc"])
def read_network(fname, edges=100000):
"""Read network file and return networkx DiGraph."""
G = nx.DiGraph()
rnet = pd.read_csv(fname, sep="\t")
nrnet = rnet.sort_values("prob", ascending=False)
if len(nrnet) < edges:
usenet = nrnet
else:
usenet = nrnet[:edges]
for vals in usenet.iterrows():
source, target = vals[1][0].split("_", 1)
try:
if len(vals[1]) > 1:
# weight = 1 - float(vals[1])
weight = float(vals[1][1])
# if weight < 0 or weight > 1:
# sys.stderr.write("expect weight between 0 and 1")
# sys.exit(1)
else:
weight = 0
G.add_edge(source, target, weight=weight, n=1)
except Exception:
sys.stderr.write("could not parse edge weight\n")
raise
return G
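# Illustrative sketch (not part of the original module): read_network() expects
# a tab-separated file whose first column contains "TF_target" identifiers and
# which has a "prob" column. Below, a tiny hypothetical network is written to a
# temporary file and read back as a networkx DiGraph.
def _example_read_network():
    import os
    import tempfile
    import pandas as pd
    df = pd.DataFrame(
        {
            "tf_target": ["TP53_MDM2", "TP53_CDKN1A", "SOX2_NANOG"],
            "prob": [0.9, 0.8, 0.7],
        }
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, "network.tsv")
        df.to_csv(fname, sep="\t", index=False)
        G = read_network(fname, edges=10)
    return G  # DiGraph with edges TP53->MDM2, TP53->CDKN1A and SOX2->NANOG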
def difference(S, R):
"""Calculate the network different between two cell types."""
DIF = nx.create_empty_copy(R)
for (u, v, d) in S.edges(data=True):
if (u, v) not in R.edges:
DIF.add_edge(u, v, weight=d["weight"], n=1)
else:
diff_weight = S.edges[u, v]["weight"] - R.edges[u, v]["weight"]
if diff_weight > 0:
DIF.add_edge(
u, v, weight=diff_weight, n=1, neglogweight=-np.log(diff_weight)
)
return DIF
def read_expression(fname):
"""Read differential gene expression analysis output, return dictionary with namedtuples of scores, absolute fold
change and "real" (directional) fold change.
input:
a tab-separated file containing 3 columns (HGNC gene symbols, (adjusted) p-values and log2foldchange)
header is omitted if starting with "resid"
"""
expression_change = dict()
df = pd.read_table(
fname,
index_col=0,
header=0,
dtype={"resid": str, "log2FoldChange": float, "padj": float},
)
# absolute fold change
df["fc"] = df["log2FoldChange"].abs()
    # get the g-score (absolute fold change if significantly differential)
df["score"] = df["fc"] * (df["padj"] < 0.05)
for k, row in df.iterrows():
expression_change[row.name] = Expression(
score=row.score, absfc=row.fc, realfc=row.log2FoldChange
)
return expression_change
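# Illustrative sketch (not part of the original module): read_expression()
# expects a tab-separated table with gene identifiers in the first column and
# "log2FoldChange" and "padj" columns (e.g. DESeq2 output). The gene names and
# values below are hypothetical.
def _example_read_expression():
    import os
    import tempfile
    import pandas as pd
    df = pd.DataFrame(
        {
            "resid": ["TP53", "SOX2", "NANOG"],
            "log2FoldChange": [2.5, -1.2, 0.1],
            "padj": [0.001, 0.2, 0.9],
        }
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, "degenes.tsv")
        df.to_csv(fname, sep="\t", index=False)
        expression_change = read_expression(fname)
    # only TP53 is significant (padj < 0.05), so only its score is non-zero
    return expression_change["TP53"].score  # 2.5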
def targetScore(node, G, expression_change, max_degree=3):
"""Calculate the influence score."""
# debug only.
# todo
# if expression_change is None:
# expression_change = {"score": {}, "fc": {}}
total_score = 0
# Get the targets that are within a certain number of steps from TF
lengths, paths = nx.single_source_dijkstra(G, node, cutoff=max_degree - 1)
targets = [t for t in lengths if 0 < lengths[t] <= max_degree]
for target in paths:
all_paths = {}
        # Calculate all paths from TF to target and select the path with the highest (length-corrected) weight
for path in nx.all_simple_paths(G, node, target, cutoff=max_degree - 1):
if len(path) <= max_degree:
weight = np.cumprod(
[G[s][t]["weight"] for s, t in zip(path, path[1:])]
)[-1]
# Add weight, corrected for the length of the path
all_paths[tuple(path)] = weight / (len(path) - 1)
if len(all_paths) > 0:
path, weight = sorted(all_paths.items(), key=lambda p: p[1])[-1]
# print(target, path, weight)
# outdegree of parent node of the target
# d = np.log(G.out_degree(path[-2]) + 1)
# d = G.out_degree(path[-2])
# the level (or the number of steps) that gene is away from transcription factor
pathlen = len(path)
# expression score of the target
g = expression_change[target].score if target in expression_change else 0
# weight is cumulative product of probabilities
# weight = [G[s][t]["weight"] for s, t in zip(path[:-1], path[1:])]
# cumulative sum of weight
# weight = np.cumprod(weight)[-1]
# score = g / len(path) / d * weight
score = g / pathlen * weight
total_score += score
# Get Mann-Whitney U p-value of direct targets vs. non-direct targets
direct_targets = [n for n in G[node] if n in expression_change]
non_direct_targets = [
n for n in list(G.nodes) if n in expression_change and n not in direct_targets
]
target_fc = [expression_change[t].absfc for t in direct_targets]
non_target_fc = [expression_change[t].absfc for t in non_direct_targets]
pval = mannwhitneyu(target_fc, non_target_fc)[1]
target_fc_diff = np.mean(target_fc) - np.mean(non_target_fc)
# factor, targetScore, directTargets, totalTargets, Gscore, pval, target_fc
return (
node,
total_score,
G.out_degree(node),
len(targets),
expression_change[node].absfc if node in expression_change else 0,
pval,
target_fc_diff,
)
def filter_TF(scores_df, network=None, tpmfile=None, tpm=20, overlap=0.98):
"""Filter TFs:
1) it have high expression in origin cell type;
2) 98% of its target genes are also regulated by previous TFs.
"""
tpmscore = {}
with open(tpmfile) as tpf:
next(tpf)
for line in tpf:
tpmscore[line.split()[0]] = float(line.split()[1])
tftarget = {}
for tf in scores_df.index:
tftarget[tf] = set(network[tf]) if tf in network else set()
ltf = list(scores_df.index)
keeptf = []
for i in ltf:
passtf = []
if len(tftarget[i]) > 0:
for j in ltf[: ltf.index(i)]:
if len(tftarget[i] & tftarget[j]) / len(tftarget[i]) > overlap:
break
else:
passtf.append(j)
if passtf == ltf[: ltf.index(i)] and i in tpmscore and tpmscore[i] < tpm:
keeptf.append(i)
scores_df = scores_df.loc[keeptf]
scores_df.sort_values("sumScaled", inplace=True, ascending=False)
return scores_df
def plot_influscore(infile, outfile):
"""Plot TF influence score to expression."""
mogrify = pd.read_table(infile, index_col="factor")
mogrify = mogrify.dropna()
factors = list(mogrify.sort_values("sumScaled").tail(20).index)
# factors = list(mogrify.sort_values("sumScaled").tail(20).index)
xcol = "factor_fc"
plt.figure(figsize=(8, 6))
sns.regplot(
data=mogrify,
x=xcol,
y="sumScaled",
fit_reg=False,
scatter_kws={"s": mogrify["directTargets"] / 10, "alpha": 0.5},
)
x = mogrify.loc[factors, xcol]
y = mogrify.loc[factors, "sumScaled"]
texts = []
for s, xt, yt in zip(factors, x, y):
texts.append(plt.text(xt, yt, s))
adjust_text(texts, arrowprops=dict(arrowstyle="-", color="black"))
plt.xlabel("Log2 fold change of TF")
plt.ylabel("Influence score")
plt.savefig(outfile, dpi=300)
class Influence(object):
def __init__(
self, outfile, degenes, Gbf=None, Gaf=None, filter=False, edges=100000, ncore=1
):
self.ncore = ncore
logger.info(f"Reading network(s), using top {edges} edges.")
# Load GRNs
if Gbf is None and Gaf is not None:
self.G = read_network(Gaf, edges=edges)
logger.warning("You only provide the target network!")
elif Gaf is None and Gbf is not None:
self.G = read_network(Gbf, edges=edges)
logger.warning("You only provided the source network!")
elif Gaf is None and Gbf is None:
logger.warning("You should provide at least one ANANSE network file!")
else:
G1 = read_network(Gbf, edges=edges)
G2 = read_network(Gaf, edges=edges)
self.G = difference(G2, G1)
logger.info(f"Differential network has {len(self.G.edges)} edges.")
# Load expression file
self.expression_change = read_expression(degenes)
self.outfile = outfile
# Filter TFs
self.filter = filter
def save_reg_network(self, filename):
"""Save the network difference between two cell types to a file."""
with open(filename, "w") as nw:
for (u, v, d) in self.G.edges(data=True):
nw.write(u + "\t" + v + "\t" + str(d["weight"]) + "\n")
def run_target_score(self, max_degree=3):
"""Run target score for all TFs."""
pool = mp.Pool(self.ncore)
jobs = []
tfs = [node for node in self.G.nodes() if self.G.out_degree(node) > 0]
logger.info(f"Differential network contains {len(tfs)} transcription factors.")
# differentially expressed TFs
detfs = [tf for tf in tfs if tf in self.expression_change]
if len(detfs) == 0:
sys.stderr.write(
"no overlapping transcription factors found between the network file(s) "
"(-s/--source, -t/--target) and the differential expression data (-d/--degenes)\n"
)
sys.exit(1)
detfs = [tf for tf in detfs if self.expression_change[tf].realfc > 0]
if len(detfs) == 0:
sys.stderr.write(
"no differentially expressed TFs found with a log2 fold change above 0\n"
)
sys.exit(1)
for tf in detfs:
jobs.append(
pool.apply_async(
targetScore, (tf, self.G, self.expression_change, max_degree)
)
)
# Get results and write to file
influence_file = open(self.outfile, "w")
influence_file.write(
"factor\tdirectTargets\ttotalTargets\ttargetsore\tGscore\tfactor_fc\tpval\ttarget_fc\n"
)
with tqdm(total=len(jobs)) as pbar:
for j in jobs:
(
factor,
score,
direct_targets,
total_targets,
factor_fc,
pval,
target_fc,
) = j.get()
print(
factor,
direct_targets,
total_targets,
score,
self.expression_change[factor].score,
factor_fc,
pval,
target_fc,
file=influence_file,
sep="\t",
)
pbar.update(1)
print("\n", file=influence_file)
pool.close()
influence_file.close()
scores_df = pd.read_table(self.outfile, index_col=0)
scores_df["targetScaled"] = minmax_scale(
rankdata(scores_df["targetsore"], method="dense")
)
scores_df.sort_values("targetScaled", inplace=True, ascending=False)
return self.outfile
def run_influence_score(self, influence_file, fin_expression=None):
"""Calculate influence score from target score and gscore"""
scores_df = pd.read_table(influence_file, index_col=0)
scores_df["targetScaled"] = minmax_scale(
rankdata(scores_df["targetsore"], method="dense")
)
scores_df["GscoreScaled"] = minmax_scale(
rankdata(scores_df["Gscore"], method="dense")
)
scores_df["sumScaled"] = minmax_scale(
rankdata(scores_df.targetScaled + scores_df.GscoreScaled, method="dense")
)
scores_df.sort_values("sumScaled", inplace=True, ascending=False)
scores_df = scores_df[
[
"targetScaled",
"GscoreScaled",
"sumScaled",
"directTargets",
"targetsore",
"factor_fc",
]
]
scores_df.to_csv(self.outfile, sep="\t")
if self.filter:
scores_df2 = filter_TF(
network=self.G, scores_df=scores_df, tpmfile=fin_expression
)
scores_df2.to_csv(
".".join(self.outfile.split(".")[:-1]) + "_filtered.txt", sep="\t"
)
def run_influence(self, plot=True, fin_expression=None):
logger.info("Save differential network")
self.save_reg_network(
".".join(self.outfile.split(".")[:-1]) + "_diffnetwork.txt"
)
logger.info("Run target score")
influence_file = self.run_target_score()
logger.info("Run influence score")
self.run_influence_score(influence_file, fin_expression=fin_expression)
if plot is True:
logger.info("Plot results")
plot_influscore(
self.outfile, ".".join(self.outfile.split(".")[:-1]) + ".pdf"
)
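# Illustrative sketch (not part of the original module): a hypothetical
# end-to-end use of the Influence class. The network files are placeholders for
# output of `ananse network`, and the degenes file for a DESeq2-style
# differential expression table.
def _example_influence():
    infl = Influence(
        outfile="influence.tsv",  # hypothetical output file
        degenes="degenes_source2target.tsv",  # hypothetical DE results
        Gbf="source.network",  # hypothetical source cell type network
        Gaf="target.network",  # hypothetical target cell type network
        edges=100_000,
        ncore=4,
    )
    infl.run_influence(plot=False)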
|
class Distributions:
def __init__(self):
# dist_functions = [f for f in dir(ananse.distributions) if f.endswith("_dist")]
dist_functions = [
scale_dist,
log_scale_dist,
scipy_dist,
peak_rank_dist,
peak_rank_file_dist,
]
self.functions = {func.__name__: func for func in dist_functions}
def get(self):
"""list distribution methods"""
return list(self.functions.keys())
def set(self, dist_func):
"""return a distribution method by name"""
dist_functions = self.get()
if dist_func not in dist_functions:
raise ValueError(
f"Distribution function '{dist_func}' not recognised. Options: {', '.join(dist_functions)}"
)
return self.functions[dist_func]
def scale_dist(scores, **kwargs): # noqa
"""
Scale the scores between 0 and 1
"""
return (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
def log_scale_dist(scores, **kwargs): # noqa
"""
Scale the log of the scores between 0 and 1
"""
scores = np.log(scores + 1)
return (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
def replace_infs(dist):
"""
    Replace positive and negative infinity with the closest real value in the array
"""
# https://stackoverflow.com/questions/12937824/lognormal-random-numbers-centered-around-a-high-value
if not isinstance(dist, np.ndarray):
dist = np.array(dist)
min_real_val = np.nanmin(dist[dist != -np.inf])
dist[dist == -np.inf] = min_real_val
max_real_val = np.nanmax(dist[dist != np.inf])
dist[dist == np.inf] = max_real_val
return dist
def scipy_dist(scores, **kwargs):
"""
fit scores to a scipy.stats distribution.
specified distribution name via kwargs['dist']
"""
if not isinstance(scores, np.ndarray):
scores = np.array(scores)
scores = scores + 1 # add pseudocount
x = range(len(scores))
dist_name = kwargs.get("dist", "lognorm")
if dist_name not in dir(stats):
raise ValueError(f"'{dist_name}' is not a recognized scipy.stats model.")
distribution = getattr(stats, dist_name) # eval(f"stats.{dist_name}")
# fit dist to data
params = distribution.fit(scores)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF
dist = distribution.pdf(x, loc=loc, scale=scale, *arg)
dist = replace_infs(dist)
return dist
# def lognorm_dist(scores, **kwargs):
# """
# fit scores to a log normal distribution
# """
# scores = scores + 1 # add pseudocount
# x = range(len(scores))
#
# # mu = np.log(scores).mean()
# # sigma = np.log(scores).std()
# # dist = stats.lognorm([sigma], loc=mu).pdf(x)
#
# s, loc, scale = stats.lognorm.fit(scores) # floc=0
# dist = stats.lognorm.pdf(x=x, s=s, loc=loc, scale=scale)
# return dist
def peak_rank_dist(scores, **kwargs): # noqa
"""
Fit scores to a distribution similar to what the p300 model was trained on
"""
# use a lognormal distribution:
# https://github.com/jsh58/Genrich#p-value-calculation
# # peak_rank_file = "ananse/db/peak_rank.txt"
# # scores = pd.read_csv(peak_rank_file, header=None)[0]
# # mu = np.log(scores+1).mean()
# # sigma = np.log(scores+1).std()
# mu = 1.0500836750482117
# sigma = 0.8000981267240566
#
# x = len(scores)
# rng = np.random.default_rng(seed=None)
# dist = rng.lognormal(mean=mu, sigma=sigma, size=x)
#
# print("proximity to the initial distribtion")
# print("delta mu:", np.abs(mu - np.log(dist).mean()))
# print("delta std:", np.abs(sigma - np.log(dist).std()))
# best fitting distribution turns out to be this loglaplace
x = range(len(scores))
c = 0.92
loc = 1.00
scale = 1.14
dist = stats.loglaplace.pdf(x=x, c=c, loc=loc, scale=scale)
dist = replace_infs(dist)
return dist
def peak_rank_file_dist(scores, **kwargs):
"""
fit scores to the distribution in kwargs['file'].
builtin files: "peak_rank.txt" and "peak_rank_hg38_h3k27ac.txt"
"""
if not isinstance(scores, np.ndarray):
scores = np.array(scores)
dist_filename = kwargs.get("file", "peak_rank.txt")
# internal data or user data
if dist_filename in ["peak_rank.txt", "peak_rank_hg38_h3k27ac.txt"]:
package_dir = os.path.dirname(__file__)
dist_filepath = os.path.join(package_dir, "db", dist_filename)
else:
dist_filepath = cleanpath(dist_filename)
if not os.path.exists(dist_filepath):
raise FileNotFoundError(f"Could not find file {dist_filepath}")
dist = pd.read_csv(dist_filepath, header=None)
n = scores.shape[0]
max_n = dist.shape[0]
if max_n < n:
raise ValueError(
f"Too many regions ({n}) to fit to '{dist_filename}' ({max_n})"
)
dist = dist.sample(n=n, random_state=1)[0].tolist()
return dist
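# Illustrative sketch (not part of the original module): looking up a
# distribution function by name through the Distributions registry and applying
# it to a hypothetical array of peak scores.
def _example_distributions():
    import numpy as np
    scores = np.array([1.0, 5.0, 10.0, 50.0, 100.0])  # hypothetical peak scores
    dist_func = Distributions().set("log_scale_dist")
    return dist_func(scores)  # log-transformed scores scaled between 0 and 1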
|
def check_path(arg, error_missing=True):
"""Expand all paths. Can check for existence."""
if arg is None:
return arg
args = [arg] if isinstance(arg, str) else arg
paths = [cleanpath(arg) for arg in args]
if error_missing:
for path in paths:
if not os.path.exists(path):
raise FileNotFoundError(
f"'{os.path.basename(path)}' not found in '{os.path.dirname(path)}'."
)
return paths[0] if isinstance(arg, str) else paths
def cleanpath(path):
"""Expand any path input to a literal path output"""
return os.path.abspath( # expand relative paths (inc './' and '../')
os.path.expanduser( # expand '~'
os.path.expandvars(path) # expand '$VARIABLES'
)
)
def shhh_bedtool(func):
"""
Decorator that silences pybedtools RuntimeWarnings such as
`line buffering (buffering=1) isn't supported in binary mode`
"""
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
func(*args, **kwargs)
return wrapper
@shhh_bedtool
def bed_sort(bed):
"""
Sort a bed file.
"""
tmpdir = tempfile.mkdtemp(prefix="ANANSE_")
try:
tmpfile = os.path.join(tmpdir, os.path.basename(bed))
BedTool(bed).sort(output=tmpfile)
shutil.copy2(tmpfile, bed)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
@shhh_bedtool
def bed_merge(list_of_beds, merged_bed):
"""
merge any number of bed files (merges overlapping regions)
"""
bed = BedTool(list_of_beds[0])
if list_of_beds[1:]:
bed = bed.cat(*list_of_beds[1:])
bed.saveas(merged_bed)
@shhh_bedtool
def count_reads(bams, peakfile, bed_output):
"""
Count bam reads in putative enhancer regions
"""
# replace with gimmemotifs.preprocessing.coverage_table()
bed = BedTool(peakfile)
bam_list = bams if isinstance(bams, list) else [bams]
bed.multi_bam_coverage(bams=bam_list, output=bed_output)
def samc(ncore):
"""set decent samtools range for samtools functions (1-5 total threads)"""
return max(0, min(ncore - 1, 4))
def bam_index(bam, force=True, ncore=1):
if force or not os.path.exists(f"{bam}.bai"):
index_parameters = [f"-@ {samc(ncore)}", bam]
pysam.index(*index_parameters) # noqa
def bam_sort(bam, ncore=1):
tmpdir = tempfile.mkdtemp(prefix="ANANSE_")
try:
sorted_bam = os.path.join(tmpdir, os.path.basename(bam))
sort_parameters = [f"-@ {samc(ncore)}", "-o", sorted_bam, bam]
pysam.sort(*sort_parameters) # noqa: pysam bug
shutil.copy2(sorted_bam, bam)
bam_index(bam, force=True, ncore=ncore)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
def bam_merge(list_of_bams, merged_bam, ncore=1):
"""
merge any number of (sorted) bam files
"""
[bam_index(bam, force=False, ncore=ncore) for bam in list_of_bams]
if len(list_of_bams) > 1:
merge_parameters = ["-f", f"-@ {samc(ncore)}", merged_bam] + list_of_bams
pysam.merge(*merge_parameters) # noqa: pysam bug
bam_index(merged_bam)
else:
# os.symlink() doesn't work with multi_bam_coverage()
bam = list_of_bams[0]
shutil.copy2(bam, merged_bam)
shutil.copy2(f"{bam}.bai", f"{merged_bam}.bai")
def mosdepth(bed, bam, bed_output, ncore=1):
"""
Count (median per base overlap of) bam reads in putative enhancer regions
"""
ncore = min(4, ncore)
tmpdir = tempfile.mkdtemp(prefix="ANANSE_")
try:
prefix = os.path.join(tmpdir, "bam_coverage")
cmd = f"mosdepth -nxm -t {ncore} -b {bed} {prefix} {bam}"
sp.check_call(cmd, shell=True)
tmp_bed_output = f"{prefix}.regions.bed"
cmd = f"gunzip -f {tmp_bed_output}.gz"
sp.check_call(cmd, shell=True)
shutil.copy2(tmp_bed_output, bed_output)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
# def bed_sum_coverages(multi_bam_coverage, sum_bam_coverage):
# """
# MultiBamCov returns a BED3+n with one column per bam.
# This function sums up all bam counts and returns a BED3+1.
# """
# bed = pd.read_csv(multi_bam_coverage, header=None, sep="\t")
# columns = bed.shape[1]
# if columns != 4:
# bed3 = bed.iloc[:, :3]
# scores = bed.iloc[:, 3:].sum(axis=1)
# bed = pd.concat([bed3, scores], axis=1)
# bed.to_csv(sum_bam_coverage, sep="\t", header=False, index=False)
# def non_empty_files(files, error_msg, size_threshold=10, verbose=True):
# """Check list of files for content
#
# Args:
# files: list of filepaths
# error_msg: message for all empty files
# size_threshold: minimum size to be considered non-empty
# verbose: return warnings?
#
# Returns:
# list of non-empty files
# """
# ret_files = []
# for file in files:
# if os.path.getsize(file) > size_threshold:
# ret_files.append(file)
# elif verbose:
# logger.warning(f"Empty file: '{os.path.basename(file)}'")
#
# if not ret_files:
# logger.exception(error_msg)
# exit(1)
# return ret_files
def mytmpdir():
"""
    returns a temp directory that is removed when the process completes.
    The directory is not removed if the process is killed by the user.
"""
if not hasattr(mytmpdir, "dir") or not mytmpdir.dir:
# can also be removed with clean_tmp()
mytmpdir.dir = tempfile.mkdtemp(prefix=f"ANANSE_{os.getpid()}.")
atexit.register(shutil.rmtree, mytmpdir.dir, ignore_errors=True)
return mytmpdir.dir
def clean_tmp():
"""
remove leftover temp dirs
temp dirs are left if ANANSE was killed by the user
"""
user = getpass.getuser()
tempdir = tempfile.gettempdir()
# all tmp files/directories starting with "ANANSE_" & owner by the user
tmp_files = os.listdir(tempdir)
ananse_files = [
os.path.join(tempdir, f) for f in tmp_files if f.startswith("ANANSE_")
]
user_files = [
f
for f in ananse_files
if os.path.exists(f) and pwd.getpwuid(os.stat(f).st_uid).pw_name == user
]
# delete
_ = [genomepy.utils.rm_rf(f) for f in user_files]
def get_motif_factors(motif, indirect=True):
"""Return all TFs that are associated with a motif."""
motif_factors = []
for factor_type, factors in motif.factors.items():
if factor_type == "direct" or indirect:
motif_factors += factors
return motif_factors
def check_input_factors(factors):
"""Check factors.
Factors can either be a list of transcription factors, or a filename of a
file that contains TFs. Returns a list of factors.
Returns
-------
list
List of TF names.
"""
# Load factors
if factors is None:
return
# if factors is a string, assume it's a filename
if isinstance(factors, str):
fname = factors
# if factors is a list of 1, and it exists, assume it's a filename
elif isinstance(factors, list) and len(factors) == 1:
fname = factors[0]
# It's a list with more than one value, assuming it's a list of TF names.
else:
return factors
if not os.path.exists(fname):
raise ValueError(f"Factors file '{factors}' does not exist")
factors = [line.strip() for line in open(fname)]
return factors
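# Illustrative sketch (not part of the original module): check_input_factors()
# returns a list of TF names unchanged and reads one name per line when given a
# file name instead. The TF names below are hypothetical.
def _example_check_input_factors():
    import os
    import tempfile
    # a list with more than one name is returned as-is
    assert check_input_factors(["TP53", "SOX2"]) == ["TP53", "SOX2"]
    # a single file name is read, one TF per line
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, "factors.txt")
        with open(fname, "w") as f:
            f.write("TP53\nSOX2\n")
        return check_input_factors(fname)  # ["TP53", "SOX2"]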
def view_h5(fname, tfs=None, fmt="wide"):
"""Extract information from an ANANSE binding.h5 file.
Parameters
----------
fname : str
File name (binding.h5).
tfs : list, optional
List of transcription factor names to extract. All TFs are used
by default.
fmt : str, optional
Return output in 'wide' or in 'long' format. Default is 'wide'.
Returns
-------
    pandas.DataFrame
"""
if fmt not in ["wide", "long"]:
raise ValueError("fmt should be either 'wide' or 'long'")
with pd.HDFStore(fname) as hdf:
if tfs is None:
tfs = [x for x in dir(hdf.root) if not x.startswith("_")]
idx = hdf.get("_index")
df = pd.DataFrame(index=idx.index)
for tf in tfs:
df[tf] = hdf.get(tf).values
if fmt == "long":
df.index.rename("loc", inplace=True)
df = df.reset_index().melt(
id_vars=["loc"], value_name="prob", var_name="factor"
)
return df
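# Illustrative sketch (not part of the original module): reading a binding.h5
# file produced by predict_peaks() with view_h5(). The file path and TF names
# are hypothetical placeholders.
def _example_view_h5():
    # wide format: one column per TF, regions as index
    wide = view_h5("ANANSE.binding/binding.h5", tfs=["TP53", "SOX2"])
    # long format: one row per (region, factor) pair with a 'prob' column
    long = view_h5("ANANSE.binding/binding.h5", tfs=["TP53", "SOX2"], fmt="long")
    return wide, long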
|
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Build gene regulatory network"""
# Python imports
warnings.filterwarnings("ignore")
PACKAGE_DIR = os.path.dirname(__file__)
class Network(object):
def __init__(
self,
ncore=1,
genome="hg38",
gene_bed=None,
include_promoter=False,
include_enhancer=True,
):
"""
infer cell type-specific gene regulatory network
Parameters
----------
ncore : int
Specifies the number of threads to use during analysis. (default: 1)
genome : str
The genome that is used for the gene annotation and the enhancer location. (default: "hg38")
gene_bed : str, optional
Gene annotation for the genome specified with -g as a 12 column BED file. (default: None)
include_promoter : bool
Include or exclude promoter peaks (<= TSS +/- 2kb) in network inference. (default: False)
include_enhancer : bool
Include or exclude enhancer peaks (> TSS +/- 2kb) in network inference. (default: True)
"""
self.ncore = ncore
self.genome = genome
self._tmp_files = []
# # Motif information file
# if pfmfile is None:
# self.pfmfile = "../data/gimme.vertebrate.v5.1.pfm"
# else:
# self.pfmfile = pfmfile
# self.motifs2factors = self.pfmfile.replace(".pfm", ".motif2factors.txt")
# self.factortable = self.pfmfile.replace(".pfm", ".factortable.txt")
# Gene information file
self.gene_bed = gene_bed
if gene_bed is None:
if self.genome in ["hg38", "hg19"]:
self.gene_bed = os.path.join(
PACKAGE_DIR, "db", f"{self.genome}.genes.bed"
)
else:
raise TypeError("Please provide a gene bed file with -a argument.")
if not os.path.exists(self.gene_bed):
raise FileNotFoundError(
f"Could not find the gene bed file {self.gene_bed}."
)
# self.promoter = promoter
self.include_promoter = include_promoter
self.include_enhancer = include_enhancer
@staticmethod
def unique_enhancers(fname):
"""Extract a list of unique enhancers.
Parameters
----------
fname : str
File name of a tab-separated file that contains an 'enhancer' column.
Returns
-------
PyRanges object with enhancers
"""
logger.info("reading enhancers")
# Read enhancers from binding file
header = pd.read_table(fname, nrows=0)
idx = header.columns.get_loc("enhancer")
skiprows = 1
chunksize = 2_000_000
enhancers = np.array([])
while True:
try:
tmp = pd.read_table(
fname,
usecols=[idx],
header=None,
nrows=chunksize,
skiprows=skiprows,
)
except pd.errors.EmptyDataError:
break
if tmp.shape[0] == 0 or tmp.iloc[0, 0] in enhancers:
break
skiprows += chunksize
enhancers = np.hstack((enhancers, tmp.iloc[:, 0].unique()))
enhancers = np.unique(enhancers)
# Split into columns and create PyRanges object
p = re.compile("[:-]")
enhancers = pr.PyRanges(
pd.DataFrame(
[re.split(p, e) for e in enhancers],
columns=["Chromosome", "Start", "End"],
)
)
return enhancers
@staticmethod
def distance_weight(
include_promoter=False,
include_enhancer=True,
alpha=1e4,
maximum_distance=100_000,
full_weight_region=5000,
promoter_region=2000,
):
"""Build weight distribution based on distance to TSS.
The basic idea is similar to Wang et al. [1], with some modifications.
The resulting weight ranges from 0 (far from the TSS) to 1 (near the
TSS) and is based on several different variables.
If `include_promoter` is `True`, then distances smaller than
`promoter_region` are included, otherwise they are excluded, the weight
is set to 0.
        The `full_weight_region` parameter determines the region where
the weight will be 1, regardless of distance. The `maximum_distance`
parameter sets the maximum distance to consider. The weight decays with
an increasing distance, starting from 1 at `full_weight_region` to 0
        at `maximum_distance`. The `alpha` parameter controls the decay.
Parameters
----------
include_promoter : bool, optional
Include promoter regions. Default is False.
include_enhancer : bool, optional
            Include enhancer regions, i.e. regions that are distal to the
promoter.
alpha : float, optional
Controls weight decay, default is 1e4.
maximum_distance : int, optional
Maximum distance from TSS to consider. Default is 100kb.
full_weight_region : int, optional
Distance where regions will receive the full weight. Default
is 5kb.
promoter_region : int, optional
Promoter region, default is 2kb.
Returns
-------
DataFrame with two columns: distance and weight.
References
----------
..[1] Wang S, Zang C, Xiao T, Fan J, Mei S, Qin Q, Wu Q, Li X, Xu K,
He HH, Brown M, Meyer CA, Liu XS. "Modeling cis-regulation with a
compendium of genome-wide histone H3K27ac profiles." Genome Res.
2016 Oct;26(10):1417-1429. doi: 10.1101/gr.201574.115. PMID: 27466232
"""
u = -math.log(1.0 / 3.0) * 1e5 / alpha
promoter_weight = int(include_promoter)
enhancer_weight = int(include_enhancer)
weight1 = pd.DataFrame(
{
"weight": [promoter_weight for _ in range(0, promoter_region + 1)],
"dist": range(0, promoter_region + 1),
}
)
weight2 = pd.DataFrame(
{
"weight": [
enhancer_weight
for _ in range(promoter_region + 1, full_weight_region + 1)
],
"dist": range(promoter_region + 1, full_weight_region + 1),
}
)
weight3 = pd.DataFrame(
{
"weight": [
enhancer_weight
* 2.0
* math.exp(-u * math.fabs(z) / 1e5)
/ (1.0 + math.exp(-u * math.fabs(z) / 1e5))
for z in range(1, maximum_distance - full_weight_region + 1)
],
"dist": range(full_weight_region + 1, maximum_distance + 1),
}
)
weight = pd.concat([weight1, weight2, weight3])
return weight
def enhancer2gene(
self,
peak_pr,
up=100_000,
down=100_000,
alpha=1e4,
promoter=2000,
full_weight_region=5000,
):
"""Couple enhancers to genes.
Parameters
----------
peak_pr : PyRanges object
PyRanges object with enhancer regions.
up : int, optional
Upstream maximum distance, by default 100kb.
down : int, optional
            Downstream maximum distance, by default 100kb.
alpha : float, optional
Parameter to control weight decay, by default 1e4.
promoter : int, optional
Promoter region, by default 2000.
full_weight_region : int, optional
Region that will receive full weight, by default 5000.
Returns
-------
pandas.DataFrame
DataFrame with enhancer regions, gene names, distance and weight.
"""
genes = region_gene_overlap(peak_pr, self.gene_bed)
# Get the distance from center of enhancer to TSS
# Correct for extension
genes["dist"] = (
(genes["Start_b"] + genes["End_b"]) / 2 - genes["Start"]
).astype(int)
genes.loc[genes["Strand"] == "+", "dist"] -= up
genes.loc[genes["Strand"] == "-", "dist"] -= down
genes["dist"] = np.abs(genes["dist"])
# Create region in chr:start:end format
genes["loc"] = (
genes["Chromosome"].astype(str)
+ ":"
+ genes["Start_b"].astype(str)
+ "-"
+ genes["End_b"].astype(str)
)
# Keep the gene-enhancer combination with the smallest distance
genes = genes.sort_values("dist").drop_duplicates(
subset=["loc", "Name"], keep="first"
)
# Return the right stuff
genes = genes.set_index("loc")[["Name", "dist"]].rename(
columns={"Name": "gene"}
)
        # Get distance-based weight
weight = self.distance_weight(
include_promoter=self.include_promoter,
include_enhancer=self.include_enhancer,
alpha=alpha,
promoter_region=promoter,
full_weight_region=full_weight_region,
).set_index("dist")
genes = genes.join(weight, on="dist")
return genes
def aggregate_binding(
self,
binding_fname,
tfs=None,
up=1e5,
down=1e5,
alpha=None,
promoter=2000,
full_weight_region=5000,
combine_function="sum",
):
"""Summarize all binding signal per gene per TF.
Return a dask delayed computation object.
Parameters
----------
binding_fname : str
Filename of binding network.
tfs : list, optional
List of transcription factor names, by default None, which means
that all TFs will be used.
up : int, optional
Maximum upstream region to include, by default 1e5
        down : int, optional
Maximum downstream region to include, by default 1e5
alpha : float, optional
Distance at which the weight will be half, by default None
promoter : int, optional
Promoter region, by default 2000
full_weight_region : int, optional
Region that will receive full weight, regardless of distance, by
default 5000.
combine_function : str, optional
How to combine signal of weighted enhancers, by default "sum".
Valid options are "sum", "mean" or "max".
Returns
-------
dask.DataFrame
DataFrame with delayed computations.
"""
if not os.path.exists(binding_fname):
raise ValueError(f"File {binding_fname} does not exist!")
if combine_function not in ["mean", "max", "sum"]:
raise NotImplementedError(
"Unknown combine function, valid options are: mean, max, sum"
)
maximum_distance = max(up, down)
if alpha is None:
alpha = maximum_distance / 10
if promoter > maximum_distance:
raise ValueError(
"promoter region is larger than the maximum distance to use"
)
hdf = HDFStore(binding_fname, "r")
# TODO: This is hacky (depending on "_"), however the hdf.keys() method is
# much slower. Currently all TF names do *not* start with "_"
all_tfs = [x for x in dir(hdf.root) if not x.startswith("_")]
logger.info(f"Binding file contains {len(all_tfs)} TFs.")
if tfs is None:
tfs = all_tfs
else:
not_valid = set(all_tfs) - set(tfs)
if len(not_valid) > 1:
logger.warning(
f"The following TFs are found in {binding_fname}, but do not seem to be TFs:"
)
logger.warning(", ".join(not_valid))
tfs = set(tfs) & set(all_tfs)
logger.info(f"Using {len(tfs)} TFs.")
# Read enhancer index from hdf5 file
enhancers = hdf.get(key="_index")
chroms = enhancers.index.to_series().str.replace(":.*", "").unique()
tmpdir = mkdtemp()
self._tmp_files.append(tmpdir) # mark for deletion later
# Summarize enhancers per gene, per chromosome. In principle this could
# also be done at once, however, the memory usage of dask is very finicky.
# This is a pragmatic solution, that seems to work well, does not use a
# lot of memory and is not too slow (~50 seconds per chromosome).
for chrom in chroms:
logger.info(f"Aggregating binding for genes on {chrom}")
# Get the index of all enhancers for this specific chromosome
idx = enhancers.index.str.contains(f"^{chrom}:")
idx_i = np.arange(enhancers.shape[0])[idx]
# Create a pyranges object
enhancer_pr = pr.PyRanges(
enhancers[idx]
.index.to_series()
.str.split(r"[:-]", expand=True)
.rename(columns={0: "Chromosome", 1: "Start", 2: "End"})
)
# Link enhancers to genes on basis of distance to annotated TSS
gene_df = self.enhancer2gene(
enhancer_pr,
up=up,
down=down,
alpha=alpha,
promoter=promoter,
full_weight_region=full_weight_region,
)
gene_df = gene_df.dropna()
bp = pd.DataFrame(index=enhancers[idx].index)
for tf in tqdm(
tfs, total=len(tfs), desc="Aggregating", unit_scale=1, unit=" TFs"
):
# Load TF binding data for this chromosome.
# hdf.get() is *much* faster here than pd.read_hdf()
bp[tf] = hdf.get(key=tf)[idx_i].values
# Skipping everything with weight 0, as it won't be counted anyway.
gene_df = gene_df[gene_df["weight"] > 0]
# Make sure binding score and enhancers match up (i.e. same enhancer
# is used for multiple genes)
gene_df = gene_df.join(bp).dropna()
bp = gene_df[tfs]
gene_df = gene_df[["gene", "weight"]]
# Multiply binding score by weight
bp = bp.mul(gene_df["weight"], axis=0)
# Summarize weighted score per gene
bp["gene"] = gene_df["gene"]
tmp = bp.groupby("gene")
if combine_function == "mean":
tmp = tmp.mean()
elif combine_function == "max":
tmp = tmp.max()
elif combine_function == "sum":
tmp = tmp.sum()
# Go from wide to long format, to be able to merge with other
# information later
tmp = tmp.reset_index().melt(
id_vars=tmp.index.name, var_name="tf", value_name="weighted_binding"
)
# Create dataframe with two columns: tf_gene and weighted_binding score
tmp["tf_target"] = tmp["tf"] + "_" + tmp["gene"]
tmp[["tf_target", "weighted_binding"]].to_csv(
os.path.join(tmpdir, f"{chrom}.csv"), index=False
)
hdf.close()
ddf = dd.read_csv(os.path.join(tmpdir, "*.csv")).set_index("tf_target")
return ddf
def _save_temp_expression(self, df, name):
tmp = df.rename(columns={"tpm": f"{name}_expression"})
tmp[f"{name}_expression"] = minmax_scale(tmp[f"{name}_expression"].rank())
tmp.index.rename(name, inplace=True)
tmp["key"] = 0
fname = NamedTemporaryFile(
prefix="ananse.", suffix=f".{name}.parquet", delete=False
).name
self._tmp_files.append(fname)
tmp.reset_index().to_parquet(fname, index=False)
return fname
def create_expression_network(
self, fin_expression, column="tpm", tfs=None, bindingfile=None
):
"""Create a gene expression based network.
Based on a file with gene expression levels (a TPM column), a
dask DataFrame is generated with the combined expression levels
        of the TF and the target gene. By default, the expression levels
are ranked and subsequently scaled between 0 and 1.
Parameters
----------
fin_expression : str or list
            One or more files that contain gene expression data.
First column should contain the gene names in HGNC symbols.
column : str, optional
Column name that contains gene expression, 'tpm' by default (case insensitive).
tfs : list, optional
List of TF gene names. All TFs will be used by default.
bindingfile : str, optional
Output file from ANANSE binding.
Returns
-------
Dask DataFrame with gene expression based values.
"""
# Convert to a list of filename(s)
if isinstance(fin_expression, str):
fin_expression = [fin_expression]
# Read all expression input files and take the mean expression per gene
re_column = re.compile(fr"^{column}$", re.IGNORECASE)
expression = pd.DataFrame(
pd.concat(
[
pd.read_table(f, index_col=0).filter(regex=re_column)
for f in fin_expression
],
axis=1,
).mean(1),
columns=[column],
)
expression[column] = np.log2(expression[column] + 1e-5)
genes = pd.read_table(
self.gene_bed, usecols=[3], comment="#", names=["name"], index_col=0
)
overlap = len(genes.index.intersection(expression.index))
if overlap / expression.shape[0] < 0.1:
logger.error(
"gene annotation identifiers do not seem to match between annotation and expression files!"
)
sample_exp = ", ".join(expression.sample(5).index.values)
sample_gene = ", ".join(genes.sample(5).index.values)
logger.error(f"expression sample: {sample_exp}")
logger.error(f"annotation sample: {sample_gene}")
sys.exit(1)
# Create the TF list, based on valid transcription factors
if tfs is None:
try:
act = pd.read_hdf(bindingfile, key="_factor_activity")
if "factor" in act.columns:
act = act.set_index("factor")
tfs = list(set(act.index.tolist()))
except KeyError:
tffile = os.path.join(PACKAGE_DIR, "db", "tfs.txt")
tfs = pd.read_csv(tffile, header=None)[0].tolist()
# Save TFs and targets as temporary files
idx = expression.index[expression.index.isin(tfs)]
tmp = expression.loc[idx]
if tmp.shape[0] == 0:
logger.error(
"None of the transcription factors are found in your expression file."
)
logger.error(
"If you have human data, please make sure you use HGNC symbols (gene names)."
)
logger.error(
"If you have non-human data, you have to create a custom motif to gene mapping."
)
logger.error("See this link for one possibility to create this file: ")
logger.error(
"https://gimmemotifs.readthedocs.io/en/stable/reference.html#command-gimme-motif2factors"
)
logger.error(
"If you use a custom motif mapping, you will also have (re)run `gimme binding` with this file."
)
sys.exit(1)
tf_fname = self._save_temp_expression(tmp, "tf")
target_fname = self._save_temp_expression(expression, "target")
# Read files (delayed) and merge on 'key' to create a Cartesian product
# combining all TFs with all target genes.
a = dd.read_parquet(tf_fname)
b = dd.read_parquet(target_fname)
network = a.merge(b, how="outer")
# Use one-column index that contains TF and target genes.
# This is necessary for dask, as dask cannot merge on a MultiIndex.
# Otherwise this would be an inefficient and unnecessary step.
network["tf_target"] = network["tf"] + "_" + network["target"]
network = network[
["tf", "target", "tf_target", "tf_expression", "target_expression"]
]
return network
def run_network(
self,
binding,
fin_expression=None,
tfs=None,
outfile=None,
up=1e5,
down=1e5,
alpha=None,
promoter=2000,
full_weight_region=5000,
):
"""Create network.
Parameters
----------
binding : str
Filename with binding information. Should contain at least three
columns: "factor", "enhancer" and "binding".
fin_expression : str or list, optional
Filename of list of filenames with expression information.
tfs : list, optional
List of transcription factors to use, by default None, which means
all TFs will be used.
outfile : str, optional
Output file. If None, returns a dataframe.
up : int, optional
Upstream maximum distance, by default 100kb.
down : int, optional
            Downstream maximum distance, by default 100kb.
alpha : float, optional
Parameter to control weight decay, by default 1e4.
promoter : int, optional
Promoter region, by default 2000.
full_weight_region : int, optional
Region that will receive full weight, by default 5000."""
# Expression base network
logger.info("Loading expression")
df_expression = self.create_expression_network(
fin_expression, tfs=tfs, bindingfile=binding
)
# Use a version of the binding network, either promoter-based, enhancer-based
# or both.
if self.include_promoter or self.include_enhancer:
df_binding = self.aggregate_binding(
binding,
tfs=tfs,
up=up,
down=down,
alpha=alpha,
promoter=promoter,
full_weight_region=full_weight_region,
combine_function="sum",
)
try:
act = pd.read_hdf(binding, key="_factor_activity")
if "factor" in act.columns:
act = act.set_index("factor")
logger.info("Reading factor activity")
act.index.name = "tf"
act["activity"] = minmax_scale(rankdata(act["activity"], method="min"))
df_expression = df_expression.merge(
act, right_index=True, left_on="tf", how="left"
).fillna(0.5)
except KeyError:
pass
df_expression = df_expression.drop(columns=["tf"])
# This is where the heavy lifting of all delayed computations gets done
# logger.info("Computing network")
if fin_expression is not None:
result = df_expression.merge(
df_binding, right_index=True, left_on="tf_target", how="left"
)
result = result.persist()
result = result.fillna(0)
logger.info("Computing network")
progress(result)
result = result.compute()
else:
result = df_binding
result["weighted_binding"] = minmax_scale(
rankdata(result["weighted_binding"], method="min")
)
columns = [
"tf_expression",
"target_expression",
"weighted_binding",
"activity",
]
columns = [col for col in columns if col in result]
logger.info(f"Using {', '.join(columns)}")
# Combine the individual scores
result["prob"] = result[columns].mean(1)
else:
result = df_expression
result["prob"] = result[["tf_expression", "target_expression"]].mean(1)
result = result.compute()
if outfile:
logger.info("Writing network")
out_dir = os.path.abspath(os.path.dirname(outfile))
os.makedirs(out_dir, exist_ok=True)
result[["tf_target", "prob"]].to_csv(outfile, sep="\t", index=False)
else:
return result[["tf_target", "prob"]]
def __del__(self):
if not hasattr(self, "_tmp_files"):
return
for fname in self._tmp_files:
if os.path.exists(fname):
shutil.rmtree(fname, ignore_errors=True)
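# Illustrative sketch (not part of the original module): the distance weight
# from Network.distance_weight() is 1 between the promoter region and
# `full_weight_region`, then decays towards 0 at `maximum_distance`, with
# `alpha` the distance beyond the full-weight region at which the weight is 0.5.
# The call below uses the default parameters.
def _example_distance_weight():
    weight = Network.distance_weight(
        include_promoter=False,
        include_enhancer=True,
        alpha=1e4,
        maximum_distance=100_000,
        full_weight_region=5000,
        promoter_region=2000,
    )
    # weight is 0 within the promoter region (promoters excluded here),
    # 1 between 2 kb and 5 kb, and close to 0 at 100 kb
    return weight.set_index("dist").loc[[0, 3000, 5000, 50_000, 100_000]]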
def region_gene_overlap(
region_pr,
gene_bed,
up=100_000,
down=100_000,
):
"""
Couple enhancers to genes.
Parameters
----------
region_pr : PyRanges object
PyRanges object with enhancer regions.
gene_bed : str
gene_bed
up : int, optional
Upstream maximum distance, by default 100kb.
down : int, optional
        Downstream maximum distance, by default 100kb.
Returns
-------
pandas.DataFrame
DataFrame with enhancer regions, gene names, distance and weight.
"""
genes = pr.read_bed(gene_bed)
# Convert to DataFrame & we don't need intron/exon information
genes = genes.as_df().iloc[:, :6]
# Get the TSS only
genes.loc[genes["Strand"] == "+", "End"] = genes.loc[
genes["Strand"] == "+", "Start"
]
genes.loc[genes["Strand"] == "-", "Start"] = genes.loc[
genes["Strand"] == "-", "End"
]
# Extend up and down
genes.loc[genes["Strand"] == "+", "Start"] -= up
genes.loc[genes["Strand"] == "+", "End"] += down
genes.loc[genes["Strand"] == "-", "Start"] -= down
genes.loc[genes["Strand"] == "-", "End"] += up
# Perform the overlap
genes = pr.PyRanges(genes)
genes = genes.join(region_pr).as_df()
return genes
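# Illustrative sketch (not part of the original module): a hypothetical
# end-to-end use of the Network class, combining a binding.h5 file from
# `ananse binding` with one or more expression tables containing a "tpm"
# column. All file names are placeholders.
def _example_run_network():
    net = Network(ncore=4, genome="hg38", include_promoter=False, include_enhancer=True)
    net.run_network(
        binding="ANANSE.binding/binding.h5",  # hypothetical binding file
        fin_expression=["quant_target.tsv"],  # hypothetical expression table(s)
        outfile="full_network.tsv",  # hypothetical output file
    )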
|
# NOTE: module path assumed; these helper functions are defined in ananse.utils
from ananse.utils import (
    bed_sort,
    bed_merge,
    bam_index,
    bam_sort,
    mosdepth,
)
class CombineBedFiles:
def __init__(self, genome, peakfiles, verbose=True):
self.genome = genome
self.list_of_peakfiles = (
peakfiles if isinstance(peakfiles, list) else [peakfiles]
)
self.verbose = verbose
@staticmethod
def is_narrowpeak(bed, check_values=True):
"""
Check BED type by column count.
Check if peak values are not all zeroes unless check_values is False.
Accepts a BED file (including narrowPeak, broadPeak, etc.)
Returns bool
"""
with open(bed) as b:
for line in b:
if line.startswith("#"):
continue
line = line.split("\t")
cols = len(line)
break
# narrowPeak has 10 columns
# and the peak column is >= 0
if cols != 10 or int(line[9]) < 0:
return False
if not check_values:
return True
# check if the peak values aren't all zeroes
summit_values = 0
sample_size = 20 # check an arbitrary number of lines
with open(bed) as b:
for n, line in enumerate(b):
if line.startswith("#"):
continue
line = line.split("\t")
peak_val = int(line[9])
# value must be >=0
if peak_val < 0:
return False
summit_values += peak_val
if n >= sample_size:
break
if summit_values > 0:
return True
return False
@staticmethod
def bed_resize(
genome,
bed_in,
bed_out,
width=200,
narrowpeak=False,
fix_outliers=False,
output_bed3=True,
verbose=True,
):
"""
Set bed region width.
If the input bed is a narrowPeak file (narrowpeak=True),
center region on the summit (start+peak).
Otherwise center on the middle of the region.
If fix_outliers is set to True, shift regions to fit their chromosomes.
Otherwise drop these regions.
If output_bed3 is set to False, output the whole bed file.
"""
half_seqlen = width // 2
chrom_sizes = genomepy.Genome(genome).sizes
missing_chrm = []
if narrowpeak:
def get_summit(_start, _, summit_offset):
return _start + int(summit_offset)
summit_col = 9
else:
def get_summit(_start, _end, _):
return (_start + _end) // 2
summit_col = 0 # unused
with open(bed_in) as old, open(bed_out, "w") as new:
for line in old:
if line.startswith("#"):
continue
line = line.split("\t")
chrm = str(line[0])
if chrm not in chrom_sizes.keys():
missing_chrm.append(chrm)
continue
start = int(line[1])
end = int(line[2])
rest = line[3:] if not output_bed3 else []
chrm_len = chrom_sizes[chrm]
if width == end - start:
nstart = str(start)
nend = str(end)
elif chrm_len <= width:
if not fix_outliers:
continue
nstart = str(0)
nend = str(chrm_len)
else:
summit = get_summit(start, end, line[summit_col])
if not fix_outliers:
nstart = str(summit - half_seqlen)
nend = str(summit + half_seqlen)
if int(nstart) < 0 or int(nend) > chrm_len:
continue
else:
# adjust the summit for the chromosome boundaries
summit = max(summit, 0 + half_seqlen)
summit = min(summit, chrm_len - half_seqlen)
nstart = str(summit - half_seqlen)
nend = str(summit + half_seqlen)
new.write("\t".join([chrm, nstart, nend] + rest) + "\n")
if missing_chrm and verbose:
logger.warning(
"The following contigs were present in "
+ f"'{os.path.basename(bed_in)}', "
+ "but were missing in the genome file: "
+ f"{', '.join(list(set(missing_chrm)))}\n"
)
return bed_out
def run(self, outfile, width=200, force=False):
if force or not os.path.exists(outfile):
if self.verbose:
logger.info("Combining bed files")
tmpdir = tempfile.mkdtemp(prefix="ANANSE_")
try:
list_of_beds = []
for peakfile in self.list_of_peakfiles:
# use narrowPeak Peak location for region centering if possible
is_np = self.is_narrowpeak(peakfile)
resized_peakfile = os.path.join(tmpdir, os.path.basename(peakfile))
# resize each BED region to 200 BP
self.bed_resize(
genome=self.genome,
bed_in=peakfile,
bed_out=resized_peakfile,
width=width,
narrowpeak=is_np,
verbose=self.verbose,
)
bed_sort(resized_peakfile)
list_of_beds.append(resized_peakfile)
# merge resized beds into one
merged_bed = os.path.join(tmpdir, "merged")
bed_merge(list_of_beds=list_of_beds, merged_bed=merged_bed)
shutil.copy2(merged_bed, outfile)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
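# Illustrative sketch (not part of the original module): combining several
# hypothetical peak files into a single BED file of 200 bp regions, as done at
# the start of the ANANSE binding workflow.
def _example_combine_bed_files():
    cbed = CombineBedFiles(
        genome="hg38",
        peakfiles=["sample1.narrowPeak", "sample2.narrowPeak"],  # hypothetical
    )
    cbed.run(outfile="regions_combined.bed", width=200)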
class ScorePeaks:
def __init__(self, bams, bed, ncore=1, verbose=True):
self.list_of_bams = bams if isinstance(bams, list) else [bams]
self.bed = bed # one bed file with all putative enhancer binding regions
self.verbose = verbose
self.ncore = ncore
def compatibility_check(self):
"""
        Check if any chromosome in each bam file is found in the bed file.
This filters out datasets mapped to different genomes.
"""
error = False
bed_chromosomes = set(
pd.read_csv(self.bed, sep="\t", header=None)[0].astype(str)
)
for bam in self.list_of_bams:
bam_header = pysam.view(bam, "-H").split("\n") # noqa: pysam bug
for line in bam_header:
if not line.startswith("@SQ"):
continue
# extract chrom (ex: '@SQ\tSN:chr11\tLN:100316')
chrom = line.split("\tSN:")[1].split("\tLN:")[0]
# if any chrom matches: next bam
if chrom in bed_chromosomes:
break
else:
logger.exception(
f"Chromosomes in the peak file(s) do not match any in bam file '{os.path.basename(bam)}'!\n"
f"Does {self.bed} contain any regions, and "
"are both bam- and peak file(s) mapped to the same genome assembly?\n"
)
error = True
if error:
exit(1)
def peaks_count(self, outdir):
"""
count bam reads in the bed regions
returns one bed file for each bam in outdir
"""
# linear script:
# coverage_files = []
# for bam in self.list_of_bams:
# bed_output = os.path.join(outdir, os.path.basename(bam).replace(".bam", ".regions.bed"))
# coverage_files.append(bed_output)
# mosdepth(self.bed, bam, bed_output, self.ncore)
# return coverage_files
# parallel script:
nbams = len(self.list_of_bams)
npool = min(self.ncore, nbams)
ncore = min(4, self.ncore // npool) # 1-4 cores/bam
# list with tuples. each tuple = one run
mosdepth_params = []
coverage_files = []
for bam in self.list_of_bams:
bed_output = os.path.join(
outdir, os.path.basename(bam).replace(".bam", ".regions.bed")
)
mosdepth_params.append((self.bed, bam, bed_output, ncore))
coverage_files.append(bed_output)
pool = mp.Pool(npool)
try:
pool.starmap_async(mosdepth, mosdepth_params)
finally: # To make sure processes are closed in the end, even if errors happen
pool.close()
pool.join()
return coverage_files
@staticmethod
def peaks_merge(coverage_files, bed_output, ncore=1):
"""
averages all peaks_count outputs
uses quantile normalization to normalize for read depth
returns one BED 3+1 file
"""
ncore = min(4, ncore)
bed = pd.read_csv(coverage_files[0], header=None, sep="\t")
if len(coverage_files) > 1:
for file in coverage_files[1:]:
scores = pd.read_csv(file, header=None, sep="\t")[3]
bed = pd.concat([bed, scores], axis=1)
scores = bed.iloc[:, 3:]
scores = qnorm.quantile_normalize(scores, axis=1, ncpus=ncore)
scores = scores.mean(axis=1)
bed = pd.concat([bed.iloc[:, :3], scores], axis=1)
bed.to_csv(bed_output, sep="\t", header=False, index=False)
@staticmethod
def peaks_fit(bam_coverage, bed_output, dist_func="lognorm_dist", **kwargs):
"""
fit the peak scores to a distribution
"""
bed = pd.read_csv(bam_coverage, header=None, sep="\t")
region = (
bed[0].astype(str) + ":" + bed[1].astype(str) + "-" + bed[2].astype(str)
)
score = bed[3]
# obtain a distribution
dist_func = Distributions().set(dist_func)
# with np.errstate(divide="ignore", invalid="ignore"):
# dist = dist_func(score, **kwargs)
dist = dist_func(score, **kwargs)
# replace scores with distribution values
ascending_dist = np.sort(dist)
ascending_scores_index = np.searchsorted(np.sort(score), score)
norm_score = np.array([ascending_dist[i] for i in ascending_scores_index])
logn_score = np.log(norm_score + 1)
scaled_score = minmax_scale(logn_score)
log10_score = np.log10(norm_score + 1)
data = {
"region": region, # ex: "chr1:0-200"
"score": score,
"norm_score": norm_score,
"logn_score": logn_score,
"scaled_score": scaled_score,
"log10_score": log10_score, # used by the original function
}
bed = pd.DataFrame(data=data)
bed.to_csv(bed_output, sep="\t", index=False)
def run(self, outfile, dist_func="peak_rank_file_dist", force=False, **kwargs):
# save the results as it takes ages to run
raw_peak_scores = os.path.join(os.path.dirname(outfile), "raw_scoredpeaks.bed")
if force or not os.path.exists(raw_peak_scores):
self.compatibility_check()
tmpdir = tempfile.mkdtemp(prefix="ANANSE_")
try:
if self.verbose:
logger.info("Scoring peaks (slow)")
try: # assumes sorted
for bam in self.list_of_bams:
bam_index(bam, force=False, ncore=self.ncore)
coverage_files = self.peaks_count(tmpdir)
except Exception: # sort, index & try again
for bam in self.list_of_bams:
bam_sort(bam, self.ncore)
coverage_files = self.peaks_count(tmpdir)
tmp_peak_scores = os.path.join(tmpdir, "raw_scoredpeaks.bed")
self.peaks_merge(coverage_files, tmp_peak_scores, self.ncore)
shutil.copy2(tmp_peak_scores, raw_peak_scores)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
# fit bam read counts to specified distribution
if force or not os.path.exists(outfile):
self.peaks_fit(raw_peak_scores, outfile, dist_func=dist_func, **kwargs)
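# Illustrative sketch (defined but never called): the combine step used by peaks_merge above,
# on toy data. The column names "bam1"/"bam2" are made up for the example.
def _example_quantile_average():
    import pandas as pd
    import qnorm

    # per-bam coverage scores for the same three regions (toy values)
    scores = pd.DataFrame({"bam1": [10.0, 20.0, 30.0], "bam2": [1.0, 2.0, 300.0]})
    # quantile normalization makes the per-bam score distributions comparable
    normed = qnorm.quantile_normalize(scores, axis=1)
    # the final peak score is the mean of the normalized columns
    return normed.mean(axis=1)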
class ScoreMotifs:
def __init__(self, genome, bed, pfmfile=None, ncore=1, verbose=True):
self.genome = genome
self.bed = bed # putative enhancer regions in format chr:start-end (in column 0 with header)
self.pfm_file = pfmfile_location(pfmfile)
self.ncore = ncore
self.verbose = verbose
def motifs_get_scores(self, pfmscorefile, debug=False):
"""
Scan for TF binding motifs in potential enhancer regions.
"""
if not debug:
df = scan_regionfile_to_table(
input_table=self.bed,
genome=self.genome,
scoring="score",
pfmfile=self.pfm_file,
ncpus=self.ncore,
zscore=True,
gc=True,
)
else: # test output
df = pd.DataFrame(
{
"region": ["chr1:400-600", "chr1:2400-2600", "chr1:10003-10203"],
"GM.5.0.Sox.0001": [-0.544, -2.496, -0.544],
"GM.5.0.Homeodomain.0001": [-0.750, -0.377, -7.544],
}
).set_index("region")
df["motif"] = df.idxmax(axis=1)
df["zscore"] = df.max(axis=1)
df.reset_index(inplace=True)
df.to_csv(
pfmscorefile,
sep="\t",
header=True,
index=False,
columns=["motif", "region", "zscore"], # filter + order columns
)
@staticmethod
def motifs_normalize(bed_input, bed_output):
"""
Add normalized scores to the scored motifs
"""
bed = pd.read_csv(bed_input, sep="\t")
bed["rank_zscore"] = minmax_scale(stats.rankdata(bed["zscore"]))
bed.to_csv(bed_output, sep="\t", index=False)
def run(self, outfile, force=False):
# save the results as it takes ages to run
raw_motif_scores = os.path.join(
os.path.dirname(outfile), "raw_scoredmotifs.bed"
)
if force or not os.path.exists(raw_motif_scores):
if self.verbose:
logger.info("Scoring motifs (really slow)")
self.motifs_get_scores(raw_motif_scores)
if force or not os.path.exists(outfile):
self.motifs_normalize(raw_motif_scores, outfile)
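# Illustrative sketch (defined but never called): motifs_get_scores keeps only the
# best-scoring motif per region via a row-wise idxmax/max. Motif and region names are made up.
def _example_best_motif_per_region():
    import pandas as pd

    df = pd.DataFrame(
        {"motifA": [-0.5, -2.5], "motifB": [-0.7, -0.4]},
        index=["chr1:0-200", "chr1:200-400"],
    )
    df.index.name = "region"
    best = pd.DataFrame({"motif": df.idxmax(axis=1), "zscore": df.max(axis=1)})
    return best.reset_index()  # columns: region, motif, zscore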
class Binding:
def __init__(
self,
peak_weights,
motif_weights,
pfmfile=None,
model=None,
curation_filter=None,
tf_list=None,
whitelist=True,
ncore=1,
verbose=True,
):
self.peak_weights = peak_weights # output from ScorePeaks
self.motif_weights = motif_weights # output from ScoreMotifs
self.motifs2factors_file = pfmfile_location(pfmfile).replace(
".pfm", ".motif2factors.txt"
)
self.motifs2factors = self.filter_transcription_factors(
curation_filter, tf_list, whitelist
)
self.model = model
if self.model is None:
            # default: dream_model_p300.pickle, a pickled classifier that predicts binding
            # from two features (motif z-score and log10 peak score)
package_dir = os.path.dirname(__file__)
self.model = os.path.join(package_dir, "db", "dream_model_p300.pickle")
self.ncore = ncore
self.verbose = verbose
def filter_transcription_factors(
self, curation_filter=None, tf_list=None, whitelist=True
):
"""
filter transcription factors from the motif database
curation_filter: If None (default), keep all factors.
If True, keep only curated factors. If False, keep only non-curated factors.
Note: "Curated" TFs have direct evidence for binding or are manually selected for likely binding.
tf_list: an optional, single-column file with (case-insensitive) transcription factor names.
whitelist: if True (default), tf_list is used as a whitelist. If False, as a blacklist.
"""
m2f = pd.read_csv(self.motifs2factors_file, sep="\t")
# rename stuff
m2f.rename(
columns={"Motif": "motif", "Factor": "factor", "Curated": "curated"},
inplace=True,
)
m2f["factor"] = m2f.factor.str.upper() # make case-insensitive
m2f.replace("T", "TBXT", inplace=True) # rename T to TBXT
# filter by curation
if curation_filter is True:
m2f = m2f.loc[m2f.curated == "Y"]
elif curation_filter is False:
m2f = m2f.loc[m2f.curated == "N"]
# shrink table
m2f = m2f[["motif", "factor"]] # subset
m2f.drop_duplicates(
inplace=True
) # case-insensitivity adds loads of duplicates (ex: Sox9 and SOX9)
# filter by white/blacklist
if tf_list:
tfs = (
pd.read_csv(tf_list, header=None)[0].str.upper().tolist()
) # make case-insensitive
m2f = (
m2f.loc[m2f.factor.isin(tfs)]
if whitelist
else m2f.loc[~m2f.factor.isin(tfs)]
)
return m2f
def get_binding_score(self, motif_weights, peak_weights, outfile):
"""
Infer TF binding score from motif z-score and peak intensity.
"""
# merge the scoring tables
m = dd.read_csv(motif_weights, sep="\t")
m = m.merge(dd.read_csv(peak_weights, sep="\t", blocksize=200e6), on="region")[
["motif", "region", "zscore", "log10_score"]
]
# filter scoring tables for motifs found in motifs2factors
m = m.merge(self.motifs2factors, on="motif") # also adds "factor" column
# combine scores
m = m.groupby(["factor", "region"])[["zscore", "log10_score"]].mean()
m = m.dropna().reset_index()
with dask.diagnostics.ProgressBar():
m = m.compute(num_workers=self.ncore)
# Load model
with open(self.model, "rb") as f:
clf = pickle.load(f)
m["binding"] = clf.predict_proba(m[["zscore", "log10_score"]])[:, 1]
# "region" renames to "enhancer" for consistency with ANANSE network
m.rename(columns={"region": "enhancer"}, inplace=True)
m.to_csv(
outfile, sep="\t", index=False, columns=["factor", "enhancer", "binding"]
)
def run(self, outfile, force=False):
if force or not os.path.exists(outfile):
if self.verbose:
logger.info("Predict TF binding")
self.get_binding_score(self.peak_weights, self.motif_weights, outfile)
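# Illustrative sketch (defined but never called) of the final scoring step above: a binary
# classifier trained on two features (motif z-score and log10 peak score) returns its
# positive-class probability as the binding score. The model below is a toy stand-in,
# not the pickled dream_model shipped with ANANSE.
def _example_binding_probability():
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 2))        # columns: zscore, log10_score
    y = (X.sum(axis=1) > 0).astype(int)  # toy labels
    clf = LogisticRegression().fit(X, y)
    return clf.predict_proba(X)[:, 1]    # one binding probability per region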
|
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
def influence(args):
a = ananse.influence.Influence(
ncore=args.ncore, # --ncore (optional)
Gbf=check_path(args.Gbf), # --source (Gbf = GRN before)
Gaf=check_path(args.Gaf), # --target (Gaf = GRN after)
outfile=check_path(args.outfile, error_missing=False), # --output
degenes=check_path(args.expression), # --degenes (HGNC gene names, padj and log2foldchanges)
edges=args.edges, # --edges (optional)
)
a.run_influence(args.plot) # -p
|
#!/usr/bin/env python
# Copyright (c) 2021 Simon van Heeringen
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
def view(args):
df = view_h5(args.infile, tfs=args.factors, fmt=args.format)
index = True
if args.format == "long":
index = False
if args.outfile is None:
args.outfile = sys.stdout
df.to_csv(args.outfile, sep="\t", index=index)
|
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
def network(args):
ncore = args.ncore
if ncore is None:
ncore = min(os.cpu_count(), 4)
ncore = int(ncore)
memory_limit = "12GB"
# With one core more memory is needed
if ncore == 1:
memory_limit = "20GB"
b = ananse.network.Network(
genome=args.genome, # checked in CLI
gene_bed=check_path(args.annotation),
include_promoter=args.include_promoter,
include_enhancer=args.include_enhancer
# pfmfile=args.pfmfile,
# promoter=args.promoter
)
cluster = LocalCluster(
local_directory=os.environ.get("TMP", None),
scheduler_port=0,
dashboard_address=None, # noqa
n_workers=ncore,
threads_per_worker=2,
memory_limit=memory_limit,
)
client = Client(cluster)
b.run_network(
binding=check_path(args.binding),
fin_expression=check_path(args.fin_expression),
outfile=check_path(args.outfile, error_missing=False),
)
client.close()
|
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
def binding(args):
predict_peaks(
check_path(args.outdir, error_missing=False),
atac_bams=check_path(args.atac_bams),
histone_bams=check_path(args.histone_bams),
regionfiles=check_path(args.regionfiles),
reference=check_path(args.reference),
factors=check_input_factors(args.factors),
genome=args.genome, # checked in CLI
pfmfile=check_path(args.pfmfile),
pfmscorefile=check_path(args.pfmscorefile),
ncpus=args.ncpus,
)
|
from ananse.enhancer_binding import (
    CombineBedFiles,
    ScorePeaks,
    ScoreMotifs,
    Binding,
)
@logger.catch
def run_binding(
genome,
peakfiles,
bams,
outdir,
peak_width=200,
dist_func="peak_rank_file_dist",
pfmfile=None,
curation_filter=None,
tf_list=None,
whitelist=True,
model=None,
ncore=1,
force=False,
keep_intermediates=True,
verbose=True,
**kwargs,
):
"""
Predict transcription factor binding in specified regions
Args:
genome: path to the genome fasta used to align the bams and peaks to
peakfiles: one or more BED format files with putative enhancer regions (e.g. narrowPeak, broadPeak)
bams: one or more BAM format files where reads mark enhancer activity (H3K27Ac/p300 ChIP-seq or ATAC-seq)
outdir: directory where you wish to store the output
peak_width: peakfiles are resized to this width (default 200 bp)
dist_func: bam reads are normalized to the selected distribution (default: an empirical distribution)
pfmfile: the pfm file of the transcription factors to search for (default gimme.vertebrate.v5.0)
curation_filter: True = curated TFs, False = no curated TFs, None = all TFs (default: None)
tf_list: optional file with single column TF names
whitelist: True = use tf_list as a whitelist. False = use tf_list as a blacklist
model: classification model to use (default: dream)
ncore: number of cores to use
force: overwrite earlier intermediate data? (default: False)
keep_intermediates: keep intermediate data after completion? (default: True)
verbose: keep you informed of the progress? (default: True)
**kwargs: passed to the selected dist_func
Returns:
binding.tsv: the strongest transcription factor and its binding score for each region in the peakfile(s)
"""
# clean up previous ANANSE tmp files
clean_tmp()
# check input file paths
files = []
for arg in [genome, peakfiles, bams, pfmfile, tf_list, model]:
if arg:
if isinstance(arg, list):
files.extend(arg)
else:
files.append(arg)
for file in files:
if not os.path.exists(file):
logger.exception(f"Could not find {file}!")
exit(1)
outfile = os.path.join(outdir, "binding.tsv")
intermediate_dir = os.path.join(outdir, "intermediate_results")
if force or not os.path.exists(outfile):
genomepy.utils.mkdir_p(intermediate_dir)
cbed = CombineBedFiles(genome=genome, peakfiles=peakfiles, verbose=verbose)
combined_bed = os.path.join(intermediate_dir, "combined.bed")
cbed.run(outfile=combined_bed, width=peak_width, force=force)
sp = ScorePeaks(bams=bams, bed=combined_bed, ncore=ncore, verbose=verbose)
scored_peaks = os.path.join(intermediate_dir, "scoredpeaks.bed")
sp.run(outfile=scored_peaks, dist_func=dist_func, force=force, **kwargs)
sm = ScoreMotifs(
genome=genome,
bed=scored_peaks,
pfmfile=pfmfile,
ncore=ncore,
verbose=verbose,
)
scored_motifs = os.path.join(intermediate_dir, "scoredmotifs.bed")
sm.run(outfile=scored_motifs, force=force)
b = Binding(
peak_weights=scored_peaks,
motif_weights=scored_motifs,
pfmfile=pfmfile,
model=model,
curation_filter=curation_filter,
tf_list=tf_list,
whitelist=whitelist,
ncore=ncore,
verbose=verbose,
)
b.run(outfile=outfile, force=force)
if not keep_intermediates:
genomepy.utils.rm_rf(intermediate_dir)
if verbose:
logger.info("ANANSE binding finished successfully!")
|
def test_distributions():
d = ananse.distributions.Distributions()
func_list = d.get()
assert isinstance(func_list, list)
for func in func_list:
d.set(func)
scores = np.array([0, 1, 2])
def test_scale_dist():
s = ananse.distributions.scale_dist(scores)
assert np.array_equal(s, np.array([0, 0.5, 1]))
def test_log_scale_dist():
s = ananse.distributions.log_scale_dist(scores)
assert np.allclose(s, np.array([0.0, 0.63092975, 1.0]))
def test_replace_infs():
score_w_infs = [-np.inf, 0, 1, np.inf]
s = ananse.distributions.replace_infs(score_w_infs)
assert np.array_equal(s, np.array([0, 0, 1, 1]))
def test_scipy_dist():
s = ananse.distributions.scipy_dist(scores, **{"dist": "loglaplace"})
assert np.allclose(s, np.array([4.72219713e-05, 2.05410078e-01, 6.83221921e-01]))
s = ananse.distributions.scipy_dist(scores, **{"dist": "lognorm"})
assert np.allclose(s, np.array([0, 8.0793556e12, 2.8352896e-02]))
with pytest.raises(ValueError):
ananse.distributions.scipy_dist(scores, **{"dist": "wrongname"})
def test_peak_rank_dist():
s = ananse.distributions.peak_rank_dist(scores)
assert np.allclose(s, np.array([0, 0.4077607, 0.4077607]))
def test_peak_rank_file_dist():
s = ananse.distributions.peak_rank_file_dist(scores, **{"file": "peak_rank.txt"})
assert len(s) == 3
s = ananse.distributions.peak_rank_file_dist(
scores, **{"file": "peak_rank_hg38_h3k27ac.txt"}
)
assert len(s) == 3
# too many peaks
with pytest.raises(ValueError):
ananse.distributions.peak_rank_file_dist(
range(108_087), **{"file": "peak_rank.txt"}
)
|
@pytest.fixture
def binding_fname():
return "tests/example_data/binding2.tsv"
@pytest.fixture
def network_obj():
return Network(genome="", gene_bed="ananse/db/hg38.genes.bed")
def test_unique_enhancer(network_obj, binding_fname):
regions = network_obj.unique_enhancers(binding_fname)
regions = regions.as_df()
assert regions.shape[0] == 6
assert sorted(list(regions["Chromosome"].unique())) == ["chr1", "chr10", "chr17"]
assert sorted(list(regions["Start"].unique())) == [7677184, 7687827]
def test_distance_weight(network_obj):
dw = network_obj.distance_weight(
include_promoter=True,
promoter_region=20,
full_weight_region=50,
maximum_distance=100,
alpha=5,
)
assert list(dw.columns) == ["weight", "dist"]
dw = dw.set_index("dist")
assert dw.loc[0, "weight"] == 1
assert dw.loc[25, "weight"] == 1
assert dw.loc[50, "weight"] == 1
assert dw.loc[51, "weight"] < 1
assert np.isclose(dw.loc[100, "weight"], 0, atol=1e-4)
assert dw.shape[0] == 101
dw = network_obj.distance_weight(
include_promoter=False,
promoter_region=20,
full_weight_region=50,
maximum_distance=100,
alpha=5,
)
assert list(dw.columns) == ["weight", "dist"]
dw = dw.set_index("dist")
assert dw.loc[0, "weight"] == 0
assert dw.loc[20, "weight"] == 0
assert dw.loc[21, "weight"] == 1
assert dw.shape[0] == 101
def test_command():
with NamedTemporaryFile() as tmp:
fname = tmp.name
Args = namedtuple(
"args",
"genome annotation include_promoter include_enhancer binding fin_expression outfile ncore",
)
args = Args(
genome="hg38",
annotation=None,
include_promoter=True,
include_enhancer=True,
binding="tests/data/network/binding.h5",
fin_expression="tests/data/network/heart_expression.txt",
outfile=fname,
ncore=2,
)
network(args)
df = pd.read_table(fname, sep="\t")
assert df.shape[0] == 30690
        assert list(df.columns) == ["tf_target", "prob"]
|
def test_read_expression():
res = read_expression("tests/data/dge.tsv")
assert set(res.keys()) == {"ANPEP", "CD24", "COL6A3", "DAB2", "DMKN"}
assert res["ANPEP"].score - 7.44242618323665 < 0.001
assert res["ANPEP"].realfc - 7.44242618323665 < 0.001
assert res["ANPEP"].absfc - 7.44242618323665 < 0.001
assert res["COL6A3"].score == 0
assert res["COL6A3"].realfc - 11.0553152937569 < 0.001
assert res["COL6A3"].absfc - 11.0553152937569 < 0.001
test_read_expression()
|
# run tests locally with:
# pytest -vv --disable-pytest-warnings
# pytest -vv --disable-pytest-warnings tests/continuous_integration/test_01*
# pytest -vv --disable-pytest-warnings -k [substring]
# TODO: apply to all code --> targets = ["ananse/", "tests/"]
targets = [
"ananse/commands/__init__.py",
"ananse/commands/enhancer_binding.py",
"ananse/commands/network.py",
"ananse/__init__.py",
"ananse/enhancer_binding.py",
"ananse/distributions.py",
"ananse/network.py",
"ananse/utils.py",
"tests/",
]
def test_import_ananse():
import ananse
assert str(ananse.__file__).endswith("ANANSE/ananse/__init__.py")
def test_black_formatting():
sp.check_call(" ".join(["black setup.py"] + targets), shell=True)
def test_flake8_formatting():
ret = sp.check_call(" ".join(["flake8 setup.py"] + targets), shell=True)
assert ret == 0
|
# prep
test_dir = os.path.dirname(os.path.dirname(__file__))
outdir = os.path.join(test_dir, "output")
genomepy.utils.mkdir_p(outdir)
# beds
genome = os.path.join(outdir, "genome.fa")
write_file(genome, [">chr1", "N" * 50000])
bed1 = os.path.join(outdir, "bed1.bed")
write_file(bed1, ["chr1\t0\t1000\n", "chr1\t2000\t3000\n"])
bed2 = os.path.join(outdir, "bed2.bed")
write_file(bed2, ["chr1\t4000\t5000\n", "chr1\t2000\t3000\n"])
sp_bed_input = os.path.join(outdir, "sp_input.bed")
write_file(sp_bed_input, ["chr1\t10003\t10203\n", "chr1\t10203\t10403\n"])
# bams
bam1 = os.path.join(outdir, "bam1.bam")
write_bam(bam1, [h0, h1, line1, line2, line2])
ananse.utils.bam_index(bam1)
bam2 = os.path.join(outdir, "bam2.bam")
write_bam(bam2, [h0, h1, line1, line3, line3])
ananse.utils.bam_index(bam2)
# shared in/outputs
combined_bed = os.path.join(outdir, "combined.bed")
raw_peak_scores = os.path.join(outdir, "raw_scoredpeaks.bed")
scored_peaks = os.path.join(outdir, "scoredpeaks.bed")
raw_motif_scores = os.path.join(outdir, "raw_scoredmotifs.bed")
scored_motifs = os.path.join(outdir, "scoredmotifs.bed")
outfile = os.path.join(outdir, "binding.tsv")
def test_is_narrowpeak():
np = os.path.join(outdir, "f.narrowPeak")
write_file(np, ["chr1\t629812\t630105\tnarrowPeak1\t6047\t.\t0\t0\t0\t122"])
bp = os.path.join(outdir, "f.broadPeak")
write_file(
bp,
[
"chr1\t778061\t779255\tbroadRegion1\t660\t.\t778061\t"
+ "779255\t0\t3\t1,638,1\t0,17,1193\t0\t0\t0"
],
)
cbed = ananse.enhancer_binding.CombineBedFiles(genome=genome, peakfiles=[])
assert cbed.is_narrowpeak(np) is True
assert cbed.is_narrowpeak(bp) is False
def test_bed_resize():
cbed = ananse.enhancer_binding.CombineBedFiles(genome=genome, peakfiles=[])
bed_out = os.path.join(outdir, "bed_out.bed")
# default width, extended width with outlier, extended width with fixed outlier
for n in range(3):
width = [200, 2000, 2000][n]
fix_outliers = [False, False, True][n]
nlines = [2, 1, 2][n]
estart = [400, 1500, 0][n]
estop = [600, 3500, 2000][n]
cbed.bed_resize(genome, bed1, bed_out, width=width, fix_outliers=fix_outliers)
with open(bed_out) as f:
lines = f.readlines()
assert len(lines) == nlines
chrom, start, stop = lines[0].split()[0:3]
assert int(start) == estart
assert int(stop) == estop
assert int(stop) - int(start) == width
def test_cbedf():
cbed = ananse.enhancer_binding.CombineBedFiles(
genome=genome, peakfiles=[bed1, bed2]
)
width = 200
cbed.run(outfile=combined_bed, width=width, force=True)
with open(combined_bed) as f:
lines = f.readlines()
# 3 unique regions over the 2 bed files
assert len(lines) == 3
# width is set correctly
for line in lines:
chrom, start, stop = line.split()[0:3]
assert int(stop) - int(start) == width
def test_compatibility_check():
incompatible = os.path.join(outdir, "incompatible.bed")
write_file(incompatible, ["1\t0\t200"])
sp = ananse.enhancer_binding.ScorePeaks(
bams=bam1, bed=incompatible, ncore=1, verbose=True
)
with pytest.raises(SystemExit):
sp.compatibility_check()
def test_peaks_count():
sp = ananse.enhancer_binding.ScorePeaks(
bams=[bam1, bam2], bed=sp_bed_input, ncore=1, verbose=True
)
coverage_files = sp.peaks_count(outdir)
assert len(coverage_files) == 2
assert os.path.join(outdir, "bam1.regions.bed") in coverage_files
assert os.path.join(outdir, "bam2.regions.bed") in coverage_files
def test_peaks_merge():
sp = ananse.enhancer_binding.ScorePeaks(bams=[], bed=None, ncore=1, verbose=True)
coverage_files = [
os.path.join(outdir, "bam1.regions.bed"),
os.path.join(outdir, "bam2.regions.bed"),
]
sp.peaks_merge(coverage_files, raw_peak_scores, sp.ncore)
with open(raw_peak_scores) as f:
content = f.readlines()[0]
assert len(content.strip().split("\t")) == 4
def test_normalize_peaks():
sp = ananse.enhancer_binding.ScorePeaks(bams=[], bed=None, ncore=1, verbose=True)
raw_cov = os.path.join(outdir, "raw_cov.bed")
write_file(raw_cov, ["chr1\t0\t200\t10", "chr1\t0\t200\t20", "chr1\t0\t200\t30"])
norm_cov = os.path.join(outdir, "norm_cov.bed")
sp.peaks_fit(raw_cov, norm_cov, dist_func="scale_dist")
scores = []
norm_scores = []
with open(norm_cov) as bed:
for line in bed:
line = line.strip().split()
scores.append(line[1])
norm_scores.append(line[2])
assert len(scores) == 3 + 1 # lines + header
assert scores[1:] == ["10", "20", "30"]
assert norm_scores[1:] == ["0.0", "0.5", "1.0"]
def test_sp():
sp = ananse.enhancer_binding.ScorePeaks(
bams=[bam1, bam2], bed=sp_bed_input, ncore=1, verbose=True
)
sp.run(outfile=scored_peaks, dist_func="scale_dist", force=True)
with open(scored_peaks) as f:
lines = f.readlines()
peak1 = lines[1].split()
assert len(peak1) == 6
assert peak1[1] == "0.375" # raw score
assert peak1[2] == "1.0" # norm score (scaled)
def test_motifs_get_scores():
# scan_regionfile_to_table output:
# region GM.5.0.Sox.0001 GM.5.0.Mixed.0002
# chr1:10003-10203 -4.4961200165161355 -3.1206201127508577
# chr1:10203-10403 -4.4961200165161355 -3.1206201127508577
# desired output:
# motif region zscore
# GM.5.0.Mixed.0002 chr1:10003-10203 -3.1200
# GM.5.0.Sox.0001 chr1:10203-10403 -2.4961
sm = ananse.enhancer_binding.ScoreMotifs(None, None)
sm.motifs_get_scores(raw_motif_scores, debug=True)
with open(raw_motif_scores) as f:
content = f.readlines()
headers = content[0].strip().split("\t")
motif1 = content[1].strip().split("\t")
assert headers == ["motif", "region", "zscore"]
assert motif1 == ["GM.5.0.Sox.0001", "chr1:400-600", "-0.544"]
# TODO: get gimme to make small & quick(!) output for testing
# fake_cg_index = "~/.cache/gimmemotifs/genome.fa.gcfreq.100.feather"
# try:
# import pandas as pd
# import numpy as np
# df = pd.DataFrame({
# "chrom": ["chr1"], "start": ["0"], "end": ["100"],
# "w100": ["0.0"], "n100": ["0.0"], "w200": [np.NaN],
# "n200": [np.NaN], "w500": [np.NaN], "n500": [np.NaN],
# })
# df.to_feather(fake_cg_index)
#
# pfmfile = os.path.join(test_dir, "example_data", "debug.pfm")
# sm = ananse.enhancer_binding.ScoreMotifs(genome, combined_bed, pfmfile=pfmfile)
# sm.get_motif_scores(combined_bed, raw_motif_scores)
# finally:
# genomepy.utils.rm_rf(fake_cg_index)
def test_normalize_motifs():
sm = ananse.enhancer_binding.ScoreMotifs(None, None)
sm.motifs_normalize(raw_motif_scores, scored_motifs)
with open(raw_motif_scores) as f:
lines1 = f.readlines()
with open(scored_motifs) as f:
lines2 = f.readlines()
assert len(lines2[0].split()) == len(lines1[0].split()) + 1
def test_filter_transcription_factors():
pfmfile = os.path.join(test_dir, "data", "debug.pfm")
b = ananse.enhancer_binding.Binding(None, None, pfmfile=pfmfile)
# curation filter
m2f = b.filter_transcription_factors(curation_filter=None)
assert m2f.shape[0] == 9 # all TFs in the file
m2f = b.filter_transcription_factors(curation_filter=True)
assert m2f.shape[0] == 8 # all curated TFs
m2f = b.filter_transcription_factors(curation_filter=False)
assert m2f.shape[0] == 1 # all non-curated TFs
# tf filter
tf_list = os.path.join(outdir, "tf_list.txt")
write_file(tf_list, ["SOX12"])
m2f = b.filter_transcription_factors(tf_list=tf_list, whitelist=True)
assert m2f.shape[0] == 1
m2f = b.filter_transcription_factors(tf_list=tf_list, whitelist=False)
assert m2f.shape[0] == 8
def test_get_binding_score():
pfmfile = os.path.join(test_dir, "data", "debug.pfm")
b = ananse.enhancer_binding.Binding(None, None, pfmfile=pfmfile)
b.get_binding_score(scored_motifs, scored_peaks, outfile)
assert os.path.exists(outfile)
def test_run_binding(capsys):
# test API wrapper
run_binding(genome=genome, bams=[bam1], peakfiles=bed1, outdir=outdir, force=False)
with pytest.raises(SystemExit):
run_binding(
genome="/not/a/real/genome.fa",
bams=[bam1],
peakfiles=bed1,
outdir=outdir,
force=False,
)
captured = capsys.readouterr().err.strip()
assert "Could not find /not/a/real/genome.fa!" in captured
|
# prep
test_dir = os.path.dirname(os.path.dirname(__file__))
outdir = os.path.join(test_dir, "output")
genomepy.utils.mkdir_p(outdir)
def write_file(filename, lines):
with open(filename, "w") as f:
for line in lines:
if not line.endswith("\n"):
line = line + "\n"
f.write(line)
def write_bam(filename, lines):
tmp_sam = os.path.join(outdir, "tmp.sam")
write_file(tmp_sam, lines)
pysam.view(tmp_sam, "-b", "-o", filename, catch_stdout=False)
genomepy.utils.rm_rf(tmp_sam)
def compare_contents(file1, file2, ftype="bed"):
if ftype == "bed":
with open(file1) as f:
contents1 = f.readlines()
with open(file2) as f:
contents2 = f.readlines()
else:
contents1 = pysam.view(file1)
contents2 = pysam.view(file2)
return contents1 == contents2
# test BED functions
unsorted_bed = os.path.join(outdir, "unsorted.bed")
write_file(unsorted_bed, ["chr1\t817046\t817246\n", "chr1\t778558\t778758\n"])
sorted_bed = os.path.join(outdir, "sorted.bed")
write_file(sorted_bed, ["chr1\t778558\t778758\n", "chr1\t817046\t817246\n"])
second_bed = os.path.join(outdir, "second.bed")
write_file(second_bed, ["chr1\t827457\t827657\n"])
def test_bed_sort():
assert not compare_contents(unsorted_bed, sorted_bed, ftype="bed")
ananse.utils.bed_sort(unsorted_bed)
assert compare_contents(unsorted_bed, sorted_bed, ftype="bed")
def test_bed_merge():
merged_bed = os.path.join(outdir, "merged.bed")
# 1 bed = nothing changes
ananse.utils.bed_merge([sorted_bed], merged_bed)
assert compare_contents(sorted_bed, merged_bed, ftype="bed")
# >1 bed, same content
ananse.utils.bed_merge([unsorted_bed, sorted_bed], merged_bed)
assert compare_contents(sorted_bed, merged_bed, ftype="bed")
with open(merged_bed) as mb:
assert len(mb.readlines()) == 2
# >1 beds, different content
ananse.utils.bed_merge([unsorted_bed, second_bed], merged_bed)
with open(merged_bed) as mb:
assert len(mb.readlines()) == 3
# test BAM functions
h0 = "@HD VN:1.6 SO:coordinate"
h1 = "@SQ SN:chr1 LN:50000"
line1 = (
"read1 147 chr1 10003 40 11S90M = 10048 -46 "
+ "CCCTACCCTCTCCCTATCCCTAACCCTAACCCCAACCCTAACCCTATCCCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAA "
+ "A77--7-7---7-7---A77---AA7----<7-AAJAA-7JJFF<--F-A-AFFFF<FJJJJF-AFJF7F-JJJFJFFFJFF<FJJJJFJJFJJFFFFFAA "
)
line2 = (
"read2 83 chr1 10004 30 2S45M1D54M = 10074 -30 "
+ "ATCCCTAACCCTAACCCTAACCCTAACCCTACCCCTACCCCTAACCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCT "
+ "--JAA7F-FAFA-7JJFA--F<7-FF<<FAF7<7F7A-FFAF7-FJJJFJJ----J<JFA-JAF7JFJFJF<<JFJF<JJJFFJJJAAAA-JFFFA-FAA- "
)
line3 = (
"read3 163 chr1 10027 40 100M = 10032 105 "
+ "ACCCGAACCCTAACCCTAACCCTAACCCTAACCCGAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCAACCCTAACCCGAACCCA "
+ "AAFFFJJJJJJJJJJJFJJJFJJJFJFJJFJJJJ<-FJJFJAFFJA7AFAJJJJFJFJ-<F-AAJJ<FF7-J-AAJ--<JJJ--AAJ-77-AA-7A<-A- "
)
unsorted_bam = os.path.join(outdir, "unsorted.bam")
write_bam(unsorted_bam, [h0, h1, line2, line1])
sorted_bam = os.path.join(outdir, "sorted.bam")
write_bam(sorted_bam, [h0, h1, line1, line2])
second_bam = os.path.join(outdir, "second.bam")
write_bam(second_bam, [h0, h1, line3])
def test_bam_index():
ncores = os.cpu_count() # test max cores
genomepy.utils.rm_rf(f"{sorted_bam}.bai")
assert not os.path.exists(f"{sorted_bam}.bai")
ananse.utils.bam_index(sorted_bam, ncore=ncores)
assert os.path.exists(f"{sorted_bam}.bai")
# test force
t0 = os.path.getmtime(f"{sorted_bam}.bai")
time.sleep(1)
ananse.utils.bam_index(sorted_bam, force=False, ncore=ncores)
t1 = os.path.getmtime(f"{sorted_bam}.bai")
assert t0 == t1
ananse.utils.bam_index(sorted_bam, force=True, ncore=ncores)
t1 = os.path.getmtime(f"{sorted_bam}.bai")
assert t0 != t1
def test_bam_sort():
ncores = -999 # test min cores
assert not compare_contents(sorted_bam, unsorted_bam, ftype="bam")
ananse.utils.bam_sort(unsorted_bam, ncore=ncores)
assert compare_contents(sorted_bam, unsorted_bam, ftype="bam")
assert os.path.exists(f"{unsorted_bam}.bai") # bam is indexed
# bam is identical to the already sorted bam
ananse.utils.bam_index(sorted_bam, force=False)
assert os.path.getsize(f"{unsorted_bam}.bai") == os.path.getsize(
f"{sorted_bam}.bai"
)
def test_bam_merge():
ncores = min(2, os.cpu_count()) # test average cores
merged_bam = os.path.join(outdir, "merged.bam")
# 1 bam: copy
ananse.utils.bam_merge([sorted_bam], merged_bam, ncore=ncores)
assert compare_contents(sorted_bam, merged_bam, ftype="bam")
assert os.path.getsize(f"{sorted_bam}.bai") == os.path.getsize(f"{merged_bam}.bai")
# >1 bam: merge
ananse.utils.bam_merge([sorted_bam, second_bam], merged_bam, ncore=ncores)
l1 = pysam.view(sorted_bam).strip().split("\n")
l2 = pysam.view(second_bam).strip().split("\n")
l3 = pysam.view(merged_bam).strip().split("\n")
assert len(l1) + len(l2) == len(l3) == 3
def test_mosdepth():
bed_input = os.path.join(outdir, "mosdepth_input.bed")
write_file(bed_input, ["chr1\t10003\t10203\n", "chr1\t10203\t10403\n"])
# bam = sorted & indexed (required)
bam_input = os.path.join(outdir, "mosdepth_input.bam")
write_bam(bam_input, [h0, h1, line1, line2, line3])
ananse.utils.bam_index(bam_input, ncore=os.cpu_count())
bed_output = os.path.join(outdir, "mosdepth_output.bed")
ananse.utils.mosdepth(bed_input, bam_input, bed_output, ncore=1)
with open(bed_output) as f:
score = f.readlines()[0].strip().split("\t")[3]
assert score == "1.00"
# test other functions
def test_cleanpath():
path = "./tests/continuous_integration/test_02_utils.py"
expected = __file__
res = ananse.utils.cleanpath(path)
assert res == expected
path = "~/../.."
expected = "/"
res = ananse.utils.cleanpath(path)
assert res == expected
def test_mytmpdir():
tmpdir = ananse.utils.mytmpdir()
assert os.path.exists(tmpdir)
assert tempfile.gettempdir() in tmpdir
def test_clean_tmp():
tmpdir = ananse.utils.mytmpdir()
assert os.path.exists(tmpdir)
ananse.utils.clean_tmp()
assert not os.path.exists(tmpdir)
|
# source:
# https://stackoverflow.com/questions/6620471/fitting-empirical-distribution-to-theoretical-ones-with-scipy-python
mpl.rcParams["figure.figsize"] = (16.0, 12.0)
plt.style.use("ggplot")
# Create models from data
def best_fit_distribution(data, bins=200, ax=None):
"""Find the best fitting distribution to the data"""
# Get histogram of original data
y, x = np.histogram(data, bins=bins, density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [
st.alpha,
st.anglit,
st.arcsine,
st.argus,
st.beta,
st.betaprime,
st.bradford,
st.burr,
st.burr12,
st.cauchy,
st.chi,
st.chi2,
st.cosine,
st.crystalball,
st.dgamma,
st.dweibull,
st.erlang,
st.expon,
st.exponnorm,
st.exponweib,
st.exponpow,
st.f,
st.fatiguelife,
st.fisk,
st.foldcauchy,
st.foldnorm,
st.genlogistic,
st.gennorm,
st.genpareto,
st.genexpon,
st.genextreme,
st.gausshyper,
st.gamma,
st.gengamma,
st.genhalflogistic,
st.geninvgauss,
st.gilbrat,
st.gompertz,
st.gumbel_r,
st.gumbel_l,
st.halfcauchy,
st.halflogistic,
st.halfnorm,
st.halfgennorm,
st.hypsecant,
st.invgamma,
st.invgauss,
st.invweibull,
st.johnsonsb,
st.johnsonsu,
st.kappa4,
st.kappa3,
st.ksone,
st.kstwo,
st.kstwobign,
st.laplace,
st.laplace_asymmetric,
st.levy,
st.levy_l,
# st.levy_stable, # unstable in v1.6.0
st.logistic,
st.loggamma,
st.loglaplace,
st.lognorm,
st.loguniform,
st.lomax,
st.maxwell,
st.mielke,
st.moyal,
st.nakagami,
st.ncx2,
st.ncf,
st.nct,
st.norm,
st.norminvgauss,
st.pareto,
st.pearson3,
st.powerlaw,
st.powerlognorm,
st.powernorm,
st.rdist,
st.rayleigh,
st.rice,
st.recipinvgauss,
st.semicircular,
st.skewnorm,
st.t,
st.trapezoid,
st.triang,
st.truncexpon,
st.truncnorm,
st.tukeylambda,
st.uniform,
# st.vonmises, # does not work in v1.6.0
st.vonmises_line,
st.wald,
st.weibull_min,
st.weibull_max,
st.wrapcauchy,
]
# Best holders
best_distribution = st.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in tqdm(DISTRIBUTIONS):
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
# if ax is passed, add to plot
try:
if ax:
pd.Series(pdf, x).plot(
label=distribution.name, legend=True, ax=ax
)
except Exception:
pass
# identify if this distribution is better
if best_sse > sse > 0:
best_distribution = distribution
best_params = params
best_sse = sse
except Exception:
pass
return best_distribution.name, best_params
def make_pdf(dist, params, size=10000):
"""Generate distributions's Probability Distribution Function"""
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Get sane start and end points of distribution
start = (
dist.ppf(0.01, *arg, loc=loc, scale=scale)
if arg
else dist.ppf(0.01, loc=loc, scale=scale)
)
end = (
dist.ppf(0.99, *arg, loc=loc, scale=scale)
if arg
else dist.ppf(0.99, loc=loc, scale=scale)
)
# Build PDF and turn into pandas Series
x = np.linspace(start, end, size)
y = dist.pdf(x, loc=loc, scale=scale, *arg)
pdf = pd.Series(y, x)
return pdf
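# Small usage sketch for make_pdf (defined but never called): build the PDF curve of a
# lognormal fitted to synthetic data. The toy data and parameters are illustrative only.
def _example_make_pdf():
    import numpy as np
    import scipy.stats as st

    data = np.random.lognormal(mean=0.0, sigma=1.0, size=1000)
    params = st.lognorm.fit(data)                  # (shape, loc, scale)
    return make_pdf(st.lognorm, params, size=500)  # pandas Series indexed by x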
def find_best_pdf(data, outfile=None):
# Plot for comparison
fig, (ax1, ax2) = plt.subplots(1, 2)
# Find best fit distribution
best_fit_name, best_fit_params = best_fit_distribution(data, 200, ax1)
best_dist = getattr(st, best_fit_name)
data.plot(
kind="hist",
density=True,
bins=50,
alpha=0.5,
label="Data",
legend=True,
ax=ax1,
color=mpl.rcParams["axes.prop_cycle"].by_key()["color"][1],
)
# Save plot limits
dataYLim = ax1.get_ylim()
dataXLim = ax1.get_xlim()
# Update plots
ax1.set_ylim(dataYLim)
ax1.set_xlim(dataXLim)
ax1.set_title("All Fitted Distributions\n")
ax1.set_xlabel("Score")
# Make PDF with best params
pdf = make_pdf(best_dist, best_fit_params)
# Display
pdf.plot(lw=2, label="PDF", legend=True, ax=ax2)
data.plot(
kind="hist", density=True, bins=50, alpha=0.5, label="Data", legend=True, ax=ax2
)
param_names = (
(best_dist.shapes + ", loc, scale").split(", ")
if best_dist.shapes
else ["loc", "scale"]
)
param_str = ", ".join(
["{}={:0.2f}".format(k, v) for k, v in zip(param_names, best_fit_params)]
)
dist_str = "{}({})".format(best_fit_name, param_str)
ax2.set_ylim(dataYLim)
ax2.set_xlim(dataXLim)
ax2.set_title("Best fit distribution \n" + dist_str)
ax2.set_xlabel("Score")
if outfile:
fig.savefig(outfile)
fig.show()
# Load data
peak_rank_file = "db/peak_rank.txt" # "ananse/db/peak_rank.txt"
scores = pd.read_csv(peak_rank_file, header=None)[0]
data = pd.Series(scores + 1)
outfile = "../tests/output/distributions.png" # "tests/output/distributions.png"
# run
find_best_pdf(data, outfile)
|
# import numpy as np
def distplot(infile, score_col=4, show=False):
"""
generate simple distplot from bedfile
"""
# https://stackoverflow.com/questions/18534562/scipy-lognormal-fitting
# https://stackoverflow.com/questions/41940726/scipy-lognorm-fitting-to-histogram
# https://stackoverflow.com/questions/26406056/a-lognormal-distribution-in-python
# https://stackoverflow.com/questions/15630647/fitting-lognormal-distribution-using-scipy-vs-matlab
bed = pd.read_csv(infile, header=None, sep="\t")
scores = pd.Series(bed[score_col])
bins = min(30, len(scores)) # too many bins = bad
    # sns.histplot returns an Axes object
    ax = sns.histplot(scores, kde=True, stat="density", bins=bins, alpha=0.2)
    ax.set_yscale("log")  # most methods are log scaled
    # # exclude outliers from plot
    # y_min = np.percentile(scores, 1)
    # y_max = np.percentile(scores, 99)
    # ax.set_ylim([y_min, y_max])
    title = os.path.splitext(os.path.basename(infile))[0].replace(".out", "")
    ax.set_title(f"{title} score distribution")
    ax.xaxis.set_label_text("Score")
    if show:
        ax.figure.show()
    else:
        outfile = infile.replace(".bed", ".png")
        ax.figure.savefig(outfile, orientation="landscape")
        ax.figure.clear()
# distplot("../tests/output/ScorePeaks_scale.out.bed", show=True)
# distplot("../tests/output/ScorePeaks_logscale.out.bed", show=True)
# distplot("../tests/output/ScorePeaks_lognorm.out.bed", show=True)
# distplot("../tests/output/ScorePeaks_loglaplace.out.bed", show=True)
# distplot("../tests/output/ScorePeaks_peakrank.out.bed", show=True)
# distplot("../tests/output/ScorePeaks_peakrankfile.out.bed", show=True)
# from matplotlib import pyplot as plt
#
# fig, (ax1) = plt.subplots(1, 1)
# scores.plot(kind='hist', density=True, bins=bins, alpha=0.5, ax=ax1)
# ax1.set_title(f"{title} score distribution")
# ax1.set_xlabel('Score')
# fig.show()
# from matplotlib import pyplot as plt
# from matplotlib.ticker import FormatStrFormatter
#
# fig, (ax1, ax2) = plt.subplots(1, 2)
# fig.suptitle(f"{title} score distribution")
#
# sns.histplot(scores, ax=ax1, kde=True, stat="density")
# ax1.set_title("raw score")
# ax1.xaxis.set_label_text("Score")
# ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
# ax1.xaxis.set_major_formatter(FormatStrFormatter('%i'))
#
# sns.histplot(np.log10(scores+1), ax=ax2, kde=True, stat="density") # log_scale=10
# ax2.set_title(f"log10 score")
# ax2.xaxis.set_label_text("Score")
#
# fig.xaxis.set_major_locator(plt.MaxNLocator(10))
# fig.xaxis.set_tick_params(rotation=15) # for long floats
# fig.xaxis.set_major_formatter(FormatStrFormatter('%i')) # integer. for floats, use '%.3f'
# fig.set_size_inches(10, 5)
# fig.savefig(outfile, orientation='landscape')
|
from ananse.enhancer_binding import (
    CombineBedFiles,
    ScorePeaks,
    ScoreMotifs,
    Binding,
)
# prep
run_gimme = False # takes ages
test_dir = os.path.dirname(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, "data")
genomepy.utils.mkdir_p(data_dir)
outdir = os.path.join(test_dir, "output")
genomepy.utils.mkdir_p(outdir)
intermediate_dir = os.path.join(outdir, "intermediate_results")
genomepy.utils.mkdir_p(intermediate_dir)
ncore = max(1, os.cpu_count() - 2)
# H3K27Ac data
genome = os.path.join(data_dir, "hg38.fa")
peakfiles = os.path.join(data_dir, "hg38-keratinocyte_H3K27ac_peaks.broadPeak")
bams = os.path.join(data_dir, "hg38-keratinocyte_H3K27ac_rep1.samtools-coordinate.bam")
# download test data locally
for file in [genome, peakfiles, bams]:
if not os.path.exists(file):
url = "https://mbdata.science.ru.nl/ANANSE/tests/data/" + file
genomepy.utils.download_file(url, file)
cbed = CombineBedFiles(genome=genome, peakfiles=peakfiles, verbose=True)
combined_bed = os.path.join(intermediate_dir, "combined.bed")
cbed.run(outfile=combined_bed, width=200, force=False)
sp = ScorePeaks(bams=bams, bed=combined_bed, ncore=ncore, verbose=True)
# benchmark peak normalization
for func, kwargs in zip(
[
"peak_rank_file_dist", # Quan's file
"peak_rank_dist", # scalable version of Quan's file
"scale_dist", # no normalization
"log_scale_dist", # no normalization, but with a log
"scipy_dist", # see kwargs
"scipy_dist", # see kwargs
],
[{}, {}, {}, {}, {"dist": "loglaplace"}, {"dist": "lognorm"}], # fits best
):
suffix = kwargs.get("dist")
if not suffix:
suffix = func
scored_peaks = os.path.join(intermediate_dir, f"scoredpeaks_{suffix}.bed")
sp.run(outfile=scored_peaks, dist_func=func, force=False, **kwargs)
distplot(scored_peaks, score_col=5)
if run_gimme:
scored_peaks = os.path.join(intermediate_dir, "scoredpeaks.bed")
sp = ScorePeaks(bams=bams, bed=combined_bed, ncore=ncore, verbose=True)
sp.run(outfile=scored_peaks, dist_func="peak_rank_dist", force=False)
sm = ScoreMotifs(
genome=genome, bed=scored_peaks, pfmfile=None, ncore=ncore, verbose=True
)
scored_motifs = os.path.join(intermediate_dir, "scoredmotifs.bed")
sm.run(outfile=scored_motifs, force=True)
b = Binding(
peak_weights=scored_peaks,
motif_weights=scored_motifs,
pfmfile=None,
model=None,
curation_filter=None,
tf_list=None,
whitelist=True,
ncore=ncore,
verbose=True,
)
outfile = os.path.join(outdir, "binding.tsv")
b.run(outfile=outfile, force=True)
|
#!/usr/bin/env python
# TODO maybe have sklearn transforms for dot prod and Lp dists
# TODO add L1 distance
# ================================================================ Distances
def dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def dists_elemwise_l1(x, q):
return np.abs(x - q)
def dists_elemwise_dot(x, q):
return x * q
# ================================================================ Preproc
def _insert_zeros(X, nzeros):
"""injects nzeros zero columns spaced as far apart as possible"""
if nzeros < 1:
return X
N, D = X.shape
D_new = D + nzeros
X_new = np.zeros((N, D_new), dtype=X.dtype)
nonzeros_per_zero = D // nzeros
if nonzeros_per_zero < 1:
X_new[:, :D] = X
return X_new
stripe_width = nonzeros_per_zero
for i in range(nzeros):
in_start = stripe_width * i
in_end = in_start + stripe_width
out_start = i * (stripe_width + 1)
out_end = out_start + stripe_width
X_new[:, out_start:out_end] = X[:, in_start:in_end]
out_end += 1
remaining_len = D - in_end
out_remaining_len = D_new - out_end
# print "D, remaining_incols, remaining_outcols, in_end, out_end: ", \
# D, remaining_len, out_remaining_len, in_end, out_end
assert remaining_len == out_remaining_len
assert remaining_len >= 0
if remaining_len:
X_new[:, out_end:out_end+remaining_len] = X[:, in_end:D]
# check that we copied both the beginning and end properly
# assert np.array_equal(X[:, 0], X_new[:, 1])
assert np.array_equal(X[:, 0], X_new[:, 0])
if remaining_len > 0:
assert np.array_equal(X[:, -1], X_new[:, -1])
return X_new
def _ensure_num_cols_multiple_of(X, multiple_of):
"""Adds as many columns of zeros as necessary to ensure that
X.shape[1] % multiple_of == 0"""
remainder = X.shape[1] % multiple_of
if remainder > 0:
return _insert_zeros(X, multiple_of - remainder)
return X
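# Quick illustration (defined but never called): padding a 4x10 matrix so its width is a
# multiple of 8 inserts 6 zero columns spread across each row.
def _example_pad_columns():
    import numpy as np

    X = np.arange(40, dtype=np.float32).reshape(4, 10)
    X_padded = _ensure_num_cols_multiple_of(X, 8)
    assert X_padded.shape == (4, 16)
    return X_padded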
# ================================================================ kmeans
def kmeans(X, k, max_iter=16, init='kmc2'):
X = X.astype(np.float32)
np.random.seed(123)
# if k is huge, initialize centers with cartesian product of centroids
# in two subspaces
if init == 'subspaces':
sqrt_k = int(np.sqrt(k) + .5)
if sqrt_k ** 2 != k:
raise ValueError("K must be a square number if init='subspaces'")
_, D = X.shape
        centroids0, _ = kmeans(X[:, :D // 2], sqrt_k, max_iter=1)
        centroids1, _ = kmeans(X[:, D // 2:], sqrt_k, max_iter=1)
seeds = np.empty((k, D), dtype=np.float32)
for i in range(sqrt_k):
for j in range(sqrt_k):
row = i * sqrt_k + j
                seeds[row, :D // 2] = centroids0[i]
                seeds[row, D // 2:] = centroids1[j]
elif init == 'kmc2':
seeds = kmc2.kmc2(X, k).astype(np.float32)
else:
raise ValueError("init parameter must be one of {'kmc2', 'subspaces'}")
estimator = cluster.MiniBatchKMeans(k, init=seeds, max_iter=max_iter).fit(X)
return estimator.cluster_centers_, estimator.labels_
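# Usage sketch for kmeans() above (defined but never called; assumes the optional kmc2
# package is installed, since the default seeding calls kmc2.kmc2).
def _example_kmeans():
    import numpy as np

    X = np.random.RandomState(0).randn(500, 32).astype(np.float32)
    centroids, labels = kmeans(X, k=16, max_iter=8)
    assert centroids.shape == (16, 32) and labels.shape == (500,)
    return centroids, labels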
# ================================================================ PQ
# TODO rm after debug
def _encode_X_pq(X, codebooks, elemwise_dist_func=dists_elemwise_sq):
ncentroids, ncodebooks, subvect_len = codebooks.shape
assert X.shape[1] == (ncodebooks * subvect_len)
    idxs = np.empty((X.shape[0], ncodebooks), dtype=int)  # np.int alias removed in numpy >= 1.24
X = X.reshape((X.shape[0], ncodebooks, subvect_len))
for i, row in enumerate(X):
row = row.reshape((1, ncodebooks, subvect_len))
dists = elemwise_dist_func(codebooks, row)
dists = np.sum(dists, axis=2)
idxs[i, :] = np.argmin(dists, axis=0)
# return idxs + self._offsets_ # offsets let us index into raveled dists
return idxs # [N x ncodebooks]
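# Toy illustration of product-quantization encoding (defined but never called): each row of
# X is split into ncodebooks subvectors and each subvector is mapped to the index of its
# nearest centroid in the matching codebook. Random codebooks stand in for learned ones.
def _example_encode_pq():
    import numpy as np

    ncentroids, ncodebooks, subvect_len = 16, 4, 8
    rng = np.random.RandomState(0)
    codebooks = rng.randn(ncentroids, ncodebooks, subvect_len).astype(np.float32)
    X = rng.randn(10, ncodebooks * subvect_len).astype(np.float32)
    return _encode_X_pq(X, codebooks)  # shape (10, 4), values in [0, 16)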
def _learn_centroids(X, ncentroids, ncodebooks):
subvect_len = int(X.shape[1] / ncodebooks)
assert subvect_len * ncodebooks == X.shape[1] # must divide evenly
ret = np.empty((ncentroids, ncodebooks, subvect_len))
for i in range(ncodebooks):
start_col = i * subvect_len
end_col = start_col + subvect_len
X_in = X[:, start_col:end_col]
centroids, labels = kmeans(X_in, ncentroids)
ret[:, i, :] = centroids
return ret.astype(np.float32)
def _learn_best_quantization(luts): # luts can be a bunch of vstacked luts
best_loss = np.inf
best_alpha = None
best_floors = None
best_scale_by = None
for alpha in [0, .001, .002, .005, .01, .02, .05, .1]:
alpha_pct = int(100 * alpha)
# compute quantized luts this alpha would yield
floors = np.percentile(luts, alpha_pct, axis=0)
luts_offset = np.maximum(0, luts - floors) # clip at 0
ceil = np.percentile(luts_offset, 100 - alpha_pct)
scale_by = 255. / ceil
        luts_quantized = np.floor(luts_offset * scale_by).astype(int)
luts_quantized = np.minimum(255, luts_quantized) # clip at 255
# compute err
luts_ideal = (luts - luts_offset) * scale_by
diffs = luts_ideal - luts_quantized
loss = np.sum(diffs * diffs)
# print "alpha = {}\t-> loss = {}".format(alpha, loss)
# # yep, almost exactly alpha saturate in either direction
# print "fraction of 0s, 255s = {}, {}".format(
# np.mean(luts_offset == 0), np.mean(luts_quantized == 255))
if loss <= best_loss:
best_loss = loss
best_alpha = alpha
best_floors = floors
best_scale_by = scale_by
# print "best alpha, loss = ", best_alpha, best_loss
# print "best floors, scale = ", best_floors, best_scale_by
return best_floors, best_scale_by, best_alpha
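# Illustrative sketch (defined but never called): quantize lookup-table entries to 8 bits
# using the floors and scale learned above. The fake LUT values are arbitrary.
def _example_quantize_lut():
    import numpy as np

    rng = np.random.RandomState(0)
    luts = np.abs(rng.randn(1000, 8)) * 10.0  # fake stacked LUTs, one column per codebook
    floors, scale_by, _alpha = _learn_best_quantization(luts)
    quantized = np.minimum(255, np.floor(np.maximum(0, luts - floors) * scale_by))
    return quantized.astype(np.uint8)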
def _extract_random_rows(X, how_many, remove_from_X=True):
if how_many > len(X):
raise IndexError("how_many ({}) > len(X) ({})".format(how_many, len(X)))
split_start = np.random.randint(len(X) - how_many - 1)
split_end = split_start + how_many
rows = np.copy(X[split_start:split_end])
if remove_from_X:
return np.vstack((X[:split_start], X[split_end:])), rows
return X, rows
def _fit_pq_lut(q, centroids, elemwise_dist_func):
_, nsubvects, subvect_len = centroids.shape
assert len(q) == nsubvects * subvect_len
q = q.reshape((1, nsubvects, subvect_len))
q_dists_ = elemwise_dist_func(centroids, q)
q_dists_ = np.sum(q_dists_, axis=-1)
return np.asfortranarray(q_dists_) # ncentroids, nsubvects, col-major
def _learn_quantization_params(X, centroids, elemwise_dist_func, Q=None,
# plot=True):
plot=False):
"""learn distros of entries in each lut"""
if Q is None:
num_rows = int(min(10*1000, len(X) / 2))
how_many = int(min(1000, num_rows // 2))
_, Q = _extract_random_rows(
X[num_rows:], how_many=how_many, remove_from_X=False)
X = X[:num_rows] # limit to first 10k rows of X
# compute luts for all the queries
luts = [_fit_pq_lut(q, centroids=centroids,
elemwise_dist_func=elemwise_dist_func) for q in Q]
luts = np.vstack(luts)
if plot:
import matplotlib.pyplot as plt
import seaborn as sb
# print "plotting LUT distributions..."
plot_luts = np.asfortranarray(luts[:5000])
_, ax = plt.subplots(figsize=(10, 4))
sb.violinplot(data=plot_luts, inner="box", cut=0, ax=ax)
ax.set_title('Distributions of distances within each LUT')
ax.set_xlabel('LUT')
ax.set_ylabel('Distance to query')
ax.set_ylim([0, np.max(plot_luts)])
plt.show()
# print "lut stats (min, mean, max):"
# print np.min(luts, axis=0)
# print np.mean(luts, axis=0)
# print np.max(luts, axis=0)
assert luts.shape == (centroids.shape[0] * len(Q), centroids.shape[1])
offsets, scaleby, _ = _learn_best_quantization(luts)
return offsets.astype(np.float32), scaleby
class MockEncoder(object):
"""Stand-in for cpp impl; only for debuging"""
def __init__(self, nbytes):
self._enc_bytes = nbytes
self.ncodebooks = 2 * nbytes
self._encoder = bolt.BoltEncoder(nbytes)
def set_centroids(self, centroids):
# accept centroids as 2D array like cpp; but we'll need them 3D
nrows, ndims = centroids.shape
ncentroids = 16
codebook_sz = ncentroids * ndims
self.centroids = np.empty((ncentroids, self.ncodebooks, ndims))
for m in range(self.ncodebooks):
start_idx = m * ncentroids # start idx of block
end_idx = start_idx + ncentroids
block = centroids[start_idx:end_idx]
self.centroids[:, m, :] = block
# check whether centroids bridge is broken
self._encoder.set_centroids(centroids)
raw_centroids = self._encoder.centroids()
cpp_centroids = np.full(raw_centroids.shape, -1)
# print "ncentroids, ncodebooks, ndims ", self.centroids.shape
inbuff = raw_centroids.ravel()
outbuff = np.zeros(raw_centroids.size) - 1
for m in range(self.ncodebooks):
start_idx = m * codebook_sz # start idx of block
for i in range(ncentroids): # for each row in block
for j in range(ndims): # for each col in block
in_idx = start_idx + (ndims * i) + j
out_idx = start_idx + (ncentroids * j) + i
outbuff[in_idx] = inbuff[out_idx]
cpp_centroids = outbuff.reshape(centroids.shape)
# print "py, cpp centroids: "
# print centroids[:20]
# print cpp_centroids[:20]
# print centroids.shape
# print cpp_centroids.shape
assert np.allclose(centroids, cpp_centroids)
def set_data(self, X):
self.X = X
self.X_enc = _encode_X_pq(X, self.centroids)
ncodebooks = self.centroids.shape[1]
        enc_offsets = np.arange(ncodebooks, dtype=int) * 16
self._encoder.set_data(X)
raw_Xenc = self._encoder.codes()
assert 2 * raw_Xenc.shape[1] == ncodebooks
cpp_Xenc = np.empty((raw_Xenc.shape[0], ncodebooks), dtype=np.uint8)
# cpp returns codes in bitpacked form, so unpack them
for in_j, out_j in enumerate(range(0, ncodebooks, 2)):
col = raw_Xenc[:, in_j]
cpp_Xenc[:, out_j] = np.bitwise_and(col, 15)
for in_j, out_j in enumerate(range(1, ncodebooks, 2)):
col = raw_Xenc[:, in_j]
cpp_Xenc[:, out_j] = np.bitwise_and(col, 255 - 15) >> 4
# print "python X enc"
# print self.X_enc.shape
# print self.X_enc[:20]
# print "cpp X enc"
# print cpp_Xenc.shape
# print cpp_Xenc[:20]
# print "raw cpp X_enc"
# print raw_Xenc[:20]
self.X_enc += enc_offsets
def set_offsets(self, offsets):
assert self.scale > 0
self._offsets_ = offsets
self._encoder.set_offsets(offsets)
def set_scale(self, scale):
self.scale = scale
self._encoder.set_scale(scale)
def _quantize_lut(self, raw_lut):
lut = np.floor(raw_lut * self.scale + self._offsets_)
return np.maximum(0, np.minimum(lut, 255)).astype(np.uint16)
def _dists(self, raw_lut):
lut = np.asfortranarray(self._quantize_lut(raw_lut))
centroid_dists = lut.T.ravel()[self.X_enc.ravel()]
return np.sum(centroid_dists.reshape(self.X_enc.shape), axis=-1)
# dists = np.sum(centroid_dists.reshape(self.X_enc.shape), axis=-1)
def dists_sq(self, q):
lut = _fit_pq_lut(q, centroids=self.centroids,
elemwise_dist_func=dists_elemwise_sq)
offsets_cpp = self._encoder.get_offsets()
scale_cpp = self._encoder.get_scale()
# print "py, cpp offsets:"
# print self.offsets
# print offsets_cpp
# print "py, cpp scale factors:"
# print self.scale
# print scale_cpp
lut_py = self._quantize_lut(lut)
# print "lets try to read the cpp lut..."
# self._encoder.lut_l2(q)
self._encoder.lut_dot(q)
lut_cpp = self._encoder.get_lut()
# print "py, cpp lut:" # within +/- 1 using naive lut impl in cpp
# print lut_py
# print lut_cpp
# return self._dists(lut)
dists_py = self._dists(lut)
dists_cpp = self._encoder.dists_sq(q)[:len(dists_py)] # strip padding
# print "py, cpp initial dists:"
# print dists_py[:20]
# print dists_cpp[:20]
# print "py, cpp final dists:"
# print dists_py[-20:]
# print dists_cpp[-20:]
return dists_py
# return dists_cpp
def dot_prods(self, q):
lut = _fit_pq_lut(q, centroids=self.centroids,
elemwise_dist_func=dists_elemwise_dot)
return self._dists(lut)
class Reductions:
SQUARED_EUCLIDEAN = 'l2'
DOT_PRODUCT = 'dot'
class Accuracy:
LOWEST = 'lowest'
LOW = 'low'
MEDIUM = 'medium'
HIGH = 'high'
_acc_to_nbytes = {
Accuracy.LOWEST: 2,
Accuracy.LOW: 8,
Accuracy.MEDIUM: 16,
Accuracy.HIGH: 32,
}
class Encoder(object):
def __init__(self, reduction=Reductions.SQUARED_EUCLIDEAN,
accuracy=Accuracy.MEDIUM, norm_mean=None):
self._enc_bytes = _acc_to_nbytes[accuracy]
self.reduction = reduction
self.norm_mean = norm_mean if norm_mean is not None \
else reduction != Reductions.DOT_PRODUCT
def _preproc(self, X):
# TODO rows of X also needs to have variance >> 1 to avoid
# everything going to 0 when bolt_encode converts to ints in argmin
one_d = len(X.shape) == 1
if one_d:
X = X.reshape((1, -1))
ncodebooks = self._enc_bytes * 2
X = X.astype(np.float32)
if self.norm_mean:
# X = X - self.means_
X -= self.means_
out = _ensure_num_cols_multiple_of(X.astype(np.float32), ncodebooks)
return out.ravel() if one_d else out
@property
def nbytes(self):
try:
return self._nbytes_
except AttributeError:
raise exceptions.NotFittedError("Encoder has not yet been given "
"a dataset; call fit() first")
def fit(self, X, just_train=False, Q=None):
if not len(X.shape) == 2:
raise IndexError("X must be [num_examples x num_dimensions]!")
if X.shape[1] < 2 * self._enc_bytes:
raise ValueError("num_dimensions must be at least 2 * nbytes")
ncentroids = 16
        self._nbytes_ = self._enc_bytes * len(X)  # total bytes used to encode X
self.DEBUG = False
# self.DEBUG = True
self.means_ = np.mean(X, axis=0) if self.norm_mean \
else np.zeros(X.shape[1])
self.means_ = self.means_.astype(np.float32)
# self.means_ = np.zeros_like(self.means_) # TODO rm
# self.means_ = np.ones_like(self.means_) # TODO rm
X = self._preproc(X)
self._ndims_ = X.shape[1]
self._ncodebooks = self._enc_bytes * 2
centroids = _learn_centroids(X, ncentroids=ncentroids,
ncodebooks=self._ncodebooks)
centroids = centroids.astype(np.float32)
# print "X shape, centroids shape: ", X.shape, centroids.shape
# print "X means before preproc:", self.means_
# print "X means after preproc:", np.mean(X, axis=0)
# print "means of centroids:", np.mean(centroids, axis=0)
if self.DEBUG:
self._encoder_ = MockEncoder(self._enc_bytes)
else:
self._encoder_ = bolt.BoltEncoder(self._enc_bytes)
# print "centroids shape: ", centroids.shape
# compute lut offsets and scaleby for l2 and dot here; we'll have
# to switch off which ones are used based on which method gets called
if self.reduction == Reductions.SQUARED_EUCLIDEAN:
elemwise_dist_func = dists_elemwise_sq
elif self.reduction == Reductions.DOT_PRODUCT:
elemwise_dist_func = dists_elemwise_dot
else:
self._bad_reduction()
offsets, self.scale = _learn_quantization_params(
X, centroids, elemwise_dist_func)
# account for fact that cpp's fma applies scale first, then adds offset
# self._offsets_ = -offsets / self.scale
self._offsets_ = -offsets * self.scale
self._total_offset_ = np.sum(self._offsets_)
# offsets_sq, self.scale_sq_ = _learn_quantization_params(
# X, centroids, dists_elemwise_sq)
# offsets_dot, self.scale_dot_ = _learn_quantization_params(
# X, centroids, dists_elemwise_dot)
self._encoder_.set_scale(self.scale)
self._encoder_.set_offsets(self._offsets_)
# # account for fact that cpp applies scale first, then offset, in fma
# self.offsets_sq_ = -offsets_sq / self.scale_sq_
# self.offsets_dot_ = -offsets_dot / self.scale_dot_
# # TODO rm after debug
# self.offsets_sq_ *= 5
# self.offsets_sq_[:] = 0.
# self.offsets_dot_[:] = 0.
# self.scale_sq_ = 1.
# self.scale_dot_ = 1.
# print "centroids shape", centroids.shape
# munge centroids into contiguous 2D array;
# starts as [ncentroids, ncodebooks, subvect_len] and
# needs to be [ncentroids * ncodebooks, subvect_len
subvect_len = centroids.shape[-1]
flat_centroids = np.empty((self._ncodebooks * ncentroids,
subvect_len), dtype=np.float32)
for m in range(self._ncodebooks):
codebook = centroids[:, m, :]
start_row = m * ncentroids
end_row = start_row + ncentroids
flat_centroids[start_row:end_row, :] = codebook
# print "centroids shape: ", centroids.shape
# print "flat centroids shape: ", flat_centroids.shape
self._encoder_.set_centroids(flat_centroids)
if not just_train:
self._encoder_.set_data(X)
self._n = len(X)
return self
def set_data(self, X):
"""set data to actually encode; separate from fit() because fit()
could use different training data than what we actully compress"""
self._encoder_.set_data(self._preproc(X))
self._n = len(X)
def transform(self, q, unquantize=False):
if self.reduction == Reductions.DOT_PRODUCT:
func = self._encoder_.dot_prods
elif self.reduction == Reductions.SQUARED_EUCLIDEAN:
func = self._encoder_.dists_sq
else:
self._bad_reduction()
ret = func(self._preproc(q))[:self._n]
return (ret - self._total_offset_) / self.scale if unquantize else ret
def knn(self, q, k):
if self.reduction == Reductions.DOT_PRODUCT:
return self._encoder_.knn_mips(self._preproc(q), k)
elif self.reduction == Reductions.SQUARED_EUCLIDEAN:
return self._encoder_.knn_l2(self._preproc(q), k)
def _bad_reduction(self):
raise ValueError("Unreconized reduction '{}'!".format(self.reduction))
# def dot(self, q, unquantize=False):
# self._check_reduction(Reductions.DOT_PRODUCT)
# ret = self._encoder_.dot_prods(self._preproc(q))[:self._n]
# return (ret - self._offsets_) * self.scale if unquantize else ret
# def dists_sq(self, q, unquantize=False):
# self._check_reduction(Reductions.SQUARED_EUCLIDEAN)
# ret = self._encoder_.dists_sq(self._preproc(q))[:self._n]
# return (ret - self._offsets_) * self.scale if unquantize else ret
# def knn_dot(self, q, k):
# self._check_reduction(Reductions.DOT_PRODUCT)
# return self._encoder_.knn_mips(self._preproc(q), k)
# def knn_l2(self, q, k):
# self._check_reduction(Reductions.SQUARED_EUCLIDEAN)
# return self._encoder_.knn_l2(self._preproc(q), k)
def _test_insert_zeros():
X = np.random.randn(4, 1000)
for ncols in range(1, X.shape[1] + 1):
for nzeros in np.arange(64):
_insert_zeros(X[:, :ncols], nzeros)
if __name__ == '__main__':
_test_insert_zeros()
|
#!/usr/bin/env python
# note that we import the module's generated .py file, not the generated
# wrapper .so (which is _bolt)
|
#!/usr/bin/env python
# from __future__ import absolute_import, division, print_function
# import pathlib as pl
# _memory = Memory('.', verbose=0, compress=7) # compression between 1 and 9
# _memory = Memory('.', verbose=0, compress=3) # compression between 1 and 9
_memory = Memory('.', verbose=0)
_dir = os.path.dirname(os.path.abspath(__file__))
CIFAR10_DIR = os.path.join(_dir, '..', 'assets', 'cifar10-softmax')
CIFAR100_DIR = os.path.join(_dir, '..', 'assets', 'cifar100-softmax')
# ================================================================ types
class MatmulTask(object):
def __init__(self, X_train, Y_train, X_test, Y_test, W_train, W_test=None,
name=None, info=None):
self.X_train = X_train
self.Y_train = Y_train
self.X_test = X_test
self.Y_test = Y_test
self.W_train = W_train
self.W_test = W_test if W_test is not None else W_train
self.name = name
self.info = info if info is not None else {}
self.train_mats = (self.X_train, self.Y_train, self.W_train)
self.test_mats = (self.X_test, self.Y_test, self.W_test)
self.initial_hashes = self._hashes()
def __str__(self):
train_str = '{} @ {} = {}'.format(
self.X_train.shape, self.W_train.shape, self.Y_train.shape)
test_str = '{} @ {} = {}'.format(
self.X_test.shape, self.W_test.shape, self.Y_test.shape)
s = "train:\t{}\ntest:\t{}".format(train_str, test_str)
if self.name:
s = "---- {}\n{}".format(self.name, s)
return s
def validate_shapes(self):
for (X, Y, W) in [self.train_mats, self.test_mats]:
N, D = X.shape
D2, M = W.shape
assert D == D2
assert (N, M) == Y.shape
def validate_hashes(self):
assert self._hashes() == self.initial_hashes
def validate(self, verbose=1, mse_thresh=1e-7, train=True, test=True):
self.validate_shapes()
self.validate_hashes()
which_mats = []
if train:
which_mats.append(self.train_mats)
if test:
which_mats.append(self.test_mats)
for (X, Y, W) in which_mats:
Y_hat = X @ W
diffs = Y - Y_hat
mse = np.mean(diffs * diffs) / np.var(Y)
if verbose > 0:
print("mse: ", mse)
assert mse < mse_thresh
def _hashes(self):
return {
'X_train': self.X_train.std(),
'Y_train': self.Y_train.std(),
'W_train': self.W_train.std(),
'X_test': self.X_test.std(),
'Y_test': self.Y_test.std(),
'W_test': self.W_test.std()
}
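# Illustrative sketch (not part of the original pipeline): build a MatmulTask
# from random data and confirm it passes validation. All sizes below are
# arbitrary and chosen purely for demonstration.
def _example_random_matmul_task(N=256, D=32, M=8, seed=0):
    rng = np.random.RandomState(seed)
    X_train = rng.randn(N, D).astype(np.float32)
    X_test = rng.randn(N // 2, D).astype(np.float32)
    W = rng.randn(D, M).astype(np.float32)
    task = MatmulTask(X_train=X_train, Y_train=X_train @ W,
                      X_test=X_test, Y_test=X_test @ W,
                      W_train=W, name='random example')
    task.validate(verbose=0)  # products are exact, so normalized mse ~= 0
    return task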
# ================================================================ ECG
def _load_x_y_w_for_ar_model(data, window_len=4, verbose=1, N_train=-1,
# estimator='minlog'):
estimator='ridge'):
# # TODO rm after debug
# print("initial data shape: ", data.shape)
# new_data = np.zeros((len(data), 4), dtype=data.dtype)
# new_data[:, :3] = data
# new_data[:, 3] = np.random.randn(len(data)) * np.std(data) * .01 + np.mean(data)
# data = new_data
data = data[1:] - data[:-1] # predict 1st derivatives so nontrivial
windows = window.sliding_window(
data, ws=(window_len, data.shape[1]), ss=(1, 1))
X = windows.reshape(windows.shape[0], -1)[:-1]
Y = data[window_len:]
# TODO rm
# Y[1:] = Y[1:] - Y[:-1] # predict differences, not raw values
N = len(X)
if N_train < data.shape[1]:
N_train = N // 2 # TODO more flexible train/test split
# N_test = N - N_train
X_train, Y_train = X[:N_train], Y[:N_train]
X_test, Y_test = X[N_train:], Y[N_train:]
# fit the autoregressive model (just a filter)
# est = linear_model.LinearRegression(fit_intercept=False)
if estimator == 'ridge':
est = linear_model.Ridge(
# alpha=.01*len(Y_train)*np.var(data), fit_intercept=False)
# alpha=(.01 * np.var(data)), fit_intercept=False)
alpha=(.1 * np.var(data)), fit_intercept=False)
# est = linear_model.Lasso(
# # alpha=.001*np.sum(np.abs(Y_train)), fit_intercept=False)
# # alpha=1e-4*(Y_train * Y_train).sum(), fit_intercept=False)
# alpha=(1e-2 * Y_train.var()), fit_intercept=False)
est.fit(X_train, Y_train)
W = est.coef_.T
else:
W = algo.linear_regression_log_loss(X_train, Y_train)
if verbose > 0:
# print("ts ar model: data.shape: ", data.shape)
# print("ts ar model: windows.shape: ", windows.shape)
print("ts ar model: X shape: ", X.shape)
print("ts ar model: Y shape: ", Y.shape)
try:
print("train r^2:", est.score(X_train, Y_train))
print("test r^2:", est.score(X_test, Y_test))
except UnboundLocalError: # not using sklearn estimator
pass
diffs = Y[1:] - Y[:-1]
# print("column variances of diffs", np.var(diffs, axis=0))
# print("column variances of Y", np.var(Y, axis=0))
# print("var(diffs), var(Y)", np.var(diffs), np.var(Y))
print("var(diffs) / var(Y)", np.var(diffs) / np.var(Y))
# print("coeffs: ")
# for i in range(0, len(W), 10):
# print(W[i:(i + 10)])
# Y_hat_train = est.predict(X_train)
# Y_hat_test = est.predict(X_test)
# print("Y_hat_train var / 1e3", np.var(Y_hat_train, axis=0) / 1e3)
# print("Y_train var / 1e3", np.var(Y_train, axis=0) / 1e3)
# print("Y_hat_test var / 1e3", np.var(Y_hat_test, axis=0) / 1e3)
# print("Y_test var / 1e3", np.var(Y_test, axis=0) / 1e3)
# import sys; sys.exit()
# print(W.shape)
# print(est.score(X, Y))
# Y_hat = est.predict(X)
# diffs = Y - Y_hat
# print("normalized mse: ", np.mean(diffs * diffs) / np.var(Y))
# Y_hat = X @ W
# diffs = Y - Y_hat
# print("normalized mse: ", np.mean(diffs * diffs) / np.var(Y))
# return X_test, Y_test, W # Y_hat = X @ W
return MatmulTask(X_train=X_train, Y_train=Y_train,
X_test=X_test, Y_test=Y_test, W_train=W)
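# Hypothetical sanity check (a sketch, not used by the ECG loaders below):
# build an AR task from a synthetic multichannel sine wave and validate it
# with the same normalized-mse threshold the ECG tasks use. Signal length,
# frequencies, and noise level are made up for illustration.
def _example_synthetic_ar_task(n=4096, nchannels=3, window_len=4):
    t = np.arange(n)[:, np.newaxis]
    freqs = np.linspace(.01, .03, nchannels)[np.newaxis, :]
    data = np.sin(2 * np.pi * freqs * t) + .01 * np.random.randn(n, nchannels)
    task = _load_x_y_w_for_ar_model(
        data.astype(np.float32), window_len=window_len, verbose=0)
    task.validate(verbose=0, mse_thresh=.25)  # normalized mse; >0 since lstsq
    return task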
# def load_ecg_x_y_w_for_recording(recording, window_len=4):
# return _load_x_y_w_for_ar_model(recording, window_len=window_len)
# @_memory.cache
# def load_ecg_recordings(limit_nhours=2):
# generator = limit_nhours is not None and limit_nhours > 0
# return sharee.load_recordings(
# limit_nhours=limit_nhours, generator=generator)
# ------------------------------------------------ sharee
# @_memory.cache() # caching is no faster than just recomputing with ridge
# @_memory.cache()
def load_sharee_x_y_w_for_recording_id(rec_id, window_len=4, limit_nhours=.5):
rec = sharee.load_recording(rec_id, limit_nhours=limit_nhours)
return _load_x_y_w_for_ar_model(rec, window_len=window_len)
def load_sharee_tasks(window_len=4, validate=False, **kwargs):
rec_ids = sharee.load_recording_ids()
for i, rec_id in enumerate(rec_ids):
task = load_sharee_x_y_w_for_recording_id(
rec_id, window_len=window_len)
# task.info = {'rec_id: ', rec_id}
task.name = rec_id
if validate:
print("validating ecg task {}/{}...".format(i + 1, len(rec_ids)))
task.validate(mse_thresh=.25) # normalized mse; >0 since lstsq
yield task
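# Typical usage (sketch): the loaders above yield one MatmulTask per
# recording, lazily, e.g.
#
#   for task in load_sharee_tasks(window_len=4):
#       print(task)  # prints the train/test matmul shapes for this recording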
# ------------------------------------------------ incart
# @_memory.cache()
def load_incart_x_y_w_for_recording_id(rec_id, window_len=4, limit_nhours=1):
rec = incart.load_recording(rec_id, limit_nhours=limit_nhours)
return _load_x_y_w_for_ar_model(rec, window_len=window_len)
def load_incart_tasks(window_len=4, validate=False, **kwargs):
rec_ids = incart.load_recording_ids()
for i, rec_id in enumerate(rec_ids):
task = load_incart_x_y_w_for_recording_id(
rec_id, window_len=window_len)
task.name = rec_id
if validate:
print("validating ecg task {}/{}...".format(i + 1, len(rec_ids)))
task.validate(mse_thresh=.25) # normalized mse; >0 since lstsq
yield task
# ------------------------------------------------ wrapper
def load_ecg_x_y_w_for_recording_id(*args, **kwargs):
return load_incart_x_y_w_for_recording_id(*args, **kwargs)
def load_ecg_tasks(*args, **kwargs):
return load_incart_tasks(*args, **kwargs)
# ================================================================ caltech
# def load_caltech_imgs(ntrain_classes=50, limit_per_class=10):
# # imgs = caltech.load_caltech101(resample=None, crop=None)
# (imgs, y), label2name = caltech.load_caltech101(
# limit_per_class=limit_per_class)
# # print(len(imgs))
# # print(sum([img.size for img in imgs]))
# # print("class counts: ", np.bincount(y))
# # split by class; more relaxed than assuming you have examples from
# # the same dataset/class you're later going to apply your filter to
# train_idxs = np.where(y < ntrain_classes)[0]
# test_idxs = np.where(y >= ntrain_classes)[0]
# imgs_train = [imgs[i] for i in train_idxs]
# imgs_test = [imgs[i] for i in test_idxs]
# return imgs_train, imgs_test
@_memory.cache
def load_caltech_img_ids(ntrain_classes=50, limit_per_class_train=10,
limit_per_class_test=10, verbose=1):
limit_per_class = max(limit_per_class_train, limit_per_class_test)
(imgs, y), label2name = caltech.load_caltech101_ids(
limit_per_class=limit_per_class)
# split by class; more relaxed than assuming you have examples from
# the same dataset/class you're later going to apply your filter to
imgs_ids_train = []
imgs_ids_test = []
if verbose > 0:
print("limiting ntrain per class to ", limit_per_class_train)
print("limiting ntest per class to ", limit_per_class_test)
# keep fewer idxs for train or test if requested
if limit_per_class_train > 0:
train_idxs = np.where(y < ntrain_classes)[0]
if limit_per_class_train < limit_per_class:
y_train = y[train_idxs]
keep_idxs = []
for c in np.unique(y_train):
c_idxs = np.where(y_train == c)[0][:limit_per_class_train]
keep_idxs += list(c_idxs)
train_idxs = train_idxs[np.array(keep_idxs)]
imgs_ids_train = [imgs[i] for i in train_idxs]
if limit_per_class_test > 0:
test_idxs = np.where(y >= ntrain_classes)[0]
if limit_per_class_test < limit_per_class:
y_test = y[test_idxs]
keep_idxs = []
for c in np.unique(y_test):
c_idxs = np.where(y_test == c)[0][:limit_per_class_test]
keep_idxs += list(c_idxs)
test_idxs = test_idxs[np.array(keep_idxs)]
imgs_ids_test = [imgs[i] for i in test_idxs]
return imgs_ids_train, imgs_ids_test
def _load_dummy_caltech_filter_3x3(order='hwc'):
filt = [[-1, 2, -1],
[-1, 2, -1],
[-1, 2, -1]]
if order == 'hwc':
filt = np.array(filt)[..., np.newaxis]
filt = np.tile(filt, (1, 1, 3))
else:
assert order == 'chw'
filt = np.array(filt)[np.newaxis, ...]
filt = np.tile(filt, (3, 1, 1))
return filt
def _lift_grayscale_filt_to_rgb(filt, order='hwc'):
if order == 'hwc':
filt = np.array(filt)[..., np.newaxis]
filt = np.tile(filt, (1, 1, 3))
else:
assert order == 'chw'
filt = np.array(filt)[np.newaxis, ...]
filt = np.tile(filt, (3, 1, 1))
return filt
def _lift_vert_filter_to_rgb_pair(filt_v, order='hwc'):
filt_v = np.array(filt_v)
filt_h = np.ascontiguousarray(filt_v.T)
filt_v = _lift_grayscale_filt_to_rgb(filt_v, order=order)
filt_h = _lift_grayscale_filt_to_rgb(filt_h, order=order)
return filt_v, filt_h
def load_filters_sobel_3x3(order='hwc'):
filt_v = [[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]]
filt_v = np.array(filt_v, dtype=np.float32) / 2.
return _lift_vert_filter_to_rgb_pair(filt_v, order=order)
def load_filters_sobel_5x5(order='hwc'):
filt_v = [[-5, -4, 0, 4, 5],
[-8, -10, 0, 10, 8],
              [-10, -20, 0, 20, 10],
[-8, -10, 0, 10, 8],
[-5, -4, 0, 4, 5]]
filt_v = np.array(filt_v, dtype=np.float32) / 20.
    return _lift_vert_filter_to_rgb_pair(filt_v, order=order)
def load_filters_gaussian_3x3(order='hwc'):
x = np.array([1, 2, 1])
filt = (np.outer(x, x) / 16.).astype(np.float32)
return [_lift_grayscale_filt_to_rgb(filt, order=order)]
def load_filters_gaussian_5x5(order='hwc'):
x = np.array([1, 4, 6, 4, 1])
filt = (np.outer(x, x) / 256.).astype(np.float32)
return [_lift_grayscale_filt_to_rgb(filt, order=order)]
def load_filters_sharpen_5x5(order='hwc'):
# from https://en.wikipedia.org/wiki/Kernel_(image_processing)
x = np.array([1, 4, 6, 4, 1])
x = np.outer(x, x)
x[2, 2] = -476
filt = (x / -256.).astype(np.float32)
return [_lift_grayscale_filt_to_rgb(filt, order=order)]
def load_filters_gaussian(
order='hwc', shape=(5, 5), sigmas=(1, 2)):
filts = []
for sigma in sigmas:
filt = np.zeros(shape, dtype=np.float32)
coeff = 1. / (sigma * np.sqrt(2 * np.pi))
scale = 1. / (2 * sigma * sigma)
i0 = int(shape[0] - 1) // 2
j0 = int(shape[1] - 1) // 2
for i in range(shape[0]):
for j in range(shape[1]):
dist_sq = (i - i0)**2 + (j - j0)**2
filt[i, j] = np.exp(-dist_sq * scale)
filt *= coeff
filts.append(filt)
return [_lift_grayscale_filt_to_rgb(filt, order=order) for filt in filts]
def _filters_list_to_mat(filters):
filters_flat = [filt.ravel() for filt in filters]
return np.vstack(filters_flat).T
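# Quick illustrative check (my addition; shapes assume the 3-channel filter
# layout used above): the filter loaders return lists of filters, and
# _filters_list_to_mat stacks their flattened coefficients as columns of the
# W matrix that caltech_x_y_for_img multiplies against.
def _example_sobel_filter_matrix(order='hwc'):
    filt_v, filt_h = load_filters_sobel_3x3(order=order)
    W = _filters_list_to_mat([filt_v, filt_h])
    # each filter is 3x3x3 -> 27 coefficients; two filters -> a (27, 2) matrix
    assert W.shape == (3 * 3 * 3, 2)
    return W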
def caltech_x_y_for_img(img, filt_spatial_shape, filters_list=None, W=None,
strides=(1, 1), order='chw'):
# extract and flatten windows into rows of X matrix
if order == 'hwc':
windows = window.extract_conv2d_windows(
img, filt_shape=filt_spatial_shape, strides=strides)
filt_sz = img.shape[-1] * np.prod(filt_spatial_shape)
X = windows.reshape(-1, filt_sz)
else:
assert order == 'chw'
assert img.shape[2] == 3 # assumes img in hwc order
X_subs_list = []
filt_spatial_sz = np.prod(filt_spatial_shape)
for c in range(3):
windows = window.extract_conv2d_windows(
img[:, :, c][..., np.newaxis],
filt_shape=filt_spatial_shape, strides=strides)
X_subs_list.append(windows.reshape(-1, filt_spatial_sz))
X = np.hstack(X_subs_list)
assert X.max() <= 255
assert X.min() >= 0
W = _filters_list_to_mat(filters_list) if W is None else W
return X, X @ W
@_memory.cache # cache raw images to avoid IO, but dynamically produce windows
def _load_caltech_train_imgs(limit_per_class=10):
train_ids, _ = load_caltech_img_ids(
limit_per_class_train=limit_per_class, limit_per_class_test=0)
imgs = [caltech.load_caltech_img(img_id) for img_id in train_ids]
return imgs, train_ids
@_memory.cache # cache raw images to avoid IO, but dynamically produce windows
def _load_caltech_test_imgs(limit_per_class=10):
_, test_ids = load_caltech_img_ids(
limit_per_class_train=0, limit_per_class_test=limit_per_class)
imgs = [caltech.load_caltech_img(img_id) for img_id in test_ids]
return imgs, test_ids
# def _load_caltech_train(filters, filt_spatial_shape):
# def _load_caltech_train(W, filt_spatial_shape, strides=(3, 3)):
# def _load_caltech_train(W, filt_spatial_shape, strides=(1, 1), order='chw',
def _load_caltech_train(W, filt_spatial_shape, strides=(2, 2), order='chw',
limit_ntrain=-1, limit_per_class=10):
train_imgs, _ = _load_caltech_train_imgs(limit_per_class=limit_per_class)
#
# uncomment to plot imgs to make sure this is working
#
# which_idxs = np.random.randint(len(train_imgs), size=16)
# # imgs = [train_imgs[i] for i in which_idxs]
# import matplotlib.pyplot as plt
# _, axes = plt.subplots(4, 4, figsize=(9, 9))
# for i, idx in enumerate(which_idxs):
# axes.ravel()[i].imshow(train_imgs[idx])
# plt.show()
train_mats = [caltech_x_y_for_img(img, W=W, strides=strides, order=order,
filt_spatial_shape=filt_spatial_shape)
for img in train_imgs]
Xs, Ys = list(zip(*train_mats))
X_train = np.vstack(Xs)
Y_train = np.vstack(Ys)
if limit_ntrain is not None and limit_ntrain > 0:
limit_ntrain = int(limit_ntrain)
# X_train = X_train[:limit_ntrain]
# Y_train = Y_train[:limit_ntrain]
X_train = X_train[-limit_ntrain:]
Y_train = Y_train[-limit_ntrain:]
print("caltech training shape: ", X_train.shape, Y_train.shape)
return X_train, Y_train
def load_caltech_tasks(order='chw', limit_ntrain=-1,
limit_ntest=-1, validate=False, filt='sobel',
limit_per_class_train=1,
limit_per_class_test=10):
if filt == 'sobel':
filters = load_filters_sobel_3x3(order=order)
# filt_spatial_shape = (3, 3)
elif filt == 'gauss3x3':
filters = load_filters_gaussian_3x3(order=order)
# filt_spatial_shape = (3, 3)
elif filt == 'gauss5x5':
filters = load_filters_gaussian_5x5(order=order)
# filt_spatial_shape = (5, 5)
elif filt == 'sharpen5x5':
filters = load_filters_sharpen_5x5(order=order)
# filt_spatial_shape = (5, 5)
else:
assert filt == 'dog5x5'
filters = load_filters_gaussian(order=order, shape=(5, 5))
# filt_spatial_shape = (5, 5)
if order == 'chw':
filt_spatial_shape = filters[0].shape[-2:]
else:
assert order == 'hwc'
filt_spatial_shape = filters[0].shape[:2]
W = _filters_list_to_mat(filters)
X_train, Y_train = _load_caltech_train(
W=W, filt_spatial_shape=filt_spatial_shape, order=order,
limit_ntrain=limit_ntrain, limit_per_class=limit_per_class_train)
test_imgs, test_ids = _load_caltech_test_imgs(
limit_per_class=limit_per_class_test)
# print("caltech tasks stats:")
# print("X train shape: ", X_train.shape)
# print("X train nbytes: ", X_train.nbytes)
# print("Y train shape: ", Y_train.shape)
# print("Y train nbytes: ", Y_train.nbytes)
# # print("type(test_imgs)", type(test_imgs))
# print("len(test_imgs)", len(test_imgs))
# _, test_ids = load_caltech_img_ids(limit_per_class)
# for i, _ in enumerate(test_imgs):
for i, img in enumerate(test_imgs):
# if i < 2: # TODO rm after debug
# continue
# img = img1.copy() if i % 2 else img0.copy()
# img = img1
X_test, Y_test = caltech_x_y_for_img(
img, filt_spatial_shape=filt_spatial_shape, W=W, order=order)
name = f'Caltech {i} ({os.path.dirname(test_ids[i]).split("/")[-1]})'
task = MatmulTask(X_train=X_train, Y_train=Y_train, W_train=W,
X_test=X_test, Y_test=Y_test, W_test=W,
name=name)
task.info['problem'] = filt
# print(f"task {task.name} matrix hashes:")
# import pprint
# pprint.pprint(task.hashes())
if limit_ntest is not None and limit_ntest > 0:
limit_ntest = int(limit_ntest)
task.X_test = task.X_test[:limit_ntest]
task.Y_test = task.Y_test[:limit_ntest]
# task.info = {'task_id: ', i}
# task.name = str(i)
if validate:
print("validating caltech task {}/{}...".format(
i + 1, len(test_imgs)))
print("X_train.std()", X_train.std())
print("Y_train.std()", Y_train.std())
print("X_test.std()", X_test.std())
print("Y_test.std()", Y_test.std())
task.validate()
# print("about to yield task with name: ", task.name)
yield task
# print("exiting at load_caltech_tasks()")
# import sys; sys.exit()
def test_caltech_loading():
    # _load_caltech_test_imgs() returns (images, image ids), not a train/test split
    imgs, _ = _load_caltech_test_imgs()
    filt = _load_dummy_caltech_filter_3x3()
    print("imgs[0].shape", imgs[0].shape)
    print("filt shape: ", filt.shape)
    X, Y = caltech_x_y_for_img(imgs[0], filt_spatial_shape=filt.shape[:2],
                               filters_list=[filt], order='hwc')
    print("X shape: ", X.shape)
    print("Y shape: ", Y.shape)
    # flattening the filter and multiplying should give the same answer as Y
    flat_filt = filt.ravel()
    Y_hat = X @ flat_filt
    diffs = Y.ravel() - Y_hat
    mse = np.sum(diffs * diffs) / np.var(Y)
    print("mse: ", mse)
    assert mse < 1e-10
# ================================================================ cifar
def load_cifar10_tasks():
SOFTMAX_INPUTS_TRAIN_PATH = 'cifar10_softmax_inputs_train.npy'
SOFTMAX_OUTPUTS_TRAIN_PATH = 'cifar10_softmax_outputs_train.npy'
SOFTMAX_INPUTS_TEST_PATH = 'cifar10_softmax_inputs_test.npy'
SOFTMAX_OUTPUTS_TEST_PATH = 'cifar10_softmax_outputs_test.npy'
SOFTMAX_W_PATH = 'cifar10_softmax_W.npy'
SOFTMAX_B_PATH = 'cifar10_softmax_b.npy'
LABELS_TRAIN_PATH = 'cifar10_labels_train.npy'
LABELS_TEST_PATH = 'cifar10_labels_test.npy'
def load_mat(fname):
fpath = os.path.join(CIFAR10_DIR, fname)
return np.load(fpath)
X_train = load_mat(SOFTMAX_INPUTS_TRAIN_PATH)
Y_train = load_mat(SOFTMAX_OUTPUTS_TRAIN_PATH)
X_test = load_mat(SOFTMAX_INPUTS_TEST_PATH)
Y_test = load_mat(SOFTMAX_OUTPUTS_TEST_PATH)
W = load_mat(SOFTMAX_W_PATH)
b = load_mat(SOFTMAX_B_PATH)
lbls_train = load_mat(LABELS_TRAIN_PATH).ravel()
lbls_test = load_mat(LABELS_TEST_PATH).ravel()
# we aren't going to store or approximate the biases, so just subtract
# off their contributions at the start
Y_train -= b
Y_test -= b
# # TODO rm all this after debug
# logits_test = Y_test + b
# print("logits_test.shape", logits_test.shape)
# print("lbls_test.shape", lbls_test.shape)
# lbls_hat_test = np.argmax(Y_test, axis=1)
# print("lbls_hat_test.shape", lbls_hat_test.shape)
# acc = np.mean(lbls_hat_test.ravel() == lbls_test.ravel())
# print("Y_test: ", Y_test[:10])
# print("Y_train head: ", Y_train[:10])
# print("Y_train tail: ", Y_train[-10:])
# print("b:\n", b)
# # print("lbls hat test:")
# # print(lbls_hat_test[:100])
# # print("lbls test:")
# # print(lbls_test[:100])
# print("lbls train:")
# print(lbls_train[:100])
# print("acc: ", acc)
info = {'problem': 'softmax', 'biases': b,
'lbls_train': lbls_train, 'lbls_test': lbls_test}
return [MatmulTask(X_train, Y_train, X_test, Y_test, W,
name='CIFAR-10 Softmax', info=info)]
def load_cifar100_tasks():
SOFTMAX_INPUTS_TRAIN_PATH = 'cifar100_softmax_inputs_train.npy'
SOFTMAX_OUTPUTS_TRAIN_PATH = 'cifar100_softmax_outputs_train.npy'
SOFTMAX_INPUTS_TEST_PATH = 'cifar100_softmax_inputs_test.npy'
SOFTMAX_OUTPUTS_TEST_PATH = 'cifar100_softmax_outputs_test.npy'
SOFTMAX_W_PATH = 'cifar100_softmax_W.npy'
SOFTMAX_B_PATH = 'cifar100_softmax_b.npy'
LABELS_TRAIN_PATH = 'cifar100_labels_train.npy'
LABELS_TEST_PATH = 'cifar100_labels_test.npy'
def load_mat(fname):
fpath = os.path.join(CIFAR100_DIR, fname)
return np.load(fpath)
X_train = load_mat(SOFTMAX_INPUTS_TRAIN_PATH)
Y_train = load_mat(SOFTMAX_OUTPUTS_TRAIN_PATH)
X_test = load_mat(SOFTMAX_INPUTS_TEST_PATH)
Y_test = load_mat(SOFTMAX_OUTPUTS_TEST_PATH)
W = load_mat(SOFTMAX_W_PATH)
b = load_mat(SOFTMAX_B_PATH)
lbls_train = load_mat(LABELS_TRAIN_PATH).ravel()
lbls_test = load_mat(LABELS_TEST_PATH).ravel()
# we aren't going to store or approximate the biases, so just subtract
# off their contributions at the start
Y_train -= b
Y_test -= b
# # TODO rm all this after debug
# logits_test = Y_test + b
# print("logits_test.shape", logits_test.shape)
# print("lbls_test.shape", lbls_test.shape)
# lbls_hat_test = np.argmax(Y_test, axis=1)
# print("lbls_hat_test.shape", lbls_hat_test.shape)
# acc = np.mean(lbls_hat_test.ravel() == lbls_test.ravel())
# print("Y_test: ", Y_test[:10])
# print("Y_train head: ", Y_train[:10])
# print("Y_train tail: ", Y_train[-10:])
# print("b:\n", b)
# # print("lbls hat test:")
# # print(lbls_hat_test[:100])
# # print("lbls test:")
# # print(lbls_test[:100])
# print("lbls train:")
# print(lbls_train[:100].ravel())
# print("acc: ", acc)
info = {'problem': 'softmax', 'biases': b,
'lbls_train': lbls_train, 'lbls_test': lbls_test}
return [MatmulTask(X_train, Y_train, X_test, Y_test, W,
name='CIFAR-100 Softmax', info=info)]
def load_cifar_tasks():
return load_cifar10_tasks() + load_cifar100_tasks()
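# Illustrative sketch (left as a comment because it needs the cifar*-softmax
# .npy assets on disk): the stored labels and biases let you recover the
# classifier's test accuracy from the exact products, e.g.
#
#   task = load_cifar10_tasks()[0]
#   logits = task.X_test @ task.W_test + task.info['biases']
#   acc = np.mean(np.argmax(logits, axis=1) == task.info['lbls_test'])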
# ================================================================ ucr
def _learn_neighbor_compression_W_info(X, lbls, k):
centroids, lbls_centroids = algo.stochastic_neighbor_compression(
X, lbls, k)
extra_info = {'lbls_centroids': lbls_centroids}
return centroids.T, extra_info
def _learn_softmax_W_info(X, lbls):
est = linear_model.LogisticRegression(
# raise max iters from 100 to avoid convergence messages
fit_intercept=False, solver='lbfgs', max_iter=200)
est.fit(X, lbls)
nclasses, _ = est.coef_.shape
    return est.coef_.T, {'biases': np.zeros(nclasses, dtype=np.float32)}
@_memory.cache
def _load_ucr_task_for_dset(
dset_name, D=320, k=128, min_train_sz=-1, use_test_sz=-1,
problem='rbf', verbose=1):
dset = ucr.UCRDataset(dset_name)
if min_train_sz is None or min_train_sz < k:
min_train_sz = k
if use_test_sz is None or use_test_sz < 1:
use_test_sz = len(dset.X_test)
if verbose > 0:
print(f"----- loading task for UCR dataset: {dset.name}")
nclasses = len(np.unique(dset.y_test))
ntrain = len(dset.X_train)
ntest = len(dset.X_test)
if nclasses > k:
if verbose > 0:
print(f"returning None because " +
f"nclasses={nclasses} > k={k}")
return None # some class will have no centroids
if ntrain < min_train_sz:
if verbose > 0:
print(f"returning None because " +
f"num_train={ntrain} < min_train_sz={min_train_sz}")
return None
if ntest < use_test_sz:
if verbose > 0:
print(f"returning None because " +
f"num_test={ntest} < min_test_sz={use_test_sz}")
return None
X_train = dset.X_train
X_test = dset.X_test[:use_test_sz]
dset.y_test = dset.y_test[:use_test_sz]
X_train = signal.resample(X_train, D, axis=1).astype(np.float32)
X_test = signal.resample(X_test, D, axis=1).astype(np.float32)
info = {'problem': problem, 'lbls_train': dset.y_train,
'lbls_test': dset.y_test}
if problem in ('1nn', 'rbf'):
print(f"compressing training set for dset: {dset.name}")
W, extra_info = _learn_neighbor_compression_W_info(
X_train, dset.y_train, k)
elif problem == 'softmax':
W, extra_info = _learn_softmax_W_info(
X_train, dset.y_train)
else:
raise ValueError(f"Unrecognized problem '{problem}'")
Y_train = X_train @ W
Y_test = X_test @ W
info.update(extra_info)
return [MatmulTask(X_train, Y_train, X_test, Y_test, W,
name=f'ucr {dset.name} k={k}', info=info)]
def load_ucr_tasks(limit_ntasks=-1, k=128, **kwargs):
all_tasks = []
df = ucr.load_ucr_dset_stats()
name2acc = dict(zip(df['Name'], df['l2-1nn-acc']))
for dset_name in ucr.all_ucr_dataset_dirs():
orig_acc = name2acc[os.path.basename(dset_name)]
tasks = _load_ucr_task_for_dset(dset_name, k=k, **kwargs)
if tasks is not None:
for task in tasks:
task.info['acc-1nn-raw'] = orig_acc
all_tasks += tasks
# else:
# print("got None instead of tasks for dset: ", dset_name)
if ((limit_ntasks is not None) and (limit_ntasks > 0) and
(len(all_tasks) >= limit_ntasks)):
all_tasks = all_tasks[:limit_ntasks]
break
return all_tasks
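# Illustrative usage (a sketch; requires the UCR archive on disk, so it is
# left as a comment rather than executable code):
#
#   tasks = load_ucr_tasks(limit_ntasks=5, k=64, problem='softmax')
#   for task in tasks:
#       task.validate()  # Y = X @ W exactly, so the default threshold works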
# ================================================================ main
def test_caltech_tasks():
for _ in load_caltech_tasks(validate=True):
pass # need to loop thru since it's a generator
def test_ecg_tasks():
# for _ in load_ecg_tasks(validate=True):
for i, _ in enumerate(load_ecg_tasks(validate=False)):
print("loaded ecg task {}/{}".format(i + 1, sharee.NUM_RECORDINGS))
def test_cifar_tasks():
task = load_cifar10_tasks()[0]
print(task)
task.validate()
task = load_cifar100_tasks()[0]
print(task)
task.validate()
def main():
np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)})
train_ids, test_ids = load_caltech_img_ids()
print("number of uniq train ids: ", len(np.unique(train_ids)))
print("number of uniq test ids: ", len(np.unique(test_ids)))
for i, task in enumerate(load_caltech_tasks(validate=True)):
pass
# test_caltech_tasks()
# test_cifar_tasks()
# test_ecg_tasks()
# load_cifar10_tasks()
# load_cifar100_tasks()
# print("number of ucr dirs:", len(list(ucr.all_ucr_dataset_dirs())))
# tasks = load_ucr_tasks()
# print("number of tasks meeting basic size criteria:", len(tasks))
# print("number of caltech imgs: ", len(_load_caltech_test_imgs()))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
def main():
UNDEFINED = 7
M = 40000
# M = 500
# M = 2
# K = 16
# C = 64
try_Cs = np.array([2, 4, 8, 16, 32, 64, 128])
try_Us = np.array([2, 4, 8, 16, 32, 64, 128])
biases = np.zeros((try_Cs.size, try_Us.size)) + UNDEFINED
# sses = np.zeros((try_Cs.size, try_Us.size)) + UNDEFINED
dists_true = np.zeros((try_Cs.size, try_Us.size, M))
dists_hat = np.zeros((try_Cs.size, try_Us.size, M))
all_errs = np.zeros((try_Cs.size, try_Us.size, M))
for i, C in enumerate(try_Cs):
for j, upcast_every in enumerate(try_Us):
if upcast_every > C:
continue
# dists = np.random.randint(256, size=(M * K, C))
orig_dists = np.random.randint(256, size=(M, C))
# print("orig_dists[:10]", orig_dists[:10]) # ya, these are sane
dists = orig_dists.reshape(orig_dists.shape[0], -1, upcast_every)
while dists.shape[-1] > 2:
# print("dists shape: ", dists.shape)
# print("dists:\n", dists)
dists = (dists[:, :, ::2] + dists[:, :, 1::2] + 1) // 2
# print("dists shape: ", dists.shape)
dists = (dists[:, :, 0] + dists[:, :, 1] + 1) // 2
dists = dists.sum(axis=-1) # clipping not needed
dists *= upcast_every
true_dists = orig_dists.sum(axis=1)
errs = dists - true_dists
# biases[i, j] = diffs.mean()
biases[i, j] = errs.mean()
# store true dists so we can compute variance of estimator
# dists_true[i, j] = true_dists
# dists_hat[i, j] = dists
all_errs[i, j] = errs
# debias =
# sses[i, j] = diffs
# diffs = true_dists - dists
# print(f"C = {C}, upcast_every={upcast_every}")
# print("mean true dist: ", true_dists.mean())
# print("mean diff:", diffs.mean())
print("biases:\n", biases)
# col = try_Cs / 4
# row = np.log2(try_Us).astype(np.int)
# biases_hat = np.outer(col, row)
# print("biases_hat:\n", biases_hat)
# biases_hat2 = np.zeros((try_Cs.size, try_Us.size)) - UNDEFINED
biases_hat2 = np.zeros((try_Cs.size, try_Us.size))
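    # Why C/4 * log2(U) (sketch of an argument, not a formal proof): each
    # pairwise average (a + b + 1) // 2 rounds up whenever a + b is odd, which
    # for roughly uniform byte values happens about half the time, so each
    # averaging op adds ~1/4 of expected bias. A group of U values is reduced
    # by a log2(U)-deep tree of such averages, so its estimated mean is high
    # by ~log2(U)/4; summing the C/U group means and multiplying by U then
    # inflates the total by ~ (C/U) * U * log2(U)/4 = (C/4) * log2(U).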
for i, C in enumerate(try_Cs):
for j, upcast_every in enumerate(try_Us):
if upcast_every > C:
continue
biases_hat2[i, j] = C / 4 * np.log2(upcast_every)
print("biases_hat2:\n", biases_hat2)
print("corrected biases:\n", biases - biases_hat2)
all_errs -= biases_hat2[..., np.newaxis]
# print("mean corrected errs:\n", all_errs.mean(axis=-1))
print("mean corrected errs:\n", np.var(all_errs, axis=-1))
sq_errs = (all_errs * all_errs).mean(axis=-1)
print("empirical mean squared err for C, U", sq_errs)
sq_errs_hat = np.zeros((try_Cs.size, try_Us.size))
for i, C in enumerate(try_Cs):
for j, upcast_every in enumerate(try_Us):
if upcast_every > C:
continue
sq_errs_hat[i, j] = C / 8 * np.log2(upcast_every)
print("estimated mean squared err for C, U", sq_errs_hat)
print("takeaway: no idea what closed form for mse is...")
# print("biases - biases_hat", biases - biases_hat)
# plt.scatter(true_dists, dists)
# plt.show()
if __name__ == '__main__':
np.set_printoptions(formatter={'float': lambda f: "{:5.1f}".format(f)},
linewidth=100)
main()
|
#!/usr/bin/env python
def ls(dir='.'):
return os.listdir(dir)
def is_hidden(path):
return os.path.basename(path).startswith('.')
def is_visible(path):
return not is_hidden(path)
def join_paths(dir, contents):
return [os.path.join(dir, f) for f in contents]
def files_matching(dir, prefix=None, suffix=None, abs_paths=False,
only_files=False, only_dirs=False, recursive=False,
only_visible=False, only_hidden=False):
files = os.listdir(dir)
if recursive:
abs_dir = dir
paths = join_paths(abs_dir, files)
for path in paths:
if not os.path.isdir(path):
continue
matches = files_matching(
path, prefix=prefix, suffix=suffix,
abs_paths=abs_paths, only_files=only_files,
only_dirs=only_dirs, recursive=True)
matches = join_paths(path, matches)
matches = [os.path.relpath(m, start=dir) for m in matches]
files += matches
if prefix:
files = [f for f in files if f.startswith(prefix)]
if suffix:
files = [f for f in files if f.endswith(suffix)]
if only_files or only_dirs:
paths = join_paths(dir, files)
if only_files:
files = [f for f, p in zip(files, paths) if os.path.isfile(p)]
if only_dirs:
files = [f for f, p in zip(files, paths) if os.path.isdir(p)]
if abs_paths:
files = join_paths(os.path.abspath(dir), files)
if only_visible:
files = [f for f in files if is_visible(f)]
if only_hidden:
files = [f for f in files if is_hidden(f)]
return sorted(files)
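# Example usage (illustrative): list all visible .py files under a directory
# tree as absolute paths:
#
#   files_matching('.', suffix='.py', abs_paths=True, only_files=True,
#                  recursive=True, only_visible=True)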
def list_subdirs(dir, startswith=None, endswith=None, abs_paths=False,
recursive=False):
return files_matching(dir, startswith, endswith, abs_paths,
only_dirs=True, recursive=recursive)
def list_files(dir, startswith=None, endswith=None, abs_paths=False,
recursive=False):
return files_matching(dir, startswith, endswith, abs_paths,
only_files=True, recursive=recursive)
def list_hidden_files(dir, startswith=None, endswith=None, abs_paths=False,
recursive=False):
return files_matching(dir, startswith, endswith, abs_paths, only_files=True,
recursive=recursive, only_hidden=True)
def list_visible_files(dir, startswith=None, endswith=None, abs_paths=False,
recursive=False):
return files_matching(dir, startswith, endswith, abs_paths, only_files=True,
recursive=recursive, only_visible=True)
def remove(path):
if os.path.exists(path):
try:
os.remove(path)
except (OSError):
shutil.rmtree(path)
def force_create_dir(dir):
if os.path.exists(dir):
remove(dir)
os.makedirs(dir)
def ensure_dir_exists(dir_or_file):
if '.' in os.path.basename(dir_or_file): # this looks like a file
dirname = os.path.dirname(dir_or_file)
else:
dirname = dir_or_file
if not os.path.exists(dirname):
os.makedirs(dirname)
def basename(f, noext=False):
name = os.path.basename(f)
if noext:
name = name.split('.')[0]
return name
|
# CAMERA_READY_FONT = 'Calibri'
CAMERA_READY_FONT = 'DejaVu Sans'
SAVE_DIR = os.path.expanduser('~/Desktop/bolt/figs/')
ensure_dir_exists(SAVE_DIR)
def save_fig(name):
plt.savefig(os.path.join(SAVE_DIR, name + '.pdf'), bbox_inches='tight')
def save_fig_png(name):
plt.savefig(os.path.join(SAVE_DIR, name + '.png'),
dpi=300, bbox_inches='tight')
def set_palette(ncolors=8): # use this to change color palette in all plots
pal = sb.color_palette("Set1", n_colors=ncolors)
sb.set_palette(pal)
return pal
def set_xlabels_bold(ax, which_lbl_idxs, rotation=0):
xlabels = list(ax.get_xticklabels())
for idx in which_lbl_idxs:
xlabels[idx].set_weight('bold')
ax.set_xticklabels(xlabels, rotation=rotation)
def popcount_fig(fake_data=False):
# sb.set_context("poster", rc={"figure.figsize": (8, 4)})
sb.set_context("talk")
set_palette(ncolors=2)
_, ax = plt.subplots(1, figsize=(6, 4))
# fake_data = data is None
if fake_data: # for prototyping / debugging this func
bolt_128d_8b = np.random.randn(10) + 12
bolt_128d_16b = np.random.randn(10) + 6
bolt_128d_32b = np.random.randn(10) + 3
popcnt_128d_8b = np.random.randn(10) + 10
popcnt_128d_16b = np.random.randn(10) + 5
popcnt_128d_32b = np.random.randn(10) + 2
dicts = []
dicts += [{'algo': 'Bolt', 'nbytes': '8B', 't': t} for t in bolt_128d_8b]
dicts += [{'algo': 'Bolt', 'nbytes': '16B', 't': t} for t in bolt_128d_16b]
dicts += [{'algo': 'Bolt', 'nbytes': '32B', 't': t} for t in bolt_128d_32b]
dicts += [{'algo': 'Binary Embedding', 'nbytes': '8B', 't': t} for t in popcnt_128d_8b]
dicts += [{'algo': 'Binary Embedding', 'nbytes': '16B', 't': t} for t in popcnt_128d_16b]
dicts += [{'algo': 'Binary Embedding', 'nbytes': '32B', 't': t} for t in popcnt_128d_32b]
df = pd.DataFrame.from_records(dicts)
else:
df = results.popcount_results()
print("df cols: ", df.columns)
# df.rename(columns={'algo': 'Algorithm'}, inplace=True)
df.rename(columns={'algo': ' '}, inplace=True) # hide from legend
sb.barplot(x='nbytes', y='t', hue=' ', ci=95, data=df, ax=ax)
ax.set_title('Distance Computations Per Second')
ax.set_xlabel('Encoding Length (Bytes)')
ax.set_ylabel('Millions of distances / sec')
plt.tight_layout()
save_fig('popcount_speed')
# plt.show()
def encoding_fig(fake_data=False, camera_ready=False):
sb.set_style('darkgrid')
# sb.set_context("talk", rc={"figure.figsize": (6, 6)})
sb.set_context("talk", rc={"figure.figsize": (7, 7)})
# sb.set_context("talk", rc={"figure.figsize": (8, 8)})
# sb.set_context("talk", rc={"figure.figsize": (9, 9)})
# fig, axes = plt.subplots(3, 1)
fig, axes = plt.subplots(3, 2)
# ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ']
ALGOS = ['Bolt', 'PQ', 'OPQ']
algo2offset = {'Bolt': 100, 'PQ': 50, 'OPQ': 30, 'PairQ': 25}
lengths = [64, 128, 256, 512, 1024]
# results_for_algos_lengths =
# sb.set_palette("Set1", n_colors=len(ALGOS))
set_palette(ncolors=len(ALGOS))
if fake_data:
data = np.random.randn(1, len(lengths), len(algo2offset))
for i, algo in enumerate(ALGOS):
data[:, :, i] += algo2offset[algo]
data /= np.arange(len(lengths)).reshape((1, -1, 1))
# ------------------------ data encoding
# 8B encodings
ax = axes[0, 0]
# sb.tsplot(data=data, condition=condition, time=lengths, ax=ax)
sb.tsplot(data=data, condition=None, time=lengths, ax=ax)
# ax.set_title(prefix + ' Encoding Speed, 8B codes')
ax.set_title('Data Encoding Speed', y=1.02)
# 16B encodings
data /= 2
ax = axes[1, 0]
sb.tsplot(data=data, condition=None, time=lengths, ax=ax)
# 32B encodings
data /= 2
ax = axes[2, 0]
sb.tsplot(data=data, condition=None, time=lengths, ax=ax)
# ------------------------ query encoding
data *= 8
data += np.random.randn(*data.shape) * 5
# 8B encodings
ax = axes[0, 1]
sb.tsplot(data=data, condition=None, time=lengths, ax=ax)
# ax.set_title(prefix + ' Encoding Speed')
ax.set_title('Query Encoding Speed', y=1.03, fontsize=16)
# 16B encodings
data /= 2
ax = axes[1, 1]
sb.tsplot(data=data, condition=None, time=lengths, ax=ax)
# 32B encodings
data /= 2
ax = axes[2, 1]
sb.tsplot(data=data, condition=ALGOS, time=lengths, ax=ax)
else: # real data
NBYTES_LIST = [8, 16, 32]
df = results.encode_results()
df_x = df[df['task'] == 'encode_x']
df_q = df[df['task'] == 'encode_q']
dfs = [df_x, df_q]
# print df_x
# return
# dfs = [results.encode_data_results(), results.encode_lut_results()]
ax_cols = [axes[:, 0], axes[:, 1]]
for df, ax_col in zip(dfs, ax_cols): # for each col in subplots
for b, nbytes in enumerate(NBYTES_LIST): # for each row in subplots
ax = ax_col[b]
plot_df = df.loc[df['nbytes'] == nbytes]
plot_df = plot_df.loc[plot_df['algo'].isin(ALGOS)]
sb.tsplot(value='y', condition='algo', unit='trial', time='D',
data=plot_df, ax=ax, ci=95, n_boot=500)
# data=plot_df, ax=ax, legend=False, ci=95, n_boot=500)
# ------------------------ legend
ax = axes.ravel()[-1]
leg_lines, leg_labels = ax.get_legend_handles_labels()
# ax.legend_.remove()
# leg_lines, leg_labels = leg_lines[:len(ALGOS)], leg_labels[:len(ALGOS)]
plt.figlegend(leg_lines, leg_labels, loc='lower center',
ncol=len(ALGOS), labelspacing=0)
# ------------------------ postproc + save plot
for ax in axes.ravel():
ax.set_yscale("log")
ax.legend_.remove()
ax.set_ylim(5e3, 2e7)
if camera_ready:
# axes[0, 0].set_title('Data Encoding Speed', x=.45, y=1.03, fontsize=16)
# axes[0, 1].set_title('Query Encoding Speed', x=.45, y=1.03, fontsize=16)
axes[0, 0].set_title('Data Encoding Speed', x=.49, y=1.03, fontsize=18)
axes[0, 1].set_title('Query Encoding Speed', x=.5, y=1.03, fontsize=18)
else:
axes[0, 0].set_title('Data Encoding Speed', y=1.03, fontsize=16)
axes[0, 1].set_title('Query Encoding Speed', y=1.03, fontsize=16)
# for ax in axes[0, :].ravel():
# ax.set_title('Vector Length')
for ax in axes[:-1, :].ravel():
# ax.xaxis.set_visible(False)
plt.setp(ax.get_xticklabels(), visible=False)
ax.set_xlabel('', labelpad=-10)
for ax in axes[-1, :].ravel():
# ax.set_xlabel('Vector Length')
ax.set_xlabel('Vector Length', labelpad=7)
for ax in axes[:, 0]:
if camera_ready:
# ax.set_ylabel('Vectors Encoded / s ', fontsize=12)
ax.set_ylabel('Vectors Encoded / s', fontsize=13)
else:
ax.set_ylabel('Vectors Encoded / s')
# only bottom row gets xlabels
for ax in axes[:-1, :].ravel():
# plt.setp(ax.get_xticklabels(), visible=False)
ax.set_xlabel('', labelpad=-10)
# show byte counts on the right
fmt_str = "{}B Encodings"
# if camera_ready:
# fmt_str += ' '
for i, ax in enumerate(axes[:, 1].ravel()):
ax.yaxis.set_label_position('right')
ax.set_ylabel(fmt_str.format((2 ** i) * 8), labelpad=10, fontsize=15)
plt.tight_layout()
plt.subplots_adjust(bottom=.15)
if camera_ready:
save_fig_png('encoding_speed') # bypass mpl truetype pdf ineptitude
else:
save_fig('encoding_speed')
# plt.show()
# if data_enc:
# save_fig('encoding_speed_data')
# else:
# save_fig('encoding_speed_query')
# plt.show()
def query_speed_fig(fake_data=False, fname='query_speed', with_matmuls=True,
camera_ready=False):
# experiment params: fixed N = 100k, D = 256, Q = 1024;
# layout: rows = 8B, 16B, 32B; bar graph in each row
# alternative: plot in each row vs batch size
# algos: Bolt; PQ; OPQ; PairQ; Matmul, batch={1, 16, 64, 256}
sb.set_context("talk")
# sb.set_style("white") # adds border (spines) we have to remove
sb.set_style("darkgrid")
# if camera_ready: # white style overwrites our fonts
# matplotlib.rcParams['font.family'] = CAMERA_READY_FONT
set_palette(ncolors=8)
# fig, axes = plt.subplots(3, 1, figsize=(6, 8))
fig, axes = plt.subplots(3, 1, figsize=(6, 8), dpi=300)
if fake_data: # for debugging
ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ',
# 'Matmul Batch 1', 'Matmul Batch 16', 'Matmul Batch 64', 'Matmul Batch 256']
# 'Matmul Batch1', 'Matmul Batch16', 'Matmul Batch64', 'Matmul Batch256']
'Matmul 1', 'Matmul 16', 'Matmul 64', 'Matmul 256']
algo2offset = {'Bolt': 100, 'PQ': 50, 'OPQ': 30, 'PairQ': 25,
# 'Matmul Batch 1': 1, 'Matmul Batch 16': 16,
# 'Matmul Batch 64': 64, 'Matmul Batch 256': 256}
# 'Matmul Batch1': 1, 'Matmul Batch16': 16,
# 'Matmul Batch64': 64, 'Matmul Batch256': 256}
'Matmul 1': 1, 'Matmul 16': 16, 'Matmul 64': 64,
'Matmul 256': 256}
for i, nbytes in enumerate([8, 16, 32]):
bytes_str = '{}B'.format(nbytes)
dicts = []
for algo in ALGOS:
dps = np.random.randn(10) + 256 / nbytes
dps += algo2offset[algo] / nbytes
dicts += [{'algo': algo, 'nbytes': bytes_str, 'y': y} for y in dps]
df = pd.DataFrame.from_records(dicts)
else:
# ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ', 'Matmul 1', # 'Matmul 16',
# 'Matmul 64', 'Matmul 256', 'Matmul 1024']
if with_matmuls:
# ALGOS = ['Bolt', 'Binary Embedding', 'PQ', 'OPQ',
ALGOS = ['Bolt', 'PQ', 'OPQ',
'Matmul 1024', 'Matmul 256', 'Matmul 1']
else:
ALGOS = ['Bolt', 'Binary Embedding', 'PQ', 'OPQ']
df = results.query_speed_results()
df['y'] = df['y'] / 1e9 # convert to billions
print("df cols: ", df.columns)
df.rename(columns={'algo': ' '}, inplace=True) # hide from legend
# ax = sb.barplot(x='x', y='y', hue=' ', ci=95, data=df, ax=axes[i])
for i, nbytes in enumerate([8, 16, 32]):
bytes_str = '{}B'.format(nbytes)
data = df[df['nbytes'] == nbytes]
ax = sb.barplot(x='nbytes', y='y', hue=' ', hue_order=ALGOS, ci=95,
# data=data, ax=axes[i])
# data=data, ax=axes[i], errwidth=10)
data=data, ax=axes[i], capsize=.0004)
# data=data, ax=axes[i], capsize=.0004, errwidth=6)
# ------------------------ clean up / format axes
for ax in axes[:-1]:
# remove x labels except for bottom axis
plt.setp(ax.get_xticklabels(), visible=False)
ax.get_xaxis().set_visible(False)
end = .5 * (len(ALGOS) / float((len(ALGOS) + 2)))
start = -end
tick_positions = np.linspace(start + .02, end - .05, len(ALGOS))
if camera_ready:
tick_positions[0] += .02
tick_positions[2] += .02
tick_positions[3] += .01
for ax in axes:
ax.set_xlim([start - .02, end + .02])
if camera_ready:
# ax.set_ylabel('Billions of\nDistances/s', y=.4,
# ax.set_ylabel('Billions of\nDistances/s', y=.5,
ax.set_ylabel('Billion\nDistances/s', y=.49, # .5 = centered ?
family=CAMERA_READY_FONT)
else:
ax.set_ylabel('Billions of Distances/s')
ax.legend_.remove()
if not fake_data:
ax.set_ylim(0, 2.5)
# add byte counts on the right
fmt_str = "{}B Encodings"
for i, ax in enumerate(axes):
ax2 = ax.twinx()
sb.despine(ax=ax2, top=True, left=True, bottom=True, right=True)
ax2.get_xaxis().set_visible(False)
# ax2.get_yaxis().set_visible(False) # nope, removes ylabel
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.yaxis.set_label_position('right')
if camera_ready:
# ax2.set_ylabel(fmt_str.format((2 ** i) * 8), y=.39,
ax2.set_ylabel(fmt_str.format((2 ** i) * 8),
labelpad=10, fontsize=14, family=CAMERA_READY_FONT)
else:
ax2.set_ylabel(fmt_str.format((2 ** i) * 8), labelpad=10, fontsize=15)
# ------------------------ have bottom / top axes print title, x info
if camera_ready:
# axes[0].set_title('Distance Computations per Second', x=.39, y=1.02)
# axes[0].set_title('Distance Computations per Second', x=.42, y=1.02,
# family=CAMERA_READY_FONT)
axes[0].set_title('Distance Computations per Second', y=1.02,
family=CAMERA_READY_FONT, fontsize=15)
else:
axes[0].set_title('Distance Computations per Second', y=1.02)
# axes[-1].set_xticks(tick_positions)
for ax in axes:
axes[-1].set_xticks(tick_positions)
ax.set_xlim(-.4, .4) # no idea why this makes the bars fit right...
xlabels = ["\n".join(name.split(' ')) for name in ALGOS]
if not camera_ready:
for i, lbl in enumerate(xlabels):
if '\n' in lbl:
# shift label up by adding another line
xlabels[i] = xlabels[i] + '\n'
# xlabels = ["\nBatch".join(name.split(' Batch')) for name in ALGOS]
# xlabels = ALGOS
axes[-1].set_xticklabels(xlabels, rotation=70)
if camera_ready:
# axes[-1].tick_params(axis='x', which='major', pad=15)
# axes[-1].tick_params(axis='x', which='major', pad=13)
axes[-1].tick_params(axis='x', which='major', pad=4)
# axes[-1].set_xticklabels(xlabels, rotation=70, y=-.02)
# else:
# axes[-1].set_xticklabels(xlabels, rotation=70)
# if camera_ready:
# axes[-1].set_xlabel("", labelpad=10)
# else:
axes[-1].set_xlabel("", labelpad=-20)
# plt.setp(axes[-1].get_xlabel(), visible=False) # doesn't work
# ------------------------ show / save plot
# plt.tight_layout()
plt.tight_layout()
if camera_ready:
plt.subplots_adjust(hspace=.18)
# save_fig(fname)
# MPL conversion to pdf is selectively braindead for just this plot; it
# lays things out horribly in a way that doesn't match the results
# of show() at all. Just export as high-density png as a workaround
# plt.savefig(os.path.join(SAVE_DIR, fname + '.png'),
# dpi=300, bbox_inches='tight')
save_fig_png(fname)
# plt.show()
def query_speed_poster_fig(fname='query_speed', with_matmuls=True, defense=True):
# experiment params: fixed N = 100k, D = 256, Q = 1024;
# layout: rows = 8B, 16B, 32B; bar graph in each row
# alternative: plot in each row vs batch size
# algos: Bolt; PQ; OPQ; PairQ; Matmul, batch={1, 16, 64, 256}
sb.set_context("talk")
# sb.set_style("whitegrid")
sb.set_style("darkgrid")
# if camera_ready: # white style overwrites our fonts
# matplotlib.rcParams['font.family'] = CAMERA_READY_FONT
set_palette(ncolors=8)
# fig, axes = plt.subplots(3, 1, figsize=(6, 8))
# fig, axes = plt.subplots(3, 1, figsize=(6, 8), dpi=300)
# fig, axes = plt.subplots(2, 1, figsize=(6, 6), dpi=300)
if defense:
fig, axes = plt.subplots(2, 1, figsize=(6, 8), dpi=300)
else:
fig, axes = plt.subplots(2, 1, figsize=(4, 6), dpi=300)
# ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ', 'Matmul 1', # 'Matmul 16',
# 'Matmul 64', 'Matmul 256', 'Matmul 1024']
if with_matmuls:
# ALGOS = ['Ours', 'Binary Embedding', 'PQ', 'OPQ',
# 'Matmul Batch=1024', 'Matmul Batch=256', 'Matmul Batch=1']
# ALGOS = ['Ours', 'Matmul Batch=1024', 'Matmul Batch=256', 'Matmul Batch=1']
if defense:
ALGOS = ['Ours', 'PQ', 'OPQ', 'Matmul Batch=1024', 'Matmul Batch=256', 'Matmul Batch=1']
else:
ALGOS = ['Ours', 'Matmul Batch=1024', 'Matmul Batch=256', 'Matmul Batch=1']
else:
ALGOS = ['Bolt', 'Binary Embedding', 'PQ', 'OPQ']
df = results.query_speed_results()
df['y'] = df['y'] / 1e9 # convert to billions
print("df cols: ", df.columns)
df['algo'] = df['algo'].apply(lambda s: 'Ours' if s == 'Bolt' else s)
df['algo'] = df['algo'].apply(lambda s: 'Matmul Batch={}'.format(s.split()[1]) if 'Matmul' in s else s)
df.rename(columns={'algo': ' '}, inplace=True) # hide from legend
# ax = sb.barplot(x='x', y='y', hue=' ', ci=95, data=df, ax=axes[i])
# for i, nbytes in enumerate([8, 16, 32]):
for i, nbytes in enumerate([8, 16]):
data = df[df['nbytes'] == nbytes]
# ax = sb.barplot(x='nbytes', y='y', hue=' ', hue_order=ALGOS, ci=95,
ax = sb.barplot(x='nbytes', y='y', hue=' ', hue_order=ALGOS, ci='sd',
data=data, ax=axes[i], capsize=.0004)
# data=data, ax=axes[i])
# data=data, ax=axes[i], errwidth=10)
# data=data, ax=axes[i], capsize=.0004, errwidth=6)
# ------------------------ clean up / format axes
for ax in axes[:-1]:
# remove x labels except for bottom axis
plt.setp(ax.get_xticklabels(), visible=False)
ax.get_xaxis().set_visible(False)
end = .5 * (len(ALGOS) / float((len(ALGOS) + 2)))
start = -end
tick_positions = np.linspace(start + .02, end - .05, len(ALGOS))
tick_positions[0] += .02
tick_positions[2] += .02
tick_positions[3] += .01
for ax in axes:
ax.set_xlim([start - .02, end + .02])
if defense:
ax.set_ylabel('Billion Products/s', y=.49, # .5 = centered ?
family=CAMERA_READY_FONT)
else:
ax.set_ylabel('Billion Activations/s', y=.49, # .5 = centered ?
family=CAMERA_READY_FONT)
ax.legend_.remove()
ax.set_ylim(0, 2.5)
# add byte counts on the right
# fmt_str = "{}B Encodings"
sb.set_style("white") # adds border (spines) we have to remove
for i, ax in enumerate(axes):
ax2 = ax.twinx()
sb.despine(ax=ax2, top=True, left=True, bottom=True, right=True)
ax2.get_xaxis().set_visible(False)
# ax2.get_yaxis().set_visible(False) # nope, removes ylabel
ax2.yaxis.set_label_position('right')
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
# lbl = fmt_str.format((2 ** i) * 8)
# lbl = {0: 'Fastest Setting', 1: 'Higher Accuracy', 2: 'Highest Accuracy'}[i]
lbl = {0: '8B Encodings', 1: '16B Encodings', 2: '32B Encodings'}[i]
ax2.set_ylabel(lbl,
labelpad=10, fontsize=14, family=CAMERA_READY_FONT)
# ------------------------ have bottom / top axes print title, x info
# axes[0].set_title('Activations Computed per Second', y=1.02,
axes[0].set_title('Activations Computed / Second', y=1.04,
family=CAMERA_READY_FONT, fontsize=15)
# axes[-1].set_xticks(tick_positions)
for ax in axes:
axes[-1].set_xticks(tick_positions)
ax.set_xlim(-.4, .4) # no idea why this makes the bars fit right...
xlabels = ["\n".join(name.split(' ')) for name in ALGOS]
# xlabels = ["\nBatch".join(name.split(' Batch')) for name in ALGOS]
# xlabels = ALGOS
axes[-1].set_xticklabels(xlabels, rotation=70)
# get and set them again so we can make the first one bold; can't make
# it bold beforehand because need a tick lbl object, not a string
xlabels = list(axes[-1].get_xticklabels())
xlabels[0].set_weight('bold')
axes[-1].set_xticklabels(xlabels, rotation=70)
axes[-1].tick_params(axis='x', which='major', pad=4)
axes[-1].set_xlabel("", labelpad=-20)
# plt.setp(axes[-1].get_xlabel(), visible=False) # doesn't work
# plt.setp(axes[-1].get_xticks(), visible=False)
ax.xaxis.set_ticks_position('none')
# ------------------------ show / save plot
# plt.tight_layout()
plt.tight_layout()
plt.subplots_adjust(hspace=.18)
# save_fig(fname)
# MPL conversion to pdf is selectively braindead for just this plot; it
# lays things out horribly in a way that doesn't match the results
# of show() at all. Just export as high-density png as a workaround
# plt.savefig(os.path.join(SAVE_DIR, fname + '.png'),
# dpi=300, bbox_inches='tight')
save_fig_png(fname + ('_defense' if defense else ''))
# plt.show()
def matmul_fig(fake_data=False, fname='matmul', camera_ready=False):
# two line graphs
# lines in both top and bottom = bolt {8,16,32}B, matmul
# just use square mats of power-of-two lengths cuz best case for matmuls
# in top one, one mat already encoded and Bolt just has to do queries
# in bottom one, Bolt has encode one of the mats as data before queries
sb.set_style('darkgrid')
sb.set_context("talk")
# sb.set_palette("Set1", n_colors=len(ALGOS))
pal = set_palette(ncolors=8)
fig, axes = plt.subplots(2, 1, figsize=(6, 6))
# axes = axes.reshape((2, 1))
    if fake_data: # for debugging / prototyping fig
SIZES = np.array([64, 128, 256, 512, 1024, 2048, 4096], dtype=np.float32)
matmul_times = (SIZES ** 2.5).reshape((-1, 1)) # strassen-ish scaling
bolt_times = ((SIZES ** 3) / 100 + 400).reshape((-1, 1))
# pretend we had 5 trials; each trial gets a col, so rows are lengths
matmul_times = np.tile(matmul_times, (1, 5))
bolt_times = np.tile(bolt_times, (1, 5))
matmul_times += np.random.randn(*matmul_times.shape) * SIZES.T.reshape((-1, 1)) / 10.
bolt_times += np.random.randn(*bolt_times.shape) * SIZES.T.reshape((-1, 1)) / 10.
matmul_times /= 1e9
bolt8_times = bolt_times / 2e9
bolt16_times = bolt_times / 1e9
bolt32_times = bolt_times / .5e9
dicts = []
ALGOS = ['Bolt 8B', 'Bolt 16B', 'Bolt 32B', 'Floats (BLAS)']
algo_times = [bolt8_times, bolt16_times, bolt32_times, matmul_times]
for all_times, algo in zip(algo_times, ALGOS):
for sz, times_for_sz in zip(SIZES, all_times):
dicts += [{'algo': algo, 'trial': i, 'size': sz, 'y': t}
for i, t in enumerate(times_for_sz)]
df = pd.DataFrame.from_records(dicts)
df_enc = df
df_no_enc = df
sb.tsplot(time='size', value='y', condition='algo', unit='trial',
data=df_no_enc, ax=axes[0], n_boot=100)
sb.tsplot(time='size', value='y', condition='algo', unit='trial',
data=df_enc, ax=axes[1], n_boot=100)
else:
# ALGOS = ['Bolt 8B', 'Bolt 16B', 'Bolt 32B', 'Floats']
# ALGOS = ['Bolt 32B', 'Bolt 32B + Encode', 'Floats']
# ALGOS = ['Bolt 8B', 'Bolt 32B', 'Bolt 32B + Encode', 'Floats']
ALGOS = ['Bolt 8B', 'Bolt 8B + Encode', 'Bolt 32B', 'Bolt 32B + Encode', 'Floats']
# df = results.matmul_results_square()
def clean_df(df):
df = df.loc[df['algo'].isin(ALGOS)]
non_encode_algos = ['Bolt 8B', 'Bolt 16B', 'Bolt 32B']
# rm_idxs = (df['algo'] == 'Bolt 32B') * (df['enc'] == 1)
rm_idxs = (df['algo'].isin(non_encode_algos)) * (df['enc'] == 1)
df = df.loc[~rm_idxs]
df['algo'].loc[df['algo'] == 'Floats'] = 'Floats (BLAS)'
return df
df = results.matmul_results(which='square')
df = clean_df(df)
colors = {
'Bolt 8B': pal[0], 'Bolt 8B + Encode': pal[0],
# 'Bolt 16B': pal[2], 'Bolt 16B + Encode': pal[2],
'Bolt 32B': pal[1], 'Bolt 32B + Encode': pal[1],
'Floats (BLAS)': 'k'
}
df_no_enc = df.loc[df['enc'] != 1]
sb.tsplot(time='size', value='y', condition='algo', unit='trial',
data=df_no_enc, ax=axes[0], n_boot=100, color=colors, linestyle='solid')
df_enc = df.loc[df['enc'] == 1]
sb.tsplot(time='size', value='y', condition='algo', unit='trial',
data=df_enc, ax=axes[0], n_boot=100, color=colors, linestyle='dotted', lw=4)
df = results.matmul_results(which='tall')
df = clean_df(df)
# print df
# return
# sb.tsplot(time='size', value='y', condition='algo', unit='trial',
# data=df, ax=axes[1], n_boot=100, color=colors)
df_no_enc = df.loc[df['enc'] != 1]
sb.tsplot(time='size', value='y', condition='algo', unit='trial',
data=df_no_enc, ax=axes[1], n_boot=100, color=colors, linestyle='solid')
df_enc = df.loc[df['enc'] == 1]
sb.tsplot(time='size', value='y', condition='algo', unit='trial',
data=df_enc, ax=axes[1], n_boot=100, color=colors, linestyle='dotted', lw=4)
# axes[1].set_ylim(1, 1e3)
# without encoding at the top; with encoding on the bottom
# sb.tsplot(time='size', value='y', condition='algo', unit='trial',
# sb.tsplot(time='size', value='y', condition='algo', unit='trial',
# data=df_no_enc, ax=axes[0], n_boot=100)
# sb.tsplot(time='size', value='y', condition='algo', unit='trial',
# data=df_enc, ax=axes[1], n_boot=100)
# ------------------------ legend
ax = axes.ravel()[-1]
leg_lines, leg_labels = ax.get_legend_handles_labels()
# for some reason, each algo appears 3x, so just take first
leg_lines, leg_labels = leg_lines[:len(ALGOS)], leg_labels[:len(ALGOS)]
plt.figlegend(leg_lines, leg_labels, loc='lower center',
                  ncol=len(ALGOS) // 2, labelspacing=0)
# ------------------------ axis cleanup / formatting
# axes[0].set_title('Matrix Multiply Time, One Matrix Encoded', y=1.03, fontsize=16)
# axes[1].set_title('Matrix Multiply Time, Neither Matrix Encoded', y=1.03, fontsize=16)
axes[0].set_title('Square Matrix Multiply Time', y=1.03, fontsize=16)
axes[1].set_title('Tall Matrix Multiply Time', y=1.03, fontsize=16)
for ax in axes.ravel():
ax.legend_.remove()
ax.set_xscale('log', basex=2)
ax.set_yscale('log', basey=10)
if not camera_ready:
ax.set_ylabel('Wall Time (s)')
if camera_ready:
axes[0].set_ylabel('Wall Time (s)')
axes[1].set_ylabel('Wall Time (s)', labelpad=10)
# for ax in axes[:-1].ravel():
# # plt.setp(ax.get_xticklabels(), visible=False)
# ax.set_xlabel('', labelpad=-10)
# axes[0].set_xlabel('Matrix Side Length, L', labelpad=-1)
axes[0].set_xlabel('Matrix Side Length')
axes[1].set_xlabel('Matrix Side Length')
# ------------------------ show / save plot
# plt.tight_layout(h_pad=1.4)
plt.tight_layout(h_pad=1.2)
plt.subplots_adjust(bottom=.23)
if camera_ready:
save_fig_png('matmul_speed') # bypass mpl truetype pdf ineptitude
else:
save_fig('matmul_speed')
# plt.show()
def recall_r_fig(fake_data=False, suptitle=None, l2=True, fname='l2_recall',
camera_ready=False):
# experiment params:
# datasets = Sift1M, Convnet1M, LabelMe22k, MNIST
# bytes = [8, 16, 32]
# R = 1, 10, 100, 1000
DATASETS = ['Sift1M', 'Convnet', 'LabelMe', 'MNIST']
ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ']
NBYTES_LIST = [8, 16, 32]
# Rs = [1, 10, 100, 1000]
Rs = [1, 5, 10, 50, 100, 500, 1000]
sb.set_style('darkgrid')
sb.set_context("talk")
set_palette(ncolors=len(ALGOS))
fig, axes = plt.subplots(4, 3, figsize=(6, 9))
if suptitle is None:
suptitle = 'Nearest Neighbor Recall'
if fake_data:
algo2offset = {'Bolt': -.1, 'PQ': -.2, 'OPQ': 0, 'PairQ': .1}
data = np.random.rand(1, len(Rs), len(algo2offset))
data = np.sort(data, axis=1) # ensure fake recalls are monotonic
for i, algo in enumerate(ALGOS):
recall = data[:, :, i] + algo2offset[algo]
data[:, :, i] = np.clip(recall, 0., 1.)
line_styles_for_nbytes = {8: '-', 16: '-', 32: '-'}
# plot the data
for d, dataset in enumerate(DATASETS):
axes_row = axes[d]
for b, nbytes in enumerate(NBYTES_LIST):
ax = axes_row[b]
if fake_data: # TODO handle real data
data_tmp = data * (.5 + nbytes / 64.) # slightly less
assert np.max(data_tmp) <= 1.
for algo in ALGOS:
x = Rs
sb.tsplot(data=data_tmp, condition=ALGOS, time=x, ax=ax, n_boot=100,
ls=line_styles_for_nbytes[nbytes])
else: # real data
DATASETS = ['Sift1M', 'Convnet1M', 'LabelMe', 'MNIST']
# ALGOS = ['PQ', 'OPQ']
# ALGOS = ['Bolt', 'Bolt No Quantize', 'PQ', 'OPQ']
ALGOS = ['Bolt', 'PQ', 'OPQ'] # for thesis defense
for d, dset in enumerate(DATASETS):
if l2:
path = os.path.join('../results/recall_at_r/', dset, 'summary.csv')
else:
path = os.path.join('../results/recall_at_r_mips/', dset, 'summary.csv')
df = pd.read_csv(path)
pq4 = (df['_algo'] == 'PQ') & (df['_code_bits'] == 4)
df.loc[pq4, '_algo'] = 'Bolt No Quantize'
# rm results with bolt rotations
bolt_rot = (df['_algo'] == 'Bolt') & (df['opq_iters'] > 0)
df = df.loc[~bolt_rot]
df.rename(columns={'_algo': 'algo'}, inplace=True)
all_nbytes = (df['_code_bits'] * df['_ncodebooks'] / 8).values
df['nbytes'] = all_nbytes.astype(np.int)
for b, nbytes in enumerate(NBYTES_LIST):
ax = axes[d, b]
data = df.loc[df['nbytes'] == nbytes]
for algo in ALGOS:
df_row = data.loc[data['algo'] == algo] # should be 1 row
if len(df_row) != 1:
print(df_row)
print("dset = ", dset)
print("algo = ", algo)
assert len(df_row) == 1
x = np.array(Rs)
y = [df_row['recall@{}'.format(r)].values[0] for r in x]
if camera_ready:
x = np.log10(x)
# print "recall plot: using X values: ", x # TODO rm
ax.plot(x, y, label=algo)
ax.legend()
# ------------------------ legend
ax = axes.ravel()[-1]
leg_lines, leg_labels = ax.get_legend_handles_labels()
# the legend handles can contain each algo more than once (one entry per
# plot call), so keep only the first len(ALGOS) handles
leg_lines, leg_labels = leg_lines[:len(ALGOS)], leg_labels[:len(ALGOS)]
plt.figlegend(leg_lines, leg_labels, loc='lower center',
ncol=len(ALGOS), labelspacing=0)
# ------------------------ axis cleanup / formatting
# configure all axes
for i, ax_row in enumerate(axes):
for j, ax in enumerate(ax_row):
title = "{}, {}B".format(DATASETS[i], NBYTES_LIST[j])
if camera_ready:
# x_pos = .44 if j == 0 else .45
# ax.set_title(title, x=x_pos, y=1.01, fontsize=15)
# ax.set_title(title, x=.45, y=1.01, fontsize=15)
# x_pos = .49 if j == 0 else .48
# ax.set_title(title, x=.49, y=1.01, fontsize=15)
ax.set_title(title, y=1.01, fontsize=15)
else:
ax.set_title(title, y=1.01)
ax.set_ylim([0, 1])
if not camera_ready:
ax.set_xscale("log")
# remove the per-axis legends; the shared figlegend above serves as the legend
if ax.legend_:
ax.legend_.remove()
# remove x labels except for bottom axis
for ax in axes[:-1, :].ravel():
plt.setp(ax.get_xticklabels(), visible=False)
# ax.get_xaxis().set_visible(False)
if axes.shape[1] > 1:
# hide y axis for axes not in left col
for i, ax in enumerate(axes[:, 1:].ravel()):
# pass
# ax.get_yaxis().set_visible(False)
ax.get_yaxis().set_ticklabels([], labelpad=-10, fontsize=1)
# ylabel left col
for i, ax in enumerate(axes[:, 0].ravel()):
ax.set_ylabel("Recall@R")
# xlabel bottom rows
if camera_ready:
for i, ax in enumerate(axes.ravel()):
ax.set_xticks([0, 1, 2, 3])
for i, ax in enumerate(axes[-1, :].ravel()):
ax.set_xticklabels(['0', '1', '2', '3'])
else:
for i, ax in enumerate(axes[-1, :].ravel()):
# no idea why we need the dummy tick at the beginning
ax.set_xticklabels(['', '0', '1', '2', ''])
axes[-1, -1].set_xticklabels(['', '0', '1', '2', '3'])
axes[-1, 1].set_xlabel("Log10(R)")
# ------------------------ show / save plot
# plt.tight_layout(h_pad=.02, w_pad=.02)
plt.tight_layout(w_pad=.02)
# plt.subplots_adjust(top=.88, bottom=.21, hspace=.4)
# if camera_ready:
# plt.suptitle(suptitle, fontsize=18)
# else:
# plt.suptitle(suptitle, fontsize=16)
plt.suptitle(suptitle, fontsize=16)
# plt.subplots_adjust(top=.91, bottom=.11)
plt.subplots_adjust(top=.91, bottom=.15) # defense
if camera_ready:
save_fig_png(fname) # mpl saving as pdf stupid; just bypass it
else:
save_fig(fname)
# plt.show()
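# Added illustration (not used by the plotting code above): the 'recall@{R}'
# columns read from summary.csv are assumed to mean the fraction of queries
# whose true nearest neighbor appears among the top R returned candidates.
# A minimal reference computation of that quantity (helper name is made up):
def _recall_at_r(true_nn_idxs, ranked_candidate_idxs, r):
    hits = [true_nn in candidates[:r]
            for true_nn, candidates in zip(true_nn_idxs, ranked_candidate_idxs)]
    return np.mean(hits)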
def distortion_fig(fake_data=False, l2=True, suptitle=None,
fname='l2_distortion', camera_ready=False):
# experiment params:
# datasets = Sift1M, Convnet1M, LabelMe22k, MNIST
# bytes = [8, 16, 32]
# layout: [ndatasets x nums_bytes] (ie, [4x3])
# each subplot a barplot showing corr with err bars
DATASETS = ['Sift1M', 'Convnet1M', 'LabelMe', 'MNIST']
ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ']
NBYTES_LIST = [8, 16, 32]
figsize = (6, 8)
sb.set_style('darkgrid')
sb.set_context("talk", rc={'xtick.major.pad': 3})
set_palette(ncolors=len(ALGOS))
# fig, axes = plt.subplots(4, 3)
# fig, axes = plt.subplots(4, 1, figsize=figsize)
fig, axes = plt.subplots(4, 1, figsize=figsize, dpi=300)
axes = axes.reshape((4, 1))
if suptitle is None:
suptitle = 'Quality of Approximate Distances'
# fake_data = data is None
if fake_data:
algo2offset = {'Bolt': .4, 'PQ': .3, 'OPQ': .45, 'PairQ': .5}
nfake_corrs = 10
dicts = []
for dataset in DATASETS:
for nbytes in NBYTES_LIST:
for algo in ALGOS:
if fake_data:
corrs = np.random.rand(nfake_corrs) / 2.
corrs += algo2offset[algo]
corrs *= .9 + .1 * nbytes / 32.
params = {'algo': algo, 'dataset': dataset,
'nbytes': '{}B'.format(nbytes)}
dicts += [dict(params, **{'corr': c}) for c in corrs]
# data = pd.DataFrame.from_records(dicts, index=[0])
data = pd.DataFrame.from_records(dicts)
# print data
# return
# ------------------------ plot the data
for d, dataset in enumerate(DATASETS):
# df_dataset = data.loc[data['dataset'] == dataset]
df = data.loc[data['dataset'] == dataset]
df.rename(columns={'algo': ' '}, inplace=True) # hide from legend
ax = axes.ravel()[d]
sb.barplot(x='nbytes', y='corr', hue=' ', data=df, ax=ax)
else:
DATASETS = ['Sift1M', 'Convnet1M', 'LabelMe', 'MNIST']
# ALGOS = ['Bolt', 'PQ', 'OPQ', 'PairQ']
# DATASETS = ['Convnet1M', 'MNIST']
# ALGOS = ['PQ', 'OPQ']
# ALGOS = ['PQ4', 'PQ', 'OPQ']
ALGOS = ['Bolt No Quantize', 'PQ', 'OPQ']
for d, dset in enumerate(DATASETS):
if l2:
path = os.path.join('../results/correlation_l2/', dset, 'all_results.csv')
else:
path = os.path.join('../results/correlation_dotprods/', dset, 'all_results.csv')
df = pd.read_csv(path)
print("path: ", path)
pq4 = (df['_algo'] == 'PQ') & (df['_code_bits'] == 4)
df.loc[pq4, '_algo'] = 'Bolt No Quantize'
bolt_rot = (df['_algo'] == 'Bolt') & (df['opq_iters'] > 0)
df = df.loc[~bolt_rot]
# print df.loc[df['_algo'] == 'PQ4']
# print df.loc[df['_algo'] == 'PQ4']
# return
df.rename(columns={'_algo': ' '}, inplace=True)
# df['nbytes'] = df['_code_bits'] * df['_ncodebooks'] / 8
all_nbytes = (df['_code_bits'] * df['_ncodebooks'] / 8).values
df['nbytes'] = ["{}B".format(b) for b in all_nbytes.astype(int)]
ax = axes.ravel()[d]
# sb.barplot(x='nbytes', y='corr', hue=' ', data=df, ax=ax)
sb.barplot(x='nbytes', y='corr', hue=' ', data=df, ax=ax, capsize=.0025)
ax.set_title(dset)
# ------------------------ legend
ax = axes.ravel()[-1]
leg_lines, leg_labels = ax.get_legend_handles_labels()
plt.figlegend(leg_lines, leg_labels, loc='lower center',
ncol=2, labelspacing=0)
# ------------------------ axis cleanup / formatting
# configure all axes
for i, ax in enumerate(axes.ravel()):
# title = "{}".format(DATASETS[i]) # TODO uncomment
# ax.set_title(title, y=1.01) # TODO uncomment
# ax.set_ylim([0, 1])
ax.set_ylim([.5, 1])
# ax.set_ylim([.75, 1])
ax.set_xlabel('', labelpad=-10)
if l2:
ax.set_ylabel('Correlation With\nTrue Distance')
else:
if camera_ready:
# ax.set_ylabel('Correlation With\nTrue Dot Product', y=.46, fontsize=13)
ax.set_ylabel('Correlation With\nTrue Dot Product', fontsize=13)
else:
ax.set_ylabel('Correlation With\nTrue Dot Product')
if ax.legend_:
ax.legend_.remove()
# ------------------------ show / save plot
# plt.tight_layout() # for fig size 6x9
plt.tight_layout(h_pad=.8)
# if camera_ready:
# plt.suptitle(suptitle, fontsize=17)
# else:
# plt.suptitle(suptitle, fontsize=16)
plt.suptitle(suptitle, fontsize=16)
# plt.subplots_adjust(top=.92, bottom=.08) # for fig size 6x9
# plt.subplots_adjust(top=.90, bottom=.08)
plt.subplots_adjust(top=.90, bottom=.1)
if camera_ready:
save_fig_png(fname) # bypass mpl truetype pdf ineptitude
else:
save_fig(fname)
# plt.show()
def kmeans_fig(data=None, fname='kmeans'):
# bolt vs raw floats, k=16 on top and k=32 on the bottom
ALGOS = ['Bolt', 'Matmul']
Ks = [16, 64]
sb.set_context("talk")
set_palette()
figsize = (6, 6)
fig, axes = plt.subplots(2, 1, figsize=figsize)
fake_data = data is None
if fake_data:
dicts = []
bolt_times = np.linspace(0, 100, 21)
bolt_errs = np.max(Ks) * np.exp(-.1 * bolt_times)
matmul_times = np.linspace(0, 100, 11)
matmul_errs = np.max(Ks) * np.exp(-.05 * matmul_times)
for i in range(3): # simulate multiple trials
# bolt_errs *= (1 + .2 * np.random.randn(*bolt_errs.shape))
# matmul_errs *= (1 + .2 * np.random.randn(*matmul_errs.shape))
bolt_errs += 5 * np.random.randn(*bolt_errs.shape)
matmul_errs += 5 * np.random.randn(*matmul_errs.shape)
bolt_errs = np.maximum(0, bolt_errs)
matmul_errs = np.maximum(0, matmul_errs)
bolt_errs = np.sort(bolt_errs)[::-1]
matmul_errs = np.sort(matmul_errs)[::-1]
for k in Ks:
for t, err in zip(bolt_times, bolt_errs):
dicts.append({'trial': i, 'algo': 'Bolt', 'k': k, 't': t, 'err': err / k})
for t, err in zip(matmul_times, matmul_errs):
dicts.append({'trial': i, 'algo': 'Matmul', 'k': k, 't': t, 'err': err / k})
# data = pd.DataFrame.from_records(dicts, index=[0])
data = pd.DataFrame.from_records(dicts)
# print data
# return
# ------------------------ plot curves
for i, k in enumerate(Ks):
ax = axes[i]
df = data.loc[data['k'] == k]
df.rename(columns={'algo': ' '}, inplace=True) # hide from legend
# sb.tsplot(value='err', condition=' ', unit='k', time='t', data=df, ax=ax, n_boot=100)
sb.tsplot(value='err', condition=' ', unit='trial', time='t', data=df,
ax=ax, ci=95, n_boot=500)
# ------------------------ configure axes
# configure all axes
for i, ax in enumerate(axes.ravel()):
title = "K-Means Convergence, K={}".format(Ks[i])
ax.set_title(title, y=1.01)
# ax.set_xlabel('', labelpad=-10)
ax.set_xlabel('Wall Time (s)')
# ax.set_ylabel('MSE')
ax.set_ylabel('Mean Squared Error')
axes[1].legend_.remove()
# ------------------------ show / save plot
plt.tight_layout()
# plt.tight_layout(h_pad=.8)
# plt.subplots_adjust(top=.92, bottom=.08) # for fig size 6x9
# plt.subplots_adjust(top=.90, bottom=.08)
# save_fig(fname)
plt.show()
def main():
# camera-ready can't deal with Type 3 fonts, which are what matplotlib
# uses by default; 42 is apparently TrueType fonts
# matplotlib.use("agg")
matplotlib.rcParams['pdf.fonttype'] = 42
# matplotlib.rcParams['font.family'] = 'Helvetica'
# matplotlib.rcParams['font.family'] = 'Lucida Sans Unicode'
# matplotlib.rcParams['font.family'] = 'Arial'
# matplotlib.rcParams['font.family'] = 'Arial Narrow'
# matplotlib.rcParams['font.family'] = 'Bitstream Vera Sans'
# matplotlib.rcParams['font.family'] = 'Calibri' # used this for cam ready
# matplotlib.rcParams['font.family'] = 'Gill Sans MT'
# matplotlib.rcParams['font.family'] = 'Franklin Gothic Book'
# matplotlib.rcParams['font.family'] = 'Herculanum'
# matplotlib.rcParams['font.family'] = 'DejaVu Sans'
matplotlib.rcParams['font.family'] = CAMERA_READY_FONT
# matplotlib.rcParams['font.sans-serif'] =
# matplotlib.rcParams['ps.fonttype'] = 42
# matplotlib.rcParams['text.usetex'] = True
# pal = set_palette()
# sb.palplot(pal)
# plt.show()
# ------------------------ begin actual plotting func calls
# encoding_fig(camera_ready=True)
# # # query_speed_fig(fname='query_speed_with_matmuls', camera_ready=False)
# query_speed_fig(fname='query_speed_with_matmuls', camera_ready=True)
# query_speed_poster_fig(fname='query_speed_poster')
# matmul_fig(camera_ready=True)
# # # recall_r_fig(suptitle='Nearest Neighbor Recall', fname='l2_recall')
recall_r_fig(suptitle='Nearest Neighbor Recall', fname='l2_recall',
camera_ready=True)
# # distortion_fig(fake_data=False, fname='l2_distortion')
# # distortion_fig(fake_data=False, fname='dotprod_distortion',
# # suptitle='Quality of Approximate Dot Products', l2=False)
# distortion_fig(fake_data=False, fname='dotprod_distortion',
# suptitle='Quality of Approximate Dot Products', l2=False,
# camera_ready=True)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# ================================================================ eigenvecs
# @numba.jit(nopython=True)  # not jitted, since compilation takes ~2.5s
# def top_principal_component(X, niters=50, return_eigenval=False,
def top_principal_component(X, niters=100, return_eigenval=False,
momentum=.9, nguesses=32, learning_rate=1.,
# allow_materialize=False):
allow_materialize_XtX=True):
N, D = X.shape
X = X.astype(np.float32)
X = X - X.mean(axis=0)
if nguesses > 1:
V = np.random.randn(D, nguesses).astype(X.dtype)
V /= np.linalg.norm(V, axis=0)
# norms = np.sqrt((V * V).sum(axis=0))
# V /= norms
prods = X.T @ (X @ V)
new_norms = np.linalg.norm(prods, axis=0)
# new_norms_sq = (prods * prods).sum(axis=0)
v = V[:, np.argmax(new_norms)]
# v = V[:, np.argmax(new_norms_sq)]
# print("picking v = ", v)
else:
v = np.random.randn(D).astype(X.dtype)
# v = np.ones(D, dtype=np.float32)
v = v.astype(np.float32)
prev_v = np.zeros_like(v)
v_momentum = np.zeros_like(v)
v /= (np.linalg.norm(v) + 1e-20)
materialize_cost = N * D * D
iter_cost_no_materialize = 2 * N * D
iter_cost_materialize = D * D
materialize = (materialize_cost + (niters * iter_cost_materialize) <
(niters * iter_cost_no_materialize))
materialize = materialize and allow_materialize_XtX
if materialize:
scaleby = np.max(np.linalg.norm(X, axis=0))
X *= 1. / scaleby # precondition by setting largest variance to 1
XtX = X.T @ X
for i in range(niters):
if materialize:
v = XtX @ v
else:
v = X.T @ (X @ v)
v *= 1. / (np.linalg.norm(v) + 1e-20)
# v_momentum = .9 * v_momentum + .5 * (v - prev_v)
# v_momentum = (.9 * v_momentum + (v - prev_v)).astype(np.float32)
v_momentum = momentum * v_momentum + learning_rate * (v - prev_v)
v += v_momentum
prev_v = v
# if i % 5 == 0:
# print("v: ", v)
v /= (np.linalg.norm(v) + 1e-20)
if return_eigenval:
new_v = X.T @ (X @ v)
lamda = np.linalg.norm(new_v)
return v, lamda
return v
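# Added sanity check (illustrative, not part of the original pipeline): the
# direction returned by top_principal_component should agree, up to sign, with
# the leading eigenvector of the centered Gram matrix X^T X. The helper name
# and sizes below are made up for the example.
def _check_top_principal_component(N=2000, D=32, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(N, D).astype(np.float32)
    v = top_principal_component(X)
    Xc = X - X.mean(axis=0)
    eigvals, eigvecs = np.linalg.eigh(Xc.T @ Xc)
    v_true = eigvecs[:, -1]  # eigenvector with the largest eigenvalue
    # absolute cosine similarity; should be close to 1
    return np.abs(v @ v_true) / (np.linalg.norm(v) * np.linalg.norm(v_true))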
def top_principal_component_v1(X, init='gauss', niters=100,
return_eigenval=False, batch_sz=-1,
momentum=.9, nguesses=32, verbose=0):
N, D = X.shape
X = X.astype(np.float32)
# Z = X - X.mean(axis=0)
if nguesses is not None and nguesses > 1:
assert init == 'gauss'
if init == 'ones':
v = np.ones(D, dtype=X.dtype)
elif init == 'gauss':
if nguesses > 1:
V = np.random.randn(D, nguesses).astype(X.dtype)
V /= np.linalg.norm(V, axis=0)
prods = X.T @ (X @ V)
new_norms = np.linalg.norm(prods, axis=0)
# print("new_norms: ", new_norms)
# assert np.min(eigenvals > -.001) # should be nonneg
v = V[:, np.argmax(new_norms)]
# print("picking v = ", v)
else:
v = np.random.randn(D).astype(X.dtype)
elif init == 'variance':
v = X.var(axis=0)
else:
v = init # can also pass in raw vector to initialize it with
if batch_sz < 1:
# batch_sz = min(2048, N)
# batch_sz = min(N, max(2048, N // 4))
# batch_sz = N // 4
batch_sz = N
nbatches = int(np.ceil(N / batch_sz))
prev_v = np.zeros_like(v)
v_momentum = np.zeros_like(v)
v /= (np.linalg.norm(v) + 1e-20)
for i in range(niters):
v = X @ v
# v /= (np.linalg.norm(v) + 1e-20)
v = X.T @ v
v /= (np.linalg.norm(v) + 1e-20)
# v_momentum = .9 * v_momentum + .5 * (v - prev_v)
v_momentum = .9 * v_momentum + (v - prev_v)
# v_momentum = .95 * v_momentum + (v - prev_v)
v += v_momentum
prev_v = v
if (verbose > 0) and (i % 5 == 0):
print("----")
print("mom: ", v_momentum)
print("v: ", v)
# print("v, prev_v dot prod: ", v.T @ prev_v)
# for i in range(niters):
# perm = np.random.permutation(nbatches)
# for b in range(nbatches):
# use_b = perm[b] # shuffle order of batches across iters
# start_idx = use_b * batch_sz
# end_idx = min(N, start_idx + batch_sz)
# Zb = Z[start_idx:end_idx]
# Xb = X[start_idx:end_idx]
# # print("v: ", v)
# # print("Z shape", Z.shape)
# # print("X shape", X.shape)
# # print("X @ v shape", (X @ v).shape)
# # update based on Adaptive Synaptogenesis Constructs Neural Codes
# # That Benefit Discrimination, theorem 1; could also use Oja's rule
# # v += ((Z - v) * (X @ v).reshape(-1, 1)).mean(axis=0)
# # v += ((Zb - v) * (Xb @ v).reshape(-1, 1)).sum(axis=0)
# dv = ((Zb - v) * (Xb @ v).reshape(-1, 1)).mean(axis=0)
# # dv /= np.linalg.norm(dv)
# v += dv
# # v_momentum = .5 * v_momentum + .5 * dv
# # v += v_momentum
# # v += dv + v_momentum
# v /= (np.linalg.norm(v) + 1e-20)
# # v += v_momentum + dv
# v += v_momentum
# v /= (np.linalg.norm(v) + 1e-20)
# v_momentum = .8 * v_momentum + .5 * (v - prev_v)
# # v_momentum = .9 * v_momentum + .1 * dv
# # v_momentum = .9 * v_momentum + (v - prev_v)
# # v_momentum = .5 * v_momentum + .5 * dv
# if i % 5 == 0:
# print("----")
# print("v_momentum: ", v_momentum)
# print("prev_v: ", prev_v)
# print("v: ", v)
# prev_v[:] = v
# # v_momentum = .1 * dv
v /= (np.linalg.norm(v) + 1e-20)
if return_eigenval:
new_v = X.T @ (X @ v)
lamda = np.linalg.norm(new_v)
return v, lamda
return v
def power_iteration(A, niters=5, init='ones', return_eigenval=False):
if init == 'ones':
v = A.sum(axis=0)
elif init == 'gauss':
v = np.random.randn(A.shape[1]).astype(A.dtype)
else:
v = init # can also pass in raw vector to initialize it with
for i in range(niters):
v /= (np.linalg.norm(v) + 1e-20)
v = (A * v).mean(axis=1)
lamda = np.linalg.norm(v)
v /= (lamda + 1e-20)
if return_eigenval:
return v, lamda
return v
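# Added usage note (illustrative): power_iteration expects a square, typically
# symmetric, stats matrix such as a covariance matrix. Because it updates with
# (A * v).mean(axis=1) rather than a plain matrix-vector product, the returned
# lamda is the dominant eigenvalue divided by A.shape[1]. Example names and
# sizes below are made up.
def _power_iteration_example(D=16, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.randn(D, D).astype(np.float32)
    A = A @ A.T  # symmetric PSD, so eigvalsh applies
    v, lamda = power_iteration(A, niters=50, return_eigenval=True)
    return lamda * D, np.linalg.eigvalsh(A)[-1]  # these should roughly agree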
def greedy_eigenvector_threshold(X, subspace_len, sample_how='deterministic',
stats_mat='cov', npower_iters=5,
nsubspaces=-1):
# nsubspaces=-1, col_stds=None):
assert sample_how in ('deterministic', 'importance')
# print("nsubspaces: ", nsubspaces)
# print("X.shape", X.shape)
# rm all-zero columns; if whole thing is zero, just return original order
# keep_cols = (X != 0).sum(axis=0) != 0
# nnz_cols = np.sum(keep_cols) != 0
# orig_all_idxs = np.arange(X.shape[1])
# if nnz_cols == 0:
# return orig_all_idxs
# else:
# orig_D = X.shape[1]
# X = X[:, keep_cols]
# numpy's built-in corr/cov functions are prone to producing NaNs here, so
# compute the stats manually
N, D = X.shape
if stats_mat == 'cov':
X = (X - X.mean(axis=0)) / np.sqrt(N)
cov = X.T @ X
elif stats_mat == 'corr':
X = X - X.mean(axis=0)
X = X / (np.linalg.norm(X, axis=0) + 1e-14)  # normalize by norms of the centered columns
cov = X.T @ X
else:
assert X.shape[0] == X.shape[1] # using X as the cov/corr mat
cov = X
# if col_stds is None:
# col_stds = np.std(cov, axis=0) + 1e-14
if nsubspaces is None or nsubspaces < 0:
nsubspaces = int(np.ceil(D / subspace_len))
all_idxs = np.arange(D)
if nsubspaces == 1:
return all_idxs
# find the indices to add to the next subspace
v = power_iteration(cov, niters=npower_iters)
if sample_how == 'deterministic':
idxs = top_k_idxs(np.abs(v), subspace_len, smaller_better=False)
elif sample_how == 'importance':
probs = np.abs(v) + 1e-14
probs /= np.sum(probs)
idxs = np.random.choice(all_idxs, size=subspace_len,
p=probs, replace=False)
# remove the indices we selected, and recurse
mask = np.ones(D, dtype=bool)
mask[idxs] = False
cov = cov[mask][:, mask]
# col_stds = col_stds[mask]
rest_of_perm = greedy_eigenvector_threshold(
cov, subspace_len, sample_how=sample_how, stats_mat=None,
npower_iters=npower_iters, nsubspaces=nsubspaces - 1)
# nsubspaces=nsubspaces - 1, col_stds=col_stds)
# convert indices from recursive call (which are in a different subspace
# since we excluded some indices) back to the original space
rest_of_perm = all_idxs[mask][rest_of_perm] # child call using subspace
# perm = np.array(list(idxs) + list(rest_of_perm))
perm = np.r_[idxs, rest_of_perm]
# if orig_D > D: # we removed some zero cols at the beginning
# perm = orig_all_idxs[keep_cols][perm]
if len(set(perm)) != len(perm): # TODO rm after debug
print("nsubspaces, subspace_len: ", nsubspaces, subspace_len)
print("size of set(all_idxs)", len(set(all_idxs)))
print("size of set(perm)", len(set(perm)))
assert len(set(perm)) == len(perm)
# import sys; sys.exit()
return perm
# v = 'ones' # in subseqent iters, will init with prev eigenva
# zero_cols =
# TODO ideally actually pull rows/cols out of cov to create a smaller
# matrix, so that later subspaces have less work to do; issue here is
# that it makes keeping track of what the indices mean pretty ugly
# if nsubspaces == 1:
# return all_idxs
# mask = np.zeros(D, dtype=np.bool)
# perm = []
# for m in range(nsubspaces - 1):
# v = power_iteration(cov, niters=npower_iters)
# if sample_how == 'deterministic':
# idxs = top_k_idxs(np.abs(v), subspace_len, smaller_better=False)
# elif sample_how == 'importance':
# probs = np.abs(v)
# probs /= np.sum(probs) + 1e-14
# # # TODO rm after debug
# # nnz = np.sum(probs > 0)
# # # if nnz < subspace_len:
# # print("m: {}/{}".format(m + 1, nsubspaces))
# # print("D:", D)
# # print("subspace_len:", subspace_len)
# # print("nnz:", nnz)
# try:
# idxs = np.random.choice(all_idxs, size=subspace_len,
# p=probs, replace=False)
# except ValueError:
# missing_idxs = set(all_idxs) - set(perm)
# perm += list(missing_idxs)
# break
# perm += list(idxs)
# # print("adding {} idxs".format(len(idxs)))
# # print("new len(perm)", len(perm))
# # rm selected indices from future consideration
# mask[:] = True
# mask[idxs] = False
# cov = cov[mask, mask]
# # # rm the selected indices from future consideration
# # mask[:] = False
# # mask[idxs] = True
# # # print("cov.shape: ", cov.shape)
# # # # print("mask.shape: ", mask.shape)
# # # print("idxs: ", idxs)
# # # print("mask: ", mask)
# # # print("cov[mask]\n", cov[mask])
# # # print("cov[:, mask]\n", cov[:, mask])
# # # cov[mask, mask] = 0
# # # print("nnz in mask: ", np.sum(mask != 0))
# # cov[mask] = 0
# # cov[:, mask] = 0
# # # print("cov[mask]\n", cov[mask])
# # # print("cov[:, mask]\n", cov[:, mask])
# # # print("idxs: ", idxs)
# # # print("cov[idxs].sum(axis=1)", cov[idxs].sum(axis=1))
# # # print("cov[:, idxs].sum(axis=0)", cov[:, idxs].sum(axis=0))
# # # print("nnz cols in cov: ", np.sum(cov.sum(axis=0) != 0))
# # # assert np.all(cov[mask] == 0)
# # # assert np.all(cov[:, mask] == 0)
# # add whatever indices are left over to last subspace; doing it this way
# # both saves us work and avoids breaking things when some columns are 0
# # as a result of earlier padding to multiple of subspace_len
# missing_idxs = set(all_idxs) - set(perm)
# if len(set(perm)) != len(perm): # TODO rm after debug
# print("nsubspaces, subspace_len: ", nsubspaces, subspace_len)
# print("size of set(all_idxs)", len(set(all_idxs)))
# print("size of set(perm)", len(set(perm)))
# print("number of missing_idxs", len(missing_idxs))
# # assert len(set(perm)) == len(perm)
# import sys; sys.exit()
# perm += list(missing_idxs)
# return np.array(perm)
# return all_idxs[::-1] # TODO rm after debug
# return np.roll(all_idxs, 1) # TODO rm after debug
# return all_idxs # TODO rm after debug
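# Added illustration: greedy_eigenvector_threshold returns a permutation of the
# D column indices, built subspace_len indices at a time by taking the columns
# with the largest loadings on the top eigenvector of the remaining covariance
# (or correlation) matrix. The helper name and sizes below are arbitrary.
def _greedy_eigenvector_threshold_example(N=512, D=24, subspace_len=4, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(N, D).astype(np.float32)
    perm = greedy_eigenvector_threshold(X, subspace_len)
    assert np.array_equal(np.sort(perm), np.arange(D))  # it really is a permutation
    return perm.reshape(-1, subspace_len)  # one row of indices per subspace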
# def ksparse_pca(X, ncomponents, k, algo='anydims'):
# def ksparse_pca(X, ncomponents, k, algo='noreuse'):
def ksparse_pca_v1(X, ncomponents, k, algo='1uniq'):
N, D = X.shape
k = int(k)
assert k < D # TODO run dense randomized PCA to handle this case
if algo == 'noreuse':
assert ncomponents * k <= D # TODO allow dims to be included >1 time
from sklearn.linear_model import OrthogonalMatchingPursuit
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=k, fit_intercept=False)
if algo == '1uniq':
assert k > 1
omp_initial = OrthogonalMatchingPursuit(
n_nonzero_coefs=k - 1, fit_intercept=False)
omp_final = OrthogonalMatchingPursuit(
n_nonzero_coefs=1, fit_intercept=False)
X = np.asfarray(X) # we'll be taking subsets of columns a lot
X_res = np.copy(X)
# allowed_idxs = set(np.arange(D))
allowed_idxs = np.arange(D)
# all_used_idxs = set()
V = None
for i in range(ncomponents):
# compute ideal projection, and resulting latent values
v = top_principal_component(X_res).reshape(D, 1)
if i > 0:
# gram-schmidt to orthogonalize; we don't get to use this exact
# vector anyway, so we don't care too much about numerical issues;
# also, principal component of residuals should be in a subspace
# that's orthogonal to V already, so might be able to prove this
# step isn't even necessary
prods = (V.T @ v).ravel() # (D x i+1).T @ (D x 1) = i+1 x 1
# print("prods shape: ", prods.shape)
# print("V shape: ", V.shape)
# print("v shape: ", v.shape)
# print("projections shape: ", (V * prods).shape)
v -= (V * prods).sum(axis=1, keepdims=True)
# V = np.hstack((V, v))
# V, R = np.linalg.qr(V)
# v = V[-1]
h = X_res @ v # N x 1
# compute sparse version of this ideal projection
# if False:
if algo == 'anydims':
v = omp.fit(X, h).coef_
elif algo == '1uniq': # 1 new idx -> possible to make orthogonal
assert k > 1
if i == 0:
v = omp.fit(X, h).coef_
# used_idxs = np.where(v != 0)[0]
# all_used_idxs += set(used_idxs)
else:
# compute k-1 sparse v
v = omp_initial.fit(X, h).coef_.ravel()
initial_nonzero_idxs = np.where(v != 0)[0]
# now find last zero to add, from set that have never been used
h_res = h - (X @ v)
use_allowed_idxs = set(allowed_idxs) - set(initial_nonzero_idxs)
use_allowed_idxs = np.array(sorted(list(use_allowed_idxs)))
X_subs = X[:, use_allowed_idxs]
soln = omp_final.fit(X_subs, h_res).coef_.ravel()
new_nonzero_idx = use_allowed_idxs[np.where(soln != 0)[0][0]]
# now take union of all these idxs to get nonzero idxs to use
use_idxs = list(initial_nonzero_idxs) + [new_nonzero_idx]
use_idxs = np.array(use_idxs)
# print("use_idxs", use_idxs)
# given nonzero idxs, least squares to get v
X_subs = X[:, use_idxs]
soln, _, _, _ = np.linalg.lstsq(X_subs, h, rcond=None)
v = np.zeros(D)
v[use_idxs] = soln.ravel()
else: # dims outright can't be reused
assert algo == 'noreuse'
X_subs = X[:, allowed_idxs]
assert len(allowed_idxs) >= k
soln = omp.fit(X_subs, h).coef_
v = np.zeros(D)
v[allowed_idxs] = soln
v = v.reshape(-1, 1)
v /= np.linalg.norm(v)
assert np.sum(v != 0) == k
# update V, ensuring that it remains orthonormal
# TODO the issue with this is that there doesn't necessarily *exist*
# a k-sparse vector that's orthogonal to all others picked so far; we
# could solve this by requiring dk <= D and making it impossible to
# select the same input dimension twice; that's more restrictive than
# is strictly needed though; what would be really nice is just writing
# our own OMP that you can tell to not select certain idxs, because
# that would create a combination that can't be made orthogonal;
# probably adapt https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/linear_model/omp.py#L407
if i > 0:
# if dims_can_be_reused:
if algo != 'noreuse':
nnz_idxs = np.where(v != 0)[0]
assert len(nnz_idxs) <= k
V_subs = V[nnz_idxs]
v_subs = v[nnz_idxs]
# niters_ortho = 1000
niters_ortho = 100
for it in range(niters_ortho):
if False:
prods = (V.T @ v).ravel()
# print("prods shape: ", prods.shape)
# print("V shape: ", V.shape)
# print("v shape: ", v.shape)
# print("projections shape: ", (V * prods).shape)
v -= (V * prods).sum(axis=1, keepdims=True)
v = v.ravel()
zero_out_idxs = np.argsort(np.abs(v))[:-k]
# keep_idxs = np.argsort(np.abs(v))[-k:]
# print("i, it: ", i, it)
# print(f"zeroing out {len(zero_out_idxs)} / {D} indices")
# print("nnz before zeroing: ", np.sum(v != 0))
# old_v = v
# v = np.zeros(D)
# v[keep_idxs] = old_v[keep_idxs]
v[zero_out_idxs] = 0
nnz = np.sum(v != 0)
# print("nnz: ", nnz)
# print("len v:", len(v))
# print("v", v)
assert nnz <= k
v /= np.linalg.norm(v)
v = v.reshape(-1, 1)
else:
prods = (V_subs.T @ v_subs).ravel()
v_subs -= (V_subs * prods).sum(axis=1, keepdims=True)
# v_subs = v_subs.ravel()
if np.max(np.abs(prods)) < 1e-5: # TODO add tol param
# print("breaking at iter: ", it)
break # pretty converged
v = v.ravel()
v[:] = 0
v[nnz_idxs] = v_subs.ravel()
v /= np.linalg.norm(v)
v = v.reshape(-1, 1)
if algo in ('noreuse', '1uniq'):
used_idxs = np.where(v != 0)[0]
# used_idxs = [np.argmax(np.abs(v))] # only eliminate 1 idx
allowed_idxs = set(allowed_idxs) - set(used_idxs)
allowed_idxs = np.array(sorted(list(allowed_idxs)))
if i > 0:
V = np.hstack((V, v))
else:
V = v
# now update X_res; residuals from best linear approx of input given H
H = X_res @ V
W, _, _, _ = np.linalg.lstsq(H, X, rcond=None)
X_res = X - (H @ W)
return V
# these are just for debugging
def _to_sparse(x):
x = x.ravel()
idxs = np.where(x != 0)[0]
vals = x[idxs]
idxs = idxs.reshape(-1, 1)
vals = vals.reshape(-1, 1)
# print("idxs: ", idxs)
# print("vals: ", vals)
return np.hstack((idxs, vals))
def _to_sparse_cols(A):
ars = [_to_sparse(A[:, j])[np.newaxis, ...]
for j in range(A.shape[1])]
return "\n".join([str(ar) for ar in ars])
# return np.concatenate(vecs, axis=0)
def ksparse_pca(X, ncomponents, k):
N, D = X.shape
k = int(k)
assert k < D # TODO run dense randomized PCA to handle this case
X = np.asfarray(X) # we'll be taking subsets of columns a lot
X_res = np.copy(X)
from sklearn.linear_model import OrthogonalMatchingPursuit
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=k, fit_intercept=False)
idx_counts = np.zeros(D, dtype=int)
V = None
for i in range(ncomponents):
v = top_principal_component(X_res).reshape(D, 1)
if i > 0:
# gram-schmidt to orthogonalize; we don't get to use this exact
# vector anyway, so we don't care too much about numerical issues;
# also, principal component of residuals should be in a subspace
# that's orthogonal to V already, so might be able to prove this
# step isn't even necessary
prods = (V.T @ v).ravel() # (D x i+1).T @ (D x 1) = i+1 x 1
v -= (V * prods).sum(axis=1, keepdims=True)
h = X_res @ v
# compute sparse version of this ideal projection
allowed_idxs = idx_counts < k
X_subs = X[:, allowed_idxs]
assert allowed_idxs.sum() >= k
soln = omp.fit(X_subs, h).coef_
v = np.zeros(D)
v[allowed_idxs] = soln
nnz_idxs = v != 0
v = v.reshape(-1, 1)
v /= np.linalg.norm(v)
assert np.sum(v != 0) == k
# TODO this is broken because having no dim used more than k times
# isn't actually a sufficient condition to ensure that cols of V
# can be made orthogonal; need to write our own OMP that can take
# in existing nnz pattern of V and not include dims that would result
# in too many linearly indep cols in that subspace
# update idx_counts
idx_counts[nnz_idxs] += 1
# make v orthogonal to existing cols in V
if V is None:
V = v
continue
V_subs = V[nnz_idxs].copy()
nonzero_cols = V_subs.sum(axis=0) != 0
if np.sum(nonzero_cols) < 1: # already orthogonal to existing V
V = np.hstack((V, v))
continue
V_subs_orig = V_subs.copy()
V_subs = V_subs[:, nonzero_cols]
# V_subs, _ = np.linalg.qr(V_subs)
debug = i == 7
v_subs = v[nnz_idxs].copy()
niters_ortho = 100 if not debug else 1
v_orig = v.copy()
v_subs_orig = v_subs.copy()
for it in range(niters_ortho):
prods = (V_subs.T @ v_subs).ravel()
projections = (V_subs * prods).sum(axis=1, keepdims=True)
# v_subs -= .999 * projections
v_subs -= projections
V_subs = V_subs[:, prods != 0]
# if debug:
# print("V_subs:\n", V_subs)
# print("projections: ", projections)
# # print("v_subs: ", projections)
# SELF: issue here is that cols of V_subs are not necessarily
# orthogonal, so projections can actually overcorrect and have
# exactly the wrong component come to dominate
v_subs /= np.linalg.norm(v_subs)
if np.max(np.abs(prods)) < 1e-5: # TODO add tol param
# print("breaking at iter: ", it)
break # pretty converged
if it == niters_ortho - 1:
print(f"k={k}, it={it}")
print(f"FAILED to get component {i} orthogonal")
print("prods:\n", prods)
# print("v before gram-schmidt:")
# print(_to_sparse(v_orig))
# print("V with nonzeros in subspace: ")
# V_subset = V[:, prods != 0]
# print("V_subset shape:", V_subset.shape)
# print(_to_sparse_cols(V_subset))
# # print(V[:, prods != 0])
# print("v:")
# print(_to_sparse(v))
print("projections:", projections)
print("V_subs_orig\n", V_subs_orig)
print("v_subs_orig\n", v_subs_orig)
print("V_subs:\n", V_subs[:, prods != 0])
print("v_subs:", v_subs)
import sys; sys.exit()
# print("got to ortho iteration: ", it)
# nonzero_count_idxs = np.where(idx_counts)[0]
# print("idx counts:\n", np.array(list(zip(nonzero_count_idxs, idx_counts[nonzero_count_idxs]))).T)
# print("picked idxs: ", np.where(nnz_idxs)[0])
v = v.ravel()
v[:] = 0
v[nnz_idxs] = v_subs.ravel()
v /= np.linalg.norm(v)
v = v.reshape(-1, 1)
V = np.hstack((V, v))
# now update X_res; residuals from best linear approx of input given H
H = X_res @ V
W, _, _, _ = np.linalg.lstsq(H, X, rcond=None)
X_res = X - (H @ W)
return V
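# Added helper (illustrative, for eyeballing ksparse_pca output): report the
# number of nonzeros in each k-sparse component and how far V is from having
# orthogonal columns (largest absolute off-diagonal entry of V^T V).
def _summarize_sparse_components(V):
    nnz_per_component = (V != 0).sum(axis=0)
    gram = V.T @ V
    max_abs_offdiag = np.max(np.abs(gram - np.diag(np.diag(gram))))
    return nnz_per_component, max_abs_offdiag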
def debug_orthogonalize():
# V = np.array([[0.0, 0.0, 0.0],
# [-0.72, -0.367, 0.55],
# [-0.463, 0.482, 0.0],
# [-0.391, -0.457, -0.797]])
# v = np.array([[-0.243],
# [-0.705],
# [-0.427],
# [-0.511]])
V = np.array([[0.759, 0.506, 0.41],
[-0.58, 0.811, 0.0733],
[0.0, 0.0, 0.0],
[-0.296, -0.294, 0.909]])
v = np.array([[0.729],
[-0.547],
[0.261],
[-0.318]])
print("V:\n", V)
print("v:\n", v)
V /= np.linalg.norm(V, axis=0)
print("V norms: ", np.linalg.norm(V, axis=0))
for it in range(1):
prods = (V.T @ v).ravel()
print("prods: ", prods)
projections = (V * prods).sum(axis=1, keepdims=True)
print("projections:\n", projections)
v -= projections
v /= np.linalg.norm(v)
# print("V:\n", V)
print("new v:\n", v)
# print("new prods: ", prods)
# prods = (V.T @ v).ravel()
# ================================================================ main
def main():
# debug_orthogonalize(); return # TODO rm
np.random.seed(12)
# np.random.seed(6)
# N, D = 20, 10
# N, D = 10000, 128
# N, D = 1000, 128
# N, D = 1000, 512
N, D = 10000, 64
# N, D = 10000, 32
# N, D = 10000, 16
# N, D = 10000, 8
# N, D = 10000, 10
d = int(D / 4)
# create X with low-rank structure
# np.random.seed(123)
X0 = np.random.randn(N, d).astype(np.float32)
X1 = np.random.randn(d, D).astype(np.float32)
X = X0 @ X1
X += np.random.randn(N, D).astype(np.float32) * .1
# X = np.random.randn(N, D).astype(np.float32)
# greedy_eigenvector_threshold(X, 3)
# greedy_eigenvector_threshold(X, 3, sample_how='deterministic')
# greedy_eigenvector_threshold(X, 3, sample_how='importance')
# greedy_eigenvector_threshold(X, 3, use_corr=True)
# k = 1 # k = 1 is really interesting; corresponds to just subsampling cols
# k = 2
# k = 4
# k = 6
k = 8
k = min(k, int(D / d))
V = ksparse_pca(X, d, k)
H = X @ V
W, _, _, _ = np.linalg.lstsq(H, X, rcond=None)
X_res = X - (H @ W)
print("X sq frob norm: ", np.sum(X * X))
print("X res sq frob norm: ", np.sum(X_res * X_res))
# print("nnz in V cols: ", (V != 0).sum(axis=0))
from sklearn.decomposition import PCA
pca = PCA(n_components=d).fit(X)
# print("pca explained variance: ", pca.explained_variance_)
V2 = pca.components_.T
H = X @ V2
W, _, _, _ = np.linalg.lstsq(H, X, rcond=None)
X_res = X - (H @ W)
print("pca X res sq frob norm: ", np.sum(X_res * X_res))
VtV = V.T @ V
VtV2 = V2.T @ V2
our_abs_offdiags = np.abs(VtV) - np.diag(np.diag(VtV))
pca_abs_offdiags = np.abs(VtV2) - np.diag(np.diag(VtV2))
print("our max abs off-diagonal, pca max abs off-diagonal:")
print(np.max(our_abs_offdiags))
print(np.max(pca_abs_offdiags))
print("our mean abs off-diagonal, pca mean abs off-diagonal:")
print(np.mean(our_abs_offdiags))
print(np.mean(pca_abs_offdiags))
# import matplotlib.pyplot as plt
# import seaborn as sb
# _, axes = plt.subplots(2)
# # sb.heatmap(V.T @ V, ax=axes[0], cmap='RdBu')
# # sb.heatmap(V2.T @ V2, ax=axes[1], cmap='RdBu')
# sb.heatmap(V.T @ V, ax=axes[0])
# sb.heatmap(V2.T @ V2, ax=axes[1])
# # axes[0].imshow(V.T @ V, interpolation='nearest', cmap='RdBu')
# # plt.colorbar(ax=axes[0])
# # axes[0].imshow(V2.T @ V2, interpolation='nearest', cmap='RdBu')
# # plt.colorbar(ax=axes[1])
# axes[0].set_title("our V.T @ V")
# axes[1].set_title("pca V.T @ V")
# plt.tight_layout()
# plt.show()
# print("our V.T @ V: ", V.T @ V)
# print("pca V.T @ V: ", V2.T @ V2)
# # # Z = X - X.mean(axis=0)
# # # pca = PCA(n_components=D).fit(X.T @ X)
# # pca = PCA(n_components=D).fit(X)
# # eigenvecs = pca.components_
# # print("PCA components:", eigenvecs)
# # print("PCA singular vals:", pca.singular_values_)
# # v, lamda = top_principal_component(X, return_eigenval=True, init='gauss')
# # print("v: ", v)
# # print("v * eigenvecs: ", (eigenvecs * v).sum(axis=1))
# from sklearn.decomposition import PCA
# import time
# # pca = PCA(n_components=D)
# # pca = PCA(n_components=D, svd_solver='full') # TODO rm
# pca = PCA(n_components=1, svd_solver='full') # TODO rm
# # pca = PCA(n_components=1, svd_solver='randomized')
# t = time.perf_counter()
# pca.fit(X)
# nsecs = time.perf_counter() - t
# print("pca time (s): ", nsecs)
# t = time.perf_counter()
# v = top_principal_component(X)
# nsecs = time.perf_counter() - t
# print("our time (s): ", nsecs)
# print("v * eigenvecs: ", (pca.components_ * v).sum(axis=1)[:5])
# # print("cossim between vecs: ", pca.components_ @ v)
if __name__ == '__main__':
np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)},
linewidth=100)
main()
|
# first 3 functions taken from:
# http://www.johnvinyard.com/blog/?p=268
# from .arrays import normalizeMat
def norm_shape(shape):
'''
Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional shapes.
Parameters
shape - an int, or a tuple of ints
Returns
a shape tuple
'''
try:
i = int(shape)
return (i,)
except TypeError:
# shape was not a number
pass
try:
t = tuple(shape)
return t
except TypeError:
# shape was not iterable
pass
raise TypeError('shape must be an int, or a tuple of ints')
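# Added examples (illustrative) of what norm_shape returns:
#   norm_shape(5)       -> (5,)
#   norm_shape((3, 4))  -> (3, 4)
#   norm_shape([3, 4])  -> (3, 4)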
def sliding_window(a, ws, ss=None, flatten=True):
'''
Return a sliding window over a in any number of dimensions
Parameters:
a - an n-dimensional numpy array
ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size
of each dimension of the window
ss - an int (a is 1D) or tuple (a is 2D or greater) representing the
amount to slide the window in each dimension. If not specified, it
defaults to ws.
flatten - if True, all slices are flattened, otherwise, there is an
extra dimension for each dimension of the input.
Returns
an array containing each n-dimensional window from a
'''
if None is ss:
# ss was not provided. the windows will not overlap in any direction.
ss = ws
ws = norm_shape(ws)
ss = norm_shape(ss)
# convert ws, ss, and a.shape to numpy arrays so that we can do math in every
# dimension at once.
ws = np.array(ws)
ss = np.array(ss)
shape = np.array(a.shape)
# ensure that ws, ss, and a.shape all have the same number of dimensions
ls = [len(shape), len(ws), len(ss)]
if 1 != len(set(ls)):
raise ValueError(
'a.shape, ws and ss must all have the same length. They were %s' % str(ls))
# ensure that ws is smaller than a in every dimension
if np.any(ws > shape):
raise ValueError(
'ws cannot be larger than a in any dimension.'
'a.shape was %s and ws was %s' % (str(a.shape), str(ws)))
# how many slices will there be in each dimension?
newshape = norm_shape(((shape - ws) // ss) + 1)
# the shape of the strided array will be the number of slices in each dimension
# plus the shape of the window (tuple addition)
newshape += norm_shape(ws)
# the strides tuple will be the array's strides multiplied by step size, plus
# the array's strides (tuple addition)
newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
strided = ast(a, shape=newshape, strides=newstrides)
if not flatten:
return strided
# Collapse strided so that it has one more dimension than the window. I.e.,
# the new array is a flat list of slices.
meat = len(ws) if ws.shape else 0
firstdim = (np.prod(newshape[:-meat]),) if ws.shape else ()
dim = firstdim + (newshape[-meat:])
return strided.reshape(dim)
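# Added usage example (illustrative): a 1D array of length 10 with window size
# 4 and stride 2 gives 4 overlapping windows, i.e. an output of shape (4, 4).
def _sliding_window_1d_example():
    x = np.arange(10)
    w = sliding_window(x, ws=4, ss=2)
    # w == [[0 1 2 3]
    #       [2 3 4 5]
    #       [4 5 6 7]
    #       [6 7 8 9]]
    return w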
def sliding_windows_of_elements(a, ss, ws=None, flatten=False):
return [sliding_window(row, ss, ws, flatten) for row in a]
def sliding_windows_of_rows(a, ss, ws=None, flatten=True):
windowsForRows = sliding_windows_of_elements(a, ss, ws, flatten)
return np.vstack(windowsForRows)
def _compute_from_seq(allSubseqs, n):
seqLens = np.array([subseqs.shape[0] for subseqs in allSubseqs])
startIdxs = np.r_[0, np.cumsum(seqLens)[:-1]]
endIdxs = np.r_[startIdxs[1:], n]
fromSeq = np.zeros(n)
for i in range(len(startIdxs)):
startIdx, endIdx = startIdxs[i], endIdxs[i]
fromSeq[startIdx:endIdx] = i
return fromSeq
# def flattened_subseqs_of_length(seqs, m, norm=None, return_from_seq=False):
# # TODO should have flags for returning X and allSubseqs, not just fromSeq
# # each element of seqs is assumed to be a 1D or 2D array
# origM = m
# step = 1
# origDims = len(seqs[0].shape)
# if origDims > 1:
# sampleDimensions = np.prod(seqs[0].shape[1:]) # num cols in mat
# m *= sampleDimensions # TODO don't enforce stepping in only one direction
# step *= sampleDimensions
# for i, seq in enumerate(seqs):
# seqs[i] = seq.flatten()
# allSubseqs = sliding_windows_of_elements(seqs, m, step)
# X = np.asarray(allSubseqs, dtype=np.float).reshape((-1, m)) # -1 = compute it
# Xnorm = normalizeMat(X, origM, how=norm)
# if not return_from_seq:
# return Xnorm, X, allSubseqs
# fromSeq = _compute_from_seq(allSubseqs, Xnorm.shape[0])
# return Xnorm, X, allSubseqs, fromSeq
# simple function for common case
def sliding_window_1D(x, windowLen, step=1):
return sliding_window(x, windowLen, step)
class InputTooSmallException(Exception):
pass
def extract_conv2d_windows(
X, filt_shape, strides=(1, 1), flatten_spatial_dims=False,
flatten_examples_dim=False, padding='valid'):
# TODO support NCHW format
orig_X_ndim = X.ndim
if X.ndim == 3:
X = X[np.newaxis, ...]
assert X.ndim == 4
assert len(filt_shape) == 2
assert len(strides) in (2, 4)
filt_shape = int(filt_shape[0]), int(filt_shape[1])
if filt_shape[0] > X.shape[1]: # TODO rm after debug
raise InputTooSmallException(
"filt_shape[0] ({}) > X.shape[1] ({})".format(
filt_shape[0], X.shape[1]))
if filt_shape[1] > X.shape[2]:
raise InputTooSmallException(
"filt_shape[1] ({}) > X.shape[2] ({})".format(
filt_shape[1], X.shape[2]))
padding = padding.lower()
assert padding in ('same', 'valid')
pad_nrows = filt_shape[0] - 1
pad_ncols = filt_shape[1] - 1
if padding == 'same' and (pad_nrows > 0 or pad_ncols > 0):
padded = np.zeros((X.shape[0], X.shape[1] + pad_nrows,
X.shape[2] + pad_ncols, X.shape[3]))
# NOTE: this should mirror the padding used by scipy and tensorflow;
# however, since their exact behavior is only vaguely documented, it
# may diverge from their behavior at any time. See the source code for
# scipy.signal.convolve2d or https://stackoverflow.com/a/38111069
row_start = int(pad_nrows) // 2
row_end = row_start + X.shape[1]
col_start = int(pad_ncols) // 2
col_end = col_start + X.shape[2]
# print("padding to shape:", padded.shape)
# print("padding: data row start, end:", row_start, row_end)
# print("padding: data col start, end:", col_start, col_end)
padded[:, row_start:row_end, col_start:col_end, :] = X
X = padded
filt_shape = (1, filt_shape[0], filt_shape[1], X.shape[3])
if len(strides) == 2:
strides = (1, strides[0], strides[1], X.shape[3])
windows = sliding_window(X, filt_shape, strides, flatten=False)
# strip out dims 3 and 4, since these are always 1; dim 3 is filter
# position across channels (only one position, since doing 2D conv),
# and dim 4 is all filter data across examples (not actually
# convolving across examples); e.g., for first 200 examples from
# MNIST with a 5x5 filter, goes from shape:
# (200, 24, 24, 1, 1, 5, 5, 1)
# to shape:
# (200, 24, 24, 5, 5, 1)
windows = windows.reshape(windows.shape[:3] + windows.shape[5:])
if flatten_spatial_dims:
# nexamples x npositions x filt_size
windows = windows.reshape(X.shape[0], -1, np.prod(filt_shape))
if flatten_examples_dim:
windows = windows.reshape(-1, *windows.shape[2:])
if orig_X_ndim == 3:
windows = windows.reshape(windows.shape[1:])
return windows
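# Added shape check (illustrative): for a batch of 2 single-channel 28x28
# images and a 5x5 filter with 'valid' padding and unit strides, the windows
# come back as (2, 24, 24, 5, 5, 1); with flatten_spatial_dims=True they are
# reshaped to (2, 576, 25), i.e. nexamples x npositions x filter size.
def _conv2d_windows_shape_example():
    X = np.zeros((2, 28, 28, 1), dtype=np.float32)
    windows = extract_conv2d_windows(X, filt_shape=(5, 5))
    flat = extract_conv2d_windows(X, filt_shape=(5, 5),
                                  flatten_spatial_dims=True)
    return windows.shape, flat.shape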
if __name__ == '__main__':
A = np.arange(24).reshape((6, 4))
print(A)
ws = 3
ss = 1
print(sliding_windows_of_rows(A, ws, ss))
|
#!/usr/bin/env python
_memory = Memory('.', verbose=0)
def _to_np(A):
return A.cpu().detach().numpy()
def _class_balanced_sampling(X, labels, k):
np.random.seed(123)
N, D = X.shape
# initialize centroids by sampling from each class in proportion to its
# relative frequency
uniq_lbls, counts = np.unique(labels, return_counts=True)
sort_idxs = np.argsort(counts)
uniq_lbls = uniq_lbls[sort_idxs]
counts = counts[sort_idxs]
remaining_counts = np.cumsum(counts[::-1])[::-1]
nremaining_samples = k
# C = np.empty((k, D), dtype=np.float32)
C = []
C_labels = []
# affinities = np.zeros((k, nclasses), dtype=np.float32)
for i, lbl in enumerate(uniq_lbls):
count = counts[i]
target_frac = count / remaining_counts[i]
target_nsamples = int(nremaining_samples * target_frac + .999)
target_nsamples = max(1, target_nsamples)
target_nsamples = min(target_nsamples, count)
nremaining_samples -= target_nsamples
lbl_idxs = np.where(labels == lbl)[0]
# print("lbl, count, num lbl idxs: ", lbl, count, len(lbl_idxs))
assert len(lbl_idxs) == count
use_idxs = np.random.choice(count, size=target_nsamples, replace=False)
keep_idxs = lbl_idxs[use_idxs]
C.append(X[keep_idxs])
C_labels.append(np.full(target_nsamples, lbl, dtype=np.int32))
# if len(C).shape[0] < k:
C = np.vstack(C).astype(np.float32)
# print("k, C shape", k, C.shape)
assert C.shape == (k, D)
C_labels = np.hstack(C_labels)
assert C_labels.shape == (k,)
return C, C_labels
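# Added illustration: with a 3-class dataset whose class frequencies are
# 150/100/50, asking for k=6 centroids returns 3, 2, and 1 samples from those
# classes respectively, with centroid labels aligned row-for-row with C.
# The helper name and sizes below are made up.
def _class_balanced_sampling_example():
    rng = np.random.RandomState(0)
    X = rng.randn(300, 8).astype(np.float32)
    labels = np.repeat([0, 1, 2], [150, 100, 50])
    C, C_labels = _class_balanced_sampling(X, labels, k=6)
    return C.shape, np.bincount(C_labels)  # (6, 8) and [3 2 1]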
def neighbor_compression(X, labels, k, niters=1000, rel_tol=.0001, verbose=1):
N, D = X.shape
# one-hot encode labels
nclasses = len(np.unique(labels))
# Y = np.zeros((N, nclasses), dtype=np.float32)
# for i in range(N):
# Y[i, labels[i]] = 1
# initialize centroids
# C, _ = kmeans(X, k)
C, C_labels = _class_balanced_sampling(X, labels, k)
# convert to torch tensors for optimization
# Y = torch.from_numpy(Y)
C = torch.tensor(C.T, requires_grad=True) # not from_numpy to allow grad
X = torch.from_numpy(X)
# having trained class affinities doesn't really seem to help
# Z = torch.randn(k, nclasses, requires_grad=True)
# print("uniq labels: ", np.unique(labels))
# print("uniq C_labels: ", np.unique(C_labels))
# one-hot encode labels
# build the one-hot matrix without requires_grad (in-place writes to a leaf
# tensor that requires grad raise a RuntimeError); Z below is the trainable copy
affinities = torch.zeros((k, nclasses), dtype=torch.float32)
for kk in range(k):
affinities[kk, C_labels[kk]] = 1
Z = affinities.clone().detach().requires_grad_(True)
labels = torch.from_numpy(labels)
loss_fn = torch.nn.CrossEntropyLoss()
# opt = optim.SGD([C], lr=.1, momentum=.9)
# opt = optim.SGD([C, affinities], lr=.1, momentum=.9)
opt = optim.SGD([C, Z], lr=.1, momentum=.9)
# X_norms_sq = (X * X).sum(dim=1).view(-1, 1)
prev_loss = np.inf
for t in range(niters):
temperature = np.log2(t + 2) # +2 so that it starts at 1 at t=0
# # compute distances to all centroids
# # prods = torch.mm(X, C)
# prods = X @ C
# # norms_sq = torch.sqrt(torch.sum(C * C))
# # dists_sq = prods - norms_sq
# # C_norms_sq = torch.sqrt(torch.sum(C * C, dim=0))
# # C_norms_sq = torch.sum(C * C, dim=0)
# # dists_sq = -2 * prods
# # dists_sq += X_norms_sq
# # dists_sq += C_norms_sq
# # neg_dists_sq = -dists_sq
# neg_dists_sq = prods
# # # update soft labels for each centroid
# # similarities = F.softmax(neg_dists_sq, dim=0) # N x C; sim to each sample
# # class_affinities = similarities.transpose(0, 1) @ Y # C x nclasses
# # class_affinities = F.softmax(class_affinities * temperature, dim=1)
# # update class assignments for inputs
# # centroid_similarities = F.softmax(neg_dists_sq * temperature, dim=1) # N x C
# centroid_similarities = F.softmax(neg_dists_sq, dim=1) # N x C
# # centroid_similarities = torch.exp(neg_dists_sq / np.sqrt(D))
# # logits = centroid_similarities @ class_affinities
# logits = centroid_similarities @ Z
# way simpler version
similarities = F.softmax(X @ C, dim=1) # N x C
# logits = similarities @ Z
affinities = F.softmax(Z * temperature, dim=1)
# affinities = F.softmax(affinities * temperature, dim=1)
logits = similarities @ affinities
# update params and print how we're doing
loss = loss_fn(logits, labels)
loss.backward()
opt.step()
opt.zero_grad()
loss_pyfloat = loss.item()
change = prev_loss - loss_pyfloat
thresh = rel_tol * min(loss_pyfloat, prev_loss)
if np.abs(change) < thresh:
if verbose > 0:
_, labels_hat = torch.max(logits, dim=1)
acc = torch.mean((labels == labels_hat).type(torch.float))
print("converged after {} iters with acc {:.3f}, loss: {:.4f}"
"".format(t + 1, acc.item(), loss_pyfloat))
break # converged
prev_loss = loss_pyfloat
if (verbose > 1) and ((t + 1) % 10 == 0):
_, labels_hat = torch.max(logits, dim=1)
acc = torch.mean((labels == labels_hat).type(torch.float)).item()
print("acc: ", acc)
print("{:.3f}".format(loss.item())) # convert to python float
# return _to_np(C).T, _to_np(class_affinities)
centroid_labels = np.argmax(_to_np(Z), axis=1)
return _to_np(C).T, centroid_labels
# or at least, ProtoNN without the L0 constraints; also with simultaneous
# updates to all param tensors instead of alternating
# def protonn(X, labels, k, niters=10000, verbose=1, gamma=1):
def protonn(X, labels, k, d=-1, niters=1000, verbose=1, gamma=-1):
N, D = X.shape
if gamma < 1:
gamma = 1. / np.sqrt(D)  # scaling by 1/sqrt(D) keeps optimization stable and avoids NaNs
# gamma = 1. / D
if d < 1:
d = D
labels = torch.from_numpy(labels)
# # one-hot encode labels
nclasses = len(np.unique(labels))
# Y = np.zeros((N, nclasses), dtype=np.float32)
# for i in range(N):
# Y[i, labels[i]] = 1
# initialize centroids
C, _ = kmeans(X, k)
W = np.random.randn(D, d).astype(np.float32)
# C = C @ W
# W = np.eye(D).astype(np.float32)[:, :d] # better than randn init
# convert to torch tensors for optimization
# Y = torch.from_numpy(Y)
C = torch.tensor(C.T, requires_grad=True) # not from_numpy to allow grad
X = torch.from_numpy(X)
W = torch.tensor(W, requires_grad=True) # not from_numpy to allow grad
# gamma = torch.tensor(np.array(gamma, dtype=np.float32), requires_grad=True)
# labels = torch.from_numpy(labels)
# print("W", W[:10])
# return None, None, None
Z = torch.randn(k, nclasses, requires_grad=True)
loss_fn = torch.nn.CrossEntropyLoss()
# opt = optim.SGD([C, Z], lr=.1, momentum=.9)
opt = optim.SGD([C, W, Z], lr=.1, momentum=.9)
# opt = optim.SGD([C, W, Z, gamma], lr=.1, momentum=.9)
nbatches = 1
batch_sz = int(np.ceil(N / nbatches))
# batch_sz = 1024
# nbatches = int(np.ceil(N / batch_sz))
# for t in range(1):
for t in range(niters):
perm = np.random.permutation(N)
for b in range(nbatches):
start_idx = b * batch_sz
end_idx = min(start_idx + batch_sz, N)
perm_idxs = perm[start_idx:end_idx]
X_batch = X[perm_idxs]
labels_batch = labels[perm_idxs]
# temperature = np.log2(t + 2) # +2 so that it starts at 1 at t=0
# compute distances to all centroids
# embeddings = X @ W
# embeddings = X_batch @ W
embeddings = X_batch
embed_norms_sq = (embeddings * embeddings).sum(dim=1, keepdim=True)
# prods = torch.mm(embeddings, C)
prods = embeddings @ C
C_norms_sq = torch.sum(C * C, dim=0)
dists_sq = -2 * prods
dists_sq += embed_norms_sq
dists_sq += C_norms_sq
neg_dists_sq = -dists_sq
# print("gamma: ", gamma)
# use_gamma = torch.clamp(gamma, max=1.)
# use_gamma = torch.clamp(gamma, 0, 1)
# use_gamma = F.sigmoid(gamma)
# gamma = torch.min((1, gamma))
# gamma = torch.max((0, gamma))
assert np.min(_to_np(dists_sq)) >= 0
assert np.max(_to_np(neg_dists_sq)) <= 0
similarities = torch.exp(gamma * neg_dists_sq) # N x C
# similarities = torch.exp(use_gamma * neg_dists_sq) # N x C
logits = similarities @ Z
# print("logits shape: ", logits.shape)
# print("logits shape: ", logits.shape)
# logits_np = _to_np(logits)
# print("dists_sq shape", dists_sq.shape)
# print("dists_sq", dists_sq[:10])
# print("C_norms_sq", C_norms_sq)
# print("embed_norms_sq", embed_norms_sq[:10])
# print("similarities", similarities[:10])
# print("logits", logits[:10])
# update soft labels for each centroid
# similarities = F.softmax(neg_dists_sq, dim=0) # N x C; sim to each sample
# class_affinities = similarities.transpose(0, 1) @ Y # C x nclasses
# class_affinities = F.softmax(class_affinities * temperature, dim=1)
# # update class assignments for inputs
# centroid_similarities = F.softmax(neg_dists_sq * temperature, dim=1) # N x C
# logits = centroid_similarities @ affinities
# update params and print how we're doing
# loss = loss_fn(logits, labels)
loss = loss_fn(logits, labels_batch)
# loss += .01 * (gamma * gamma).sum()
loss.backward()
opt.step()
opt.zero_grad()
# if (verbose > 0) and (t % 10 == 0):
# if (verbose > 0) and ((t + 1) % 10 == 0):
if (verbose > 0) and ((t + 1) % 10 == 0) and b == 0:
_, labels_hat = torch.max(logits, dim=1)
acc = torch.mean((labels[perm_idxs] == labels_hat).type(torch.float))
print("acc: ", acc)
print("{:.3f}".format(loss.item())) # convert to python float
# print("gamma: ", gamma.item())
return _to_np(C).T, _to_np(W), _to_np(Z)
@_memory.cache
def stochastic_neighbor_compression(X, labels, k, niters=1000,
gamma=-1, rel_tol=.0001, verbose=1):
N, D = X.shape
nclasses = len(np.unique(labels))
if gamma < 1:
gamma = 1
# gamma = 1. / np.sqrt(D) # makes it struggle less / not make NaNs
# gamma = 1. / D
# labels = torch.from_numpy(labels)
# C = np.random.randn(k, D).astype(np.float32)
C, C_labels = _class_balanced_sampling(X, labels, k)
# one-hot encode labels
affinities = torch.zeros((k, nclasses), dtype=torch.float32)
for kk in range(k):
affinities[kk, C_labels[kk]] = 1
# so that there's actual gradient flow
affinities += torch.randn(k, nclasses) * .1
# W = np.random.randn(D, D).astype(np.float32)
# C = C @ W
# W = np.eye(D).astype(np.float32) # better than randn init
# convert to torch tensors for optimization
# Y = torch.from_numpy(Y)
C = torch.tensor(C.T, requires_grad=True) # not from_numpy to allow grad
X = torch.from_numpy(X)
labels = torch.from_numpy(labels)
gamma = torch.tensor(np.array(gamma, dtype=np.float32))
# affinities = torch.from_numpy(affinities)
# print("labels shape: ", labels.shape)
# print("uniq labels: ", uniq_lbls)
# print("uniq label counts: ", counts)
# labels = labels.reshape(-1, 1)
# print("labels shape: ", labels.shape)
# W = torch.tensor(W, requires_grad=True) # not from_numpy to allow grad
# print("W", W[:10])
# return None, None, None
# Z = torch.randn(k, nclasses, requires_grad=True)
loss_fn = torch.nn.CrossEntropyLoss()
opt = optim.SGD([C], lr=.1, momentum=.9)
# opt = optim.SGD([C, Z], lr=.1, momentum=.9)
# opt = optim.SGD([C, gamma], lr=.1, momentum=.9)
nbatches = 1
batch_sz = int(np.ceil(N / nbatches))
# batch_sz = 1024
# nbatches = int(np.ceil(N / batch_sz))
# for t in range(50):
prev_loss = np.inf
converged = False
t = 0
while t < niters and not converged:
perm = np.random.permutation(N)
for b in range(nbatches):
if nbatches > 1:
start_idx = b * batch_sz
end_idx = min(start_idx + batch_sz, N)
perm_idxs = perm[start_idx:end_idx]
X_batch = X[perm_idxs]
labels_batch = labels[perm_idxs]
else:
X_batch = X
labels_batch = labels
# temperature = np.log2(t + 2) # +2 so that it starts at 1 at t=0
# compute distances to all centroids
# embeddings = X @ W
# embeddings = X_batch @ W
embeddings = X_batch
embed_norms_sq = (embeddings * embeddings).sum(dim=1, keepdim=True)
# prods = torch.mm(embeddings, C)
prods = embeddings @ C
C_norms_sq = torch.sum(C * C, dim=0)
dists_sq = -2 * prods
dists_sq += embed_norms_sq
dists_sq += C_norms_sq
neg_dists_sq = -dists_sq
# print("min dist sq: ", torch.min(dists_sq).item())
minval_dist_sq = torch.min(dists_sq).item()
if minval_dist_sq < -.01:
print("min dist sq: ", minval_dist_sq)
print("min C_norms_sq", torch.min(C_norms_sq).item())
print("min X_norms_sq", torch.min(embed_norms_sq).item())
print("dists_sq: ", dists_sq[:10])
assert minval_dist_sq >= -.01
# assert np.min(_to_np(dists_sq)) >= -1e-3
# assert np.max(_to_np(neg_dists_sq)) <= 1e-3
similarities = torch.exp(gamma * neg_dists_sq) # N x C
logits = similarities @ affinities
# logits = similarities @ Z
# print("logits shape: ", logits.shape)
# print("logits shape: ", logits.shape)
# print("dists_sq shape", dists_sq.shape)
# print("dists_sq", dists_sq[:10])
# print("C_norms_sq", C_norms_sq)
# print("embed_norms_sq", embed_norms_sq[:10])
# print("similarities", similarities[:10])
# print("logits", logits[:10])
# update params and print how we're doing
loss = loss_fn(logits, labels_batch)
# loss += gamma * gamma
loss.backward()
opt.step()
opt.zero_grad()
loss_pyfloat = loss.item()
change = prev_loss - loss_pyfloat
thresh = rel_tol * min(loss_pyfloat, prev_loss)
if np.abs(change) < thresh:
if verbose > 0:
_, labels_hat = torch.max(logits, dim=1)
labels_true = labels[perm_idxs] if nbatches > 1 else labels
acc = torch.mean(
(labels_true == labels_hat).type(torch.float))
print("converged after {} iters with acc {:.3f}, loss: {:.4f}" # noqa
"".format(t + 1, acc.item(), loss_pyfloat))
converged = True # converged
break
prev_loss = loss_pyfloat
# if (verbose > 0) and ((t + 1) % 10 == 0):
# if (verbose > 0) and ((t + 1) % 10 == 0) and b == 0:
if (verbose > 1) and (t % 10 == 0) and b == 0:
_, labels_hat = torch.max(logits, dim=1)
labels_true = labels[perm_idxs] if nbatches > 1 else labels
acc = torch.mean(
(labels_true == labels_hat).type(torch.float))
print("acc: {:.3f}".format(acc.item()))
print("{:.3f}".format(loss.item())) # convert to python float
# print("gamma: ", gamma.item())
t += 1
return _to_np(C).T, C_labels
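# Added illustration (an assumption about intended usage, not original code):
# one natural way to use the compressed set is to classify a query by the label
# of its nearest retained centroid. C is k x D as returned above, and
# centroid_labels / C_labels has one entry per row of C.
def _classify_with_centroids(X_test, C, centroid_labels):
    # squared Euclidean distance from every query to every centroid, N x k
    dists_sq = ((X_test[:, np.newaxis, :] - C[np.newaxis, :, :]) ** 2).sum(axis=-1)
    return centroid_labels[np.argmin(dists_sq, axis=1)]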
def linear_regression_log_loss(
X, Y, lamda=1, max_niters=1000, rel_tol=.0001, verbose=2):
N, D = X.shape
N, M = Y.shape
X = X.astype(np.float32)
Y = Y.astype(np.float32)
# initialize W to OLS solution
XtX = X.T @ X
XtX += lamda * np.eye(D) * np.std(X)  # ridge term; lamda scales the regularization strength
XtY = X.T @ Y
W = np.linalg.solve(XtX, XtY).astype(np.float32)
# W += np.random.randn(*W.shape)
X = torch.from_numpy(X)
Y = torch.from_numpy(Y)
W = torch.tensor(W, requires_grad=True)
# W = torch.randn(D, M, requires_grad=True)
# W += torch.randn(D, M, requires_grad=False)
opt = optim.SGD([W], lr=.1, momentum=.9)
# now optimize using pytorch
prev_loss = np.inf
for t in range(max_niters):
Y_hat = X @ W
diffs = Y - Y_hat
# errs = torch.floor(torch.abs(diffs))
# loss = torch.abs(diffs) # TODO rm
# loss = diffs * diffs
loss = torch.log2(1 + torch.abs(diffs))
# loss = torch.log2(1e-10 + torch.abs(diffs))
# loss *= (loss > 0).type(torch.float32)
loss = torch.mean(loss)
# loss = torch.max(loss, 0)
loss.backward()
opt.step()
opt.zero_grad()
loss_pyfloat = loss.item()
change = prev_loss - loss_pyfloat
thresh = rel_tol * min(loss_pyfloat, prev_loss)
if np.abs(change) < thresh:
if verbose > 0:
print("converged after {} iters with loss: {:.4f}".format(
t + 1, loss_pyfloat))
break # converged
prev_loss = loss_pyfloat
if (verbose > 1) and ((t + 1) % 10 == 0):
print("loss: {:.4f}".format(loss_pyfloat))
return _to_np(W)
def main():
# N, D = 10000, 20
N, D = 1000, 20
# niters = 1000
niters = 10000
X = np.random.randn(N, D).astype(np.float32)
# ------------------------ linear regression with weird loss
# M = 20
# Y = np.random.randn(N, M).astype(np.float32)
# linear_regression_log_loss(X, Y)
# ------------------------ neighbor compression
K = 16
nclasses = 5
# labels = torch.randint(nclasses, size=(N,))
# labels = _to_np(torch.randint(nclasses, size=(N,)))
labels = np.random.randint(nclasses, size=(N,))
# C, W, Z = protonn(X, labels, K, niters=niters) # significantly worse
C, centroid_labels = stochastic_neighbor_compression(X, labels, K, niters=niters)
# C, centroid_labels = neighbor_compression(X, labels, K, niters=niters)
print("centroid_labels:", centroid_labels)
print("C type, shape", type(C), C.shape)
print("done")
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# TODO this file is hideous (but necessarily so for deadline purposes...)
#
# Also, this file is tightly coupled to figs.py; it basically has a func
# for each figure func that spits out data in exactly the required form
import os

import numpy as np
import pandas as pd

MCQ_RESULTS_DIR = '../results/timing/'
MATMUL_RESULTS_DIR = '../results/matmul/'
def get_mcq_path(D, nbytes):
fname = 'mcq_D={}_M={}.txt'.format(D, nbytes)
return os.path.join(MCQ_RESULTS_DIR, fname)
class McqResults(object):
def __init__(self, path=None, D=None, nbytes=None):
if path is None:
path = get_mcq_path(D=D, nbytes=nbytes)
self.path = path
with open(self.path, 'r') as f:
self.lines = f.readlines()
self.stats = {line.split(':')[0].strip(): line.split(':')[1].strip()
for line in self.lines if ':' in line}
self.bolt_nbytes = int(self.stats['bolt M'])
self.pq_nbytes = int(self.stats['pq M'])
self.bolt_D = int(self.stats['bolt subvect_len']) * self.bolt_nbytes * 2
self.pq_D = int(self.stats['pq subvect_len']) * self.pq_nbytes
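        # bolt uses 4-bit codes (two codebooks per byte), hence the factor of
        # 2 in bolt_D; pq/opq use one 8-bit codebook per byte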
assert self.bolt_nbytes == self.pq_nbytes
assert self.bolt_D == self.pq_D
self.nbytes = self.bolt_nbytes
self.D = self.bolt_D
# check that file was named properly
expected_path = get_mcq_path(D=self.D, nbytes=self.nbytes)
if expected_path != path:
print("expected path, path = ", expected_path, path)
assert expected_path == path
def __str__(self): # for debugging
s = ""
sorted_keys = sorted(self.stats.keys())
for k in sorted_keys:
v = self.stats[k]
s += "'{}': '{}'\n".format(k, v)
return s
def _extract_thruput(profile_str):
result_strs = profile_str.split(':')[-1]
rep_strs = result_strs.strip(' ,').split(',')
thruput_parens = [s.strip(' ').split(' ')[1] for s in rep_strs]
return np.array([int(s.strip('()s/')) for s in thruput_parens])
def _extract_times(profile_str):
result_strs = profile_str.split(':')[-1]
rep_strs = result_strs.strip(' ,').split(',')
time_strs = [s.strip(' ').split(' ')[0] for s in rep_strs]
return np.array([float(s) for s in time_strs])
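# Example of the profile-line format both helpers parse (values illustrative):
#   _extract_times('foo (10x5): 2.456 (1302931596/s), 2.344 (1365187713/s),')
#     -> array([2.456, 2.344])
#   _extract_thruput(...same string...) -> array([1302931596, 1365187713])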
def popcount_results_256():
LENGTH = 256
popcnt_times = {}
popcnt_times[8] = '2.456 (1302931596/s), 2.344 (1365187713/s), 2.125 (1505882352/s), 2.829 (1131141746/s), 2.148 (1489757914/s), 2.167 (1476695892/s), 2.327 (1375161151/s), 2.145 (1491841491/s), 2.12 (1509433962/s), 2.112 (1515151515/s)'
popcnt_times[16] = '4.368 (732600732/s), 4.121 (776510555/s), 3.926 (815078960/s), 4.105 (779537149/s), 4.176 (766283524/s), 4.119 (776887594/s), 4.464 (716845878/s), 4.153 (770527329/s), 4.364 (733272227/s), 4.198 (762267746/s)'
popcnt_times[32] = '7.612 (420388859/s), 7.347 (435551925/s), 7.694 (415908500/s), 9.122 (350800263/s), 7.343 (435789186/s), 9.344 (342465753/s), 8.148 (392734413/s), 9.046 (353747512/s), 8.455 (378474275/s), 7.685 (416395575/s)'
bolt_times = {}
bolt_times[8] = '0.461 (2169197396/s), 0.456 (2192982456/s), 0.539 (1855287569/s), 0.53 (1886792452/s), 0.456 (2192982456/s), 0.452 (2212389380/s), 0.442 (2262443438/s), 0.438 (2283105022/s), 0.434 (2304147465/s), 0.547 (1828153564/s)'
bolt_times[16] = '0.894 (1118568232/s), 1.08 (925925925/s), 0.88 (1136363636/s), 0.877 (1140250855/s), 0.881 (1135073779/s), 0.847 (1180637544/s), 1.011 (989119683/s), 0.866 (1154734411/s), 0.984 (1016260162/s), 0.838 (1193317422/s)'
bolt_times[32] = '2.047 (488519785/s), 1.726 (579374275/s), 1.924 (519750519/s), 2.085 (479616306/s), 2.076 (481695568/s), 1.748 (572082379/s), 1.757 (569151963/s), 2.064 (484496124/s), 1.742 (574052812/s), 1.725 (579710144/s)'
out_dicts = []
algos = ['Bolt', 'Binary Embedding']
dicts = [bolt_times, popcnt_times]
for algo, d in zip(algos, dicts):
for nbytes, s in list(d.items()):
thruputs = _extract_thruput(s)
out_dicts += [{'algo': algo, 'nbytes': nbytes, 'length': LENGTH,
'trial': i, 'y': t} for i, t in enumerate(thruputs)]
return pd.DataFrame.from_records(out_dicts)
def encode_results():
dicts = []
for D in [64, 128, 256, 512, 1024]:
for nbytes in [8, 16, 32]:
res = McqResults(D=D, nbytes=nbytes)
abbrevs = ['bolt', 'pq', 'opq']
names = ['Bolt', 'PQ', 'OPQ']
for abbrev, name in zip(abbrevs, names):
# results for encoding data
key = abbrev + ' encode (10x5)'
thruputs = _extract_thruput(res.stats[key])
dicts += [{'task': 'encode_x', 'D': D, 'nbytes': nbytes,
'algo': name, 'trial': i, 'y': t}
for i, t in enumerate(thruputs)]
# results for encoding query
if abbrev == 'bolt':
key = abbrev + ' encode lut (10x5)'
else:
key = abbrev + ' encode lut float dist (10x5)'
thruputs = _extract_thruput(res.stats[key])
dicts += [{'task': 'encode_q', 'D': D, 'nbytes': nbytes,
'algo': name, 'trial': i, 'y': t}
for i, t in enumerate(thruputs)]
return pd.DataFrame.from_records(dicts)
def matmul_results(which='square'):
    if which == 'square':
        SIZES = [64, 128, 256, 512, 1024, 4096, 8192]
        data_fname = 'square_matmul_results.txt'
    elif which == 'tall':
        SIZES = [32, 64, 128, 256, 512, 1024]
        data_fname = 'tall_matmul_results.txt'
    else:
        raise ValueError("unrecognized matmul results subset: '{}'".format(which))
with open(MATMUL_RESULTS_DIR + data_fname) as f:
lines = f.readlines()
stats = {line.split(':')[0].strip(): line.split(':')[1].strip()
for line in lines if ':' in line}
dicts = []
# add in results from bolt
for nbytes in [8, 16, 32]:
prefix = 'bolt<{}>'.format(nbytes)
algo = 'Bolt {}B'.format(nbytes)
for sz in SIZES:
for enc in (0, 1): # don't vs do encode X at start
key = '{} encode={} matmul {} (10x5)'.format(prefix, enc, sz)
times = _extract_times(stats[key])
dicts += [{'algo': algo, 'size': sz, 'enc': enc, 'nbytes': nbytes,
'trial': i, 'y': t}
for i, t in enumerate(times)]
# also add in "encode" version of bolt
if enc:
enc_algo_name = algo + ' + Encode'
dicts += [{'algo': enc_algo_name, 'size': sz, 'enc': enc,
'nbytes': nbytes, 'trial': i, 'y': t}
for i, t in enumerate(times)]
# add in matmul results
for sz in SIZES:
key = 'matmul {} (10x5)'.format(sz)
times = _extract_times(stats[key])
dicts += [{'algo': 'Floats', 'size': sz, 'enc': -1, 'trial': i, 'y': t}
for i, t in enumerate(times)]
return pd.DataFrame.from_records(dicts)
def encode_data_results_256():
LENGTH = 256
pq_times = {}
pq_times[8] = 'pq encode (10x5): 6.696 (149342/s), 6.688 (149521/s), 6.639 (150625/s), 6.648 (150421/s), 6.711 (149009/s), 6.67 (149925/s), 6.634 (150738/s), 6.684 (149611/s), 6.663 (150082/s), 6.67 (149925/s),'
pq_times[16] = 'pq encode (10x5): 7.181 (139256/s), 7.194 (139004/s), 7.179 (139295/s), 7.146 (139938/s), 7.123 (140390/s), 7.123 (140390/s), 7.162 (139625/s), 7.148 (139899/s), 7.116 (140528/s), 7.193 (139024/s),'
pq_times[32] = 'pq encode (10x5): 8.089 (123624/s), 8.175 (122324/s), 8.117 (123198/s), 8.096 (123517/s), 8.48 (117924/s), 8.071 (123900/s), 8.126 (123061/s), 8.123 (123107/s), 8.069 (123931/s), 8.21 (121802/s),'
opq_times = {}
opq_times[8] = 'opq encode (10x5): 8.441 (118469/s), 8.385 (119260/s), 8.368 (119502/s), 8.39 (119189/s), 8.355 (119688/s), 8.388 (119217/s), 8.383 (119289/s), 8.412 (118877/s), 8.401 (119033/s), 8.391 (119175/s),'
opq_times[16] = 'opq encode (10x5): 8.88 (112612/s), 8.786 (113817/s), 8.874 (112688/s), 8.834 (113199/s), 8.874 (112688/s), 8.902 (112334/s), 8.899 (112372/s), 8.925 (112044/s), 8.867 (112777/s), 8.907 (112271/s),'
opq_times[32] = 'opq encode (10x5): 9.761 (102448/s), 9.718 (102901/s), 9.717 (102912/s), 9.726 (102817/s), 9.908 (100928/s), 9.796 (102082/s), 10.164 (98386/s), 9.792 (102124/s), 9.735 (102722/s), 9.729 (102785/s),'
bolt_times = {}
bolt_times[8] = 'bolt encode (10x5): 3.43 (2915451/s), 3.586 (2788622/s), 3.421 (2923121/s), 3.408 (2934272/s), 3.409 (2933411/s), 3.406 (2935995/s), 3.407 (2935133/s), 3.412 (2930832/s), 3.411 (2931691/s), 3.409 (2933411/s),'
bolt_times[16] = 'bolt encode (10x5): 3.93 (2544529/s), 3.687 (2712232/s), 3.826 (2613695/s), 4.007 (2495632/s), 3.705 (2699055/s), 3.976 (2515090/s), 3.709 (2696144/s), 3.681 (2716653/s), 3.693 (2707825/s), 3.802 (2630194/s),'
bolt_times[32] = 'bolt encode (10x5): 5.039 (1984520/s), 4.591 (2178174/s), 5.081 (1968116/s), 4.697 (2129018/s), 4.591 (2178174/s), 4.763 (2099517/s), 4.832 (2069536/s), 4.805 (2081165/s), 4.961 (2015722/s), 4.665 (2143622/s),'
out_dicts = []
algos = ['Bolt', 'PQ', 'OPQ']
dicts = [bolt_times, pq_times, opq_times]
for algo, d in zip(algos, dicts):
for nbytes, s in list(d.items()):
thruputs = _extract_thruput(s)
out_dicts += [{'algo': algo, 'nbytes': nbytes, 'length': LENGTH,
'trial': i, 'y': t} for i, t in enumerate(thruputs)]
return pd.DataFrame.from_records(out_dicts)
def encode_lut_results():
pq_times = {}
pq_times[8] = 'pq encode lut float dist (10x5): 64.986 (153879/s), 65.014 (153813/s), 65.155 (153480/s), 64.808 (154301/s), 66.593 (150165/s), 67.68 (147754/s), 69.399 (144094/s), 66.702 (149920/s), 66.234 (150979/s), 66.286 (150861/s),'
pq_times[16] = 'pq encode lut float dist (10x5): 67.893 (147290/s), 67.484 (148183/s), 69.608 (143661/s), 68.083 (146879/s), 70.958 (140928/s), 69.423 (144044/s), 72.129 (138640/s), 74.984 (133361/s), 70.837 (141169/s), 74.967 (133392/s),'
pq_times[32] = 'pq encode lut float dist (10x5): 78.809 (126889/s), 79.34 (126039/s), 78.565 (127283/s), 79.171 (126308/s), 78.372 (127596/s), 78.689 (127082/s), 78.094 (128050/s), 80.031 (124951/s), 93.367 (107104/s), 81.896 (122106/s),'
opq_times = {}
opq_times[8] = 'opq encode lut float dist (10x5): 155.68 (64234/s), 159.49 (62698/s), 160.64 (62249/s), 158.21 (63205/s), 159.37 (62747/s), 159.29 (62778/s), 160.81 (62186/s), 158.5 (63090/s), 155.22 (64423/s), 158.98 (62901/s),'
opq_times[16] = 'opq encode lut float dist (10x5): 170.42 (58677/s), 168.41 (59380/s), 169.12 (59129/s), 171.53 (58298/s), 167.32 (59766/s), 168.96 (59185/s), 170.43 (58676/s), 170.7 (58581/s), 169.86 (58870/s), 160.43 (62333/s),'
opq_times[32] = 'opq encode lut float dist (10x5): 170.86 (58527/s), 175.79 (56885/s), 169.86 (58870/s), 180.3 (55464/s), 172.46 (57983/s), 171.66 (58254/s), 167.23 (59799/s), 168.19 (59457/s), 164.47 (60801/s), 168.31 (59413/s),'
bolt_times = {}
bolt_times[8] = 'bolt encode lut (10x5): 2.907 (3439972/s), 2.911 (3435245/s), 2.902 (3445899/s), 2.899 (3449465/s), 2.907 (3439972/s), 2.908 (3438789/s), 2.908 (3438789/s), 2.906 (3441156/s), 2.906 (3441156/s), 2.908 (3438789/s),'
bolt_times[16] = 'bolt encode lut (10x5): 2.957 (3381805/s), 2.953 (3386386/s), 2.957 (3381805/s), 2.943 (3397893/s), 2.949 (3390979/s), 2.95 (3389830/s), 2.946 (3394433/s), 3.103 (3222687/s), 2.944 (3396739/s), 3.029 (3301419/s),'
bolt_times[32] = 'bolt encode lut (10x5): 2.511 (3982477/s), 2.51 (3984063/s), 2.587 (3865481/s), 2.508 (3987240/s), 2.847 (3512469/s), 2.508 (3987240/s), 2.508 (3987240/s), 2.769 (3611412/s), 2.729 (3664345/s), 2.556 (3912363/s),'
out_dicts = []
algos = ['Bolt', 'PQ', 'OPQ']
dicts = [bolt_times, pq_times, opq_times]
for algo, d in zip(algos, dicts):
for nbytes, s in list(d.items()):
thruputs = _extract_thruput(s)
out_dicts += [{'algo': algo, 'nbytes': nbytes, 'y': t} for t in thruputs]
return pd.DataFrame.from_records(out_dicts)
def query_speed_results():
    # NOTE: all throughputs in this function (except the matmul ones) need to
    # be multiplied by 100,000 because we're reporting distances/sec, not
    # time to query 100k points
bolt_times = {}
bolt_times[8] = '4.385 (22805/s), 4.385 (22805/s), 4.408 (22686/s), 4.385 (22805/s), 5.117 (19542/s), 4.378 (22841/s), 4.392 (22768/s), 4.393 (22763/s), 4.381 (22825/s), 4.383 (22815/s)'
bolt_times[16] = '8.268 (12094/s), 9.807 (10196/s), 8.389 (11920/s), 8.681 (11519/s), 8.711 (11479/s), 8.293 (12058/s), 9.797 (10207/s), 8.32 (12019/s), 9.767 (10238/s), 9.499 (10527/s)'
bolt_times[32] = '19.385 (5158/s), 17.215 (5808/s), 18.612 (5372/s), 18.117 (5519/s), 17.323 (5772/s), 18.436 (5424/s), 18.979 (5268/s), 16.274 (6144/s), 19.696 (5077/s), 17.026 (5873/s)'
popcnt_times = {}
popcnt_times[8] = '2.456 (1302931596/s), 2.344 (1365187713/s), 2.125 (1505882352/s), 2.829 (1131141746/s), 2.148 (1489757914/s), 2.167 (1476695892/s), 2.327 (1375161151/s), 2.145 (1491841491/s), 2.12 (1509433962/s), 2.112 (1515151515/s)'
popcnt_times[16] = '4.368 (732600732/s), 4.121 (776510555/s), 3.926 (815078960/s), 4.105 (779537149/s), 4.176 (766283524/s), 4.119 (776887594/s), 4.464 (716845878/s), 4.153 (770527329/s), 4.364 (733272227/s), 4.198 (762267746/s)'
popcnt_times[32] = '7.612 (420388859/s), 7.347 (435551925/s), 7.694 (415908500/s), 9.122 (350800263/s), 7.343 (435789186/s), 9.344 (342465753/s), 8.148 (392734413/s), 9.046 (353747512/s), 8.455 (378474275/s), 7.685 (416395575/s)'
pq_times = {}
pq_times[8] = '36.499 (2739/s), 35.729 (2798/s), 36.521 (2738/s), 37.924 (2636/s), 37.079 (2696/s), 36.444 (2743/s), 36.115 (2768/s), 36.955 (2705/s), 35.913 (2784/s), 40.354 (2478/s)'
pq_times[16] = '79.482 (1258/s), 82.546 (1211/s), 84.992 (1176/s), 84.996 (1176/s), 86.218 (1159/s), 84.495 (1183/s), 90.637 (1103/s), 82.164 (1217/s), 85.954 (1163/s), 82.255 (1215/s)'
pq_times[32] = '214.85 (465/s), 217.41 (459/s), 212.49 (470/s), 210.75 (474/s), 211.12 (473/s), 212.54 (470/s), 209.91 (476/s), 219.95 (454/s), 212.97 (469/s), 213.44 (468/s)'
opq_times = {}
opq_times[8] = '38.653 (2587/s), 36.958 (2705/s), 37.684 (2653/s), 35.902 (2785/s), 38.032 (2629/s), 39.511 (2530/s), 42.321 (2362/s), 38.94 (2568/s), 39.224 (2549/s), 39.06 (2560/s)'
opq_times[16] = '82.636 (1210/s), 82.401 (1213/s), 88.424 (1130/s), 86.649 (1154/s), 83.329 (1200/s), 82.719 (1208/s), 82.281 (1215/s), 80.581 (1240/s), 80.777 (1237/s), 81.107 (1232/s)'
opq_times[32] = '221.61 (451/s), 230.01 (434/s), 241.68 (413/s), 222.39 (449/s), 215.13 (464/s), 215.49 (464/s), 212.27 (471/s), 213.95 (467/s), 213.96 (467/s), 217.79 (459/s)'
# 1, 16 -> rowmajor times; 64, 256, 1024 -> colmajor times; (ie, use times from best layout)
matmul1_times = '12.063 (8289811/s), 11.231 (8903926/s), 10.283 (9724788/s), 10.864 (9204712/s), 10.492 (9531071/s), 10.877 (9193711/s), 10.79 (9267840/s), 10.85 (9216589/s), 11.041 (9057150/s), 10.647 (9392317/s)'
matmul16_times = '21.707 (73708941/s), 21.38 (74836295/s), 21.71 (73698756/s), 21.54 (74280408/s), 21.454 (74578167/s), 21.989 (72763654/s), 22.486 (71155385/s), 22.048 (72568940/s), 23.18 (69025021/s), 21.771 (73492260/s)'
matmul64_times = '56.496 (113282356/s), 55.488 (115340253/s), 54.853 (116675478/s), 56.689 (112896681/s), 56.482 (113310435/s), 55.644 (115016893/s), 54.623 (117166761/s), 55.773 (114750865/s), 54.726 (116946241/s), 54.918 (116537383/s)'
matmul256_times = '164.72 (155414306/s), 168.41 (152014488/s), 169.93 (150652927/s), 164.99 (155157157/s), 166.66 (153609831/s), 163.04 (157012830/s), 167.45 (152880544/s), 161.06 (158949936/s), 171.13 (149594750/s), 168.49 (151940505/s)'
matmul1024_times = '653.63 (156664035/s), 677.26 (151197248/s), 692.88 (147788938/s), 664.79 (154032909/s), 702.61 (145742096/s), 651.74 (157116904/s), 656.4 (156003388/s), 664.69 (154056314/s), 665.34 (153906736/s), 651.88 (157083643/s)'
out_dicts = []
algos = ['Bolt', 'PQ', 'OPQ', 'Binary Embedding']
dicts = [bolt_times, pq_times, opq_times, popcnt_times]
for algo, d in zip(algos, dicts):
for nbytes, s in list(d.items()):
thruputs = _extract_thruput(s) * 1e5
if algo == 'Binary Embedding':
thruputs /= 1e5 # these are already dists/sec, not qps
out_dicts += [{'algo': algo, 'nbytes': nbytes, 'y': t} for t in thruputs]
matmul_strs = [matmul1_times, matmul16_times, matmul64_times, matmul256_times, matmul1024_times]
batch_sizes = [1, 16, 64, 256, 1024]
nbytes_list = [8, 16, 32] # replicate results in each plot
for s, sz in zip(matmul_strs, batch_sizes):
algo = 'Matmul {}'.format(sz)
for nbytes in nbytes_list:
thruputs = _extract_thruput(s)
out_dicts += [{'algo': algo, 'nbytes': nbytes, 'y': t} for t in thruputs]
return pd.DataFrame.from_records(out_dicts)
def main():
pass
# print _extract_thruput('foo (10x5): 2.456 (1302931596/s), 2.344 (1365187713/s), 2.125 (1505882352/s), 2.829 (1131141746/s), 2.148 (1489757914/s), 2.167 (1476695892/s), 2.327 (1375161151/s), 2.145 (1491841491/s), 2.12 (1509433962/s), 2.112 (1515151515/s)')
# print McqResults('../results/tmp.txt')
# print McqResults('../results/mcq/mcq_D=256_M=8.txt')
# res = query_speed_results()
# print res.loc[res['algo'] == 'Matmul 1']
# print res.loc[res['algo'] == 'Matmul 256']
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import functools
import pprint
import time

import numpy as np
import scipy.special
from joblib import Memory

# NOTE: this script also relies on several project-local modules that are not
# shown here (the `methods`, `amm`, `compress`, and `pyn` modules, plus the
# dataset/task loader imported as `md`).
_memory = Memory('.', verbose=0)
# NUM_TRIALS = 1
NUM_TRIALS = 10
# @_memory.cache
def _estimator_for_method_id(method_id, **method_hparams):
return methods.METHOD_TO_ESTIMATOR[method_id](**method_hparams)
def _hparams_for_method(method_id):
if method_id in methods.SKETCH_METHODS:
# dvals = [2, 4, 6, 8, 12, 16, 24, 32, 48, 64] # d=1 undef on fd methods
# dvals = [1, 2, 4, 8, 16, 32, 64, 128]
dvals = [1, 2, 4, 8, 16, 32, 64]
        # dvals = [1, 2, 4, 8, 16, 32]
        # dvals = [1, 2, 4, 8]
        # single-value debug settings (TODO rm): 32, 16, 8, 4, 3, 2, or 1
if method_id == methods.METHOD_SPARSE_PCA:
# first one gets it to not return all zeros on caltech
alpha_vals = (1. / 16384, .03125, .0625, .125, .25, .5, 1, 2, 4, 8)
            # alpha_vals = (.0625, .125, .25, .5, 1, 2, 4, 8)
            # alpha_vals = (.0625, .125)
            # alpha_vals = (2, 4, 5)
            # single-value debug settings (TODO rm): .0625, .03125, 1./1024,
            # 1./16384, 0, .1, 1., 10., 20., or 50.
return [{'d': d, 'alpha': alpha}
for d in dvals for alpha in alpha_vals]
return [{'d': dval} for dval in dvals]
if method_id in methods.VQ_METHODS:
# mvals = [1, 2, 4, 8, 16, 32, 64]
mvals = [2, 4, 8, 16, 32, 64]
        # mvals = [1, 2, 4, 8, 16]
        # mvals = [1, 2, 4, 8]
        # debug-only settings (TODO rm): [64], [8, 16], [8, 16, 64], or single
        # values 128, 64, 32, 16, 8, 4, or 1
if method_id == methods.METHOD_MITHRAL:
lut_work_consts = (2, 4, -1)
# lut_work_consts = [-1] # TODO rm
params = []
for m in mvals:
for const in lut_work_consts:
params.append({'ncodebooks': m, 'lut_work_const': const})
return params
return [{'ncodebooks': m} for m in mvals]
if method_id in [methods.METHOD_EXACT, methods.METHOD_SCALAR_QUANTIZE]:
return [{}]
raise ValueError(f"Unrecognized method: '{method_id}'")
def _ntrials_for_method(method_id, ntasks):
# return 1 # TODO rm
if ntasks > 1: # no need to avg over trials if avging over multiple tasks
return 1
# return NUM_TRIALS if method_id in methods.NONDETERMINISTIC_METHODS else 1
return NUM_TRIALS if method_id in methods.RANDOM_SKETCHING_METHODS else 1
# ================================================================ metrics
def _compute_compression_metrics(ar):
# if quantize_to_type is not None:
# ar = ar.astype(quantize_to_type)
# ar -= np.min(ar)
# ar /= (np.max(ar) / 65535) # 16 bits
# ar -= 32768 # center at 0
# ar = ar.astype(np.int16)
# elem_sz = ar.dtype.itemsize
# return {'nbytes_raw': ar.nbytes,
# 'nbytes_blosc_noshuf': len(_blosc_compress(
# ar, elem_sz=elem_sz, shuffle=blosc.NOSHUFFLE)),
# 'nbytes_blosc_byteshuf': len(_blosc_compress(
# ar, elem_sz=elem_sz, shuffle=blosc.SHUFFLE)),
# 'nbytes_blosc_bitshuf': len(_blosc_compress(
# ar, elem_sz=elem_sz, shuffle=blosc.BITSHUFFLE)),
# 'nbytes_zstd': len(_zstd_compress(ar)),
# 'nbits_cost': nbits_cost(ar).sum() // 8,
# 'nbits_cost_zigzag':
# nbits_cost(zigzag_encode(ar), signed=False).sum() // 8,
# 'nbytes_sprintz': compress.sprintz_packed_size(ar)
# }
return {'nbytes_raw': ar.nbytes,
'nbytes_sprintz': compress.sprintz_packed_size(ar)}
def _cossim(Y, Y_hat):
ynorm = np.linalg.norm(Y) + 1e-20
yhat_norm = np.linalg.norm(Y_hat) + 1e-20
return ((Y / ynorm) * (Y_hat / yhat_norm)).sum()
def _compute_metrics(task, Y_hat, compression_metrics=True, **sink):
Y = task.Y_test
diffs = Y - Y_hat
raw_mse = np.mean(diffs * diffs)
normalized_mse = raw_mse / np.var(Y)
# Y_meannorm = Y - Y.mean()
# Y_hat_meannorm = Y_hat - Y_hat.mean()
# ynorm = np.linalg.norm(Y_meannorm) + 1e-20
# yhat_norm = np.linalg.norm(Y_hat_meannorm) + 1e-20
# r = ((Y_meannorm / ynorm) * (Y_hat_meannorm / yhat_norm)).sum()
metrics = {'raw_mse': raw_mse, 'normalized_mse': normalized_mse,
'corr': _cossim(Y - Y.mean(), Y_hat - Y_hat.mean()),
'cossim': _cossim(Y, Y_hat), # 'bias': diffs.mean(),
'y_mean': Y.mean(), 'y_std': Y.std(),
'yhat_std': Y_hat.std(), 'yhat_mean': Y_hat.mean()}
if compression_metrics:
# Y_q = compress.quantize(Y, nbits=8)
# Y_hat_q = compress.quantize(Y_hat, nbits=8)
# diffs_q = Y_q - Y_hat_q
# # diffs_q = compress.zigzag_encode(diffs_q).astype(np.uint8)
# assert Y_q.dtype == np.int8
# assert diffs_q.dtype == np.int8
Y_q = compress.quantize(Y, nbits=12)
Y_hat_q = compress.quantize(Y_hat, nbits=12)
diffs_q = Y_q - Y_hat_q
assert Y_q.dtype == np.int16
assert diffs_q.dtype == np.int16
# Y_q = quantize_i16(Y)
# # quantize to 16 bits
# Y = Y - np.min(Y)
# Y /= (np.max(Y) / 65535) # 16 bits
# Y -= 32768 # center at 0
# Y = Y.astype(np.int16)
# diffs =
metrics_raw = _compute_compression_metrics(Y_q)
metrics.update({k + '_orig': v for k, v in metrics_raw.items()})
metrics_raw = _compute_compression_metrics(diffs_q)
metrics.update({k + '_diffs': v for k, v in metrics_raw.items()})
if task.info:
problem = task.info['problem']
metrics['problem'] = problem
if problem == 'softmax':
lbls = task.info['lbls_test'].astype(np.int32)
b = task.info['biases']
logits_amm = Y_hat + b
logits_orig = Y + b
lbls_amm = np.argmax(logits_amm, axis=1).astype(np.int32)
lbls_orig = np.argmax(logits_orig, axis=1).astype(np.int32)
# print("Y_hat shape : ", Y_hat.shape)
# print("lbls hat shape: ", lbls_amm.shape)
# print("lbls amm : ", lbls_amm[:20])
metrics['acc_amm'] = np.mean(lbls_amm == lbls)
metrics['acc_orig'] = np.mean(lbls_orig == lbls)
elif problem in ('1nn', 'rbf'):
lbls = task.info['lbls_test'].astype(np.int32)
lbls_centroids = task.info['lbls_centroids']
lbls_hat_1nn = []
rbf_lbls_hat = []
W = task.W_test
centroid_norms_sq = (W * W).sum(axis=0)
sample_norms_sq = (task.X_test * task.X_test).sum(
axis=1, keepdims=True)
k = W.shape[1]
nclasses = np.max(lbls_centroids) + 1
affinities = np.zeros((k, nclasses), dtype=np.float32)
for kk in range(k):
affinities[kk, lbls_centroids[kk]] = 1
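            # affinities is a one-hot (num_centroids x num_classes) map from
            # centroid to class, so `similarities @ affinities` accumulates
            # the (softmax) similarity mass per class below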
for prods in [Y_hat, Y]:
dists_sq_hat = (-2 * prods) + centroid_norms_sq + sample_norms_sq
# 1nn classification
centroid_idx = np.argmin(dists_sq_hat, axis=1)
lbls_hat_1nn.append(lbls_centroids[centroid_idx])
# rbf kernel classification (bandwidth=1)
# gamma = 1. / np.sqrt(W.shape[0])
# gamma = 1. / W.shape[0]
gamma = 1
similarities = scipy.special.softmax(-dists_sq_hat * gamma, axis=1)
class_probs = similarities @ affinities
rbf_lbls_hat.append(np.argmax(class_probs, axis=1))
lbls_amm_1nn, lbls_orig_1nn = lbls_hat_1nn
rbf_lbls_amm, rbf_lbls_orig = rbf_lbls_hat
metrics['acc_amm_1nn'] = np.mean(lbls_amm_1nn == lbls)
metrics['acc_orig_1nn'] = np.mean(lbls_orig_1nn == lbls)
metrics['acc_amm_rbf'] = np.mean(rbf_lbls_amm == lbls)
metrics['acc_orig_rbf'] = np.mean(rbf_lbls_orig == lbls)
if problem == '1nn':
lbls_amm, lbls_orig = rbf_lbls_amm, rbf_lbls_orig
elif problem == 'rbf':
lbls_amm, lbls_orig = rbf_lbls_amm, rbf_lbls_orig
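            # NOTE: both branches above assign the RBF-kernel predictions; the
            # pure 1nn predictions only feed the *_1nn metrics computed above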
orig_acc_key = 'acc-1nn-raw'
if orig_acc_key in task.info:
metrics[orig_acc_key] = task.info[orig_acc_key]
metrics['acc_amm'] = np.mean(lbls_amm == lbls)
metrics['acc_orig'] = np.mean(lbls_orig == lbls)
elif problem == 'sobel':
assert Y.shape[1] == 2
grad_mags_true = np.sqrt((Y * Y).sum(axis=1))
grad_mags_hat = np.sqrt((Y_hat * Y_hat).sum(axis=1))
diffs = grad_mags_true - grad_mags_hat
metrics['grad_mags_nmse'] = (
(diffs * diffs).mean() / grad_mags_true.var())
elif problem.lower().startswith('dog'):
# difference of gaussians
assert Y.shape[1] == 2
Z = Y[:, 0] - Y[:, 1]
Z_hat = Y_hat[:, 0] - Y_hat[:, 1]
diffs = Z - Z_hat
metrics['dog_nmse'] = (diffs * diffs).mean() / Z.var()
return metrics
# ================================================================ driver funcs
def _eval_amm(task, est, fixedB=True, **metrics_kwargs):
est.reset_for_new_task()
if fixedB:
est.set_B(task.W_test)
# print("eval_amm validating task: ", task.name)
# task.validate(train=False, test=True)
# print(f"task {task.name} matrix hashes:")
# pprint.pprint(task._hashes())
# print("task: ", task.name)
# print("X_test shape: ", task.X_test.shape)
# print("W_test shape: ", task.W_test.shape)
t = time.perf_counter()
# Y_hat = est.predict(task.X_test.copy(), task.W_test.copy())
Y_hat = est.predict(task.X_test, task.W_test)
# Y_hat = task.X_test @ task.W_test # yep, zero error
duration_secs = time.perf_counter() - t
metrics = _compute_metrics(task, Y_hat, **metrics_kwargs)
metrics['secs'] = duration_secs
# metrics['nmultiplies'] = est.get_nmuls(task.X_test, task.W_test)
metrics.update(est.get_speed_metrics(
task.X_test, task.W_test, fixedB=fixedB))
# print("eval_amm re-validating task: ", task.name)
# task.validate(train=False, test=True)
# print(f"task {task.name} matrix hashes:")
# pprint.pprint(task.hashes())
return metrics
def _get_all_independent_vars():
independent_vars = set(['task_id', 'method', 'trial'])
for method_id in methods.ALL_METHODS:
hparams = _hparams_for_method(method_id)[0]
est = _estimator_for_method_id(method_id, **hparams)
independent_vars = (independent_vars |
set(est.get_params().keys()))
return independent_vars
# @functools.lru_cache(maxsize=None)
# @_memory.cache
def _fitted_est_for_hparams(method_id, hparams_dict, X_train, W_train,
Y_train, **kwargs):
est = _estimator_for_method_id(method_id, **hparams_dict)
est.fit(X_train, W_train, Y=Y_train, **kwargs)
return est
# def _main(tasks, methods=['SVD'], saveas=None, ntasks=None,
def _main(tasks_func, methods=None, saveas=None, ntasks=None,
verbose=1, limit_ntasks=-1, compression_metrics=False, # TODO uncomment below
# verbose=3, limit_ntasks=-1, compression_metrics=False,
tasks_all_same_shape=False):
    # the `methods` kwarg shadows the `methods` module here; resolve the
    # module through globals() so a call with methods=None doesn't evaluate
    # `None.DEFAULT_METHODS`
    if methods is None:
        methods = globals()['methods'].DEFAULT_METHODS
if isinstance(methods, str):
methods = [methods]
if limit_ntasks is None or limit_ntasks < 1:
limit_ntasks = np.inf
independent_vars = _get_all_independent_vars()
for method_id in methods:
if verbose > 0:
print("running method: ", method_id)
ntrials = _ntrials_for_method(method_id=method_id, ntasks=ntasks)
# for hparams_dict in _hparams_for_method(method_id)[2:]: # TODO rm
for hparams_dict in _hparams_for_method(method_id):
if verbose > 3:
print("got hparams: ")
pprint.pprint(hparams_dict)
metrics_dicts = []
try:
prev_X_shape, prev_Y_shape = None, None
prev_X_std, prev_Y_std = None, None
est = None
for i, task in enumerate(tasks_func()):
if i + 1 > limit_ntasks:
raise StopIteration()
if verbose > 1:
print("-------- running task: {} ({}/{})".format(
task.name, i + 1, ntasks))
task.validate_shapes() # fail fast if task is ill-formed
can_reuse_est = (
(i != 0) and (est is not None)
and (prev_X_shape is not None)
and (prev_Y_shape is not None)
and (prev_X_std is not None)
and (prev_Y_std is not None)
and (task.X_train.shape == prev_X_shape)
and (task.Y_train.shape == prev_Y_shape)
and (task.X_train.std() == prev_X_std)
and (task.Y_train.std() == prev_Y_std))
if not can_reuse_est:
try:
est = _fitted_est_for_hparams(
method_id, hparams_dict,
task.X_train, task.W_train, task.Y_train)
except amm.InvalidParametersException as e:
# hparams don't make sense for task (eg, D < d)
if verbose > 2:
print(f"hparams apparently invalid: {e}")
est = None
if tasks_all_same_shape:
raise StopIteration()
else:
continue
prev_X_shape = task.X_train.shape
prev_Y_shape = task.Y_train.shape
prev_X_std = task.X_train.std()
prev_Y_std = task.Y_train.std()
try:
# print(f"task {task.name} matrix hashes:")
# pprint.pprint(task.hashes())
for trial in range(ntrials):
metrics = _eval_amm(
task, est, compression_metrics=compression_metrics)
metrics['N'] = task.X_test.shape[0]
metrics['D'] = task.X_test.shape[1]
metrics['M'] = task.W_test.shape[1]
metrics['trial'] = trial
metrics['method'] = method_id
metrics['task_id'] = task.name
# metrics.update(hparams_dict)
metrics.update(est.get_params())
print("got metrics: ")
pprint.pprint(metrics)
# pprint.pprint({k: metrics[k] for k in 'method task_id normalized_mse'.split()})
# print("{:.5f}".format(metrics['normalized_mse'])) # TODO uncomment above
metrics_dicts.append(metrics)
except amm.InvalidParametersException as e:
if verbose > 2:
print(f"hparams apparently invalid: {e}")
if tasks_all_same_shape:
raise StopIteration()
else:
continue
except StopIteration: # no more tasks for these hparams
pass
if len(metrics_dicts):
pyn.save_dicts_as_data_frame(
metrics_dicts, save_dir='results/amm', name=saveas,
dedup_cols=independent_vars)
# def main_ecg(methods=None, saveas='ecg', limit_nhours=1):
# tasks = md.load_ecg_tasks(limit_nhours=limit_nhours)
# return _main(tasks=tasks, methods=methods, saveas=saveas, ntasks=139,
# # limit_ntasks=10, compression_metrics=False)
# limit_ntasks=5, compression_metrics=True)
def main_caltech(methods=methods.USE_METHODS, saveas='caltech',
limit_ntasks=-1, limit_ntrain=-1, filt='sobel'):
# tasks = md.load_caltech_tasks()
# tasks = md.load_caltech_tasks(limit_ntrain=100e3, limit_ntest=10e3) # TODO rm after debug
# tasks = md.load_caltech_tasks(limit_ntrain=-1, limit_ntest=10e3) # TODO rm after debug
# tasks = md.load_caltech_tasks(limit_ntrain=100e3)
# tasks = md.load_caltech_tasks(limit_ntrain=500e3)
# tasks = md.load_caltech_tasks(limit_ntrain=1e6) # does great
# tasks = md.load_caltech_tasks(limit_ntrain=15e5)
# tasks = md.load_caltech_tasks(limit_ntrain=17.5e5) # bad
# tasks = md.load_caltech_tasks(limit_ntrain=2e6)
# tasks = md.load_caltech_tasks(limit_ntrain=2.5e6)
# return _main(tasks=tasks, methods=methods, saveas=saveas,
# limit_ntasks = -1
# limit_ntasks = 10
# filt = 'sharpen5x5'
# filt = 'gauss5x5'
# filt = 'sobel'
saveas = '{}_{}'.format(saveas, filt)
# limit_ntrain = -1
# limit_ntrain = 500e3
task_func = functools.partial(
md.load_caltech_tasks, filt=filt, limit_ntrain=limit_ntrain)
return _main(tasks_func=task_func, methods=methods,
saveas=saveas, ntasks=510, limit_ntasks=limit_ntasks,
tasks_all_same_shape=True)
def main_ucr(methods=methods.USE_METHODS, saveas='ucr',
k=128, limit_ntasks=None, problem='rbf'):
# limit_ntasks = 10
# limit_ntasks = 13
# tasks = md.load_ucr_tasks(limit_ntasks=limit_ntasks)
# k = 128
tasks_func = functools.partial(
md.load_ucr_tasks, limit_ntasks=limit_ntasks, k=k, problem=problem)
saveas = '{}_k={}_problem={}'.format(saveas, k, problem)
return _main(tasks_func=tasks_func, methods=methods, saveas=saveas,
ntasks=76, limit_ntasks=limit_ntasks,
tasks_all_same_shape=False)
def main_cifar10(methods=methods.USE_METHODS, saveas='cifar10'):
# tasks = md.load_cifar10_tasks()
return _main(tasks_func=md.load_cifar10_tasks, methods=methods,
saveas=saveas, ntasks=1)
def main_cifar100(methods=methods.USE_METHODS, saveas='cifar100'):
# tasks = md.load_cifar100_tasks()
return _main(tasks_func=md.load_cifar100_tasks, methods=methods,
saveas=saveas, ntasks=1)
def main_all(methods=methods.USE_METHODS):
main_cifar10(methods=methods)
main_cifar100(methods=methods)
# main_ecg(methods=methods)
main_caltech(methods=methods)
def main():
# main_cifar10(methods='ScalarQuantize')
# main_cifar100(methods='ScalarQuantize')
# main_ucr(methods='ScalarQuantize')
main_caltech(methods='ScalarQuantize', filt='sobel')
main_caltech(methods='ScalarQuantize', filt='dog5x5')
# main_cifar10(methods='MithralPQ')
# main_cifar100(methods='Mithral')
# main_caltech(methods='Hadamard')
# main_cifar10(methods='MithralPQ')
# main_cifar100(methods='MithralPQ')
# main_ucr(methods='MithralPQ', k=64, limit_ntasks=5, problem='rbf')
# main_ucr(methods='Bolt', k=64, limit_ntasks=5, problem='softmax')
# rerun mithral stuff with fixed numerical issues
# main_cifar10(methods=['Mithral', 'MithralPQ'])
# main_cifar100(methods=['Mithral', 'MithralPQ'])
# main_ucr(methods=['Mithral', 'MithralPQ'], k=128, problem='rbf')
# main_caltech(methods=['Mithral', 'MithralPQ'], filt='sobel')
# main_caltech(methods=['Mithral', 'MithralPQ'], filt='dog5x5')
# #
# # TODO ideally run this too to put in appendix
# #
# use_methods = list(methods.USE_METHODS)
# use_methods.remove(methods.METHOD_SPARSE_PCA)
# main_ucr(methods=use_methods, k=128, problem='softmax')
# main_caltech('Mithral', filt='sobel', limit_ntrain=1e6, limit_ntasks=10)
# lim = 500e3
# lim = 2e6
# lim = -1
# lim = 4e6
# lim = 5e6
# main_caltech('Mithral', filt='sobel', limit_ntrain=lim, limit_ntasks=10)
# main_caltech('MithralPQ', filt='sobel', limit_ntrain=lim, limit_ntasks=10)
# main_caltech('Mithral', filt='dog5x5', limit_ntrain=lim, limit_ntasks=10)
# main_caltech('MithralPQ', filt='dog5x5', limit_ntrain=lim, limit_ntasks=10)
# main_caltech('OldMithralPQ', filt='sobel', limit_ntrain=lim, limit_ntasks=10)
# main_ucr(methods='MithralPQ', limit_ntasks=5)
# main_caltech(methods='Bolt', limit_ntasks=10, limit_ntrain=500e3, filt='dog5x5')
# main_caltech(methods='Bolt', limit_ntasks=10, limit_ntrain=500e3, filt='sobel')
# main_caltech(methods='SparsePCA')
if __name__ == '__main__':
np.set_printoptions(formatter={'float': lambda f: "{:.2f}".format(f)},
linewidth=100)
main()
|
#!/usr/bin/env python
import abc

import numba
import numpy as np
import scipy.linalg
from joblib import Memory
from sklearn.decomposition import PCA, SparsePCA
from sklearn.utils.extmath import randomized_svd

# import ffht  # https://github.com/FALCONN-LIB/FFHT; python setup.py install
# NOTE: some SparsePCA keyword arguments used below (e.g. U_init, V_init,
# normalize_components) target the sklearn version the original code was
# written against.
_memory = Memory('.', verbose=1, compress=9)
KEY_NMULTIPLIES = 'muls'
OSNAP_DEFAULT_S = 4
# OSNAP_DEFAULT_S = 2
# ================================================================ utils
def _nmultiplies_matmul(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
def _nmultiplies_matmul_with_sizes(N, D, M):
return N * D * M
def _nmultiplies_svd(N, D):
return min(N * N * D, N * D * D)
def _nmultiplies_qr(N, D):
return min(N * N * D, N * D * D)
# ================================================================ types
class InvalidParametersException(Exception):
pass
class ApproxMatmul(abc.ABC):
    def __init__(self, *args_unused, **kwargs_unused):
pass
def fit(self, A, B, Y=None): # Y = A @ B if not specified
pass
def set_A(self, A):
pass
def set_B(self, B):
pass
def reset_for_new_task(self):
pass
@abc.abstractmethod
def __call__(self, A, B):
pass
def predict(self, A, B):
return self(A, B)
def get_params(self):
return {}
# def get_nmuls(self, A, B, fixedA=False, fixedB=False):
@abc.abstractmethod
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
pass
class ExactMatMul(ApproxMatmul):
def __call__(self, A, B):
return A @ B
def get_speed_metrics(self, A, B, **sink):
return {KEY_NMULTIPLIES: _nmultiplies_matmul(A, B)}
def _scalar_quantize(A, axis=1, signed=False, nbits=8):
unsigned_maxval = float(1 << int(nbits)) - 1
# # TODO rm
# # return np.zeros((A.shape[0], 1)), np.ones((A.shape[0], 1)), A
# # offsets = np.zeros((A.shape[0], 1))
# offsets = A.min(axis=1, keepdims=True)
# # scales = maxval / np.ones((A.shape[0], 1))
# scales = maxval / A.max(axis=1, keepdims=True)
# Aq = (A - offsets) * scales
# return offsets, scales, Aq
# maxval = float(1 << int(nbits)) - 1
mins = A.min(axis=axis, keepdims=True)
# A_offset = A - offsets
ranges = (A - mins).max(axis=axis, keepdims=True) + 1e-20
scales = unsigned_maxval / ranges
# Aq = (A_offset * (maxval / scales)).astype(np.int)
# Aq = (A_offset * scales).astype(np.int)
if signed:
# sign_offset = 1 << (nbits - 1) # 8 bits -> 128
# A_offset -= sign_offset
offsets = mins + (ranges * (128. / 255))
minval = -(1 << (nbits - 1))
maxval = -minval - 1
else:
offsets = mins
minval = 0
maxval = (1 << nbits) - 1
Aq = (A - offsets) * scales
# print("min, max A:", Aq.min(), Aq.max()) # looks good
    Aq = np.clip(Aq, minval, maxval).astype(int)  # np.int was removed from numpy
return offsets, scales, Aq
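# The helper below is an added sanity-check sketch (not part of the original
# experiment code): it verifies that _scalar_quantize's offsets/scales invert
# the quantization up to one quantization step per row.
def _check_scalar_quantize_roundtrip():
    A = np.random.randn(8, 16).astype(np.float32)
    offsets, scales, Aq = _scalar_quantize(A, axis=1, signed=False, nbits=8)
    A_hat = (Aq / scales) + offsets  # invert Aq = (A - offsets) * scales
    assert np.all(np.abs(A - A_hat) <= (1. / scales) + 1e-5)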
class QuantizedMatmul(ApproxMatmul):
__slots__ = 'nbits a_offsets a_scales b_offsets b_scales A B'.split()
def __init__(self, nbits=8):
self.nbits = nbits
def __call__(self, A, B):
assert A.shape[1] == B.shape[0] # dims need to match
N, D = A.shape
D, M = B.shape
if self.A is None:
self.set_A(A)
if self.B is None:
self.set_B(B)
# print("QuantizedMatmul")
# print("min, max A:", self.A.min(), self.A.max())
# print("min, max A offsets:", self.a_offsets.min(), self.a_offsets.max())
# print("min, max A scales :", self.a_scales.min(), self.a_scales.max())
# print("min, max B:", self.B.min(), self.B.max())
# print("min, max B offsets:", self.b_offsets.min(), self.b_offsets.max())
# print("min, max B scales :", self.b_scales.min(), self.b_scales.max())
# ((A - a_offsets) / a_scales) @ ((B - b_offsets) / b_scales) # noqa
# ignoring scales, we have:
# (A - a_off) @ (B - b_off)
# = A @ B - (a_off @ B) - (A @ b_off) + a_off @ b_off
# maxval = (1 << int(self.nbits)) - 1
ret = (self.A @ self.B).astype(np.float32)
ret *= 1. / self.a_scales
ret *= 1. / self.b_scales
A_off = np.tile(self.a_offsets, (1, D))
B_off = np.tile(self.b_offsets, (D, 1))
return ret + (A_off @ B) + (A @ B_off) - (A_off @ B_off)
def set_A(self, A):
# unsigned quantization; we *could* learn the offsets and scales
# on the training set, but since this is a baseline, we're giving it
# the advantage of using the "true" offsets/scales
self.a_offsets, self.a_scales, self.A = _scalar_quantize(
A, axis=1, signed=False, nbits=self.nbits)
# mins = A.min(axis=1, keepdims=True)
# A_offset = A - mins
# scales = A_offset.max(axis=1, keepdims=True) + 1e-20
# self.A = (A_offset * (255. / scales)).astype(np.int)
def set_B(self, B):
# signed quantization (for maddubs instruction)
self.b_offsets, self.b_scales, self.B = _scalar_quantize(
B, axis=0, signed=True, nbits=self.nbits)
# self.b_offsets, self.b_scales, self.B = _scalar_quantize(
# B.T, nbits=self.nbits, signed=True)
# # quantize each col, not each row
# self.b_offsets = self.b_offsets.ravel()
# self.b_scales = self.b_scales.ravel()
# self.B = self.B.T
def reset_for_new_task(self):
self.A = None
self.B = None
def get_speed_metrics(self, A, B, **sink):
# neglect packing, postprocessing, etc
return {KEY_NMULTIPLIES: _nmultiplies_matmul(A, B)}
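# Added sketch (not from the original code): a quick numeric check of the
# offset-expansion identity used in QuantizedMatmul.__call__ above,
# (A - a_off) @ (B - b_off) = A@B - a_off@B - A@b_off + a_off@b_off.
def _check_quantized_matmul_identity():
    N, D, M = 4, 3, 5
    A = np.random.randn(N, D).astype(np.float32)
    B = np.random.randn(D, M).astype(np.float32)
    a_off = np.tile(np.random.randn(N, 1).astype(np.float32), (1, D))
    b_off = np.tile(np.random.randn(1, M).astype(np.float32), (D, 1))
    lhs = (A - a_off) @ (B - b_off)
    rhs = A @ B - a_off @ B - A @ b_off + a_off @ b_off
    assert np.allclose(lhs, rhs, atol=1e-4)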
class SketchedMatmul(ApproxMatmul, abc.ABC):
__slots__ = 'd'
def __init__(self, d):
self.d = int(d)
def get_params(self):
return {'d': self.d}
def sketch(self, A, B):
pass
def call(self, A, B):
A_hat, B_hat = self.sketch(A, B)
assert A_hat.shape[0] == A.shape[0]
assert B_hat.shape[1] == B.shape[1]
assert A_hat.shape[1] <= self.d # verify sketch size not cheating
return A_hat @ B_hat
def __call__(self, A, B):
assert A.shape[1] == B.shape[0] # dims need to match
D = A.shape[1]
if D <= self.d:
raise InvalidParametersException(
'D <= d: {} < {}'.format(D, self.d))
if B.shape[1] <= self.d:
raise InvalidParametersException(
'M <= d: {} < {}'.format(B.shape[1], self.d))
return self.call(np.copy(A), np.copy(B)) # guarantee A, B unchanged
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
assert not (fixedA and fixedB) # this would be stupid, so fail fast
sketch_nmuls = self._get_nmuls(A.shape[0], A.shape[1], B.shape[1],
self.d, fixedA=fixedA, fixedB=fixedB)
N, D = A.shape
D, M = B.shape
sketched_matmul_nmuls = N * self.d * M
return {KEY_NMULTIPLIES: sketch_nmuls + sketched_matmul_nmuls}
def _get_nmuls(self, N, D, M, d, fixedA=False, fixedB=False):
# default nmuls = sketching with dense matrix
nmuls = 0
if not fixedA:
nmuls += N * D * d
if not fixedB:
nmuls += M * D * d
return nmuls
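# Rough cost illustration for the dense-sketch count above (numbers are just
# an example, not from any paper): with N=1000, D=512, M=64, d=32, sketching A
# costs N*D*d = 16,384,000 muls, sketching B costs M*D*d = 1,048,576, and the
# sketched matmul costs N*d*M = 2,048,000, for ~19.5M muls total versus
# N*D*M = 32,768,000 for the exact product.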
class RandGaussSketch(SketchedMatmul):
def sketch(self, A, B):
D = A.shape[1]
V = np.random.randn(D, self.d).astype(np.float32)
# dividing by expected norm is more similar to theory papers,
# but no reason this should actually be better AFAIK
# V /= np.sqrt(D)
V /= np.linalg.norm(V, axis=0)
A = A @ V
B = V.T @ B
return A, B
class RandOrthoGaussSketch(SketchedMatmul):
def sketch(self, A, B):
D = A.shape[1]
V = np.random.randn(D, self.d).astype(np.float32)
V, _ = np.linalg.qr(V)
A = A @ V
B = V.T @ B
return A, B
class RandRademacherSketch(SketchedMatmul):
def sketch(self, A, B):
D = A.shape[1]
V = np.random.randint(2, size=(D, self.d)).astype(np.float32) * 2 - 1
V /= np.sqrt(D)
A = A @ V
B = V.T @ B
return A, B
class HadamardSketch(SketchedMatmul):
def sketch(self, A, B):
D = A.shape[1]
use_D = 1 << int(np.ceil(np.log2(D)))
V = scipy.linalg.hadamard(use_D)[:D, :self.d].astype(np.float32)
V /= np.linalg.norm(V, axis=0)
# V /= np.sqrt(2)
# V *= np.sqrt(2)
# V *= np.sqrt(D / self.d)
# V *= (D / self.d) ** .25
A = A @ V
B = V.T @ B
return A, B
class SketchSqSample(SketchedMatmul):
def sketch(self, A, B):
return sketch_sq_sample(A, B, self.d)
def _get_nmuls(self, N, D, M, d, **sink):
return _nmultiplies_sketch_sq_sample(N, D, M, d)
class FdAmm(SketchedMatmul):
def sketch(self, A, B):
return fd_amm_sketches(A, B, self.d)
def _get_nmuls(self, N, D, M, d, **sink):
return _nmultiplies_fd_amm_sketches(N, D, M, d)
class CooccurSketch(SketchedMatmul):
def sketch(self, A, B):
return cooccur_sketches(A, B, self.d)
def _get_nmuls(self, N, D, M, d, **sink):
return _nmultiplies_cooccur_sketches(N, D, M, d)
class FastJlSketch(SketchedMatmul):
def sketch(self, A, B):
return fastjl_sketches(A, B, self.d)
def _get_nmuls(self, N, D, M, d, **sink):
return _nmultiplies_fastjl_sketches(N, D, M, d)
class HashJlSketch(SketchedMatmul):
def sketch(self, A, B):
return hash_sketches(A, B, self.d)
def _get_nmuls(self, N, D, M, d, **sink):
return _nmultiplies_hash_sketches(N, D, M, d)
class OsnapSketch(SketchedMatmul):
def sketch(self, A, B):
return osnap_sketches(A, B, self.d, s=OSNAP_DEFAULT_S)
# def get_params(self):
# return {'d': self.d, 's': OSNAP_DEFAULT_S}
def _get_nmuls(self, N, D, M, d, **sink):
return _nmultiplies_osnap_sketches(N, D, M, d)
class SvdSketch(SketchedMatmul):
__slots__ = 'd niters Ua SVTa Ub SVTb'.split()
def __init__(self, d, niters=5):
self.d = d
self.niters = niters
self.reset_for_new_task()
def get_params(self):
return {'d': self.d, 'niters': self.niters}
def _check_mat_shape(self, M):
if M is None:
return False
# if np.min(M.shape) < self.d:
if np.max(M.shape) < self.d:
raise InvalidParametersException(
'shape has entry < d: {} < {}'.format(M.shape, self.d))
return True
def set_A(self, A):
# if A is None:
# return
if self._check_mat_shape(A):
            self.Ua, self.SVTa = svd_sketch(A, self.d)
def set_B(self, B):
if self._check_mat_shape(B):
self.Ub, self.SVTb = svd_sketch(B, self.d)
def reset_for_new_task(self):
self.Ua = None
self.SVTa = None
self.Ub = None
self.SVTb = None
# def __call__(self, A=None, B=None):
# assert A.shape[1] == B.shape[0] # dims need to match
# if A.shape[1] < self.d:
# raise InvalidParametersException('D < d')
def call(self, A=None, B=None):
if self.Ua is None:
self.set_A(A)
if self.Ub is None:
self.set_B(B)
D = self.Ua.shape[1]
if D < self.d:
raise InvalidParametersException(
'D < d: {} < {}'.format(D, self.d))
# verify sketch size isn't cheating
# print("A.shape", A.shape)
# print("B.shape", B.shape)
# print("self.Ua.shape: ", self.Ua.shape)
# print("self.SVTa.shape: ", self.SVTa.shape)
# print("self.Ub.shape: ", self.Ub.shape)
# print("self.SVTb.shape: ", self.SVTb.shape)
# print("self.d: ", self.d)
assert self.Ua.shape[1] <= self.d
assert self.SVTa.shape[0] <= self.d
assert self.SVTb.shape[0] <= self.d
assert self.Ub.shape[1] <= self.d
# innermost parens important so that matmuls actually use low rank
# outer parens help if B ncols < A nrows (which is true for us)
return self.Ua @ ((self.SVTa @ self.Ub) @ self.SVTb)
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
# XXX this will break if not called right after self.call()
total = 0
d = self.d
N, D = A.shape
_, M = B.shape
if not fixedA:
total += _nmultiplies_svd_sketch(N, D, d, niters=self.niters)
if not fixedB:
total += _nmultiplies_svd_sketch(D, M, d, niters=self.niters)
total += d * D * d # SVTa @ UB, d x D @ D x d
total += d * d * M # (above) @ SVTb, d x d @ d x M
total += N * d * M # Ua @ (above), N x d @ d x M
return {KEY_NMULTIPLIES: total}
@_memory.cache
def _fitted_pca(X, n_components):
pca = PCA(n_components=n_components)
return pca.fit(X)
class TrainedPcaSketch(ApproxMatmul):
__slots__ = 'pca d A B V'.split()
def __init__(self, d):
# self.pca = PCA(n_components=d)
self.d = d
self.reset_for_new_task()
def reset_for_new_task(self):
self.A = None
self.B = None
def fit(self, A, B, Y=None): # Y = A @ B if not specified
D, M = B.shape
print("called fit on TrainedPcaSketch!")
if D < self.d:
raise InvalidParametersException(
'D < d: {} < {}'.format(D, self.d))
if M < self.d:
raise InvalidParametersException(
'M < d: {} < {}'.format(M, self.d))
self.pca = _fitted_pca(A, n_components=self.d)
self.V = self.pca.components_.T
# print("components V.T @ V =\n", self.V.T @ self.V) # yep, orthonormal
def set_A(self, A):
self.A = A @ self.V
def set_B(self, B):
self.B = self.V.T @ B
def __call__(self, A, B):
assert A.shape[1] == B.shape[0] # dims need to match
if B.shape[1] < self.d:
raise InvalidParametersException(
'M < d: {} < {}'.format(B.shape[1], self.d))
if (self.A is None):
self.set_A(A)
if (self.B is None):
self.set_B(B)
return self.A @ self.B
def get_params(self):
return {'d': self.d}
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
N, D = A.shape
D, M = B.shape
d = self.d
nmuls = N * d * M # assuming matrices already sketched
if not fixedA:
nmuls += N * D * d
if not fixedB:
nmuls += D * M * d
return {KEY_NMULTIPLIES: nmuls}
@_memory.cache
def _fitted_sparse_pca(X, d, unscaled_alpha, **kwargs):
# this seems to work better than initializing with MiniBatchSparsePCA,
# svd of cov mat, or basically anything else I tried
U, _, Vt = randomized_svd(X, n_components=d, random_state=123)
U = U[:, :d]
V = Vt.T[:d]
# SparsePCA (and all the sklearn dictionary learning stuff)
# internally uses sum of squared errs for each sample, and L1 norm
# of parameter matrix; to make alpha meaningful across datasets,
# want to scale by number of examples (so it's effectively using MSE)
# and divide by L1 norm (which grows linearly with size of parameter
# matrix / vector); also scale by variance of data for similar reasons
N, D = X.shape
alpha = unscaled_alpha * np.var(X - X.mean(axis=0)) * N / D
verbose = 1
pca = SparsePCA(n_components=d, alpha=alpha, normalize_components=True,
method='lars', U_init=U, V_init=V, max_iter=10,
ridge_alpha=max(1, len(X) * X.std() * 10),
# ridge_alpha=1e8,
verbose=verbose, random_state=123)
if verbose > 0:
print("fitting sparse pca...")
return pca.fit(X)
class TrainedSparsePcaSketch(ApproxMatmul):
__slots__ = 'pca d alpha nnz can_optimize_transform A B'.split()
# def __init__(self, d, alpha, can_optimize_transform=True):
def __init__(self, d, alpha, can_optimize_transform=False):
self.d = d
self.alpha = alpha
self.can_optimize_transform = can_optimize_transform
self.reset_for_new_task()
def reset_for_new_task(self):
self.A = None
self.B = None
def fit(self, A, B, Y=None): # Y = A @ B if not specified
D, M = B.shape
# if M <= self.d:
# raise InvalidParametersException(
# 'M <= d: {} < {}'.format(M, self.d))
if D <= self.d:
raise InvalidParametersException(
'D < d: {} < {}'.format(D, self.d))
self.pca = _fitted_sparse_pca(A, d=self.d, unscaled_alpha=self.alpha)
self.nnz = np.sum(self.pca.components_ != 0)
sparsity = np.mean(self.pca.components_ == 0)
if self.nnz < self.d:
raise InvalidParametersException(
"ignoring SparsePCA with nnz < d: "
"{} < {}".format(self.nnz, self.d))
if sparsity == 0.:
raise InvalidParametersException(
"ignoring SparsePCA with no zeros")
def set_A(self, A):
if self.can_optimize_transform:
# uses ridge regression to get coeffs, instead of linear projection
# disabled by default because it produces garbage on caltech and
# is more expensive than just doing the matmul
self.A = self.pca.transform(A)
self.A += self.pca.mean_ @ self.pca.components_.T
else:
self.A = A @ self.pca.components_.T
def set_B(self, B):
if self.can_optimize_transform:
self.B = self.pca.transform(B.T).T
self.B += (self.pca.mean_ @ self.pca.components_.T).reshape(-1, 1)
else:
self.B = (B.T @ self.pca.components_.T).T
def __call__(self, A, B):
assert A.shape[1] == B.shape[0] # dims need to match
N, D = A.shape
D, M = B.shape
if D <= self.d:
raise InvalidParametersException(
'D < d: {} < {}'.format(D, self.d))
fixedA = self.A is not None
fixedB = self.B is not None
nmuls_naive = N * D * M
nmuls_ours = self.get_speed_metrics(
A, B, fixedA=fixedA, fixedB=fixedB)[KEY_NMULTIPLIES]
if nmuls_naive <= nmuls_ours:
raise InvalidParametersException(
"naive # of multiplies < sparse sketch # of multiplies: "
"{} < {}".format(nmuls_naive, nmuls_ours))
if not fixedA:
self.set_A(A)
if not fixedB:
self.set_B(B)
        # leftover debugging output (originally gated on `if N == 700:`);
        # guarded here so it doesn't print on every call
        debug = False
        if debug:
            print("got to weird dset!")
            # print("pca means: ", self.pca.mean_[::20])
            # print("A means:", A.mean(axis=0)[::20])
            # print("B means:", B.mean(axis=1)[::20])
            print("pca means sum: ", self.pca.mean_.sum())
            print("A means sum: ", A.mean(axis=0).sum())
            print("B means sum: ", B.mean(axis=1).sum())
            offsets = (self.pca.mean_ @ self.pca.components_.T)
            print("offsets: ", offsets)
            print("offsets sum: ", offsets.sum())
# C = (A @ B)
# print("true mean of output: ", C.mean())
# print("true std of output: ", C.std())
return self.A @ self.B
def get_params(self):
return {'d': self.d, 'alpha': self.alpha,
'canCheat': self.can_optimize_transform}
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
N, D = A.shape
D, M = B.shape
nmuls_sketch_X = N * self.nnz
nmuls_sketch_W = M * self.nnz
nmuls_make_output = N * self.d * M
total_nmuls = nmuls_make_output
if not fixedA:
total_nmuls += nmuls_sketch_X
if not fixedB:
total_nmuls += nmuls_sketch_W
try: # compute degree of sparsity
nnz = self.nnz
sparsity = (self.pca.components_ == 0).mean()
except AttributeError: # model not fitted yet
nnz = -1
sparsity = -1
return {KEY_NMULTIPLIES: total_nmuls,
'nnz': nnz, 'sparsity': sparsity}
# ================================================================ drineas06
def _compute_dim_scores(A, B, A_col_norms=None, B_row_norms=None):
if A_col_norms is None:
A_col_norms = np.linalg.norm(A, axis=0)
if B_row_norms is None:
B_row_norms = np.linalg.norm(B, axis=1)
return A_col_norms * B_row_norms
def sketch_sq_sample(A, B, d):
scores = _compute_dim_scores(A, B)
idxs, weights = importance_sample(scores, d)
# idxs, weights = sample_varopt_1d(scores, d) # doesn't help
return A[:, idxs] / weights, B[idxs]
# weights = np.sqrt(weights)
# return A[:, idxs] / weights, B[idxs] / weights.reshape(-1, 1)
# probs = scores / np.sum(scores)
# D = A.shape[1]
# keep_idxs = np.random.choice(D, size=d, p=probs)
# # keep_idxs = np.random.choice(D, size=d, p=probs, replace=False)
# # keep_idxs = np.random.choice(D, size=d, replace=False)
# # keep_idxs = np.arange(D-1)
# # keep_idxs = np.arange(1, D)
# # keep_idxs = np.arange(D)
# weights = np.sqrt(d * probs) # what the paper says; huge errors
# # weights = np.sqrt(D * probs) # slightly less bad
# # weights = np.sqrt(np.sqrt(d * probs))
# # weights = np.ones(D)
# A = np.copy(A) / weights
# B = np.copy(B) / weights.reshape(-1, 1)
# return np.copy(A[:, keep_idxs]), np.copy(B[keep_idxs])
# return A[:, keep_idxs], B[keep_idxs]
# return A, B
def _nmultiplies_sketch_sq_sample(N, D, M, d):
scores_nmuls = N * D + M * D # sum of sizes of each mat
reweight_nmuls = N * d + M * d # sum of sizes of each sampled mat
return scores_nmuls + reweight_nmuls # neglect normalization of probs, etc
def sketch_sq_deterministic(A, B, d):
scores = _compute_dim_scores(A, B)
D = A.shape[1]
keep_idxs = np.argsort(scores)[::-d]
weights = np.sqrt(d * (1. / D)) # uniform prob
return A[:, keep_idxs] / weights, B[keep_idxs] / weights.reshape(-1, 1)
def test_sketch_sq_sample():
print("test_sketch_sq_sample")
N, M, D = 100, 50, 200
np.random.seed(1234)
# A = np.random.randint(5, size=(N, D)).astype(np.float32)
# B = np.random.randint(5, size=(D, M)).astype(np.float32)
# A -= np.mean(A)
# B -= np.mean(B)
A = np.random.randn(N, D).astype(np.float32)
B = np.random.randn(D, M).astype(np.float32)
AB = A @ B
orig_frob_sq = np.mean(AB * AB)
print("true mss: ", orig_frob_sq)
prev_normed_err = np.inf
for d in (10, 20, 30, 40, 50):
A_hat, B_hat = sketch_sq_sample(A, B, d)
# A_hat, B_hat = sketch_sq_deterministic(A, B, d)
AB_hat = A_hat @ B_hat
# print("AB_hat mss: ", (AB_hat * AB_hat).mean())
diffs = AB - AB_hat
err_frob_sq = np.mean(diffs * diffs)
normed_err_sq = err_frob_sq / orig_frob_sq
# print("orig mss: ", orig_frob_sq)
print('d = {}, err = {:.3f}'.format(d, normed_err_sq))
assert normed_err_sq < 2.
assert normed_err_sq < (prev_normed_err + .05) # should usually hold
prev_normed_err = normed_err_sq
# ================================================================ sampling
# wait, this just returns points summing to the true sample sum
# deterministically...
def importance_sample(sample_weights, m, replace=False):
probs = sample_weights / sample_weights.sum()
idxs = np.random.choice(
np.arange(len(sample_weights)), p=probs, replace=replace, size=m)
weights = 1. / (probs[idxs] * m)
return idxs, weights
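# Added sketch (not from the original code) illustrating the note above: with
# weights 1/(m * p_i), every sampled term x_i * w_i equals sum(x) / m, so the
# weighted sample sum reproduces the true sum exactly.
def _check_importance_sample_weights():
    x = np.abs(np.random.randn(100)) + .01
    idxs, weights = importance_sample(x, m=10)
    assert np.allclose((x[idxs] * weights).sum(), x.sum())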
def _invert_permutation(permutation):
return np.arange(len(permutation))[np.argsort(permutation)]
def _sum_for_tau(x, tau):
above_tau = x > tau
return x[above_tau].sum() + (x[~above_tau] / tau).sum()
def _compute_new_tau(x_sorted_desc, m, tau=0):
    # NOTE: appears to be an unfinished helper (it never returns a value and
    # isn't referenced by sample_varopt_1d below); kept for reference
    x = x_sorted_desc
    current_sum = _sum_for_tau(x, tau)
    assert current_sum >= m
    while current_sum > m:
        x = x[:-1]
        current_sum = _sum_for_tau(x, tau)
def sample_varopt_1d(x, m):
# varopt sampling; original paper (see Algorithm 1 on p16):
# https://arxiv.org/pdf/0803.0473.pdf
# better intuition:
# https://datasketches.github.io/docs/Sampling/VarOptSampling.html
#
# unlike paper, we're just going to do it all at once since that will
# be simpler and vectorize way better; basically just recursively
# take largest point w_i if w_i > (m / sum_i w_i), with m decremented
# by 1 each time; if this doesn't take all the points, importance sample
# from the remaining points (with probs proportional to their weights)
#
# EDIT: this sucks unless really heavy tailed, so probably not a
# correct impl?
x = np.asarray(x, dtype=np.float32)
n = len(x)
    if m >= n:
        return np.arange(n), np.ones(n, dtype=np.float32)
    maxval = np.max(x)
    minval = np.min(x)
    assert minval >= 0  # needs nonnegative entries
    if minval == maxval or m == 1:
        # uniform weights 1 / (m * p_i) with p_i = 1/n, so callers always get
        # an (idxs, weights) pair like the main code path returns
        idxs = np.random.choice(np.arange(n), size=m)
        return idxs, np.full(m, n / m, dtype=np.float32)
sort_idxs = np.argsort(x)[::-1] # in descending order
x_sorted = x[sort_idxs]
unsort_idxs = _invert_permutation(sort_idxs)
q = x_sorted * (m / np.sum(x_sorted)) # sums to m
# q_tailsums = np.cumsum(q[::-1])[::-1]
# next_val = x_sorted[0]
head_sz = 0
for i in range(m):
if q[0] >= 1.:
head_sz += 1
q = q[1:] * ((m - 1) / q[1:].sum())
# TODO just compute tail sums once for renormalization (below)
# q_mass_eliminated = q[i]
# next_val = q[i + 1] * (m - head_sz) / m * ()
# renormalize such that tail sums to m - 1
else:
break
tail_sz = m - head_sz
# print("m, head_sz, tail_sz:", m, head_sz, tail_sz)
# print("len(q)", len(q))
# probs = q / np.sum(q)
probs = x_sorted[head_sz:] / np.sum(x_sorted[head_sz:])
tail_idxs = np.random.choice(
np.arange(head_sz, n), p=probs, replace=False, size=tail_sz)
idxs = list(tail_idxs)
# idxs = tail_idxs
# tau = tail_sz / np.sum(x_sorted[head_sz:])
# print("tau: ", tau)
# print("x_sorted[:head_sz + 1]: ", x_sorted[:head_sz + 1])
# tau = x_sorted[head_sz]
true_probs = probs[tail_idxs - head_sz] * (tail_sz / m)
weights = list(1. / (m * true_probs)) # small err; definitely right
# weights = [tau] * tail_sz
if head_sz > 0:
head_idxs = list(np.arange(head_sz))
head_weights = list(np.ones(head_sz))
idxs = head_idxs + idxs
weights = head_weights + weights
return unsort_idxs[idxs], np.array(weights)
# ============================================================ random sketches
# sketch both A and B jointly using the same matrix to amortize overhead and
# because it seems like this should help accuracy
# @numba.jit(nopython=True)
def fastjl_sketches(A, B, d, P=None):
N, D = A.shape
M = B.shape[1]
# pad A and B for FHT
log2_D = int(np.ceil(np.log2(D)))
D_pad = 2 ** log2_D
A_pad = np.zeros((N, D_pad), dtype=np.float32)
A_pad[:, :D] = A
B_pad = np.zeros((D_pad, M), dtype=np.float32)
B_pad[:D] = B
# construct and apply random signs for each dim
randsigns = np.random.randint(0, 2, size=D_pad) * 2 - 1
# scale now instead of scaling FHT mat, so only O(D) multiplies
randsigns = randsigns.astype(np.float32) * (1. / np.sqrt(D_pad))
A_pad *= randsigns
B_pad *= randsigns.reshape(-1, 1)
# # apply fast hadamard transform
H = scipy.linalg.hadamard(D_pad, dtype=np.float32)
# H = scipy.linalg.hadamard(D_pad, dtype=np.float32) / np.sqrt(D_pad)
A_pad = A_pad @ H
B_pad = H @ B_pad
# dimensionalty reduction
if P is None:
        # logd = np.log2(D_pad)
        keep_prob = log2_D * log2_D / D_pad
        # keep each entry of P with probability keep_prob (FastJL sparsity of
        # roughly log^2(D) / D); this also matches the expected-nnz count in
        # _nmultiplies_fastjl_sketches below. The original compared with `>`,
        # which kept entries with probability 1 - keep_prob instead.
        P = (np.random.uniform(size=(D_pad, d)) < keep_prob).astype(np.float32)
# P *= np.random.randn(*P.shape) * (d / keep_prob)
# scaling sigma totally fails; need norm to actually be 1, not just
# have expected value of 1
P *= np.random.randn(*P.shape)
P *= (1. / np.linalg.norm(P, axis=0))
# print("P shape, Apad shape, Bpad shape: ", P.shape, A_pad.shape, B_pad.shape)
return A_pad @ P, P.T @ B_pad
def _nmultiplies_fastjl_sketches(N, D, M, d): # avg, not exact, since P sparse
# technically adds or subs, but you'd do fma ops regardless for floats
log2_D = int(np.ceil(np.log2(D)))
D_pad = 2 ** log2_D
fht_nmuls = D_pad * np.log2(D_pad)
sign_nmuls = D_pad
# trickier part; expected number of madds (or similar ops) to mul by P
construct_P_nmuls = D_pad * d # assuming only 1 mul for rng + threshold
keep_prob = log2_D * log2_D / D_pad
nnz_p = min(1, keep_prob) * D_pad # expected nnz per row of P
p_nmuls = N * nnz_p * d + d * nnz_p * M
return fht_nmuls + sign_nmuls + construct_P_nmuls + p_nmuls
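# Illustrative cost comparison (new helper, arbitrary sizes): evaluates the
# multiply-count model above against the N*D*M count of an exact matmul.
# This only exercises the cost model; it is not a timing benchmark.
def _demo_fastjl_cost_model(N=1000, D=512, M=100, d=32):
    est = _nmultiplies_fastjl_sketches(N, D, M, d)
    exact = N * D * M
    print("estimated FastJL muls: {:.3g}  exact matmul muls: {:.3g}".format(
        float(est), float(exact)))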
@numba.jit(nopython=True)
def hash_sketches(A, B, d, scale=1., share_projections=True):
N, D = A.shape
D, M = B.shape
A_hat = np.zeros((N, d), dtype=A.dtype)
B_hat = np.zeros((d, M), dtype=B.dtype)
for j in range(D):
idx = np.random.randint(d)
sign = (np.random.randint(0, 2) * 2) - 1
# coeff = sign * scale # worse than predicting mean, esp for small d
coeff = sign * scale / np.sqrt(2) # actually pretty decent
# coeff = sign * scale * ((d / D) ** .25)
# coeff = sign * scale * np.sqrt(d / D) # best for small d / D
# coeff = sign * scale * d / D # best for larger d / D
A_hat[:, idx] += A[:, j] * coeff
if share_projections:
B_hat[idx] += B[j] * coeff
continue
# use a different projection for B
idx = np.random.randint(d)
sign = (np.random.randint(0, 2) * 2) - 1
B_hat[idx] += B[j] * sign
# using unscaled signs preserves norms really well, at least for
# random matrices
# print("A norm, A_hat norm:", np.linalg.norm(A), np.linalg.norm(A_hat))
# print("B norm, B_hat norm:", np.linalg.norm(B), np.linalg.norm(B_hat))
# A_norm = np.linalg.norm(A)
# B_norm = np.linalg.norm(B)
# A_hat *= np.linalg.norm(A) / np.linalg.norm(A_hat)
# B_hat *= np.linalg.norm(B) / np.linalg.norm(B_hat)
return A_hat, B_hat
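# Illustrative check (new helper): with shared projections, the hashed
# (CountSketch-style) product estimates A @ B up to the 1/sqrt(2) factor
# baked into hash_sketches; passing scale=sqrt(2) cancels that factor, so
# averaging over many independent sketches should approach the exact product.
def _demo_hash_sketch_unbiasedness(ntrials=200, d=8, seed=123):
    rng = np.random.RandomState(seed)
    A = rng.randn(40, 30).astype(np.float32)
    B = rng.randn(30, 20).astype(np.float32)
    avg = np.zeros((A.shape[0], B.shape[1]), dtype=np.float32)
    for _ in range(ntrials):
        A_hat, B_hat = hash_sketches(A, B, d, scale=np.float32(np.sqrt(2)))
        avg += A_hat @ B_hat
    avg /= ntrials
    rel_err = np.linalg.norm(avg - A @ B) / np.linalg.norm(A @ B)
    print("relative error of averaged hashed product: {:.3f}".format(rel_err))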
def osnap_sketches(A, B, d, s=OSNAP_DEFAULT_S):
N, D = A.shape
D, M = B.shape
s = max(1, min(d // 2, s)) # handle s too large relative to d
A_hat = np.zeros((N, d), dtype=A.dtype)
B_hat = np.zeros((d, M), dtype=B.dtype)
scale = 1. / np.sqrt(s)
# scale = 1. / s
# scale = 1 # seems to often work better than dividing by 1/sqrt(s)?
# scale = np.sqrt(s)
# scale = s
subspace_len = (d + s - 1) // s # round up
for ss in range(s):
start_idx = ss * subspace_len
        # clamp to d (not D): the outputs only have d dims, and the last
        # chunk can be narrower than subspace_len when s doesn't divide d
        end_idx = min(d, start_idx + subspace_len)
        A_hat[:, start_idx:end_idx], B_hat[start_idx:end_idx] = \
            hash_sketches(A, B, end_idx - start_idx, scale=scale)
# A_hat /= np.linalg.norm(A_hat, axis=)
return A_hat, B_hat
def _nmultiplies_hash_sketches(N, D, M, d):
# technically adds or subs, but you'd do fma ops regardless for floats
return N * D + D * M
def _nmultiplies_osnap_sketches(N, D, M, d, s=4):
    # each of the s subspaces runs a full hash sketch over all D input dims
    # (the original hard-coded 4 here instead of using s)
    return s * _nmultiplies_hash_sketches(N, D, M, d)
def test_rand_sketches():
print("test_svd_sketches")
N, M, D = 100, 80, 50
np.random.seed(1234)
A = np.random.randint(5, size=(N, D)).astype(np.float32)
B = np.random.randint(5, size=(D, M)).astype(np.float32)
A -= np.mean(A)
B -= np.mean(B)
AB = A @ B
orig_frob_sq = np.sum(AB * AB)
prev_normed_err = np.inf
# for d in [10]:
for d in (1, 2, 4, 8, 16, 32):
# (Ua, SVTa), (Ub, SVTb) = svd_sketches(A, B, d)
# AB_hat = Ua @ (SVTa @ Ub) @ SVTb
A_hat, B_hat = fastjl_sketches(A, B, d)
# A_hat, B_hat = hash_sketches(A, B, d) # sharing projections helps
# A_hat, B_hat = hash_sketches(A, B, d, share_projections=False)
# A_hat, B_hat = osnap_sketches(A, B, d)
AB_hat = A_hat @ B_hat
# print("fused mats shapes: ")
# print(Ua.shape, SVTa.shape, Ub.shape, SVTb.shape)
diffs = AB - AB_hat
err_frob_sq = np.sum(diffs * diffs)
normed_err_sq = err_frob_sq / orig_frob_sq
print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
# assert normed_err_sq < 1.
# assert normed_err_sq < prev_normed_err + .001
prev_normed_err = normed_err_sq
# ================================================================ Rand SVD
def svd_sketch(A, d, niters=5, **kwargs):
# assert A.shape[0] >= d
# assert A.shape[1] >= d
assert np.max(A.shape) >= d # can't truncate to larger size
U, S, Vt = randomized_svd(A, n_components=d, n_iter=niters, **kwargs)
# print("Vt shape: ", Vt.shape)
# print("S: ", S)
return (U, np.diag(S) @ Vt)
def _nmultiplies_svd_sketch(N, D, d, niters):
# # "In contrast, randomized schemes can produce an approximate SVD using
# # only O(mn log(k) + (m + n)k2) flops" -Halko et al. 2010
# # https://arxiv.org/pdf/0909.4061.pdf
# iter_cost = N * D * int(np.ceil(np.log2(d)))
# iter_cost += (N + D) * d * d
# return iter_cost * niters
# # assumes algorithm 4.4 in above; sklearn randomized_svd source
# # code says it implements algorithm 4.3, but paper says 4.3 should actually
# # be implemented as 4.4 in practice. Also 4x4's complexity is much easier
# # to understand and counting multiplies is at best a rough estimate
# # regardless.
# #
# # shapes:
# # A: N x D
# # A*: D x N
# # Omega: D x d
# # Y0 = A @ Omega: N x d
# # Q0: N x d
# # R0: d x d
# # Y_tilde_j:
# # gauss_mat_cost = D * d
# # Y0_cost = N * D * d
# Y0_cost = N * D * int(np.ceil(np.log2(d))) # subsampled FFT; see text
# Y0_cost += _nmultiplies_qr(N, d)
# Yj_tilde_cost = D * N * d + _nmultiplies_qr(N, d)
# Yj_cost =
# okay, sklearn says it uses algorithm 4.3 in Halko et al. 2010 [1],
# so we're going to go with that
# [1] https://arxiv.org/pdf/0909.4061.pdf
# shapes:
# A: N x D
# A.T: D x N
# G (Omega): D x d
# A @ G: N x d
# A.T @ (AG) D x d
# A @ (A.T@A@G) N x d
# Q0: N x d
# R0: d x d
Omega_cost = D * d
A_Omega_cost = N * D * d
# each iter: premul by A.T, then A; assumes no LU or QR for stability
iter_cost = D * N * d + N * D * d
return Omega_cost + A_Omega_cost + iter_cost * niters
def svd_sketches(A, B, d, **kwargs):
return svd_sketch(A, d, **kwargs), svd_sketch(B, d, **kwargs)
# Ua, Sa, VTa = randomized_svd(A, n_components=d, **kwargs)
# Ub, Sb, VTb = randomized_svd(B, n_components=d, **kwargs)
# print("truncated svd mat shapes:")
# print(Ua.shape, Sa.shape, VTa.shape)
# print(Ub.shape, Sb.shape, VTb.shape)
# return (Ua, np.diag(Sa) @ VTa), (Ub, np.diag(Sb) @ VTb)
def test_svd_sketches():
print("test_svd_sketches")
N, M, D = 100, 80, 50
np.random.seed(1234)
A = np.random.randint(5, size=(N, D)).astype(np.float32)
B = np.random.randint(5, size=(D, M)).astype(np.float32)
A -= np.mean(A)
B -= np.mean(B)
AB = A @ B
orig_frob_sq = np.sum(AB * AB)
prev_normed_err = np.inf
# for d in [10]:
for d in (1, 2, 4, 8, 16, 32):
(Ua, SVTa), (Ub, SVTb) = svd_sketches(A, B, d)
AB_hat = Ua @ (SVTa @ Ub) @ SVTb
# print("fused mats shapes: ")
# print(Ua.shape, SVTa.shape, Ub.shape, SVTb.shape)
diffs = AB - AB_hat
err_frob_sq = np.sum(diffs * diffs)
normed_err_sq = err_frob_sq / orig_frob_sq
print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
assert normed_err_sq < 1.
assert normed_err_sq < prev_normed_err
prev_normed_err = normed_err_sq
# ================================================================ FD methods
# TODO impl fast-FD, which zeros out half the entries
def frequent_directions(A, d, variant=None):
N, D = A.shape
H = np.zeros((d, D))
assert N >= d
assert D >= d
# for i in range(N):
H[:d - 1] = A[:d - 1]
for i in range(d - 1, N):
H[-1] = A[i]
try:
U, S, Vt = np.linalg.svd(H, full_matrices=False) # d x d, d, d x D
except np.linalg.LinAlgError as e:
print("SVD failed at iter ", i - (d - 1))
print("H shape: ", H.shape)
print("A shape: ", A.shape)
print("d: ", d)
# print("svd mat shape: ", U.shape, S.shape, Vt.shape)
raise e
# cutoff = S[d - 1] # S is returned as a vector, not a diagonal mat
if variant == 'robust':
raise NotImplementedError()
else:
            # standard FD shrinkage: sigma_i <- sqrt(sigma_i^2 - sigma_d^2);
            # S[-1] is the dth singular value. (The original computed
            # |S - S[-1]| instead.)
            S = np.sqrt(np.maximum(S ** 2 - S[-1] ** 2, 0))
# print("new S shape: ", S.shape)
# H = np.diag(S) @ Vt # d x D
H = Vt * S.reshape(-1, 1) # d x D; equivalent to np.diag(S) @ Vt
return H
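# Illustrative check of the FD covariance guarantee (new helper): with the
# standard shrinkage, the sketch H should keep ||A.T @ A - H.T @ H||_2 small,
# typically on the order of ||A||_F^2 / d. This just prints both quantities
# rather than asserting, since it is a sketch of the guarantee, not a test.
def _demo_frequent_directions_bound(N=200, D=40, d=16, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.randn(N, D).astype(np.float32)
    H = frequent_directions(A, d)
    err = np.linalg.norm(A.T @ A - H.T @ H, ord=2)
    ref = (np.linalg.norm(A) ** 2) / d
    print("spectral error: {:.3f}  reference ||A||_F^2 / d: {:.3f}".format(
        err, ref))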
def fast_frequent_directions(A, d, variant=None, alpha=.5):
N, D = A.shape
# H = np.zeros((d, D))
H = np.copy(A[:d])
assert N >= d
assert D >= d
cutoff_idx = int(d * (1 - alpha))
cutoff_idx = min(d - 1, cutoff_idx) # always zero at least last element
ntrailing_zeros = d - cutoff_idx
i = d
while i < N:
try:
U, S, Vt = np.linalg.svd(H, full_matrices=False) # d x d, d, d x D
except np.linalg.LinAlgError as e:
print("SVD failed at iter ", i - (d - 1))
print("H shape: ", H.shape)
print("A shape: ", A.shape)
print("d: ", d)
# print("svd mat shape: ", U.shape, S.shape, Vt.shape)
raise e
cutoff = S[cutoff_idx]
if variant == 'parametrized':
raise NotImplementedError()
else:
            # FD-style shrinkage against the cutoff singular value:
            # sigma_i <- sqrt(max(sigma_i^2 - cutoff^2, 0)), which zeroes
            # everything at or below cutoff_idx. (The original used the raw
            # difference and then re-applied a no-op |S - S[-1]| step, since
            # S[-1] is already 0 at that point.)
            S = np.sqrt(np.maximum(S ** 2 - cutoff ** 2, 0))
# print("new S shape: ", S.shape)
# H = np.diag(S) @ Vt # d x D
H = Vt * S.reshape(-1, 1) # d x D; equivalent to np.diag(S) @ Vt
# replace zeroed-out rows of H with next rows of A
end_dim = min(N, i + ntrailing_zeros)
nrows_to_copy = end_dim - i
end_row = cutoff_idx + nrows_to_copy
assert nrows_to_copy <= ntrailing_zeros
assert end_row <= d
H[-nrows_to_copy:] = A[i:end_dim]
i = end_dim
return H
def parametrized_fd_sketches(A, B, d):
# from "Improved Practical Matrix Sketching with Guarantees"
A_hat = fast_frequent_directions(A.T, d, variant='parametrized', alpha=.2)
B_hat = fast_frequent_directions(B.T, d, variant='parametrized', alpha=.2)
return A_hat.T, B_hat.T
def fd_amm_sketches(A, B, d):
# print("A shape: ", A.shape)
# print("B shape: ", B.shape)
G = np.hstack((A.T, B)) # D x (N + M)
H = frequent_directions(G, d)
assert H.shape == (d, A.shape[0] + B.shape[1])
C = H[:, :A.shape[0]] # d x N
D = H[:, A.shape[0]:] # d x M
return C.T, D
def fast_fd_amm_sketches(A, B, d):
# print("A shape: ", A.shape)
# print("B shape: ", B.shape)
G = np.hstack((A.T, B)) # D x (N + M)
H = fast_frequent_directions(G, d)
assert H.shape == (d, A.shape[0] + B.shape[1])
C = H[:, :A.shape[0]] # d x N
D = H[:, A.shape[0]:] # d x M
return C.T, D
def _nmultiplies_frequent_directions(N, D, d):
niters = N - d + 1
iter_svd_cost = _nmultiplies_svd(d, D)
iter_reweight_cost = d * D
iter_cost = iter_svd_cost + iter_reweight_cost
return niters * iter_cost
def _nmultiplies_fast_frequent_directions(N, D, d):
niters = int(np.ceil(N / d))
iter_svd_cost = _nmultiplies_svd(d, D)
iter_reweight_cost = d * D
iter_cost = iter_svd_cost + iter_reweight_cost
return niters * iter_cost
def _nmultiplies_fd_amm_sketches(N, D, M, d):
N, D = D, N + M # matrices get concatenated
return _nmultiplies_frequent_directions(N, D, d)
def test_fd_amm_sketches():
print("test_fd_amm_sketches")
N, M, D = 100, 80, 50
np.random.seed(1234)
A = np.random.randint(5, size=(N, D)).astype(np.float32)
B = np.random.randint(5, size=(D, M)).astype(np.float32)
# A -= np.mean(A)
# B -= np.mean(B)
AB = A @ B
orig_frob_sq = np.sum(AB * AB)
prev_normed_err = np.inf
for d in (1, 2, 4, 8, 16, 32):
A_hat, B_hat = fd_amm_sketches(A, B, d)
AB_hat = A_hat @ B_hat
diffs = AB - AB_hat
err_frob_sq = np.sum(diffs * diffs)
normed_err_sq = err_frob_sq / orig_frob_sq
print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
assert normed_err_sq < 1.05
assert normed_err_sq < prev_normed_err
prev_normed_err = normed_err_sq
# ================================================================ Co-occurring
def cooccur_sketches(A, B, d):
N, D = A.shape
B = B.T
M, _ = B.shape
assert B.shape[1] == D
# assert N >= d # not enough rows in specified A matrix
# assert M >= d # not enough cols in specified B matrix
# add new rows to A or B so that R from QR factorization is at least d x d
if N < d:
A_new = np.zeros((d, D), dtype=A.dtype)
A_new[:N] = A
A = A_new
if M < d:
B_new = np.zeros((d, D), dtype=B.dtype)
B_new[:M] = B
B = B_new
X = np.copy(A[:, :d]) # N x d
Y = np.copy(B[:, :d]) # M x d
# mid_idx = d - 2 # does this make it work better for large d? EDIT: nope
mid_idx = d // 2
ntrailing_zeros = d - mid_idx
i = d
while i < D:
Qx, Rx = np.linalg.qr(X) # N x d, d x d
Qy, Ry = np.linalg.qr(Y) # M x d, d x d
prod = Rx @ Ry.T # d x d
U, S, Vt = np.linalg.svd(prod, full_matrices=False) # d x d, d, d x d
cutoff = S[mid_idx]
S = np.sqrt(np.maximum(S - cutoff, 0))
# print("prod.shape", prod.shape)
# print("orig X.shape", X.shape)
# print("orig Y.shape", Y.shape)
X = Qx @ (U * S) # equivalent to U @ np.diag(S)
Y = Qy @ (Vt.T * S) # equivalent to Vt.T @ np.diag(S)
# print("X.shape", X.shape)
# print("Qx.shape", Qx.shape)
# print("U.shape", U.shape)
# replace zeroed-out cols of X and Y with new cols of A and B
end_dim = min(D, i + ntrailing_zeros)
ncols_to_copy = end_dim - i
end_col = mid_idx + ncols_to_copy
assert ncols_to_copy <= ntrailing_zeros
assert end_col <= d
X[:, mid_idx:end_col] = A[:, i:end_dim]
Y[:, mid_idx:end_col] = B[:, i:end_dim]
i = end_dim
return X[:N], Y[:M].T # slicing is because we may have zero-padded
def _nmultiplies_cooccur_sketches(N, D, M, d):
niters = int(np.ceil(D / d))
iter_qr_cost = _nmultiplies_qr(N, d) + _nmultiplies_qr(M, d)
iter_RRt_cost = d * d * d
iter_svd_cost = _nmultiplies_svd(d, d)
iter_reweight_cost = N * d + M * d
iter_update_x_y_cost = (N * d * d) + (M * d * d)
iter_cost = (iter_qr_cost + iter_RRt_cost + iter_svd_cost +
iter_reweight_cost + iter_update_x_y_cost)
return niters * iter_cost
def test_cooccur_sketches():
print("test_cooccur_sketches")
# so this doesn't have monotonically better acc as d increases; seems to
# run into issues with d being a large fraction of D, possibly because
# then it doesn't have many iterations and it's just zeroing out a ton of
# the singular vectors
N, M, D = 100, 80, 50
# N, M, D = 100, 80, 160
np.random.seed(1234)
A = np.random.randint(5, size=(N, D)).astype(np.float32)
B = np.random.randint(5, size=(D, M)).astype(np.float32)
A -= np.mean(A)
B -= np.mean(B)
AB = A @ B
orig_frob_sq = np.sum(AB * AB)
# prev_normed_err = np.inf
# for d in [4]:
for d in (2, 4, 8, 16, 32):
# A_hat, B_hat = fd_amm_sketches(A, B, d)
A_hat, B_hat = cooccur_sketches(A, B, d)
AB_hat = A_hat @ B_hat
# print("fused mats shapes: ")
# print(Ua.shape, SVTa.shape, Ub.shape, SVTb.shape)
diffs = AB - AB_hat
err_frob_sq = np.sum(diffs * diffs)
normed_err_sq = err_frob_sq / orig_frob_sq
print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
assert normed_err_sq < 1.
# assert normed_err_sq < prev_normed_err
# prev_normed_err = normed_err_sq
# ================================================================ main
# def main():
# pass
if __name__ == '__main__':
np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)})
# test_sketch_sq_sample()
# test_svd_sketches()
# test_fd_amm_sketches()
# test_cooccur_sketches()
test_rand_sketches()
# # N = 1000
# # N = 100
# N = 20
# # N = 10
# M = 10
# # M = 5
# x = np.arange(N)
# # x *= x
# # x *= x
# # x = np.sqrt(x)
# # x = 1.1 ** x
# # x = 1.15 ** x
# x = 2 ** x
# # print("x = ", x)
# # idxs, weights = sample_varopt_1d(x, M)
# idxs, weights = importance_sample(x, M)
# y = x[idxs] * weights
# xsum, ysum = x.sum(), y.sum()
# # print("idxs = ", idxs)
# print("vals = ", x[idxs])
# print("weights = ", weights)
# print("vals * weights", y)
# # print("true sum, sample sum: ", xsum, ysum)
# print("sum rel err: ", (xsum - ysum) / xsum)
|
#!/usr/bin/env python
import numpy as np
def energy(A):
if A.ndim < 2 or len(A) < 2:
return 0
diffs = A - A.mean(axis=0)
return np.sum(diffs * diffs)
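# Worked example (illustrative): for the two points [0, 0] and [2, 0], the
# mean is [1, 0] and each point is at squared distance 1 from it, so the
# energy (sum of squared distances to the mean) is 2.
def _demo_energy():
    pts = np.array([[0., 0.], [2., 0.]])
    print("energy of two points 2 apart:", energy(pts))  # expect 2.0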
def run_trial(N=100, D=3, seed=None):
if seed is not None:
np.random.seed(seed)
w0, w = np.random.randn(2, D)
X = np.random.randn(N, D)
X1 = X[(X @ w) > 0]
X2 = X[(X @ w) <= 0]
U = X[(X @ w0) > 0]
V = X[(X @ w0) <= 0]
U1 = U[(U @ w) > 0]
U2 = U[(U @ w) <= 0]
V1 = V[(V @ w) > 0]
V2 = V[(V @ w) <= 0]
energy_0 = energy(X)
energy_w = energy(X1) + energy(X2)
energy_w0 = energy(U) + energy(V)
energy_w0_w = energy(U1) + energy(U2) + energy(V1) + energy(V2)
gain1 = energy_0 - energy_w
gain2 = energy_w0 - energy_w0_w
if gain1 < gain2:
print("N, D, seed = ", N, D, seed)
print("energy_0:", energy_0)
print("energy_w:", energy_w)
print("energy_w0:", energy_w0)
print("energy_w0_w:", energy_w0_w)
print("gain1:", gain1)
print("gain2:", gain2)
print("w0:\n", w0)
print("w: \n", w)
# print("X\t({:.3f}):\n{}".format(energy(X), X))
# print("X1\t({:.3f}):\n{}".format(energy(X1), X1))
# print("X2\t({:.3f}):\n{}".format(energy(X2), X2))
# print("U\t({:.3f}):\n{}".format(energy(U), U))
# print("U1\t({:.3f}):\n{}".format(energy(U1), U1))
# print("U2\t({:.3f}):\n{}".format(energy(U2), U2))
# print("V\t({:.3f}):\n{}".format(energy(V), V))
# print("V1\t({:.3f}):\n{}".format(energy(V1), V1))
# print("V2\t({:.3f}):\n{}".format(energy(V2), V2))
print("X energy: \t{:.3f}".format(energy(X)))
print("X1 energy: \t{:.3f}".format(energy(X1)))
print("X2 energy: \t{:.3f}".format(energy(X2)))
print("U energy: \t{:.3f}".format(energy(U)))
print("U1 energy: \t{:.3f}".format(energy(U1)))
print("U2 energy: \t{:.3f}".format(energy(U2)))
print("V energy: \t{:.3f}".format(energy(V)))
print("V1 energy: \t{:.3f}".format(energy(V1)))
print("V2 energy: \t{:.3f}".format(energy(V2)))
if D == 2:
import matplotlib.pyplot as plt
_, axes = plt.subplots(2, 2, figsize=(7.5, 7))
# plt.scatter(X[:, 0], X[:, 1])
for ax in axes.ravel():
ax.set_xlim([-2.5, 2.5])
ax.set_ylim([-2.5, 2.5])
# ax.plot([0, w0[0]], [0, w0[1]])
# ax.plot([0, w[0]], [0, w[1]])
axes[0, 0].set_title("X")
axes[0, 0].scatter(X[:, 0], X[:, 1])
axes[0, 1].set_title("U and V (split on w0)")
axes[0, 1].plot([0, w0[0]], [0, w0[1]])
axes[0, 1].scatter(U[:, 0], U[:, 1])
axes[0, 1].scatter(V[:, 0], V[:, 1])
axes[1, 0].set_title("X1 and X2 (split on w)")
axes[1, 0].plot([0, w[0]], [0, w[1]])
axes[1, 0].scatter(X1[:, 0], X1[:, 1])
axes[1, 0].scatter(X2[:, 0], X2[:, 1])
axes[1, 1].set_title("U1, U2, V1, V2 (split on w0 and w)")
axes[1, 1].plot([0, w0[0]], [0, w0[1]])
axes[1, 1].plot([0, w[0]], [0, w[1]])
axes[1, 1].scatter(U1[:, 0], U1[:, 1])
axes[1, 1].scatter(U2[:, 0], U2[:, 1])
axes[1, 1].scatter(V1[:, 0], V1[:, 1])
axes[1, 1].scatter(V2[:, 0], V2[:, 1])
plt.tight_layout()
plt.show()
assert gain1 >= gain2
def main():
ntrials = 100
# for N in [4, 8, 16, 32, 64, 128, 256]:
for N in [64, 128, 256]:
# for D in [1, 2, 3, 5, 10, 100]:
for D in [100, 200]:
for trial in range(ntrials):
run_trial(N=N, D=D, seed=trial)
if __name__ == '__main__':
np.set_printoptions(precision=3)
main()
|
#!/usr/bin/env python
import collections
import os
import pathlib as pl

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb

# NOTE: the project's results-loading module, referenced as `res` below, is
# not imported in this excerpt; its exact module name isn't shown here, so
# the import is left out rather than guessed.
# from . import files
# from . import amm_methods as ameth
# sb.set_context('poster')
# sb.set_context('talk')
# sb.set_cmap('tab10')
FIGS_SAVE_DIR = pl.Path('../figs/amm')
USE_FONT = 'DejaVu Sans'
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.sans-serif'] = [USE_FONT]
# to avoid type3 fonts; 42 = truetype, which is more flexible than type1
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def fix_ticks():
# recover from seaborn white style messing this up
plt.rcParams['xtick.bottom'] = True
plt.rcParams['ytick.left'] = True
if not os.path.exists(FIGS_SAVE_DIR):
FIGS_SAVE_DIR.mkdir(parents=True)
def set_seaborn_style(stylename):
sb.set_style(stylename)
fix_ticks()
def save_fig(name):
# plt.savefig(os.path.join(FIGS_SAVE_DIR, name + '.png'),
# dpi=300, bbox_inches='tight')
plt.savefig(os.path.join(FIGS_SAVE_DIR, name + '.pdf'),
bbox_inches='tight')
def _xlabel_for_xmetric(x_metric):
return {'d': 'Sketch Size',
'secs': 'Time (s)',
'muls': 'Number of Multiplies',
'nlookups': 'Number of Lookups',
'ops': 'Number of Operations',
'Latency': 'Latency (ms)',
'Speedup': 'Speedup Over Exact Matrix Multiply',
'NormalizedTime': 'Normalized Latency',
'Throughput': 'Throughput (elements/s)'}[x_metric]
def _ylabel_for_xmetric(y_metric):
if y_metric == 'Relative Accuracy':
return 'Normalized\nAccuracy'
if y_metric == 'Accuracy':
return 'Classification\nAccuracy'
return y_metric
def add_ylabels_on_right(axes, fmt, vals):
for i, ax in enumerate(axes):
lbl = fmt.format(vals[i])
ax2 = ax.twinx()
ax2.get_xaxis().set_visible(False)
ax2.yaxis.set_label_position('right')
ax2.set_ylabel(lbl, fontsize=14, family=USE_FONT, labelpad=5)
sb.despine(ax=ax2, top=True, left=True, bottom=True, right=True)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.set_yticks([])
        ax2.tick_params(axis='y', which='both', length=0)  # 'both', not the axis name, is the valid `which` value
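# Minimal usage sketch (illustrative, not called anywhere): put right-hand
# row labels on a stack of axes the way the speed figures below do. The
# figure is closed immediately since this exists only to show the call.
def _demo_add_ylabels_on_right():
    fig, demo_axes = plt.subplots(3, 1, figsize=(4, 6))
    add_ylabels_on_right(demo_axes, "{}B Encodings", [8, 16, 32])
    plt.close(fig)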
def scan_speed_fig(save=True):
# ================================ data cleaning
df = res.scan_timings()
name_map = collections.OrderedDict()
# name_map['mithral scan'] = 'Mithral'
name_map['mithral scan'] = 'MADDNESS'
# name_map['mithral scan'] = 'Maddness'
# name_map['bolt scan uint8'] = 'Bolt\nCheating'
name_map['bolt scan safe uint16'] = 'Bolt'
name_map['popcount scan'] = 'Popcount'
name_map['pq scan'] = 'PQ / OPQ'
df = res.rename_values_in_col(df, 'algo', name_map)
df = res.melt_times(df)
# alright, can't get stds to show without really screwing with stuff
# times = np.array(df['time'])
# times += np.random.randn(len(df['time'])) * .1 # get 1px for stds
# # mask = df['algo'] == 'PQ / OPQ'
# mask = df['B'] == 64
# df['time'].loc[mask] = times[mask]
df['thruput'] = df['N'] * df['M'] / df['time']
df['thruput'] /= 1e6 # just use units of billions; times are in ms
# df['thruput'] *= 1e3 # times are in ms
# ================================ fig creation
sb.set_context("talk")
# fig, ax = plt.subplots(1, 1, figsize=(8, 5))
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
axes = [ax]
sb.barplot(data=df, x='algo', y='thruput', units='timing_trial',
hue='B', hue_order=[8, 16, 32, 64], order=name_map.values(),
ax=ax, ci='sd')
# ------------------------ clean up / format axes
for ax in axes[:-1]:
# remove x labels except for bottom axis
plt.setp(ax.get_xticklabels(), visible=False)
ax.get_xaxis().set_visible(False)
handles, labels = axes[0].get_legend_handles_labels()
labels = ['8B Codes', '16B Codes', '32B Codes', '64B Codes']
# labels = ['8 Bytes', '16 Bytes', '32 Bytes', '64 Bytes']
# labels = ['8B', '16B', '32B', '64B']
plt.figlegend(handles, labels, loc='lower center', ncol=4, fontsize=14)
for ax in axes:
ax.set_ylabel('Billion Dot Products/s', family=USE_FONT)
ax.get_legend().remove()
# ------------------------ have bottom / top axes print title, x info
axes[0].set_title('Speed of f() Functions for Different Encoding Sizes',
y=1.04, family=USE_FONT, fontsize=20)
# # get and set them again so we can make the first one bold; can't make
# # it bold beforehand because need a tick lbl object, not a string
# xlabels = list(axes[-1].get_xticklabels())
# xlabels[0].set_weight('bold')
# # axes[-1].set_xticklabels(xlabels, rotation=60, ha='right')
# axes[-1].set_xticklabels(xlabels)
axes[-1].tick_params(axis='x', which='major', pad=4)
axes[-1].set_xlabel("", labelpad=-30)
ax.xaxis.set_ticks_position('none')
# ------------------------ save / show plot
plt.tight_layout()
# plt.subplots_adjust(bottom=.21)
plt.subplots_adjust(bottom=.23)
if save:
save_fig('scan_speed')
else:
plt.show()
def encode_speed_fig(save=True):
# ================================ data cleaning
df = res.encode_timings()
df = df.loc[df['algo'] != 'mithral encode i16']
# print("df ours f32: ", df.loc[df['algo'].str.lower().str.strip() == 'mithral encode f32'])
# print("df ours f32: ", df.loc[df['algo'].str.lower().str.strip() == 'mithral encode i8'])
# print(df)
# # # print(df['B'])
# # # print(df['C'])
# import sys; sys.exit()
name_map = collections.OrderedDict()
# name_map['mithral encode i8'] = r'$\bf{Mithral}$ $\bf{i8}$')
# name_map['mithral encode i8'] = r'$\bf{Mithral}$ $\bf{i8}$')
# name_map['mithral encode i8'] = 'Mithral i8'
# name_map['mithral encode i16'] = 'Mithral i16' # no i16 in plot
# name_map['mithral encode f32'] = 'Mithral f32'
# name_map['mithral encode i8'] = 'MADDNESS i8'
# name_map['mithral encode f32'] = 'MADDNESS f32'
name_map['mithral encode f32'] = 'MADDNESS'
name_map['bolt encode'] = 'Bolt'
name_map['pq encode'] = 'PQ'
name_map['opq encode'] = 'OPQ'
df = res.rename_values_in_col(df, 'algo', name_map)
df = res.melt_times(df, ntimes=5)
order = 'MADDNESS Bolt PQ OPQ'.split()
# df['thruput'] = df['N'] * df['D'] / df['time']
# df['thruput'] = df['N'] / (df['time'] * .001) # rows/sec
time_secs = (df['time'] * .001)
df['elemsz'] = 4
df['elemsz'].loc[df['algo'].str.endswith('i8')] = 1
df['elemsz'].loc[df['algo'].str.endswith('i16')] = 2
df['thruput'] = df['N'] * df['D'] * df['elemsz'] / time_secs # GB/sec
df['thruput'] /= 1e9 # convert to GB
# df['thruput'] /= 1e6 # just use units of billions; times are in ms
# full_byte_per_codebook = df['algo'].isin(['PQ', 'OPQ'])
# df['B'] = df['C'].values / 2
# # cvals = df['C'].loc[full_byte_per_codebook]
# df['B'].loc[full_byte_per_codebook] = df['C'].loc[full_byte_per_codebook]
# df['B'] = df['B'].astype(np.int)
# # print("df.cols: ", df.columns)
# print(df)
# # # print(df['B'])
# # # print(df['C'])
# import sys; sys.exit()
# ================================ fig creation
sb.set_context('talk')
# sb.set_style('darkgrid')
# sb.set_style('white')
set_seaborn_style('white')
# use_nbytes = [8, 16, 32, 64]
use_nbytes = [8, 16, 32]
# fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 8), sharey=True)
# fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 6.5), sharey=True)
# fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 7), sharey=True)
fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 6.5), sharey=True)
for i, nbytes in enumerate(use_nbytes):
data = df.loc[df['B'] == nbytes]
# print("df.cols: ", df.columns)
# print(data)
# # # print(df['B'])
# # # print(df['C'])
# import sys; sys.exit()
order = name_map.values()
dashes = {name: ([] if name.lower().startswith('maddness') else
mpl.rcParams['lines.dashed_pattern'])
for name in order}
# dashes = None
# sb.lineplot(data=data, x='D', y='thruput', hue='algo',
# sb.lineplot(data=data, x='D', y='thruput', hue='algo', units='timing_trial',
sb.lineplot(data=data, x='D', y='thruput', hue='algo',
# ax=axes[i], ci='sd', estimator=None, hue_order=order,
ax=axes[i], ci='sd', estimator='mean', hue_order=order,
# ax=axes[i], ci=None, estimator='mean', hue_order=order,
style='algo', style_order=order, dashes=dashes,
palette=my_colors_list)
# import sys; sys.exit()
# ------------------------ axis cleanup
axes[0].set_title('Speed of g() Functions\nfor Different Encoding Sizes',
y=1.04, family=USE_FONT, fontsize=16)
handles, labels = axes[0].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm df column name
# plt.figlegend(handles, labels, loc='lower center', ncol=3, fontsize=13)
plt.figlegend(handles, labels, loc='lower center', ncol=4, fontsize=13)
for ax in axes:
# ax.semilogx()
ax.semilogy()
ax.set_ylim([.02, 1000])
# ax.set_yticks([.1, 1, 10, 100, 1000])
ax.set_yticks([.1, 10, 1000])
ax.get_legend().remove()
# ax.set_ylabel('Billions of\nScalars Encoded/s',
# ax.set_ylabel('Scalars Encoded/s\n(Billions)',
# ax.set_ylabel('Scalars Encoded\nper Second (Billions)',
# ax.set_ylabel('Scalars Encoded\nper Second',
# ax.set_ylabel('Scalars Encoded/s',
# ax.set_ylabel('Rows Encoded/s',
ax.set_ylabel('Encoding\nSpeed (GB/s)',
family=USE_FONT, fontsize=14)
for ax in axes[:-1]:
# remove x labels except for bottom axis
        ax.tick_params(axis='x', which='both', length=0)
plt.setp(ax.get_xticklabels(), visible=False)
ax.set_xlabel("", visible=False)
# ax.get_xaxis().set_visible(False)
# ax.get_xticklabels().set_visible(False)
axes[-1].set_xlabel('Number of Columns in Matrix A',
family=USE_FONT, fontsize=14)
# add byte counts on the right
add_ylabels_on_right(axes, "{}B Encodings", use_nbytes)
plt.tight_layout()
# plt.subplots_adjust(bottom=.18, hspace=.15)
# plt.subplots_adjust(bottom=.19, hspace=.15)
plt.subplots_adjust(bottom=.17, hspace=.15)
# plt.subplots_adjust(bottom=.21, hspace=.15)
if save:
save_fig('encode_speed')
else:
plt.show()
def lut_speed_fig(save=True):
# ================================ data cleaning
df = res.lut_timings()
name_map = collections.OrderedDict()
# name_map['mithral lut dense'] = '$\bf{Mithral}$'
# name_map['mithral lut sparse'] = '$\bf{Mithral}$'
name_map['mithral lut dense'] = 'MADDNESS'
name_map['mithral lut sparse'] = 'MADDNESS'
name_map['bolt lut'] = 'Bolt'
name_map['pq lut'] = 'PQ'
name_map['opq lut'] = 'OPQ'
df = res.rename_values_in_col(df, 'algo', name_map)
# print(df[:20])
# df['lutconst'] = df['lutconst'].str.strip().astype(np.float).astype(np.int)
# print("df.dtypes", df.dtypes)
# import sys; sys.exit()
names = list(df['algo'])
consts = np.array(df['lutconst'])
# print("len(names)", len(names))
# print("len(consts)", len(consts))
mithral_const_to_name = collections.OrderedDict()
mithral_const_to_name[-1] = 'MADDNESS, L = ∞'
mithral_const_to_name[4] = 'MADDNESS, L = 4'
mithral_const_to_name[2] = 'MADDNESS, L = 2'
mithral_names = list(mithral_const_to_name.values())
# add lut constant into the name for mithral variations
new_names = []
ismithral = []
for i, name in enumerate(names):
        # names were already mapped to 'MADDNESS' by name_map above, so match
        # on that (the original still checked the old 'Mithral' prefix)
        if not name.startswith('MADDNESS'):
new_names.append(name)
ismithral.append(False)
continue
# const = consts[i]
# const = "{:d}".format(int(const)) if const > 0 else "∞"
# new_names.append(f"{name}, L = {const}")
new_names.append(mithral_const_to_name[int(consts[i])])
ismithral.append(True)
# print("len(new_names)", len(new_names))
df['algo'] = new_names
df['ismithral'] = ismithral
df = res.melt_times(df, ntimes=5)
# df = res.melt_times(df, ntimes=3) # TODO rerun with ntrials=5
# print(df)
df['thruput'] = df['N'] * df['D'] / df['time']
# df['thruput'] /= 1e6 # just use units of billions; times are in ms
# # TODO rm once we have updated results
# mask = df['algo'].isin(('PQ', 'OPQ'))
# df['B'] = -1 # create placeholder col
# df['B'].loc[mask] = df['C'].loc[mask]
# df['B'].loc[~mask] = df['C'].loc[~mask] / 2
# ================================ fig creation
sb.set_context('talk')
# sb.set_style('darkgrid')
# sb.set_style('white')
set_seaborn_style('white')
# use_nbytes = [8, 16, 32, 64]
use_nbytes = [8, 16, 32]
fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 8), sharey=True)
order = [mithral_names[2], 'Bolt',
mithral_names[1], 'PQ',
mithral_names[0], 'OPQ']
dashes = {k: ('-' if k in mithral_names else '--') for k in order}
# dashes = {k: ('solid' if k in mithral_names else 'dashed') for k in order}
# dashes = {k: (None if k in mithral_names else [3, 3]) for k in order}
# dashes = True
# print(dashes)
# import sys; sys.exit()
for i, nbytes in enumerate(use_nbytes):
data = df.loc[df['B'] == nbytes]
ax = axes[i]
# print(f"------------------------ {nbytes}B")
# manual version
# for algo in order:
# subdf = data.loc[df['algo'] == algo]
# print("plotting algo: ", algo)
# x = subdf['D'].as_matrix()
# y = subdf['thruput'].as_matrix()
# sort_idxs = np.argsort(x)
# x, y = x[sort_idxs], y[sort_idxs]
# ax.plot(x, y, dashes[algo], label=algo)
        # labels in `order` start with 'MADDNESS', not 'Mithral', after the
        # renaming above
        dashes = {name: ([] if name.lower().startswith('maddness') else
                         mpl.rcParams['lines.dashed_pattern'])
                  for name in order}
sb.lineplot(data=data, x='D', y='thruput', hue='algo',
units='timing_trial', ax=axes[i], ci='sd',
estimator=None, hue_order=order,
style='algo', style_order=order, dashes=dashes)
# sb.lineplot(data=data, x='D', y='thruput', hue='algo', units='timing_trial',
# hue_order=order,
# # hue_order=order, style='algo', style_order=order,
# # dashes=True,
# style='ismithral', style_order=[True, False], dashes=True,
# ax=axes[i], ci='sd', estimator=None)
# ------------------------ axis cleanup
axes[0].set_title('Speed of h() Functions\nfor Different Encoding Sizes',
y=1.04, family=USE_FONT, fontsize=18)
# for ax in axes:
# print("ax handles, labels: ")
# print(ax.get_legend_handles_labels())
handles, labels = axes[-1].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm df column name
# handles, labels = handles[:-3], labels[:-3] # rm ismithral
plt.figlegend(handles, labels, loc='lower center', ncol=3, fontsize=13)
for ax in axes:
# ax.semilogx()
ax.semilogy()
ax.get_legend().remove()
ax.set_ylabel('Scalars Encoded/s',
family=USE_FONT, fontsize=14)
for ax in axes[:-1]:
# remove x labels except for bottom axis
        ax.tick_params(axis='x', which='both', length=0)
plt.setp(ax.get_xticklabels(), visible=False)
ax.set_xlabel("", visible=False)
axes[-1].set_xlabel('Number of Rows in Matrix B',
family=USE_FONT, fontsize=14)
# add byte counts on the right
add_ylabels_on_right(axes, "{}B Encodings", use_nbytes)
plt.tight_layout()
plt.subplots_adjust(bottom=.18, hspace=.15)
if save:
save_fig('lut_speed')
else:
plt.show()
def lotsa_colors_cmap(value):
assert 0 <= value <= 1 # if this throws, I don't understand cmaps
if value < .3333:
return plt.get_cmap('tab20')(3 * value)
elif value < .6666:
return plt.get_cmap('tab20b')((3 * value) - 1)
else:
return plt.get_cmap('tab20c')((3 * value) - 2)
# def my_tab10(value):
# assert 0 <= value <= 1
# value = int(value * 10)
# perm = [3, 1, 2, 4, 5, 6, 7, 8, 9] # make red first, then orange
# value = perm[value]
# return plt.get_cmap('tab10')((value / 10.) + .01)
# def my_cmap(value):
my_colors_list = (plt.get_cmap('Set1').colors
+ plt.get_cmap('Set3').colors[:1] # skip light yellow
+ plt.get_cmap('Set3').colors[2:]
+ plt.get_cmap('Dark2').colors[:6])
# my_colors_list = my_colors_list[:5] + () my_colors_list[6:] # rm bright yellow
# new_yellow = (240./255, 230./255, 140./255)
new_yellow = (204. / 255, 204. / 255, 0. / 255)
# print(type(my_colors_list))
# print(my_colors_list)
my_colors_list = my_colors_list[:5] + (new_yellow,) + my_colors_list[6:]
# print(type(my_colors_list))
# print(my_colors_list)
# import sys; sys.exit()
# DEFAULT_PLOT_METHODS = ('Mithral', 'MithralPQ', 'Brute Force', 'Bolt',
# DEFAULT_PLOT_METHODS = ('MADDNESS', 'MADDNESS-PQ', 'Exact', 'Bolt',
# 'FastJL', 'HashJL', 'OSNAP', 'PCA', 'SparsePCA',
# 'Rademacher', 'RandGauss', 'OrthoGauss')
DEFAULT_PLOT_METHODS = (
'MADDNESS', 'MADDNESS-PQ', 'Exact', 'ScalarQuantize', 'Bolt',
# 'MADDNESS', 'Exact', 'ScalarQuantize', 'Bolt',
# 'FastJL', 'HashJL', 'PCA', 'RandGauss', 'SparsePCA')
'FastJL', 'HashJL', 'PCA', 'SparsePCA')
# 'FastJL', 'HashJL', 'PCA', 'SparsePCA')
# 'MADDNESS', 'MADDNESS-PQ', 'Exact', 'Bolt',
# 'FastJL', 'HashJL', 'PCA', 'RandGauss', 'SparsePCA')
def lineplot(data, ax, x_metric, y_metric, units=None, scatter=False,
# plot_methods=None):
plot_methods=DEFAULT_PLOT_METHODS, first_two_same_marker=True,
**kwargs):
estimator = 'mean' if units is None else None
if plot_methods is not None:
data = data.loc[data['method'].isin(set(plot_methods))]
order = plot_methods
else:
# order = 'Ours Bolt Exact PQ SVD FD-AMM CD'.split()
# order = [m for m in order if m in data['Method'].unique()]
order = list(data['method'].unique())
# move_methods_to_front = ['Ours', 'OursPQ', 'Brute Force']
# move_methods_to_front = ['Mithral', 'MithralPQ', 'Brute Force']
mithral_methods = [method for method in order
# if method.lower().startswith('mithral')][::-1]
if method.lower().startswith('maddness')][::-1]
move_methods_to_front = mithral_methods[:]
# move_methods_to_front.append('Brute Force')
move_methods_to_front.append('Exact')
for elem in move_methods_to_front[:]:
if elem in order:
order.remove(elem)
else:
move_methods_to_front.remove(elem)
order = move_methods_to_front + sorted(order)
order = [method for method in order if method in data['method'].unique()]
# order = plot_methods
# order = list(data['method'].unique())
# have to specify markers or seaborn freaks out because it doesn't
# have enough of them
# filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h',
# 'H', 'D', 'd', 'P', 'X')
# use_markers = ('*', '*', 's') + (
initial_markers = ('D', 'D', 's') if first_two_same_marker else ('D', 's')
use_markers = initial_markers + (
'o', 'v', '^', '<', '>', '8', 'p', 'h', 'd', 'P', 'X', '*', 'D')
if scatter:
# sb.violinplot(cut=0, saturation=1, linewidth=.001, scale='width', inner='box',
# data['Speedup'] *= 1 + (np.random.randn(len(data['Speedup'])) / 100)
sb.scatterplot(alpha=.25, # seems to suck the least
data=data, x=x_metric, y=y_metric, hue='method',
style='method', style_order=order, hue_order=order,
markers=use_markers, estimator=estimator,
# units=units, estimator=estimator, markers=use_markers,
palette=my_colors_list, ax=ax)
# sb.boxplot(linewidth=.1, width=2, whis=999,
# sb.stripplot(alpha=.25, orient='v', jitter=False,
# data=data, x=x_metric, y=y_metric, hue='method', hue_order=order,
# palette=my_colors_list, ax=ax)
return
kwargs.setdefault('ci', 'sd')
sb.lineplot(data=data, x=x_metric, y=y_metric, hue='method',
# style='method', style_order=order[::-1], hue_order=order[::-1],
style='method', style_order=order, hue_order=order,
markers=use_markers, estimator=estimator,
# units=units, estimator=estimator, markers=use_markers,
dashes=False, palette=my_colors_list, ax=ax, **kwargs)
lines = ax.get_lines()
for i, line in enumerate(lines):
line.set_zorder(10 - i)
# def cifar_fig(save=False, x_metric='Throughput', y_metric='Accuracy'):
def cifar_fig(save=False, x_metric='Speedup', y_metric='Accuracy'):
df10 = res.cifar10_amm()
df100 = res.cifar100_amm()
sb.set_context('poster')
# fig, axes = plt.subplots(2, 1, figsize=(11, 13.5), sharex=True)
# fig, axes = plt.subplots(2, 1, figsize=(11, 10), sharex=True)
fig, axes = plt.subplots(2, 1, figsize=(11, 8.5), sharex=True)
# plot_methods = ['Mithral', 'MithralPQ', 'Brute Force', 'Bolt',
# plot_methods = ['MADDNESS', 'MADDNESS-PQ', 'Exact', 'Bolt',
# 'FastJL', 'HashJL', 'OSNAP', 'PCA', 'SparsePCA',
# 'Rademacher', 'RandGauss', 'OrthoGauss']
# # df10 = df10.loc[df10['method'].isin(set(plot_methods))]
# df100 = df100.loc[df100['method'].isin(set(plot_methods))]
# df10 = df10.loc[df10['method'] != 'OrthoGauss']
# df100 = df100.loc[df100['method'] != 'OrthoGauss']
lineplot(df10, axes[0], x_metric=x_metric, y_metric=y_metric)
lineplot(df100, axes[1], x_metric=x_metric, y_metric=y_metric)
# plt.suptitle('Sketch size vs Classification Accuracy')
xlbl = _xlabel_for_xmetric(x_metric)
# plt.suptitle('{} vs {}'.format(xlbl, y_metric))
plt.suptitle('Approximating Softmax Classifiers', family=USE_FONT)
axes[0].set_title('CIFAR-10', family=USE_FONT)
for ax in axes:
ax.set_ylabel(_ylabel_for_xmetric(y_metric), family=USE_FONT)
axes[0].set_xlabel(None)
axes[1].set_xlabel(xlbl, family=USE_FONT)
axes[1].set_title('CIFAR-100', family=USE_FONT)
handles, labels = axes[0].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm 'Method' title
# axes[0].legend(handles, labels, fontsize='small')
# axes[1].legend(handles, labels, fontsize='small')
# plt.figlegend(handles, labels, loc='lower center', ncol=1)
# plt.figlegend(handles, labels, loc='center right', ncol=1)
for ax in axes.ravel():
ax.get_legend().remove()
if y_metric == 'Accuracy':
axes[0].set_ylim([.09, .96])
axes[1].set_ylim([.009, .73])
elif y_metric == '1 - NMSE':
axes[0].set_ylim([0, 1.02])
axes[1].set_ylim([0, 1.02])
# axes[1].get_legend().remove()
# axes[1].get_legend().remove()
plt.figlegend(handles, labels, loc='lower center', ncol=3)
# if x_metric in ('muls', 'ops', 'nlookups', 'Latency', 'Throughput'):
axes[0].semilogx()
for ax in axes:
if x_metric == 'Speedup':
ax.set_xlim([.94, ax.get_xlim()[1]])
elif x_metric == 'NormalizedTime':
ax.set_xlim([ax.get_xlim()[0], 1.06])
plt.tight_layout()
# plt.subplots_adjust(top=.91, bottom=.24)
plt.subplots_adjust(top=.89, bottom=.32)
# plt.subplots_adjust(top=.95, bottom=.1)
save_fig('cifar_{}_{}'.format(x_metric, y_metric))
# save_fig('cifar_{}_{}_no_maddnesspq'.format(x_metric, y_metric))
def fig1(save=False, x_metric='Speedup', y_metric='Accuracy'):
df10 = res.cifar10_amm()
df100 = res.cifar100_amm()
sb.set_context('poster')
fig, axes = plt.subplots(2, 1, figsize=(11, 10), sharex=True)
# df10['method'] = df10['method'].str.replace('Mithral', 'HashMul')
# replace_names_dict = {'Mithral': 'Ours',
replace_names_dict = {'MADDNESS': 'Ours',
# 'SparsePCA': '2nd best (Mairal et al.)',
# 'HashJL': '3rd best (Dasgupta et al.)',
'SparsePCA': 'Mairal et al.',
'HashJL': 'Dasgupta et al.',
'Exact': 'Exact Matrix Multiply'
}
# print("--- about to run the rename we care about")
df10 = res.rename_values_in_col(df10, 'method', replace_names_dict)
df100 = res.rename_values_in_col(df100, 'method', replace_names_dict)
# df10['method'] = df10['method'].str.replace(replace_names_dict)
# df100['method'] = df100['method'].str.replace(replace_names_dict)
# print('df10 methods: ', df10['method'].unique())
# import sys; sys.exit()
# plot_methods = ['Ours', '2nd best', '3rd best', 'Exact Matrix Multiply']
# plot_methods = ['Ours', 'Mairal et al.', 'Dasgupta et al.', 'Exact Matrix Multiply']
plot_methods = ['Ours', 'Exact Matrix Multiply', 'Mairal et al.', 'Dasgupta et al.']
# plot_methods = ['Ours', '3rd best', '2nd best', 'Exact Matrix Multiply']
# plot_methods = ['Mithral', 'SparsePCA', 'HashJL', 'Brute Force']
# df10 = df10.loc[df10['method'].isin(set(plot_methods))]
# df100 = df100.loc[df100['method'].isin(set(plot_methods))]
# df10 = df10.loc[df10['method'] != 'OrthoGauss']
# df100 = df100.loc[df100['method'] != 'OrthoGauss']
lineplot(df10, axes[0], x_metric=x_metric, y_metric=y_metric,
plot_methods=plot_methods, ci=None, first_two_same_marker=False)
lineplot(df100, axes[1], x_metric=x_metric, y_metric=y_metric,
plot_methods=plot_methods, ci=None, first_two_same_marker=False)
# plt.suptitle('Sketch size vs Classification Accuracy')
xlbl = _xlabel_for_xmetric(x_metric)
# plt.suptitle('{} vs {}'.format(xlbl, y_metric))
plt.suptitle('Approximating Softmax Classifiers', family=USE_FONT)
axes[0].set_title('CIFAR-10', family=USE_FONT)
for ax in axes:
ax.set_ylabel(_ylabel_for_xmetric(y_metric), family=USE_FONT)
axes[0].set_xlabel(None)
axes[1].set_xlabel(xlbl, family=USE_FONT)
axes[1].set_title('CIFAR-100', family=USE_FONT)
handles, labels = axes[0].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm 'Method' title
# axes[0].legend(handles, labels, fontsize='small')
# axes[1].legend(handles, labels, fontsize='small')
# plt.figlegend(handles, labels, loc='lower center', ncol=1)
# plt.figlegend(handles, labels, loc='center right', ncol=1)
for ax in axes.ravel():
ax.get_legend().remove()
if y_metric == 'Accuracy':
axes[0].set_ylim([.09, .96])
axes[1].set_ylim([.009, .73])
elif y_metric == '1 - NMSE':
axes[0].set_ylim([0, 1.02])
axes[1].set_ylim([0, 1.02])
# axes[1].get_legend().remove()
# axes[1].get_legend().remove()
plt.figlegend(handles, labels, loc='lower center', ncol=2)
# if x_metric in ('muls', 'ops', 'nlookups', 'Latency', 'Throughput'):
axes[0].semilogx()
for ax in axes:
if x_metric == 'Speedup':
ax.set_xlim([.94, ax.get_xlim()[1]])
elif x_metric == 'NormalizedTime':
ax.set_xlim([ax.get_xlim()[0], 1.06])
plt.tight_layout()
plt.subplots_adjust(top=.89, bottom=.23)
save_fig('fig1')
def caltech_fig(x_metric='Speedup', y_metric='1 - NMSE'):
# df = res.caltech_amm()
# df = res.caltech_amm()
df0 = res.caltech_amm(filt='sobel')
df1 = res.caltech_amm(filt='dog5x5')
# print("df cols: ", df.columns)
sb.set_context('poster')
# fig, ax = plt.subplots(1, 1, figsize=(11, 6))
fig, axes = plt.subplots(2, 1, figsize=(12, 8))
# axes = [ax]
# is_mithral = df['method'].str.startswith('Mithral')
# is_exact = df['method'] == 'Brute Force'
# others_to_keep = df['method'].isin(['Brute Force', 'PCA', 'SparsePCA'])
# others_to_keep = df['method'].isin(['PCA', 'SparsePCA'])
# df = df.loc[is_mithral | others_to_keep] # others suck too hard
# df = df.loc[~(df['method'].isin(['Mithral, L = 2', 'Mithral, L = 4']))]
# df['method'].loc[df['method'] == 'Mithral, L = ∞'] = 'Mithral'
# print("df0 uniq methods: ", df0['method'].unique())
# print("df1 uniq methods: ", df1['method'].unique())
# import sys; sys.exit()
# keep_methods = ['Mithral', 'MithralPQ', 'SparsePCA', 'PCA', 'OSNAP']
# keep_methods = ['Mithral', 'MithralPQ', 'SparsePCA', 'PCA', 'HashJL', 'OSNAP', 'FastJL']
# keep_methods = ['Mithral', 'MithralPQ', 'SparsePCA', 'PCA']
# keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'SparsePCA', 'PCA']
# even scalar quantize is slower than custom exact matmul; note that
# in the 5x5 plot, it's occluded by maddness (near perfect mse, but
# slightly to the left of 1x speedup)
# keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'ScalarQuantize', 'SparsePCA']
keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'SparsePCA']
df0 = df0.loc[df0['method'].isin(keep_methods)]
df1 = df1.loc[df1['method'].isin(keep_methods)]
# print("df0 kept methods: ", df0['method'].unique())
# print("df1 kept methods: ", df1['method'].unique())
# print("df1 scalar quantize numbers: ", df1.loc[df1['method'] == 'ScalarQuantize'])
# import sys; sys.exit()
# print("df1:\n", df1.loc[(df1['method'] == 'MithralPQ') & df1['task_id'].str.contains('509')])
# import sys; sys.exit()
# lineplot(df, ax, x_metric=x_metric, y_metric=y_metric, units=None)
lineplot(df0, axes[0], x_metric=x_metric, y_metric=y_metric,
plot_methods=keep_methods)
lineplot(df1, axes[1], x_metric=x_metric, y_metric=y_metric,
plot_methods=keep_methods)
handles, labels = axes[-1].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm 'Method' title
# plt.figlegend(handles, labels, loc='lower center', ncol=2)
# plt.figlegend(handles, labels, loc='lower center', ncol=4)
plt.figlegend(handles, labels, loc='lower center', ncol=len(keep_methods))
# plt.suptitle('Approximating an Image Filter')
for ax in axes:
ax.set_xlabel(_xlabel_for_xmetric(x_metric), fontsize=20)
ax.set_ylabel(y_metric)
ax.get_legend().remove()
ax.set_ylim([-.01, 1.01])
ax.plot([1, 1], ax.get_ylim(), 'k--')
# for ax in axes[:-1]:
# # remove x labels except for bottom axis
# plt.setp(ax.get_xticklabels(), visible=False)
# ax.get_xaxis().set_visible(False)
axes[0].set_title('Approximating a Sobel Filter', y=1.02, fontsize=28)
axes[1].set_title('Approximating a Gaussian Filter', y=1.02, fontsize=28)
# plt.subplots_adjust(top=.91, bottom=.37)
plt.tight_layout()
# plt.subplots_adjust(bottom=.26, hspace=.72) # with ncol=2
plt.subplots_adjust(bottom=.22, hspace=.7) # with ncol=2
# plt.subplots_adjust(top=.95, bottom=.1)
save_fig('caltech_{}_{}'.format(x_metric, '1 - NMSE'))
# save_fig('caltech_sobel_{}_{}'.format(x_metric, '1 - NMSE'))
# save_fig('caltech_dog_{}_{}'.format(x_metric, '1 - NMSE'))
# def ucr_fig(x_metric='Speedup', y_metric='Accuracy'):
# def ucr_fig(x_metric='Speedup', y_metric='Change in Accuracy'):
def ucr_fig(x_metric='Speedup', y_metric='Relative Accuracy'):
# df = res.ucr_amm()
# df = res.ucr_amm(k=64)
# df = res.ucr_amm(k=128)
# df = res.ucr_amm(k=256)
df0 = res.ucr_amm(k=64)
df1 = res.ucr_amm(k=128)
df2 = res.ucr_amm(k=256)
sb.set_context('poster')
# fig, ax = plt.subplots(1, 1, figsize=(11, 8))
fig, axes = plt.subplots(3, 1, figsize=(12, 13), sharex=True)
# axes = [ax]
# df = df.loc[df['task_id'].str.lower().str.contains('starlight')]
# df = df.loc[df['method'] == 'Mithral']
# # df = df.loc[df['method'] == 'MithralPQ']
# # df = df.loc[df['ncodebooks'] == 4]
# df = df['Accuracy acc_orig acc_orig_1nn ncodebooks method task_id'.split() + ['Relative Accuracy']]
# df.reset_index(inplace=True, drop=True)
# print(df)
# import sys; sys.exit()
# df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
# print("uniq N, D, M: ")
# print(df['N'].unique())
# print(df['D'].unique())
# print(df['M'].unique())
# df_brute = df.loc[df['method'] == 'Brute Force']
# print("uniq times from brute force: ", df_brute['time'].unique())
# print("df Brute:\n", df_brute['N D M method normalized_mse Accuracy time'.split()])
# import sys; sys.exit()
# df['acc']
# # TODO put in results cleaning after debug
# if 'Accuracy' in df.columns:
# # df['Relative Accuracy'] = df['Accuracy'] / (df['acc_orig'] + 1e-20)
# # # note that relative accuracy can actually be higher if errors
# # # happen to compensate for incorrect classification sometimes
# # print("max relative acc: ", df['Relative Accuracy'].values.max())
# # # assert df['Relative Accuracy'].values.max() <= 1.000001
# # acc_orig field is supposed to capture this, but I messed it up for
# # 1nn so this will also work
# tid2acc = {}
# exactdf = df.loc[df['method'] == 'Brute Force']
# for tid in df['task_id'].unique():
# subdf = exactdf.loc[exactdf['task_id'] == tid]
# if subdf.shape[0] != 1:
# print(f"tid = {tid} gives bad subdf:\n", subdf)
# tid2acc[tid] = subdf['Accuracy'].values[0]
# df['BaseAccuracy'] = [tid2acc[tid] for tid in df['task_id']]
# df['Relative Accuracy'] = df['Accuracy'] / df['BaseAccuracy']
# df = df.loc[~(df['method'].isin(['Mithral, L = 2', 'Mithral, L = 4']))]
# # df['method'].loc[df['method'] == 'Mithral, L = ∞'] = 'Mithral'
# df0 = df0.loc[df0['method'] != 'Brute Force']
# df1 = df1.loc[df1['method'] != 'Brute Force']
# df2 = df2.loc[df2['method'] != 'Brute Force']
# print(df.columns)
# import sys; sys.exit()
def clean_df(df):
df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
# is_mithral = df['method'].str.startswith('Mithral')
# is_mithral = df['method'] == 'Mithral'
is_mithral = df['method'] == 'MADDNESS'
# # is_exact = df['method'] == 'Brute Force'
others_to_keep = df['method'].isin([
'PCA', 'SparsePCA', 'Bolt', 'HashJL', 'OSNAP'])
# others_to_keep = df['method'].isin(['PCA', 'SparsePCA'])
return df.loc[is_mithral | others_to_keep]
df0 = clean_df(df0)
df1 = clean_df(df1)
df2 = clean_df(df2)
# df = df.loc[df['method'] == 'Brute Force']
# df['not_mse'] = 1. - df['normalized_mse']
# df = df.loc[df['not_mse'] < 2]
lineplot(df0, axes[0], x_metric=x_metric, y_metric=y_metric, scatter=True)
lineplot(df1, axes[1], x_metric=x_metric, y_metric=y_metric, scatter=True)
lineplot(df2, axes[2], x_metric=x_metric, y_metric=y_metric, scatter=True)
plt.suptitle('Approximating an RBF Kernel Classifier')
axes[-1].set_xlabel(_xlabel_for_xmetric(x_metric))
# ax.set_ylabel('1. - NMSE')
handles, labels = axes[-1].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm 'Method' title
plt.figlegend(handles, labels, loc='lower center', ncol=3)
for ax in axes:
ax.set_ylabel(_ylabel_for_xmetric(y_metric))
ax.get_legend().remove()
ax.semilogx()
ax.set_xlim([.9, ax.get_xlim()[1]])
# ax.set_ylim([.2, 1.1])
# plt.plot([1, 1], ax.get_ylim(), 'k--')
plt.tight_layout()
plt.subplots_adjust(top=.94, bottom=.25)
# plt.subplots_adjust(top=.95, bottom=.1)
save_fig('ucr_{}_{}'.format(x_metric, y_metric))
def ucr_fig2(x_metric='Speedup', y_metric='Relative Accuracy',
# problem='softmax'):
problem='rbf'):
# df0 = res.ucr_amm(k=64)
# df1 = res.ucr_amm(k=128)
# df2 = res.ucr_amm(k=256)
df = res.ucr_amm(k=128, problem=problem)
sb.set_context('poster')
# fig, axes = plt.subplots(3, 1, figsize=(12, 13), sharex=True)
fig, axes = plt.subplots(3, 1, figsize=(12, 12), sharex=True)
# df = res.ucr_amm(k=128, problem='rbf')
# df_bolt = df.loc[df['method'] == 'Bolt']
# print("number of uniq bolt speedups:")
# print(df_bolt['Speedup'].unique().size)
# import sys; sys.exit()
def clean_df(df):
df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
return df
# # is_mithral = df['method'].str.startswith('Mithral')
# is_mithral = df['method'] == 'Mithral'
# # # is_exact = df['method'] == 'Brute Force'
# others_to_keep = df['method'].isin([
# 'PCA', 'SparsePCA', 'Bolt', 'HashJL', 'OSNAP'])
# # others_to_keep = df['method'].isin(['PCA', 'SparsePCA'])
# return df.loc[is_mithral | others_to_keep]
def frac_above_thresh(df, thresh):
return res.frac_above_thresh(
df, x_metric, y_metric, 'method', 'task_id', thresh)
df = clean_df(df)
# df0['frac_above_thresh'] = frac_above_thresh(df, .5)
# df_bolt = df.loc[df['method'] == 'Bolt']
# print("number of uniq bolt speedups:")
# print(df_bolt['Speedup'].unique().size)
# import sys; sys.exit()
# df = df.loc[df['method'] == 'SparsePCA']
# print(df.groupby('task_id')['Speedup'].count())
# import sys; sys.exit()
y_frac_thresholds = [.5, .75, .95]
df0 = frac_above_thresh(df, y_frac_thresholds[0])
df1 = frac_above_thresh(df, y_frac_thresholds[1])
df2 = frac_above_thresh(df, y_frac_thresholds[2])
# # print(df0['frac_above_thresh'])
# print(df0)
# # for row in df0.iterrows():
# # print(row)
# # print(df0.unstack(0))
# print("df cols: ", df.columns)
# print("df0 cols: ", df0.columns)
# print("uniq methods: ", df['method'].unique())
# df = df.loc[df['method'] == 'Brute Force']
# df['not_mse'] = 1. - df['normalized_mse']
# df = df.loc[df['not_mse'] < 2]
ycol = 'frac_above_thresh'
lineplot(df0, axes[0], x_metric=x_metric, y_metric=ycol, scatter=False)
lineplot(df1, axes[1], x_metric=x_metric, y_metric=ycol, scatter=False)
lineplot(df2, axes[2], x_metric=x_metric, y_metric=ycol, scatter=False)
kind = 'a Softmax' if problem == 'softmax' else 'an RBF Kernel'
plt.suptitle(f'Approximating {kind} Classifier')
axes[-1].set_xlabel(_xlabel_for_xmetric(x_metric))
# ax.set_ylabel('1. - NMSE')
handles, labels = axes[-1].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm 'Method' title
plt.figlegend(handles, labels, loc='lower center', ncol=3)
for i, ax in enumerate(axes):
# ax.set_ylabel(_ylabel_for_xmetric(y_metric))
# ax.set_ylabel("Fraction of Datasets\nWith Relative Acc > "
# f"{y_frac_thresholds[i]}")
# ax.set_ylabel(f"Fraction with Relative\nAccuracy> {y_frac_thresholds[i]}")
ax.set_ylabel(f"Fraction > {y_frac_thresholds[i]}")
ax.get_legend().remove()
ax.semilogx()
ax.set_xlim([.9, ax.get_xlim()[1]])
ax.set_ylim([0, 1.03])
# ax.set_ylim([.2, 1.1])
# plt.plot([1, 1], ax.get_ylim(), 'k--')
plt.tight_layout()
# plt.subplots_adjust(top=.94, bottom=.25)
plt.subplots_adjust(top=.94, bottom=.22)
# plt.subplots_adjust(top=.95, bottom=.1)
save_fig('ucr2_{}_{}_{}'.format(x_metric, y_metric, problem))
def main():
scan_speed_fig()
encode_speed_fig()
lut_speed_fig()
fig1()
ucr_fig2()
caltech_fig()
# caltech_fig(y_metric='1 - NMSE')
# caltech_fig(x_metric='ops', y_metric='1 - NMSE')
cifar_fig()
# cifar_fig(y_metric='1 - NMSE')
# cifar_fig(x_metric='ops')
# cifar_fig(x_metric='ops', y_metric='1 - NMSE')
# ucr_fig2(x_metric='ops', y_metric='1 - NMSE')
# ucr_fig2(x_metric='ops')
# cifar_fig(y_metric='1 - NMSE')
# ucr_fig2()
# ucr_fig2(y_metric='1 - NMSE')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# NOTE: assumes `amm` and `vq_amm` are sibling modules in this package; their
# estimator classes are referenced in METHOD_TO_ESTIMATOR below
from . import amm
from . import vq_amm
METHOD_EXACT = 'Exact'
METHOD_SCALAR_QUANTIZE = 'ScalarQuantize'
METHOD_SKETCH_SQ_SAMPLE = 'SketchSqSample'
METHOD_SVD = 'SVD' # truncated SVD run on the matrix at test time
METHOD_FD_AMM = 'FD-AMM'
METHOD_COOCCUR = 'CooccurSketch'
METHOD_PCA = 'PCA' # PCA projection, with PCA basis learned at train time
METHOD_SPARSE_PCA = 'SparsePCA' # like above, using sklearn SparsePCA
METHOD_RANDGAUSS = 'RandGauss'
METHOD_ORTHOGAUSS = 'OrthoGauss'
METHOD_HADAMARD = 'Hadamard'
METHOD_RADEMACHER = 'Rademacher'
METHOD_FASTJL = 'FastJL'
METHOD_HASHJL = 'HashJL'
METHOD_OSNAP = 'OSNAP'
METHOD_OPQ = 'OPQ'
METHOD_BOLT = 'Bolt'
METHOD_BOLT_PERM = 'Bolt+Perm'
METHOD_BOLT_CORRPERM = 'Bolt+CorrPerm'
METHOD_BOLT_SPLITS = 'BoltSplits'
METHOD_BOLT_MULTISPLITS = 'Bolt+MultiSplits'
METHOD_BOLT_PERM_MULTISPLITS = 'Bolt+Perm+MultiSplits'
METHOD_PQ = 'PQ'
METHOD_PQ_PERM = 'PQ+Perm'
METHOD_PQ_MULTISPLITS = 'PQ+MultiSplits'
METHOD_PQ_PERM_MULTISPLITS = 'PQ+Perm+MultiSplits'
METHOD_MITHRALPQ = 'MithralPQ'
METHOD_OLD_MITHRALPQ = 'OldMithralPQ'
METHOD_MITHRAL = 'Mithral'
# these are for trying out different perm options
METHOD_BOLT_GEHT_COV_TOPK = 'Bolt_CovTopk'
METHOD_BOLT_GEHT_COV_SAMP = 'Bolt_CovSamp'
METHOD_BOLT_GEHT_COR_TOPK = 'Bolt_CorTopk'
METHOD_BOLT_GEHT_COR_SAMP = 'Bolt_CorSamp'
# DEFAULT_METHODS = (METHOD_EXACT, METHOD_SVD, METHOD_FD_AMM,
# METHOD_COOCCUR, METHOD_PCA, METHOD_PQ,
# METHOD_BOLT, METHOD_MITHRALPQ)
METHOD_TO_ESTIMATOR = {
METHOD_EXACT: amm.ExactMatMul,
METHOD_SCALAR_QUANTIZE: amm.QuantizedMatmul,
METHOD_SKETCH_SQ_SAMPLE: amm.SketchSqSample,
METHOD_SVD: amm.SvdSketch,
METHOD_FD_AMM: amm.FdAmm,
METHOD_COOCCUR: amm.CooccurSketch,
METHOD_PCA: amm.TrainedPcaSketch,
METHOD_SPARSE_PCA: amm.TrainedSparsePcaSketch,
METHOD_RANDGAUSS: amm.RandGaussSketch,
METHOD_ORTHOGAUSS: amm.RandOrthoGaussSketch,
METHOD_HADAMARD: amm.HadamardSketch,
METHOD_RADEMACHER: amm.RandRademacherSketch,
METHOD_FASTJL: amm.FastJlSketch,
METHOD_HASHJL: amm.HashJlSketch,
METHOD_OSNAP: amm.OsnapSketch,
METHOD_PQ: vq_amm.PQMatmul,
METHOD_BOLT: vq_amm.BoltMatmul,
METHOD_OPQ: vq_amm.OPQMatmul,
METHOD_BOLT_CORRPERM: vq_amm.GEHTBoltMatmul_CorrTopk,
METHOD_BOLT_GEHT_COV_TOPK: vq_amm.GEHTBoltMatmul_CovTopk,
METHOD_BOLT_GEHT_COV_SAMP: vq_amm.GEHTBoltMatmul_CovSamp,
METHOD_BOLT_GEHT_COR_TOPK: vq_amm.GEHTBoltMatmul_CorrTopk,
METHOD_BOLT_GEHT_COR_SAMP: vq_amm.GEHTBoltMatmul_CorrSamp,
METHOD_BOLT_PERM: vq_amm.GEHTBoltMatmul_CovTopk,
METHOD_BOLT_SPLITS: vq_amm.BoltSplits,
METHOD_BOLT_MULTISPLITS: vq_amm.BoltMultiSplits,
METHOD_BOLT_PERM_MULTISPLITS: vq_amm.BoltPermMultiSplits,
METHOD_PQ_PERM: vq_amm.PQPerm,
METHOD_PQ_MULTISPLITS: vq_amm.PQMultiSplits,
METHOD_PQ_PERM_MULTISPLITS: vq_amm.PQPermMultiSplits,
METHOD_OLD_MITHRALPQ: vq_amm.OldMithralPQ,
METHOD_MITHRALPQ: vq_amm.MithralPQ,
METHOD_MITHRAL: vq_amm.MithralMatmul
}
ALL_METHODS = sorted(list(METHOD_TO_ESTIMATOR.keys()))
ALL_METHODS.remove(METHOD_SKETCH_SQ_SAMPLE)  # always terrible results
ALL_METHODS.remove(METHOD_OPQ) # takes forever to train, more muls than exact
# these were just for playing with different permutation options
ALL_METHODS.remove(METHOD_BOLT_GEHT_COV_TOPK)
ALL_METHODS.remove(METHOD_BOLT_GEHT_COV_SAMP)
ALL_METHODS.remove(METHOD_BOLT_GEHT_COR_TOPK)
ALL_METHODS.remove(METHOD_BOLT_GEHT_COR_SAMP)
RANDOM_SKETCHING_METHODS = (
METHOD_FASTJL, METHOD_HASHJL, METHOD_OSNAP, METHOD_RANDGAUSS,
METHOD_ORTHOGAUSS, METHOD_RADEMACHER)
DENSE_SKETCH_METHODS = (METHOD_PCA, METHOD_FASTJL, METHOD_RANDGAUSS,
METHOD_HADAMARD, METHOD_ORTHOGAUSS, METHOD_RADEMACHER)
FAST_SKETCH_METHODS = RANDOM_SKETCHING_METHODS + (
METHOD_HADAMARD, METHOD_PCA, METHOD_SPARSE_PCA)
# SLOW_SKETCH_METHODS = (METHOD_SVD, METHOD_FD_AMM, METHOD_COOCCUR)
SLOW_SKETCH_METHODS = (METHOD_FD_AMM, METHOD_COOCCUR, METHOD_SVD)
SKETCH_METHODS = FAST_SKETCH_METHODS + SLOW_SKETCH_METHODS
# VQ_METHODS = (METHOD_PQ, METHOD_BOLT, METHOD_OPQ)
# VQ_METHODS = (METHOD_PQ, METHOD_BOLT)
BOLT_METHODS = (METHOD_BOLT, METHOD_BOLT_PERM,
METHOD_BOLT_CORRPERM, METHOD_BOLT_SPLITS,
METHOD_BOLT_MULTISPLITS, METHOD_BOLT_PERM_MULTISPLITS)
PQ_METHODS = (METHOD_PQ, METHOD_PQ_PERM, METHOD_PQ_MULTISPLITS,
METHOD_PQ_PERM_MULTISPLITS)
MITHRAL_METHODS = (METHOD_MITHRALPQ, METHOD_MITHRAL, METHOD_OLD_MITHRALPQ)
VQ_METHODS = PQ_METHODS + BOLT_METHODS + MITHRAL_METHODS
NONDETERMINISTIC_METHODS = (METHOD_SKETCH_SQ_SAMPLE, METHOD_SVD) + VQ_METHODS
# USE_METHODS = (FAST_SKETCH_METHODS +
# (METHOD_EXACT, METHOD_BOLT, METHOD_MITHRALPQ, METHOD_MITHRAL))
USE_METHODS = ((METHOD_EXACT, METHOD_BOLT, METHOD_MITHRALPQ, METHOD_MITHRAL)
+ FAST_SKETCH_METHODS)
USE_CALTECH_METHODS = list(USE_METHODS)
USE_CALTECH_METHODS.remove(METHOD_BOLT) # Bolt *can't* be faster
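# Minimal usage sketch (not from the original file): downstream experiment
# code is expected to look an estimator class up by method name; the
# `make_estimator` helper below is hypothetical and only illustrates that
# lookup, with any constructor arguments passed through **kwargs.
def make_estimator(method_name, **kwargs):
    cls = METHOD_TO_ESTIMATOR[method_name]
    return cls(**kwargs)
# e.g. make_estimator(METHOD_EXACT) would construct an amm.ExactMatMul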
|
#!/usr/bin/env python
import pprint
import numpy as np
microbench_output = \
"""
ncodebooks = 4
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 7.574 (4.225e+07/s), 7.582 (4.221e+07/s), 7.584 (4.219e+07/s), 7.587 (4.218e+07/s), 7.579 (4.222e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 7.747 (1.652e+08/s), 7.743 (1.653e+08/s), 7.757 (1.650e+08/s), 7.758 (1.650e+08/s), 7.743 (1.653e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 26.029 (2.749e+08/s), 26.028 (2.749e+08/s), 26.013 (2.751e+08/s), 26.010 (2.751e+08/s), 26.063 (2.745e+08/s),
amm bolt N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 1.931 (8.167e+08/s), 1.924 (8.197e+08/s), 1.925 (8.193e+08/s), 1.925 (8.193e+08/s), 1.929 (8.176e+08/s),
ncodebooks = 8
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 6.912 (4.630e+07/s), 6.919 (4.625e+07/s), 6.912 (4.630e+07/s), 6.909 (4.632e+07/s), 6.911 (4.630e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 7.169 (1.785e+08/s), 7.207 (1.776e+08/s), 7.200 (1.778e+08/s), 7.205 (1.777e+08/s), 7.205 (1.777e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 24.550 (2.914e+08/s), 24.514 (2.919e+08/s), 24.485 (2.922e+08/s), 24.470 (2.924e+08/s), 24.474 (2.923e+08/s),
amm bolt N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 2.445 (6.450e+08/s), 2.454 (6.427e+08/s), 2.436 (6.474e+08/s), 2.448 (6.442e+08/s), 2.446 (6.448e+08/s),
ncodebooks = 16
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 6.350 (5.039e+07/s), 6.350 (5.039e+07/s), 6.347 (5.042e+07/s), 6.356 (5.035e+07/s), 6.438 (4.970e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 7.340 (1.744e+08/s), 7.270 (1.761e+08/s), 7.302 (1.753e+08/s), 7.277 (1.759e+08/s), 7.299 (1.754e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 28.217 (2.536e+08/s), 28.063 (2.550e+08/s), 28.082 (2.548e+08/s), 28.086 (2.547e+08/s), 28.070 (2.549e+08/s),
amm bolt N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 3.525 (4.474e+08/s), 3.529 (4.469e+08/s), 3.525 (4.474e+08/s), 3.530 (4.468e+08/s), 3.527 (4.471e+08/s),
ncodebooks = 32
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 6.036 (5.302e+07/s), 6.070 (5.272e+07/s), 6.085 (5.259e+07/s), 6.158 (5.196e+07/s), 6.176 (5.181e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 7.473 (1.713e+08/s), 7.478 (1.712e+08/s), 7.571 (1.691e+08/s), 7.567 (1.692e+08/s), 7.571 (1.691e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 36.693 (1.950e+08/s), 36.721 (1.948e+08/s), 36.753 (1.947e+08/s), 36.805 (1.944e+08/s), 37.216 (1.923e+08/s),
ncodebooks = 64
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 6.962 (4.596e+07/s), 6.959 (4.598e+07/s), 6.954 (4.602e+07/s), 6.959 (4.598e+07/s), 6.964 (4.595e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 8.539 (1.499e+08/s), 8.598 (1.489e+08/s), 8.484 (1.509e+08/s), 8.572 (1.493e+08/s), 8.527 (1.501e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 64.087 (1.116e+08/s), 64.096 (1.116e+08/s), 64.638 (1.107e+08/s), 64.099 (1.116e+08/s), 64.079 (1.117e+08/s),
ncodebooks = 4
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 0.021 (4.770e+09/s), 0.021 (4.770e+09/s), 0.021 (4.770e+09/s), 0.021 (4.770e+09/s), 0.021 (4.770e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 0.077 (1.301e+10/s), 0.077 (1.301e+10/s), 0.076 (1.318e+10/s), 0.080 (1.252e+10/s), 0.077 (1.301e+10/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.017 (1.178e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.999 (2.686e+09/s), 0.974 (2.755e+09/s), 1.001 (2.681e+09/s), 1.000 (2.683e+09/s), 0.999 (2.686e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.614 (7.284e+08/s), 0.611 (7.320e+08/s), 0.598 (7.479e+08/s), 0.613 (7.296e+08/s), 0.601 (7.441e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.604 (4.443e+09/s), 0.603 (4.450e+09/s), 0.579 (4.635e+09/s), 0.604 (4.443e+09/s), 0.605 (4.435e+09/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.257 (1.740e+09/s), 0.280 (1.597e+09/s), 0.265 (1.688e+09/s), 0.254 (1.761e+09/s), 0.254 (1.761e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.083 (1.188e+09/s), 0.083 (1.188e+09/s), 0.085 (1.160e+09/s), 0.084 (1.174e+09/s), 0.084 (1.174e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.077 (1.281e+09/s), 0.077 (1.281e+09/s), 0.076 (1.298e+09/s), 0.076 (1.298e+09/s), 0.076 (1.298e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.034 (2.901e+09/s), 0.029 (3.401e+09/s), 0.029 (3.401e+09/s), 0.030 (3.287e+09/s), 0.030 (3.287e+09/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.023 (4.288e+09/s), 0.023 (4.288e+09/s), 0.023 (4.288e+09/s), 0.023 (4.288e+09/s), 0.023 (4.288e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s),
ncodebooks = 8
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 0.043 (2.329e+09/s), 0.043 (2.329e+09/s), 0.043 (2.329e+09/s), 0.043 (2.329e+09/s), 0.043 (2.329e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 0.031 (1.292e+09/s), 0.032 (1.252e+09/s), 0.033 (1.214e+09/s), 0.034 (1.178e+09/s), 0.034 (1.178e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 0.154 (6.504e+09/s), 0.162 (6.183e+09/s), 0.155 (6.462e+09/s), 0.155 (6.462e+09/s), 0.162 (6.183e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 0.035 (1.145e+09/s), 0.033 (1.214e+09/s), 0.032 (1.252e+09/s), 0.034 (1.178e+09/s), 0.034 (1.178e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 1.810 (1.483e+09/s), 1.790 (1.499e+09/s), 1.797 (1.493e+09/s), 1.809 (1.483e+09/s), 1.810 (1.483e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 1.395 (6.412e+08/s), 1.371 (6.524e+08/s), 1.394 (6.417e+08/s), 1.394 (6.417e+08/s), 1.393 (6.421e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 1.102 (2.435e+09/s), 1.106 (2.426e+09/s), 1.091 (2.460e+09/s), 1.101 (2.437e+09/s), 1.129 (2.377e+09/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 0.681 (1.313e+09/s), 0.653 (1.370e+09/s), 0.654 (1.368e+09/s), 0.653 (1.370e+09/s), 0.653 (1.370e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.043 (2.080e+10/s), 0.043 (2.080e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.173 (5.701e+08/s), 0.172 (5.734e+08/s), 0.173 (5.701e+08/s), 0.185 (5.331e+08/s), 0.173 (5.701e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.160 (1.233e+09/s), 0.176 (1.121e+09/s), 0.185 (1.066e+09/s), 0.165 (1.195e+09/s), 0.161 (1.225e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.059 (1.672e+09/s), 0.059 (1.672e+09/s), 0.059 (1.672e+09/s), 0.059 (1.672e+09/s), 0.059 (1.672e+09/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.049 (4.025e+09/s), 0.050 (3.945e+09/s), 0.049 (4.025e+09/s), 0.048 (4.109e+09/s), 0.048 (4.109e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s),
ncodebooks = 16
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 0.094 (1.066e+09/s), 0.093 (1.077e+09/s), 0.100 (1.002e+09/s), 0.100 (1.002e+09/s), 0.097 (1.033e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 0.065 (1.233e+09/s), 0.066 (1.214e+09/s), 0.066 (1.214e+09/s), 0.065 (1.233e+09/s), 0.066 (1.214e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 0.367 (2.729e+09/s), 0.372 (2.692e+09/s), 0.374 (2.678e+09/s), 0.377 (2.657e+09/s), 0.374 (2.678e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 0.067 (1.196e+09/s), 0.064 (1.252e+09/s), 0.064 (1.252e+09/s), 0.064 (1.252e+09/s), 0.064 (1.252e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 3.597 (7.460e+08/s), 3.607 (7.439e+08/s), 3.599 (7.456e+08/s), 3.610 (7.433e+08/s), 3.614 (7.425e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 2.761 (6.479e+08/s), 2.761 (6.479e+08/s), 2.760 (6.482e+08/s), 2.751 (6.503e+08/s), 2.763 (6.475e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 0.103 (1.737e+10/s), 0.105 (1.704e+10/s), 0.123 (1.454e+10/s), 0.128 (1.398e+10/s), 0.123 (1.454e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 2.233 (1.202e+09/s), 2.261 (1.187e+09/s), 2.207 (1.216e+09/s), 2.207 (1.216e+09/s), 2.261 (1.187e+09/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 1.417 (1.262e+09/s), 1.563 (1.145e+09/s), 1.514 (1.182e+09/s), 1.464 (1.222e+09/s), 1.483 (1.206e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 0.136 (1.315e+10/s), 0.130 (1.376e+10/s), 0.147 (1.217e+10/s), 0.133 (1.345e+10/s), 0.134 (1.335e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.397 (2.484e+08/s), 0.407 (2.423e+08/s), 0.395 (2.497e+08/s), 0.388 (2.542e+08/s), 0.385 (2.562e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.369 (1.069e+09/s), 0.368 (1.072e+09/s), 0.377 (1.046e+09/s), 0.375 (1.052e+09/s), 0.408 (9.669e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.131 (7.529e+08/s), 0.131 (7.529e+08/s), 0.131 (7.529e+08/s), 0.131 (7.529e+08/s), 0.131 (7.529e+08/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.103 (3.830e+09/s), 0.103 (3.830e+09/s), 0.103 (3.830e+09/s), 0.103 (3.830e+09/s), 0.104 (3.793e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s),
ncodebooks = 32
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 0.201 (4.983e+08/s), 0.194 (5.163e+08/s), 0.205 (4.886e+08/s), 0.201 (4.983e+08/s), 0.200 (5.008e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 0.142 (1.129e+09/s), 0.143 (1.121e+09/s), 0.144 (1.113e+09/s), 0.142 (1.129e+09/s), 0.161 (9.954e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 0.762 (1.314e+09/s), 0.781 (1.282e+09/s), 0.756 (1.325e+09/s), 0.753 (1.330e+09/s), 0.798 (1.255e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 0.183 (8.757e+08/s), 0.149 (1.076e+09/s), 0.154 (1.041e+09/s), 0.150 (1.068e+09/s), 0.147 (1.090e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 7.958 (3.372e+08/s), 7.142 (3.757e+08/s), 7.148 (3.754e+08/s), 7.114 (3.772e+08/s), 7.135 (3.761e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 5.589 (6.402e+08/s), 5.642 (6.341e+08/s), 5.563 (6.432e+08/s), 5.592 (6.398e+08/s), 5.579 (6.413e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 0.341 (1.049e+10/s), 0.330 (1.084e+10/s), 0.327 (1.094e+10/s), 0.327 (1.094e+10/s), 0.328 (1.091e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 4.369 (6.142e+08/s), 4.357 (6.159e+08/s), 4.537 (5.914e+08/s), 4.361 (6.153e+08/s), 4.406 (6.090e+08/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 2.888 (1.239e+09/s), 2.889 (1.238e+09/s), 2.898 (1.235e+09/s), 2.898 (1.235e+09/s), 2.909 (1.230e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 0.329 (1.087e+10/s), 0.326 (1.098e+10/s), 0.331 (1.081e+10/s), 0.328 (1.091e+10/s), 0.345 (1.037e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.781 (1.263e+08/s), 0.785 (1.256e+08/s), 0.793 (1.244e+08/s), 0.788 (1.252e+08/s), 0.787 (1.253e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.814 (9.693e+08/s), 0.828 (9.529e+08/s), 0.755 (1.045e+09/s), 0.766 (1.030e+09/s), 0.768 (1.027e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.045 (1.753e+10/s), 0.041 (1.924e+10/s), 0.041 (1.924e+10/s), 0.046 (1.715e+10/s), 0.041 (1.924e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.320 (3.082e+08/s), 0.303 (3.255e+08/s), 0.301 (3.277e+08/s), 0.321 (3.072e+08/s), 0.301 (3.277e+08/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.279 (2.828e+09/s), 0.260 (3.035e+09/s), 0.263 (3.000e+09/s), 0.221 (3.570e+09/s), 0.242 (3.260e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.061 (1.293e+10/s), 0.044 (1.793e+10/s), 0.041 (1.924e+10/s), 0.041 (1.924e+10/s), 0.040 (1.972e+10/s),
ncodebooks = 64
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 0.454 (2.206e+08/s), 0.497 (2.015e+08/s), 0.489 (2.048e+08/s), 0.486 (2.061e+08/s), 0.457 (2.192e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 0.349 (9.184e+08/s), 0.344 (9.317e+08/s), 0.385 (8.325e+08/s), 0.377 (8.502e+08/s), 0.344 (9.317e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.020 (1.603e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 1.586 (6.315e+08/s), 1.530 (6.546e+08/s), 1.531 (6.542e+08/s), 1.529 (6.551e+08/s), 1.539 (6.508e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 0.405 (7.914e+08/s), 0.408 (7.856e+08/s), 0.449 (7.138e+08/s), 0.403 (7.953e+08/s), 0.411 (7.798e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 0.020 (1.603e+10/s), 0.020 (1.603e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 14.943 (1.796e+08/s), 15.205 (1.765e+08/s), 14.912 (1.799e+08/s), 14.951 (1.795e+08/s), 14.981 (1.791e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 11.376 (6.290e+08/s), 11.305 (6.330e+08/s), 11.313 (6.325e+08/s), 11.315 (6.324e+08/s), 11.312 (6.326e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 0.877 (8.159e+09/s), 0.822 (8.705e+09/s), 0.845 (8.468e+09/s), 0.849 (8.428e+09/s), 0.836 (8.559e+09/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 9.459 (2.837e+08/s), 9.458 (2.837e+08/s), 9.420 (2.849e+08/s), 9.457 (2.837e+08/s), 9.452 (2.839e+08/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 5.819 (1.230e+09/s), 5.820 (1.230e+09/s), 5.824 (1.229e+09/s), 5.845 (1.224e+09/s), 5.901 (1.213e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 0.818 (8.748e+09/s), 0.823 (8.695e+09/s), 0.803 (8.911e+09/s), 0.818 (8.748e+09/s), 0.851 (8.409e+09/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 1.571 (6.278e+07/s), 1.571 (6.278e+07/s), 1.573 (6.270e+07/s), 1.574 (6.266e+07/s), 1.571 (6.278e+07/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 1.479 (1.067e+09/s), 1.473 (1.071e+09/s), 1.475 (1.070e+09/s), 1.476 (1.069e+09/s), 1.473 (1.071e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.114 (1.384e+10/s), 0.115 (1.372e+10/s), 0.115 (1.372e+10/s), 0.110 (1.435e+10/s), 0.115 (1.372e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.561 (1.758e+08/s), 0.560 (1.761e+08/s), 0.561 (1.758e+08/s), 0.560 (1.761e+08/s), 0.560 (1.761e+08/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.453 (3.483e+09/s), 0.492 (3.207e+09/s), 0.470 (3.357e+09/s), 0.464 (3.401e+09/s), 0.494 (3.194e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.114 (1.384e+10/s), 0.120 (1.315e+10/s), 0.116 (1.360e+10/s), 0.114 (1.384e+10/s), 0.114 (1.384e+10/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 2 (5x20): 3.827 (2.613e+07/s), 3.815 (2.621e+07/s), 3.830 (2.611e+07/s), 3.858 (2.592e+07/s), 3.832 (2.610e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 2 (5x20): 1.080 (9.259e+07/s), 1.041 (9.606e+07/s), 1.049 (9.533e+07/s), 1.049 (9.533e+07/s), 1.045 (9.569e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 4 (5x20): 3.505 (2.853e+07/s), 3.568 (2.803e+07/s), 3.541 (2.824e+07/s), 3.431 (2.915e+07/s), 3.234 (3.092e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 4 (5x20): 2.081 (4.805e+07/s), 2.135 (4.684e+07/s), 2.083 (4.801e+07/s), 2.077 (4.815e+07/s), 2.079 (4.810e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 8 (5x20): 3.772 (2.651e+07/s), 3.641 (2.746e+07/s), 3.617 (2.765e+07/s), 3.616 (2.765e+07/s), 4.002 (2.499e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 8 (5x20): 2.864 (3.492e+07/s), 2.861 (3.495e+07/s), 2.901 (3.447e+07/s), 3.017 (3.315e+07/s), 2.880 (3.472e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 16 (5x20): 4.535 (2.205e+07/s), 4.565 (2.191e+07/s), 4.475 (2.235e+07/s), 4.476 (2.234e+07/s), 4.480 (2.232e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 16 (5x20): 5.217 (1.917e+07/s), 5.185 (1.929e+07/s), 5.243 (1.907e+07/s), 5.256 (1.903e+07/s), 5.184 (1.929e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 32 (5x20): 6.537 (1.530e+07/s), 6.527 (1.532e+07/s), 6.517 (1.534e+07/s), 6.507 (1.537e+07/s), 6.512 (1.536e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 32 (5x20): 9.143 (1.094e+07/s), 9.119 (1.097e+07/s), 9.137 (1.094e+07/s), 9.110 (1.098e+07/s), 9.128 (1.096e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 64 (5x20): 10.156 (9.846e+06/s), 10.136 (9.866e+06/s), 10.143 (9.859e+06/s), 10.146 (9.856e+06/s), 10.147 (9.855e+06/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 64 (5x20): 17.739 (5.637e+06/s), 17.767 (5.628e+06/s), 17.641 (5.669e+06/s), 17.647 (5.667e+06/s), 17.640 (5.669e+06/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 128 (5x20): 17.149 (5.831e+06/s), 17.183 (5.820e+06/s), 17.144 (5.833e+06/s), 17.109 (5.845e+06/s), 17.182 (5.820e+06/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 128 (5x20): 35.289 (2.834e+06/s), 35.025 (2.855e+06/s), 35.294 (2.833e+06/s), 35.022 (2.855e+06/s), 35.071 (2.851e+06/s),
blas matmul N, D, M: 10000, 512, 10 (5x20): 4.174 (2.396e+07/s), 4.136 (2.418e+07/s), 4.164 (2.402e+07/s), 4.198 (2.382e+07/s), 4.188 (2.388e+07/s),
our matmul N, D, M: 10000, 512, 10 (5x20): 3.546 (2.820e+07/s), 3.546 (2.820e+07/s), 3.553 (2.815e+07/s), 3.555 (2.813e+07/s), 3.560 (2.809e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 2 (5x20): 4.085 (2.448e+08/s), 4.091 (2.444e+08/s), 4.055 (2.466e+08/s), 4.045 (2.472e+08/s), 4.057 (2.465e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 2 (5x20): 1.322 (7.564e+08/s), 1.337 (7.479e+08/s), 1.336 (7.485e+08/s), 1.323 (7.559e+08/s), 1.322 (7.564e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 4 (5x20): 3.631 (2.754e+08/s), 3.843 (2.602e+08/s), 3.798 (2.633e+08/s), 3.848 (2.599e+08/s), 3.847 (2.599e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 4 (5x20): 2.626 (3.808e+08/s), 2.491 (4.014e+08/s), 2.510 (3.984e+08/s), 2.589 (3.862e+08/s), 2.480 (4.032e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 8 (5x20): 4.275 (2.339e+08/s), 4.313 (2.319e+08/s), 4.333 (2.308e+08/s), 4.289 (2.332e+08/s), 4.130 (2.421e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 8 (5x20): 3.405 (2.937e+08/s), 3.571 (2.800e+08/s), 3.405 (2.937e+08/s), 3.423 (2.921e+08/s), 3.405 (2.937e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 16 (5x20): 5.392 (1.855e+08/s), 5.316 (1.881e+08/s), 5.283 (1.893e+08/s), 5.281 (1.894e+08/s), 5.184 (1.929e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 16 (5x20): 6.046 (1.654e+08/s), 6.047 (1.654e+08/s), 6.076 (1.646e+08/s), 6.071 (1.647e+08/s), 6.044 (1.655e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 32 (5x20): 7.291 (1.372e+08/s), 7.293 (1.371e+08/s), 7.308 (1.368e+08/s), 7.296 (1.371e+08/s), 7.294 (1.371e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 32 (5x20): 10.697 (9.348e+07/s), 10.584 (9.448e+07/s), 10.599 (9.435e+07/s), 10.611 (9.424e+07/s), 10.594 (9.439e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 64 (5x20): 11.586 (8.631e+07/s), 11.528 (8.675e+07/s), 11.528 (8.675e+07/s), 11.535 (8.669e+07/s), 11.530 (8.673e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 64 (5x20): 20.459 (4.888e+07/s), 20.514 (4.875e+07/s), 20.542 (4.868e+07/s), 20.429 (4.895e+07/s), 20.532 (4.870e+07/s),
blas matmul N, D, M: 10000, 512, 100 (5x20): 13.506 (7.404e+07/s), 13.432 (7.445e+07/s), 13.467 (7.426e+07/s), 13.464 (7.427e+07/s), 13.484 (7.416e+07/s),
our matmul N, D, M: 10000, 512, 100 (5x20): 27.160 (3.682e+07/s), 27.135 (3.685e+07/s), 27.260 (3.668e+07/s), 27.213 (3.675e+07/s), 27.268 (3.667e+07/s),
blas sketch matmul N, D, M, d: 223590, 96, 12, 2 (5x20): 17.987 (1.492e+08/s), 17.601 (1.524e+08/s), 18.118 (1.481e+08/s), 17.847 (1.503e+08/s), 17.977 (1.493e+08/s),
our sketch matmul N, D, M, d: 223590, 96, 12, 2 (5x20): 5.117 (5.243e+08/s), 5.115 (5.246e+08/s), 5.102 (5.259e+08/s), 5.088 (5.273e+08/s), 5.111 (5.250e+08/s),
blas sketch matmul N, D, M, d: 223590, 96, 12, 4 (5x20): 11.524 (2.328e+08/s), 12.362 (2.170e+08/s), 11.828 (2.268e+08/s), 11.793 (2.275e+08/s), 11.785 (2.277e+08/s),
our sketch matmul N, D, M, d: 223590, 96, 12, 4 (5x20): 9.979 (2.689e+08/s), 10.007 (2.681e+08/s), 10.010 (2.680e+08/s), 10.010 (2.680e+08/s), 9.973 (2.690e+08/s),
blas sketch matmul N, D, M, d: 223590, 96, 12, 8 (5x20): 19.261 (1.393e+08/s), 19.116 (1.404e+08/s), 19.205 (1.397e+08/s), 19.342 (1.387e+08/s), 19.189 (1.398e+08/s),
our sketch matmul N, D, M, d: 223590, 96, 12, 8 (5x20): 14.543 (1.845e+08/s), 14.510 (1.849e+08/s), 14.570 (1.842e+08/s), 14.556 (1.843e+08/s), 14.509 (1.849e+08/s),
blas matmul N, D, M: 223590, 96, 12 (5x20): 19.189 (1.398e+08/s), 19.231 (1.395e+08/s), 19.378 (1.385e+08/s), 19.348 (1.387e+08/s), 19.390 (1.384e+08/s),
our matmul N, D, M: 223590, 96, 12 (5x20): 16.242 (1.652e+08/s), 16.194 (1.657e+08/s), 16.197 (1.657e+08/s), 16.230 (1.653e+08/s), 16.238 (1.652e+08/s),
blas sketch matmul N, D, M, d: 49284, 27, 2, 2 (5x20): 0.375 (2.628e+08/s), 0.373 (2.643e+08/s), 0.380 (2.594e+08/s), 0.380 (2.594e+08/s), 0.378 (2.608e+08/s),
our sketch matmul N, D, M, d: 49284, 27, 2, 2 (5x20): 0.219 (4.501e+08/s), 0.220 (4.480e+08/s), 0.219 (4.501e+08/s), 0.216 (4.563e+08/s), 0.203 (4.856e+08/s),
blas matmul N, D, M: 49284, 27, 2 (5x20): 0.327 (3.014e+08/s), 0.318 (3.100e+08/s), 0.319 (3.090e+08/s), 0.328 (3.005e+08/s), 0.328 (3.005e+08/s),
our matmul N, D, M: 49284, 27, 2 (5x20): 0.186 (5.299e+08/s), 0.181 (5.446e+08/s), 0.183 (5.386e+08/s), 0.174 (5.665e+08/s), 0.173 (5.698e+08/s),
"""
def _load_matmul_times_for_n_d_m(startswith):
lines = microbench_output.split('\n')
matmul_lines = [line for line in lines if line.startswith(startswith)]
matmul_shape_to_times = {}
matmul_shape_to_thruputs = {}
for line in matmul_lines:
start_idx = line.find(':') + 1
end_idx = line.find('(')
nmd_str = line[start_idx:end_idx]
N, D, M = [int(substr) for substr in nmd_str.split(',')[:3]]
speeds_str = line[line.find('):') + 2:]
speed_pairs = speeds_str.split(',')[:5]
# print("N, D, M: ", N, D, M)
# print("speed pairs: ", speed_pairs)
times = []
thruputs = []
for pair in speed_pairs:
pair = pair.strip()
if not len(pair):
continue # handle trailing comma on line
# print("pair: ", pair)
pair = pair.strip()
time_str, thruput_str = pair.split()
times.append(float(time_str))
thruput_str = thruput_str.strip('()s/')
thruputs.append(float(thruput_str))
key = (N, D, M)
matmul_shape_to_times[key] = times
matmul_shape_to_thruputs[key] = thruputs
# print("what we're getting from func:")
# pprint.pprint(matmul_shape_to_times)
# pprint.pprint(matmul_shape_to_thruputs)
return matmul_shape_to_times, matmul_shape_to_thruputs
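# Small illustration of the parser above, grounded in the dump at the top of
# this file: each matching line is keyed by its (N, D, M) triple and yields
# the five recorded latencies and throughputs.
def _example_matmul_parse():
    times, thruputs = _load_matmul_times_for_n_d_m('our matmul')
    # times[(10000, 512, 10)] == [3.546, 3.546, 3.553, 3.555, 3.560]
    # thruputs[(10000, 512, 10)] == [2.820e+07, 2.820e+07, 2.815e+07, ...]
    return times, thruputs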
def _load_sketch_times_for_n_d_m(startswith):
# print("loading sketch times for ", startswith)
lines = microbench_output.split('\n')
matmul_lines = [line for line in lines if line.startswith(startswith)]
matmul_shape_to_times = {}
matmul_shape_to_thruputs = {}
for line in matmul_lines:
start_idx = line.find(':') + 1
end_idx = line.find('(')
nmd_str = line[start_idx:end_idx]
N, D, M, d = [int(substr) for substr in nmd_str.split(',')[:4]]
speeds_str = line[line.find('):') + 2:]
speed_pairs = speeds_str.split(',')[:5]
# print("N, D, M: ", N, D, M)
# print("speed pairs: ", speed_pairs)
times = []
thruputs = []
for pair in speed_pairs:
pair = pair.strip()
if not len(pair):
continue # handle trailing comma on line
# print("pair: ", pair)
pair = pair.strip()
time_str, thruput_str = pair.split()
times.append(float(time_str))
thruput_str = thruput_str.strip('()s/')
thruputs.append(float(thruput_str))
key = (N, D, M, d)
matmul_shape_to_times[key] = times
matmul_shape_to_thruputs[key] = thruputs
# pprint.pprint(matmul_shape_to_times)
# pprint.pprint(matmul_shape_to_thruputs)
return matmul_shape_to_times, matmul_shape_to_thruputs
def load_matmul_times_for_n_d_m(key1='blas matmul', key2='our matmul',
sketches=False):
if sketches:
# print("results from blas:")
shape2lat0, shape2thruput0 = _load_sketch_times_for_n_d_m(key1)
# print("results from ours:")
shape2lat1, shape2thruput1 = _load_sketch_times_for_n_d_m(key2)
else:
# print("results from blas:")
shape2lat0, shape2thruput0 = _load_matmul_times_for_n_d_m(key1)
# print("results from ours:")
shape2lat1, shape2thruput1 = _load_matmul_times_for_n_d_m(key2)
# take minimum of time from eigen blas and our sgemm
shape2lat = {}
for k in shape2lat0:
vals0 = shape2lat0.get(k, [1e20])
vals1 = shape2lat1.get(k, [1e20])
mean0, mean1 = np.mean(vals0), np.mean(vals1)
if mean0 < mean1:
shape2lat[k] = shape2lat0[k]
else:
shape2lat[k] = shape2lat1[k]
shape2thruput = {}
for k in shape2thruput0:
vals0 = shape2thruput0.get(k, [-1e20])
vals1 = shape2thruput1.get(k, [-1e20])
# print("k, vals0, vals1: ", k)
# print(vals0)
# print(vals1)
mean0, mean1 = np.mean(vals0), np.mean(vals1)
if mean0 > mean1:
shape2thruput[k] = shape2thruput0[k]
else:
shape2thruput[k] = shape2thruput1[k]
# print("what we're returning:")
# pprint.pprint(shape2lat)
# pprint.pprint(shape2thruput)
return shape2lat, shape2thruput
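# Concrete instance of the min/max selection above, using the dump at the top
# of this file: for shape (10000, 512, 10), eigen blas averages ~4.17 while
# 'our matmul' averages ~3.55, so the returned latency dict keeps the
# 'our matmul' runs for that key (and the higher-throughput runs likewise).
def _example_best_matmul_times():
    lat, thruput = load_matmul_times_for_n_d_m()
    return lat[(10000, 512, 10)], thruput[(10000, 512, 10)]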
def _load_vq_times_for_n_d_m(startswith):
lines = microbench_output.split('\n')
lines = [line for line in lines if line.startswith(startswith)]
shape_ncodebooks_to_times = {}
shape_ncodebooks_to_thruputs = {}
for line in lines:
start_idx = line.find(':') + 1
end_idx = line.find('(')
nmd_str = line[start_idx:end_idx]
N, D, M, C = [int(substr) for substr in nmd_str.split(',')[:4]]
speeds_str = line[line.find('):') + 2:]
speed_pairs = speeds_str.split(',')[:5]
times = []
thruputs = []
for pair in speed_pairs:
pair = pair.strip()
if not len(pair):
continue # handle trailing comma on line
time_str, thruput_str = pair.split()
times.append(float(time_str))
thruput_str = thruput_str.strip('()s/')
thruputs.append(float(thruput_str))
key = (N, D, M, C)
shape_ncodebooks_to_times[key] = times
shape_ncodebooks_to_thruputs[key] = thruputs
# print("startswith: ", startswith)
# if 'bolt' in startswith:
# print("bolt speed dicts:")
# pprint.pprint(shape_ncodebooks_to_times)
# pprint.pprint(shape_ncodebooks_to_thruputs)
return shape_ncodebooks_to_times, shape_ncodebooks_to_thruputs
# def load_multisplit_times_for_n_d_m():
# return _load_vq_times_for_n_d_m('famm mithral')
def load_bolt_times_for_n_d_m():
return _load_vq_times_for_n_d_m('amm bolt')
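# Sketch of what these vq loaders return, again grounded in the dump above:
# keys are (N, D, M, ncodebooks) tuples, values are the five runs.
def _example_bolt_times():
    times, _ = load_bolt_times_for_n_d_m()
    # times[(10000, 512, 10, 4)] == [7.574, 7.582, 7.584, 7.587, 7.579]
    return times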
def load_mithral_f32_times_for_n_d_m():
# two spaces so it doesn't try to read enc and zip times
return _load_vq_times_for_n_d_m('f32 amm mithral ')
def load_mithral_i16_times_for_n_d_m():
return _load_vq_times_for_n_d_m('i16 amm mithral ')
def load_mithral_i8_times_for_n_d_m():
return _load_vq_times_for_n_d_m('i8 amm mithral ')
def load_mithral_times_for_n_d_m():
return _load_vq_times_for_n_d_m('f32 amm mithral ')
def load_sketch_times_for_n_d_m():
return load_matmul_times_for_n_d_m(
'blas sketch matmul', 'our sketch matmul', sketches=True)
def main():
# load_matmul_times_for_n_d_m()
# load_multisplit_times_for_n_d_m()
# load_bolt_times_for_n_d_m()
# pprint.pprint(load_sketch_times_for_n_d_m())
# pprint.pprint(load_multisplit_times_for_n_d_m())
# pprint.pprint(load_mithral_times_for_n_d_m())
ret = load_matmul_times_for_n_d_m()
print("matmul latencies, thruputs")
pprint.pprint(ret)
# pprint.pprint(load_bolt_times_for_n_d_m())
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# import types
import itertools
import kmc2  # kmc2 package, used for k-means seeding below
import numpy as np
from joblib import Memory
from scipy import signal
from sklearn import cluster
_memory = Memory('.', verbose=0)
# ================================================================ misc
def is_dict(x):
return isinstance(x, dict)
def is_list_or_tuple(x):
return isinstance(x, (list, tuple))
def as_list_or_tuple(x):
return x if is_list_or_tuple(x) else [x]
def is_scalar_seq(x):
try:
[float(element) for element in x]
return True
except TypeError:
return False
def as_scalar_seq(x):
if is_scalar_seq(x):
return x
try:
_ = float(x)
return [x]
except TypeError:
raise TypeError("Couldn't convert value '{}' to sequence "
"of scalars".format(x))
def is_string(x):
return isinstance(x, (str,))
def flatten_list_of_lists(l):
return list(itertools.chain.from_iterable(l))
def element_size_bytes(x):
return np.dtype(x.dtype).itemsize
def invert_permutation(permutation):
return np.arange(len(permutation))[np.argsort(permutation)]
# ================================================================ image
def conv2d(img, filt, pad='same'):
# assert pad in ('same',) # TODO support valid
# mode = 'constant'
if len(img.shape) == 2:
return signal.correlate2d(img, filt, mode=pad)
# img is more than 2d; do a 2d conv for each channel and sum results
assert len(img.shape) == 3
out = np.zeros(img.shape[:2], dtype=np.float32)
for c in range(img.shape[2]):
f = filt[:, :, c] if len(filt.shape) == 3 else filt
out += signal.correlate2d(img[:, :, c], f, mode=pad)
return out
# def filter_img(img, filt):
# out = conv2d(img, filt)
# return out / np.max(out)
# ================================================================ distance
def dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def sq_dists_to_vectors(X, queries, rowNorms=None, queryNorms=None):
Q = queries.shape[0]
mat_size = X.shape[0] * Q
    mat_size_bytes = mat_size * element_size_bytes(X[0] + queries[0])
    if mat_size_bytes > int(1e9):
        print("WARNING: sq_dists_to_vectors: attempting to create a matrix "
              "of size {} ({}B)".format(mat_size, mat_size_bytes))
if rowNorms is None:
rowNorms = np.sum(X * X, axis=1, keepdims=True)
if queryNorms is None:
queryNorms = np.sum(queries * queries, axis=1)
dotProds = np.dot(X, queries.T)
return (-2 * dotProds) + rowNorms + queryNorms # len(X) x len(queries)
def all_eq(x, y):
if len(x) != len(y):
return False
if len(x) == 0:
return True
return np.max(np.abs(x - y)) < .001
def top_k_idxs(elements, k, smaller_better=True, axis=-1):
if smaller_better: # return indices of lowest elements
which_nn = np.arange(k)
return np.argpartition(elements, kth=which_nn, axis=axis)[:k]
else: # return indices of highest elements
which_nn = len(elements) - 1 - np.arange(k)
return np.argpartition(elements, kth=which_nn, axis=axis)[-k:][::-1]
def compute_true_knn(X, Q, k=1000, print_every=5, block_sz=128):
nqueries = Q.shape[0]
nblocks = int(np.ceil(nqueries / float(block_sz)))
truth = np.full((nqueries, k), -999, dtype=np.int32)
if nqueries <= block_sz:
dists = sq_dists_to_vectors(Q, X)
assert dists.shape == (Q.shape[0], X.shape[0])
for i in range(nqueries):
truth[i, :] = top_k_idxs(dists[i, :], k)
# truth[i, :] = top_k_idxs(dists[:, i], k)
return truth
for b in range(nblocks):
# recurse to fill in knn for each block
start = b * block_sz
end = min(start + block_sz, nqueries)
rows = Q[start:end, :]
truth[start:end, :] = compute_true_knn(X, rows, k=k, block_sz=block_sz)
if b % print_every == 0:
print("computing top k for query block "
"{} (queries {}-{})...".format(b, start, end))
# for i in range(nqueries):
# if i % print_every == 0:
# print "computing top k for query {}...".format(i)
# truth[i, :] = top_k_idxs(dists[i, :], k)
print("done")
assert np.all(truth != -999)
return truth
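# Usage sketch with synthetic data (shapes chosen arbitrarily): exact top-10
# neighbors for 100 queries against 1000 database rows; each row of the
# result holds indices into X.
def _example_true_knn():
    rng = np.random.RandomState(0)
    X = rng.randn(1000, 16).astype(np.float32)
    Q = rng.randn(100, 16).astype(np.float32)
    return compute_true_knn(X, Q, k=10)  # shape (100, 10)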
def knn(X, q, k, dist_func=dists_sq):
dists = dist_func(X, q)
idxs = top_k_idxs(dists, k)
return idxs, dists[idxs]
@_memory.cache
def kmeans(X, k, max_iter=16, init='kmc2', return_sse=False):
X = X.astype(np.float32)
# handle fewer nonzero rows than centroids (mostly just don't choke
# if X all zeros, which happens when run in PQ with tiny subspaces)
rowsums = X.sum(axis=1)
nonzero_mask = rowsums != 0
nnz_rows = np.sum(nonzero_mask)
if nnz_rows < k:
print("X.shape: ", X.shape)
print("k: ", k)
print("nnz_rows: ", nnz_rows)
centroids = np.zeros((k, X.shape[1]), dtype=X.dtype)
        labels = np.full(X.shape[0], nnz_rows, dtype=int)
if nnz_rows > 0: # special case, because can't have slice of size 0
# make a centroid out of each nonzero row, and assign only those
# rows to that centroid; all other rows get assigned to next
# centroid after those, which is all zeros
            centroids[:nnz_rows] = X[nonzero_mask]
labels[nonzero_mask] = np.arange(nnz_rows)
if return_sse:
return centroids, labels, 0
return centroids, labels
# if k is huge, initialize centers with cartesian product of centroids
# in two subspaces
sqrt_k = int(np.ceil(np.sqrt(k)))
if k >= 16 and init == 'subspaces':
print("kmeans: clustering in subspaces first; k, sqrt(k) ="
" {}, {}".format(k, sqrt_k))
_, D = X.shape
        centroids0, _ = kmeans(X[:, :D // 2], sqrt_k, max_iter=1)
        centroids1, _ = kmeans(X[:, D // 2:], sqrt_k, max_iter=1)
seeds = np.empty((sqrt_k * sqrt_k, D), dtype=np.float32)
for i in range(sqrt_k):
for j in range(sqrt_k):
row = i * sqrt_k + j
                seeds[row, :D // 2] = centroids0[i]
                seeds[row, D // 2:] = centroids1[j]
seeds = seeds[:k] # rounded up sqrt(k), so probably has extra rows
elif init == 'kmc2':
try:
seeds = kmc2.kmc2(X, k).astype(np.float32)
except ValueError: # can happen if dist of 0 to centroid
print("WARNING: couldn't use kmc2 initialization")
seeds = 'k-means++' if k < max_iter else 'random'
else:
raise ValueError("init parameter must be one of {'kmc2', 'subspaces'}")
est = cluster.MiniBatchKMeans(
k, init=seeds, max_iter=max_iter, n_init=1).fit(X)
if return_sse:
return est.cluster_centers_, est.labels_, est.inertia_
return est.cluster_centers_, est.labels_
def orthonormalize_rows(A):
Q, R = np.linalg.qr(A.T)
return Q.T
def random_rotation(D):
rows = np.random.randn(D, D)
return orthonormalize_rows(rows)
def hamming_dist(v1, v2):
return np.count_nonzero(v1 != v2)
def hamming_dists(X, q):
return np.array([hamming_dist(row, q) for row in X])
if __name__ == '__main__':
a = np.random.randn(10)
sort_idxs = np.argsort(a)[::-1]
print(a)
print(top_k_idxs(a, 3, smaller_better=False))
print(sort_idxs[:3])
|
#!/usr/bin/env python
import numba
import numpy as np
import zstandard as zstd
# ================================================================ Funcs
def nbits_cost(diffs, signed=True):
"""
>>> [nbits_cost(i) for i in [0, 1, 2, 3, 4, 5, 7, 8, 9]]
[0, 2, 3, 3, 4, 4, 4, 5, 5]
>>> [nbits_cost(i) for i in [-1, -2, -3, -4, -5, -7, -8, -9]]
[1, 2, 3, 3, 4, 4, 4, 5]
>>> nbits_cost([])
array([], dtype=int32)
>>> nbits_cost([0, 2, 1, 0])
array([0, 3, 2, 0], dtype=int32)
>>> nbits_cost([0, 2, 1, 3, 4, 0], signed=False)
array([0, 2, 1, 2, 3, 0], dtype=int32)
"""
if diffs is None:
return None
diffs = np.asarray(diffs, dtype=np.int32)
if diffs.size == 0:
return np.array([], dtype=np.int32)
if not signed:
assert np.all(diffs >= 0)
pos_idxs = diffs > 0
nbits = np.zeros(diffs.shape, dtype=np.int32)
nbits[pos_idxs] = np.floor(np.log2(diffs[pos_idxs])) + 1
nbits[~pos_idxs] = 0
return nbits
# shape = diffs.shape
# diffs = diffs.ravel()
# zero_idxs = (diffs == 0)
# # nbits[zero_idxs] = 0
# nbits = np.zeros(len(diffs), dtype=np.int32)
# diffs = diffs[~zero_idxs]
# equiv_diffs = np.abs(diffs) + (diffs >= 0).astype(np.int32) # +1 if < 0
# # assert np.all(np.abs(diffs) > 0)
# # assert np.all(equiv_diffs > 0)
# nbits[~zero_idxs] = np.ceil(np.log2(equiv_diffs)) + 1
# nbits = np.asarray(nbits, dtype=np.int32) # next line can't handle scalar
# assert np.all(nbits >= 0)
shape = diffs.shape
diffs = diffs.ravel()
equiv_diffs = np.abs(diffs) + (diffs >= 0).astype(np.int32) # +1 if < 0
nbits = np.ceil(np.log2(equiv_diffs)) + 1
nbits = np.asarray(nbits, dtype=np.int32) # next line can't handle scalar
nbits[diffs == 0] = 0
assert np.all(nbits >= 0)
return nbits.reshape(shape) if nbits.size > 1 else nbits[0] # unpack if scalar
@numba.njit(fastmath=True)
def zigzag_encode(x):
"""
>>> [zigzag_encode(i) for i in [0,1,-1,2,-2,3,-3]]
[0, 1, 2, 3, 4, 5, 6]
>>> zigzag_encode([0,1,-1,2,-2,3,-3])
array([0, 1, 2, 3, 4, 5, 6], dtype=int32)
"""
x = np.asarray(x, dtype=np.int32)
return (np.abs(x) << 1) - (x > 0).astype(np.int32)
@numba.njit(fastmath=True)
def zigzag_decode(x):
    # inverse of zigzag_encode above (positive values map to odd codes)
    is_pos = np.bitwise_and(x, 1) != 0
    return np.where(is_pos, (x + 1) >> 1, -(x >> 1))
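# Tiny self-check mirroring the doctest above; assumes the njit'd functions
# accept int32 arrays (as the encode doctest implies): small magnitudes map
# to small nonnegative codes, and decoding inverts the mapping.
def _example_zigzag():
    x = np.array([0, 1, -1, 2, -2, 3, -3], dtype=np.int32)
    codes = zigzag_encode(x)  # [0, 1, 2, 3, 4, 5, 6]
    assert np.array_equal(zigzag_decode(codes), x)
    return codes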
def quantize(X, nbits=16, minval=None, maxval=None):
minval = np.min(X) if minval is None else minval
maxval = np.max(X) if maxval is None else maxval
unsigned_max = (1 << nbits) - 1
dtype_min = 1 << (nbits - 1)
scale = float(unsigned_max) / maxval
X = np.maximum(0, X - minval)
X = np.minimum(unsigned_max, X * scale)
X -= dtype_min # center at 0
dtype = {16: np.int16, 12: np.int16, 8: np.int8}[nbits]
return X.astype(dtype)
# ================================================================
def zstd_compress(buff, comp=None):
comp = zstd.ZstdCompressor() if comp is None else comp
if isinstance(buff, str):
buff = bytes(buff, encoding='utf8')
return comp.compress(buff)
def zstd_decompress(buff, decomp=None):
decomp = zstd.ZstdDecompressor() if decomp is None else decomp
    return decomp.decompress(buff)
# ============================================================== sprintz
# except without the predictive coding part because we do that manually;
# we also omit the run-length encoding because the author says that's a
# huge pain to code and won't change the results much for our fast-changing
# time series; also we don't do the grouping thing since it only
# affects the decoding speed (it could affect the ratio slightly if the
# number of variables were really low and not a multiple of 8, but neither
# is the case for us)
# def bitpack_vec(x, nbits_per_element):
# n = len(x)
# total_nbits = n * nbits_per_element
# bitvec = np.zeros(total_nbits, dtype=np.bool)
# for i, val in enumerate(x):
# start_idx = i * nbits_per_element
# for b in range(nbits_per_element):
# bit = (val >> b) & 1
# bitvec[start_idx + b] = bit
# return np.packbits(bitvec)
# def bitunpack(X, nbits_per_element):
# was_1d = X.ndim == 1
# X = np.atleast_2d(X)
# N, D = X.shape
# ret = np.unpackbits(X, axis=1)
# if was_1d:
# ret = ret.squeeze()
# return ret
# @numba.njit(fastmath=True)
def bitpack(X, nbits_per_element):
was_1d = X.ndim == 1
X = np.atleast_2d(X)
N, D = X.shape
# orig_elemsz = X.dtype.itemsize
orig_elemsz_bits = 8 * X.dtype.itemsize
assert X.dtype in (np.uint8, np.uint16)
if nbits_per_element == orig_elemsz_bits:
ret = X
elif X.dtype == np.uint8:
# print("N, D, nbits: ", N, D, nbits_per_element)
# shape = X.shape
X = X.ravel()
# unpacked = np.unpackbits(X, count=nbits_per_element, bitorder='little', axis=-1)
unpacked = np.unpackbits(X, bitorder='little', axis=-1)
# print("unpacked initial shape: ", unpacked.shape)
unpacked = unpacked.reshape(N * D, 8)[:, :nbits_per_element]
# print("unpacked new shape: ", unpacked.shape)
ret = np.packbits(unpacked.reshape(N, -1), axis=1)
# ret = ret.reshape(N, -1)
# print("ret.shape: ", ret.shape)
else:
# X_low = (X & 0xff)[:, :, np.newaxis]
# X_high = ((X & 0xff00) >> 8)[:, :, np.newaxis]
# X_combined = np.concatenate([X_low, X_high], axis=-1)
# X = X[:, :, np.newaxis]
# X = np.concatenate([X, X], axis=-1)
# X[:, :, 0] = X[:, :, 0] & 0xff
# X[:, :, 1] = (X[:, :, 1] & 0xff00) >> 8
# X = X.reshape(N, 2 * D).astype(np.uint8)
X = np.ascontiguousarray(X).view(np.uint8).reshape(N, 2 * D)
# print("X shape: ", X.shape)
unpacked = np.unpackbits(X, axis=1, bitorder='little')
unpacked = unpacked.reshape(N, orig_elemsz_bits, D)
# unpacked = unpacked[:, ::-1, :] # low bits in low idxs
unpacked = np.ascontiguousarray(unpacked[:, :nbits_per_element])
ret = np.packbits(unpacked.reshape(N, -1))
# nbits_per_row = D * nbits_per_element
# bitmat = np.zeros((N, nbits_per_row), dtype=np.uint8)
# for j in range(D):
# col = X[:, j]
# start_idx = j * nbits_per_element
# for b in range(nbits_per_element):
# bit = (col >> b) & 1
# bitmat[:, start_idx + b] = bit
# ret = np.packbits(bitmat, axis=1)
if was_1d:
ret = ret.squeeze()
return ret
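# Worked example of the packing above: eight 3-bit values fit in
# ceil(8 * 3 / 8) = 3 bytes instead of the original 8 bytes.
def _example_bitpack():
    col = np.array([3, 1, 0, 7, 2, 5, 4, 6], dtype=np.uint8)
    packed = bitpack(col, 3)
    assert packed.nbytes == 3
    return packed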
@numba.njit(fastmath=True)
def _sprintz_header_sz(headers, header_elem_nbits):
_, D = headers.shape
header_row_sz = int(np.ceil(D * header_elem_nbits / 8))
rows_total_nbits = headers.sum(axis=1)
# zero_rows = rows_total_nbits == 0
# header_sz = np.sum(nzero_rows) # one byte for run length
# pair_sums = zero_rows +
header_sz = 0
prev_was_zero = False
for row in rows_total_nbits:
is_zero = row == 0
if is_zero:
if prev_was_zero:
continue
else:
header_sz += 1 # start of run
else:
header_sz += header_row_sz
prev_was_zero = is_zero
return header_sz
# def sprintz_packed_size(X, nbits=None, just_return_sz=False, postproc='zstd'):
def sprintz_packed_size(X, nbits=None, just_return_sz=True, postproc=None):
if nbits is None:
nbits = {1: 8, 2: 16}.get(X.dtype.itemsize, 16)
unsigned_dtype = {8: np.uint8, 16: np.uint16}[nbits]
window_len = 8
    # pad N up to a multiple of window_len
    pad_nrows = (window_len - (X.shape[0] % window_len)) % window_len
if pad_nrows != 0:
pad_rows = np.zeros((pad_nrows, X.shape[1]), dtype=X.dtype)
X = np.vstack([X, pad_rows])
N, D = X.shape
if X.dtype.itemsize > 2: # basically just catching floats
# print("sprintz: quantizing X...WTF")
X = quantize(X, nbits=nbits)
if np.min(X) < 0:
# print("sprintz: zigzag_encoding X!")
X = zigzag_encode(X).astype(unsigned_dtype)
# else:
# print("sprintz: not zigzag_encoding X!")
header_elem_nbits = {8: 3, 16: 4}[nbits]
X_nbits = nbits_cost(X, signed=False)
X_nbits = np.asfarray(X_nbits).reshape(N // window_len, window_len, -1)
block_nbits = X_nbits.max(axis=1).astype(np.uint8)
block_nbits[block_nbits == (nbits - 1)] = nbits
headers = block_nbits
if just_return_sz:
payload_sz = int(block_nbits.sum() * window_len / 8)
header_sz = _sprintz_header_sz(headers, header_elem_nbits)
# print("header sz: ", header_sz)
return header_sz + payload_sz
nwindows = N // window_len
payloads = []
for i in range(nwindows):
start_idx = i * window_len
end_idx = start_idx + window_len
X_slice = X[start_idx:end_idx]
for j in range(D):
col = X_slice[:, j]
payloads.append(bitpack(col, headers[i, j]))
headers = bitpack(headers, header_elem_nbits)
payloads = np.hstack(payloads)
if postproc is None:
return headers.nbytes + payloads.nbytes
elif postproc == 'zstd':
return len(zstd_compress(headers)) + len(zstd_compress(payloads))
# # nbits_slice = nbits_cost(X_slice, signed=False)
# nbits_slice = X_nbits[start_idx:end_idx]
# max_nbits = nbits_slice.max(axis=0)
# headers[i] = np.minimum(max_nbits, nbits - 1) # 8->7, 16->15
# max_nbits[max_nbits == nbits - 1] = nbits # 7->8, 15->16
# for j in range(D):
# col = X_slice[:, j]
# payloads.append(bitpack(col, max_nbits[j]))
# headers = bitpack(headers, header_elem_nbits)
# payloads = np.hstack(payloads)
# header_bytes = headers.tobytes()
# # payload_bytes = headers.tobytes()
# blosc.compress(buff, typesize=elem_sz,
# cname=compressor, shuffle=shuffle)
#
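# Worked example of the size estimate above (D = 2 columns, one 8-row window,
# header_elem_nbits = 4 so a header row costs 1 byte): an all-zero window
# should cost just 1 header byte, while constant 3s need 2 bits per value,
# i.e. 4 payload bytes plus the 1-byte header.
def _example_sprintz_sizes():
    zeros = np.zeros((8, 2), dtype=np.int16)
    threes = np.full((8, 2), 3, dtype=np.int16)
    return sprintz_packed_size(zeros), sprintz_packed_size(threes)  # (1, 5)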
if __name__ == '__main__':
import doctest
doctest.testmod()
|
#!/usr/bin/env python
import os
from io import StringIO
import numpy as np
import pandas as pd
from joblib import Memory
_memory = Memory('.', verbose=1)
pd.options.mode.chained_assignment = None  # suppress SettingWithCopyWarning
RESULTS_DIR = os.path.join('results', 'amm')
TIMING_RESULTS_DIR = os.path.join(RESULTS_DIR, 'timing')
# we log these, but don't need them for the plots
AMM_DROP_COLS = ['__pyience_timestamp__', 'y_mean', 'y_std', 'bias',
'raw_mse', 'r', 'alpha', 'ncentroids']
def _read_csv_with_garbage(path, **kwargs):
with open(path, 'r') as f:
# print("\n".join(f.readlines()))
keep_lines = [line.strip() for line in f.readlines() if
(',' in line and not line.startswith('-'))]
contents = '\n'.join(keep_lines)
# print("contents\n", contents)
return pd.read_csv(StringIO(contents), **kwargs)
def rename_values_in_col(df, col, name_map, drop_others=True):
name_map = {k.strip().lower(): v for k, v in name_map.items()}
vals = [name_map.get(name.strip().lower(), "") for name in df[col]]
valid_vals = set(name_map.values())
# print("valid_vals: ", valid_vals)
valid_mask = np.array([val in valid_vals for val in vals])
# print("valid mask: ", valid_mask)
df = df.copy()
df[col] = vals
if drop_others:
df = df.loc[valid_mask]
return df
# print(df)
def melt_observation_cols(df, cols, var_name=None, value_name='value'):
    """like pd.melt, but assumes only 1 observation var instead of 1 id var"""
    independent_vars = [col for col in df.columns
                        if col not in set(cols)]
    return pd.melt(df, id_vars=independent_vars, value_vars=cols,
                   var_name=var_name, value_name=value_name)
def melt_times(df, ntimes=5):
observation_vars = 't0 t1 t2 t3 t4'.split()
observation_vars = observation_vars[:ntimes]
return melt_observation_cols(
df, observation_vars, var_name='timing_trial', value_name='time')
def drop_cols_inplace(df, cols):
    for col in cols:
        try:
            df.drop([col], axis=1, inplace=True)
        except KeyError:
            pass
    return df
def frac_above_thresh(df, xvar, yvar, methodvar, unitvar, ythresh):
"""
(method, xvar) -> [0, 1]
Assumes you have a tidy dataframe where method, xvar, yvar, and unit are
each a col.
"""
df = df.copy()
# df['frac_above_thresh'] = (df[yvar] > ythresh).astype(np.float)
# (method, xvar, [(unit, yvar)]) -> bool
    df['frac_above_thresh'] = (df[yvar] > ythresh).astype(float)
# independent_vars = [methodvar, unitvar, xvar]
independent_vars = [methodvar, xvar]
# return df.groupby(independent_vars)['is_above_thresh'].transform('mean')
# last part converts from groupby back to regular df EDIT: no it doesn't
# return df.groupby(independent_vars)['frac_above_thresh'].mean().apply(pd.Series)
# return df.groupby(independent_vars)['is_above_thresh'].mean()
df = df.groupby(independent_vars)['frac_above_thresh'].mean()
# this is the magic line; turn multi-index levels into regular cols, with
# multi-index value broadcast all the corresponding rows;
# WOW this took a long time to figure out...
df = df.reset_index(level=independent_vars)
return df
# tmp = df.groupby(independent_vars)['frac_above_thresh'].mean()
# tmp = df.groupby(independent_vars)['frac_above_thresh'].transform('mean')
# print("tmp:\n", tmp)
# df['frac_above_thresh'] = tmp
# return df
# return df.groupby(independent_vars)[independent_vars + ['frac_above_thresh']]
# ret = df.groupby([methodvar, unitvar, xvar])['__above_thresh__']
# ret.drop(['__above_thresh__'], axis=1, inplace=True)
# return ret
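# Toy illustration (made-up numbers) of the groupby + reset_index pattern
# used above: per (method, xvar) pair, the fraction of rows whose yvar clears
# the threshold, with the group keys returned as ordinary columns.
def _example_frac_above_thresh():
    df = pd.DataFrame({'method': ['a', 'a', 'b', 'b'],
                       'speedup': [2, 2, 2, 2],
                       'acc': [.9, .5, .95, .85]})
    out = frac_above_thresh(df, xvar='speedup', yvar='acc',
                            methodvar='method', unitvar=None, ythresh=.8)
    # out has columns ['method', 'speedup', 'frac_above_thresh'], with
    # frac_above_thresh == .5 for method 'a' and 1.0 for method 'b'
    return out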
def encode_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'encode-timing.csv')
ORIG_HEADERS = 'algo __ N D C B ___ t0 _0 t1 _1 t2 _2 t3 _3 t4 _4'.split()
USE_HEADERS = 'algo N D C B t0 t1 t2 t3 t4'.split()
# ORIG_HEADERS = 'algo __ N D C ___ t0 _0 t1 _1 t2 _2'.split()
# USE_HEADERS = 'algo N D C t0 t1 t2'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
return df
# print(df)
def lut_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'lut-timing.csv')
ORIG_HEADERS = ('algo __ N D C B lutconst ___ '
't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
USE_HEADERS = 'algo N D C B lutconst t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
return df
def scan_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'scan-timing.csv')
ORIG_HEADERS = 'algo __ N C B M ___ t0 _0 t1 _1 t2 _2 t3 _3 t4 _4'.split()
USE_HEADERS = 'algo N C B M t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None,
skiprows=1)
df = df[USE_HEADERS]
return df
def mithral_amm_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'amm-mithral-timing.csv')
ORIG_HEADERS = ('dset dtype algo __ N D M C lutconst ___ '
't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
USE_HEADERS = 'dset dtype algo N D M C lutconst t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
return df
def bolt_amm_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'amm-bolt-timing.csv')
ORIG_HEADERS = ('dset dtype algo __ N D M C ___ '
't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
USE_HEADERS = 'dset dtype algo N D M C t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
df['fixedB'] = df['algo'].str.strip().str.endswith('noenc')
df.drop('algo', axis=1, inplace=True)
df = df.loc[df['fixedB']]
# print("bolt df:\n", df)
# import sys; sys.exit()
return df
def dense_amm_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'amm-dense-timing.csv')
ORIG_HEADERS = ('dset algo __ N D M d ___ '
't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
USE_HEADERS = 'dset algo N D M d t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
df['algo'] = df['algo'].str.strip()
# drop stuff that doesn't have fixedW; we just let the existing methods
# use fixedW (same as fixedB in amm.py), instead of having to encode the
# smaller matrix
# df = df.loc[~df['algo'].isin(['blas sketch matmul', 'our sketch matmul'])]
t_sums = (df['t0'] + df['t1'] + df['t2'] + df['t3'] + df['t4']).values / 5
# df['t_avg'] = (df['t0'] + df['t1'] + df['t2'] + df['t3'] + df['t4']) / 5.
# # mark whether it's from our gemm or eigen gemm
# df['is_ours'] = df['algo'].str.startswith('our')
# print("uniq n vals: ", np.unique(df['N']))
    sizes = np.empty((len(df), 4), dtype=int)
sizes[:, 0] = df['N']
sizes[:, 1] = df['D']
sizes[:, 2] = df['M']
sizes[:, 3] = df['d']
as_tuples = [tuple(row) for row in sizes]
uniq_tuples = sorted(list(set(as_tuples)))
keep_idxs = []
# print("sizes:\n", sizes)
# print("uniq_tuples:\n", uniq_tuples)
for tup in uniq_tuples:
row = np.array(tup)
idxs = np.where((sizes == row).sum(axis=1) == sizes.shape[1])[0]
best_idx = idxs[np.argmin(t_sums[idxs])]
# print(f"{tup} -> {best_idx}")
keep_idxs.append(best_idx)
df = df.iloc[keep_idxs]
rename_dict = {}
rename_dict['blas matmul'] = 'Brute Force'
rename_dict['our matmul'] = 'Brute Force'
rename_dict['blas sketch matmul'] = 'Dense Sketch'
rename_dict['our sketch matmul'] = 'Dense Sketch'
rename_dict['blas sketch fixedw matmul'] = 'Dense Sketch'
rename_dict['our sketch fixedw matmul'] = 'Dense Sketch'
df = rename_values_in_col(df, 'algo', rename_dict, drop_others=False)
return df
def osnap_amm_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'amm-osnap-timing.csv')
ORIG_HEADERS = ('dset algo __ N D M d s ___ '
't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
USE_HEADERS = 'dset algo N D M d s t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
df.drop('algo', axis=1, inplace=True)
return df
def sparse_amm_timings():
TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'amm-sparse-timing.csv')
ORIG_HEADERS = ('dset algo __ N D M d frac ___ '
't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
USE_HEADERS = 'dset algo N D M d frac t0 t1 t2 t3 t4'.split()
df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
df = df[USE_HEADERS]
df.drop('algo', axis=1, inplace=True)
return df
def scalar_quantized_amm_timings():
timings = []
timings.append(['Cifar10', 10000, 512, 10, 2.013])
timings.append(['Cifar100', 10000, 512, 100, 6.472])
timings.append(['Ucr128', 1000, 320, 128, .4808])
timings.append(['Caltech3x3', 49284, 27, 2, .894])
timings.append(['Caltech5x5', 48400, 75, 2, 1.409])
dicts = [{'dset': l[0], 'N': l[1], 'D': l[2],
'M': l[3], 'time': l[4]} for l in timings]
# df = pd.DataFrame.from_records(dicts)
# print("scalar_quantized_amm_timings: ")
# print(df)
return pd.DataFrame.from_records(dicts)
# import sys; sys.exit()
# df = pd.DataFrame.from_records(timings)
# output from ./GEMMsBenchmark after defining FBGEMM_MEASURE_TIME_BREAKDOWN
# in include/fbgemm/Fbgemm.h; recorded here since not in a results file
'''
M, N, K, Type, Packing (us), Kernel (us), Postproc (us), Total (us), GOPs
10000, 10, 512, FBGEMM_i8_acc32, 438.069, 1465.04, 43.5959, 2013.31, 50.6
10000, 10, 512, FBGEMM_i8_acc16, 512.8, 1338.1, 69.9, 2115.1, 48.3
10000, 100, 512, FBGEMM_i8_acc32, 473.7, 9203.9, 85.9, 9923.9, 103.1
10000, 100, 512, FBGEMM_i8_acc16, 569.8, 5558.7, 108.5, 6472.2, 158.1
1000, 128, 320, FBGEMM_i8_acc32, 39.5, 724.6, 5.8, 795.2, 101.8
1000, 128, 320, FBGEMM_i8_acc16, 43.5, 404.1, 3.1, 480.8, 168.4
49284, 2, 27, FBGEMM_i8_acc32, 298.5, 226.2, 139.6, 894.0, 5.9
49284, 2, 27, FBGEMM_i8_acc16, 333.6, 650.1, 162.5, 1608.7, 3.3
48400, 2, 75, FBGEMM_i8_acc32, 482.0, 546.0, 141.5, 1409.3, 10.2
48400, 2, 75, FBGEMM_i8_acc16, 438.3, 1228.7, 159.2, 2278.4, 6.4
'''
def _extract_cols_into_list_of_tuples(df, cols):
# return [tuple(row) for row in df[cols].iterrows()]
ar = np.vstack([df[col] for col in cols]).T
# print("ar: \n", ar)
ar = np.atleast_2d(ar).astype(np.int)
# return [tuple(row) for row in ar]
return [sum([hash(-12435 * i + 1) ^ hash(1234567 * val)
for i, val in enumerate(row)]) for row in ar]
# return [int(hash(tuple(row))) for row in ar]
def _join_on_cols(df_left, left_cols, df_right, right_cols, verbose=0):
df_left = df_left.copy()
df_right = df_right.copy()
df_left['__index__'] = _extract_cols_into_list_of_tuples(
df_left, left_cols)
df_right['__index__'] = _extract_cols_into_list_of_tuples(
df_right, right_cols)
# dup_cols = set(left_cols) & set(right_cols)
# if verbose > 0:
# print("_join_on_cols(); dropping duplicate cols from rhs: ", dup_cols)
# df_right = df_right.drop(dup_cols, axis=1)
df = df_left.merge(
df_right, on='__index__', how='left', suffixes=('', '_rhs'))
df.drop(['__index__'], axis=1, inplace=True)
# df.sort_values(left_cols, axis=0, inplace=True)
return df
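# Illustrative sketch (not part of the original file): a tiny, hypothetical example
# of how _join_on_cols pairs rows by hashing the values of the join columns into a
# synthetic '__index__' key and then merging on it. The helper below is defined but
# never called, and the toy frames are made up.
def _example_join_on_cols():
    left = pd.DataFrame({'N': [100, 200], 'D': [8, 8], 'acc': [.9, .8]})
    right = pd.DataFrame({'N': [100, 200], 'D': [8, 8], 'time': [1.5, 2.5]})
    joined = _join_on_cols(left, ['N', 'D'], right, ['N', 'D'])
    # each row of `left` picks up the 'time' of the (N, D)-matching row in `right`
    return joined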
def _join_with_mithral_times(df, timing_dtype='f32'):
time_df = mithral_amm_timings()
if timing_dtype is not None:
time_df = time_df.loc[time_df['dtype'].str.strip() == timing_dtype]
df = df.loc[df['method'].str.lower().str.startswith('mithral')]
df['ncodebooks'] = df['ncodebooks'].astype(np.int)
# time_df.reset_index(inplace=True, drop=True)
# df.reset_index(inplace=True, drop=True)
# print("time_df with appropriate dtype:\n", time_df)
# import sys; sys.exit()
# we also report times for subroutines within mithral; can't let it
# use any of these; just use rename_values_in_col to drop them and also
# get more intuitive debug output
# rename_dict = {'amm mithral sparselut': 'Mithral, L = ??',
# 'amm mithral nolut': 'Mithral, L = ∞'}
name_mithral_dense = 'mithralDense' # only one we use; others arbitrary
rename_dict = {'amm mithral sparselut': 'mithralSparse',
'amm mithral denselut': name_mithral_dense,
'amm mithral nolut': 'mithralOffline'}
time_df = rename_values_in_col(time_df, 'algo', rename_dict)
# give MithralPQ a valid lut const so the join will work (pq is equivalent
# to constant of 1)
is_mithral_pq = df['method'].str.lower().str.startswith('mithralpq')
df.loc[is_mithral_pq, 'lut_work_const'] = 1
df_mpq = df.loc[is_mithral_pq].copy()
# there shouldn't be rows that violated this, but there are (probably
# from early runs that haven't been overwritten yet)
df = df.loc[df['lut_work_const'].values <= df['ncodebooks'].values]
# now add in extra rows for mithral with no lut computation (which is
# assumed to use dense luts because no reason not to) vs mithral
# with dense lut computation as part of the timing
is_any_mithral = df['method'].str.lower().str.startswith('mithral')
is_mithral = is_any_mithral & (~is_mithral_pq)
is_dense = df['lut_work_const'] == -1
df_mithral_dense = df.loc[is_mithral & is_dense].copy()
dummy_lutconst = -2
df_mithral_dense['lut_work_const'] = dummy_lutconst
    time_df.loc[time_df['algo'] == name_mithral_dense, 'lutconst'] = dummy_lutconst
# add in version of mithralpq with offline lut computation
    df_mpq = df.loc[df['method'].str.lower().str.startswith('mithralpq')].copy()
    df_mpq['lut_work_const'] = -1
df = pd.concat([df, df_mithral_dense, df_mpq], axis=0)
cols_df = 'N D M ncodebooks lut_work_const'.split()
cols_time_df = 'N D M C lutconst'.split()
# print("df cols: ", df.columns)
# time_df.reset_index(inplace=True, drop=True)
# df.reset_index(inplace=True, drop=True)
df.sort_values(['method'] + cols_df, axis=0, inplace=True)
time_df.sort_values(cols_time_df, axis=0, inplace=True)
ret = _join_on_cols(df, cols_df, time_df, cols_time_df)
# ret['lut_work_const'].loc[ret['lut_work_const'] == dummy_lutconst] = -1
# show_keys = 'method N D M C ncodebooks lutconst lut_work_const'.split()
# print("mithral df:\n", df['method N D M ncodebooks lut_work_const'.split()])
# # print("mithral time df:\n", time_df.loc[time_df['dset'] == 'Cifar10'])
# print("mithral time df:\n", time_df.loc[time_df['dset'] == 'Caltech3x3'])
# print("joined df:\n", ret[show_keys])
# # print("joined df:\n", ret)
# import sys; sys.exit()
# one of these fails if the join failed; check if you have redundant
# rows in either df or missing rows in the time df
assert np.all(ret['C'] == ret['ncodebooks'])
assert np.all(ret['lutconst'] == ret['lut_work_const'])
return ret
def _join_with_bolt_times(df):
time_df = bolt_amm_timings()
df = df.loc[df['method'].str.lower().str.startswith('bolt')]
return _join_on_cols(df, 'N D M ncodebooks'.split(),
time_df, 'N D M C'.split())
def _join_with_osnap_times(df):
time_df = osnap_amm_timings()
# df = df.loc[df['method'].str.lower().str.startswith('osnap')]
    df = df.loc[df['method'].isin(
        [methods.METHOD_OSNAP, methods.METHOD_HASHJL])].copy()
    df['s'] = 1
    df.loc[df['method'] == methods.METHOD_OSNAP, 's'] = 4
# print("osnap df shape: ", df.shape)
df['d'] = df['d'].astype(np.int)
# print("time_df:\n", time_df[time_df['dset'] == 'Cifar10'])
# note that d < s isn't present in time_df, which makes sense
return _join_on_cols(df, 'N D M d s'.split(),
time_df, 'N D M d s'.split())
def _join_with_brute_force_times(df):
time_df = dense_amm_timings()
df = df.loc[df['method'].str.lower().str.startswith('exact')]
time_df = time_df.loc[time_df['algo'].str.lower().str.startswith('brute')]
# print("df:\n", df)
# print("time_df:\n", time_df)
return _join_on_cols(df, 'N D M'.split(), time_df, 'N D M'.split())
def _join_with_dense_sketch_times(df):
time_df = dense_amm_timings()
# print("found methods in df: ", df['method'].unique())
# print("dense sketch methods: ", methods.DENSE_SKETCH_METHODS)
df = df.loc[df['method'].isin(methods.DENSE_SKETCH_METHODS)]
time_df = time_df.loc[time_df['algo'].str.lower().str.startswith(
'dense sketch')]
# print("df:\n", df)
# print("time_df:\n", time_df)
return _join_on_cols(df, 'N D M d'.split(),
time_df, 'N D M d'.split())
def _join_with_scalar_quantize_times(df):
time_df = scalar_quantized_amm_timings()
df = df.loc[df['method'] == methods.METHOD_SCALAR_QUANTIZE]
# print("scalar quantize time df:\n", time_df)
# print("scalar quantize acc df:\n", df.columns)
# print(df['N D M'.split()])
# df_joint = _join_on_cols(df, 'N D M'.split(), time_df, 'N D M'.split())
# print("joined df: ")
# print(df_joint['N D M time'.split()])
# import sys; sys.exit()
return _join_on_cols(df, 'N D M'.split(), time_df, 'N D M'.split())
def extract_pareto_frontier_idxs(xvals, yvals):
"""assumes lower x is better and higher y is better"""
assert len(xvals) == len(yvals)
sort_idxs = np.argsort(xvals)
xvals = xvals[sort_idxs]
yvals = yvals[sort_idxs]
# orig_idxs = np.arange(len(xvals))
    first_idx = sort_idxs[0]
    curr_thresh = yvals[0]  # y value of the cheapest (lowest-x) point
    keep_idxs = [first_idx]
for i, y in enumerate(yvals[1:]):
if y > curr_thresh:
curr_thresh = y
keep_idxs.append(sort_idxs[i + 1])
return keep_idxs
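# Illustrative sketch (not part of the original file): extract_pareto_frontier_idxs
# on made-up (time, accuracy) pairs, where lower time is better and higher accuracy
# is better. Defined but never called.
def _example_pareto_frontier():
    times = np.array([1., 2., 3., 4.])
    accs = np.array([.70, .65, .80, .75])
    idxs = extract_pareto_frontier_idxs(times, accs)
    # keeps index 0 (the cheapest point) and index 2 (more accurate than anything
    # cheaper); indices 1 and 3 are dominated
    return idxs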
def _join_with_sparse_sketch_times(df, sparse_pareto=True):
time_df = sparse_amm_timings()
df = df.loc[df['method'].str.lower().str.startswith('sparse')]
df['d'] = df['d'].astype(np.int)
new_rows = []
for _, row in df.iterrows():
# pprint.pprint(dict(row))
subdf = time_df
for key in 'N D M d'.split():
subdf = subdf.loc[subdf[key] == row[key]]
if len(subdf) < 1:
continue
sparsities = subdf['frac']
# print("subdf for N, D, M, D: ", [row[k] for k in 'N D M d'.split()])
# print(subdf)
# N, D, M, d = [row[k] for k in 'N D M d'.split()]
target_frac = row['sparsity']
small_enough_sparsities_idxs = np.where(sparsities.values <= target_frac)[0]
if len(small_enough_sparsities_idxs):
take_idx = small_enough_sparsities_idxs[-1]
else: # no nonzeros, or at least uselessly few of them
take_idx = np.argmin(sparsities.values)
time_keys = 't0 t1 t2 t3 t4'.split()
times_row = subdf.iloc[take_idx]
# times = subdf.loc[take_idx, time_keys]
row = dict(row)
for key in time_keys:
row[key] = float(times_row[key])
row['time'] = sum([float(times_row[key])
for key in time_keys]) / len(time_keys)
new_rows.append(row)
# return pd.DataFrame.from_records(new_rows)
df = pd.DataFrame.from_records(new_rows)
if not sparse_pareto:
return df
# # for dset in df['']
# subdf = df.loc[df['method'] == 'SparsePCA']
# here we have a bunch of hack stuff
# print("df columns: ", df.columns)
# yvals = 1. - df['normalized_mse'].values
subdfs = []
for tid in df['task_id'].unique():
subdf = df.loc[df['task_id'] == tid]
xvals = subdf['time'].values
if 'acc_amm' in df.columns:
yvals = subdf['acc_amm'].values
else:
yvals = 1. - subdf['normalized_mse'].values
idxs = extract_pareto_frontier_idxs(xvals, yvals)
subdfs.append(subdf.iloc[idxs])
df = pd.concat(subdfs, axis=0)
return df
def _clean_method_names_amm(df):
key = 'method' if 'method' in df else 'algo'
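    # apparent lutconst / lut_work_const convention used below: -2 marks the
    # synthetic "dense LUT computed online" rows added in _join_with_mithral_times,
    # -1 means LUTs are treated as computed offline (no LUT cost counted), and
    # positive values are the sparse-LUT work budget L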
if 'lutconst' in df:
df.loc[df['lutconst'] == -2, key] = 'MADDNESS Dense'
is_lutconst_neg1 = df['lutconst'] == -1
is_mithral_pq = df['method'] == 'MithralPQ'
df.loc[is_lutconst_neg1 & is_mithral_pq, key] = 'MADDNESS-PQ'
df.loc[is_lutconst_neg1 & ~is_mithral_pq, key] = 'MADDNESS'
df.loc[df['lutconst'] == 1, key] = 'MADDNESS, L = 1'
df.loc[df['lutconst'] == 2, key] = 'MADDNESS, L = 2'
df.loc[df['lutconst'] == 4, key] = 'MADDNESS, L = 4'
# df.loc[df['lutconst'] == -2, key] = 'Mithral Dense'
# is_lutconst_neg1 = df['lutconst'] == -1
# is_mithral_pq = df['method'] == 'MithralPQ'
# df.loc[is_lutconst_neg1 & is_mithral_pq, key] = 'MithralPQ'
# df.loc[is_lutconst_neg1 & ~is_mithral_pq, key] = 'Mithral'
# df.loc[df['lutconst'] == 1, key] = 'Mithral, L = 1'
# df.loc[df['lutconst'] == 2, key] = 'Mithral, L = 2'
# df.loc[df['lutconst'] == 4, key] = 'Mithral, L = 4'
# mask = df['lutconst'] == 1
# is_mithral_pq = df[key].str.lower().str.startswith('mithralpq')
# mask &= ~is_mithral_pq
# df[key][mask] = 'Mithral, L = ∞'
# df[key].loc[df[key] == 'Exact'] = 'Brute Force'
    # leave 'Exact' rows named 'Exact' (see the commented-out rename above)
    df.loc[df[key] == 'Exact', key] = 'Exact'
return df
def _clean_metrics_amm(df):
df = df.rename({'acc_amm': 'Accuracy'}, axis=1)
# if 'time' not in df.columns:
mask = df['time'].isna()
# df.loc['time', mask] = ((df['t0'] + df['t1'] + df['t2'] + df['t3'] + df['t4']).values / 5.)[mask]
times = (df['t0'] + df['t1'] + df['t2'] + df['t3'] + df['t4']) / 5.
df.loc[mask, 'time'] = times.values[mask]
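    # assumption: 'time' is in milliseconds here, so 1e3 * N * M / time below is
    # output matrix cells computed per second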
df['Throughput'] = 1e3 * df['N'] * df['M'] / df['time']
# create ops column that sums number of multiplies + lookups
df['muls'] = df['muls'].fillna(0)
mask = ~df['nlookups'].isna()
df['ops'] = df['muls']
# print("debugging df[ops]: ")
# # print(type(df['ops']))
# # print(type(df['ops']))
# print(type(df['nlookups']))
    df.loc[mask, 'ops'] = df.loc[mask, 'ops'] + df.loc[mask, 'nlookups']
# df['nor']
# df_exact = df.loc[df['method'] == 'Brute Force']
df_exact = df.loc[df['method'] == 'Exact']
# print("df_exact\n", df_exact)
if 'task_id' in df.columns:
nuniq_tasks = len(df['task_id'].unique())
else:
nuniq_tasks = 1 # cifar{10,100}
assert df_exact.shape[0] == nuniq_tasks
base_time = float(df_exact.loc[0, 'time'])
df['NormalizedTime'] = df['time'] / base_time
df['Speedup'] = 1. / df['NormalizedTime']
df['1 - NMSE'] = 1. - df['normalized_mse']
if 'Accuracy' in df.columns:
df['Relative Accuracy'] = df['Accuracy'] / (df['acc_orig'] + 1e-20)
# print("df.columns", df.columns)
# df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
# if 'acc-1nn-raw' in df.columns:
# # # note that relative accuracy can actually be higher if errors
# # # happen to compensate for incorrect classification sometimes
# # print("max relative acc: ", df['Relative Accuracy'].values.max())
# # # assert df['Relative Accuracy'].values.max() <= 1.000001
# # acc_orig field is supposed to capture this, but I messed it up for
# # 1nn so this will also work
# tid2acc = {}
# exactdf = df.loc[df['method'] == 'Exact']
# for tid in df['task_id'].unique():
# subdf = exactdf.loc[exactdf['task_id'] == tid]
# tid2acc[tid] = subdf['Accuracy'].values[0]
# df['BaseAccuracy'] = [tid2acc[tid] for tid in df['task_id']]
# df['Relative Accuracy'] = df['Accuracy'] / df['BaseAccuracy']
return df
def _join_with_times(df, timing_dtype='f32', sparse_pareto=True):
df_quant = _join_with_scalar_quantize_times(df)
# print("scalar quantize time df:\n", time_df)
# print("scalar quantize acc df:\n", df)
# print("df scalar quant:\n", df_quant['dset N D M time'.split()])
# import sys; sys.exit()
df_bolt = _join_with_bolt_times(df)
# # print("df bolt:\n", df_bolt)
# df_tmp = df_bolt['N D M C ncodebooks method normalized_mse t0 t1 t2 t3 t4 task_id'.split()]
# df_tmp = df_tmp.loc[df_tmp['task_id'].isin(['ucr Yoga k=128', 'ucr Wafer k=128'])]
# df_tmp['time'] = (df_tmp['t0'] + df_tmp['t1'] + df_tmp['t2'] + df_tmp['t3'] + df_tmp['t4']) / 5.
# print("df tmp:\n", df_tmp)
# print("df tmp times:\n", df_tmp[['C', 'time', 'task_id']])
# # tids = df_tmp['task_id'].unique()
# # # yep, exactly 6 results per dset
# # counts = df_tmp.groupby('task_id')['task_id'].count()
# # print("task counts: ", counts)
# import sys; sys.exit()
# print("df bolt:\n", df_bolt) # looks good
# import sys; sys.exit()
# assert np.all(df_mithral['lutconst'] == df_mithral['lut_work_const'])
# df_mithral = df.loc[df['method'].str.startswith('Mithral')]
# df_mithral.to_csv('mithral-caltech-debug.csv')
df_mithral = _join_with_mithral_times(df, timing_dtype=timing_dtype)
# df_tmp = df_mithral
# df_tmp = df_tmp['N D M C ncodebooks lutconst lut_work_const method algo normalized_mse t0 t1'.split()]
# # print("mithral rows:\n", df.loc[df['method'].str.startswith('mithral')])
# print("mithralpq rows after join:\n", df_tmp.loc[df_tmp['method'] == 'MithralPQ'])
# print("mithral rows after join:\n", df_tmp[:100])
# mismatch_mask = df_tmp['lutconst'] != df_tmp['lut_work_const']
# print("mithral mismatched rows:\n", df_tmp.loc[mismatch_mask])
# print(df_mithral['lutconst', 'lut_work_const'])
# import sys; sys.exit()
# if this line fails, it's usually because the join with mithral times
# failed
assert np.all(df_mithral['lutconst'] == df_mithral['lut_work_const'])
df_osnap = _join_with_osnap_times(df)
df_brute = _join_with_brute_force_times(df)
df_sketch = _join_with_dense_sketch_times(df)
df_sparse = _join_with_sparse_sketch_times(df, sparse_pareto=sparse_pareto)
# dfs = [df_mithral, df_bolt, df_osnap, df_brute, df_sketch, df_sparse]
dfs = [df_quant, df_mithral, df_bolt, df_osnap, df_brute,
df_sketch, df_sparse]
return pd.concat(dfs, axis=0, join='outer', sort=False)
def _clean_amm_results_df(df, timing_dtype='f32', sparse_pareto=True):
# print("initial methods: ", df['method'].unique())
df = _join_with_times(
df, timing_dtype=timing_dtype, sparse_pareto=sparse_pareto)
# df['time'] = df['t_avg']
# df = melt_times(df)
# print("uniq methods after joining with times:\n", sorted(df['method'].unique()))
# import sys; sys.exit()
df = _clean_metrics_amm(df)
df = df.loc[~df['time'].isna()]
# print("uniq methods after rming nan times:\n", sorted(df['method'].unique()))
# import sys; sys.exit()
df = _clean_method_names_amm(df)
return df
@_memory.cache
def _read_amm_csv(fname, **kwargs):
df = pd.read_csv(os.path.join(RESULTS_DIR, fname), **kwargs)
drop_cols_inplace(df, AMM_DROP_COLS)
return df
def cifar10_amm():
# df = pd.read_csv(os.path.join(RESULTS_DIR, 'cifar10.csv'))
# drop_cols_inplace(df, AMM_DROP_COLS)
df = _read_amm_csv('cifar10.csv')
# print("initial uniq methods:\n", sorted(df['method'].unique()))
return _clean_amm_results_df(df)
def cifar100_amm():
# df = pd.read_csv(os.path.join(RESULTS_DIR, 'cifar100.csv'))
# drop_cols_inplace(df, AMM_DROP_COLS)
df = _read_amm_csv('cifar100.csv')
return _clean_amm_results_df(df)
@_memory.cache
def caltech_amm(filt='sobel'):
"""filt must be one of {'sobel','dog5x5'}"""
df = _read_amm_csv('caltech_{}.csv'.format(filt))
return _clean_amm_results_df(df, timing_dtype='i8')
@_memory.cache
def ucr_amm(k=128, problem='rbf'):
"""k must be one of {64, 128, 256}"""
df = _read_amm_csv('ucr_k={}_problem={}.csv'.format(k, problem))
df['origN'] = df['N'].values
df['N'] = 1000 # timing is for a test set size of 1000
if problem == 'softmax':
df['M'] = k # to get meaningful timing vs acc comparison
return _clean_amm_results_df(df, sparse_pareto=False)
def main():
# print(encode_timings())
# print(lut_timings())
# print(scan_timings())
# print(bolt_amm_timings())
# print(mithral_amm_timings())
# print(dense_amm_timings())
# print(osnap_amm_timings())
# print(sparse_amm_timings())
# print(cifar10_amm())
# print(cifar100_amm())
# print(caltech_amm(filt='sobel'))
# print(caltech_amm(filt='dog5x5'))
# print(ucr_amm(k=64))
# df = ucr_amm(k=128, problem='rbf')
# df_bolt = df.loc[df['method'] == 'Bolt']
# print("number of uniq bolt speedups:")
# print(df_bolt['Speedup'].unique().size)
# import sys; sys.exit()
# df = df.loc[df['method'] == 'Bolt']
# print("bolt dset counts")
# print(df.groupby('task_id')['task_id'].count())
# # print("bolt speedup per dset counts")
# # print(df.groupby('task_id')['Speedup'].unique().count())
# print("number of uniq speedups:")
# print(df['Speedup'].unique().size)
# df = cifar10_amm()
# df = cifar100_amm()
df = caltech_amm(filt='dog5x5')
# df = caltech_amm(filt='sobel')
print(sorted(df['method'].unique()))
# # df = df.loc[df['method'].isin(['Brute Force', 'Mithral', 'SparsePCA'])]
# df = df.loc[df['method'].isin(['Exact', 'ScalarQuantize', 'MADDNESS', 'SparsePCA'])]
# df = df.sort_values(['method', 'Speedup'], axis=0)
# print(df['method Speedup Accuracy'.split()])
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
KEY_NLOOKUPS = 'nlookups'
class VQMatmul(amm.ApproxMatmul, abc.ABC):
def __init__(self, ncodebooks, ncentroids=None):
self.ncodebooks = ncodebooks
self.ncentroids = (self._get_ncentroids() if ncentroids is None
else ncentroids)
self.enc = self._create_encoder(ncodebooks)
self.reset_for_new_task()
@abc.abstractmethod
    def _create_encoder(self, ncodebooks):  # to be overridden by subclasses
return vq.PQEncoder(ncodebooks=ncodebooks, ncentroids=self.ncentroids,
**self._get_encoder_kwargs())
# @abc.abstractmethod
def _get_ncentroids(self):
pass
@abc.abstractmethod
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
pass
    def _get_encoder_kwargs(self):  # to be overridden by subclasses
return {}
def reset_for_new_task(self):
self.A_enc = None
self.luts = None
def fit(self, A, B, Y=None):
_, D = A.shape
if D < self.ncodebooks:
raise amm.InvalidParametersException(
'D < C: {} < {}'.format(D, self.ncodebooks))
self.enc.fit(A, B.T)
def set_A(self, A):
self.A_enc = self.enc.encode_X(A)
def set_B(self, B):
self.luts = self.enc.encode_Q(B.T)
def __call__(self, A, B):
if self.A_enc is None:
self.set_A(A)
if self.luts is None:
self.set_B(B)
return self.enc.dists_enc(self.A_enc, self.luts)
def get_params(self):
return {'ncodebooks': self.ncodebooks}
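# Illustrative sketch (not part of the original file): the intended calling sequence
# for these ApproxMatmul subclasses, using hypothetical random data and PQMatmul
# (defined just below). Assumes the vq module imported at the top of this file is
# available. Defined but never called.
def _example_vq_matmul_usage():
    A = np.random.randn(1000, 64).astype(np.float32)
    B = np.random.randn(64, 16).astype(np.float32)
    est = PQMatmul(ncodebooks=8)
    est.fit(A, B)               # learns per-codebook centroids
    est.reset_for_new_task()    # clear any cached codes / LUTs
    approx = est(A, B)          # encodes A, builds LUTs from B, sums LUT entries
    return approx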
# ================================================================ PQ
class PQMatmul(VQMatmul):
    def _create_encoder(self, ncodebooks):  # to be overridden by subclasses
return vq.PQEncoder(ncodebooks=ncodebooks, ncentroids=self.ncentroids,
**self._get_encoder_kwargs())
def _get_ncentroids(self):
return 256
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
# data encoding and LUT costs
nmuls = 0
nmuls += 0 if fixedA else A.shape[0] * A.shape[1] * self.ncentroids
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
nlookups = A.shape[0] * B.shape[1] * self.ncodebooks
return {amm.KEY_NMULTIPLIES: nmuls, KEY_NLOOKUPS: nlookups}
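# Illustrative sketch (not part of the original file): a worked instance of the PQ
# op counts above for hypothetical shapes A: (N=10000, D=512), B: (D=512, M=10)
# with ncodebooks=16 and ncentroids=256. Defined but never called.
def _example_pq_op_counts():
    N, D, M = 10000, 512, 10
    ncodebooks, ncentroids = 16, 256
    nmuls_encode_A = N * D * ncentroids   # distances from every row of A to every centroid
    nmuls_lut_B = D * M * ncentroids      # building the lookup tables from B
    nlookups = N * M * ncodebooks         # one table lookup per (row, column, codebook)
    return nmuls_encode_A + nmuls_lut_B, nlookups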
# ================================================================ OPQ
class OPQMatmul(PQMatmul):
def _get_encoder_kwargs(self):
return dict(preproc='OPQ')
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
rot_nmuls = A.shape[0] * A.shape[1] * A.shape[1] # OPQ rotation cost
metrics[amm.KEY_NMULTIPLIES] += rot_nmuls
return metrics
# ================================================================ Bolt
class BoltMatmul(PQMatmul):
# def __init__(self, ncodebooks):
# self.ncodebooks = ncodebooks
# self.ncentroids = 16
# self.enc = self._create_encoder(self.ncodebooks)
# self._reset()
def _get_ncentroids(self):
return 16
def _create_encoder(self, ncodebooks):
return vq.PQEncoder(ncodebooks=ncodebooks, ncentroids=self.ncentroids,
quantize_lut=True,
# quantize_lut=False,
# accumulate_how='mean',
accumulate_how='sum',
upcast_every=-1,
# upcast_every=2,
# upcast_every=4,
# upcast_every=256, # fine as long as using mean
# TODO set quantize_lut=True after debug
**self._get_encoder_kwargs())
class GEHTBoltMatmul_CovTopk(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(
preproc='GEHT', sample_how='deterministic', stats_mat='cov')
class GEHTBoltMatmul_CovSamp(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(
preproc='GEHT', sample_how='importance', stats_mat='cov')
class GEHTBoltMatmul_CorrTopk(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(
preproc='GEHT', sample_how='deterministic', stats_mat='corr')
class GEHTBoltMatmul_CorrSamp(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(
preproc='GEHT', sample_how='importance', stats_mat='corr')
class BoltSplits(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(
preproc='PQ', encode_algo='splits')
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
nmuls = 0
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
metrics[amm.KEY_NMULTIPLIES] = nmuls
return metrics
class BoltMultiSplits(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(encode_algo='multisplits')
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
nmuls = 0
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
metrics[amm.KEY_NMULTIPLIES] = nmuls
return metrics
class BoltPermMultiSplits(BoltMatmul):
def _get_encoder_kwargs(self):
return dict(preproc='GEHT', encode_algo='multisplits')
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
nmuls = 0
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
metrics[amm.KEY_NMULTIPLIES] = nmuls
return metrics
class PQPerm(PQMatmul):
def _get_encoder_kwargs(self):
return dict(preproc='GEHT')
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
nmuls = 0
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
metrics[amm.KEY_NMULTIPLIES] = nmuls
return metrics
class PQMultiSplits(PQMatmul):
def __init__(self, ncodebooks, ncentroids=256):
super().__init__(ncodebooks=ncodebooks, ncentroids=ncentroids)
def _get_encoder_kwargs(self):
return dict(encode_algo='multisplits')
def get_params(self):
return {'ncodebooks': self.ncodebooks, 'ncentroids': self.ncentroids}
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
nmuls = 0
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
metrics[amm.KEY_NMULTIPLIES] = nmuls
return metrics
class PQPermMultiSplits(PQMatmul):
def __init__(self, ncodebooks, ncentroids=256):
super().__init__(ncodebooks=ncodebooks, ncentroids=ncentroids)
def _get_encoder_kwargs(self):
return dict(preproc='GEHT', encode_algo='multisplits')
def get_params(self):
return {'ncodebooks': self.ncodebooks, 'ncentroids': self.ncentroids}
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
nmuls = 0
nmuls += 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
metrics[amm.KEY_NMULTIPLIES] = nmuls
return metrics
# ================================================================ Mithral
class OldMithralPQ(PQMatmul):
# def _get_ncentroids(self):
# return 16
def __init__(self, ncodebooks):
super().__init__(ncodebooks=ncodebooks, ncentroids=16)
def _create_encoder(self, ncodebooks):
return vq.PQEncoder(ncodebooks=ncodebooks, ncentroids=self.ncentroids,
encode_algo='multisplits',
quantize_lut=True,
upcast_every=16, # fine as long as using mean
accumulate_how='mean')
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
N, D = A.shape
D, M = B.shape
# data encoding and LUT costs
nmuls = 0
nmuls += 0 if fixedA else N * D # offset + scale before quantize
nmuls += 0 if fixedB else M * self.ncentroids * D
# lookups given encoded data + luts
nlookups = N * M * self.ncodebooks
return {amm.KEY_NMULTIPLIES: nmuls, KEY_NLOOKUPS: nlookups}
class MithralMatmul(VQMatmul):
def __init__(self, ncodebooks, lut_work_const=-1):
self.lut_work_const = lut_work_const
if (lut_work_const is not None) and (lut_work_const > 0) and (
lut_work_const > ncodebooks):
raise amm.InvalidParametersException(
"lut_work_const > ncodebooks: {} > {}".format(
lut_work_const, ncodebooks))
super().__init__(ncodebooks=ncodebooks, ncentroids=16)
# def _get_ncentroids(self):
# return 16
# def fit(self, A, B, Y=None):
# super().fit(self, A, B, Y=Y)
def _create_encoder(self, ncodebooks):
return vq.MithralEncoder(
ncodebooks=ncodebooks, lut_work_const=self.lut_work_const)
def get_params(self):
return {'ncodebooks': self.ncodebooks,
'lut_work_const': self.lut_work_const}
def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
N, D = A.shape
D, M = B.shape
# data encoding and LUT costs
nmuls = 0
nmuls += 0 if fixedA else N * D # offset + scale before quantize
nmuls_per_codebook_per_output = self.ncentroids * D
nmuls_per_output = nmuls_per_codebook_per_output * self.ncodebooks
nmuls += 0 if fixedB else nmuls_per_output * M
# lookups given encoded data + luts
nlookups = N * M * self.ncodebooks
return {amm.KEY_NMULTIPLIES: nmuls, KEY_NLOOKUPS: nlookups}
def set_B(self, B):
self.luts, self.offset, self.scale = self.enc.encode_Q(B.T)
def __call__(self, A, B):
if self.A_enc is None:
self.set_A(A)
if self.luts is None:
self.set_B(B)
return self.enc.dists_enc(self.A_enc, self.luts,
offset=self.offset, scale=self.scale)
class MithralPQ(MithralMatmul):
def __init__(self, ncodebooks):
super().__init__(ncodebooks=ncodebooks, lut_work_const=1)
|
#!/usr/bin/env python
"""utility functions for running experiments"""
# from sklearn.model_selection import StratifiedKFold
try:
from joblib import Memory
memory = Memory('.', verbose=0)
cache = memory.cache
except Exception:
def cache(f):
return f
# ================================================================ Constants
KEY_FINISHED_UPDATING = '__pyn_finished_updating__'
KEY_NEW_KEYS = '__pyn_newkeys__'
# ================================================================ Types
class UsageError(Exception):
pass
class Options(object):
"""Wrapper for a collection to signify that each element is one possible
parameter value"""
def __init__(self, *args):
if args is None or len(args) < 1:
raise ValueError("No options given!")
        if len(args) == 1 and hasattr(args[0], '__len__'):
self.values = args[0] # given a list
else:
self.values = args # given individual objects
def __len__(self):
return len(self.values)
# deliberately don't act like a collection so that we fail fast if
# code doesn't know that this is supposed to represent Options, rather
# than a collection of values. This is mostly to ensure that Options
# are always expanded out when generating sets of parameters.
def __getitem__(self, idx):
self._raise()
def __setitem__(self, idx, item):
self._raise()
def _raise(self):
raise TypeError("Options object is not a collection; use options.values"
" to access the collection of individual options")
# ================================================================ Funcs
# ------------------------------------------------ misc utils
def make_immutable(x):
"""
>>> make_immutable(5) == 5
True
>>> make_immutable('a') == 'a'
True
>>> make_immutable((1, 2)) == (1, 2)
True
>>> make_immutable([1, 2]) == [1, 2]
False
>>> make_immutable([1, 2]) == (1, 2)
True
"""
    # must either not be a collection, or be immutable
try:
{}[x] = 0 # dicts require immutability
return x
except TypeError:
# so it's mutable; either a collection or a
# mutable class; if a class, we're hosed, so
# assume it's a collection
try:
# if it's a singleton collection, try returning
# first element; this will jump to except
# unless x is a collection
_ = len(x)
# apparently a collection, so make it a tuple
return tuple(x)
except TypeError:
return repr(x) # not a collection; stringify as last resort
def as_key(x):
return make_immutable(x)
# ------------------------------------------------ IO / saving results
def now_as_string():
return datetime.datetime.now().strftime("%Y-%m-%dT%H_%M_%S")
def save_data_frame(df, save_dir='results', name="", timestamp='copy',
cols_in_filename=None, col_kv_fmt="_{}={}",
store_index=False, append=True, dedup_cols=None,
add_timestamp_col=True, sort_by=None, **sink):
if timestamp == 'copy': # save one copy with and without timestamp
kwargs = dict(name=name, col_kv_fmt=col_kv_fmt,
cols_in_filename=cols_in_filename, dedup_cols=dedup_cols,
store_index=store_index, append=append, sort_by=sort_by,
add_timestamp_col=add_timestamp_col)
backups_dir = os.path.join(save_dir, 'pyience-backups')
save_data_frame(df, timestamp=True, save_dir=backups_dir, **kwargs)
save_data_frame(df, timestamp=False, save_dir=save_dir, **kwargs)
return
# construct filename
name = name if name else ""
if cols_in_filename:
cols = list(df.columns.values)
# substrs = ["{%s}" % col for col in cols]
# name = name_fmt
# for ss in substrs:
# key = ss[1:-1]
for key in cols_in_filename:
if key not in cols:
warnings.warn("Column '{}' not found in Dataframe."
"Excluding it from filename".format(key))
continue
# get value associated with this key; ignored if col not constant
vals = df[key]
nuniq = len(vals.unique())
if nuniq != 1:
warnings.warn("Column '{}' has more than one value in Dataframe."
"Excluding it from filename".format(key))
continue
val = vals[0]
fmt = col_kv_fmt
if name == "" and not col_kv_fmt.startswith("{"):
fmt = col_kv_fmt[1:]
name += fmt.format(key, val)
ensure_dir_exists(save_dir)
raw_timestamp_str = now_as_string()
timestamp_str = ("_" + raw_timestamp_str) if timestamp else ""
fileName = "{}{}.csv".format(name, timestamp_str).strip("_")
save_path = os.path.join(save_dir, fileName)
if add_timestamp_col:
df['__pyience_timestamp__'] = [raw_timestamp_str] * df.shape[0]
if append and os.path.exists(save_path):
existing_df = pd.read_csv(save_path)
# print("existing_df_cols", existing_df.columns)
# print("existing_df_cols", df.columns)
# print("dedup_cols", dedup_cols)
df = pd.concat([existing_df, df], axis=0, sort=False, ignore_index=True)
# print("df secs: ")
# print(df['secs'])
        if dedup_cols is not None:
            dedup_cols = set(dedup_cols) & set(df.columns)
            df.drop_duplicates(subset=dedup_cols, keep='last', inplace=True)
df = df.sort_index(axis=1)
if sort_by is not None:
df.sort_values(sort_by, inplace=True)
# also move these cols to the front for legibility, since they're
# probably something you care about
other_cols = [col for col in df.columns.values if col not in sort_by]
df = df[sort_by + other_cols]
df.to_csv(save_path, index=store_index)
def save_dicts_as_data_frame(d, **kwargs):
if not isinstance(d, dict):
try:
df = pd.DataFrame.from_records(d)
except Exception:
dfs = [pd.DataFrame.from_records(dd, index=[0]) for dd in d]
df = pd.concat(dfs, axis=0, ignore_index=True)
else:
df = pd.DataFrame.from_records(d, index=[0])
save_data_frame(df, **kwargs)
def generate_save_path(params, savedir, subdir_keys=None):
subdir = ''
# create nested subdirectories with names specified by
# the values for the keys in subdir_keys
if subdir_keys is not None:
subdir_keys = list(subdir_keys)
subdir_names = ["{}__{}".format(str(key), str(params[key]))
for key in subdir_keys]
subdir = os.path.join(*subdir_names)
savedir = os.path.join(savedir, subdir)
return savedir
# ------------------------------------------------ parameter generation
def expand_params(params):
"""dict of kv pairs -> list of dicts with one option selected for
each key whose value is an instance of Options."""
# keys with values that are Options; try all combos of these
options_keys = [key for key in params if isinstance(params[key], Options)]
options_keys = sorted(options_keys) # sort for reproducibility
options_vals = [params[key].values for key in options_keys]
# keys with values that aren't Options; these are the same every time
no_options_keys = [key for key in params if not isinstance(params[key], Options)]
no_options_vals = [params[key] for key in no_options_keys]
no_options_params = dict(zip(no_options_keys, no_options_vals))
# make a list of all possible combos of values for each key with Options
expanded_params_list = []
for v in itertools.product(*options_vals):
expanded_params = dict(zip(options_keys, v)) # pick one option for each
expanded_params.update(no_options_params) # add in fixed params
expanded_params_list.append(expanded_params)
return expanded_params_list
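# Illustrative sketch (not part of the original file): hypothetical usage of Options
# with expand_params. Defined but never called.
def _example_expand_params():
    params = {'classifier': 'SVM',
              'C': Options(.1, 1., 10.),
              'kernel': Options('rbf', 'linear')}
    combos = expand_params(params)
    # yields 3 * 2 = 6 dicts, e.g. {'classifier': 'SVM', 'C': 0.1, 'kernel': 'rbf'}
    return combos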
def update_func_from_dict(d):
def f(params, new_keys, d=d):
updated = False
for k, v in d.items():
if k in new_keys:
for kk, vv in v.items():
updated = updated or (kk not in params)
params.setdefault(kk, vv)
return updated
return f
def generate_params_combinations(params_list, update_func={}):
"""Uses update_func to update each dict based on its values (e.g., to
add SVM kernel params if it contains "classifier": "SVM")"""
if not isinstance(params_list, (list, set, frozenset, tuple)):
params_list = [params_list]
for params in params_list:
params[KEY_NEW_KEYS] = set(params.keys())
if isinstance(update_func, dict):
update_func = update_func_from_dict(update_func)
while True:
new_list = []
for params in params_list:
expanded = expand_params(params)
new_list += expanded
if not update_func:
params_list = new_list
break
allFinished = True
for params in new_list:
# if these params aren't fully updated, update them; keep
# track of which keys are added along the way so we can
# pass this set to the update function next time
if not params.get(KEY_FINISHED_UPDATING, False):
# read which keys were added last time and which keys
# are currently present
new_keys = params[KEY_NEW_KEYS]
existing_keys = frozenset(params.keys())
params.pop(KEY_NEW_KEYS)
unfinished = update_func(params, new_keys)
# compute and store which keys were added this time
new_keys = frozenset(params.keys()) - existing_keys
params[KEY_NEW_KEYS] = new_keys
if unfinished:
allFinished = False
params[KEY_FINISHED_UPDATING] = not unfinished
params_list = new_list
if allFinished:
break
for p in params_list:
p.pop(KEY_FINISHED_UPDATING)
p.pop(KEY_NEW_KEYS)
return params_list
# ------------------------------------------------ cross validation
def stratified_split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
return sklearn.model_selection.train_test_split(
X, Y, train_size=train_frac, stratify=Y, random_state=random_state)
def split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
np.random.seed(123)
return sklearn.model_selection.train_test_split(
X, Y, train_size=train_frac, random_state=random_state)
# n_folds = int(train_frac / (2. - train_frac))
# split = StratifiedKFold(Y, n_folds=n_folds, random_state=12345)
# train_index, test_index = next(iter(split))
# X, Xtest = X[train_index], X[test_index]
# Y, Ytest = Y[train_index], Y[test_index]
# return X, Xtest, Y, Ytest
# ------------------------------------------------ Command line
def _split_kv_arg(arg):
key, val = arg.split('=')
return key.strip('-'), val
def _is_kv_arg(arg):
return len(arg.split('=')) == 2
def _clean_flag_arg(arg):
return arg.strip('-')
def _is_flag_arg(arg):
return arg[0] == '-'
def _parse_func_call_cmd(s):
"""
>>> _parse_func_call_cmd("range(5)")
array([0, 1, 2, 3, 4])
>>> _parse_func_call_cmd("range(2, -3, -2)")
array([ 2, 0, -2])
>>> _parse_func_call_cmd("linspace( -2,-20, 3)")
array([ -2., -11., -20.])
>>> _parse_func_call_cmd("logspace(-1, 3, 3)")
array([1.e-01, 1.e+01, 1.e+03])
"""
fnames = 'randn randint range linspace logspace'.split()
nargs = [(1,), (1, 2, 3), (1, 2, 3), (2, 3), (2, 3)]
funcs = [np.random.randn, np.random.randint, np.arange,
np.linspace, np.logspace]
if not isinstance(s, str):
return None
for fname, argc, func in zip(fnames, nargs, funcs):
if not s.startswith(fname + '('):
continue
if not s.endswith(')'):
raise ValueError("You tried to call function '{}', but forgot the"
" closing parenthesis".format(fname))
in_parens = s[len(fname) + 1:-1]
maybe_args = in_parens.split(',')
if len(maybe_args) not in argc:
raise ValueError(
"You tried to call function '{}', but passed an invalid number"
" of arguments: {}. Needed to be one of: {}" .format(
fname, len(maybe_args), argc))
try:
nums = [int(arg) for arg in maybe_args]
return func(*nums)
except: # noqa
raise ValueError("Command '{}' has arguments that can't be coerced"
" into integers".format(s))
return None
def _to_appropriate_type(s):
"""convert string `s` to an int, bool, float, or integer range as
appropriate. Returns the original string if it does not appear to be
any of these types."""
if s == 'True' or s == 'T':
return True
elif s == 'False' or s == 'F':
return False
try:
return int(s)
except: # noqa
pass
try:
return float(s)
except: # noqa
pass
if len(s.split('..')) in (2, 3): # range
vals_as_strs = s.split('..')
try:
return np.arange(*[int(val) for val in vals_as_strs])
except: # noqa
pass
as_func_result = _parse_func_call_cmd(s)
if as_func_result is not None:
return as_func_result
return s
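# Illustrative sketch (not part of the original file): a few hypothetical conversions
# performed by _to_appropriate_type. Defined but never called.
def _example_to_appropriate_type():
    assert _to_appropriate_type('T') is True
    assert _to_appropriate_type('7') == 7
    assert _to_appropriate_type('1.5') == 1.5
    vals = _to_appropriate_type('1..5')   # '..' range syntax -> np.arange(1, 5)
    assert list(vals) == [1, 2, 3, 4]
    return vals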
def parse_cmd_line(argv=None, positional_keys=None, allow_flags=True,
infer_types=True):
"""Parses the list of command line arguments into a dictionary of
key-value pairs
Parameters
----------
argv : iterable of strings
This should be sys.argv if supplied. Otherwise, sys.argv is read.
positional_keys : iterable of strings, optional
If k strings are specified, the up to the first k arguments will
be treated as values to be paired with these keys. Arguments of the
form foo=bar will never be treated this way.
allow_flags : bool, optional
If True, allows arguments of the form --myArg. When passed, this will
add {'myArg': True} to the returned dictionary. This is equivalent to
myArg=True
infer_types : bool, optional
If True, attempts to infer the type of each value in the returned
dictionary. E.g., instead of returning {'height': '72'}, it will
return {'height': 72}.
Returns
-------
argKV : dict: string -> inferred type or string
A dictionary whose keys and values are specified by the command line
arguments
>>> # ------------------------ positional args only
>>> argv = ['pyience.py', 'fooVal', 'barVal']
>>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
>>> len(d)
2
>>> d['fooKey']
'fooVal'
>>> d['barKey']
'barVal'
>>> # ------------------------ key-value args
>>> argv = ['pyience.py', 'fooVal', 'bletchKey=bletchVal', 'blahKey=blahVal']
>>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
>>> len(d)
3
>>> d['fooKey']
'fooVal'
>>> d.get('barKey', 'notHere')
'notHere'
>>> d['bletchKey']
'bletchVal'
>>> d['blahKey']
'blahVal'
>>> # ------------------------ flags
>>> argv = ['pyience.py', 'fooVal', 'bletchKey=bletchVal', '--myFlag']
>>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
>>> d['myFlag']
True
>>> # ------------------------ type inference
>>> argv = ['pyience.py', '--myFlag', 'foo=1.1', 'bar=7', 'baz=T', 'r=1..5']
>>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
>>> len(d)
5
>>> d['myFlag']
True
>>> d['foo']
1.1
>>> d['bar']
7
>>> d['baz']
True
>>> d['r']
array([1, 2, 3, 4])
>>> # ------------------------ no positional args
>>> d = parse_cmd_line(argv)
>>> len(d)
5
>>> d['myFlag']
True
>>> d['foo']
1.1
"""
if argv is None:
argv = sys.argv
args = argv[1:] # ignore file name
num_positional_keys = 0
if positional_keys is not None and len(positional_keys):
num_positional_keys = len(positional_keys)
# validate input; keyword arguments must come after positional
# arguments, and there must be no more positional arguments than
# we have keys to associate with them
kwargs_started = False
flags_started = False
for i, arg in enumerate(args):
if _is_kv_arg(arg): # it's a keyword argument
kwargs_started = True
elif _is_flag_arg(arg):
flags_started = True
        else:  # it's not a keyword argument or flag argument
if kwargs_started:
raise UsageError("key=value arguments must come after"
"positional arguments!")
if flags_started:
raise UsageError("flag (e.g., --myFlag) arguments must come"
"after positional arguments!")
arg_num = i + 1
if arg_num > num_positional_keys:
raise UsageError("only expecting "
"{} positional arguments!".format(
num_positional_keys))
argKV = {}
for i, arg in enumerate(args):
if _is_kv_arg(arg):
key, val = _split_kv_arg(arg)
argKV[key] = val
elif _is_flag_arg(arg):
key = _clean_flag_arg(arg)
argKV[key] = 'True' # string so that all vals are strings
elif i < num_positional_keys:
key = positional_keys[i]
argKV[key] = arg
else:
raise UsageError("couldn't parse argument '{}'".format(arg))
if infer_types:
for k, v in argKV.items():
argKV[k] = _to_appropriate_type(v)
return argKV
# ------------------------------------------------ other stuff
def apply_funcs(funcs, data):
f = chain(funcs)
return f(data)
def chain(funcs):
    if funcs is None or not len(funcs):
        return lambda x: x

    def f(*args, **kwargs):
        res = funcs[0](*args, **kwargs)
        for func in funcs[1:]:
            res = func(res)
        return res
    return f
def subdict(d, keys):
"""Returns a new dictionary composed of the (key, value) pairs
from d for the keys specified in keys"""
return {k: d[k] for k in keys}
# ------------------------------------------------ sklearn interop
def set_attrs(obj, attrs_dict, require_attrs_exist=False):
if require_attrs_exist:
keys_and_there = ([(k, k in obj.__dict__) for k in attrs_dict])
missing_keys = [k for (k, there) in keys_and_there if not there]
        there = [is_there for (_, is_there) in keys_and_there]
if not all(there):
raise ValueError("Object is missing keys {}".format(
missing_keys))
obj.__dict__.update(attrs_dict)
# ------------------------------------------------ cross validation
def _uniq_element_positions(iterable):
"""
Returns a mapping of unique elements to positions at which they
occur within the iterable
"""
objs2positions = {}
for i, obj in enumerate(iterable):
key = as_key(obj)
positions = objs2positions.get(key, [])
positions.append(i)
objs2positions[key] = positions
return objs2positions
# def _group_start_idxs_eq_split(nelements, ngroups):
# group_sz = nelements // ngroups
# return np.arange(0, nelements, group_sz, dtype=np.int)
def _group_start_end_idxs(nelements, ngroups=-1, fractions=None):
hasFracs = fractions is not None and len(fractions)
if ngroups <= 1 and not hasFracs:
return np.array([0], dtype=np.int), np.array([nelements], dtype=np.int)
    if not hasFracs:
        fractions = np.ones(ngroups)
    fractions = np.asarray(fractions, dtype=np.float64)
    fractions = fractions / np.sum(fractions)  # normalize so the groups cover all elements
cum_fracs = np.cumsum(fractions)
end_idxs = (nelements * cum_fracs).astype(np.int)
start_idxs = np.r_[0, end_idxs[:-1]]
return start_idxs, end_idxs
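# Illustrative sketch (not part of the original file): hypothetical example of
# _group_start_end_idxs splitting 10 elements with fractions (.8, .2). Defined but
# never called; the expected output assumes the normalization above.
def _example_group_idxs():
    starts, ends = _group_start_end_idxs(10, fractions=[.8, .2])
    # starts == [0, 8] and ends == [8, 10] once fractions are normalized to sum to 1
    return starts, ends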
def _split_into_groups(iterable, ngroups=-1, fractions=None, shuffle=True):
if shuffle:
iterable = np.copy(iterable)
        np.random.shuffle(iterable)
start_idxs, end_idxs = _group_start_end_idxs(len(iterable), ngroups,
fractions)
return [iterable[start:end] for start, end in zip(start_idxs, end_idxs)]
def cv_partition_idxs(labels, n_folds=5, fractions=None, stratified=True):
if fractions is not None and len(fractions):
if len(fractions) != n_folds:
raise ValueError("Specified fractions of total for {} groups, but "
"n_folds is {}; ignoring n_fold".format(
len(fractions), n_folds))
if stratified:
all_idxs = [[] for i in range(n_folds)]
lbl2idxs = _uniq_element_positions(labels)
for lbl, idxs in lbl2idxs.items():
if len(idxs) < n_folds:
warnings.warn(("Label {} appears only {} times, which is "
"less than the number of folds requested, {}"
.format(lbl, len(idxs), n_folds)), Warning)
idxGroups = _split_into_groups(idxs, n_folds, fractions)
for i, group in enumerate(idxGroups):
all_idxs[i] += group
return all_idxs
else:
possible_idxs = np.arange(len(labels))
return _split_into_groups(possible_idxs, n_folds, fractions)
def cv_split(X, y, n_folds=5, fractions=None, stratified=True):
if len(X) != len(y):
raise IndexError("len(X) {} != len(y) {}".format(len(X), len(y)))
all_idxs = cv_partition_idxs(y, n_folds=n_folds, fractions=fractions,
stratified=stratified)
X_split = [X[idxs] for idxs in all_idxs]
y_split = [y[idxs] for idxs in all_idxs]
return X_split, y_split
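# Illustrative sketch (not part of the original file): hypothetical use of cv_split
# to get stratified folds. Defined but never called.
def _example_cv_split():
    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 1] * 5)
    X_folds, y_folds = cv_split(X, y, n_folds=5)
    # 5 folds, each holding one example of each class (the split is stratified)
    return X_folds, y_folds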
# ================================================================ Main
def update(params, new_keys):
if 'classifier' in new_keys:
params['kernel'] = Options('rbf', 'linear')
# we use setdefault here so that we don't overwrite values
# passed in at the top level
if 'kernel' in new_keys:
kernel = params['kernel']
params.setdefault('C', Options(10. ** np.arange(-5, 3)))
if kernel == 'rbf':
params.setdefault('gamma', Options([1, 10]))
return True if new_keys else False
def main():
cVals = 10. ** np.arange(-3, 3)
d = {"classifier": "SVM", 'C': Options(cVals)}
# generate_params_combinations(d, update)
combos = generate_params_combinations(d, update)
# add a fake outcome variable
for combo in combos:
combo['runtime'] = np.random.rand() * 10
# print out a dataframe so we can see that this worked
import pandas as pd
print(pd.DataFrame.from_records(combos)) # woot; it worked
if __name__ == '__main__':
from doctest import testmod
testmod()
main()
|
#!/usr/bin/env python
# import datasets
_memory = Memory('.', verbose=0)
np.set_printoptions(precision=3)
SAVE_DIR = '../results'
# ================================================================ Distances
def dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def dists_elemwise_l1(x, q):
return np.abs(x - q)
def dists_elemwise_dot(x, q):
return x * q
# ================================================================ Clustering
def load_dataset_object(which_dataset, **load_dataset_kwargs):
X_train, Q, X_test, true_nn = dsets.load_dataset(
which_dataset, **load_dataset_kwargs)
assert Q.shape[-1] == X_train.shape[-1]
if isinstance(which_dataset, str):
name = files.basename(which_dataset, noext=True)
else:
name = which_dataset.__name__ # assumes which_dataset is a class
return Dataset(Q, X_train, X_test, true_nn, name)
Dataset = namedtuple('Dataset', [
'Q', 'X_train', 'X_test', 'true_nn', 'name'])
# ================================================================ Quantizers
# ------------------------------------------------ Product Quantization
def _learn_centroids(X, ncentroids, nsubvects, subvect_len):
ret = np.empty((ncentroids, nsubvects, subvect_len))
for i in range(nsubvects):
start_col = i * subvect_len
end_col = start_col + subvect_len
X_in = X[:, start_col:end_col]
centroids, labels = kmeans(X_in, ncentroids)
ret[:, i, :] = centroids
return ret
def _parse_codebook_params(D, code_bits=-1, bits_per_subvect=-1, nsubvects=-1):
if nsubvects < 0:
nsubvects = code_bits // bits_per_subvect
elif code_bits < 1:
code_bits = bits_per_subvect * nsubvects
elif bits_per_subvect < 1:
bits_per_subvect = code_bits // nsubvects
ncentroids = int(2 ** bits_per_subvect)
subvect_len = D // nsubvects
assert code_bits % bits_per_subvect == 0
if D % subvect_len:
print("D, nsubvects, subvect_len = ", D, nsubvects, subvect_len)
assert D % subvect_len == 0 # TODO rm this constraint
return nsubvects, ncentroids, subvect_len
def _fit_pq_lut(q, centroids, elemwise_dist_func):
_, nsubvects, subvect_len = centroids.shape
assert len(q) == nsubvects * subvect_len
q = q.reshape((1, nsubvects, subvect_len))
q_dists_ = elemwise_dist_func(centroids, q)
q_dists_ = np.sum(q_dists_, axis=-1)
return np.asfortranarray(q_dists_) # ncentroids, nsubvects, col-major
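# Illustrative sketch (not part of the original file): a minimal, hypothetical use of
# _fit_pq_lut. With 2 subvectors of length 3 and 4 centroids per codebook, the
# returned lut has shape (ncentroids, nsubvects); lut[c, s] is the distance between
# subvector s of the query and centroid c of codebook s. Defined but never called.
def _example_fit_pq_lut():
    centroids = np.random.randn(4, 2, 3)   # (ncentroids, nsubvects, subvect_len)
    q = np.random.randn(6)                 # D = nsubvects * subvect_len
    lut = _fit_pq_lut(q, centroids, dists_elemwise_sq)
    assert lut.shape == (4, 2)
    return lut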
class PQEncoder(object):
def __init__(self, dataset, code_bits=-1, bits_per_subvect=-1,
nsubvects=-1, elemwise_dist_func=dists_elemwise_sq):
X = dataset.X_train
self.elemwise_dist_func = elemwise_dist_func
tmp = _parse_codebook_params(X.shape[1], code_bits=code_bits,
bits_per_subvect=bits_per_subvect,
nsubvects=nsubvects)
self.nsubvects, self.ncentroids, self.subvect_len = tmp
self.code_bits = int(np.log2(self.ncentroids))
# for fast lookups via indexing into flattened array
self.offsets = np.arange(self.nsubvects, dtype=np.int) * self.ncentroids
self.centroids = _learn_centroids(X, self.ncentroids, self.nsubvects,
self.subvect_len)
def name(self):
return "PQ_{}x{}b".format(self.nsubvects, self.code_bits)
def params(self):
return {'_algo': 'PQ', '_ncodebooks': self.nsubvects,
'_code_bits': self.code_bits}
def encode_X(self, X, **sink):
idxs = pq._encode_X_pq(X, codebooks=self.centroids)
return idxs + self.offsets # offsets let us index into raveled dists
def encode_q(self, q, **sink):
return None # we use fit_query() instead, so fail fast
def dists_true(self, X, q):
return np.sum(self.elemwise_dist_func(X, q), axis=-1)
def fit_query(self, q, **sink):
self.q_dists_ = _fit_pq_lut(q, centroids=self.centroids,
elemwise_dist_func=self.elemwise_dist_func)
def dists_enc(self, X_enc, q_unused=None):
# this line has each element of X_enc index into the flattened
# version of q's distances to the centroids; we had to add
# offsets to each col of X_enc above for this to work
centroid_dists = self.q_dists_.T.ravel()[X_enc.ravel()]
return np.sum(centroid_dists.reshape(X_enc.shape), axis=-1)
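# Illustrative sketch (not part of the original file): why encode_X adds `offsets`.
# With C codebooks of K centroids each, the query's LUT is a (K, C) array; flattening
# its transpose gives a length C*K vector in which entry c*K + k holds the distance
# from subvector c of the query to centroid k. Adding c*K to each code lets a single
# fancy-index gather all C partial distances per encoded row. Hypothetical sketch,
# defined but never called.
def _example_flat_lut_gather():
    K, C, N = 4, 2, 3                        # ncentroids, ncodebooks, nrows
    codes = np.random.randint(0, K, (N, C))  # per-row centroid indices
    offsets = np.arange(C) * K
    lut = np.random.rand(K, C)               # (ncentroids, ncodebooks)
    gathered = lut.T.ravel()[(codes + offsets).ravel()].reshape(N, C)
    dists = gathered.sum(axis=-1)
    ref = np.array([sum(lut[codes[i, c], c] for c in range(C)) for i in range(N)])
    assert np.allclose(dists, ref)
    return dists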
def _learn_best_quantization(luts): # luts can be a bunch of vstacked luts
best_loss = np.inf
best_alpha = None
best_floors = None
best_scale_by = None
for alpha in [.001, .002, .005, .01, .02, .05, .1]:
alpha_pct = int(100 * alpha)
# compute quantized luts this alpha would yield
floors = np.percentile(luts, alpha_pct, axis=0)
luts_offset = np.maximum(0, luts - floors)
ceil = np.percentile(luts_offset, 100 - alpha_pct)
scale_by = 255. / ceil
luts_quantized = np.floor(luts_offset * scale_by).astype(np.int)
luts_quantized = np.minimum(255, luts_quantized)
# compute err
luts_ideal = (luts - luts_offset) * scale_by
diffs = luts_ideal - luts_quantized
loss = np.sum(diffs * diffs)
if loss <= best_loss:
best_loss = loss
best_alpha = alpha
best_floors = floors
best_scale_by = scale_by
return best_floors, best_scale_by, best_alpha
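# Illustrative sketch (not part of the original file): _learn_best_quantization picks
# per-codebook floors (a low percentile of the LUT values) and one shared scale so
# that (lut - floor) * scale mostly lands in [0, 255]; the alpha grid trades clipping
# at the bottom against clipping at the top. Hypothetical usage, defined but never
# called.
def _example_learn_quantization():
    luts = np.abs(np.random.randn(1000, 8)) * 10.  # stacked LUTs: (nrows, nsubvects)
    floors, scale_by, alpha = _learn_best_quantization(luts)
    quantized = np.minimum(255, np.floor(np.maximum(0, luts - floors) * scale_by))
    return quantized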
class OPQEncoder(PQEncoder):
def __init__(self, dataset, code_bits=-1, bits_per_subvect=-1,
nsubvects=-1, elemwise_dist_func=dists_elemwise_sq,
opq_iters=20, quantize_lut=False, algo='OPQ', **opq_kwargs):
X = dataset.X_train
self.elemwise_dist_func = elemwise_dist_func
self.quantize_lut = quantize_lut
self.opq_iters = opq_iters
self.algo = algo
tmp = _parse_codebook_params(X.shape[1], code_bits=code_bits,
bits_per_subvect=bits_per_subvect,
nsubvects=nsubvects)
self.nsubvects, self.ncentroids, self.subvect_len = tmp
self.code_bits = int(np.log2(self.ncentroids))
# for fast lookups via indexing into flattened array
self.offsets = np.arange(self.nsubvects, dtype=np.int) * self.ncentroids
if self.algo == 'Bolt':
# Note: we always pass in 0 iters in the reported experiments,
# so it never rotates anything
self.centroids, _, self.rotations = pq.learn_bopq(
X, ncodebooks=nsubvects, codebook_bits=bits_per_subvect,
niters=opq_iters, **opq_kwargs)
elif self.algo == 'OPQ':
self.centroids, _, self.R = pq.learn_opq(
X, ncodebooks=nsubvects, codebook_bits=bits_per_subvect,
niters=opq_iters, **opq_kwargs)
else:
raise ValueError("argument algo must be one of {OPQ, Bolt}")
# learn appropriate offsets and shared scale factor for quantization
self.lut_offsets = np.zeros(self.nsubvects)
self.order_idxs = np.arange(self.nsubvects, dtype=np.int)
if self.quantize_lut: # TODO put this logic in separate function
print("learning quantization...")
num_rows = min(10*1000, len(X) // 2)
_, queries = dsets.extract_random_rows(
X[num_rows:], how_many=1000, remove_from_X=False)
X = X[:num_rows] # limit to first 10k rows of X
# compute luts for all the queries
luts = [self._fit_query(q, quantize=False) for q in queries]
luts = np.vstack(luts)
assert luts.shape == (self.ncentroids * len(queries), self.nsubvects)
self.lut_offsets, self.scale_by, _ = _learn_best_quantization(luts)
def name(self):
return "{}_{}x{}b_iters={}_quantize={}".format(
self.algo, self.nsubvects, self.code_bits, self.opq_iters,
int(self.quantize_lut))
def params(self):
return {'_algo': self.algo, '_ncodebooks': self.nsubvects,
'_code_bits': self.code_bits, 'opq_iters': self.opq_iters,
'_quantize': self.quantize_lut}
def _fit_query(self, q, quantize=False):
if self.algo == 'OPQ':
qR = pq.opq_rotate(q, self.R).ravel()
elif self.algo == 'Bolt':
qR = pq.bopq_rotate(q, self.rotations).ravel()
lut = _fit_pq_lut(qR, centroids=self.centroids,
elemwise_dist_func=self.elemwise_dist_func)
if quantize:
if False: # roughly laplace distro, reaching all the way to 0
ax = sb.distplot(lut.ravel(), hist=False, rug=True)
ax.set_xlabel('Query dist to centroids (lut dist histogram)')
ax.set_ylabel('Fraction of queries')
plt.show()
lut = np.maximum(0, lut - self.lut_offsets)
lut = np.floor(lut * self.scale_by).astype(np.int)
return np.minimum(lut, 255)
return lut
def encode_X(self, X, **sink):
if self.algo == 'OPQ':
X = pq.opq_rotate(X, self.R)
elif self.algo == 'Bolt':
X = pq.bopq_rotate(X, self.rotations)
idxs = pq._encode_X_pq(X, codebooks=self.centroids)
return idxs + self.offsets # offsets let us index into raveled dists
def fit_query(self, q, quantize=True, **sink):
quantize = quantize and self.quantize_lut
self.q_dists_ = self._fit_query(q, quantize=quantize)
if quantize:
# print "min, max lut values: {}, {}".format(np.min(self.q_dists_),
# np.max(self.q_dists_))
assert np.min(self.q_dists_) >= 0
assert np.max(self.q_dists_) <= 255
if False:
_, axes = plt.subplots(3, figsize=(9, 11))
sb.violinplot(data=self.q_dists_, inner="box", cut=0, ax=axes[0])
axes[0].set_xlabel('Codebook')
axes[0].set_ylabel('Distance to query')
axes[0].set_ylim([0, np.max(self.q_dists_)])
sb.heatmap(data=self.q_dists_, ax=axes[1], cbar=False, vmin=0)
axes[1].set_xlabel('Codebook')
axes[1].set_ylabel('Centroid')
sb.distplot(self.q_dists_.ravel(), hist=False, rug=True, vertical=False, ax=axes[2])
axes[2].set_xlabel('Centroid dist to query')
axes[2].set_ylabel('Fraction of centroids')
axes[2].set_xlim([0, np.max(self.q_dists_) + .5])
# plot where the mean is
mean_dist = np.mean(self.q_dists_)
ylim = axes[2].get_ylim()
axes[2].plot([mean_dist, mean_dist], ylim, 'r--')
axes[2].set_ylim(ylim)
plt.show()
# ================================================================ Main
def eval_encoder(dataset, encoder, dist_func_true=None, dist_func_enc=None,
eval_dists=True, verbosity=1, plot=False, smaller_better=True):
X = dataset.X_test
queries = dataset.Q
true_nn = dataset.true_nn
if true_nn is not None:
print("eval encoder(): got true_nn with shape: ", true_nn.shape)
queries = queries[:1000] # TODO rm for tables; fine for plots
print("queries.shape", queries.shape)
need_true_dists = eval_dists or plot or true_nn is None
if len(queries.shape) == 1:
queries = [queries]
if dist_func_true is None:
dist_func_true = encoder.dists_true
if dist_func_enc is None:
dist_func_enc = encoder.dists_enc
t0 = time.time()
# performance metrics
RECALL_Rs = [1, 5, 10, 50, 100, 500, 1000]
recall_counts = np.zeros(len(RECALL_Rs))
fracs_below_max = []
if eval_dists:
all_corrs = []
all_rel_errs = []
all_errs = []
total_dist = 0.
if need_true_dists:
X = X[:10000] # limit to 10k points because otherwise it takes forever
queries = queries[:256, :]
print("encoding X...")
X_enc = encoder.encode_X(X)
print("trying queries...")
for i, q in enumerate(queries):
if i % 100 == 0:
print("trying query {}...".format(i))
q_enc = encoder.encode_q(q)
encoder.fit_query(q)
if need_true_dists:
all_true_dists = dist_func_true(X, q)
all_enc_dists = dist_func_enc(X_enc, q_enc)
# ------------------------ begin analysis / reporting code
# find true knn
if need_true_dists:
knn_idxs = top_k_idxs(all_true_dists, 10, smaller_better=smaller_better)
else:
knn_idxs = true_nn[i, :10]
# compute fraction of points with enc dists as close as 10th nn
knn_enc_dists = all_enc_dists[knn_idxs]
if smaller_better:
max_enc_dist = np.max(knn_enc_dists)
num_below_max = np.sum(all_enc_dists <= max_enc_dist)
else:
max_enc_dist = np.min(knn_enc_dists)
num_below_max = np.sum(all_enc_dists >= max_enc_dist)
frac_below_max = float(num_below_max) / len(all_enc_dists)
fracs_below_max.append(frac_below_max)
# compute recall@R stats
top_1000 = top_k_idxs(all_enc_dists, 1000, smaller_better=smaller_better)
nn_idx = knn_idxs[0]
        for ri, r in enumerate(RECALL_Rs):
            recall_counts[ri] += nn_idx in top_1000[:r]
# compute distortion in distances, quantified by corr and rel err
if eval_dists:
total_dist += np.sum(all_true_dists)
corr, _ = pearsonr(all_enc_dists, all_true_dists)
all_corrs.append(corr)
rel_errs = (all_enc_dists - all_true_dists) / all_true_dists
all_rel_errs.append(rel_errs)
all_errs.append(all_enc_dists - all_true_dists)
assert not np.any(np.isinf(all_enc_dists))
assert not np.any(np.isnan(all_enc_dists))
assert not np.any(np.isinf(all_true_dists))
assert not np.any(np.isnan(all_true_dists))
if plot and i < 3: # at most 3 plots
num_nn = min(10000, len(all_true_dists) - 1)
xlim = [0, np.partition(all_true_dists, num_nn)[num_nn]]
ylim = [0, np.partition(all_enc_dists, num_nn)[num_nn]]
grid = sb.jointplot(x=all_true_dists, y=all_enc_dists,
xlim=xlim, ylim=ylim, joint_kws=dict(s=10))
# hack to bully the sb JointGrid into plotting a vert line
cutoff = all_true_dists[knn_idxs[-1]]
grid.x = [cutoff, cutoff]
grid.y = ylim
grid.plot_joint(plt.plot, color='r', linestyle='--')
# also make it plot cutoff in terms of quantized dist
grid.x = xlim
grid.y = [max_enc_dist, max_enc_dist]
grid.plot_joint(plt.plot, color='k', linestyle='--')
if plot:
plt.show()
t = time.time() - t0
# log a lot of performance metrics / experimental params
detailed_stats = [] # list of dicts
stats = {}
stats['X_rows'] = X.shape[0]
stats['X_cols'] = X.shape[1]
stats['nqueries'] = len(queries)
stats['eval_time_secs'] = t
stats['fracs_below_max_mean'] = np.mean(fracs_below_max)
stats['fracs_below_max_std'] = np.std(fracs_below_max)
stats['fracs_below_max_50th'] = np.median(fracs_below_max)
stats['fracs_below_max_90th'] = np.percentile(fracs_below_max, q=90)
for i, r in enumerate(RECALL_Rs):
key = 'recall@{}'.format(r)
val = float(recall_counts[i]) / len(queries)
stats[key] = val
if eval_dists:
corrs = np.hstack(all_corrs)
rel_errs = np.hstack(all_rel_errs)
rel_errs = rel_errs[~(np.isnan(rel_errs) + np.isinf(rel_errs))]
errs = np.hstack(all_errs)
stats['corr_mean'] = np.mean(all_corrs)
stats['corr_std'] = np.std(all_corrs)
stats['mse_mean'] = np.mean(errs * errs)
stats['mse_std'] = np.std(errs * errs)
stats['rel_err_mean'] = np.mean(rel_errs)
stats['rel_err_std'] = np.std(rel_errs)
stats['rel_err_sq_mean'] = np.mean(rel_errs * rel_errs)
stats['rel_err_sq_std'] = np.std(rel_errs * rel_errs)
# sample some relative errs cuz all we need them for is plotting
# confidence intervals
np.random.shuffle(rel_errs)
np.random.shuffle(errs)
detailed_stats = [{'corr': all_corrs[i], 'rel_err': rel_errs[i],
'err': errs[i]} for i in range(len(corrs))]
for d in detailed_stats:
d.update(encoder_params(encoder))
if verbosity > 0:
print("------------------------ {}".format(name_for_encoder(encoder)))
keys = sorted(stats.keys())
lines = ["{}: {}".format(k, stats[k]) for k in keys if isinstance(stats[k], str)]
lines += ["{}: {:.4g}".format(k, stats[k]) for k in keys if not isinstance(stats[k], str)]
print("\n".join(lines))
stats.update(encoder_params(encoder))
return stats, detailed_stats # detailed_stats empty unless `eval_dists`
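# Illustrative usage sketch (not from the original source; `dataset` and
# `encoder` are hypothetical objects exposing X_test/Q/true_nn and
# encode_X/encode_q/fit_query/dists_* respectively):
#   stats, details = eval_encoder(dataset, encoder, eval_dists=True)
#   print(stats['recall@10'], stats['corr_mean'])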
def name_for_encoder(encoder):
try:
return encoder.name()
except AttributeError:
return str(type(encoder))
def encoder_params(encoder):
try:
return encoder.params()
except AttributeError:
return {'algo': name_for_encoder(encoder)}
# @_memory.cache
def _experiment_one_dataset(which_dataset, eval_dists=False, dotprods=False,
save_dir=None):
SAVE_DIR = save_dir if save_dir else '../results/acc/'
elemwise_dist_func = dists_elemwise_dot if dotprods else dists_elemwise_sq
smaller_better = not dotprods
N, D = -1, -1
num_queries = -1 # no effect for "real" datasets
if isinstance(which_dataset, str):
print("WARNING: sampling queries from data file")
num_queries = 128 # if just loading one file, need to sample queries
norm_len = False # set to true for cosine similarity
norm_mean = True
max_ncodebooks = 64 # 32B bolt has 64 codebooks
dataset_func = functools.partial(load_dataset_object, N=N, D=D,
num_queries=num_queries,
norm_len=norm_len, norm_mean=norm_mean,
D_multiple_of=max_ncodebooks)
dataset = dataset_func(which_dataset)
print("=== Using Dataset: {} ({}x{})".format(dataset.name, N, D))
dicts = []
detailed_dicts = []
nbytes_list = [8, 16, 32]
# max_opq_iters = 5 # TODO uncomment below
max_opq_iters = 20
# ------------------------------------------------ Bolt
# Note: we considered having learned rotations like OPQ but constrained
# to be block diagonal; this is why you'll see mentions of rotations
# in some of the Bolt code. However, it ended up not helping at all
# and also slows down Bolt considerably. All the reported results are
# without any rotation.
# rotation_sizes = [8, 16, 32]
rotation_sizes = [32]
# rotation_sizes = [16]
for nbytes in nbytes_list:
for opq_iters in [0]: # 0 opq iters -> no rotations
rot_sizes = rotation_sizes if opq_iters > 0 else [16]
for rot_sz in rot_sizes:
nsubvects = nbytes * 2
encoder = OPQEncoder(dataset, nsubvects=nsubvects,
bits_per_subvect=4,
opq_iters=opq_iters,
R_sz=rot_sz,
elemwise_dist_func=elemwise_dist_func,
algo='Bolt', quantize_lut=True)
stats, detailed_stats = eval_encoder(
dataset, encoder, eval_dists=eval_dists,
smaller_better=smaller_better)
stats['rot_sz'] = rot_sz
for d in detailed_dicts:
d['rot_sz'] = rot_sz
dicts.append(stats)
detailed_dicts += detailed_stats
# ------------------------------------------------ PQ
# for codebook_bits in [4, 8]:
for codebook_bits in [8]:
for nbytes in nbytes_list:
nsubvects = nbytes * (8 // codebook_bits)
encoder = PQEncoder(dataset, nsubvects=nsubvects,
bits_per_subvect=codebook_bits,
elemwise_dist_func=elemwise_dist_func)
stats, detailed_stats = eval_encoder(
dataset, encoder, eval_dists=eval_dists,
smaller_better=smaller_better)
dicts.append(stats)
detailed_dicts += detailed_stats
# ------------------------------------------------ OPQ
init = 'identity'
opq_iters = max_opq_iters
for codebook_bits in [8]:
for nbytes in nbytes_list:
nsubvects = nbytes * (8 // codebook_bits)
encoder = OPQEncoder(dataset, nsubvects=nsubvects,
bits_per_subvect=codebook_bits,
opq_iters=opq_iters, init=init,
elemwise_dist_func=elemwise_dist_func)
stats, detailed_stats = eval_encoder(
dataset, encoder, eval_dists=eval_dists,
smaller_better=smaller_better)
dicts.append(stats)
detailed_dicts += detailed_stats
for d in dicts:
d['dataset'] = dataset.name
d['norm_mean'] = norm_mean
for d in detailed_dicts:
d['dataset'] = dataset.name
d['norm_mean'] = norm_mean
savedir = os.path.join(SAVE_DIR, dataset.name)
pyn.save_dicts_as_data_frame(dicts, savedir, name='summary')
# also just save versions with timestamps to recover from clobbering
pyn.save_dicts_as_data_frame(dicts, savedir, name='summary',
timestamp=True)
if eval_dists:
pyn.save_dicts_as_data_frame(detailed_dicts, savedir, name='all_results')
pyn.save_dicts_as_data_frame(detailed_dicts, savedir, name='all_results',
timestamp=True)
return dicts, detailed_dicts
def experiment(eval_dists=False, dotprods=False):
# which_datasets = [dsets.Mnist]
which_datasets = [dsets.Mnist, dsets.Sift1M,
dsets.LabelMe, dsets.Convnet1M]
# which_datasets = [dsets.Glove]
# which_datasets = [dsets.Deep1M, dsets.Gist]
if eval_dists:
save_dir = '../results/acc_dotprods/' if dotprods else '../results/acc_l2'
else:
save_dir = '../results/recall_at_r/'
for which_dataset in which_datasets:
_dicts, _details = _experiment_one_dataset(
which_dataset, eval_dists=eval_dists, dotprods=dotprods,
save_dir=save_dir)
def main():
import doctest
doctest.testmod()
np.set_printoptions(precision=3)
opts = pyn.parse_cmd_line()
opts.setdefault('eval_l2_dists', False)
opts.setdefault('eval_dotprods', False)
opts.setdefault('eval_recall@R', False)
if opts['eval_l2_dists']:
print(">>>>>>>> evaluating l2 dists")
experiment(eval_dists=True, dotprods=False)
if opts['eval_dotprods']:
print(">>>>>>>> evaluating dot prods")
experiment(eval_dists=True, dotprods=True)
if opts['eval_recall@R']:
print(">>>>>>>> evaluating recall@R")
experiment(eval_dists=False, dotprods=False)
return
if __name__ == '__main__':
main()
#!/usr/bin/env python
import copy
import numba
import numpy as np
from joblib import Memory
from sklearn import linear_model
# NOTE: `subs` (used for subs.top_principal_component) and
# `assignments_from_multisplits` are project-local helpers assumed to be
# importable in this module; their exact import paths are not shown here.
_memory = Memory('.', verbose=0)
# def bucket_id_to_new_bucket_ids(old_id):
# i = 2 * old_id
# return i, i + 1
class Bucket(object):
__slots__ = 'N D id sumX sumX2 point_ids support_add_and_remove'.split()
def __init__(self, D=None, N=0, sumX=None, sumX2=None, point_ids=None,
bucket_id=0, support_add_and_remove=False):
# self.reset(D=D, sumX=sumX, sumX2=sumX2)
# assert point_ids is not None
if point_ids is None:
assert N == 0
point_ids = (set() if support_add_and_remove
else np.array([], dtype=int))
self.N = len(point_ids)
self.id = bucket_id
# this is just so that we can store the point ids as array instead of
# set, while still retaining option to run our old code that needs
# them to be a set for efficient inserts and deletes
self.support_add_and_remove = support_add_and_remove
if support_add_and_remove:
self.point_ids = set(point_ids)
else:
self.point_ids = np.asarray(point_ids)
# figure out D
if (D is None or D < 1) and (sumX is not None):
D = len(sumX)
elif (D is None or D < 1) and (sumX2 is not None):
D = len(sumX2)
assert D is not None
self.D = D
# figure out + sanity check stats arrays
self.sumX = np.zeros(D, dtype=np.float32) if (sumX is None) else sumX
self.sumX2 = np.zeros(D, dtype=np.float32) if (sumX2 is None) else sumX2 # noqa
# print("D: ", D)
# print("sumX type: ", type(sumX))
assert len(self.sumX) == D
assert len(self.sumX2) == D
self.sumX = np.asarray(self.sumX).astype(np.float32)
self.sumX2 = np.asarray(self.sumX2).astype(np.float32)
def add_point(self, point, point_id=None):
assert self.support_add_and_remove
# TODO replace with more numerically stable updates if necessary
self.N += 1
self.sumX += point
self.sumX2 += point * point
if point_id is not None:
self.point_ids.add(point_id)
def remove_point(self, point, point_id=None):
assert self.support_add_and_remove
self.N -= 1
self.sumX -= point
self.sumX2 -= point * point
if point_id is not None:
self.point_ids.remove(point_id)
def deepcopy(self, bucket_id=None): # deep copy
bucket_id = self.id if bucket_id is None else bucket_id
return Bucket(
sumX=np.copy(self.sumX), sumX2=np.copy(self.sumX2),
point_ids=copy.deepcopy(self.point_ids), bucket_id=bucket_id)
def split(self, X=None, dim=None, val=None, X_orig=None):
id0 = 2 * self.id
id1 = id0 + 1
if X is None or self.N < 2: # copy of this bucket + an empty bucket
return (self.deepcopy(bucket_id=id0),
Bucket(D=self.D, bucket_id=id1))
assert dim is not None
assert val is not None
assert self.point_ids is not None
my_idxs = np.asarray(self.point_ids)
# print("my_idxs shape, dtype", my_idxs.shape, my_idxs.dtype)
X = X[my_idxs]
X_orig = X if X_orig is None else X_orig[my_idxs]
mask = X_orig[:, dim] < val
not_mask = ~mask
X0 = X[mask]
X1 = X[not_mask]
ids0 = my_idxs[mask]
ids1 = my_idxs[not_mask]
def create_bucket(points, ids, bucket_id):
sumX = points.sum(axis=0) if len(ids) else None
sumX2 = (points * points).sum(axis=0) if len(ids) else None
# return Bucket(N=len(ids), D=self.D, point_ids=ids,
return Bucket(D=self.D, point_ids=ids, sumX=sumX, sumX2=sumX2,
bucket_id=bucket_id)
return create_bucket(X0, ids0, id0), create_bucket(X1, ids1, id1)
def optimal_split_val(self, X, dim, possible_vals=None, X_orig=None,
return_possible_vals_losses=False):
if self.N < 2 or self.point_ids is None:
if return_possible_vals_losses:
return 0, 0, np.zeros(len(possible_vals), dtype=X.dtype)
return 0, 0
# my_idxs = np.array(list(self.point_ids))
my_idxs = np.asarray(self.point_ids)
if X_orig is not None:
X_orig = X_orig[my_idxs]
return optimal_split_val(
X[my_idxs], dim, possible_vals=possible_vals, X_orig=X_orig,
return_possible_vals_losses=return_possible_vals_losses)
def col_means(self):
return self.sumX.astype(np.float64) / max(1, self.N)
def col_variances(self, safe=False):
if self.N < 1:
return np.zeros(self.D, dtype=np.float32)
E_X2 = self.sumX2 / self.N
E_X = self.sumX / self.N
ret = E_X2 - (E_X * E_X)
return np.maximum(0, ret) if safe else ret
def col_sum_sqs(self):
return self.col_variances() * self.N
@property
def loss(self):
# if self.N < 1:
# return 0
# # less stable version with one less divide and mul
# return max(0, np.sum(self.sumX2 - (self.sumX * (self.sumX / self.N))))
# more stable version, that also clamps variance at 0
return max(0, np.sum(self.col_sum_sqs()))
# expected_X = self.sumX / self.N
# expected_X2 = self.sumX2 / self.N
# return max(0, np.sum(expected_X2 - (expected_X * expected_X)) * self.N)
# @numba.jit(nopython=True) # numpy cumsum is insanely slow
# def _cumsum_cols(X):
# X = np.copy(X)
# for i in range(1, X.shape[0]):
# X[i] += X[i - 1]
# return X
# numpy cumsum is insanely slow; also, having the nested loops is twice
# as fast as assigning rows (i.e., X[i] += X[i - 1])
@numba.njit(fastmath=True)
def _cumsum_cols(X):
out = np.empty(X.shape, X.dtype)
for j in range(X.shape[1]):
out[0, j] = X[0, j]
for i in range(1, X.shape[0]):
for j in range(X.shape[1]):
out[i, j] = X[i, j] + out[i - 1, j]
return out
@numba.njit(fastmath=True, cache=True) # njit = no python, cache binary
def _cumsse_cols(X):
N, D = X.shape
cumsses = np.empty((N, D), X.dtype)
cumX_row = np.empty(D, X.dtype)
cumX2_row = np.empty(D, X.dtype)
for j in range(D):
cumX_row[j] = X[0, j]
cumX2_row[j] = X[0, j] * X[0, j]
cumsses[0, j] = 0 # no err in bucket with 1 element
for i in range(1, N):
one_over_count = 1. / (i + 1)
for j in range(D):
cumX_row[j] += X[i, j]
cumX2_row[j] += X[i, j] * X[i, j]
meanX = cumX_row[j] * one_over_count
cumsses[i, j] = cumX2_row[j] - (cumX_row[j] * meanX)
return cumsses
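# Sanity-check sketch for _cumsse_cols (illustrative, not from the original
# source; assumes numpy is imported as np): entry [i, j] is the sum of squared
# errors of X[:i + 1, j] about its mean, i.e. (i + 1) * var(X[:i + 1, j]).
#   X = np.random.randn(100, 4).astype(np.float32)
#   sses = _cumsse_cols(X)
#   i, j = 57, 2
#   assert abs(sses[i, j] - (i + 1) * X[:i + 1, j].var()) < 1e-3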
# def optimal_split_val(X, dim, possible_vals=None, return_val_idx=False):
# @_memory.cache
def optimal_split_val(X, dim, possible_vals=None, X_orig=None,
# return_possible_vals_losses=False, force_val='median'):
return_possible_vals_losses=False, force_val=None,
# shrink_towards_median=True):
shrink_towards_median=False):
X_orig = X if X_orig is None else X_orig
# X_orig = X # TODO rm
if X_orig.shape != X.shape:
print("X orig shape: ", X_orig.shape)
print("X shape: ", X.shape)
assert X_orig.shape == X.shape
if force_val in ('mean', 'median'):
assert not return_possible_vals_losses
x = X_orig[:, dim]
val = np.median(x) if force_val == 'median' else np.mean(x)
mask = x < val  # compare only the chosen dim; `X_orig < val` would mask the whole matrix
X0 = X[mask]
errs0 = X0 - X0.mean(axis=0)
loss0 = np.sum(errs0 * errs0)
X1 = X[~mask]
errs = X1 - X1.mean(axis=0)
loss1 = np.sum(errs * errs)
return val, loss0 + loss1
N, D = X.shape
# sort_idxs = np.argsort(X[:, dim])
sort_idxs = np.argsort(X_orig[:, dim])
X_sort = X[sort_idxs]
# use_jit = False
use_jit = True
if use_jit:
# X_sort = X_sort[:100] # TODO rm
# X_sort = np.ascontiguousarray(X_sort)
# N, D = X_sort.shape
# print("about to call jitted func; N, D = ", N, D)
sses_head = _cumsse_cols(X_sort)
# print("got thru first call...")
# X_sort_rev = np.ascontiguousarray(X_sort[::-1])
# sses_tail = _cumsse_cols(X_sort_rev)[::-1]
sses_tail = _cumsse_cols(X_sort[::-1])[::-1]
# print("returned from jitted func!")
else:
X_sort_sq = X_sort * X_sort
# cumX_head = np.cumsum(X_sort, axis=0)
# cumX2_head = np.cumsum(X_sort_sq, axis=0)
# cumX_tail = np.cumsum(X_sort[::-1], axis=0)[::-1]
# cumX2_tail = np.cumsum(X_sort_sq[::-1], axis=0)[::-1]
cumX_head = _cumsum_cols(X_sort)
cumX2_head = _cumsum_cols(X_sort_sq)
cumX_tail = _cumsum_cols(X_sort[::-1])[::-1]
cumX2_tail = _cumsum_cols(X_sort_sq[::-1])[::-1]
all_counts = np.arange(1, N + 1).reshape(-1, 1)
EX_head = cumX_head / all_counts # E[X], starting from 0
EX_tail = cumX_tail / all_counts[::-1] # E[X], starting from N-1
# EX2_head = cumX2_head / all_counts # E[X^2], starting from 0
# EX2_tail = cumX2_tail / all_counts[::-1] # E[X^2], starting from N-1
# mses_head = EX2_head - (EX_head * EX_head) # mses from 0
# mses_tail = EX2_tail - (EX_tail * EX_tail) # mses from N-1
# sses_head = mses_head * all_counts #
# sses_tail = mses_tail * all_counts[::-1]
# simpler equivalent of above; mse * N reduces to this
sses_head = cumX2_head - (cumX_head * EX_head)
sses_tail = cumX2_tail - (cumX_tail * EX_tail)
# # TODO rm
# mse_head_diffs = sses_head[1:] - sses_head[:-1]
# # print("mse_head_diffs[:20]", mse_head_diffs[:20])
# assert np.all(mse_head_diffs > -.1) # should be nondecreasing
# mse_tail_diffs = sses_tail[1:] - sses_tail[:-1]
# assert np.all(mse_tail_diffs < .1) # should be nonincreasing
sses = sses_head
sses[:-1] += sses_tail[1:] # sse of X_sort[:i] + sse of X_sort[i:]
sses = sses.sum(axis=1)
if shrink_towards_median:
minsse, maxsse = np.min(sses), np.max(sses)
scale = maxsse - minsse
# n_over_2 = N // 2
# scale = (maxsse - minsse) / n_over_2
coeffs = np.abs(np.arange(N, dtype=np.float32))
penalties = coeffs * (scale / np.max(coeffs))
sses += penalties
# # TODO rm
# E_X = X.mean(axis=0)
# E_X2 = (X * X).mean(axis=0)
# sse_true = np.sum(E_X2 - (E_X * E_X)) * N
# print("sses[0], sses[-1], true loss, np.sum(X.var(axis=0)) * N",
# sses[0], sses[-1], sse_true, np.sum(X.var(axis=0)) * N)
# X_orig_sort = X_orig[sort_idxs]
if possible_vals is None or not len(possible_vals): # can split anywhere
best_idx = np.argmin(sses)
next_idx = min(N - 1, best_idx + 1)
# best_val = (X_sort[best_idx, dim] + X_sort[next_idx, dim]) / 2.
# X_orig_sort = X_orig[sort_idxs]
col = X_orig[:, dim]
best_val = (col[sort_idxs[best_idx]] + col[sort_idxs[next_idx]]) / 2
# best_val = (X_orig_sort[best_idx, dim] + X_orig_sort[next_idx, dim]) / 2
else: # have to choose one of the values in possible_vals
sorted_col = X_orig[:, dim][sort_idxs]
idxs = np.searchsorted(sorted_col, possible_vals)
# idxs = np.unique(idxs)
idxs = np.maximum(0, idxs - 1) # searchsorted returns first idx larger
sses_for_idxs = sses[idxs]
which_idx_idx = np.argmin(sses_for_idxs)
best_idx = idxs[which_idx_idx]
best_val = possible_vals[which_idx_idx]
# print("return_possible_vals_losses: ", return_possible_vals_losses)
ret = best_val, sses[best_idx]
return ret + (sses_for_idxs,) if return_possible_vals_losses else ret
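# Illustrative sketch (not from the original source): for data that separates
# cleanly along one dimension, optimal_split_val recovers a threshold between
# the two clusters and reports the resulting two-bucket SSE.
#   X = np.vstack([np.random.randn(50, 3) - 5,
#                  np.random.randn(50, 3) + 5]).astype(np.float32)
#   val, loss = optimal_split_val(X, dim=0)
#   # val should land between the two modes (roughly within (-4, 4))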
def evenly_spaced_quantiles(x, nquantiles, dedup=True):
x = np.unique(x)
# handle x with fewer unique elements than nquantiles (or same number, or
# not that many more; basically just want each returned value to be unique
# and useful for binning the distribution)
if len(x) == nquantiles:
return x
elif len(x) == 1:
return np.linspace(-1, 3, num=nquantiles) * x[0]
elif len(x) < 2 * nquantiles:
return np.linspace(np.min(x), np.max(x), num=nquantiles)
n = nquantiles + 1
fracs = np.arange(1, n) / float(n)
return np.array([np.quantile(x, frac) for frac in fracs])
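# Example (illustrative, not from the original source):
#   evenly_spaced_quantiles(np.arange(100), 3)  # -> approx [24.75, 49.5, 74.25]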
class PointInfo(object):
__slots__ = 'data bucket_id'.split()
def __init__(self, data, bucket_id):
self.data = data
self.bucket_id = bucket_id
class Split(object):
__slots__ = 'dim val loss_change'.split()
def __init__(self, dim, val, loss_change=None):
self.dim = dim
self.val = val
self.loss_change = loss_change
def _sort_and_append_orig_idx(x, ascending=True):
sort_idxs = np.argsort(x)
if not ascending:
sort_idxs = sort_idxs[::-1]
x_sort = x[sort_idxs]
orig_idxs = np.arange(len(x))[sort_idxs]
return list(zip(x_sort, orig_idxs))
def _split_existing_buckets(buckets):
return [buck.split() for buck in buckets]
# new_buckets = []
# # D = len(buckets[0].sumX)
# for buck in buckets:
# # buck0 = copy.deepcopy(bucket)
# # buck0 = Bucket(N=buck.N, D=D, point_ids=copy.deepcopy(buck.point_ids),
# # sumX=np.copy(buck.sumX), sumX2=np.copy(buck.sumX2))
# # buck0 = buck.copy()
# # buck1 = Bucket(D=buckets[0].D)
# new_buckets.append((buck0, buck1))
# return new_buckets
class MultiSplit(object):
__slots__ = 'dim vals scaleby offset'.split()
def __init__(self, dim, vals, scaleby=None, offset=None):
self.dim = dim
self.vals = np.asarray(vals)
self.scaleby = scaleby
self.offset = offset
def preprocess_x(self, x):
if self.offset is not None:
x = x - self.offset
if self.scaleby is not None:
x = x * self.scaleby
return x
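# Descriptive note (added): preprocess_x applies the same offset/scale used to
# quantize split.vals (see learn_multisplits below), so raw feature values can
# be compared directly against the stored, clipped-to-[0, 255] thresholds.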
def learn_multisplits_orig(X, nsplits, log2_max_vals_per_split=4,
try_nquantiles=16, return_centroids=True,
# learn_quantize_params=False,
learn_quantize_params='int16',
# learn_quantize_params=True,
# verbose=1):
verbose=2):
# verbose=3):
X = X.astype(np.float32)
N, D = X.shape
max_vals_per_split = 1 << log2_max_vals_per_split
X_hat = np.zeros_like(X)
# initially, one big bucket with everything
buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
point_ids=np.arange(N))]
total_loss = sum([bucket.loss for bucket in buckets])
# values to try in each dim, after buckets no longer get to pick optimal
# ones; we try values that lie at evenly spaced quantiles
possible_split_vals = np.empty((D, try_nquantiles), dtype=X.dtype)
for dim in range(D):
# possible_split_vals[dim] = evenly_spaced_quantiles(
# X[:, dim], try_nquantiles)
# exclude endpoints, so we get the appropriate number of points linearly
# spaced *between* the min and max values
minval, maxval = np.min(X[:, dim]), np.max(X[:, dim])
possible_split_vals[dim] = np.linspace(
minval, maxval, num=(try_nquantiles + 2))[1:-1]
# print("initial possible split vals: ")
# print(possible_split_vals[:8])
# print(possible_split_vals[8:16])
# import sys; sys.exit()
if verbose > 0:
print("================================")
print("learn_multisplits(): initial loss: ", total_loss)
splits = []
col_losses = np.zeros(D, dtype=np.float32) # TODO rm?
for s in range(nsplits):
# if s >= 2:
# print("exiting after two splits")
# import sys; sys.exit()
if verbose > 1:
print("------------------------ finding split #:", s)
for i, buck in enumerate(buckets): # TODO rm sanity check
assert buck.id == i
nbuckets = len(buckets)
# compute number of bucket groups and size of each
ngroups = min(nbuckets, max_vals_per_split)
nbuckets_per_group = nbuckets // ngroups
assert nbuckets_per_group * ngroups == nbuckets # sanity check
# try_ndims = 8
# try_ndims = 4
try_ndims = 1
# dim_heuristic = 'eigenvec'
# dim_heuristic = 'bucket_eigenvecs'
dim_heuristic = 'variance'
if dim_heuristic == 'eigenvec':
# compute current reconstruction of X, along with errs
for buck in buckets:
# print("point ids: ", buck.point_ids)
if len(buck.point_ids):
centroid = buck.col_means()
# X_hat[np.array(buck.point_ids)] = centroid
X_hat[buck.point_ids] = centroid
X_res = X - X_hat
# pick dims by looking at top principal component
v = subs.top_principal_component(X_res)
try_dims = np.argsort(np.abs(v))[-try_ndims:]
elif dim_heuristic == 'bucket_eigenvecs':
dim_scores = np.zeros(D, dtype=np.float32)
for buck in buckets:
if buck.N < 2:
continue
X_buck = X[buck.point_ids]
v, lamda = subs.top_principal_component(
X_buck, return_eigenval=True)
v *= lamda
dim_scores += np.abs(v)
# X_buck -= X_buck.mean(axis=0)
try_dims = np.argsort(dim_scores)[-try_ndims:]
elif dim_heuristic == 'variance':
# pick out dims to consider splitting on
# try_dims = np.arange(D) # TODO restrict to subset?
col_losses[:] = 0
for buck in buckets:
col_losses += buck.col_sum_sqs()
# try_dims = np.argsort(col_losses)[-8:]
try_dims = np.argsort(col_losses)[-try_ndims:]
# try_dims = np.argsort(col_losses)[-2:]
# try_dims = np.arange(2)
# try_dims = np.arange(D) # TODO restrict to subset?
losses = np.zeros(len(try_dims), dtype=X.dtype)
losses_for_vals = np.zeros(try_nquantiles, dtype=X.dtype)
all_split_vals = [] # vals chosen by each bucket/group for each dim
# determine for this dim what the best split vals are for each
# group and what the loss is when using these split vals
for d, dim in enumerate(try_dims):
if verbose > 2:
# print("---------------------- dim = ", dim)
print("======== dim = {}, ({:.5f}, {:.5f})".format(
dim, np.min(X[:, dim]), np.max(X[:, dim])))
# just let each bucket pick its optimal split val for this dim;
# special case of below where each "group" is one bucket, and
# instead of having to pick val from fixed set, can be anything
if nbuckets_per_group == 1:
split_vals = [] # each bucket contributes one split val
for buck in buckets:
val, loss = buck.optimal_split_val(X, dim)
losses[d] += loss
split_vals.append(val)
all_split_vals.append(split_vals)
# buckets have to pick from fixed set of possible values; each
# group of buckets (defined by common prefix) have to agree on
# one val, so we sum loss for each possible value across all
# buckets in the group, and then take val with lowest sum
else:
split_vals = [] # each group contributes one split val
for g in range(ngroups):
# print("------------------------ group #", g)
start_idx = g * nbuckets_per_group
end_idx = start_idx + nbuckets_per_group
group_buckets = buckets[start_idx:end_idx]
# print("bucket ids, counts: ",
# [buck.id for buck in group_buckets],
# [buck.N for buck in group_buckets])
# compute loss for each possible split value, summed
# across all buckets in this group; then choose best
possible_vals = possible_split_vals[dim]
# print("possible split vals: ", possible_vals)
losses_for_vals[:] = 0
# losses_for_vals = np.zeros_like(losses_for_vals)
# print("losses for vals: ", losses_for_vals)
for b, buck in enumerate(group_buckets):
_, _, val_losses = buck.optimal_split_val(
X, dim, possible_vals=possible_vals,
return_possible_vals_losses=True)
losses_for_vals += val_losses
best_val_idx = np.argmin(losses_for_vals)
best_val = possible_vals[best_val_idx]
best_loss = losses_for_vals[best_val_idx]
losses[d] += best_loss
# print("best {val idx, val, loss} = ",
# best_val_idx, best_val, best_loss)
split_vals.append(best_val)
all_split_vals.append(split_vals)
# determine best dim to split on, and pull out associated split
# vals for all buckets
best_tried_dim_idx = np.argmin(losses)
best_dim = try_dims[best_tried_dim_idx]
use_split_vals = all_split_vals[best_tried_dim_idx]
split = MultiSplit(dim=best_dim, vals=use_split_vals)
if learn_quantize_params:
# if len(use_split_vals) > 1: # after 1st split
# minsplitval = np.min(use_split_vals)
# maxsplitval = np.max(use_split_vals)
# gap = maxsplitval - minsplitval
# offset = minsplitval - .02 * gap
# scale = 250. / gap # slightly below 255. / gap
# else: # 1st split; only one bucket, so no intersplit range
# assert np.min(use_split_vals) == np.max(use_split_vals)
# x = X[:, best_dim]
# offset = np.min(x)
# scale = 255. / np.max(x - offset)
# # x -= offset
# # scale = 128. / np.max(split.vals - offset)
# # scale = 1 # TODO rm
# # x = X[:, best_dim].copy()
# x = X[:, best_dim]
# offset = np.min(x)
# # scale = 255. / np.max(x - offset)
# scale = 250. / np.max(use_split_vals) # slightly below 255
# simple version, which also handles 1 bucket: just set min
# value to be avg of min splitval and xval, and max value to
# be avg of max splitval and xval
x = X[:, best_dim]
offset = (np.min(x) + np.min(use_split_vals)) / 2
upper_val = (np.max(x) + np.max(use_split_vals)) / 2 - offset
scale = 254. / upper_val
if learn_quantize_params == 'int16':
scale = 2. ** int(np.log2(scale))
split.offset = offset
split.scaleby = scale
split.vals = (split.vals - split.offset) * split.scaleby
split.vals = np.clip(split.vals, 0, 255).astype(np.int32)
splits.append(split)
# apply this split to get next round of buckets
new_buckets = []
for i, buck in enumerate(buckets):
group_idx = i // nbuckets_per_group
val = use_split_vals[group_idx]
new_buckets += list(buck.split(X, dim=best_dim, val=val))
buckets = new_buckets
if verbose > 1:
print("bucket counts: ", [buck.N for buck in buckets])
print("loss from buckets: ",
sum([bucket.loss for bucket in buckets]))
print("dim losses: ", losses)
if verbose > 2:
print("loss from sse computation: ",
losses[best_tried_dim_idx])
print("using dim, split_vals:", best_dim, use_split_vals)
# maybe return centroids in addition to set of MultiSplits and loss
loss = sum([bucket.loss for bucket in buckets])
if verbose > 0:
print("learn_multisplits(): returning loss: ", loss)
if return_centroids:
centroids = np.vstack([buck.col_means() for buck in buckets])
assert centroids.shape == (len(buckets), X.shape[1])
return splits, loss, centroids
return splits, loss
@_memory.cache
def learn_multisplits(
X, nsplits=4, return_centroids=True, return_buckets=False,
# learn_quantize_params=False,
# learn_quantize_params='int16', X_orig=None, try_ndims=1,
# learn_quantize_params='int16', X_orig=None, try_ndims=2,
learn_quantize_params='int16', X_orig=None, try_ndims=4,
# learn_quantize_params='int16', X_orig=None, try_ndims=8,
# learn_quantize_params='int16', X_orig=None, try_ndims=16,
# learn_quantize_params=True,
# verbose=3):
# verbose=2):
verbose=1):
assert nsplits <= 4 # >4 splits means >16 split_vals for this func's impl
X = X.astype(np.float32)
N, D = X.shape
X_orig = X if X_orig is None else X_orig
X_hat = np.zeros_like(X)
# initially, one big bucket with everything
buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
point_ids=np.arange(N))]
total_loss = sum([bucket.loss for bucket in buckets])
if verbose > 0:
print("================================")
# print("learn_multisplits(): initial loss: ", total_loss)
print("learn_multisplits(): initial loss: ", total_loss)
# print("learn_multisplits(): trying ndims: ", min(D, try_ndims))
splits = []
col_losses = np.zeros(D, dtype=np.float32) # TODO rm?
for s in range(nsplits):
if verbose > 1:
print("------------------------ finding split #:", s)
# dim_heuristic = 'eigenvec'
# dim_heuristic = 'bucket_eigenvecs'
dim_heuristic = 'bucket_sse'
# dim_heuristic = 'kurtosis'
if dim_heuristic == 'eigenvec':
# compute current reconstruction of X, along with errs
if s > 0:
for buck in buckets:
# print("point ids: ", buck.point_ids)
if len(buck.point_ids):
centroid = buck.col_means()
# X_hat[np.array(buck.point_ids)] = centroid
X_hat[buck.point_ids] = centroid
X_res = X - X_hat
else:
X_res = X
# pick dims by looking at top principal component
v = subs.top_principal_component(X_res)
try_dims = np.argsort(np.abs(v))[-try_ndims:]
elif dim_heuristic == 'bucket_eigenvecs':
dim_scores = np.zeros(D, dtype=np.float32)
for buck in buckets:
if buck.N < 2:
continue
X_buck = X[buck.point_ids]
v, lamda = subs.top_principal_component(
X_buck, return_eigenval=True)
v *= lamda
dim_scores += np.abs(v)
# X_buck -= X_buck.mean(axis=0)
try_dims = np.argsort(dim_scores)[-try_ndims:]
elif dim_heuristic == 'bucket_sse':
col_losses[:] = 0
for buck in buckets:
col_losses += buck.col_sum_sqs()
try_dims = np.argsort(col_losses)[-try_ndims:]
elif dim_heuristic == 'kurtosis':
# compute X_res
if s > 0:
for buck in buckets:
# print("point ids: ", buck.point_ids)
if len(buck.point_ids):
centroid = buck.col_means()
# X_hat[np.array(buck.point_ids)] = centroid
X_hat[buck.point_ids] = centroid
X_res = X - X_hat
else:
X_res = X
col_losses[:] = 0
for buck in buckets:
col_losses += buck.col_sum_sqs()
try_dims = np.argsort(col_losses)[-try_ndims:]
from scipy import stats
col_losses *= col_losses # just 4th central moment
col_losses *= stats.kurtosis(X_res, axis=0)
try_dims = np.argsort(col_losses)[-try_ndims:]
losses = np.zeros(len(try_dims), dtype=X.dtype)
all_split_vals = [] # vals chosen by each bucket/group for each dim
# determine for this dim what the best split vals are for each
# group and what the loss is when using these split vals
# print("try_dims: ", try_dims)
for d, dim in enumerate(try_dims):
# print("s, d, dim = ", s, d, dim)
if verbose > 2:
# print("---------------------- dim = ", dim)
print("======== dim = {}, ({:.5f}, {:.5f})".format(
dim, np.min(X[:, dim]), np.max(X[:, dim])))
split_vals = [] # each bucket contributes one split val
for b, buck in enumerate(buckets):
val, loss = buck.optimal_split_val(X, dim, X_orig=X_orig)
losses[d] += loss
if d > 0 and losses[d] >= np.min(losses[:d]):
if verbose > 2:
print("early abandoning after bucket {}!".format(b))
break # this dim already can't be the best
split_vals.append(val)
all_split_vals.append(split_vals)
# determine best dim to split on, and pull out associated split
# vals for all buckets
best_tried_dim_idx = np.argmin(losses)
best_dim = try_dims[best_tried_dim_idx]
use_split_vals = all_split_vals[best_tried_dim_idx]
split = MultiSplit(dim=best_dim, vals=use_split_vals)
if learn_quantize_params:
# simple version, which also handles 1 bucket: just set min
# value to be avg of min splitval and xval, and max value to
# be avg of max splitval and xval
x = X[:, best_dim]
offset = (np.min(x) + np.min(use_split_vals)) / 2
upper_val = (np.max(x) + np.max(use_split_vals)) / 2 - offset
scale = 254. / upper_val
if learn_quantize_params == 'int16':
scale = 2. ** int(np.log2(scale))
split.offset = offset
split.scaleby = scale
split.vals = (split.vals - split.offset) * split.scaleby
split.vals = np.clip(split.vals, 0, 255).astype(np.int32)
splits.append(split)
# apply this split to get next round of buckets
new_buckets = []
for i, buck in enumerate(buckets):
group_idx = i
val = use_split_vals[group_idx]
new_buckets += list(buck.split(X, dim=best_dim, val=val,
X_orig=X_orig))
buckets = new_buckets
if verbose > 1:
print("bucket counts: ", [buck.N for buck in buckets])
# print("loss from buckets: ",
# sum([bucket.loss for bucket in buckets]))
print("dim losses: ", losses)
if verbose > 2:
print("loss from sse computation: ",
losses[best_tried_dim_idx])
print("using dim, split_vals:", best_dim, use_split_vals)
# maybe return centroids in addition to set of MultiSplits and loss
loss = sum([bucket.loss for bucket in buckets])
if verbose > 0:
print("learn_multisplits(): returning loss: ", loss)
ret = [splits, loss]
if return_centroids:
centroids = np.vstack([buck.col_means() for buck in buckets])
assert centroids.shape == (len(buckets), X.shape[1])
ret.append(centroids)
# return splits, loss, centroids
if return_buckets:
# print("returning buckets!")
ret.append(buckets)
return tuple(ret)
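# Usage sketch (illustrative, not from the original source): 4 splits yield
# 2**4 = 16 buckets, so the returned centroids have 16 rows.
#   X = np.random.randn(2000, 32).astype(np.float32)
#   splits, loss, centroids = learn_multisplits(X, nsplits=4)
#   len(splits), centroids.shape  # -> 4, (16, 32)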
@numba.njit(fastmath=True, cache=True)
def _XtX_encoded(X_enc, K=16):
N, C = X_enc.shape
D = C * K # note that this is total number of centroids, not orig D
out = np.zeros((D, D), np.int32)
# out = np.zeros((D, D), np.float32)
# D = int(C * K) # note that this is total number of centroids, not orig D
# out = np.zeros((D, D), np.int8)
for n in range(N):
for c in range(C):
code_left = X_enc[n, c]
dim_left = (K * c) + code_left
out[dim_left, dim_left] += 1
for cc in range(c + 1, C):
code_right = X_enc[n, cc]
dim_right = (K * cc) + code_right
out[dim_left, dim_right] += 1
# populate lower triangle
for d in range(D):
for dd in range(d + 1, D):
out[dd, d] = out[d, dd]
return out
@numba.njit(fastmath=True, cache=True)
def _XtY_encoded(X_enc, Y, K=16):
N, C = X_enc.shape
N, M = Y.shape
D = int(C * K) # note that this is total number of centroids, not orig D
out = np.zeros((D, M), Y.dtype)
for n in range(N):
for c in range(C):
code_left = X_enc[n, c]
dim_left = (K * c) + code_left
for m in range(M):
out[dim_left, m] += Y[n, m]
return out
@numba.njit(fastmath=True, cache=True)
def _XW_encoded(X_enc, W, K=16):
N, C = X_enc.shape
D, M = W.shape
out = np.zeros((N, M), W.dtype)
for n in range(N):
for c in range(C):
code_left = X_enc[n, c]
dim_left = (K * c) + code_left
for m in range(M):
out[n, m] += W[dim_left, m]
return out
@numba.njit(fastmath=True, cache=True)
def _densify_X_enc(X_enc, K=16):
N, C = X_enc.shape
D = C * K
out = np.zeros((N, D), np.int8)
for n in range(N):
for c in range(C):
code_left = X_enc[n, c]
dim_left = (K * c) + code_left
out[n, dim_left] = 1
return out
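# Consistency sketch for the jitted helpers above (illustrative, not from the
# original source): with X_bin = _densify_X_enc(X_enc, K) as the one-hot view
# of the codes,
#   _XtX_encoded(X_enc, K) == X_bin.T @ X_bin
#   _XtY_encoded(X_enc, Y, K) == X_bin.T @ Y
#   _XW_encoded(X_enc, W, K) == X_bin @ W
# e.g.:
#   X_enc = np.random.randint(16, size=(200, 4))
#   Xb = _densify_X_enc(X_enc).astype(np.int32)
#   assert np.array_equal(_XtX_encoded(X_enc), Xb.T @ Xb)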
def _fit_ridge_enc(X_enc=None, Y=None, K=16, lamda=1, X_bin=None):
if X_bin is None:
X_bin = _densify_X_enc(X_enc, K=K)
est = linear_model.Ridge(fit_intercept=False, alpha=lamda)
est.fit(X_bin, Y)
return est.coef_.T
def encoded_lstsq(X_enc=None, X_bin=None, Y=None, K=16, XtX=None, XtY=None,
precondition=True, stable_ridge=True):
if stable_ridge:
return _fit_ridge_enc(X_enc=X_enc, Y=Y, X_bin=X_bin, K=K, lamda=1)
if XtX is None:
XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
lamda = 1 # TODO cross-validate to get lamda
# N = X_enc.shape[0]
# # lamda = N / (K * K)
# Y_bar = Y - Y.mean(axis=0)
# lamda = N * np.var(Y - Y.mean(axis=0)) / (K * K)
# # lamda = N * np.var(Y - Y.mean(axis=0)) / K
# lamda = N * np.var(Y) / K
# lamda = N * np.var(Y) / (K * K)
# # lamda = N * 1e4 # should shrink coeffs to almost 0
# # alpha = unscaled_alpha * np.var(X - X.mean(axis=0)) * N / D
# lamda = N / (1e5) # sorta works
# lamda = N / (1e4) # sorta works
lamda = max(1, lamda)
print("using lamda = ", lamda)
# lamda = max(1, len(X_enc) / 1e6)
# lamda = max(1, len(X_enc) / 1e5)
# lamda = max(1, len(X_enc) / 1e4)
# lamda = max(1, len(X_enc) / float(K * K))
# lamda = len(X_enc) / float(K)
# print("computing and regularizing XtX using lambda = ", lamda)
XtX += np.diag(np.ones(XtX.shape[0]) * lamda).astype(np.float32) # ridge
if XtY is None:
XtY = _XtY_encoded(X_enc, Y, K=K)
XtX = XtX.astype(np.float64)
XtY = XtY.astype(np.float64)
# preconditioning to avoid numerical issues (seemingly unnecessary, but
# might as well do it)
# scale = 1. / np.std(XtX)
if precondition:
# # pretend cols of X were scaled differently
# xscales = np.linalg.norm(XtX, axis=0) + 1e-20
# mulby = (1. / xscales)
# XtX *= mulby * mulby
# XtY *= mulby.reshape(-1, 1)
# yscales = np.linalg.norm(XtY, axis=1) + 1e-20
# yscales = np.linalg.norm(XtY, axis=0) + 1e-20
# yscales = yscales.reshape(-1, 1)
# xscales = np.mean(np.linalg.norm(XtX, axis=0))
# xscales = 7
# xscales = 1
# XtY *= (1. / yscales)
# XtY *= (1. / yscales.reshape(-1, 1))
# scale = 1. / len(X_enc)
scale = 1. / np.linalg.norm(XtX, axis=0).max()
XtX = XtX * scale
XtY = XtY * scale
# W = np.linalg.solve(XtX, XtY)
W, _, _, _ = np.linalg.lstsq(XtX, XtY, rcond=None) # doesn't fix it
# W, _, _, _ = np.linalg.lstsq(X_bin, Y, rcond=None)
# import torch
# import torch.nn.functional as F
# import torch.optim as optim
# def _to_np(A):
# return A.cpu().detach().numpy()
# niters = 10
# for it in range(niters):
# if precondition:
# pass
# # W *= xscales
# # W *= xscales.reshape(-1, 1)
# # W /= xscales.reshape(-1, 1)
# # W *= yscales.ravel()
# # W *= yscales
# W *= yscales # undo preconditioning
# import matplotlib.pyplot as plt
# _, axes = plt.subplots(2, 2, figsize=(13, 10))
# axes[0, 0].imshow(_densify_X_enc(X_enc[:1000]), interpolation='nearest')
# axes[0, 1].imshow(XtX, interpolation='nearest')
# axes[1, 0].imshow(XtY, interpolation='nearest', cmap='RdBu')
# axes[1, 1].imshow(W, interpolation='nearest', cmap='RdBu')
# # plt.colorbar()
# plt.tight_layout()
# plt.show()
# import sys; sys.exit()
return W
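# What encoded_lstsq computes, in sketch form (illustrative, not from the
# original source): a ridge regression of Y onto the one-hot expansion of the
# codes, so the dense prototype matrix W satisfies X_bin @ W ~= Y.
#   W = encoded_lstsq(X_enc=X_enc, Y=Y)   # shape (16 * ncodebooks, M)
#   Y_hat = _XW_encoded(X_enc, W)         # same as _densify_X_enc(X_enc) @ W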
def _sparse_encoded_lstsq_gomp(X_enc, Y, nnz_blocks, K=16):
assert nnz_blocks >= 1
ncodebooks = X_enc.shape[1]
M = Y.shape[1]
# precompute XtX and XtY and create initial dense W
XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
XtX += np.diag(np.ones(XtX.shape[0])).astype(np.float32) # ridge
XtY = _XtY_encoded(X_enc, Y, K=K)
W = encoded_lstsq(X_enc, Y, XtX=XtX, XtY=XtY)
XtX = np.asfarray(XtX) # since we'll be slicing columns
keep_codebook_idxs = np.empty((M, nnz_blocks), dtype=int)
XtX_G = np.empty((ncodebooks, K * ncodebooks, K), dtype=np.float32)
for c in range(ncodebooks):
start_idx = c * K
end_idx = start_idx + K
# use_XtX = XtX[start_idx:end_idx][:, start_idx:end_idx]
use_XtX = XtX[:, start_idx:end_idx]
XtX_G[c], _ = np.linalg.qr(use_XtX) # KC x K
codebook_scores = np.zeros(ncodebooks)
for m in range(M): # fully solve one output col at a time
# xty = np.ascontiguousarray(XtY[:, m])
targets = np.copy(XtY[:, m])
residuals = targets
keep_codebooks = set()
w = np.copy(W[:, m])
pq_codebook_idx = int(m / float(M) * ncodebooks)
# print("---- m = ", m)
for b in range(nnz_blocks):
# targets_normed = targets
# score each codebook to pick new one to add
if b > 0:
for c in range(ncodebooks):
if c in keep_codebooks:
codebook_scores[c] = -np.inf
continue
X_G = XtX_G[c]
codebook_scores[c] = np.linalg.norm(X_G.T @ residuals)
keep_codebooks.add(np.argmax(codebook_scores))
else:
keep_codebooks.add(pq_codebook_idx) # seed with pq idx
# refit model using all the groups selected so far
keep_idxs = [np.arange(i * K, (i + 1) * K)
for i in sorted(list(keep_codebooks))]
keep_idxs = np.hstack(keep_idxs)
XtX_subs = XtX[keep_idxs][:, keep_idxs]
targets_subs = targets[keep_idxs]
w_subs = np.linalg.solve(XtX_subs, targets_subs)
# XtX_subs = XtX[:, keep_idxs]
# targets_subs = targets[keep_idxs]
# w_subs = np.linalg.solve(XtX_subs, targets)
# w_subs, resid, _, _ = np.linalg.lstsq(XtX_subs, targets)
w[:] = 0
w[keep_idxs] = w_subs
# resid_norm_sq = np.linalg.norm(residuals)**2
# print("resid norm sq: ", resid_norm_sq)
# print("lstsq mse: ", resid / resid_norm_sq)
# residuals = targets - (XtX_subs @ w_subs)
residuals = targets - (XtX[:, keep_idxs] @ w_subs)
# resid_norm_sq = np.linalg.norm(residuals)**2
# print("new resid norm sq: ", resid_norm_sq)
# targets = np.copy(XtY[:, m]) - (XtX @ w)
# update return arrays
keep_codebook_idxs[m] = np.array(list(keep_codebooks))
W[:, m] = w
return W, keep_codebook_idxs
# each codebook has const number of nonzero idxs
def _sparse_encoded_lstsq_elim_v2(X_enc, Y, nnz_per_centroid, K=16,
# uniform_sparsity=False): # never better
uniform_sparsity=True, pq_perm_algo='start',
stable_ridge=True):
ncodebooks = X_enc.shape[1]
M = Y.shape[1]
nnz_per_centroid = min(M, int(nnz_per_centroid))
nnz_per_centroid = max(1, nnz_per_centroid)
assert nnz_per_centroid >= int(np.ceil(M / ncodebooks))
assert nnz_per_centroid <= M
X_bin = _densify_X_enc(X_enc, K=K)
if not stable_ridge:
# precompute XtX and XtY and create initial dense W
XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
lamda = 1
# # alpha = unscaled_alpha * np.var(X - X.mean(axis=0)) * N / D
# # lamda = np.sqrt(ncodebooks)
# N = XtX.shape[0]
# lamda = N / (K * K)
# lamda = max(1, lamda)
# print("using lamda = ", lamda)
# lamda = max(1, len(X_enc) / 1e4)
# lamda = max(1, len(X_enc) / float(K * K))
XtX += np.diag(np.ones(XtX.shape[0]) * lamda).astype(np.float32) # ridge
# XtX += np.diag(np.ones(XtX.shape[0])).astype(np.float32) # ridge
XtY = _XtY_encoded(X_enc, Y, K=K)
# scale = 1. / len(X_enc)
scale = 1. / np.linalg.norm(XtX, axis=0).max()
XtX = XtX * scale
XtY = XtY * scale
W = encoded_lstsq(X_bin=X_bin, Y=Y, XtX=XtX, XtY=XtY, precondition=False,
stable_ridge=stable_ridge) # KC x M
XtX = np.asfarray(XtX) # since we'll be slicing columns
else: # stable_ridge is True
W = encoded_lstsq(X_bin=X_bin, Y=Y, stable_ridge=stable_ridge)
# score all blocks of W
all_scores = np.empty((ncodebooks, M), dtype=float)  # C x M
for c in range(ncodebooks):
Xc = X_enc[:, c].reshape(-1, 1)
start_idx = c * K
end_idx = start_idx + K
Wc = W[start_idx:end_idx]
Yc = _XtY_encoded(Xc, Wc, K=K) # N x M
all_scores[c] = np.linalg.norm(Yc, axis=0)
# pq_idxs = _pq_codebook_start_end_idxs(M, ncodebooks)
pq_idxs = _pq_codebook_start_end_idxs(Y, ncodebooks, algo=pq_perm_algo)
# now pick which cols to keep in each codebook
keep_mask = np.zeros((ncodebooks, M), dtype=bool)
# subvec_len = int(np.ceil(M / ncodebooks))
for c in range(ncodebooks):
# initialize with PQ
start_idx, end_idx = pq_idxs[c]
keep_mask[c, start_idx:end_idx] = 1
subvec_len = end_idx - start_idx
assert subvec_len >= 1
keep_nidxs_extra = nnz_per_centroid - subvec_len
scores = all_scores[c]
scores[start_idx:end_idx] = 0
if uniform_sparsity and keep_nidxs_extra > 0:
# take as many other (best) nonzero idxs as we're allowed to
assert len(scores) >= keep_nidxs_extra
best_idxs = np.argsort(scores)[-keep_nidxs_extra:]
if len(best_idxs) != keep_nidxs_extra:
print("len(best_idxs)", len(best_idxs))
print("keep_nidxs_extra", keep_nidxs_extra)
assert len(best_idxs) == keep_nidxs_extra
keep_mask[c, best_idxs] = True
if not uniform_sparsity:
scores = all_scores.ravel()
nkept_idxs = M # number of nonzeros used already
keep_nidxs_total = nnz_per_centroid * ncodebooks
keep_nidxs_extra = keep_nidxs_total - nkept_idxs
keep_idxs = np.argsort(scores)[-keep_nidxs_extra:]
flat_mask = keep_mask.ravel()
flat_mask[keep_idxs] = 1
keep_mask = flat_mask.reshape(keep_mask.shape)
# at this point, we have the mask for which cols of each centroid to keep;
# now we just need to go from a mask to a set of indices and a sparse
# matrix of centroids
W_sparse = np.empty((ncodebooks * K, M), dtype=np.float32)
if uniform_sparsity:
ret_idxs = np.empty((ncodebooks, nnz_per_centroid), dtype=int)
else:
ret_idxs = []
# else:
# ret_idxs = np.zeros((ncodebooks, M), dtype=np.int) - 1
for c in range(ncodebooks):
idxs = np.where(keep_mask[c] != 0)[0]
if uniform_sparsity:
if len(idxs) != nnz_per_centroid:
print("c: ", c)
print("len(idxs): ", len(idxs))
print("nnz_per_centroid: ", nnz_per_centroid)
print("keep_mask counts:", keep_mask.sum(axis=1))
assert len(idxs) == nnz_per_centroid
ret_idxs[c] = idxs
else:
ret_idxs.append(idxs)
zero_idxs = np.where(keep_mask[c] == 0)[0]
start_idx = c * K
end_idx = start_idx + K
Wc = W[start_idx:end_idx]
Wc[:, zero_idxs] = 0
W_sparse[start_idx:end_idx] = Wc
# now refit W_sparse to each output col; right now it's just the original
# W with a bunch of entries zeroed
for m in range(M):
w = W_sparse[:, m]
keep_idxs = np.where(w != 0)[0]
if stable_ridge:
X_bin_subs = X_bin[:, keep_idxs]
w_subs = _fit_ridge_enc(X_bin=X_bin_subs, Y=Y[:, m])
else:
xty = XtY[:, m]
use_XtX = XtX[keep_idxs][:, keep_idxs]
use_xty = xty[keep_idxs]
w_subs = np.linalg.solve(use_XtX, use_xty)
w[:] = 0
w[keep_idxs] = w_subs
W_sparse[:, m] = w
# nnzs = [len(idxs) for idxs in ret_idxs]
# print("nnzs: ", nnzs)
# print(f"returning {ret_idxs.shape[1]} nonzeros per centroid...")
return W_sparse, ret_idxs
def _sparse_encoded_lstsq_backward_elim(X_enc, Y, nnz_blocks, K=16):
ncodebooks = X_enc.shape[1]
eliminate_nblocks = ncodebooks - nnz_blocks
M = Y.shape[1]
# precompute XtX and XtY and create initial dense W
XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
XtX += np.diag(np.ones(XtX.shape[0])).astype(np.float32) # ridge
XtY = _XtY_encoded(X_enc, Y, K=K)
W = encoded_lstsq(X_enc, Y, XtX=XtX, XtY=XtY)
XtX = np.asfarray(XtX) # since we'll be slicing columns
keep_codebook_idxs = np.empty((M, nnz_blocks), dtype=int)
codebook_scores = np.zeros(ncodebooks)
for m in range(M): # fully solve one output col at a time
xty = np.ascontiguousarray(XtY[:, m])
rm_codebook_idxs = set()
w = np.copy(W[:, m])
for b in range(eliminate_nblocks):
# evaluate contribution of each codebook
for c in range(ncodebooks):
# if c in rm_codebook_idxs or c == pq_codebook_idx:
if c in rm_codebook_idxs:
codebook_scores[c] = np.inf
continue
start_idx = c * K
end_idx = start_idx + K
# XtX_subs = XtX[:, start_idx:end_idx] # CK x K
# w_subs = w[start_idx:end_idx] # K
# xtyhat_subs = XtX_subs @ w_subs # CK x 1
# codebook_scores[c] = np.linalg.norm(xtyhat_subs)
XtX_subs = XtX[start_idx:end_idx][:, start_idx:end_idx]
w_subs = w[start_idx:end_idx] # K
xtyhat_subs = XtX_subs @ w_subs # K x 1
codebook_scores[c] = np.linalg.norm(xtyhat_subs)
# rm least helpful codebook and refit the least squares
rm_codebook_idxs.add(np.argmin(codebook_scores))
keep_codebooks = [i for i in range(ncodebooks)
if i not in rm_codebook_idxs]
keep_idxs = [np.arange(i * K, (i + 1) * K)
for i in keep_codebooks]
keep_idxs = np.hstack(keep_idxs)
use_XtX = XtX[keep_idxs][:, keep_idxs]
use_xty = xty[keep_idxs]
w_subs = np.linalg.solve(use_XtX, use_xty)
# print("w shape: ", w.shape)
# print("rm codebooks: ", rm_codebook_idxs)
# print("keep codebooks: ", keep_codebooks)
# print("keep idxs: ", keep_idxs)
# print("type(keep idxs): ", type(keep_idxs))
# print("w[keep idxs]: ", w[keep_idxs])
# print("resid: ", resid)
w[:] = 0
w[keep_idxs] = w_subs
# update return arrays
keep_idxs = [i for i in range(ncodebooks) if i not in rm_codebook_idxs]
keep_codebook_idxs[m] = np.array(keep_codebooks)
W[:, m] = w
return W, keep_codebook_idxs # CK x M, M x nnz
def sparse_encoded_lstsq(X_enc, Y, K=16, nnz_blocks=-1, **kwargs):
ncodebooks = X_enc.shape[1]
if nnz_blocks < 1:
# nnz_per_centroid = Y.shape[1]
# default to returning dense centroids
W = encoded_lstsq(X_enc, Y, K=16)
ncodebooks = X_enc.shape[1]
M = Y.shape[1]
keep_codebook_idxs = np.empty((ncodebooks, M), dtype=int)
all_idxs = np.arange(M)
for c in range(ncodebooks):
keep_codebook_idxs[c] = all_idxs
return W, keep_codebook_idxs
else:
nnz_per_centroid = int(nnz_blocks * Y.shape[1] / ncodebooks)
# nnz_blocks = int(np.sqrt(ncodebooks) + .5)
# return _sparse_encoded_lstsq_backward_elim(
# X_enc, Y, nnz_blocks=nnz_blocks, K=K)
# return _sparse_encoded_lstsq_gomp(X_enc, Y, nnz_blocks=nnz_blocks, K=K)
# print("nnz_per_centroid: ", nnz_per_centroid)
return _sparse_encoded_lstsq_elim_v2(
X_enc, Y, nnz_per_centroid=nnz_per_centroid, K=K, **kwargs)
# def _pq_codebook_start_end_idxs(D, ncodebooks):
def _pq_codebook_start_end_idxs(X, ncodebooks, algo='start'):
assert algo in ('start', 'end') # TODO do something smarter here
# D = int(D)
_, D = X.shape
ncodebooks = int(ncodebooks)
assert D >= ncodebooks
idxs = np.empty((ncodebooks, 2), dtype=int)
full_subvec_len = D // ncodebooks
start_idx = 0
for c in range(ncodebooks):
subvec_len = full_subvec_len
if algo == 'start': # wider codebooks at the start
if c < (D % ncodebooks):
subvec_len += 1
elif algo == 'end': # wider codebooks at the end
if (ncodebooks - c - 1) < (D % ncodebooks):
subvec_len += 1
end_idx = min(D, start_idx + subvec_len)
# print("c, start_idx, end_idx: ", c, start_idx, end_idx)
# print("start_idx, end_idx: ", c, start_idx, end_idx)
idxs[c, 0] = start_idx
idxs[c, 1] = end_idx
start_idx = end_idx
assert idxs[0, 0] == 0
assert idxs[-1, -1] == D
return idxs
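# Example (illustrative, not from the original source): 10 columns split
# across 4 codebooks with algo='start' puts the wider subvectors first.
#   X = np.zeros((1, 10), dtype=np.float32)
#   _pq_codebook_start_end_idxs(X, 4)  # -> [[0, 3], [3, 6], [6, 8], [8, 10]]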
@_memory.cache
def _learn_mithral_initialization(X, ncodebooks,
pq_perm_algo='start', **kwargs):
N, D = X.shape
ncentroids_per_codebook = 16
X = X.astype(np.float32)
X_res = X.copy()
X_orig = X
all_centroids = np.zeros(
(ncodebooks, ncentroids_per_codebook, D), dtype=np.float32)
all_splits = []
pq_idxs = _pq_codebook_start_end_idxs(X, ncodebooks, algo=pq_perm_algo)
subvec_len = int(np.ceil(D / ncodebooks)) # for non-pq heuristics
nonzeros_heuristic = 'pq'
# ------------------------ 0th iteration; initialize all codebooks
all_splits = []
all_buckets = []
for c in range(ncodebooks):
if nonzeros_heuristic == 'pq':
start_idx, end_idx = pq_idxs[c]
idxs = np.arange(start_idx, end_idx)
elif nonzeros_heuristic == 'pca':
v = subs.top_principal_component(X_res)
idxs = np.argsort(np.abs(v))[:-subvec_len]
elif nonzeros_heuristic == 'disjoint_pca':
use_X_res = X_res.copy()
if c > 0: # not the first codebook
use_X_res[:, idxs] = 0 # can't use same subspace
v = subs.top_principal_component(use_X_res)
idxs = np.argsort(np.abs(v))[:-subvec_len]
use_X_res = X_res[:, idxs]
use_X_orig = X_orig[:, idxs]
# learn codebook to soak current residuals
multisplits, _, buckets = learn_multisplits(
use_X_res, X_orig=use_X_orig,
return_centroids=False, return_buckets=True, **kwargs)
for split in multisplits:
split.dim = idxs[split.dim]
all_splits.append(multisplits)
all_buckets.append(buckets)
# update residuals and store centroids
centroid = np.zeros(D, dtype=np.float32)
for b, buck in enumerate(buckets):
if len(buck.point_ids):
centroid[:] = 0
centroid[idxs] = buck.col_means()
X_res[buck.point_ids] -= centroid
# update centroid here in case we want to regularize it somehow
all_centroids[c, b] = centroid
# print("X_res mse / X mse: ",
# (X_res * X_res).mean() / (X_orig * X_orig).mean())
return X_res, all_splits, all_centroids, all_buckets
@_memory.cache
def learn_mithral(X, ncodebooks, return_buckets=False,
lut_work_const=-1, **kwargs):
N, D = X.shape
ncentroids_per_codebook = 16
X_orig = X.astype(np.float32)
X_res0, all_splits0, all_centroids0, all_buckets0 = \
_learn_mithral_initialization(X, ncodebooks, pq_perm_algo='start')
mse_orig = (X_orig * X_orig).mean()
mse0 = (X_res0 * X_res0).mean()
print("X_res mse / X mse: ", mse0 / mse_orig)
used_perm_algo = 'start'
if False:
# choose between having wider codebooks at the start vs the end (if
# there might be a meaningful difference)
X_res1, all_splits1, all_centroids1, all_buckets1 = \
_learn_mithral_initialization(X, ncodebooks, pq_perm_algo='end')
mse1 = (X_res1 * X_res1).mean()
if mse0 <= mse1:
X_res, all_splits, all_centroids, all_buckets = (
X_res0, all_splits0, all_centroids0, all_buckets0)
else:
X_res, all_splits, all_centroids, all_buckets = (
X_res1, all_splits1, all_centroids1, all_buckets1)
used_perm_algo = 'end'
print("X_res1 mse / X mse: ", mse1 / mse_orig)
else:
X_res, all_splits, all_centroids, all_buckets = (
X_res0, all_splits0, all_centroids0, all_buckets0)
# optimize centroids discriminatively conditioned on assignments
X_enc = mithral_encode(X, all_splits)
if lut_work_const != 1: # if it's 1, equivalent to just doing PQ
#
# shrink W towards 0
#
# if lut_work_const < 0:
# W = encoded_lstsq(X_enc, X)
# else:
# W, nonzero_blocks = sparse_encoded_lstsq(
# X_enc, X, nnz_blocks=lut_work_const)
#
# shrink W towards initial centroids
#
if lut_work_const < 0:
print("fitting dense lstsq to X_res")
W = encoded_lstsq(X_enc=X_enc, Y=X_res)
else:
W, _ = sparse_encoded_lstsq(
X_enc, X_res, nnz_blocks=lut_work_const,
pq_perm_algo=used_perm_algo)
all_centroids_delta = W.reshape(ncodebooks, ncentroids_per_codebook, D)
all_centroids += all_centroids_delta
# check how much improvement we got
X_res -= _XW_encoded(X_enc, W) # if we fit to X_res
mse_res = (X_res * X_res).mean()
print("X_res mse / X mse after lstsq: ", mse_res / mse_orig)
# print("min, median, max, std, of all centroids after lstsq:\n",
# all_centroids.min(), np.median(all_centroids),
# all_centroids.max(), all_centroids.std())
if return_buckets:
return all_splits, all_centroids, all_buckets
return all_splits, all_centroids
def learn_mithral_v1(X, ncodebooks, niters=1, return_buckets=False, **kwargs):
# print("called learn_mithral!"); import sys; sys.exit()
N, D = X.shape
ncentroids_per_codebook = 16
X = X.astype(np.float32)
X_res = X.copy()
X_orig = X
X_hat = np.zeros_like(X)
all_centroids = np.zeros(
(ncodebooks, ncentroids_per_codebook, D), dtype=np.float32)
all_splits = []
subvec_len = int(np.ceil(D / ncodebooks))
# use_X_res = np.zeros_like(X_res)
# TODO multiple iters; also store assignments from each codebook, so
# that we can undo effect of its X_hat (can't store X_hat directly for
# large data, so recompute on the fly using assignments and centroids)
nonzeros_heuristic = 'pq'
# nonzeros_heuristic = 'pca'
# nonzeros_heuristic = 'disjoint_pca'
# TODO store assignments (or maybe just buckets directly)
# TODO update just centroids (not assignments) at iter end
# ------------------------ 0th iteration; initialize all codebooks
all_splits = []
all_buckets = []
for c in range(ncodebooks):
if nonzeros_heuristic == 'pq':
start_idx = c * subvec_len
end_idx = min(D, start_idx + subvec_len)
idxs = np.arange(start_idx, end_idx)
elif nonzeros_heuristic == 'pca':
v = subs.top_principal_component(X_res)
idxs = np.argsort(np.abs(v))[:-subvec_len]
elif nonzeros_heuristic == 'disjoint_pca':
use_X_res = X_res.copy()
if c > 0: # not the first codebook
use_X_res[:, idxs] = 0 # can't use same subspace
v = subs.top_principal_component(use_X_res)
idxs = np.argsort(np.abs(v))[:-subvec_len]
use_X_res = X_res[:, idxs]
use_X_orig = X_orig[:, idxs]
# learn codebook to soak current residuals
multisplits, _, buckets = learn_multisplits(
use_X_res, X_orig=use_X_orig,
return_centroids=False, return_buckets=True, **kwargs)
for split in multisplits:
split.dim = idxs[split.dim]
all_splits.append(multisplits)
all_buckets.append(buckets)
# use_X_res[:, start_idx:end_idx] = 0
# use_X_res[:] = 0
# update residuals and store centroids
centroid = np.zeros(D, dtype=np.float32)
for b, buck in enumerate(buckets):
if len(buck.point_ids):
centroid[:] = 0
centroid[idxs] = buck.col_means()
# centroid /= 2 # TODO rm
X_hat[buck.point_ids] = centroid
# update centroid here in case we want to regularize it somehow
all_centroids[c, b] = centroid
X_res -= X_hat
print("X res var / X var: ", X_res.var() / X_orig.var())
# ------------------------ remaining iters
for t in range(niters):
# now update centroids given assignments and all other centroids
# for _ in range(5):
# for _ in range(20):
for _ in range(10):
for c in range(ncodebooks):
# print("c: ", c)
# undo effect of this codebook
buckets = all_buckets[c]
for b, buck in enumerate(buckets):
if len(buck.point_ids):
X_hat[buck.point_ids] = all_centroids[c, b]
X_res += X_hat
# update centroids based on residuals given all other codebooks
for b, buck in enumerate(buckets):
if len(buck.point_ids):
centroid = X_res[buck.point_ids].mean(axis=0)
# keep_ndims = D // 2
# zero_idxs = np.argsort(np.abs(centroid))[:-keep_ndims]
# centroid[zero_idxs] = 0
# true_centroid = X_res[buck.point_ids].mean(axis=0)
# old_centroid = all_centroids[c, b]
# centroid = (true_centroid + old_centroid) / 2
X_hat[buck.point_ids] = centroid
all_centroids[c, b] = centroid
X_res -= X_hat
print("X res var / X var after centroid updates: ",
X_res.var() / X_orig.var())
# now update assignments
if t == niters - 1:
break # end after updating centroids, not assignments
for c in range(ncodebooks):
# print("c: ", c)
# undo effect of this codebook
buckets = all_buckets[c]
# orig_loss = sum([buck.loss for buck in buckets])
orig_loss = np.sum(X_res * X_res)
for b, buck in enumerate(buckets):
if len(buck.point_ids):
X_hat[buck.point_ids] = all_centroids[c, b]
X_res += X_hat
multisplits, loss, buckets = learn_multisplits(
X_res, X_orig=X_orig,
return_centroids=False, return_buckets=True, **kwargs)
print("orig loss, loss: ", orig_loss, loss)
if loss > orig_loss:
X_res -= X_hat
continue
all_splits[c] = multisplits
all_buckets[c] = buckets
# update residuals and store centroids
# centroid = np.zeros(D, dtype=np.float32)
for b, buck in enumerate(buckets):
if len(buck.point_ids):
centroid = buck.col_means()
# centroid /= 2 # TODO rm
X_hat[buck.point_ids] = centroid
# update centroid here in case we want to regularize it somehow
all_centroids[c, b] = centroid
X_res -= X_hat
print("new X res var / X var: ", X_res.var() / X_orig.var())
if return_buckets:
return all_splits, all_centroids, all_buckets
return all_splits, all_centroids
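# Illustrative sketch, not part of the original module: given the
# (splits, centroids, buckets) returned above with return_buckets=True, the
# reconstruction of X is the sum over codebooks of each point's assigned
# centroid. Assumes the trainer is exposed as `learn_mithral`, matching the
# call in `_plot_stuff_on_trace()` further down, and relies on its defaults
# for the remaining kwargs.
def _sketch_mithral_reconstruction_quality(X, ncodebooks=4):
    splits, centroids, buckets = learn_mithral(
        X, ncodebooks, return_buckets=True)
    X_hat = np.zeros_like(X, dtype=np.float32)
    for c in range(len(buckets)):            # each codebook contributes additively
        for b, buck in enumerate(buckets[c]):
            if buck.N > 0:
                X_hat[buck.point_ids] += centroids[c, b]
    X_res = X - X_hat
    return X_res.var() / X.var()             # fraction of variance left unexplained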
def mithral_encode(X, multisplits_lists):
N, D = X.shape
ncodebooks = len(multisplits_lists)
    X_enc = np.empty((N, ncodebooks), dtype=int, order='f')
for c in range(ncodebooks):
X_enc[:, c] = assignments_from_multisplits(X, multisplits_lists[c])
return np.ascontiguousarray(X_enc)
def mithral_lut(q, all_centroids):
q = q.reshape(1, 1, -1) # all_centroids is shape ncodebooks, ncentroids, D
return (q * all_centroids).sum(axis=2) # ncodebooks, ncentroids
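# Illustrative sketch, not part of the original module: combining
# mithral_encode() and mithral_lut(), the dot products X @ q are approximated
# by summing one table entry per codebook. `splits` and `all_centroids` are
# assumed to come from the trainer above.
def _sketch_mithral_approx_dot_products(X, q, splits, all_centroids):
    X_enc = mithral_encode(X, splits)               # (N, ncodebooks) codes
    lut = mithral_lut(q, all_centroids)             # (ncodebooks, ncentroids)
    ncodebooks = X_enc.shape[1]
    # gather lut[c, X_enc[:, c]] for each codebook, then sum across codebooks
    return lut[np.arange(ncodebooks), X_enc].sum(axis=1)   # ~= X @ q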
def learn_splits_greedy(X, nsplits, verbose=2):
N, D = X.shape
assert nsplits <= D
# # improve numerical stability
# scale = np.std(X)
# X *= (1. / scale)
# precompute sorted lists of values within each dimension,
    # along with which row they originally came from, so we can look
# up the whole vector (and bucket) associated with each value
dim2sorted = []
for dim in range(D):
sorted_with_idx = _sort_and_append_orig_idx(X[:, dim])
dim2sorted.append(sorted_with_idx)
splits = []
# buckets = [Bucket(N=N, sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
point_ids=np.arange(N))]
# all_point_infos = [PointInfo(data=row, bucket_id=0) for row in X]
    bucket_assignments = np.zeros(N, dtype=int)
# Z = X - X.mean(axis=0)
# total_loss = np.sum(Z * Z)
# print("initial SSE: ", total_loss)
total_loss = sum([bucket.loss for bucket in buckets])
if verbose > 0:
print("learn_splits(): initial loss: ", total_loss)
# unused_dims = set(np.arange(X.shape[1]))
# all_dims = np.arange(D)
col_losses = np.zeros(D, dtype=np.float32) # TODO rm?
for s in range(nsplits):
if verbose > 1:
print("================================ finding split #:", s)
best_split = Split(dim=-1, val=-np.inf, loss_change=0)
# for d in unused_dims:
# for d in all_dims:
# for d in all_dims[:2]: # TODO rm
col_losses[:] = 0
for buck in buckets:
col_losses += buck.col_sum_sqs()
# try_dims = [np.argmax(col_losses)]
# try_dims = np.argsort(col_losses)[-nsplits:]
try_dims = np.argsort(col_losses)[-4:]
# for d in [dim]: # TODO multiple dim options?
if verbose > 1:
print("trying dims: ", try_dims)
print("with losses: ", col_losses[try_dims])
for d in try_dims:
vals_and_point_ids = dim2sorted[d]
new_buckets = _split_existing_buckets(buckets)
new_total_loss = total_loss
if verbose > 2:
print("---------------------- dim = ", d)
# for i, (val, point_id) in enumerate(vals_and_point_ids):
# skip last point since that just puts everything in one bucket,
# which is the same as what we're starting out with
for val, point_id in vals_and_point_ids[:-1]:
# if verbose > 1:
# print("i: {}/{}".format(i, len(vals_and_point_ids) - 1))
# info = all_point_infos[point_id]
# point, bucket_id = info.data, info.bucket_id
point = X[point_id]
bucket_id = bucket_assignments[point_id]
bucket0 = new_buckets[bucket_id][0]
bucket1 = new_buckets[bucket_id][1]
old_loss = bucket0.loss + bucket1.loss
bucket0.remove_point(point, point_id=point_id)
bucket1.add_point(point, point_id=point_id)
new_loss = bucket0.loss + bucket1.loss
new_total_loss -= old_loss # sub old loss from these buckets
new_total_loss += new_loss # add new loss from these buckets
loss_change = new_total_loss - total_loss
# if loss_change > .1: # should be nonincreasing
# print("got loss change: ", loss_change)
# print("old total loss:", total_loss)
# print("new total loss:", new_total_loss)
# assert loss_change <= .1 # should be nonincreasing
# # loss should be no worse than having new buckets unused
# assert loss_change <= .1
# if verbose > 2:
# print("-------- split point_id, val = ", point_id, val)
# print("bucket0 point ids, loss after update: ",
# bucket0.point_ids, bucket0.loss)
# print("bucket1 point ids, loss after update: ",
# bucket1.point_ids, bucket1.loss)
# print("loss change = {:.3f};\tnew_loss = {:.3f} ".format(
# loss_change, new_total_loss))
if loss_change < best_split.loss_change:
best_split.dim = d
best_split.val = val
best_split.loss_change = loss_change
if verbose > 2:
print("---------------------- split on dim={}, val={:.3f} ".format(
best_split.dim, best_split.val))
buckets = [buck.split(X, dim=best_split.dim, val=best_split.val)
for buck in buckets]
buckets = reduce(lambda b1, b2: b1 + b2, buckets) # flatten pairs
for i, buck in enumerate(buckets):
            ids = np.asarray(list(buck.point_ids), dtype=int)
bucket_assignments[ids] = i
total_loss = sum([bucket.loss for bucket in buckets])
# unused_dims.remove(best_split.dim)
splits.append(best_split)
if verbose > 3:
print('learn_splits(): new loss: {:.3f} from split at dim {}, '
'value {:.3f}'.format(
total_loss, best_split.dim, best_split.val))
if verbose > 2:
print('bucket losses: ')
print([bucket.loss for bucket in buckets])
print('bucket N, sumX, sumX2')
print([bucket.N for bucket in buckets])
print([list(bucket.sumX) for bucket in buckets])
print([list(bucket.sumX2) for bucket in buckets])
# for split in splits:
# split.val *= scale # undo preconditioning
# total_loss *= scale * scale
return splits, total_loss
def learn_splits_conditional(X, nsplits, dim_algo='greedy_var',
split_algo='mean', **sink):
N, D = X.shape
assert nsplits <= D
# unused_dims = set(np.arange(X.shape[1]))
col_means = X.mean(axis=0)
# dims = np.arange(D)
used_mask = np.ones(D, dtype=np.float32)
splits = []
buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
point_ids=np.arange(N))]
col_losses = np.zeros(D, dtype=np.float32)
for s in range(nsplits):
print("---- learning split {}/{}...".format(s + 1, nsplits))
print("current number of buckets: ", len(buckets))
# col_vars = X.var(axis=0)
col_losses[:] = 0
for buck in buckets:
col_losses += buck.col_sum_sqs()
col_losses *= used_mask
if dim_algo == 'greedy_var':
dim = np.argmax(col_losses)
used_mask[dim] = 0
if split_algo == 'mean':
val = col_means[dim]
new_buckets = []
for buck in buckets:
new_buckets += list(buck.split(X=X, dim=dim, val=val))
buckets = new_buckets
splits.append(Split(dim=dim, val=val))
return splits, -1
# def learn_splits_simple(X, nsplits, dim_algo='randunif', split_algo='mean',
# def learn_splits_simple(X, nsplits, dim_algo='greedy_var', split_algo='median',
def learn_splits_simple(X, nsplits, dim_algo='greedy_var', split_algo='mean',
**sink):
# unused_dims = set(np.arange(X.shape[1]))
unused_dims = list(np.arange(X.shape[1])) # random.choice can't use set
col_means = X.mean(axis=0)
col_vars = X.var(axis=0)
col_medians = np.median(X, axis=0)
# overall_mean = np.mean(col_means)
# overall_median = np.median(col_medians)
# overall_var = X.var()
var_idxs_descending = np.argsort(col_vars)[::-1]
splits = []
for s in range(nsplits):
if dim_algo == 'randunif':
dim = np.random.choice(unused_dims)
unused_dims.remove(dim)
elif dim_algo == 'greedy_var':
dim = var_idxs_descending[s]
if split_algo == 'mean':
val = col_means[dim]
elif split_algo == 'median':
val = col_medians[dim]
splits.append(Split(dim=dim, val=val))
return splits, -1
def learn_splits(X, nsplits, return_centroids=True, algo='multisplits',
**kwargs):
# indirect to particular func; will likely need to try something simpler
# for debugging and/or as experimental control
# return learn_splits_greedy(X, nsplits, **kwargs)
# return learn_splits_simple(X, nsplits, **kwargs)
# return learn_splits_conditional(X, nsplits, **kwargs)
# return learn_splits_greedy(X, nsplits) # TODO fwd kwargs
if algo == 'multisplits':
return learn_multisplits(
X, nsplits, return_centroids=return_centroids)
if algo == 'splits':
splits, loss = learn_splits_greedy(X, nsplits)
if return_centroids:
centroids = centroids_from_splits(X, splits)
return splits, loss, centroids
return splits, loss
def assignments_from_splits(X, splits):
nsplits = len(splits)
    indicators = np.empty((nsplits, len(X)), dtype=int)
for i, split in enumerate(splits):
indicators[i] = X[:, split.dim] > split.val
# compute assignments by treating indicators in a row as a binary num
# scales = (2 ** np.arange(nsplits)).astype(np.int)
    scales = (1 << np.arange(nsplits)).astype(int)
    return (indicators.T * scales).sum(axis=1).astype(int)
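# Tiny illustrative check, not part of the original module: with splits on
# dims 0 and 1 at threshold 0, the 2-bit code of a point is
# bit0 = (x[0] > 0), bit1 = (x[1] > 0). Assumes the Split(dim, val) helper
# defined earlier in this module.
def _sketch_assignments_from_splits():
    X = np.array([[-1., -1.], [1., -1.], [-1., 1.], [1., 1.]])
    splits = [Split(dim=0, val=0.), Split(dim=1, val=0.)]
    codes = assignments_from_splits(X, splits)
    assert np.array_equal(codes, [0, 1, 2, 3])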
def assignments_from_multisplits(X, splits):
N, _ = X.shape
nsplits = len(splits)
# indicators = np.zeros((nsplits, len(X)), dtype=np.int)
assert len(splits) >= 1
# dim0 = splits[0].dim
# assert len(splits[0].vals) == 1 # only 1 initial split
# indicators[0] = X > splits[0].vals[0]
max_ngroups = len(splits[-1].vals)
nsplits_affecting_group_id = int(np.log2(max_ngroups))
assert 1 << nsplits_affecting_group_id == max_ngroups # power of 2
# np.log2(max_nsplits)
    # determine group ids for each point; this is the annoying part, because
    # the number of bits in the group id grows after each split
    group_ids = np.zeros(N, dtype=int)
for i in range(min(nsplits, nsplits_affecting_group_id)):
split = splits[i]
vals = split.vals[group_ids]
# x = X[:, split.dim]
# if split.offset is not None:
# x = x - split.offset
# if split.scaleby is not None:
# x = x * split.scaleby
# indicators = x > vals
indicators = split.preprocess_x(X[:, split.dim]) > vals
group_ids = (group_ids * 2) + indicators
if nsplits <= nsplits_affecting_group_id:
return group_ids
# compute remaining bits
assignments = np.copy(group_ids)
for i in range(nsplits_affecting_group_id, nsplits):
split = splits[i]
vals = split.vals[group_ids]
# x = X[:, split.dim]
# if split.offset is not None:
# x = x - split.offset
# if split.scaleby is not None:
# x = x * split.scaleby
# indicators = x > vals
indicators = split.preprocess_x(X[:, split.dim]) > vals
assignments = (assignments * 2) + indicators
return assignments
def _centroids_from_assignments(X, assignments, ncentroids):
centroids = np.empty((ncentroids, X.shape[1]), dtype=X.dtype)
for c in range(ncentroids):
centroids[c] = X[assignments == c].mean(axis=0)
return centroids
def centroids_from_splits(X, splits):
ncentroids = int(1 << len(splits))
assignments = assignments_from_splits(X, splits)
return _centroids_from_assignments(X, assignments, ncentroids=ncentroids)
@_memory.cache
def learn_splits_in_subspaces(X, subvect_len, nsplits_per_subs,
return_centroids=True, algo='multisplits',
verbose=2):
N, D = X.shape
# N /= 100 # TODO rm after debug
splits_lists = []
    nsubs = int(np.ceil(D / subvect_len))
# stuff for sse stats
tot_sse = 0
X_bar = X - np.mean(X, axis=0)
col_sses = np.sum(X_bar * X_bar, axis=0) + 1e-14
tot_sse_using_mean = np.sum(col_sses)
if verbose > 1:
print("original sum of sses within each col: ", tot_sse_using_mean)
if return_centroids:
ncentroids = int(2 ** nsplits_per_subs)
# this order seems weird, but matches _learn_centroids, etc; helps with
# eventual vectorized lookups
centroids = np.empty((ncentroids, nsubs, subvect_len), dtype=X.dtype)
for m in range(nsubs):
start_col = m * subvect_len
end_col = start_col + subvect_len
X_subs = X[:, start_col:end_col]
splits, sse, subs_centroids = learn_splits(
X_subs, nsplits=nsplits_per_subs, verbose=(verbose - 1),
return_centroids=True, algo=algo)
centroids[:, m, :] = subs_centroids
splits_lists.append(splits)
tot_sse += sse
if verbose > 1:
# print("col sses in subspace: ", col_sses[start_col:end_col])
# print("sum col sses in subspace: ", col_sses[start_col:end_col].sum())
# print("buckets claim sse:", sse)
# print("N: ", N)
# print("(sse / N)", (sse / N))
# print("np.var(X_subs)", np.var(X_subs))
orig_sse_in_subs = col_sses[start_col:end_col].sum()
# print("learning splits: mse / var(X) in subs {}/{} = {:3g}".format(
# m + 1, nsubs, (sse / N) / np.var(X_subs)))
print("learning splits: sse / orig sse in subs {}/{} = {:3g}".format(
m + 1, nsubs, sse / orig_sse_in_subs))
# import sys; sys.exit()
# print("exiting after one subspace")
# import sys; sys.exit()
if verbose > 0:
print("-- learn_splits_in_subspaces: new / orig mse: {:.3g}".format(
tot_sse / tot_sse_using_mean))
# print("tot_sse_using_mean: ", tot_sse_using_mean)
if return_centroids:
return splits_lists, centroids
return splits_lists
def encode_using_splits(X, subvect_len, splits_lists, split_type='single'):
N, D = X.shape
    nsubs = int(np.ceil(D / subvect_len))
    X_enc = np.empty((X.shape[0], nsubs), dtype=int, order='f')
for m in range(nsubs):
start_col = m * subvect_len
end_col = start_col + subvect_len
X_subs = X[:, start_col:end_col]
if split_type == 'single':
X_enc[:, m] = assignments_from_splits(X_subs, splits_lists[m])
elif split_type == 'multi':
X_enc[:, m] = assignments_from_multisplits(X_subs, splits_lists[m])
return np.ascontiguousarray(X_enc)
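# Illustrative usage sketch, not part of the original module: learn multisplit
# codebooks in each disjoint subspace of X, then encode X with them. The
# numbers are arbitrary; assumes X has many rows and D is a multiple of
# subvect_len.
def _sketch_subspace_split_encoding(X, subvect_len=8, nsplits_per_subs=4):
    splits_lists, centroids = learn_splits_in_subspaces(
        X, subvect_len=subvect_len, nsplits_per_subs=nsplits_per_subs,
        return_centroids=True, algo='multisplits')
    X_enc = encode_using_splits(
        X, subvect_len, splits_lists, split_type='multi')
    return X_enc, centroids   # codes lie in [0, 2 ** nsplits_per_subs) per subspace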
def _plot_stuff_on_trace():
import matplotlib as mpl
import matplotlib.pyplot as plt
from joblib import Memory
_memory = Memory('.', verbose=0)
mpl.rcParams['lines.linewidth'] = .5
@_memory.cache
def _load_trace():
return np.loadtxt('assets/debug/Trace/Trace_TRAIN.txt')
# try_ndims = 128
# try_ndims = 64
try_ndims = 4
# limit_n = 20
# limit_n = 50
# limit_n = 200
limit_n = 500
# X = np.loadtxt('assets/debug/Trace/Trace_TRAIN.txt')[:limit_n]
X = _load_trace()[:limit_n]
    y = (X[:, 0] - 1).astype(int)
X = X[:, 1:]
_, axes = plt.subplots(3, 4, figsize=(13, 9), sharey=True)
colors = ('blue', 'red', 'green', 'black')
axes[0, 0].set_title('Trace Dataset\n(colored by class)')
for lbl in np.unique(y):
X_subset = X[y == lbl]
axes[0, 0].plot(X_subset.T, color=colors[lbl])
# visualize output with only 1 codebook (no need for updates)
ncodebooks = 1
splits, centroids, buckets = learn_mithral(
X, ncodebooks, return_buckets=True, try_ndims=try_ndims, niters=1)
centroids = centroids[0] # only one codebook
axes[0, 1].set_title('centroids')
axes[0, 1].plot(centroids.T)
X_hat = np.zeros_like(X)
for c, splitlist in enumerate(splits):
for s, split in enumerate(splitlist):
assert len(splitlist) == 4
vals = (split.vals / split.scaleby) + split.offset
for val in vals:
axes[0, c].scatter(split.dim, val, color=colors[s], marker='o', zorder=5)
for b in buckets[0]: # only one codebook, so use first list
if b.N > 0:
X_hat[b.point_ids] = b.col_means()
X_res = X - X_hat
axes[0, 2].set_title('reconstructions')
axes[0, 2].plot(X_hat.T)
# axes[0, 3].set_title('residuals (mean={:.2f})'.format(X_res.mean()))
axes[0, 3].set_title('residuals (var={:.2f})'.format(X_res.var()))
axes[0, 3].plot(X_res.T)
# visualize output with only 2 codebooks, no updates
ncodebooks = 2
splits, centroids, buckets = learn_mithral(
X, ncodebooks, return_buckets=True, try_ndims=try_ndims, niters=1)
# centroids = centroids[0] # only one codebook
axes[1, 0].set_title('centroids[0]')
axes[1, 0].plot(centroids[0].T)
axes[1, 1].set_title('centroids[1]')
axes[1, 1].plot(centroids[1].T)
X_hat = np.zeros_like(X)
# print("splits: ", splits)
for c, splitlist in enumerate(splits):
for s, split in enumerate(splitlist):
assert len(splitlist) == 4
vals = (split.vals / split.scaleby) + split.offset
for val in vals:
axes[1, c].scatter(split.dim, val, color=colors[s])
for c in range(len(buckets)): # for each codebook
for b, buck in enumerate(buckets[c]):
if buck.N > 0:
X_hat[buck.point_ids] += centroids[c, b]
X_res = X - X_hat
axes[1, 2].set_title('reconstructions')
axes[1, 2].plot(X_hat.T)
# axes[1, 3].set_title('residuals (mean={:.2f})'.format(X_res.mean()))
axes[1, 3].set_title('residuals (var={:.2f})'.format(X_res.var()))
axes[1, 3].plot(X_res.T)
# visualize output with only 2 codebooks, with centroid updates
ncodebooks = 2
splits, centroids, buckets = learn_mithral(
X, ncodebooks, return_buckets=True, try_ndims=try_ndims, niters=1)
axes[2, 0].set_title('centroids[0]')
axes[2, 0].plot(centroids[0].T)
axes[2, 1].set_title('centroids[1]')
axes[2, 1].plot(centroids[1].T)
X_hat = np.zeros_like(X)
for c in range(len(buckets)): # for each codebook
for b, buck in enumerate(buckets[c]):
if buck.N > 0:
X_hat[buck.point_ids] += centroids[c, b]
X_res = X - X_hat
axes[2, 2].set_title('reconstructions')
axes[2, 2].plot(X_hat.T)
# axes[2, 3].set_title('residuals (mean={:.2f})'.format(X_res.mean()))
axes[2, 3].set_title('residuals (var={:.2f})'.format(X_res.var()))
axes[2, 3].plot(X_res.T)
plt.tight_layout()
plt.show()
def test_encoded_ops():
N, C, K = 100, 8, 16
X_enc = np.random.randint(K, size=(N, C))
# print(X_enc)
X_bin = _densify_X_enc(X_enc)
# print(X_enc_binary)
assert np.all(X_bin.sum(axis=1) == C)
XtX = _XtX_encoded(X_enc)
XtX2 = X_bin.T @ X_bin
assert np.all(XtX == XtX2)
M = 17
Y = np.random.randn(N, M).astype(np.float32)
XtY = _XtY_encoded(X_enc, Y)
XtY2 = X_bin.T @ Y
# print(XtY[:2])
# print(XtY2[:2])
assert np.all(XtY == XtY2)
D = C * K
W = np.random.randn(D, M).astype(np.float32)
XW = _XW_encoded(X_enc, W)
XW2 = X_bin @ W
assert np.all(XW == XW2)
def main():
test_encoded_ops()
# print(_pq_codebook_start_end_idxs(6, 3))
# print(_pq_codebook_start_end_idxs(8, 3))
# print(_pq_codebook_start_end_idxs(9, 3))
# print(_pq_codebook_start_end_idxs(10, 3))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python

import abc

import numpy as np

# NOTE: the following imports are reconstructed from usage in this file;
# `clusterize`, `pq`, and `subs` are assumed to be sibling modules of this
# project, and `kmeans` is assumed to live in the project's clustering helpers.
import clusterize
import pq
import subs
from clusterize import kmeans  # assumed location of the kmeans helper
# ================================================================ misc funcs
def dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def dists_elemwise_l1(x, q):
return np.abs(x - q)
def dists_elemwise_dot(x, q):
return x * q
def extract_random_rows(X, how_many, remove_from_X=True):
split_start = np.random.randint(len(X) - how_many - 1)
split_end = split_start + how_many
rows = np.copy(X[split_start:split_end])
if remove_from_X:
return np.vstack((X[:split_start], X[split_end:])), rows
return X, rows
# XXX: not clear whether this function is correct in general, but
# does always pass the asserts (which capture the invariants we want)
def _insert_zeros(X, nzeros):
N, D = X.shape
D_new = D + nzeros
X_new = np.zeros((N, D_new), dtype=X.dtype)
# print("attempting to insert {} zeros into X of shape {}".format(nzeros, X.shape))
step = int(D / (nzeros + 1)) - 1
step = max(1, step)
# print("using step: ", step)
for i in range(nzeros):
in_start = step * i
in_end = in_start + step
# out_start = in_start + i + 1
out_start = (step + 1) * i
out_end = out_start + step
X_new[:, out_start:out_end] = X[:, in_start:in_end]
# out_start = out_end
# out_end += step
out_end += 1 # account for the last 0
remaining_len = D - in_end
out_remaining_len = D_new - out_end
# print "step", step
# print "in_start, in_end", in_start, in_end
# print "out_start, out_end", out_start, out_end
# print "D, D_new", D, D_new
# print "remaining_len, out_remaining_len", remaining_len, out_remaining_len
assert remaining_len == out_remaining_len
assert remaining_len >= 0
if remaining_len:
# X_new[:, out_end:out_end+remaining_len] = X[:, in_end:D]
X_new[:, out_end:] = X[:, in_end:]
# print("first cols of old and new X:")
# print(X[:, 0])
# print(X_new[:, 0])
# print(X_new.shape)
# print((X_new.sum(axis=0) != 0).sum())
assert X.shape[0] == X_new.shape[0]
cols_nonzero = X_new.sum(axis=0) != 0
orig_cols_nonzero = X.sum(axis=0) != 0
# new_cols_nonzero = cols_nonzero & (~orig_cols_nonzero)
# print("zero cols: ", np.where(~cols_nonzero)[0])
assert cols_nonzero.sum() == orig_cols_nonzero.sum()
nzeros_added = (~cols_nonzero).sum() - (~orig_cols_nonzero).sum()
assert nzeros_added == nzeros
# assert np.array_equal(X[:, 0], X_new[:, 0])
# assert np.array_equal(X[:, -1], X_new[:, -1])
return X_new
# def ensure_num_cols_multiple_of(X, multiple_of, min_ncols=-1):
def ensure_num_cols_multiple_of(X, multiple_of):
remainder = X.shape[1] % multiple_of
if remainder > 0:
return _insert_zeros(X, multiple_of - remainder)
# # TODO rm and uncomment above after debug
# add_ncols = multiple_of - remainder
# new_ncols = X.shape[1] + add_ncols
# new_X = np.zeros((X.shape[0], new_ncols), dtype=X.dtype)
# new_X[:, :X.shape[1]] = X
# return new_X
return X
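# Illustrative check, not part of the original module: padding X out to a
# multiple of `multiple_of` columns should spread zero columns through X
# without changing the number of nonzero columns (_insert_zeros asserts the
# same invariants internally).
def _sketch_check_zero_padding():
    X = np.arange(1, 3 * 10 + 1, dtype=np.float32).reshape(3, 10)
    X_pad = ensure_num_cols_multiple_of(X, 16)
    assert X_pad.shape == (3, 16)
    assert (X_pad.sum(axis=0) != 0).sum() == X.shape[1]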
def _learn_best_quantization(luts):
assert luts.ndim == 2 # luts can be a bunch of vstacked luts, but not 3D
best_loss = np.inf
best_alpha = None
best_floors = None
best_scale_by = None
for alpha in [.001, .002, .005, .01, .02, .05, .1]:
# alpha_pct = int(100 * alpha)
alpha_pct = 100 * alpha
# compute quantized luts this alpha would yield
floors = np.percentile(luts, alpha_pct, axis=0)
luts_offset = np.maximum(0, luts - floors)
ceil = np.percentile(luts_offset, 100 - alpha_pct)
scale_by = 255. / ceil
# if only_shift:
# scale_by = 1 << int(np.log2(scale_by))
        luts_quantized = np.floor(luts_offset * scale_by).astype(int)
luts_quantized = np.minimum(255, luts_quantized)
# compute err
luts_ideal = (luts - luts_offset) * scale_by
diffs = luts_ideal - luts_quantized
loss = np.sum(diffs * diffs)
if loss <= best_loss:
best_loss = loss
best_alpha = alpha
best_floors = floors
best_scale_by = scale_by
return best_floors, best_scale_by, best_alpha
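# Illustrative sketch, not part of the original module: applying the floors
# and scale learned above to quantize a lut to the 0-255 range, mirroring the
# clipping done in PQEncoder.encode_Q() below.
def _sketch_apply_lut_quantization(lut, floors, scale_by):
    lut_q = np.maximum(0, lut - floors)
    lut_q = np.floor(lut_q * scale_by).astype(np.int64)
    return np.minimum(lut_q, 255)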
# ================================================================ Quantizers
# ------------------------------------------------ Abstract Base Class
class MultiCodebookEncoder(abc.ABC):
def __init__(self, ncodebooks, ncentroids=256,
quantize_lut=False, upcast_every=-1, accumulate_how='sum'):
self.ncodebooks = ncodebooks
self.ncentroids = ncentroids
self.quantize_lut = quantize_lut
self.upcast_every = upcast_every if upcast_every >= 1 else 1
self.upcast_every = min(self.ncodebooks, self.upcast_every)
assert self.upcast_every in (1, 2, 4, 8, 16, 32, 64, 128, 256)
self.accumulate_how = accumulate_how
self.code_bits = int(np.log2(self.ncentroids))
# for fast lookups via indexing into flattened array
        self.offsets = (np.arange(self.ncodebooks, dtype=int) *
self.ncentroids)
def name(self):
return "{}_{}x{}b_quantize={}".format(
self.preproc, self.ncodebooks, self.code_bits,
int(self.quantize_lut))
def params(self):
return {'ncodebooks': self.ncodebooks,
'code_bits': self.code_bits, 'quantize': self.quantize_lut}
def _learn_lut_quantization(self, X, Q=None):
if self.quantize_lut: # TODO put this logic in separate function
print("learning quantization...")
# print("initial Q: ", Q)
if Q is None:
# num_rows = min(10 * 1000, len(X) // 2)
# _, queries = extract_random_rows(
# X[num_rows:], how_many=1000, remove_from_X=False)
# X = X[:num_rows] # limit to first 10k rows of X
_, Q = extract_random_rows(
X, how_many=1000, remove_from_X=False)
Q = Q.T # want each row to be one query, not each col
# Q = self._pad_ncols(Q)
# if self.preproc == 'OPQ':
# Q = pq.opq_rotate(Q, self.R)
# elif self.preproc == 'BOPQ':
# Q = pq.bopq_rotate(Q, self.rotations)
# elif self.preproc == 'GEHT':
# Q = Q[:, self.perm]
# print("Q shape: ", Q.shape)
# compute luts for all the queries
# luts = [self.encode_Q(q, quantize=False) for q in Q]
luts = self.encode_Q(Q, quantize=False)
# luts = np.vstack(luts)
# print("ncodebooks: ", self.ncodebooks)
# print("luts shape: ", luts.shape)
assert luts.shape == (len(Q), self.ncodebooks, self.ncentroids)
luts = np.moveaxis(luts, 2, 1)
assert luts.shape == (len(Q), self.ncentroids, self.ncodebooks)
luts = luts.reshape(len(Q) * self.ncentroids, self.ncodebooks)
self.lut_offsets, self.scale_by, _ = _learn_best_quantization(luts)
# print("self.lut_offsets.shape", self.lut_offsets.shape)
# print("self.scale_by.shape", self.scale_by.shape)
# print("self.scale_by", self.scale_by)
assert self.lut_offsets.shape == (self.ncodebooks,)
# self.lut_offsets = self.lut_offsets[:, np.newaxis]
self.total_lut_offset = np.sum(self.lut_offsets)
# print("lut offsets: ", self.lut_offsets)
def dists_enc(self, X_enc, Q_luts, unquantize=True,
offset=None, scale=None):
X_enc = np.ascontiguousarray(X_enc)
if unquantize:
offset = self.total_lut_offset if offset is None else offset
scale = self.scale_by if scale is None else scale
all_dists = np.empty((len(Q_luts), len(X_enc)), dtype=np.float32)
for i, lut in enumerate(Q_luts):
centroid_dists = lut.ravel()[X_enc.ravel()]
dists = centroid_dists.reshape(X_enc.shape)
if self.upcast_every < 2 or not self.quantize_lut:
dists = dists.sum(axis=-1)
else:
dists = dists.reshape(dists.shape[0], -1, self.upcast_every)
if self.accumulate_how == 'sum':
# sum upcast_every vals, then clip to mirror saturating
# unsigned addition, then sum without saturation (like u16)
dists = dists.sum(2)
dists = np.clip(dists, 0, 255).sum(axis=-1)
elif self.accumulate_how == 'mean':
# mirror hierarchical avg_epu8
# print("reducing using mean!")
# print("fraction of low bits that are 1: ",
# np.mean(dists % 2 == 1)) # ya, ~.5, or maybe ~.495
while dists.shape[-1] > 2:
dists = (dists[:, :, ::2] + dists[:, :, 1::2] + 1) // 2
dists = (dists[:, :, 0] + dists[:, :, 1] + 1) // 2
dists = dists.sum(axis=-1) # clipping not needed
# undo biasing; if low bits are {0,0} or {1,1}, no bias
# from the averaging; but if {0,1}, then rounds up by
# .5; happens with prob ~=~ .5, so each avg op adds .25;
# the other tricky thing here is that rounding up when
# you're averaging averages biases it even farther
# base_bias = .5 * .5
# assert self.upcast_every >= 2
# bias_per_upcast = 0
# nlevels = int(np.log2(self.upcast_every))
# for level in range(nlevels):
# num_avg_ops = self.upcast_every / (2 << level)
# print("num_avg_ops: ", num_avg_ops)
# bias_per_op = (1 << level) * base_bias
# print("level multiplier: ", 1 << level)
# bias_per_upcast += num_avg_ops * bias_per_op
# bias = bias_per_upcast * (self.ncodebooks / self.upcast_every)
# num_avg_ops = (self.upcast_every - 1) * (
# self.ncodebooks / self.upcast_every)
# num_avg_ops = (self.upcast_every - 1) * np.sqrt(
# self.ncodebooks / self.upcast_every)
# num_avg_ops = (self.upcast_every - 1)
# bias = num_avg_ops * base_bias
# bias = (self.ncodebooks / 2) * int(np.log2(self.upcast_every))
# bias = (self.ncodebooks / 2) * int(np.log2(self.upcast_every))
# bias = 0
# dists -= int(bias * self.upcast_every)
dists *= self.upcast_every # convert mean to sum
# I honestly don't know why this is the formula, but wow
# does it work well
bias = self.ncodebooks / 4 * np.log2(self.upcast_every)
dists -= int(bias)
else:
raise ValueError("accumulate_how must be 'sum' or 'mean'")
if self.quantize_lut and unquantize:
# dists = (dists / self.scale_by) + self.total_lut_offset
dists = (dists / scale) + offset
all_dists[i] = dists
return all_dists.T
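# Illustrative sketch, not part of the original module: the 'mean' accumulation
# above emulates a saturating pairwise-average reduction (like avg_epu8) and
# then rescales by upcast_every; the ncodebooks / 4 * log2(upcast_every) term
# in dists_enc() compensates for the upward rounding bias this introduces.
def _sketch_pairwise_mean_vs_sum(ncodebooks=16, upcast_every=8):
    # upcast_every must be a power of 2 that is >= 2 and divides ncodebooks
    vals = np.random.randint(256, size=(1, ncodebooks)).astype(np.int64)
    exact = vals.sum(axis=-1)
    v = vals.reshape(1, -1, upcast_every)
    while v.shape[-1] > 2:                     # hierarchical averaging, rounding up
        v = (v[:, :, ::2] + v[:, :, 1::2] + 1) // 2
    v = (v[:, :, 0] + v[:, :, 1] + 1) // 2
    approx = v.sum(axis=-1) * upcast_every     # convert means back into a sum
    return exact, approx                       # approx is biased slightly upward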
# ------------------------------------------------ Product Quantization
def _learn_centroids(X, ncentroids, ncodebooks, subvect_len):
ret = np.empty((ncentroids, ncodebooks, subvect_len))
# print("_learn_centroids(): running kmeans...")
tot_sse = 0
X_bar = X - np.mean(X, axis=0)
col_sses = np.sum(X_bar * X_bar, axis=0) + 1e-14
tot_sse_using_mean = np.sum(col_sses)
for i in range(ncodebooks):
print("running kmeans in subspace {}/{}...".format(
i + 1, ncodebooks), end=" ")
start_col = i * subvect_len
end_col = start_col + subvect_len
X_in = X[:, start_col:end_col]
# centroids, labels = kmeans(X_in, ncentroids)
centroids, labels, sse = kmeans(X_in, ncentroids, return_sse=True)
# X_bar = X_in - np.mean(X_in, axis=0)
# sse_using_mean = np.sum(X_bar * X_bar) + 1e-14
subspace_sse = np.sum(col_sses[start_col:end_col])
print("mse / {{var(X_subs), var(X)}}: {:.3g}, {:.3g}".format(
sse / subspace_sse, sse * ncodebooks / tot_sse_using_mean))
tot_sse += sse
# print("centroids shape: ", centroids.shape)
# print("ret shape: ", ret.shape)
ret[:, i, :] = centroids
print("--- total mse / var(X): {:.3g}".format(tot_sse / tot_sse_using_mean))
return ret
def _parse_codebook_params(D, code_bits=-1, bits_per_subvect=-1, ncodebooks=-1):
if ncodebooks < 0:
ncodebooks = code_bits // bits_per_subvect
elif code_bits < 1:
code_bits = bits_per_subvect * ncodebooks
elif bits_per_subvect < 1:
bits_per_subvect = code_bits // ncodebooks
ncentroids = int(2 ** bits_per_subvect)
subvect_len = D // ncodebooks
assert code_bits % bits_per_subvect == 0
if D % subvect_len:
print("D, ncodebooks, subvect_len = ", D, ncodebooks, subvect_len)
assert D % subvect_len == 0 # TODO rm this constraint
return ncodebooks, ncentroids, subvect_len
def _fit_pq_lut(q, centroids, elemwise_dist_func):
_, ncodebooks, subvect_len = centroids.shape
q = q.reshape((1, ncodebooks, subvect_len))
q_dists = np.sum(centroids * q, axis=-1)
return q_dists # ncentroids, ncodebooks, row-major
class PQEncoder(MultiCodebookEncoder):
def __init__(self, ncodebooks, ncentroids=256,
elemwise_dist_func=dists_elemwise_dot,
preproc='PQ', encode_algo=None, quantize_lut=False,
upcast_every=-1, accumulate_how='sum',
**preproc_kwargs):
super().__init__(
ncodebooks=ncodebooks, ncentroids=ncentroids,
quantize_lut=quantize_lut, upcast_every=upcast_every,
accumulate_how=accumulate_how)
self.elemwise_dist_func = elemwise_dist_func
self.preproc = preproc
self.encode_algo = encode_algo
self.preproc_kwargs = preproc_kwargs
def _pad_ncols(self, X):
return ensure_num_cols_multiple_of(X, self.ncodebooks)
def fit(self, X, Q=None):
self.subvect_len = int(np.ceil(X.shape[1] / self.ncodebooks))
X = self._pad_ncols(X)
self.centroids = None
if self.preproc == 'BOPQ':
self.centroids, _, self.rotations = pq.learn_bopq(
X, ncodebooks=self.ncodebooks, codebook_bits=self.code_bits,
**self.preproc_kwargs)
elif self.preproc == 'OPQ':
self.centroids, _, self.R = pq.learn_opq(
X, ncodebooks=self.ncodebooks, codebook_bits=self.code_bits,
**self.preproc_kwargs)
elif self.preproc == 'GEHT':
self.perm = subs.greedy_eigenvector_threshold(
X, subspace_len=self.subvect_len, **self.preproc_kwargs)
assert X.shape[1] == len(set(self.perm))
X = X[:, self.perm]
if self.centroids is None:
if self.encode_algo in ('splits', 'multisplits'):
assert self.encode_algo != 'splits' # TODO rm
self.splits_lists, self.centroids = \
clusterize.learn_splits_in_subspaces(
X, subvect_len=self.subvect_len,
nsplits_per_subs=self.code_bits, algo=self.encode_algo)
# print("centroids shape: ", self.centroids.shape)
# # TODO rm
# # yep, yields identical errs as mithral with pq_perm_algo='end'
# self.splits_lists, self.centroids = clusterize.learn_mithral(
# X, ncodebooks=self.ncodebooks)
# print("centroids shape: ", self.centroids.shape)
else:
self.centroids = _learn_centroids(
X, self.ncentroids, self.ncodebooks, self.subvect_len)
self._learn_lut_quantization(X, Q)
def name(self):
return "{}_{}".format(self.preproc, super().name())
def params(self):
d = super().params()
d['_preproc'] = self.preproc
return d
def encode_Q(self, Q, quantize=True):
# quantize param enables quantization if set in init; separate since
# quantization learning needs to call this func, but vars like
# lut_offsets aren't set when this function calls it
Q = np.atleast_2d(Q)
Q = self._pad_ncols(Q)
if self.preproc == 'OPQ':
Q = pq.opq_rotate(Q, self.R)
elif self.preproc == 'BOPQ':
Q = pq.bopq_rotate(Q, self.rotations)
elif self.preproc == 'GEHT':
Q = Q[:, self.perm]
luts = np.zeros((Q.shape[0], self.ncodebooks, self.ncentroids))
# print("Q shape: ", Q.shape)
for i, q in enumerate(Q):
lut = _fit_pq_lut(q, centroids=self.centroids,
elemwise_dist_func=self.elemwise_dist_func)
if self.quantize_lut and quantize:
lut = np.maximum(0, lut - self.lut_offsets)
                lut = np.floor(lut * self.scale_by).astype(int)
lut = np.minimum(lut, 255)
luts[i] = lut.T
return luts
def encode_X(self, X, **sink):
X = self._pad_ncols(X)
if self.preproc == 'OPQ':
X = pq.opq_rotate(X, self.R)
elif self.preproc == 'BOPQ':
X = pq.bopq_rotate(X, self.rotations)
elif self.preproc == 'GEHT':
X = X[:, self.perm]
if self.encode_algo in ('splits', 'multisplits'):
split_type = ('multi' if self.encode_algo == 'multisplits'
else 'single')
idxs = clusterize.encode_using_splits(
X, self.subvect_len, self.splits_lists, split_type=split_type)
else:
idxs = pq._encode_X_pq(X, codebooks=self.centroids)
return idxs + self.offsets # offsets let us index into raveled dists
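# Illustrative usage sketch, not part of the original module: a PQEncoder can
# approximate X @ Q.T by encoding X once and summing per-codebook lut entries
# for each query. ncentroids=16 keeps the k-means cheap; with the default
# quantize_lut=False we pass unquantize=False, since no lut offsets get learned.
def _sketch_pq_encoder_amm(X, Q, ncodebooks=8):
    enc = PQEncoder(ncodebooks=ncodebooks, ncentroids=16)
    enc.fit(X)
    X_enc = enc.encode_X(X)
    luts = enc.encode_Q(Q)    # one (ncodebooks, ncentroids) lut per row of Q
    return enc.dists_enc(X_enc, luts, unquantize=False)  # (len(X), len(Q)) ~= X @ Q.T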
# ------------------------------------------------ Mithral
# def _mithral_quantize_luts(luts, lut_work_const, force_power_of_2=False):
def _mithral_quantize_luts(luts, lut_work_const, force_power_of_2=True):
nqueries, ncodebooks, ncentroids = luts.shape
# if lut_work_const < 0: # not time constrained
# assert luts.shape == (nqueries, ncodebooks, ncentroids)
# luts2d = np.moveaxis(luts, 2, 1)
# assert luts2d.shape == (nqueries, ncentroids, ncodebooks)
# luts2d = luts2d.reshape(nqueries * ncentroids, ncodebooks)
# # if True:
# if False:
# # ax = sb.distplot(luts.ravel(), hist=False, rug=True)
# _, ax = plt.subplots(1, figsize=(13, 5))
# # sb.violinplot(data=luts2d, inner='point', ax=ax)
# # sb.boxenplot(data=luts2d, ax=ax)
# means = luts2d.mean(axis=0)
# # # rm largest and smallest entry in each col
# # argmaxs = np.argmax(luts2d, axis=0)
# # argmins = np.argmax(luts2d, axis=0)
# # for c in range(luts.shape[1]):
# # luts2d[argmins[c], c] = means[c]
# # luts2d[argmaxs[c], c] = means[c]
# maxs = luts2d.max(axis=0)
# mins = luts2d.min(axis=0)
# gaps = maxs - mins
# max_idx = np.argmax(gaps)
# print(f"biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"2nd biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"3rd biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"4th biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"5th biggest gap = {np.max(gaps)} at idx {max_idx}")
# # for i in range(len(luts2d)):
# # row = luts2d[i]
# # luts2d[i, row == mins] = means
# # luts2d[i, row == maxs] = means
# luts2d -= mins
# # luts2d -= means
# # luts2d *= 255 / (maxs - mins).max()
# luts2d *= 255 / gaps.max()
# luts2d = np.minimum(luts2d, 255)
# sb.stripplot(data=luts2d, ax=ax, size=4)
# ax.set_xlabel('Query dist to centroids (lut dist histogram)')
# ax.set_ylabel('Fraction of queries')
# plt.show()
# import sys; sys.exit()
# offsets, scale, _ = _learn_best_quantization(luts2d)
# offsets = offsets[np.newaxis, :, np.newaxis]
# luts = np.maximum(0, luts - offsets) * scale
# luts = np.floor(luts).astype(np.int)
# luts = np.minimum(255, luts)
# return luts, offsets.sum(), scale
# luts = np.zeros((Q.shape[0], self.ncodebooks, self.ncentroids))
mins = luts.min(axis=(0, 2))
maxs = luts.max(axis=(0, 2))
gaps = maxs - mins
# gaps[np.argmax(gaps)] = 0 # use 2nd highest
gap = np.max(gaps)
if force_power_of_2:
exponent = np.ceil(np.log2(gap))
scale = 2 ** int(-exponent) # scale is a power of 2, so can just shift
scale *= (255.5 - 1e-10) # so max val is at most 255
else:
scale = (255.5 - 1e-10) / gap
offsets = mins[np.newaxis, :, np.newaxis]
luts_quantized = (luts - offsets) * scale
    luts_quantized = (luts_quantized + .5).astype(int)
# luts_quantized = np.minimum(luts_quantized, 255)
assert np.min(luts_quantized) >= 0
assert np.max(luts_quantized) <= 255.
# print("total offset: ", mins.sum())
return luts_quantized, offsets.sum(), scale
# # compute offset taking into account stuff getting rounded down
# luts_hat = (luts / scale) + offsets
# diffs = luts - luts_hat
# print("mean of diffs: ", diffs.mean())
# offset = diffs.mean() + offsets.sum()
# return luts_quantized, offset, scale
class MithralEncoder(MultiCodebookEncoder):
def __init__(self, ncodebooks, lut_work_const=-1):
super().__init__(
ncodebooks=ncodebooks, ncentroids=16,
# quantize_lut=True, upcast_every=64,
# quantize_lut=True, upcast_every=32,
quantize_lut=True, upcast_every=16,
# quantize_lut=True, upcast_every=8,
# quantize_lut=True, upcast_every=4,
# quantize_lut=True, upcast_every=2,
# quantize_lut=True, upcast_every=1,
accumulate_how='mean')
self.lut_work_const = lut_work_const
def name(self):
return "{}_{}".format('mithral', super().name())
def params(self):
return {'ncodebooks': self.ncodebooks,
'lut_work_const': self.lut_work_const}
def fit(self, X, Q=None):
self.splits_lists, self.centroids = clusterize.learn_mithral(
X, self.ncodebooks, lut_work_const=self.lut_work_const)
# self._learn_lut_quantization(X, Q)
def encode_X(self, X):
idxs = clusterize.mithral_encode(X, self.splits_lists)
return idxs + self.offsets
def encode_Q(self, Q, quantize=True):
Q = np.atleast_2d(Q)
luts = np.zeros((Q.shape[0], self.ncodebooks, self.ncentroids))
for i, q in enumerate(Q):
luts[i] = clusterize.mithral_lut(q, self.centroids)
if self.quantize_lut:
luts, offset, scale = _mithral_quantize_luts(luts, self.lut_work_const)
return luts, offset, scale
return luts, 0, 1
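# Illustrative usage sketch, not part of the original module: approximating
# A @ B with a MithralEncoder. The columns of B act as the queries; the luts
# are quantized to uint8, and dists_enc() undoes that using the returned
# offset and scale.
def _sketch_mithral_amm(A, B, ncodebooks=16):
    enc = MithralEncoder(ncodebooks=ncodebooks)
    enc.fit(A)
    A_enc = enc.encode_X(A)
    luts, offset, scale = enc.encode_Q(B.T)
    return enc.dists_enc(A_enc, luts, offset=offset, scale=scale)  # ~= A @ B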
def main():
    X = np.ones((3, 75), dtype=int)
_insert_zeros(X, 53)
if __name__ == '__main__':
main()
|
#!/bin/env/python

import os
import pathlib as pl

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

# from . import files
# NOTE: imports reconstructed from usage below; the two project modules giving
# timing results (`res`) and method lists (`ameth`) are assumed names here.
import amm_results as res    # assumed module name
import amm_methods as ameth  # assumed module name
sb.set_context('poster')
# sb.set_context('talk')
# sb.set_cmap('tab10')
RESULTS_DIR = pl.Path('results/amm')
FIGS_SAVE_DIR = pl.Path('../figs/amm')
if not os.path.exists(FIGS_SAVE_DIR):
FIGS_SAVE_DIR.mkdir(parents=True)
def save_fig(name):
plt.savefig(os.path.join(FIGS_SAVE_DIR, name + '.png'),
dpi=300, bbox_inches='tight')
def _xlabel_for_xmetric(x_metric):
return {'d': 'Sketch Size',
'secs': 'Time (s)',
'muls': 'Number of Multiplies',
'nlookups': 'Number of Lookups',
'ops': 'Number of Operations',
'Latency': 'Latency (ms)',
'Throughput': 'Throughput (elements/s)'}[x_metric]
# if x_metric == 'd':
# return 'Log2(Sketch Size)'
# elif x_metric == 'secs':
# return 'Time (s)'
# elif x_metric == 'muls':
# # return 'Log10(# of Multiplies)'
# return 'Number of Multiplies'
# elif x_metric == 'nlookups':
# # return 'Log10(# of Table Lookups)'
# return 'Number of Table Lookups'
# elif x_metric == 'ops':
# # return 'Log10(# of Operations)'
# return 'Number of Operations'
# elif x_metric == 'Latency':
# return 'Latency (ms)'
def _clean_results_df(df, default_D=None):
# for Exact, set d = D
if default_D is not None and ('d' in df):
mask = df['d'].isna()
df.loc[mask, 'd'] = default_D
# clean up column names + other strings
for old, new in [('method', 'Method'), ('acc_amm', 'Accuracy'),
('r_sq', 'R-Squared'), ('nmultiplies', 'muls')]:
try:
df.rename({old: new}, axis=1, inplace=True)
except KeyError:
pass
# replace_dict = {'Bolt+MultiSplits': 'Ours',
# replace_dict = {'Mithral': 'Ours',
replace_dict = {'Mithral': 'Ours',
'MithralPQ': 'OursPQ',
'Exact': 'Brute Force',
'CooccurSketch': 'CD'}
# def _replace_method_name(name):
# return replace_dict.get(name, name)
df['Method'] = df['Method'].apply(lambda s: replace_dict.get(s, s))
# create ops column that sums number of multiplies + lookups
df['muls'] = df['muls'].fillna(0)
mask = ~df['nlookups'].isna()
df['ops'] = df['muls']
    df.loc[mask, 'ops'] += df.loc[mask, 'nlookups']  # avoid chained-assignment pitfalls
# df['muls'] = np.log10(df['muls'])
# df['ops'] = np.log10(df['ops'])
# join with cpp timing results
matmul_latencies, matmul_thruputs = res.load_matmul_times_for_n_d_m()
sketch_latencies, sketch_thruputs = res.load_sketch_times_for_n_d_m()
# multisplit_latencies, multisplit_thruputs = \
# res.load_multisplit_times_for_n_d_m()
mithral_latencies, mithral_thruputs = res.load_mithral_times_for_n_d_m()
bolt_latencies, bolt_thruputs = res.load_bolt_times_for_n_d_m()
# row_dicts = []
all_latencies = []
all_thruputs = []
# for _, row in df.itertuples():
# print("d col: ")
# print(df['d'])
fast_sketch_methods = set([m.lower() for m in ameth.FAST_SKETCH_METHODS])
slow_sketch_methods = set([m.lower() for m in ameth.SLOW_SKETCH_METHODS])
for _, row in df.iterrows():
# row = dict(*row)
N, D, M = [int(row[k]) for k in ('N', 'D', 'M')]
method = row['Method'].lower()
# if 'split' in method.lower():
# print("using method: ", method)
if method in ('bolt', 'ours', 'ourspq'):
# TODO check if in vq methods, instead of hardcoding
ncodebooks = int(row['ncodebooks'])
key = (N, D, M, ncodebooks)
if method in ('ours', 'ourspq'):
# latencies = multisplit_latencies[key]
# thruputs = multisplit_thruputs[key]
latencies = mithral_latencies[key]
thruputs = mithral_thruputs[key]
elif method == 'bolt':
latencies = bolt_latencies[key]
thruputs = bolt_thruputs[key]
# all_latencies.append(np.median(latencies))
# all_thruputs.append(np.median(thruputs))
elif method == 'brute force':
key = (N, D, M)
latencies = matmul_latencies[key]
thruputs = matmul_thruputs[key]
elif method in fast_sketch_methods:
d = int(row['d'])
key = (N, D, M, d)
latencies = sketch_latencies[key]
thruputs = sketch_thruputs[key]
else: # slow sketch-based methods
# print("method: ", method)
# assert method in slow_sketch_methods
# print("method: ", method)
# print("fast sketch methods: ", fast_sketch_methods)
# assert False # TODO rm
secs = row['secs']
lat = secs * 1000
thruput = N * M / secs
latencies = [lat]
thruputs = [thruput]
# print("d: ", d)
# print("key:", key)
# print("sketch_latencies:")
# import pprint
# pprint.pprint(sketch_latencies)
# secs = row['secs']
# lat = secs * 1000
# thruput = N * M / secs
# # # version where we pretend same efficiency as matmul
# # nmuls = int(row['muls'])
# # exact_nmuls = N * D * M
# # scale = nmuls / exact_nmuls
# # lat *= scale
# # thruput /= scale
# all_latencies.append(lat)
# all_thruputs.append(thruput)
all_latencies.append(np.mean(latencies))
all_thruputs.append(np.mean(thruputs))
# print("len latencies: ", len(all_latencies))
# print("len thruputs: ", len(all_thruputs))
# print("df len: ", df.shape[0])
df['Latency'] = all_latencies
df['Throughput'] = all_thruputs
print("cleaned df:\n", df)
# print(df)
# print(df.loc[:11])
# print(df.loc[10:])
# for row in df.iterrows():
# print(row)
# import sys; sys.exit()
# make stuff log scale
# if 'd' in df:
# df['d'] = np.log2(df['d']).astype(np.int32)
df['Log10(MSE)'] = np.log10(1. - df['R-Squared'] + 1e-10)
df = df.sort_values('Method', axis=0)
return df
def make_cifar_fig(x_metric='d', y_metric='Accuracy'):
# fig, axes = plt.subplots(2, 1, figsize=(6, 9), sharex=True)
fig, axes = plt.subplots(2, 1, figsize=(11, 13.5), sharex=True)
df10 = pd.read_csv(RESULTS_DIR / 'cifar10.csv')
df100 = pd.read_csv(RESULTS_DIR / 'cifar100.csv')
# dfs = (df10, df100)
# for df in dfs:
df10 = df10.loc[~(df10['ncodebooks'] < 4)]
df100 = df100.loc[~(df100['ncodebooks'] < 4)]
# if x_metric in ('Latency', 'Throughput'):
# # TODO get results for PQ + Bolt
# # df10 = df10.loc[~df10['method'].isin(['PQ', 'Bolt'])]
# # include_methods = ('Bolt+MultiSplits', 'Bolt', 'Exact')
# include_methods = ['Bolt+MultiSplits', 'Bolt', 'Exact']
# include_methods += 'PQ SVD FD-AMM CooccurSketch'.split() # TODO rm
# # print("uniq methods: ", df10['method'].unique())
# # df10 = df10.loc[~df10['method'].isin(['PQ'])]
# df10 = df10.loc[df10['method'].isin(include_methods)]
# # df100 = df100.loc[~df100['method'].isin(['PQ', 'Bolt'])]
# # df100 = df100.loc[~df100['method'].isin(['PQ'])]
# df100 = df100.loc[df100['method'].isin(include_methods)]
df10 = _clean_results_df(df10, default_D=512)
df100 = _clean_results_df(df100, default_D=512)
def lineplot(data, ax):
# order = 'Ours Bolt Exact PQ SVD FD-AMM CD'.split()
# order = [m for m in order if m in data['Method'].unique()]
order = list(data['Method'].unique())
move_methods_to_front = ['Ours', 'OursPQ', 'Brute Force']
for elem in move_methods_to_front[:]:
if elem in order:
order.remove(elem)
else:
move_methods_to_front.remove(elem)
order = move_methods_to_front + order
# order = None
# print("uniq methods:\n", data['Method'].unique())
# print("using order:\n", order)
# cmap = plt.get_cmap('tab10')
# palette = {'Ours': 'red', 'Bolt': cmap(0), 'Exact': cmap(1),
# 'PQ': cmap(2), 'SVD': cmap(4), 'FD-AMM': cmap(5),
# 'CD': cmap(6)}
palette = None
# have to specify markers or seaborn freaks out because it doesn't
# have enough of them
filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h',
'H', 'D', 'd', 'P', 'X')
sb.lineplot(data=data, x=x_metric, y=y_metric, hue='Method',
style='Method', style_order=order, hue_order=order,
# markers=True, dashes=False, ax=ax, palette=palette)
markers=filled_markers, dashes=False, ax=ax, palette=palette)
# palette='tab10')
lineplot(df10, axes[0])
lineplot(df100, axes[1])
# plt.suptitle('Sketch size vs Classification Accuracy')
xlbl = _xlabel_for_xmetric(x_metric)
# plt.suptitle('{} vs {}'.format(xlbl, y_metric))
plt.suptitle('Approximating Softmax Layers')
axes[0].set_title('CIFAR-10')
for ax in axes:
ax.set_ylabel(y_metric)
axes[0].set_xlabel(None)
axes[1].set_xlabel(xlbl)
axes[1].set_title('CIFAR-100')
handles, labels = axes[0].get_legend_handles_labels()
handles, labels = handles[1:], labels[1:] # rm 'Method' title
axes[0].legend(handles, labels, fontsize='small')
# axes[1].legend(handles, labels, fontsize='small')
# plt.figlegend(handles, labels, loc='lower center', ncol=1)
# plt.figlegend(handles, labels, loc='center right', ncol=1)
axes[1].get_legend().remove()
# axes[1].get_legend().remove()
if x_metric in ('muls', 'ops', 'nlookups', 'Latency', 'Throughput'):
axes[0].semilogx()
plt.tight_layout()
# plt.subplots_adjust(top=.92, bottom=.2)
plt.subplots_adjust(top=.92, bottom=.22)
save_fig('cifar_{}_{}'.format(x_metric, y_metric))
# def make_ecg_fig(y_metric='R-Squared'):
def make_ecg_fig(x_metric='d'):
fig, axes = plt.subplots(2, 1, figsize=(6, 9))
df = pd.read_csv(RESULTS_DIR / 'ecg.csv')
df = _clean_results_df(df, default_D=24)
# D = 24
# if 'd' in df:
# mask = df['d'].isna()
# df.loc[mask, 'd'] = D
# df['d'] = np.log2(df['d'])
# df.rename({'method': 'Method', 'acc_amm': 'Accuracy',
# 'r_sq': 'R-Squared', 'nmultiplies': 'muls'},
# axis=1, inplace=True)
# df['Log10(MSE)'] = np.log10(1. - df['R-Squared'] + 1e-10) # avoid log10(0)
# df['muls'] = df['muls'].fillna(0)
# df['nlookups'] = df['nlookups'].fillna(0)
# # mask = ~df['nlookups'].isna()
# # print("mask: ", mask)
# # print('muls, nlookups')
# # print(df[['muls', 'nlookups']])
# # add_to_muls = df['nlookups'].loc[mask]
# equivalent_muls = df['muls'].add(df['nlookups'])
# # df['muls'] = equivalent_muls
# df['muls'] = equivalent_muls
# # import sys; sys.exit()
# df['muls'] = np.log10(df['muls'])
df['Compression Ratio'] = df['nbytes_orig'] / df['nbytes_blosc_byteshuf']
def lineplot(data, ycol, ax):
sb.lineplot(data=data, hue='Method', x=x_metric, y=ycol,
style='Method', markers=True, dashes=False, ax=ax)
lineplot(df, ycol='R-Squared', ax=axes[0])
lineplot(df, ycol='Compression Ratio', ax=axes[1])
xlbl = _xlabel_for_xmetric(x_metric)
axes[0].set_title('ECG: {} vs R-Squared'.format(xlbl))
axes[1].set_title('ECG: {} vs Compression Ratio'.format(xlbl))
axes[0].set_ylim([0, 1])
axes[0].set_ylabel('R-Squared')
axes[1].set_ylabel('Compression Ratio')
axes[1].set_xlabel(xlbl)
if x_metric in ('muls', 'ops', 'nlookups'):
axes[0].semilogx()
# axes[0].semilogx()
plt.tight_layout()
plt.subplots_adjust(top=.92, bottom=.2)
save_fig('ecg_{}'.format(x_metric))
def make_caltech_fig(x_metric='d'):
"""x_metric should be in {'d', 'secs', 'muls'}"""
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
df = pd.read_csv(RESULTS_DIR / 'caltech.csv')
df = _clean_results_df(df, default_D=27)
sb.lineplot(data=df, hue='Method', x=x_metric, y='Log10(MSE)',
style='Method', markers=True, dashes=False, ax=ax)
ax.set_ylabel('Log10(MSE + 1e-10)')
if x_metric == 'd':
ax.set_title('Caltech: Sketch Size vs Log Squared Error')
ax.set_xlabel('Log2(Sketch Size)')
elif x_metric == 'secs':
ax.set_title('Caltech: Computation Time vs Log Squared Error')
ax.set_xlabel('Time (s)')
elif x_metric == 'muls':
ax.set_title('Caltech: # of Multiplies vs Log Squared Error')
ax.set_xlabel('Log10(# of Multiplies)')
plt.tight_layout()
plt.subplots_adjust(top=.92, bottom=.2)
save_fig('caltech_{}'.format(x_metric))
def main():
# for x_metric in 'd secs muls'.split():
# for x_metric in ['muls']:
# for y_metric in ('Accuracy', 'R-Squared'):
# make_cifar_fig(x_metric, y_metric)
# make_cifar_fig('d', 'Accuracy')
# make_cifar_fig('muls', 'Accuracy')
make_cifar_fig('ops', 'Accuracy')
make_cifar_fig('Latency', 'Accuracy')
make_cifar_fig('Throughput', 'Accuracy')
# make_cifar_fig('Accuracy')
# make_cifar_fig('Accuracy')
# make_cifar_fig('R-Squared')
# make_ecg_fig(x_metric='d')
# make_ecg_fig(x_metric='secs')
# make_ecg_fig(x_metric='muls')
# make_caltech_fig(x_metric='d')
# make_caltech_fig(x_metric='secs')
# make_caltech_fig(x_metric='muls')
print("done")
if __name__ == '__main__':
main()
|
#!/usr/bin/env python

import time

import numpy as np
from joblib import Memory

# NOTE: reconstructed from usage below; `kmeans`, `orthonormalize_rows`, and
# `random_rotation` are assumed to come from this project's utility module.
from utils import kmeans, orthonormalize_rows, random_rotation  # assumed location
_memory = Memory('.', verbose=0)
# ================================================================ PQ
@_memory.cache
def learn_pq(X, ncentroids, nsubvects, subvect_len, max_kmeans_iters=16):
codebooks = np.empty((ncentroids, nsubvects, subvect_len))
    assignments = np.empty((X.shape[0], nsubvects), dtype=int)
# print "codebooks shape: ", codebooks.shape
for i in range(nsubvects):
start_col = i * subvect_len
end_col = start_col + subvect_len
X_in = X[:, start_col:end_col]
centroids, labels = kmeans(X_in, ncentroids, max_iter=max_kmeans_iters)
codebooks[:, i, :] = centroids
assignments[:, i] = labels
return codebooks, assignments # [2**nbits x M x D/M], [N x M]
def reconstruct_X_pq(assignments, codebooks):
"""assignments: N x M ints; codebooks: 2**nbits x M x D/M floats"""
_, M = assignments.shape
subvect_len = codebooks.shape[2]
assert assignments.shape[1] == codebooks.shape[1]
D = M * subvect_len
pointsCount = assignments.shape[0]
points = np.zeros((pointsCount, D), dtype=np.float32)
for i in range(M):
subspace_start = subvect_len * i
subspace_end = subspace_start + subvect_len
subspace_codes = assignments[:, i]
points[:, subspace_start:subspace_end] = codebooks[subspace_codes, i, :]
return points
def _dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def _dists_elemwise_l1(x, q):
return np.abs(x - q)
def _encode_X_pq(X, codebooks, elemwise_dist_func=_dists_elemwise_sq):
ncentroids, nsubvects, subvect_len = codebooks.shape
assert X.shape[1] == (nsubvects * subvect_len)
    idxs = np.empty((X.shape[0], nsubvects), dtype=int)
X = X.reshape((X.shape[0], nsubvects, subvect_len))
for i, row in enumerate(X):
row = row.reshape((1, nsubvects, subvect_len))
dists = elemwise_dist_func(codebooks, row)
dists = np.sum(dists, axis=2)
idxs[i, :] = np.argmin(dists, axis=0)
return idxs # [N x nsubvects]
def compute_reconstruction_error(X, X_hat, subvect_len=-1):
diffs = X - X_hat
diffs_sq = diffs * diffs
if subvect_len > 0:
errs = []
for i in range(0, diffs_sq.shape[1], subvect_len):
errs_block = diffs_sq[:, i:i+subvect_len]
errs.append(np.mean(errs_block))
print(" errors in each block: {} ({})".format(
np.array(errs), np.sum(errs)))
X_bar = X - np.mean(X, axis=0)
col_sses = np.sum(X_bar * X_bar, axis=0) + 1e-14
tot_sse_using_mean = np.sum(col_sses)
errors = np.mean(diffs_sq, axis=1)
# variances = np.var(X, axis=1)
# return np.mean(errors) / np.mean(variances)
return np.mean(errors) / (tot_sse_using_mean / X_bar.size)
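# Illustrative sketch, not part of the original module: the basic PQ round
# trip -- learn codebooks, reconstruct X from its codes, and report the
# relative reconstruction error. Assumes D is divisible by nsubvects.
def _sketch_pq_roundtrip(X, ncentroids=16, nsubvects=4):
    subvect_len = X.shape[1] // nsubvects
    codebooks, assignments = learn_pq(
        X, ncentroids=ncentroids, nsubvects=nsubvects, subvect_len=subvect_len)
    X_hat = reconstruct_X_pq(assignments, codebooks)
    return compute_reconstruction_error(X, X_hat)  # mse / variance; lower is better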
# ================================================================ Gaussian OPQ
# https://github.com/yahoo/lopq/blob/master/python/lopq/model.py; see
# https://github.com/yahoo/lopq/blob/master/LICENSE. For this function only:
#
# Copyright 2015, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0.
# See the LICENSE file associated with the project for terms.
#
@_memory.cache
def eigenvalue_allocation(num_buckets, eigenvalues, shuffle=False):
"""
    Compute a permutation of eigenvalues to balance variance across buckets
of dimensions.
Described in section 3.2.4 in http://research.microsoft.com/pubs/187499/cvpr13opq.pdf
Note, the following slides indicate this function will break when fed eigenvalues < 1
without the scaling trick implemented below:
https://www.robots.ox.ac.uk/~vgg/rg/slides/ge__cvpr2013__optimizedpq.pdf
:param int num_buckets:
the number of dimension buckets over which to allocate eigenvalues
:param ndarray eigenvalues:
a vector of eigenvalues
:param bool shuffle:
whether to randomly shuffle the order of resulting buckets
:returns ndarray:
a vector of indices by which to permute the eigenvectors
"""
D = len(eigenvalues)
dims_per_bucket = D // num_buckets
eigenvalue_product = np.zeros(num_buckets, dtype=float)
bucket_size = np.zeros(num_buckets, dtype=int)
permutation = np.zeros((num_buckets, dims_per_bucket), dtype=int)
# We first must scale the eigenvalues by dividing by their
    # smallest non-zero value to avoid problems with the algorithm
# when eigenvalues are less than 1.
min_non_zero_eigenvalue = np.min(np.abs(eigenvalues[np.nonzero(eigenvalues)]))
eigenvalues = eigenvalues / min_non_zero_eigenvalue
# this is not actually a requirement, but I'm curious about whether this
# condition is ever violated
if not np.all(eigenvalues > 0):
print("WARNING: some eigenvalues were nonpositive")
# Iterate eigenvalues in descending order
sorted_inds = np.argsort(eigenvalues)[::-1]
log_eigs = np.log2(abs(eigenvalues))
for ind in sorted_inds:
# Find eligible (not full) buckets
eligible = (bucket_size < dims_per_bucket).nonzero()
# Find eligible bucket with least eigenvalue product
i = eigenvalue_product[eligible].argmin(0)
bucket = eligible[0][i]
# Update eigenvalue product for this bucket
eigenvalue_product[bucket] = eigenvalue_product[bucket] + log_eigs[ind]
# Store bucket assignment and update size
permutation[bucket, bucket_size[bucket]] = ind
bucket_size[bucket] += 1
if shuffle:
        shuffle_idxs = np.arange(num_buckets, dtype=int)
np.random.shuffle(shuffle_idxs)
permutation = permutation[shuffle_idxs]
# wow, these are within <1% of each other
# print "opq eigenvalue log prods: ", eigenvalue_product
return np.reshape(permutation, D)
def learn_opq_gaussian_rotation(X_train, ncodebooks, shuffle=False):
means = np.mean(X_train, axis=0)
cov = np.dot(X_train.T, X_train) - np.outer(means, means)
eigenvals, eigenvects = np.linalg.eigh(cov)
order_idxs = eigenvalue_allocation(ncodebooks, eigenvals, shuffle=shuffle)
assert len(order_idxs) == X_train.shape[1]
return eigenvects[:, order_idxs].T # rows are projections
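# Illustrative check, not part of the original module: the learned rotation is
# a permutation of eigenvectors of a symmetric matrix, so R @ R.T should be
# numerically close to the identity; _debug_rotation() below performs a
# stricter version of this check. Assumes D is divisible by ncodebooks.
def _sketch_check_gaussian_rotation(X_train, ncodebooks=4):
    R = learn_opq_gaussian_rotation(X_train, ncodebooks)
    return np.abs(R @ R.T - np.eye(R.shape[0])).max()  # should be ~1e-6 or smaller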
# ================================================================ OPQ
def _update_centroids_opq(X, assignments, ncentroids): # [N x D], [N x M]
nsubvects = assignments.shape[1]
subvect_len = X.shape[1] // nsubvects
assert X.shape[0] == assignments.shape[0]
assert X.shape[1] % nsubvects == 0
codebooks = np.zeros((ncentroids, nsubvects, subvect_len), dtype=np.float32)
for i, row in enumerate(X):
for m in range(nsubvects):
start_col = m * subvect_len
end_col = start_col + subvect_len
codebooks[assignments[i, m], m, :] += row[start_col:end_col]
for m in range(nsubvects):
code_counts = np.bincount(assignments[:, m], minlength=ncentroids)
codebooks[:, m] /= np.maximum(code_counts, 1).reshape((-1, 1)) # no div by 0
return codebooks
class NumericalException(Exception):
pass
def _debug_rotation(R):
D = np.max(R.shape)
identity = np.identity(D, dtype=np.float32)
RtR = np.dot(R.T, R)
R_det = np.linalg.det(RtR)
print("determinant of R*R: ", R_det)
R_trace = np.trace(RtR)
print("trace of R*R, trace divided by D: {}, {}".format(R_trace, R_trace / D))
off_diagonal_abs_mean = np.mean(np.abs(RtR - identity))
print("mean(abs(off diagonals of R*R)): ", off_diagonal_abs_mean)
if R_det < .999 or R_det > 1.001:
raise NumericalException("Bad determinant")
if R_trace < .999 * D or R_trace > 1.001 * D:
raise NumericalException("Bad trace")
if off_diagonal_abs_mean > .001:
raise NumericalException("Bad off-diagonals")
def opq_rotate(X, R): # so other code need not know what to transpose
return np.dot(np.atleast_2d(X), R.T)
def opq_undo_rotate(X, R): # so other code need not know what to transpose
return np.dot(np.atleast_2d(X), R)
# @_memory.cache
def opq_initialize(X_train, ncodebooks, init='gauss'):
X = X_train
_, D = X.shape
if init == 'gauss' or init == 'gauss_flat' or init == 'gauss_shuffle':
permute = (init == 'gauss_shuffle')
R = learn_opq_gaussian_rotation(X_train, ncodebooks, shuffle=permute)
R = R.astype(np.float32)
if init == 'gauss_flat':
# assert R.shape[0] == R.shape[1]
D = R.shape[1]
d = D // ncodebooks
assert d * ncodebooks == D # same # of dims in each subspace
local_r = random_rotation(int(d))
tiled = np.zeros((D, D))
for c in range(ncodebooks):
start = c * d
end = start + d
tiled[start:end, start:end] = local_r
R = np.dot(R, tiled)
X_rotated = opq_rotate(X, R)
elif init == 'identity':
R = np.identity(D, dtype=np.float32) # D x D
X_rotated = X
elif init == 'random':
R = np.random.randn(D, D).astype(np.float32)
R = orthonormalize_rows(R)
X_rotated = opq_rotate(X, R)
else:
raise ValueError("Unrecognized initialization method: ".format(init))
return X_rotated, R
# loosely based on:
# https://github.com/arbabenko/Quantizations/blob/master/opqCoding.py
@_memory.cache
def learn_opq(X_train, ncodebooks, codebook_bits=8, niters=10,
initial_kmeans_iters=1, init='gauss', debug=False):
"""init in {'gauss', 'identity', 'random'}"""
print("OPQ: Using init '{}'".format(init))
t0 = time.time()
X = X_train.astype(np.float32)
N, D = X.shape
ncentroids = int(2**codebook_bits)
subvect_len = D // ncodebooks
assert D % subvect_len == 0 # equal number of dims for each codebook
X_rotated, R = opq_initialize(X_train, ncodebooks=ncodebooks, init=init)
# initialize codebooks by running kmeans on each rotated dim; this way,
# setting niters=0 corresponds to normal PQ
codebooks, assignments = learn_pq(X_rotated, ncentroids=ncentroids,
nsubvects=ncodebooks,
subvect_len=subvect_len,
max_kmeans_iters=1)
for it in np.arange(niters):
# compute reconstruction errors
X_hat = reconstruct_X_pq(assignments, codebooks)
# err = compute_reconstruction_error(X_rotated, X_hat, subvect_len=subvect_len)
err = compute_reconstruction_error(X_rotated, X_hat)
print("---- OPQ {}x{}b iter {}: mse / variance = {:.5f}".format(
ncodebooks, codebook_bits, it, err))
# update rotation matrix based on reconstruction errors
U, s, V = np.linalg.svd(np.dot(X_hat.T, X), full_matrices=False)
R = np.dot(U, V)
# update centroids using new rotation matrix
X_rotated = opq_rotate(X, R)
assignments = _encode_X_pq(X_rotated, codebooks)
codebooks = _update_centroids_opq(X_rotated, assignments, ncentroids)
X_hat = reconstruct_X_pq(assignments, codebooks)
err = compute_reconstruction_error(X_rotated, X_hat)
t = time.time() - t0
print("---- OPQ {}x{}b final mse / variance = {:.5f} ({:.3f}s)".format(
ncodebooks, codebook_bits, err, t))
return codebooks, assignments, R
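# Usage sketch (illustrative; synthetic data): learn_opq returns the per-subspace
# codebooks, the training-set codes, and the learned rotation R; new vectors are
# passed through opq_rotate(v, R) before encoding, exactly as done above.
def _example_learn_opq():
    X_train = np.random.randn(1000, 32).astype(np.float32)
    codebooks, assignments, R = learn_opq(
        X_train, ncodebooks=4, codebook_bits=4, niters=2)
    X_hat = reconstruct_X_pq(assignments, codebooks)  # approximates the rotated data
    err = compute_reconstruction_error(opq_rotate(X_train, R), X_hat)
    print("example OPQ mse / variance: {:.5f}".format(err))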
# ================================================================ Block OPQ
def bopq_rotate(X, rotations):
X = np.atleast_2d(X)
_, D = X.shape
R_sz = len(rotations[0])
nrots = int(D / R_sz)
assert nrots == len(rotations)
rot_starts = R_sz * np.arange(nrots)
rot_ends = rot_starts + R_sz
X_out = np.copy(X)
for i, R in enumerate(rotations):
start, end = rot_starts[i], rot_ends[i]
X_out[:, start:end] = np.dot(X[:, start:end], R.T)
return X_out
@_memory.cache # opq with block diagonal rotations
def learn_bopq(X_train, ncodebooks, codebook_bits=4, niters=20,
initial_kmeans_iters=1, R_sz=16, **sink):
t0 = time.time()
X = X_train.astype(np.float32)
N, D = X.shape
ncentroids = int(2**codebook_bits)
subvect_len = D // ncodebooks
    assert ncodebooks * subvect_len == D  # equal number of dims for each codebook
# compute number of rotations and subspaces associated with each
nrots = int(D / R_sz)
rot_starts = R_sz * np.arange(nrots)
rot_ends = rot_starts + R_sz
# X_rotated, R = opq_initialize(X_train, ncodebooks=ncodebooks, init=init)
    # copy so that the in-place block updates below don't also overwrite X
    X_rotated = np.copy(X)  # hardcode identity init # TODO allow others
rotations = [np.eye(R_sz) for i in range(nrots)]
# initialize codebooks by running kmeans on each rotated dim; this way,
# setting niters=0 corresponds to normal PQ
codebooks, assignments = learn_pq(X_rotated, ncentroids=ncentroids,
nsubvects=ncodebooks,
subvect_len=subvect_len,
max_kmeans_iters=1)
for it in np.arange(niters):
# compute reconstruction errors
X_hat = reconstruct_X_pq(assignments, codebooks)
# err = compute_reconstruction_error(X_rotated, X_hat, subvect_len=subvect_len)
err = compute_reconstruction_error(X_rotated, X_hat)
print("---- BOPQ {} {}x{}b iter {}: mse / variance = {:.5f}".format(
R_sz, ncodebooks, codebook_bits, it, err))
rotations = []
for i in range(nrots):
start, end = rot_starts[i], rot_ends[i]
X_sub = X[:, start:end]
X_hat_sub = X_hat[:, start:end]
# update rotation matrix based on reconstruction errors
U, s, V = np.linalg.svd(np.dot(X_hat_sub.T, X_sub), full_matrices=False)
R = np.dot(U, V)
rotations.append(R)
X_rotated[:, start:end] = np.dot(X_sub, R.T)
# update assignments and codebooks based on new rotations
assignments = _encode_X_pq(X_rotated, codebooks)
codebooks = _update_centroids_opq(X_rotated, assignments, ncentroids)
X_hat = reconstruct_X_pq(assignments, codebooks)
err = compute_reconstruction_error(X_rotated, X_hat)
t = time.time() - t0
print("---- BOPQ {} {}x{}b final mse / variance = {:.5f} ({:.3f}s)".format(
R_sz, ncodebooks, codebook_bits, err, t))
return codebooks, assignments, rotations
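# Usage sketch (illustrative; synthetic data): learn_bopq returns one R_sz x R_sz
# rotation per block of dimensions; apply them to new data with bopq_rotate.
def _example_learn_bopq():
    X_train = np.random.randn(1000, 32).astype(np.float32)
    codebooks, assignments, rotations = learn_bopq(
        X_train, ncodebooks=4, codebook_bits=4, niters=2, R_sz=8)
    X_rot = bopq_rotate(X_train, rotations)
    X_hat = reconstruct_X_pq(assignments, codebooks)
    err = compute_reconstruction_error(X_rot, X_hat)
    print("example BOPQ mse / variance: {:.5f}".format(err))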
|
#!/bin/env/python
def ls(dir='.'):
return os.listdir(dir)
def is_hidden(path):
return os.path.basename(path).startswith('.')
def is_visible(path):
return not is_hidden(path)
def join_paths(dir, contents):
return [os.path.join(dir, f) for f in contents]
def files_matching(dir, prefix=None, suffix=None, abs_paths=False,
only_files=False, only_dirs=False, recursive=False,
only_visible=False, only_hidden=False):
files = os.listdir(dir)
if recursive:
abs_dir = dir
paths = join_paths(abs_dir, files)
for path in paths:
if not os.path.isdir(path):
continue
matches = files_matching(
path, prefix=prefix, suffix=suffix,
abs_paths=abs_paths, only_files=only_files,
only_dirs=only_dirs, recursive=True)
matches = join_paths(path, matches)
matches = [os.path.relpath(m, start=dir) for m in matches]
files += matches
if prefix:
files = [f for f in files if f.startswith(prefix)]
if suffix:
files = [f for f in files if f.endswith(suffix)]
if only_files or only_dirs:
paths = join_paths(dir, files)
if only_files:
files = [f for f, p in zip(files, paths) if os.path.isfile(p)]
if only_dirs:
files = [f for f, p in zip(files, paths) if os.path.isdir(p)]
if abs_paths:
files = join_paths(os.path.abspath(dir), files)
if only_visible:
files = [f for f in files if is_visible(f)]
if only_hidden:
files = [f for f in files if is_hidden(f)]
return sorted(files)
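# Usage sketch ('data' is a hypothetical directory): collect every .npy file
# under data/, returned sorted and relative to data/:
#   files_matching('data', suffix='.npy', only_files=True, recursive=True)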
def list_subdirs(dir, startswith=None, endswith=None, abs_paths=False,
recursive=False, only_visible=False):
return files_matching(dir, startswith, endswith, abs_paths,
only_dirs=True, recursive=recursive,
only_visible=only_visible)
def list_files(dir, startswith=None, endswith=None, abs_paths=False,
recursive=False, only_visible=False):
return files_matching(dir, startswith, endswith, abs_paths,
only_files=True, recursive=recursive,
only_visible=only_visible)
def remove(path):
if os.path.exists(path):
try:
os.remove(path)
        except OSError:
shutil.rmtree(path)
def force_create_dir(dir):
if os.path.exists(dir):
remove(dir)
os.makedirs(dir)
def ensure_dir_exists(dir_or_file):
if '.' in os.path.basename(dir_or_file): # this looks like a file
dirname = os.path.dirname(dir_or_file)
else:
dirname = dir_or_file
if not os.path.exists(dirname):
os.makedirs(dirname)
def basename(f, noext=False):
name = os.path.basename(f)
if noext:
name = name.split('.')[0]
return name
|
#!/bin/env python
# ================================ TODO rm duplicate code from imagenet.py
# adapted from https://github.com/keras-team/keras-preprocessing/blob/master/
# keras_preprocessing/image/utils.py under MIT license
def img_to_array(img, layout='nhwc', dtype='float32', mode='RGB'):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
layout: Image data format, either "nchw" or "nhwc".
dtype: Dtype to use for the returned array.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `layout` is passed.
"""
# print("img info:", img.format, img.size, img.mode)
# if img.mode == 'L':
if img.mode != mode:
img = img.convert(mode=mode)
if layout not in ('nchw', 'nhwc'):
raise ValueError('Unknown layout: %s' % layout)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if layout == 'nchw':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
# print("x is only rank 2...WTF!?")
if layout == 'nchw':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: %s' % (x.shape,))
return x
def resize_img(img, ratio_or_size):
if ratio_or_size is None or np.max(ratio_or_size) < 0:
return img
    try:
        nrows = ratio_or_size[0]
        ncols = ratio_or_size[1]
        nrows = img.height if nrows < 0 else nrows
        ncols = img.width if ncols < 0 else ncols
    except (AttributeError, TypeError, IndexError):
        # a scalar ratio was passed instead of a (nrows, ncols) pair
        nrows = int(img.height * ratio_or_size)
        ncols = int(img.width * ratio_or_size)
    new_size = (ncols, nrows)  # PIL's resize() expects (width, height)
    is_downsampling = (nrows < img.height) or (ncols < img.width)
    interp = PIL.Image.LANCZOS if is_downsampling else PIL.Image.BICUBIC
    return img.resize(new_size, resample=interp)
def crop_img(img, crop_how=None, new_size=(224, 224), resize_shorter_to=256):
if crop_how is None:
return img
assert crop_how in ('center', 'square')
height, width = img.height, img.width
if (height == width) and (new_size is None):
return img
if crop_how == 'center':
return center_crop(img, new_size=new_size,
resize_shorter_to=resize_shorter_to)
if new_size is None:
new_width = min(width, height)
new_height = new_width
else:
new_height, new_width = new_size
assert new_width <= width
assert new_height <= height
left = (width - new_width) // 2
top = (height - new_height) // 2
# right = (width + new_width) // 2
# bottom = (height + new_height) // 2
right = left + new_width
bottom = top + new_height
return img.crop((left, top, right, bottom))
def center_crop(img, new_size=(224, 224), resize_shorter_to=256):
height, width = img.height, img.width
minsize = min(height, width)
new_height = (height * resize_shorter_to) // minsize
new_width = (width * resize_shorter_to) // minsize
img = img.resize(
(new_width, new_height), resample=Image.BICUBIC)
assert min(new_width, new_height) == resize_shorter_to
return crop_img(img, crop_how='square', new_size=new_size)
def pad_img(img, pad_how='square', fill_value=0):
if pad_how is None:
return img
assert pad_how == 'square' # no other kinds of cropping supported
height, width = img.height, img.width
if height == width:
return img
new_size = max(height, width)
delta_w = new_size - width
pad_left = delta_w // 2
pad_right = delta_w - pad_left
delta_h = new_size - height
pad_top = delta_h // 2
pad_bottom = delta_h - pad_top
padding = pad_left, pad_top, pad_right, pad_bottom
return ImageOps.expand(img, border=padding, fill=fill_value)
def load_jpg(path, layout='nhwc', dtype=None, resample=None,
crop=None, pad=None):
img = PIL.Image.open(path)
img = pad_img(img, pad)
img = crop_img(img, crop)
img = resize_img(img, ratio_or_size=resample)
return img_to_array(img, layout=layout, dtype=dtype)
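# Usage sketch ('some_image.jpg' is a hypothetical path): ImageNet-style
# preprocessing, i.e. resize the shorter side to 256, take a 224x224 center
# crop, then resample to the requested size:
#   x = load_jpg('some_image.jpg', layout='nhwc', dtype='float32',
#                resample=(224, 224), crop='center')
#   # x.shape -> (224, 224, 3)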
# assumes one subdir for each class, with class name equal to subdir name
# @_memory.cache
def load_jpegs_from_dir(dirpath, remove_classes=None, require_suffix=None,
layout='nhwc', dtype=None, resample=(224, 224),
crop='center', pad=None, verbose=1,
limit_per_class=None, only_return_path=False):
subdirs = sorted(files.list_subdirs(dirpath, only_visible=True))
if remove_classes is not None:
if isinstance(remove_classes, str):
remove_classes = [remove_classes]
for classname in remove_classes:
subdirs.remove(classname)
if verbose > 0:
print("found {} classes in directory: {}".format(len(subdirs), dirpath))
classname_to_label = {name: i for i, name in enumerate(subdirs)} # noqa
label_to_classname = {i: name for name, i in classname_to_label.items()}
all_imgs = []
all_labels = []
for subdir in subdirs:
subdir_path = os.path.join(dirpath, subdir)
img_paths = files.list_files(
subdir_path, endswith=require_suffix, abs_paths=True,
only_visible=True)
if limit_per_class is not None and limit_per_class > 0:
img_paths = img_paths[:limit_per_class]
if verbose > 1:
print("loading {:4d} images for class '{}'".format(
len(img_paths), subdir))
        label = classname_to_label[subdir]
        all_labels.extend([label] * len(img_paths))
if only_return_path:
imgs = img_paths
else:
imgs = [load_jpg(f, layout=layout, dtype=dtype, resample=resample,
crop=crop, pad=pad)[np.newaxis, :, :, :]
for f in img_paths]
all_imgs += imgs
if only_return_path:
X = all_imgs
else:
try:
# this works iff resampled/padded/cropped to same size
X = np.concatenate(all_imgs, axis=0)
except ValueError:
# otherwise strip batch dim (so each image is 3D)
X = [img.reshape(img.shape[1:]) for img in all_imgs]
y = np.array(all_labels, dtype=np.int32)
return (X, y), label_to_classname
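# Usage sketch ('../datasets/some_dataset' is a hypothetical directory with one
# subdirectory per class):
#   (X, y), idx_to_class = load_jpegs_from_dir('../datasets/some_dataset',
#                                              resample=(224, 224), crop='center')
#   # X: (N, 224, 224, 3) array (or a list if images end up different sizes);
#   # y: int32 labels; idx_to_class maps label -> class subdirectory name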
|
#!/bin/env python
# from python import imagenet, svhn, caltech
# from python.datasets import caltech
_memory = Memory('.', verbose=1)
# DATA_DIR = os.path.expanduser('~/Desktop/datasets/nn-search')
DATA_DIR = os.path.expanduser('data')
join = os.path.join
DEFAULT_AUG_KWARGS = {
'shear_range': 0.2,
'zoom_range': 0.2,
'horizontal_flip': True
}
class LabeledDataset(object):
__slots__ = 'name X_train y_train X_test y_test _collection'.split()
def __init__(self, name, X_train, y_train, X_test=None, y_test=None):
self.name = name
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
def generators(self, batch_size, augment=True,
preprocessing_function=None, aug_kwargs=None):
        _aug_kwargs = dict(DEFAULT_AUG_KWARGS)  # copy; don't mutate the module-level default
if aug_kwargs is not None:
_aug_kwargs.update(aug_kwargs)
if not augment:
_aug_kwargs = {}
nclasses = len(np.unique(self.y_train))
y_train = keras.utils.to_categorical(self.y_train, num_classes=nclasses)
y_test = keras.utils.to_categorical(self.y_test, num_classes=nclasses)
train_datagen = image.ImageDataGenerator(
preprocessing_function=preprocessing_function, **_aug_kwargs)
train_generator = train_datagen.flow(
self.X_train, y_train, batch_size=batch_size)
test_datagen = image.ImageDataGenerator(
preprocessing_function=preprocessing_function)
test_generator = test_datagen.flow(
self.X_test, y_test, batch_size=batch_size)
return train_generator, test_generator
class HugeLabeledDataset(object):
def __init__(self, name, train_dir, test_dir,
train_nsamples=None, test_nsamples=None):
self.name = name
# self.train_dir = os.path.abspath(train_dir)
# self.test_dir = os.path.abspath(test_dir)
self.train_dir = train_dir
self.test_dir = test_dir
self.train_nsamples = int(train_nsamples or -1)
self.test_nsamples = int(test_nsamples or -1)
def generators(self, batch_size=None, augment=True,
preprocessing_function=None, aug_kwargs=None,
train_batch_size=None, test_batch_size=None,
**flow_kwargs):
        _aug_kwargs = dict(DEFAULT_AUG_KWARGS)  # copy; don't mutate the module-level default
if aug_kwargs is not None:
_aug_kwargs.update(aug_kwargs)
if not augment:
_aug_kwargs = {}
flow_kwargs = flow_kwargs or {}
flow_kwargs.setdefault('target_size', (224, 224))
flow_kwargs.setdefault('class_mode', 'categorical')
train_generator = None
test_generator = None
if self.train_dir:
train_batch_size = int(train_batch_size or batch_size)
flow_kwargs['batch_size'] = train_batch_size
print("HugeLabeledDataset: creating flow from train dir: ",
self.train_dir)
train_datagen = image.ImageDataGenerator(
preprocessing_function=preprocessing_function, **_aug_kwargs)
train_generator = train_datagen.flow_from_directory(
self.train_dir, **flow_kwargs)
if self.test_dir:
test_batch_size = int(test_batch_size or batch_size)
flow_kwargs['batch_size'] = test_batch_size
print("HugeLabeledDataset: creating flow from test dir: ",
self.test_dir)
test_datagen = image.ImageDataGenerator(
preprocessing_function=preprocessing_function)
test_generator = test_datagen.flow_from_directory(
self.test_dir, **flow_kwargs)
return train_generator, test_generator
class Random:
UNIFORM = 'uniform'
GAUSS = 'gauss'
WALK = 'walk'
BLOBS = 'blobs'
DIGITS = 'Digits'
MNIST = 'MNIST'
FASHION_MNIST = 'FashionMNIST'
CIFAR10 = 'Cifar10'
CIFAR100 = 'Cifar100'
SVHN = 'SVHN'
CALTECH101 = 'Caltech101'
CALTECH256 = 'Caltech256'
CUB200 = 'CUB200'
FLOWERS102 = 'Flowers102'
INDOOR67 = 'Indoor67'
IMAGENET_TINY = 'TinyImagenet' # 64x64, 200? classes
IMAGENET_10_CLASSES = 'ImageNet-10-Classes' # full res, 10cls, 1k/cls
IMAGENET_100_CLASSES = 'ImageNet-100-Classes' # full res, 100cls, 1k/cls
IMAGENET_1_EXAMPLE = 'ImageNet-1-Example' # full res, 1k cls, 1/cls
IMAGENET_10_EXAMPLES = 'ImageNet-10-Examples' # full res, 1k cls, 10/cls
IMAGENET_25_EXAMPLES = 'ImageNet-25-Examples' # full res, 1k cls, 25/cls
IMAGENET_50_EXAMPLES = 'ImageNet-50-Examples' # full res, 1k cls, 50/cls
IMAGENET_100_EXAMPLES = 'ImageNet-100-Examples' # full res, 1k cls, 100/cls
IMAGENET_64PX = 'ImageNet64' # 64x64, all examples
IMAGENET = 'ImageNet'
IMAGENET_ONE_OF_EACH = 'ImagenetOneOfEach'
MINIPLACES = 'Miniplaces'
ALL_IMAGENET_DATASETS = [
IMAGENET, IMAGENET_64PX, IMAGENET_TINY, IMAGENET_ONE_OF_EACH,
IMAGENET_10_CLASSES, IMAGENET_100_CLASSES,
IMAGENET_1_EXAMPLE, IMAGENET_10_EXAMPLES, IMAGENET_100_EXAMPLES]
ALL_KERAS_DATASETS = [MNIST, CIFAR10, CIFAR100, FASHION_MNIST]
def _load_file(fname, *args, **kwargs):
fname = os.path.join(DATA_DIR, fname)
print("trying to load file at path: {}".format(fname))
if fname.split('.')[-1] == 'txt':
return np.loadtxt(fname, *args, **kwargs)
return np.load(fname, *args, **kwargs)
def _load_digits_X_y(ntrain=1000):
X, y = load_digits(return_X_y=True)
X_train, X_test = X[:ntrain], X[ntrain:]
y_train, y_test = y[:ntrain], y[ntrain:]
return LabeledDataset('Digits', X_train, y_train, X_test, y_test)
# return X[:-nqueries], X[-nqueries:] # X, Q
def _load_keras_dset(which_dataset):
from keras import datasets as kd
dataClass = {CIFAR10: kd.cifar10,
CIFAR100: kd.cifar100,
MNIST: kd.mnist,
FASHION_MNIST: kd.fashion_mnist}[which_dataset]
(X_train, y_train), (X_test, y_test) = dataClass.load_data()
pretty_name = str(which_dataset).split('.')[-1].split("'")[0]
return LabeledDataset(pretty_name, X_train, y_train, X_test, y_test)
def load_imagenet_64(limit_ntrain=-1):
# if we're not going to use the whole training set, don't even load in all
# the files it's split into (necessary unless you have >18GB of free RAM)
which_file_idxs = None
if limit_ntrain > 0:
nchunks = int(np.ceil(
limit_ntrain / imagenet.IMAGENET_64_TRAIN_CHUNK_NSAMPLES))
which_file_idxs = np.arange(1, nchunks + 1)
X_train, y_train = imagenet.load_train_data_64x64(
which_file_idxs=which_file_idxs)
X_test, y_test = imagenet.load_test_data_64x64()
    return LabeledDataset(IMAGENET_64PX, X_train, y_train, X_test, y_test)
def load_imagenet_tiny():
X_train, y_train = imagenet.load_train_data_tiny()
X_test, y_test = imagenet.load_test_data_tiny()
return LabeledDataset(IMAGENET_TINY, X_train, y_train, X_test, y_test)
def load_imagenet_one_of_each():
X, y = imagenet.load_data_one_of_each()
    return LabeledDataset(IMAGENET_ONE_OF_EACH, X, y, X, y)
def load_imagenet(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_TRAIN_PATH if load_train else None
test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
return HugeLabeledDataset(
IMAGENET, train_path, test_path,
train_nsamples=1281167, test_nsamples=50e3)
def load_imagenet_10_classes(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_10_CLASSES_TRAIN_PATH if load_train else None
test_path = imagenet.IMAGENET_10_CLASSES_TEST_PATH if load_val else None
return HugeLabeledDataset(
IMAGENET_10_CLASSES, train_path, test_path,
train_nsamples=13000, test_nsamples=500)
def load_imagenet_100_classes(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_100_CLASSES_TRAIN_PATH \
if load_train else None
test_path = imagenet.IMAGENET_100_CLASSES_TEST_PATH if load_val else None
return HugeLabeledDataset(IMAGENET_100_CLASSES, train_path, test_path,
train_nsamples=129395, test_nsamples=5000)
def load_imagenet_1_example(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_1_EXAMPLE_TRAIN_PATH \
if load_train else None
test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    return HugeLabeledDataset(IMAGENET_1_EXAMPLE, train_path, test_path,
train_nsamples=1e3, test_nsamples=50e3)
def load_imagenet_10_examples(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_10_EXAMPLES_TRAIN_PATH \
if load_train else None
test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
return HugeLabeledDataset(IMAGENET_10_EXAMPLES, train_path, test_path,
train_nsamples=10e3, test_nsamples=50e3)
def load_imagenet_25_examples(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_25_EXAMPLES_TRAIN_PATH \
if load_train else None
test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
return HugeLabeledDataset(IMAGENET_25_EXAMPLES, train_path, test_path,
train_nsamples=25e3, test_nsamples=50e3)
def load_imagenet_50_examples(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_50_EXAMPLES_TRAIN_PATH \
if load_train else None
test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
return HugeLabeledDataset(IMAGENET_50_EXAMPLES, train_path, test_path,
train_nsamples=50e3, test_nsamples=50e3)
def load_imagenet_100_examples(load_train=True, load_val=True):
train_path = imagenet.IMAGENET_100_EXAMPLES_TRAIN_PATH \
if load_train else None
test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    return HugeLabeledDataset(IMAGENET_100_EXAMPLES, train_path, test_path,
train_nsamples=100e3, test_nsamples=50e3)
def _load_miniplaces():
path = '/data/ddmg/neuro/datasets/Miniplaces/miniplaces.h5'
with h5py.File(path, 'r') as hf:
X_train = hf['X_train'][()]
Y_train = hf['Y_train'][()]
X_val = hf['X_val'][()]
Y_val = hf['Y_val'][()]
return LabeledDataset(MINIPLACES, X_train, Y_train, X_val, Y_val)
def _load_svhn():
(X_train, y_train), (X_test, y_test) = svhn.load_data()
return LabeledDataset(SVHN, X_train, y_train, X_test, y_test)
def load_caltech101():
data_dir = '../datasets/caltech/101_ObjectCategories'
return HugeLabeledDataset(CALTECH101, data_dir, None)
# (X, y), _ = caltech.load_caltech101()
# return LabeledDataset(IMAGENET_ONE_OF_EACH, X, y, X, y)
def load_caltech256():
data_dir = '../datasets/caltech/256_ObjectCategories'
return HugeLabeledDataset(CALTECH256, data_dir, None)
# (X, y), _ = caltech.load_caltech256()
# return LabeledDataset(IMAGENET_ONE_OF_EACH, X, y, X, y)
def load_flowers102():
data_dir = '../datasets/flowers102'
train_dir = os.path.join(data_dir, 'train')
test_dir = os.path.join(data_dir, 'test')
return HugeLabeledDataset(FLOWERS102, train_dir, test_dir,
train_nsamples=1020, test_nsamples=6149)
def load_cub200(): # note that this is 2011 version of CUB200
data_dir = '../datasets/cub200'
train_dir = os.path.join(data_dir, 'train')
test_dir = os.path.join(data_dir, 'test')
return HugeLabeledDataset(CUB200, train_dir, test_dir,
train_nsamples=5994, test_nsamples=5794)
def load_indoor67(): # this is the subset with predefined train vs test split
data_dir = '../datasets/indoor67'
train_dir = os.path.join(data_dir, 'train')
test_dir = os.path.join(data_dir, 'test')
return HugeLabeledDataset(INDOOR67, train_dir, test_dir,
train_nsamples=(67 * 80), test_nsamples=(67 * 20))
# @_memory.cache
def load_dataset(which_dataset, norm_mean=False, norm_len=False,
flatten=False, Ntrain=-1, Ntest=-1, ensure_channels=False,
test_frac=None, scale_to_0_1=False):
if which_dataset == DIGITS:
dset = _load_digits_X_y()
elif which_dataset in ALL_KERAS_DATASETS:
dset = _load_keras_dset(which_dataset)
elif which_dataset == IMAGENET_64PX:
dset = load_imagenet_64(limit_ntrain=Ntrain)
elif which_dataset == IMAGENET_TINY:
dset = load_imagenet_tiny()
elif which_dataset == IMAGENET_ONE_OF_EACH:
dset = load_imagenet_one_of_each()
elif which_dataset == MINIPLACES:
dset = _load_miniplaces()
elif which_dataset == SVHN:
dset = _load_svhn()
elif which_dataset == CALTECH101:
return load_caltech101()
elif which_dataset == CALTECH256:
return load_caltech256()
elif which_dataset == CUB200:
return load_cub200()
elif which_dataset == FLOWERS102:
return load_flowers102()
elif which_dataset == IMAGENET:
return load_imagenet()
elif which_dataset == IMAGENET_10_CLASSES:
return load_imagenet_10_classes()
elif which_dataset == IMAGENET_100_CLASSES:
return load_imagenet_100_classes()
elif which_dataset == IMAGENET_1_EXAMPLE:
return load_imagenet_1_example()
elif which_dataset == IMAGENET_10_EXAMPLES:
return load_imagenet_10_examples()
elif which_dataset == IMAGENET_25_EXAMPLES:
return load_imagenet_25_examples()
elif which_dataset == IMAGENET_50_EXAMPLES:
return load_imagenet_50_examples()
elif which_dataset == IMAGENET_100_EXAMPLES:
return load_imagenet_100_examples()
else:
raise ValueError("unrecognized dataset {}".format(which_dataset))
if isinstance(dset, HugeLabeledDataset):
# only has flow_from_directory() generators; no postprocessing
# possible, so go ahead and return immediately
return dset
train_is_test = (dset.X_train.base is dset.X_test) or \
(dset.X_test.base is dset.X_train)
train_test_equal = np.array_equal(dset.X_train[:10], dset.X_test[:10])
train_test_same = train_is_test or train_test_equal
if train_test_same:
if test_frac is None:
warnings.warn("WARNING: Training data is also the test data! "
"Reversing order of test data. Consider passing "
"test_frac > 0 to automatically perform a "
"stratified train-test split.")
dset.X_test = dset.X_test[::-1]
else:
X_train, X_test, y_train, y_test = stratified_split_train_test(
dset.X_train, dset.y_train, train_frac=(1. - test_frac))
dset = LabeledDataset(dset.name, X_train, y_train, X_test, y_test)
train_is_test = False
train_test_equal = False
train_test_same = False
if train_is_test:
dset.X_test = np.copy(dset.X_test)
dset.y_test = np.copy(dset.y_test)
train_is_test = False
if flatten:
dset.X_train = dset.X_train.reshape(dset.X_train.shape[0], -1)
dset.X_test = dset.X_test.reshape(dset.X_test.shape[0], -1)
dset.X_train = dset.X_train.astype(np.float32)
dset.X_test = dset.X_test.astype(np.float32)
X_train = dset.X_train
X_test = dset.X_test
if Ntrain > 0:
dset.X_train = X_train[:Ntrain]
dset.y_train = dset.y_train[:Ntrain]
if Ntest > 0:
dset.X_test = np.copy(X_test[:Ntest])
dset.y_test = np.copy(dset.y_test[:Ntest])
if scale_to_0_1:
min_X = min(np.min(dset.X_train), np.min(dset.X_test))
max_X = max(np.max(dset.X_train), np.max(dset.X_test))
        dset.X_train = (dset.X_train - min_X) / (max_X - min_X)
        # if not train_is_test:
        dset.X_test = (dset.X_test - min_X) / (max_X - min_X)
if norm_mean:
means = np.mean(dset.X_train, axis=0)
dset.X_train -= means
# if not train_is_test: # don't subtract means twice from same array
dset.X_test -= means
if norm_len:
dset.X_train /= np.linalg.norm(dset.X_train, axis=1, keepdims=True)
# if not train_is_test: # don't divide by norms twice on same array
dset.X_test /= np.linalg.norm(dset.X_test, axis=1, keepdims=True)
if ensure_channels:
import keras.backend as K # don't import keras unless we need it
if len(X_train.shape) == 3: # no channels; e.g., MNIST
img_rows, img_cols = X_train.shape[-2], X_train.shape[-1]
# K.set_image_data_format('channels_last') # for argmax layer
if K.image_data_format() == 'channels_first':
dset.X_train = dset.X_train.reshape(
X_train.shape[0], 1, img_rows, img_cols)
dset.X_test = dset.X_test.reshape(
X_test.shape[0], 1, img_rows, img_cols)
else:
dset.X_train = dset.X_train.reshape(
X_train.shape[0], img_rows, img_cols, 1)
dset.X_test = dset.X_test.reshape(
X_test.shape[0], img_rows, img_cols, 1)
return dset
# if D_multiple_of > 1:
# X_train = ensure_num_cols_multiple_of(X_train, D_multiple_of)
# X_test = ensure_num_cols_multiple_of(X_test, D_multiple_of)
# Q = ensure_num_cols_multiple_of(Q, D_multiple_of)
# return X_train, Q, X_test, true_nn
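# Usage sketch (assumes the keras-provided datasets can be downloaded locally):
#   dset = load_dataset(MNIST, ensure_channels=True, scale_to_0_1=True)
#   train_gen, test_gen = dset.generators(batch_size=128, augment=False)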
|
#!/bin/env python
# import pyedflib as edf # pip install pyedflib
# import mne
ECG_DIR = paths.UCD_ECG
NUM_RECORDINGS = 25
def main():
print("ecg dir: ", ECG_DIR)
fpaths = files.list_files(ECG_DIR, abs_paths=True)
# fpaths = files.list_files(ECG_DIR)
assert len(fpaths) == NUM_RECORDINGS
# print("fpaths: ", "\n".join(fpaths))
# print("number of fpaths: ", len(fpaths))
for path in fpaths:
print("------------------------ ", path)
# f = edf.EdfReader(path)
# print(f.signals_in_file)
magical_start_offset = 1025 # from looking at raw binary
# raw = bytes(open(path, 'rb').read())[magical_start_offset:]
with open(path, 'rb') as f:
raw = f.read()
# raw = open(path, 'rb').read()
print("length of raw: ", len(raw))
print("type(raw)", type(raw))
a = np.frombuffer(raw, offset=magical_start_offset, dtype=np.uint16)
# a = np.frombuffer(raw, dtype=np.uint16)
print(len(a))
print(len(a) / 3)
# print("number of bytes: ", len(raw))
# with open(path, 'rb') as f:
# # f.seek(magical_start_offset)
# f.read(magical_start_offset)
# a = np.fromfile(f, dtype=np.int16)
# print(len(a))
# print(len(a) / 3)
if __name__ == '__main__':
main()
|
#!/usr/env/python
DATASETS_DIR = os.path.expanduser("~/Desktop/datasets/")
def to_path(*args):
return os.path.join(DATASETS_DIR, *args)
# straightforward datasets
MSRC_12 = to_path('MSRC-12', 'origData')
UCR = to_path('ucr/UCRArchive_2018')
UCR_INFO = to_path('ucr/DataSummary.csv')
UWAVE = to_path('uWave', 'extracted')
PAMAP = to_path('PAMAP_Dataset')
PAMAP2 = to_path('PAMAP2_Dataset')
WARD = to_path('WARD1.0')
UCI_GAS = to_path('uci-gas-sensor')
# ampds2
AMPD2_POWER = to_path('ampds2', 'electric')
AMPD2_GAS = to_path('ampds2', 'gas')
AMPD2_WEATHER = to_path('ampds2', 'weather')
AMPD2_WATER = to_path('ampds2', 'water')
# caltech-{101,256}
CALTECH_101 = to_path('caltech', '101_ObjectCategories')
CALTECH_256 = to_path('caltech', '256_ObjectCategories')
# ECG data
SHAREE_ECG = to_path('sharee-ecg-database')
INCART_ECG = to_path('incart-12-lead-ecg')
|
#!/bin/env python
_memory = Memory('.', verbose=1)
DATA_DIR = os.path.expanduser('~/Desktop/datasets/nn-search')
join = os.path.join
class Random:
UNIFORM = 'uniform'
GAUSS = 'gauss'
WALK = 'walk'
BLOBS = 'blobs'
class Gist:
DIR = join(DATA_DIR, 'gist')
TRAIN = join(DIR, 'gist_train.npy') # noqa
TEST = join(DIR, 'gist.npy') # noqa
TEST_100 = join(DIR, 'gist_100k.npy') # noqa
TEST_200 = join(DIR, 'gist_200k.npy') # noqa
QUERIES = join(DIR, 'gist_queries.npy') # noqa
TRUTH = join(DIR, 'gist_truth.npy') # noqa
class Sift1M:
DIR = join(DATA_DIR, 'sift1m')
TRAIN = join(DIR, 'sift_learn.npy') # noqa
TEST = join(DIR, 'sift_base.npy') # noqa
TEST_100 = join(DIR, 'sift_100k.txt') # noqa
TEST_200 = join(DIR, 'sift_200k.txt') # noqa
QUERIES = join(DIR, 'sift_queries.npy') # noqa
TRUTH = join(DIR, 'sift_groundtruth.npy') # noqa
class Sift10M:
DIR = join(DATA_DIR, 'sift1b')
# TRAIN = join(DIR, 'big_ann_learn_10M.npy') # noqa
TRAIN = join(DIR, 'big_ann_learn_1M.npy') # noqa # TODO use 10M?
TRAIN_1M = join(DIR, 'big_ann_learn_1M.npy') # noqa
TEST = join(DIR, 'sift_10M.npy') # noqa
QUERIES = join(DIR, 'sift_queries.npy') # noqa
TRUTH = join(DIR, 'true_nn_idxs_10M.npy') # noqa
class Deep1M:
"""256D PCA of convnet activations; see OTQ paper supporting
    website, http://sites.skoltech.ru/compvision/projects/aqtq/"""
DIR = join(DATA_DIR, 'deep1m') # noqa
TRAIN = join(DIR, 'deep1M_learn.npy') # noqa
TEST = join(DIR, 'deep1M_base.npy') # noqa
TEST_100 = join(DIR, 'deep1M_test_100k.npy') # noqa
QUERIES = join(DIR, 'deep1M_queries.npy') # noqa
TRUTH_TRAIN = join(DIR, 'deep1M_truth_train.npy') # noqa
TRUTH = join(DIR, 'deep1M_groundtruth.npy') # noqa
class Convnet1M:
DIR = join(DATA_DIR, 'convnet1m') # noqa
TRAIN = join(DIR, 'convnet_train.npy') # noqa
TEST = join(DIR, 'convnet_test.npy') # noqa
TEST_100 = join(DIR, 'convnet_test_100k.npy') # noqa
QUERIES = join(DIR, 'convnet_queries.npy') # noqa
TRUTH_TRAIN = join(DIR, 'truth_train.npy') # noqa
TRUTH = join(DIR, 'truth_test.npy') # noqa
class Mnist:
# following other papers (eg, "revisiting additive quantization"),
# use mnist test set as queries and training set as database
DIR = join(DATA_DIR, 'mnist') # noqa
TEST = join(DIR, 'X_train.npy') # noqa
QUERIES = join(DIR, 'X_test.npy') # noqa
TRUTH = join(DIR, 'truth_Q=test_X=train.npy') # noqa
class LabelMe:
DIR = join(DATA_DIR, 'labelme') # noqa
TRAIN = join(DIR, 'labelme_train.npy') # noqa
TEST = join(DIR, 'labelme_train.npy') # noqa
QUERIES = join(DIR, 'labelme_test.npy') # noqa
TRUTH = join(DIR, 'labelme_truth.npy') # noqa
class Glove:
DIR = join(DATA_DIR, 'glove') # noqa
TEST = join(DIR, 'glove_test.npy') # noqa
TEST_100 = join(DIR, 'glove_100k.txt') # noqa
TEST_200 = join(DIR, 'glove_200k.txt') # noqa
QUERIES = join(DIR, 'glove_queries.npy') # noqa
TRUTH = join(DIR, 'glove_truth.npy') # noqa
# note that we've only run the real experiments on the ones reported
# in the paper (i.e., no cherrypicking)
ALL_REAL_DATASETS = [
Gist, Sift1M, Sift10M, Deep1M, Convnet1M, Mnist, LabelMe, Glove]
def load_file(fname, *args, **kwargs):
if fname.split('.')[-1] == 'txt':
return np.loadtxt(fname, *args, **kwargs)
return np.load(fname, *args, **kwargs)
def extract_random_rows(X, how_many, remove_from_X=True):
split_start = np.random.randint(len(X) - how_many - 1)
split_end = split_start + how_many
rows = np.copy(X[split_start:split_end])
if remove_from_X:
return np.vstack((X[:split_start], X[split_end:])), rows
return X, rows
def _load_complete_dataset(which_dataset, num_queries=10):
X_test = np.load(which_dataset.TEST)
try:
X_train = np.load(which_dataset.TRAIN)
print("using separate test set!")
except AttributeError:
print("No training set found for dataset {}".format(str(which_dataset)))
X_train = np.copy(X_test)
try:
Q = np.load(which_dataset.QUERIES)
except AttributeError:
assert num_queries > 1
X_train, Q = extract_random_rows(X_train, how_many=num_queries)
try:
        true_nn = np.load(which_dataset.TRUTH).astype(np.int64)  # np.int is removed in newer numpy
except AttributeError:
true_nn = None
return X_train, Q, X_test, true_nn
def _ground_truth_for_dataset(which_dataset):
return None # TODO
# XXX: not clear whether this function is correct in general, but works for
# 784D with the nzeros we get for 32 and 64 codebooks
def _insert_zeros(X, nzeros):
N, D = X.shape
D_new = D + nzeros
X_new = np.zeros((N, D_new), dtype=X.dtype)
step = int(D / (nzeros + 1)) - 1
for i in range(nzeros):
in_start = step * i
in_end = in_start + step
# out_start = in_start + i + 1
out_start = (step + 1) * i
out_end = out_start + step
X_new[:, out_start:out_end] = X[:, in_start:in_end]
# out_start = out_end
# out_end += step
out_end += 1 # account for the last 0
remaining_len = D - in_end
out_remaining_len = D_new - out_end
# print "step", step
# print "in_start, in_end", in_start, in_end
# print "out_start, out_end", out_start, out_end
# print "D, D_new", D, D_new
# print "remaining_len, out_remaining_len", remaining_len, out_remaining_len
assert remaining_len == out_remaining_len
assert remaining_len >= 0
if remaining_len:
# X_new[:, out_end:out_end+remaining_len] = X[:, in_end:D]
X_new[:, out_end:] = X[:, in_end:]
assert np.array_equal(X[:, 0], X_new[:, 0])
assert np.array_equal(X[:, -1], X_new[:, -1])
return X_new
def ensure_num_cols_multiple_of(X, multiple_of):
remainder = X.shape[1] % multiple_of
if remainder > 0:
return _insert_zeros(X, multiple_of - remainder)
return X
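# Worked example for the case the XXX note above says is handled (784 dims,
# 32 codebooks => 16 zero columns inserted):
#   X = np.random.randn(2, 784).astype(np.float32)
#   ensure_num_cols_multiple_of(X, 32).shape  # -> (2, 800)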
# @_memory.cache # uncomment to get same randomness each time
def load_dataset(which_dataset, N=-1, D=-1, norm_mean=False, norm_len=False,
num_queries=10, Ntrain=-1, D_multiple_of=-1):
true_nn = None
# randomly generated datasets
if which_dataset == Random.UNIFORM:
X_test = np.random.rand(N, D)
X_train = np.random.rand(Ntrain, D) if Ntrain > 0 else X_test
Q = np.random.rand(num_queries, D)
elif which_dataset == Random.GAUSS:
X_test = np.random.randn(N, D)
X_train = np.random.randn(Ntrain, D) if Ntrain > 0 else X_test
Q = np.random.randn(num_queries, D)
elif which_dataset == Random.WALK:
X_test = np.random.randn(N, D)
X_test = np.cumsum(X_test, axis=1)
X_train = np.copy(X_test)
if Ntrain > 0:
X_train = np.random.randn(Ntrain, D)
            X_train = np.cumsum(X_train, axis=1)
Q = np.random.randn(num_queries, D)
Q = np.cumsum(Q, axis=-1)
elif which_dataset == Random.BLOBS:
# centers is D x D, and centers[i, j] = (i + j)
centers = np.arange(D)
centers = np.sum(np.meshgrid(centers, centers), axis=0)
X_test, _ = make_blobs(n_samples=N, centers=centers)
X_train = np.copy(X_test)
if Ntrain > 0:
X_train, _ = make_blobs(n_samples=Ntrain, centers=centers)
Q, true_nn = make_blobs(n_samples=num_queries, centers=centers)
# datasets that are just one block of a "real" dataset
elif isinstance(which_dataset, str):
# assert False # TODO rm after real experiments
X_test = load_file(which_dataset)
X_test, Q = extract_random_rows(X_test, how_many=num_queries)
X_train = np.copy(X_test)
true_nn = _ground_truth_for_dataset(which_dataset)
# "real" datasets with predefined train, test, queries, truth
elif which_dataset in ALL_REAL_DATASETS:
X_train, Q, X_test, true_nn = _load_complete_dataset(
which_dataset, num_queries=num_queries)
else:
raise ValueError("unrecognized dataset {}".format(which_dataset))
N = X_test.shape[0] if N < 1 else N
D = X_test.shape[1] if D < 1 else D
X_test, X_train = np.copy(X_test)[:N, :D], X_train[:N, :D]
Q = Q[:, :D] if len(Q.shape) > 1 else Q[:D]
train_is_test = X_train.base is X_test or X_test.base is X_train
train_test_equal = np.array_equal(X_train[:100], X_test[:100])
train_test_same = train_is_test or train_test_equal
if train_test_same:
print("WARNING: Training data is also the test data!")
if train_is_test:
X_test = np.copy(X_test)
if norm_mean:
means = np.mean(X_train, axis=0)
X_train -= means
X_test -= means
Q -= means
if norm_len:
X_test /= np.linalg.norm(X_test, axis=1, keepdims=True)
X_train /= np.linalg.norm(X_train, axis=1, keepdims=True)
Q /= np.linalg.norm(Q, axis=-1, keepdims=True)
# np.set_printoptions(precision=6)
# print "start of Q:", Q[:5, :5]
# print "start of X_test:", X_test[:5, :5]
# TODO don't convert datasets that are originally uint8s to floats
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
# Q = np.squeeze(Q.astype(np.float32))
Q = Q.astype(np.float32)
if D_multiple_of > 1:
X_train = ensure_num_cols_multiple_of(X_train, D_multiple_of)
X_test = ensure_num_cols_multiple_of(X_test, D_multiple_of)
Q = ensure_num_cols_multiple_of(Q, D_multiple_of)
return X_train, Q, X_test, true_nn
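# Usage sketch (synthetic data, so nothing needs to exist on disk):
#   X_train, Q, X_test, true_nn = load_dataset(Random.GAUSS, N=10000, D=64,
#                                              norm_mean=True, num_queries=100)
#   # true_nn is None here since the random datasets have no precomputed truth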
def read_yael_vecs(path, c_contiguous=True, limit_rows=-1, dtype=None):
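    """Reads vectors stored in the yael/TEXMEX .fvecs/.ivecs/.bvecs layout:
    each row is a 4-byte int giving the dimensionality, followed by that many
    components (float32, int32, or uint8 respectively). The leading length
    column is validated against `dim` and stripped before returning."""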
dim = np.fromfile(path, dtype=np.int32, count=2)[0]
print("vector length = {}".format(dim))
if dtype is None:
if 'fvecs' in path:
dtype = np.float32
elif 'ivecs' in path:
dtype = np.int32
elif 'bvecs' in path:
dtype = np.uint8
else:
raise ValueError("couldn't infer dtype from path {}".format(path))
itemsize = np.dtype(dtype).itemsize
assert dim > 0
assert itemsize in (1, 2, 4)
cols_for_dim = 4 // itemsize
row_size_bytes = 4 + dim * itemsize
row_size_elems = row_size_bytes // itemsize
limit = int(limit_rows) * row_size_elems if limit_rows > 0 else -1
fv = np.fromfile(path, dtype=dtype, count=limit)
fv = fv.reshape((-1, row_size_elems))
if not all(fv.view(np.int32)[:, 0] == dim):
raise IOError("Non-uniform vector sizes in " + path)
fv = fv[:, cols_for_dim:]
if c_contiguous:
fv = fv.copy()
return fv
|
#!/usr/bin/env python
# import matplotlib as mpl
_memory = Memory('./')
def _list_csvs(directory):
return files.list_files(directory, endswith='.csv', abs_paths=True)
ELECTRIC_PATHS = _list_csvs(paths.AMPD2_POWER)
GAS_PATHS = _list_csvs(paths.AMPD2_GAS)
WATER_PATHS = _list_csvs(paths.AMPD2_WATER)
WEATHER_PATHS = _list_csvs(paths.AMPD2_WEATHER)
ELECTRIC_COLS = 'UNIX_TS,WHE,RSE,GRE,MHE,B1E,BME,CWE,DWE,EQE,FRE,HPE,OFE,' \
'UTE,WOE,B2E,CDE,DNE,EBE,FGE,HTE,OUE,TVE,UNE'.split(',')
ELECTRIC_DATA_COLS = ELECTRIC_COLS[1:]
# ELECTRIC_DATA_COLS.remove('MHE') # linear combo of other cols
# ELECTRIC_DATA_COLS.remove('UNE') # linear combo of other cols
GAS_DATA_COLS = ['counter', 'avg_rate', 'inst_rate']
WATER_DATA_COLS = ['counter', 'avg_rate']
WEATHER_TIME_COL = 'Date/Time'
WEATHER_DATA_COLS = ['Temp (C)', 'Dew Point Temp (C)', 'Rel Hum (%)',
'Wind Dir (10s deg)', 'Wind Spd (km/h)',
'Visibility (km)', 'Stn Press (kPa)']
WEATHER_ALL_COLS = [WEATHER_TIME_COL] + WEATHER_DATA_COLS
FIG_SAVE_DIR = os.path.join('figs', 'ampds')
# ================================================================ public
class HouseRecording(object):
def __init__(self, path, cols=None):
data = _read_file(path)
self.path = path
self.name = os.path.basename(path).split('.')[0]
self.col_names = cols
self.sampleTimes = data[:, 0]
self.data = data[:, 1:] # XXX have to use all cols after the first
# if 'power' in self.name:
# print "initial sample times: ", self.sampleTimes[:50]
# print
# hack to deal with DWW water not having inst_rate
# self.col_names = self.col_names[:self.data.shape[1]]
self.data = self.data[:, :len(self.col_names)]
class WeatherRecording(object):
def __init__(self):
df = _load_weather_data()
self.name = 'weather'
self.col_names = WEATHER_DATA_COLS
self.sampleTimes = _datetime_strs_to_unix_timestamps(df[WEATHER_TIME_COL])
self.data = df[WEATHER_DATA_COLS].values.astype(np.float32)
# ------------------------ top-level data loading functions
def all_power_recordings():
return [HouseRecording(path, cols=ELECTRIC_DATA_COLS) for path in ELECTRIC_PATHS]
def all_gas_recordings():
return [HouseRecording(path, cols=GAS_DATA_COLS) for path in GAS_PATHS]
def all_water_recordings():
return [HouseRecording(path, cols=WATER_DATA_COLS) for path in WATER_PATHS]
def all_weather_recordings():
return [WeatherRecording()] # just one data file, so just one recording
def all_timestamp_recordings():
all_recordings = all_power_recordings() + all_gas_recordings() + \
all_water_recordings() + all_weather_recordings()
# all_recordings = all_weather_recordings() # TODO rm
for r in all_recordings:
r.data = r.sampleTimes.astype(np.float64)
r.name += '_timestamps'
return all_recordings
# ================================================================ private
# def _read_file(path, cols=None):
@_memory.cache
def _read_file(path):
    df = pd.read_csv(path).fillna(method='backfill')  # fill NaNs with next valid value
# if cols is not None and len(cols) > 0:
# timestamps = df[df.columns[0]]
# return df.values.astype(np.int32)
return df.values.astype(np.float64) # need f64 to not lose timestamps
@_memory.cache
def _load_weather_data():
path = WEATHER_PATHS[0]
    df = pd.read_csv(path, sep=',').fillna(method='backfill')  # fill NaNs with next valid value
return df[WEATHER_ALL_COLS]
def _datetimes_to_unix_timestamps(datetimes):
# https://stackoverflow.com/q/34038273
return (datetimes.astype(np.int64) / 1e6).astype(np.uint64)
def _datetime_strs_to_unix_timestamps(strs):
return _datetimes_to_unix_timestamps(pd.to_datetime(strs))
# ================================================================ main
def save_fig_png(path):
plt.savefig(path, dpi=300, bbox_inches='tight')
def _prev_corrs_stats(corr):
assert corr.shape[0] == corr.shape[1] # needs to be a correlation mat
abs_corr = np.abs(corr)
prev_corrs = np.zeros(len(corr) - 1)
best_corrs = np.zeros(len(corr) - 1)
for i, row in enumerate(abs_corr[1:]): # each row after the first
prev_corrs[i] = row[i] # note that i is row index - 1
try:
best_corr_idx = np.nanargmax(row[:i+1])
best_corrs[i] = row[best_corr_idx]
except ValueError: # if row all nans
best_corrs[i] = prev_corrs[i]
assert not (best_corrs[i] < prev_corrs[i]) # double neg for nans
# avg corr with prev variable, avg highest corr with any preceding variable
return np.nanmean(prev_corrs), np.nanmean(best_corrs)
def _plot_corr(data, fig, ax, add_title=True):
"""assumes data is row-major; ie, each col is one variable over time"""
# cov = np.cov(data.T)
corr = np.corrcoef(data.T)
# im = ax.imshow(corr, interpolation='nearest',
# cmap=plt.cm.RdBu,
# norm=mpl.colors.Normalize(vmin=-1., vmax=1.))
# fig.colorbar(im, ax=ax)
# sb.heatmap(corr, center=0, ax=ax, square=True)
sb.heatmap(corr, vmin=-1, vmax=1, center=0, ax=ax, square=True)
if add_title:
mean_prev_corr, mean_best_corr = _prev_corrs_stats(corr)
ax.set_title("|rho| prev, best prev =\n{:.2f}, {:.2f}".format(
mean_prev_corr, mean_best_corr))
def plot_recordings(recordings, interval_len=1000, norm_means=False,
mins_zero=False, savedir=None):
for r in recordings:
print(("recording {} has data of shape {}".format(r.name, r.data.shape)))
fig, axes = plt.subplots(2, 4, figsize=(13, 7))
start_idxs = [0, len(r.data) - interval_len]
end_idxs = [interval_len, len(r.data)]
# any_nans_in_row = np.isnan(r.data).sum(axis=1)
# print np.where(any_nans_in_row)[0]
# continue
for i, (start, end) in enumerate(zip(start_idxs, end_idxs)):
timestamps = r.sampleTimes[start:end]
data = r.data[start:end]
if norm_means:
data -= np.mean(data, axis=0).astype(data.dtype)
elif mins_zero:
data -= np.min(data, axis=0).astype(data.dtype)
# print "data shape", data.shape
# print "data final vals", data[-20:]
# continue
col = i + 1
axes[0, col].plot(timestamps, data, lw=1)
axes[1, col].plot(timestamps[1:], np.diff(data, axis=0), lw=1)
axes[0, col].set_title('data')
axes[1, col].set_title('first derivs')
# plot correlation matrices for orig data and first derivs
cor_sample_length = max(10000, len(r.data) // 5)
data = r.data[:cor_sample_length]
_plot_corr(data, fig, axes[0, 0])
_plot_corr(np.diff(data, axis=0), fig, axes[1, 0])
data = r.data[-cor_sample_length:]
_plot_corr(data, fig, axes[0, -1])
_plot_corr(np.diff(data, axis=0), fig, axes[1, -1])
# _plot_corr(r.data[:cor_sample_length], fig, axes[0, 0])
# data = r.data[-cor_sample_length:]
# _plot_corr(data, fig, axes[2, 1])
plt.tight_layout()
# plt.show()
if savedir is not None:
files.ensure_dir_exists(savedir)
# plt.savefig(os.path.join(savedir, r.name))
save_fig_png(os.path.join(savedir, r.name))
def main():
recordings = []
recordings += all_gas_recordings()
recordings += all_water_recordings()
recordings += all_power_recordings()
recordings += all_weather_recordings()
norm_means = False
# norm_means = True
mins_zero = True
plot_recordings(recordings, norm_means=norm_means, mins_zero=mins_zero,
savedir=FIG_SAVE_DIR)
# plt.show()
if __name__ == '__main__':
main()
|
#!/bin/env python
# import warnings
_memory = Memory('.', verbose=1)
IMAGENET_ONE_OF_EACH_PATH = '../datasets/one-of-each-imagenet'
IMAGENET_ONE_OF_EACH_FLOW_PATH = '../datasets/one-of-each-imagenet-as-folders'
# IMAGENET_64_PATH = os.path.expanduser("~/Desktop/datasets/imagenet64")
# IMAGENET_TINY_PATH = os.path.expanduser("~/Desktop/datasets/tiny-imagenet-200")
IMAGENET_64_PATH = '../datasets/imagenet64'
IMAGENET_TINY_PATH = '../datasets/tiny-imagenet-200'
IMAGENET_64_TRAIN_CHUNK_NSAMPLES = 128116
IMAGENET_TRAIN_PATH = '../datasets/ILSVRC2012/ILSVRC2012_img_train'
IMAGENET_TEST_PATH = '/home/dblalock/datasets/ILSVRC2012/ILSVRC2012_img_val'
if not os.path.exists(IMAGENET_TEST_PATH): # try to load local version
IMAGENET_TEST_PATH = '../datasets/ILSVRC2012/ILSVRC2012_img_val'
IMAGENET_10_CLASSES_TRAIN_PATH = '../datasets/ILSVRC2012_10/ILSVRC2012_img_train'
IMAGENET_10_CLASSES_TEST_PATH = '../datasets/ILSVRC2012_10/ILSVRC2012_img_val'
IMAGENET_100_CLASSES_TRAIN_PATH = '../datasets/ILSVRC2012_100/ILSVRC2012_img_train'
IMAGENET_100_CLASSES_TEST_PATH = '../datasets/ILSVRC2012_100/ILSVRC2012_img_val'
IMAGENET_1_EXAMPLE_TRAIN_PATH = '../datasets/imagenet-001-of-each'
IMAGENET_10_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-010-of-each'
IMAGENET_25_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-025-of-each'
IMAGENET_50_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-050-of-each'
IMAGENET_100_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-100-of-each'
# ================================================================ Downsampled
def _unpickle_file(path):
with open(path, 'rb') as f:
pydict = pickle.load(f)
return pydict
# @_memory.cache
def _load_downsampled_data_file(path, layout='nhwc', dtype=None,
X_out=None, y_out=None, start_row=None):
d = _unpickle_file(path)
X = d['data']
# NOTE: subtracting 1 so min idx is 0; this breaks synset lookup
y = np.array(d['labels'], dtype=np.int32) - 1
y = y.ravel() # shouldn't be necessary, but might as well
assert X.shape[0] == y.shape[0]
assert len(X.shape) == 2
nchan = 3
    npixels = X.shape[1] // nchan
    assert npixels * nchan == X.shape[1]  # each row should be one flattened img
side_len = int(np.sqrt(npixels))
assert side_len * side_len == npixels
X = X.reshape(X.shape[0], nchan, side_len, side_len)
layout = 'nhwc' if layout is None else layout
assert layout in ('nhwc', 'nchw')
if layout == 'nhwc':
X = np.moveaxis(X, 1, -1) # make channels last axis
X = np.ascontiguousarray(X)
if X_out is not None:
assert dtype in (None, X_out.dtype)
dtype = X_out.dtype
if dtype is not None:
X = X.astype(dtype)
# print("X shape: ", X.shape)
# print("y shape: ", y.shape)
if start_row is not None:
end_row = start_row + X.shape[0]
if X_out is not None:
assert start_row is not None
X_out[start_row:end_row] = X
if y_out is not None:
assert start_row is not None
y_out[start_row:end_row] = y
return X, y
def load_train_file_64x64(idx, verbose=0, **kwargs):
assert idx in np.arange(1, 11) # valid indices are 1 thru 10
path = os.path.join(IMAGENET_64_PATH, "train_data_batch_{}".format(idx))
if verbose > 1:
print("loading train file: ", path)
return _load_downsampled_data_file(path, **kwargs)
def _clean_which_file_idxs(which_file_idxs=None, dtype=None):
if which_file_idxs is None:
which_file_idxs = np.arange(1, 11)
which_file_idxs = np.asarray(which_file_idxs, dtype=np.int32)
    # don't try to load more training data than we can actually fit in RAM
mem_available = psutil.virtual_memory().available
    itemsize = np.dtype(dtype).itemsize if dtype is not None else 1
one_img_nbytes = 64 * 64 * 3 * itemsize
one_file_nbytes = IMAGENET_64_TRAIN_CHUNK_NSAMPLES * one_img_nbytes
max_nfiles = (mem_available // one_file_nbytes) - 1
# print("one_img_nbytes", one_img_nbytes)
# print("one_file_nbytes", one_file_nbytes)
# print("available mem", mem_available)
# print("max_nfiles", max_nfiles)
if max_nfiles < 1:
raise MemoryError(
"Minimum amount of RAM needed to load one chunk of ImageNet64x64 "
"is {}B, but only {}B are available".format(
one_file_nbytes, mem_available))
requested_nfiles = len(which_file_idxs)
if max_nfiles < requested_nfiles:
requested_nbytes = (requested_nfiles + 1) * one_file_nbytes
requested_MB = requested_nbytes // int(1e6)
available_MB = mem_available // int(1e6)
print("imagenet.load_train_data_64x64: MemoryWarning: "
"Only loading {}/10 chunks of ImageNet64 instead of requested "
"{}/10 since not enough memory; would need {:}MB, but only {:}MB "
"are available".format(
max_nfiles, requested_nfiles, requested_MB, available_MB),
file=sys.stderr)
# warnings.warn(msg, UserWarning)
which_file_idxs = which_file_idxs[:max_nfiles]
assert np.min(which_file_idxs) >= 1
assert np.max(which_file_idxs) <= 10
return which_file_idxs
# NOTE: total size of training data is around 16GB
def load_train_data_64x64(which_file_idxs=None, layout='nhwc', dtype=None,
verbose=1):
which_file_idxs = _clean_which_file_idxs(which_file_idxs, dtype=dtype)
if verbose > 0:
print("load_train_data_64x64: loading file numbers: ", which_file_idxs)
# import sys; sys.exit()
if dtype is None:
dtype = np.uint8 # default dtype
# preallocate output matrix of appropriate size so that we can just
# keep one copy of the data in memory (as opposed to loading all the
# data matrices and then concatenating them)
assert layout in ('nhwc', 'nchw')
nrows_per_file = IMAGENET_64_TRAIN_CHUNK_NSAMPLES
img_shape = (64, 64, 3) if layout == 'nhwc' else (3, 64, 64)
combined_nrows = nrows_per_file * len(which_file_idxs)
combined_shape = (combined_nrows,) + img_shape
X_combined = np.zeros(combined_shape, dtype=dtype)
y_combined = np.zeros(combined_nrows, dtype=np.int32)
for i, idx in enumerate(which_file_idxs):
start_row = nrows_per_file * i
load_train_file_64x64(
idx, layout=layout, X_out=X_combined, y_out=y_combined,
start_row=start_row, verbose=verbose)
return X_combined, y_combined
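# Usage sketch (assumes the downsampled-ImageNet chunk files exist under
# IMAGENET_64_PATH): load just the first two training chunks as uint8 nhwc arrays.
#   X, y = load_train_data_64x64(which_file_idxs=[1, 2])
#   # X.shape -> (2 * IMAGENET_64_TRAIN_CHUNK_NSAMPLES, 64, 64, 3); y is int32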
def load_test_data_64x64(layout='nhwc', dtype=None):
path = os.path.join(IMAGENET_64_PATH, "val_data")
return _load_downsampled_data_file(path, layout=layout, dtype=dtype)
# ================================================================ Tiny
@_memory.cache
def _load_tiny_clsids_to_nums():
wnids_path = os.path.join(IMAGENET_TINY_PATH, 'wnids.txt')
with open(wnids_path) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
return {s: i for i, s in enumerate(lines)}
def _imagenet_tiny_cls_to_number(classnames):
if isinstance(classnames, str):
return _load_tiny_clsids_to_nums()[classnames]
return [_load_tiny_clsids_to_nums()[name] for name in classnames]
@_memory.cache
def load_train_data_tiny(layout='nhwc', dtype=None, verbose=1):
train_dir = os.path.join(IMAGENET_TINY_PATH, 'train')
subdirs = files.list_subdirs(train_dir)
all_classes = subdirs
assert len(all_classes) == 200 # wrong number of classes??
subdir_paths = files.list_subdirs(train_dir, abs_paths=True)
all_imgs = []
all_labels = []
for i, pth in enumerate(np.sort(subdir_paths)):
classname = os.path.basename(pth)
if verbose > 0:
print("loading images for class {}...".format(classname))
imgs_subdir = os.path.join(pth, 'images')
img_paths = files.list_files(
imgs_subdir, endswith='.JPEG', abs_paths=True)
assert len(img_paths) == 500 # supposed to be 500 examples per class...
imgs = [image_utils.load_jpg(f, layout=layout,
dtype=dtype)[np.newaxis, :, :, :]
for f in img_paths]
all_imgs += imgs
lbl = _imagenet_tiny_cls_to_number(classname)
all_labels += [lbl] * len(img_paths)
X = np.concatenate(all_imgs, axis=0)
y = np.array(all_labels, dtype=np.int32)
return X, y
@_memory.cache
def load_test_data_tiny(layout='nhwc', dtype=None):
# no labels given for "true" test set, so use the "val" subset as the
# test set
test_dir = os.path.join(IMAGENET_TINY_PATH, 'val')
imgs_subdir = os.path.join(test_dir, 'images')
img_paths = files.list_files(
imgs_subdir, endswith='.JPEG', abs_paths=True)
assert len(img_paths) == 10000 # wrong number of val images?
# load images
imgs = [image_utils.load_jpg(f, layout=layout,
dtype=dtype)[np.newaxis, :, :, :]
for f in img_paths]
X = np.concatenate(imgs, axis=0)
# load labels # TODO make sure this computation is correct
lbls_path = os.path.join(test_dir, 'val_annotations.txt')
with open(lbls_path, 'r') as f:
lines = f.readlines()
fnames = [line.split()[0] for line in lines]
class_ids = [line.split()[1] for line in lines]
# complicated way that doesn't rely on annotations being sorted
fname_to_class_id = dict(zip(fnames, class_ids))
img_fnames = [os.path.basename(pth) for pth in img_paths]
img_class_ids = [fname_to_class_id[fname] for fname in img_fnames]
labels = _imagenet_tiny_cls_to_number(img_class_ids)
y = np.array(labels, dtype=np.int32)
return X, y
# ================================================================ K-of-each
# def load_data_one_of_each(layout='nhwc', dtype=None, size=None):
def load_data_one_of_each(layout='nhwc', dtype=None, size=(224, 224)):
# np_save_file = os.path.join(IMAGENET_ONE_OF_EACH_PATH, 'oneOfEach.npy')
# cached_exists = os.path.exists(np_save_file)
# if cached_exists:
# return np.load(np_save_file)
img_paths = files.list_files(IMAGENET_ONE_OF_EACH_PATH, endswith='.JPEG',
abs_paths=True)
assert len(img_paths) == 1000 # should be 1000 images...
imgs = [image_utils.load_jpg(f, layout=layout, dtype=dtype, resample=size)
for f in img_paths]
if size is not None: # can only concat if same size
imgs = [img[np.newaxis, :, :, :] for img in imgs]
X = np.concatenate(imgs, axis=0)
else:
X = imgs
# XXX this is a total hack that will break if we get >1 img per class, and
# already (probably) doesn't match up with the synsets
# lbls = [os.path.basename(path).split('_')[0] for path in img_paths]
y = np.arange(len(X))
return X, y
# ================================================================ example
def load_flow_example(**kwargs):
IMAGENET_EXAMPLE_PATH = os.path.abspath('../datasets/imagenet-example')
# print("files in data dir:")
# print(files.list_subdirs(IMAGENET_EXAMPLE_PATH))
# import sys; sys.exit()
j = os.path.join
kwargs.setdefault('target_size', (224, 224))
kwargs.setdefault('batch_size', 16)
kwargs.setdefault('class_mode', 'categorical')
import keras
from keras.preprocessing import image
train_datagen = image.ImageDataGenerator()
val_datagen = image.ImageDataGenerator()
test_datagen = image.ImageDataGenerator()
# print("contents of train dir: ", files.list_subdirs(j(IMAGENET_EXAMPLE_PATH, 'train')))
train_generator = train_datagen.flow_from_directory(
j(IMAGENET_EXAMPLE_PATH, 'train'),
**kwargs)
val_generator = val_datagen.flow_from_directory(
j(IMAGENET_EXAMPLE_PATH, 'val'),
**kwargs)
    test_generator = test_datagen.flow_from_directory(
j(IMAGENET_EXAMPLE_PATH, 'val'),
**kwargs)
return train_generator, val_generator, test_generator
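# Hedged usage sketch (not called anywhere; assumes the imagenet-example
# directory layout above exists): Keras DirectoryIterators yield
# (images, labels) batches, so pulling a single batch is a quick way to
# sanity-check target_size and class_mode.
def _peek_flow_example_sketch():
    train_gen, _, _ = load_flow_example(batch_size=4)
    images, labels = next(train_gen)
    print("batch images shape:", images.shape)  # expect (4, 224, 224, 3)
    print("batch labels shape:", labels.shape)  # (4, num_classes), one-hot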
def example_imagenet_train():
import tensorflow as tf
import keras
from python import models
from python import approx_conv_v2 as aconv
# both are necessary to actually get consistent output
np.random.seed(123)
tf.random.set_random_seed(123)
model = models.get_model(models.VGG16, weights=None, input_shape=(224, 224, 3))
# swap out normal conv layer with our custom layer
model = models.replace_layer_classes(
model, {keras.layers.Conv2D: aconv.MyConv2D})
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
model.summary()
train_generator, val_generator, test_generator = load_flow_example()
model.fit_generator(train_generator, steps_per_epoch=10, epochs=1,
validation_steps=1, validation_data=val_generator)
model.evaluate_generator(test_generator, steps=2)
def main():
# X, y = load_train_file_64x64(1)
# X, y = load_train_data_64x64([1, 2, 3])
# X, y = load_train_data_64x64([1, 2, 3, 4, 5]) # works
# X, y = load_train_data_64x64() # correctly yields mem warning
# X, y = load_train_data_64x64([1])
# X, y = load_test_data_64x64()
# X, y = load_data_one_of_each(size=None) # no resampling
# X, y = load_data_one_of_each(size=(224, 224))
# wow, imagenet-tiny looks like crap; lots of aliasing
X, y = load_train_data_tiny()
# X, y = load_test_data_tiny()
print("X, y dtypes and shapes:")
print(X.dtype)
print(y.dtype)
print(X.shape)
print(y.shape)
import matplotlib.pyplot as plt
inp = 'y'
count = 0
while inp == 'y':
_, axes = plt.subplots(3, 3, figsize=(9, 9))
idxs = np.random.choice(np.arange(len(X)), size=axes.size)
# offset = 0
# offset = 10000
# idxs = np.arange(offset + 9*count, offset + 9 + 9*count)
for i, ax in enumerate(axes.ravel()):
idx = idxs[i]
img, classId = X[idx], y[idx]
ax.imshow(img, interpolation='nearest')
ax.set_title("Idx = {}, class = {}".format(idx, classId))
# plt.imshow(X[100*1000], interpolation='nearest')
# plt.imshow(X[300*1000], interpolation='nearest')
plt.tight_layout()
plt.show()
count += 1
inp = input("enter y to plot more random imgs; anything else to stop: ")
def _folderize_imagenet_one_of_each(): # one-off script
olddir = IMAGENET_ONE_OF_EACH_PATH
newdir = IMAGENET_ONE_OF_EACH_FLOW_PATH
files.ensure_dir_exists(newdir)
    old_files = files.list_files(olddir, endswith='.JPEG', abs_paths=True)
    for f in old_files:
basename = os.path.basename(f)
label = basename.split('_')[0]
subdir = os.path.join(newdir, label)
files.ensure_dir_exists(subdir)
newpath = os.path.join(subdir, basename)
# newpath = os.path.join(newdir, )
# print("oldpath: ", f, os.path.exists(f))
# print("newpath: ", newpath)
shutil.copy(f, newpath)
def _make_imagenet_k_of_each(k=10):
out_path = '../datasets/imagenet-{:03d}-of-each'.format(k)
print("writing to path: ", out_path)
src_dir = IMAGENET_TRAIN_PATH
for synset in files.list_subdirs(src_dir):
subdir_path = os.path.join(src_dir, synset)
img_paths = sorted(files.list_files(subdir_path, abs_paths=True))
img_paths = img_paths[:k]
new_subdir = os.path.join(out_path, synset)
files.ensure_dir_exists(new_subdir)
for path in img_paths:
fname = os.path.basename(path)
new_path = os.path.join(new_subdir, fname)
shutil.copy(path, new_path)
if __name__ == '__main__':
# example_imagenet_train()
main()
# _folderize_imagenet_one_of_each()
# _make_imagenet_k_of_each(10)
# _make_imagenet_k_of_each(25)
# _make_imagenet_k_of_each(50)
# _make_imagenet_k_of_each(100)
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
from joblib import Memory
import paths  # local module defining dataset locations (import path assumed)
_memory = Memory('.', verbose=1, compress=9)
UCR_DATASETS_DIR = paths.UCR
UCR_INFO_PATH = paths.UCR_INFO
# ================================================================
# Public
# ================================================================
def all_ucr_datasets():
for dataDir in sorted(all_ucr_dataset_dirs()):
yield UCRDataset(dataDir)
class UCRDataset(object):
def __init__(self, dataset_dir, sep='\t', precondition=True, znorm=True):
self.name = name_from_dir(dataset_dir)
self.X_train, y_train = read_ucr_train_data(dataset_dir, sep=sep)
self.X_test, y_test = read_ucr_test_data(dataset_dir, sep=sep)
# self.y_train = y_train
# self.y_test = y_test
all_lbls = np.r_[y_train, y_test]
uniq_lbls = np.unique(all_lbls)
new_lbls = np.argsort(uniq_lbls) # same if labels are 0..(nclasses-1)
mapping = dict(zip(uniq_lbls, new_lbls))
self.y_train = np.array([mapping[lbl] for lbl in y_train])
self.y_test = np.array([mapping[lbl] for lbl in y_test])
# self.nclasses = len(uniq_lbls)
# MelbournePedestrian has nans, even though not in missing data list
for X in (self.X_train, self.X_test):
for d in range(X.shape[1]):
col = X[:, d]
nan_idxs = np.isnan(col)
if nan_idxs.sum() > 0:
# print("self.name: ", self.name)
# print("original number of nans: ", np.sum(nan_idxs))
# X[nan_idxs, d] = col.mean()
fillval = np.nanmedian(col)
if np.isnan(fillval):
# handle all-nan cols, which happens in Crop
fillval = np.nanmedian(X)
col[nan_idxs] = fillval
# np.nan_to_num(col, copy=False, nan=np.median(col))
# print("new number of nans: ", np.isnan(X[:, d]).sum())
# print("new number of nans: ", np.isnan(col).sum())
if znorm:
self.X_train -= self.X_train.mean(axis=1, keepdims=True)
self.X_test -= self.X_test.mean(axis=1, keepdims=True)
eps = 1e-20
self.X_train *= 1 / (self.X_train.std(axis=1, keepdims=True) + eps)
self.X_test *= 1 / (self.X_test.std(axis=1, keepdims=True) + eps)
elif precondition:
# weaker than znormalization since one offset and scale applied
# to all dims and all samples in both train and test sets; this
# is basically just here because the values in MelbournePedestrian
# are huge and screw up numerical algorithms
self.orig_mean = np.mean(self.X_train)
self.X_train -= self.orig_mean
self.X_test -= self.orig_mean
self.orig_std = np.std(self.X_train)
self.X_train /= self.orig_std
self.X_test /= self.orig_std
assert len(self.X_train) == len(self.y_train)
assert len(self.X_test) == len(self.y_test)
# if self.name == 'MelbournePedestrian':
# print("I am MelbournePedestrian!")
# print('new labels: ', new_lbls)
# print("X_train num nans", np.sum(np.isnan(self.X_train)))
# print("X_test num nans", np.sum(np.isnan(self.X_test)))
# # import sys; sys.exit()
# if self.name == 'Wafer':
# print("original uniq labels train", np.unique(self.y_train))
# print("original uniq labels test", np.unique(self.y_test))
def all_ucr_dataset_dirs():
return _ucr_datasets_in_dir(UCR_DATASETS_DIR)
# ================================================================
# Private
# ================================================================
def _ucr_datasets_in_dir(dirpath):
datasetsPath = os.path.expanduser(dirpath)
files = os.listdir(datasetsPath)
rm_dir = 'Missing_value_and_variable_length_datasets_adjusted'
if rm_dir in files:
files.remove(rm_dir)
for i in range(len(files)):
files[i] = os.path.join(datasetsPath, files[i])
dirs = list(filter(os.path.isdir, files))
return dirs
@_memory.cache
def _readtxt(path, sep=None):
return np.genfromtxt(path, delimiter=sep).astype(np.float32)
def read_data_file(path, sep=None, mean_norm=False):
D = _readtxt(path, sep=sep)
    labels = D[:, 0].astype(int)
X = D[:, 1:]
if mean_norm:
X -= np.mean(X, axis=1, keepdims=True)
return (X, labels)
def name_from_dir(datasetDir):
return os.path.basename(datasetDir)
def dir_from_name(datasetName):
return os.path.join(paths.UCR, datasetName)
def read_ucr_data_in_dir(datasetDir, train, sep=None):
datasetName = name_from_dir(datasetDir)
if train:
fileName = datasetName + "_TRAIN.tsv"
else:
fileName = datasetName + "_TEST.tsv"
filePath = os.path.join(datasetDir, fileName)
return read_data_file(filePath, sep=sep)
def read_ucr_train_data(datasetDir, sep=None):
return read_ucr_data_in_dir(datasetDir, train=True, sep=sep)
def read_ucr_test_data(datasetDir, sep=None):
return read_ucr_data_in_dir(datasetDir, train=False, sep=sep)
# combines train and test data
def read_all_ucr_data(ucrDatasetDir):
X_train, Y_train = read_ucr_train_data(ucrDatasetDir)
X_test, Y_test = read_ucr_test_data(ucrDatasetDir)
X = np.r_[X_train, X_test]
Y = np.r_[Y_train, Y_test]
return X, Y
@_memory.cache
def load_ucr_dset_stats():
df = pd.read_csv(UCR_INFO_PATH)
df['l2-1nn-acc'] = 1. - df['ED (w=0)']
return df
# ================================================================ Main
@_memory.cache
def _load_ucr_stats_df():
stats = []
for i, datasetDir in enumerate(all_ucr_dataset_dirs()):
# Xtrain, _ = read_ucr_train_data(datasetDir)
# Xtest, Ytest = read_ucr_test_data(datasetDir)
dset = UCRDataset(datasetDir)
N, D = dset.X_train.shape
M, D = dset.X_test.shape
nclasses = len(np.unique(dset.y_test))
stats.append({'Dataset': dset.name, 'N': N, 'D': D, 'M': M,
'nclasses': nclasses})
# print('%30s:\t%d\t%d\t%d\t%d' % (name_from_dir(datasetDir),
# N, M, D, nclasses)
return pd.DataFrame.from_records(stats)
def main():
# dsets = all_ucr_datasets()
# for dset in dsets:
# print("loaded ucr dset:", dset.name)
# # return
df = _load_ucr_stats_df()
# df = df.sort_values(axis=1)
# df = df.loc[df['N'] > 100]
# df = df.loc[df['M'] > 100]
print("ucr dset stats:")
# print(df['M'].sort_values(ascending=False))
print("number of dsets:", df.shape[0])
print("mean, median Ntrain: ", df['N'].mean(), df['N'].median())
print("mean, median Ntest: ", df['M'].mean(), df['M'].median())
print("mean, median length: ", df['D'].mean(), df['D'].median())
mvals = df['M'].to_numpy()
mvals = np.sort(mvals)
length = len(mvals)
total_sizes = np.array([m * (length - i) for i, m in enumerate(mvals)])
max_idx = np.argmax(total_sizes)
best_m_cutoff = mvals[max_idx]
print("best num dsets, m, sz = ",
length - max_idx, best_m_cutoff, total_sizes[max_idx])
print("type of mvals: ", type(mvals))
for cutoff in [100, 200, 256, 300, 400, 500, 512, 1000]:
ndsets = (mvals >= cutoff).sum()
total_sz = total_sizes[ndsets-1]
print(f"n >= {cutoff}: {ndsets} dsets, total sz = {total_sz}")
# import matplotlib.pyplot as plt
# xvals = length - np.arange(length)
# # xvals = np.arange(length)
# # plt.plot(xvals, total_sizes[::-1])
# plt.plot(xvals, total_sizes)
# plt.plot(xvals, mvals)
# plt.show()
# df = df.loc[df['M'] >= best_m_cutoff]
# print("---- after cutting off M to maximize mat sizes:")
df = df.loc[df['N'] >= 128]
print("---- after cutting off N to number of centroids:")
print("number of dsets: ", len(df))
print("mean, median Ntrain: ", df['N'].mean(), df['N'].median())
print("mean, median Ntest: ", df['M'].mean(), df['M'].median())
print("mean, median length: ", df['D'].mean(), df['D'].median())
print("mean, median nclasses: ", df['nclasses'].mean(), df['nclasses'].median())
print("min, max nclasses: ", df['nclasses'].min(), df['nclasses'].max())
if __name__ == '__main__':
np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)})
main()
|
#!/bin/env python
import os
import numpy as np
from scipy import io
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR = '../datasets/svhn'
TRAIN_PATH = os.path.join(DATADIR, 'train_32x32.mat')
TEST_PATH = os.path.join(DATADIR, 'test_32x32.mat')
EXTRA_PATH = os.path.join(DATADIR, 'extra_32x32.mat')
def extract_data_from_mat_file(path):
matlab_dict = io.loadmat(path)
X, y = matlab_dict['X'], matlab_dict['y'].ravel()
X = np.transpose(X, (3, 0, 1, 2))
# make classes be 0-9 instead of 1-10; this way the classes line up
# with the actual digits
y[y == 10] = 0
assert len(y.shape) == 1
assert X.shape[0] == len(y)
assert X.shape[1] == 32
assert X.shape[2] == 32
assert X.shape[-1] == 3
return X, y
@_memory.cache
def load_data():
X_train, y_train = extract_data_from_mat_file(TRAIN_PATH)
X_test, y_test = extract_data_from_mat_file(TEST_PATH)
return (X_train, y_train), (X_test, y_test)
def load_extra_data():
return extract_data_from_mat_file(EXTRA_PATH)
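# Hedged sanity-check sketch (assumes the .mat files above have been downloaded;
# not called anywhere): after the remapping in extract_data_from_mat_file,
# labels are the digits 0-9 and images are N x 32 x 32 x 3 arrays.
def _svhn_sanity_sketch():
    (X_train, y_train), (X_test, y_test) = load_data()
    assert X_train.shape[1:] == (32, 32, 3)
    assert y_train.min() == 0 and y_train.max() == 9
    print("train/test sizes:", len(y_train), len(y_test))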
def main():
import matplotlib.pyplot as plt
(X_train, y_train), (X_test, y_test) = load_data()
# hacky way to visualize extra data using same code
# X_extra, y_extra = load_extra_data()
# X_train, X_test = X_extra, X_extra
# y_train, y_test = y_extra, y_extra
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
X = X_test if i % 2 else X_train
y = y_test if i % 2 else y_train
idx = np.random.choice(X.shape[0])
ax.imshow(X[idx])
ax.set_title("class = {}".format(y[idx]))
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
#!/bin/env python
"""utility functions for data munging"""
import numpy as np
import sklearn.model_selection
def split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
    np.random.seed(random_state)
return sklearn.model_selection.train_test_split(
X, Y, train_size=train_frac, random_state=random_state)
def stratified_split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
return sklearn.model_selection.train_test_split(
X, Y, train_size=train_frac, stratify=Y, random_state=random_state)
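# Hedged usage sketch (not called anywhere): both helpers mirror sklearn's
# train_test_split signature; the stratified variant keeps the class
# proportions of Y (roughly) equal across the train and test splits.
def _split_sketch():
    X = np.arange(100).reshape(50, 2)
    Y = np.array([0] * 40 + [1] * 10)
    X_train, X_test, y_train, y_test = stratified_split_train_test(X, Y)
    # an 80/20 split of a 40/10 class balance puts 2 positives in the test set
    assert (y_test == 1).sum() == 2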
|
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
import os
import numpy as np
import matplotlib.pyplot as plt
from joblib import Memory
import files  # local file-listing helpers (import path assumed)
import paths  # local module defining dataset locations (import path assumed)
_memory = Memory('.', verbose=0)
DATA_DIR = paths.SHAREE_ECG
NUM_RECORDINGS = 139
NUM_CHANNELS = 3
RAW_DTYPE = np.uint16
# RAW_DTYPE = np.int16
SAMPLES_PER_SEC = 128
SAMPLES_PER_MIN = SAMPLES_PER_SEC * 60
SAMPLES_PER_HOUR = SAMPLES_PER_MIN * 60
@_memory.cache
def load_recording_ids():
fpaths = files.list_files(DATA_DIR, abs_paths=False, endswith='.dat')
assert len(fpaths) == NUM_RECORDINGS
return fpaths
@_memory.cache
def load_recording(rec_id, limit_nhours=None, dtype=np.float32):
# dtype = np.float32 if dtype is None else dtype
path = os.path.join(DATA_DIR, rec_id)
a = np.fromfile(path, dtype=RAW_DTYPE)
assert len(a) % NUM_CHANNELS == 0
a = a.reshape(-1, NUM_CHANNELS) # looks like it's rowmajor
# a = a.reshape(NUM_CHANNELS, -1).T # is colmajor clearly wrong? EDIT: yes
if limit_nhours and limit_nhours > 0:
a = a[:int(limit_nhours * SAMPLES_PER_HOUR)]
a = a[SAMPLES_PER_MIN:] # often a bunch of garbage at the beginning
a = a.astype(dtype)
# small amount of smoothing since heavily oversampled + noisy
# filt = np.hamming(5).astype(np.float32)
filt = np.hamming(5).astype(np.float32)
filt /= np.sum(filt)
for j in range(a.shape[1]):
a[:, j] = np.convolve(a[:, j], filt, mode='same')
return a
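# Hedged illustration (synthetic data, not tied to any recording, not called
# anywhere): the normalized 5-tap Hamming window used above is just a weighted
# moving average, so it damps sample-to-sample noise without shifting the
# local mean of the signal.
def _smoothing_sketch():
    rng = np.random.RandomState(0)
    x = np.sin(np.linspace(0, 4 * np.pi, 512)) + 0.1 * rng.randn(512)
    filt = np.hamming(5).astype(np.float32)
    filt /= np.sum(filt)
    smoothed = np.convolve(x, filt, mode='same')
    assert smoothed.shape == x.shape
    print("std before/after smoothing:", x.std(), smoothed.std())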
# def load_recordings(generator=False, plot=False, **kwargs):
def load_recordings(plot=False, **kwargs):
rec_ids = load_recording_ids()
recs = []
for i, rec_id in enumerate(rec_ids):
print("loading rec id: ", rec_id)
rec = load_recording(rec_id, **kwargs)
recs.append(rec)
if plot:
if i < 5:
offset = SAMPLES_PER_MIN
a = rec[offset:(offset + 1000)]
print('about to plot recording', rec_id)
plt.figure(figsize=(9, 7))
plt.plot(a)
plt.show()
else:
return
return recs
if __name__ == '__main__':
# print("done")
print("about to call load_recordings")
load_recordings(plot=True)
# print("rec ids: ", load_recording_ids())
print("called load_recordings")
|
#!/bin/env python
# Load 12-lead ECG recordings from the St Petersburg INCART Arrhythmia Database:
# https://physionet.org/content/incartdb/1.0.0/
import os
import numpy as np
import matplotlib.pyplot as plt
from joblib import Memory
import files  # local file-listing helpers (import path assumed)
import paths  # local module defining dataset locations (import path assumed)
_memory = Memory('.', verbose=0)
DATA_DIR = paths.INCART_ECG
NUM_RECORDINGS = 75
NUM_CHANNELS = 12
RAW_DTYPE = np.int16
SAMPLES_PER_SEC = 257
SAMPLES_PER_MIN = SAMPLES_PER_SEC * 60
SAMPLES_PER_HOUR = SAMPLES_PER_MIN * 60
@_memory.cache
def load_recording_ids():
fpaths = files.list_files(DATA_DIR, abs_paths=False, endswith='.dat')
assert len(fpaths) == NUM_RECORDINGS
return fpaths
@_memory.cache
def load_recording(rec_id, limit_nhours=None, dtype=np.float32):
path = os.path.join(DATA_DIR, rec_id)
a = np.fromfile(path, dtype=RAW_DTYPE)
assert len(a) % NUM_CHANNELS == 0
a = a.reshape(-1, NUM_CHANNELS) # yep, clearly rowmajor when plotted
if limit_nhours and limit_nhours > 0:
a = a[:int(limit_nhours * SAMPLES_PER_HOUR)]
a = a[SAMPLES_PER_MIN:] # often a bunch of garbage at the beginning
a = a.astype(dtype)
a -= a.mean(axis=0) # just so r_sq values are more meaningful
# small amount of smoothing since heavily oversampled + noisy
# filt = np.hamming(5).astype(np.float32)
# filt = np.hamming(5).astype(np.float32)
# filt /= np.sum(filt)
# for j in range(a.shape[1]):
# a[:, j] = np.convolve(a[:, j], filt, mode='same')
return a
def load_recordings(plot=False, **kwargs):
rec_ids = load_recording_ids()
recs = []
for i, rec_id in enumerate(rec_ids):
print("loading rec id: ", rec_id)
rec = load_recording(rec_id, **kwargs)
recs.append(rec)
if plot:
if i < 5:
offset = 0
a = rec[offset:(offset + 1000)]
print("plotting recording {} with shape: {}".format(
rec_id, rec.shape))
plt.figure(figsize=(9, 7))
plt.plot(a)
plt.show()
else:
return
return recs
if __name__ == '__main__':
print("about to call load_recordings")
load_recordings(plot=True)
print("called load_recordings")
|
#!/bin/env python
# from __future__ import absolute_import, division, print_function
import numpy as np
from joblib import Memory
import imgs   # local image-loading helpers (import path assumed)
import paths  # local module defining dataset locations (import path assumed)
_memory = Memory('.', verbose=1)
DATADIR_101 = paths.CALTECH_101
DATADIR_256 = paths.CALTECH_256
# _DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center', verbose=2)
_DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center')
_CALTECH_101_KWARGS = dict(
dirpath=DATADIR_101, remove_classes='BACKGROUND_Google')
_CALTECH_256_KWARGS = dict(
dirpath=DATADIR_256, remove_classes='257.clutter')
@_memory.cache
def load_caltech101(**kwargs):
    for key, val in _DEFAULT_CALTECH_KWARGS.items():
        kwargs.setdefault(key, val)
return imgs.load_jpegs_from_dir(**_CALTECH_101_KWARGS, **kwargs)
@_memory.cache
def load_caltech256(**kwargs):
    for key, val in _DEFAULT_CALTECH_KWARGS.items():
        kwargs.setdefault(key, val)
return imgs.load_jpegs_from_dir(**_CALTECH_256_KWARGS, **kwargs)
@_memory.cache
def load_caltech101_ids(**kwargs):
return imgs.load_jpegs_from_dir(
**_CALTECH_101_KWARGS, only_return_path=True, **kwargs)
@_memory.cache
def load_caltech256_ids(**kwargs):
return imgs.load_jpegs_from_dir(
**_CALTECH_256_KWARGS, only_return_path=True, **kwargs)
# @_memory.cache
def load_caltech_img(img_id, **kwargs):
    for key, val in _DEFAULT_CALTECH_KWARGS.items():
        kwargs.setdefault(key, val)
path = img_id # load_jpegs_from_dir returns abs path as id
return imgs.load_jpg(path, **kwargs).astype(np.float32)
# img = imgs.load_jpg(path, **kwargs).astype(np.float32)
# print("img.shape", img.shape)
# assert img.shape[:2] == (224, 224)
# return img
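# Hedged usage sketch (assumes the Caltech-101 images are on disk and that the
# *_ids loaders return the same ((ids, y), label2cls) structure as
# load_jpegs_from_dir; not called anywhere): the ids are absolute paths, which
# load_caltech_img resolves, so images can be loaded lazily one at a time
# instead of materializing the whole array up front.
def _caltech_lazy_loading_sketch():
    (img_ids, y), label2cls = load_caltech101_ids()
    img = load_caltech_img(img_ids[0])
    print("first image shape:", img.shape, "label:", label2cls[y[0]])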
def main():
import matplotlib.pyplot as plt
# caltech 101
(X, y), label2cls = imgs.load_jpegs_from_dir(
# DATADIR_101, remove_classes='BACKGROUND_Google')
# DATADIR_101, remove_classes='BACKGROUND_Google', crop='center')
DATADIR_101, remove_classes='BACKGROUND_Google', pad='square')
# # DATADIR_101, remove_classes='BACKGROUND_Google', resample=(224, 224))
# caltech 256
# (X, y), label2cls = imgs.load_jpegs_from_dir(
# DATADIR_256, remove_classes='257.clutter', verbose=2)
if isinstance(X, np.ndarray):
print("X shape: ", X.shape)
else:
print("X is a list of length", len(X))
print("X[0] has shape: ", X[0].shape)
print("y shape: ", y.shape)
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
idx = np.random.choice(len(X))
ax.imshow(X[idx])
label = label2cls[y[idx]]
ax.set_title(label)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
#!/bin/env python
import numpy as np
from joblib import Memory
import imgs  # local image-loading helpers (import path assumed)
_memory = Memory('.', verbose=1)
DATADIR_101 = '../datasets/caltech/101_ObjectCategories'
def main():
import matplotlib.pyplot as plt
# caltech 101
    (X, y), label2cls = imgs.load_jpegs_from_dir(
        DATADIR_101, remove_classes='BACKGROUND_Google')
if isinstance(X, np.ndarray):
print("X shape: ", X.shape)
else:
print("X is a list of length", len(X))
print("X[0] has shape: ", X[0].shape)
print("y shape: ", y.shape)
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
idx = np.random.choice(len(X))
ax.imshow(X[idx])
label = label2cls[y[idx]]
ax.set_title(label)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import timeit
import numpy as np
from sklearn.datasets import load_digits
import bolt  # Bolt's Python bindings (import name assumed)
# ================================================================ utils
def _dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def _dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def _element_size_bytes(x):
return np.dtype(x.dtype).itemsize
def _corr(x, y):
x, y = x.astype(np.float64), y.astype(np.float64)
x = x.ravel() - np.mean(x)
y = y.ravel() - np.mean(y)
r = np.mean(x * y) / (np.std(x) * np.std(y))
assert -1.00001 <= r <= 1.00001
return r
def _sq_dists_to_vectors(X, queries, rowNorms=None, queryNorms=None):
Q = queries.shape[0]
mat_size = X.shape[0] * Q
    mat_size_bytes = mat_size * _element_size_bytes(X[0] + queries[0])
    if mat_size_bytes > int(1e9):
        print("WARNING: _sq_dists_to_vectors: attempting to create a matrix "
              "of size {} ({}B)".format(mat_size, mat_size_bytes))
if rowNorms is None:
rowNorms = np.sum(X * X, axis=1, keepdims=True)
if queryNorms is None:
queryNorms = np.sum(queries * queries, axis=1)
dotProds = np.dot(X, queries.T)
return (-2 * dotProds) + rowNorms + queryNorms # len(X) x len(queries)
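# Hedged check (synthetic data, not called anywhere): _sq_dists_to_vectors
# relies on the expansion ||x - q||^2 = ||x||^2 - 2<x, q> + ||q||^2, so it
# should agree with the direct squared-distance computation up to
# floating-point error.
def _sq_dists_identity_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 8)
    Q = rng.randn(5, 8)
    fast = _sq_dists_to_vectors(X, Q)                # shape (20, 5)
    slow = np.array([_dists_sq(X, q) for q in Q]).T  # also (20, 5)
    assert np.allclose(fast, slow)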
def top_k_idxs(elements, k, smaller_better=True, axis=-1):
if smaller_better: # return indices of lowest elements
which_nn = np.arange(k)
return np.argpartition(elements, kth=which_nn, axis=axis)[:k]
else: # return indices of highest elements
which_nn = (elements.shape[axis] - 1 - np.arange(k))[::-1]
# print "elements.shape", elements.shape
# print "using which_nn: ", which_nn
return np.argpartition(elements, kth=which_nn, axis=axis)[-k:][::-1]
def _knn(X, Q, k=1000, print_every=5, block_sz=128):
nqueries = Q.shape[0]
nblocks = int(np.ceil(nqueries / float(block_sz)))
truth = np.full((nqueries, k), -999, dtype=np.int32)
if nqueries <= block_sz:
dists = _sq_dists_to_vectors(Q, X)
assert dists.shape == (Q.shape[0], X.shape[0])
for i in range(nqueries):
truth[i, :] = top_k_idxs(dists[i, :], k)
return truth
for b in range(nblocks):
# recurse to fill in knn for each block
start = b * block_sz
end = min(start + block_sz, nqueries)
rows = Q[start:end, :]
truth[start:end, :] = _knn(X, rows, k=k, block_sz=block_sz)
if b % print_every == 0:
print("computing top k for query block " \
"{} (queries {}-{})...".format(b, start, end))
assert np.all(truth != -999)
return truth
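# Hedged check (synthetic data, not called anywhere): on a tiny problem the
# blocked _knn above should return the same neighbor sets as a brute-force
# argsort of the squared distances.
def _knn_bruteforce_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 16)
    Q = rng.randn(7, 16)
    k = 5
    approx = _knn(X, Q, k=k, block_sz=4)
    for i, q in enumerate(Q):
        exact = np.argsort(_dists_sq(X, q))[:k]
        assert set(approx[i]) == set(exact)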
def _create_randn_encoder(Ntrain=100, Ntest=20, D=64):
enc = bolt.Encoder()
X_train = np.random.randn(Ntrain, D)
X_test = np.random.randn(Ntest, D)
enc.fit(X_train, just_train=True)
enc.set_data(X_test)
return enc
# ================================================================ tests
def test_smoketest():
"""Test that `bolt.Encoder`'s methods don't crash"""
D = 64
enc = _create_randn_encoder(D=D)
Nqueries = 5
Q = np.random.randn(Nqueries, D)
[enc.transform(q) for q in Q]
for k in [1, 3]:
[enc.knn(q, k) for q in Q]
def _fmt_float(x):
return '{}.'.format(int(x)) if x == int(x) else '{:.3f}'.format(x)
def _load_digits_X_Q(nqueries):
X, _ = load_digits(return_X_y=True)
return X[:-nqueries], X[-nqueries:] # X, Q
def test_time_space_savings(): # mostly to verify readme code
np.set_printoptions(formatter={'float_kind': _fmt_float})
nqueries = 20
X, Q = _load_digits_X_Q(nqueries)
enc = bolt.Encoder(accuracy='lowest', reduction=bolt.Reductions.DOT_PRODUCT)
enc.fit(X)
# massive space savings
print("original space usage: {}B".format(X.nbytes)) # 1777 * 64 * 8B = 909KB
print("bolt space usage: {}B".format(enc.nbytes)) # 1777 * 2B = 3.55KB
# massive time savings (~10x here, but often >100x on larger datasets
# with less Python overhead; see the Bolt paper)
t_np = timeit.Timer(lambda: [np.dot(X, q) for q in Q]).timeit(5) # ~8ms
t_bolt = timeit.Timer(lambda: [enc.transform(q) for q in Q]).timeit(5) # ~800us
print("Numpy / BLAS time, Bolt time: {:.3f}ms, {:.3f}ms".format(
t_np * 1000, t_bolt * 1000))
def test_unquantize():
X, Q = _load_digits_X_Q(nqueries=20)
enc = bolt.Encoder('dot', accuracy='high').fit(X)
dots_true = [np.dot(X, q) for q in Q]
dots_bolt = [enc.transform(q, unquantize=True) for q in Q]
diffs = [true_vals - bolt_vals
for true_vals, bolt_vals in zip(dots_true, dots_bolt)]
mse = np.mean([np.mean(diff*diff) for diff in diffs])
var = np.mean([np.var(true_vals) for true_vals in dots_true])
print("dot product unquantize mse / variance: ", mse / var)
assert (mse / var) < .01
# print "true, bolt dot prods"
# print dots_true[0][:20].astype(np.int32)
# print dots_bolt[0][:20].astype(np.int32)
enc = bolt.Encoder('l2', accuracy='high').fit(X)
dists_true = [_dists_sq(X, q) for q in Q]
dists_bolt = [enc.transform(q, unquantize=True) for q in Q]
diffs = [true_vals - bolt_vals
for true_vals, bolt_vals in zip(dists_true, dists_bolt)]
mse = np.mean([np.mean(diff*diff) for diff in diffs])
    var = np.mean([np.var(true_vals) for true_vals in dists_true])
print("squared l2 unquantize mse / variance: ", mse / var)
assert (mse / var) < .01
def test_basic():
# np.set_printoptions(precision=3)
np.set_printoptions(formatter={'float_kind': _fmt_float})
nqueries = 20
# nqueries = 10
# nqueries = 3
X, Q = _load_digits_X_Q(nqueries)
# TODO rm this block
# shift = 100.
# shift = 100
# scaleby = 1.
# scaleby = 3.5 # acc goes to **** at accelerating rate as this gets larger...
# scaleby = 4
# scaleby = 1.0
# X, Q = X + shift, Q + shift
# X, Q = X * scaleby, Q * scaleby
# X = X[:200]
# X = X[:50]
# X = X[:20]
# X, _ = load_digits(return_X_y=True)
# Q = X[-nqueries:]
# X = X[:-nqueries]
# print "X.shape", X.shape
# print "X nbytes", X.nbytes
# ------------------------------------------------ squared l2
enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.SQUARED_EUCLIDEAN)
enc.fit(X)
l2_corrs = np.empty(len(Q))
for i, q in enumerate(Q):
        l2_true = _dists_sq(X, q).astype(int)
l2_bolt = enc.transform(q)
l2_corrs[i] = _corr(l2_true, l2_bolt)
if i == nqueries - 1:
print("l2 true: ", l2_true)
print("l2 bolt: ", l2_bolt)
print("corr: ", l2_corrs[i])
mean_l2 = np.mean(l2_corrs)
std_l2 = np.std(l2_corrs)
assert mean_l2 > .95
print("--> squared l2 dist correlation: {} +/- {}".format(mean_l2, std_l2))
# return
# ------------------------------------------------ dot product
enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.DOT_PRODUCT)
enc.fit(X)
dot_corrs = np.empty(nqueries)
for i, q in enumerate(Q):
dots_true = np.dot(X, q)
dots_bolt = enc.transform(q)
dot_corrs[i] = _corr(dots_true, dots_bolt)
mean_dot = np.mean(dot_corrs)
std_dot = np.std(dot_corrs)
assert mean_dot > .95
print("--> dot product correlation: {} +/- {}".format(mean_dot, std_dot))
# ------------------------------------------------ l2 knn
enc = bolt.Encoder(accuracy='low', reduction='l2')
enc.fit(X)
k_bolt = 10 # tell bolt to search for true knn
k_true = 10 # compute this many true neighbors
true_knn = _knn(X, Q, k_true)
bolt_knn = [enc.knn(q, k_bolt) for q in Q]
    contained = np.empty((nqueries, k_bolt), dtype=bool)
for i in range(nqueries):
true_neighbors = true_knn[i]
bolt_neighbors = bolt_knn[i]
for j in range(k_bolt):
contained[i, j] = bolt_neighbors[j] in true_neighbors
precision = np.mean(contained)
print("--> l2 knn precision@{}: {}".format(k_bolt, precision))
assert precision > .6
# # print "true_knn, bolt_knn:"
# # print true_knn[:20, :20]
# # print bolt_knn[:20]
# ------------------------------------------------ dot knn
enc = bolt.Encoder(accuracy='low', reduction='dot')
# enc = bolt.Encoder(accuracy='high', reduction='dot')
enc.fit(X)
k_bolt = 10 # tell bolt to search for true knn
k_true = 10 # compute this many true neighbors
true_dists = np.dot(X, Q.T)
# true_dists = [np.dot(X, q) for q in Q]
true_knn = np.empty((nqueries, k_true), dtype=np.int64)
for i in range(nqueries):
true_knn[i, :] = top_k_idxs(
true_dists[:, i], k_true, smaller_better=False)
bolt_knn = [enc.knn(q, k_bolt) for q in Q]
    contained = np.empty((len(Q), k_bolt), dtype=bool)
for i in range(len(Q)):
true_neighbors = true_knn[i]
# bolt_dists = enc.transform(Q[i])
# bolt_neighbors = top_k_idxs(bolt_dists, k_bolt, smaller_better=True)
bolt_neighbors = bolt_knn[i] # TODO uncomment
for j in range(k_bolt):
contained[i, j] = bolt_neighbors[j] in true_neighbors
precision = np.mean(contained)
print("--> max inner product knn precision@{}: {}".format(
k_bolt, precision))
assert precision > .6
# print("true_knn, bolt_knn:")
# print(true_knn[:5])
# print(bolt_knn[:5])
if __name__ == '__main__':
test_basic()
|