# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class NBLoss(torch.nn.Module):
def __init__(self):
super(NBLoss, self).__init__()
def forward(self, mu, y, theta, eps=1e-8):
"""Negative binomial negative log-likelihood. It assumes targets `y` with n
rows and d columns, but estimates `yhat` with n rows and 2d columns.
The columns 0:d of `yhat` contain estimated means, the columns d:2*d of
`yhat` contain estimated variances. This module assumes that the
estimated mean and inverse dispersion are positive---for numerical
stability, it is recommended that the minimum estimated variance is
greater than a small number (1e-3).
Parameters
----------
yhat: Tensor
Torch Tensor of reeconstructed data.
y: Tensor
Torch Tensor of ground truth data.
eps: Float
numerical stability constant.
"""
if theta.ndimension() == 1:
# In this case, we reshape theta for broadcasting
theta = theta.view(1, theta.size(0))
log_theta_mu_eps = torch.log(theta + mu + eps)
res = (
theta * (torch.log(theta + eps) - log_theta_mu_eps)
+ y * (torch.log(mu + eps) - log_theta_mu_eps)
+ torch.lgamma(y + theta)
- torch.lgamma(theta)
- torch.lgamma(y + 1)
)
res = _nan2inf(res)
return -torch.mean(res)
def _nan2inf(x):
return torch.where(torch.isnan(x), torch.zeros_like(x) + np.inf, x)
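# Editor's note: a minimal usage sketch for NBLoss (added for illustration;
# shapes and values are hypothetical). Means and inverse dispersions must be
# positive, and targets are raw counts.
def _demo_nbloss():
    mu = torch.rand(4, 10) + 0.1               # positive per-gene means
    theta = torch.rand(10) + 0.1               # per-gene inverse dispersion
    y = torch.randint(0, 5, (4, 10)).float()   # count targets
    return NBLoss()(mu, y, theta)              # scalar mean negative log-likelihood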
class MLP(torch.nn.Module):
"""
A multilayer perceptron with ReLU activations and optional BatchNorm.
"""
def __init__(self, sizes, batch_norm=True, last_layer_act="linear"):
super(MLP, self).__init__()
layers = []
for s in range(len(sizes) - 1):
layers += [
torch.nn.Linear(sizes[s], sizes[s + 1]),
torch.nn.BatchNorm1d(sizes[s + 1])
if batch_norm and s < len(sizes) - 2
else None,
torch.nn.ReLU(),
]
layers = [l for l in layers if l is not None][:-1]
self.activation = last_layer_act
if self.activation == "linear":
pass
elif self.activation == "ReLU":
self.relu = torch.nn.ReLU()
else:
raise ValueError("last_layer_act must be one of 'linear' or 'ReLU'")
self.network = torch.nn.Sequential(*layers)
def forward(self, x):
if self.activation == "ReLU":
x = self.network(x)
dim = x.size(1) // 2
return torch.cat((self.relu(x[:, :dim]), x[:, dim:]), dim=1)
return self.network(x)
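# Editor's note: illustrative sketch of the MLP construction (added; the
# sizes are hypothetical). With last_layer_act="ReLU", the first half of the
# output is rectified and the second half stays linear, matching the
# mean/variance split produced by the decoder.
def _demo_mlp():
    mlp = MLP([10, 32, 32, 8], batch_norm=True, last_layer_act="ReLU")
    out = mlp(torch.randn(4, 10))   # shape (4, 8); out[:, :4] is non-negative
    return out.shape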
class GeneralizedSigmoid(torch.nn.Module):
"""
Sigmoid, log-sigmoid or linear functions for encoding dose-response for
drug perurbations.
"""
def __init__(self, dim, device, nonlin="sigmoid"):
"""Sigmoid modeling of continuous variable.
Params
------
nonlin : str (default: logsigm)
One of logsigm, sigm.
"""
super(GeneralizedSigmoid, self).__init__()
self.nonlin = nonlin
self.beta = torch.nn.Parameter(
torch.ones(1, dim, device=device), requires_grad=True
)
self.bias = torch.nn.Parameter(
torch.zeros(1, dim, device=device), requires_grad=True
)
def forward(self, x):
if self.nonlin == "logsigm":
c0 = self.bias.sigmoid()
return (torch.log1p(x) * self.beta + self.bias).sigmoid() - c0
elif self.nonlin == "sigm":
c0 = self.bias.sigmoid()
return (x * self.beta + self.bias).sigmoid() - c0
else:
return x
def one_drug(self, x, i):
if self.nonlin == "logsigm":
c0 = self.bias[0][i].sigmoid()
return (torch.log1p(x) * self.beta[0][i] + self.bias[0][i]).sigmoid() - c0
elif self.nonlin == "sigm":
c0 = self.bias[0][i].sigmoid()
return (x * self.beta[0][i] + self.bias[0][i]).sigmoid() - c0
else:
return x
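# Editor's note: illustrative sketch of the dose-response encodings (added;
# values are hypothetical). Because the bias sigmoid c0 is subtracted, a zero
# dose always maps to a zero latent response for 'sigm' and 'logsigm'.
def _demo_generalized_sigmoid():
    doses = torch.tensor([[0.0, 0.5, 1.0]])
    sigm = GeneralizedSigmoid(dim=3, device="cpu", nonlin="sigm")
    logsigm = GeneralizedSigmoid(dim=3, device="cpu", nonlin="logsigm")
    return sigm(doses), logsigm(doses)  # first column is exactly 0 in both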
class CPA(torch.nn.Module):
"""
Our main module, the CPA autoencoder
"""
def __init__(
self,
num_genes,
num_drugs,
num_covariates,
device="cuda",
seed=0,
patience=5,
loss_ae="gauss",
doser_type="mlp",
decoder_activation="linear",
hparams="",
):
super(CPA, self).__init__()
# set generic attributes
self.num_genes = num_genes
self.num_drugs = num_drugs
self.num_covariates = num_covariates
self.device = device
self.seed = seed
self.loss_ae = loss_ae
# early-stopping
self.patience = patience
self.best_score = -1e3
self.patience_trials = 0
# set hyperparameters
self.set_hparams_(hparams)
# set models
self.encoder = MLP(
[num_genes]
+ [self.hparams["autoencoder_width"]] * self.hparams["autoencoder_depth"]
+ [self.hparams["dim"]]
)
self.decoder = MLP(
[self.hparams["dim"]]
+ [self.hparams["autoencoder_width"]] * self.hparams["autoencoder_depth"]
+ [num_genes * 2],
last_layer_act=decoder_activation,
)
self.adversary_drugs = MLP(
[self.hparams["dim"]]
+ [self.hparams["adversary_width"]] * self.hparams["adversary_depth"]
+ [num_drugs]
)
self.loss_adversary_drugs = torch.nn.BCEWithLogitsLoss()
self.doser_type = doser_type
if doser_type == "mlp":
self.dosers = torch.nn.ModuleList()
for _ in range(num_drugs):
self.dosers.append(
MLP(
[1]
+ [self.hparams["dosers_width"]] * self.hparams["dosers_depth"]
+ [1],
batch_norm=False,
)
)
else:
self.dosers = GeneralizedSigmoid(num_drugs, self.device, nonlin=doser_type)
if self.num_covariates == [0]:
pass
else:
assert 0 not in self.num_covariates
self.adversary_covariates = []
self.loss_adversary_covariates = []
            self.covariates_embeddings = (
                []
            )  # TODO: check that dict assignment is possible via covariate names and whether dicts can be used in optimisation
for num_covariate in self.num_covariates:
self.adversary_covariates.append(
MLP(
[self.hparams["dim"]]
+ [self.hparams["adversary_width"]]
* self.hparams["adversary_depth"]
+ [num_covariate]
)
)
self.loss_adversary_covariates.append(torch.nn.CrossEntropyLoss())
self.covariates_embeddings.append(
torch.nn.Embedding(num_covariate, self.hparams["dim"])
)
self.covariates_embeddings = torch.nn.Sequential(
*self.covariates_embeddings
)
self.drug_embeddings = torch.nn.Embedding(self.num_drugs, self.hparams["dim"])
# losses
if self.loss_ae == "nb":
self.loss_autoencoder = NBLoss()
elif self.loss_ae == 'gauss':
self.loss_autoencoder = nn.GaussianNLLLoss()
self.iteration = 0
self.to(self.device)
# optimizers
has_drugs = self.num_drugs > 0
has_covariates = self.num_covariates[0] > 0
get_params = lambda model, cond: list(model.parameters()) if cond else []
_parameters = (
get_params(self.encoder, True)
+ get_params(self.decoder, True)
+ get_params(self.drug_embeddings, has_drugs)
)
        if has_covariates:
            # covariates_embeddings only exists when the model has covariates
            for emb in self.covariates_embeddings:
                _parameters.extend(get_params(emb, has_covariates))
self.optimizer_autoencoder = torch.optim.Adam(
_parameters,
lr=self.hparams["autoencoder_lr"],
weight_decay=self.hparams["autoencoder_wd"],
)
_parameters = get_params(self.adversary_drugs, has_drugs)
        if has_covariates:
            for adv in self.adversary_covariates:
                _parameters.extend(get_params(adv, has_covariates))
self.optimizer_adversaries = torch.optim.Adam(
_parameters,
lr=self.hparams["adversary_lr"],
weight_decay=self.hparams["adversary_wd"],
)
if has_drugs:
self.optimizer_dosers = torch.optim.Adam(
self.dosers.parameters(),
lr=self.hparams["dosers_lr"],
weight_decay=self.hparams["dosers_wd"],
)
# learning rate schedulers
self.scheduler_autoencoder = torch.optim.lr_scheduler.StepLR(
self.optimizer_autoencoder, step_size=self.hparams["step_size_lr"]
)
self.scheduler_adversary = torch.optim.lr_scheduler.StepLR(
self.optimizer_adversaries, step_size=self.hparams["step_size_lr"]
)
if has_drugs:
self.scheduler_dosers = torch.optim.lr_scheduler.StepLR(
self.optimizer_dosers, step_size=self.hparams["step_size_lr"]
)
self.history = {"epoch": [], "stats_epoch": []}
def set_hparams_(self, hparams):
"""
Set hyper-parameters to default values or values fixed by user for those
hyper-parameters specified in the JSON string `hparams`.
"""
self.hparams = {
"dim": 128,
"dosers_width": 128,
"dosers_depth": 2,
"dosers_lr": 4e-3,
"dosers_wd": 1e-7,
"autoencoder_width": 128,
"autoencoder_depth": 3,
"adversary_width": 64,
"adversary_depth": 2,
"reg_adversary": 60,
"penalty_adversary": 60,
"autoencoder_lr": 3e-4,
"adversary_lr": 3e-4,
"autoencoder_wd": 4e-7,
"adversary_wd": 4e-7,
"adversary_steps": 3,
"batch_size": 256,
"step_size_lr": 45,
}
# the user may fix some hparams
if hparams != "":
if isinstance(hparams, str):
self.hparams.update(json.loads(hparams))
else:
self.hparams.update(hparams)
return self.hparams
def move_inputs_(self, genes, drugs, covariates):
"""
Move minibatch tensors to CPU/GPU.
"""
if genes.device.type != self.device:
genes = genes.to(self.device)
if drugs is not None:
drugs = drugs.to(self.device)
if covariates is not None:
covariates = [cov.to(self.device) for cov in covariates]
return (genes, drugs, covariates)
def compute_drug_embeddings_(self, drugs):
"""
Compute sum of drug embeddings, each of them multiplied by its
dose-response curve.
"""
if self.doser_type == "mlp":
doses = []
for d in range(drugs.size(1)):
this_drug = drugs[:, d].view(-1, 1)
doses.append(self.dosers[d](this_drug).sigmoid() * this_drug.gt(0))
return torch.cat(doses, 1) @ self.drug_embeddings.weight
else:
return self.dosers(drugs) @ self.drug_embeddings.weight
def predict(
self,
genes,
drugs,
covariates,
return_latent_basal=False,
return_latent_treated=False,
):
"""
Predict "what would have the gene expression `genes` been, had the
cells in `genes` with cell types `cell_types` been treated with
combination of drugs `drugs`.
"""
genes, drugs, covariates = self.move_inputs_(genes, drugs, covariates)
if self.loss_ae == 'nb':
genes = torch.log1p(genes)
latent_basal = self.encoder(genes)
latent_treated = latent_basal
if self.num_drugs > 0:
latent_treated = latent_treated + self.compute_drug_embeddings_(drugs)
if self.num_covariates[0] > 0:
for i, emb in enumerate(self.covariates_embeddings):
emb = emb.to(self.device)
latent_treated = latent_treated + emb(
covariates[i].argmax(1)
) #argmax because OHE
gene_reconstructions = self.decoder(latent_treated)
        # split the decoder output into per-gene means and variances
        dim = gene_reconstructions.size(1) // 2
        gene_means = gene_reconstructions[:, :dim]
        gene_vars = gene_reconstructions[:, dim:]
        if self.loss_ae == 'gauss':
            # convert variance estimates to a positive value in [1e-3, \infty)
            gene_vars = F.softplus(gene_vars).add(1e-3)
        if self.loss_ae == 'nb':
            # NB means must be positive as well
            gene_means = F.softplus(gene_means).add(1e-3)
        gene_reconstructions = torch.cat([gene_means, gene_vars], dim=1)
if return_latent_basal:
if return_latent_treated:
return gene_reconstructions, latent_basal, latent_treated
else:
return gene_reconstructions, latent_basal
if return_latent_treated:
return gene_reconstructions, latent_treated
return gene_reconstructions
def early_stopping(self, score):
"""
Decays the learning rate, and possibly early-stops training.
"""
        self.scheduler_autoencoder.step()
        self.scheduler_adversary.step()
        if self.num_drugs > 0:
            # scheduler_dosers only exists when the model has drugs
            self.scheduler_dosers.step()
if score > self.best_score:
self.best_score = score
self.patience_trials = 0
else:
self.patience_trials += 1
return self.patience_trials > self.patience
def update(self, genes, drugs, covariates):
"""
Update CPA's parameters given a minibatch of genes, drugs, and
cell types.
"""
genes, drugs, covariates = self.move_inputs_(genes, drugs, covariates)
gene_reconstructions, latent_basal = self.predict(
genes,
drugs,
covariates,
return_latent_basal=True,
)
dim = gene_reconstructions.size(1) // 2
gene_means = gene_reconstructions[:, :dim]
gene_vars = gene_reconstructions[:, dim:]
reconstruction_loss = self.loss_autoencoder(gene_means, genes, gene_vars)
adversary_drugs_loss = torch.tensor([0.0], device=self.device)
if self.num_drugs > 0:
adversary_drugs_predictions = self.adversary_drugs(latent_basal)
adversary_drugs_loss = self.loss_adversary_drugs(
adversary_drugs_predictions, drugs.gt(0).float()
)
adversary_covariates_loss = torch.tensor(
[0.0], device=self.device
)
if self.num_covariates[0] > 0:
adversary_covariate_predictions = []
for i, adv in enumerate(self.adversary_covariates):
adv = adv.to(self.device)
adversary_covariate_predictions.append(adv(latent_basal))
adversary_covariates_loss += self.loss_adversary_covariates[i](
adversary_covariate_predictions[-1], covariates[i].argmax(1)
)
# two place-holders for when adversary is not executed
adversary_drugs_penalty = torch.tensor([0.0], device=self.device)
adversary_covariates_penalty = torch.tensor([0.0], device=self.device)
if self.iteration % self.hparams["adversary_steps"]:
def compute_gradients(output, input):
grads = torch.autograd.grad(output, input, create_graph=True)
grads = grads[0].pow(2).mean()
return grads
if self.num_drugs > 0:
adversary_drugs_penalty = compute_gradients(
adversary_drugs_predictions.sum(), latent_basal
)
if self.num_covariates[0] > 0:
adversary_covariates_penalty = torch.tensor([0.0], device=self.device)
for pred in adversary_covariate_predictions:
adversary_covariates_penalty += compute_gradients(
pred.sum(), latent_basal
) # TODO: Adding up tensor sum, is that right?
self.optimizer_adversaries.zero_grad()
(
adversary_drugs_loss
+ self.hparams["penalty_adversary"] * adversary_drugs_penalty
+ adversary_covariates_loss
+ self.hparams["penalty_adversary"] * adversary_covariates_penalty
).backward()
self.optimizer_adversaries.step()
else:
self.optimizer_autoencoder.zero_grad()
if self.num_drugs > 0:
self.optimizer_dosers.zero_grad()
(
reconstruction_loss
- self.hparams["reg_adversary"] * adversary_drugs_loss
- self.hparams["reg_adversary"] * adversary_covariates_loss
).backward()
self.optimizer_autoencoder.step()
if self.num_drugs > 0:
self.optimizer_dosers.step()
self.iteration += 1
return {
"loss_reconstruction": reconstruction_loss.item(),
"loss_adv_drugs": adversary_drugs_loss.item(),
"loss_adv_covariates": adversary_covariates_loss.item(),
"penalty_adv_drugs": adversary_drugs_penalty.item(),
"penalty_adv_covariates": adversary_covariates_penalty.item(),
}
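    # Editor's note (added comment): `update` alternates two phases based on
    # iteration % adversary_steps. On steps where the remainder is non-zero it
    # trains the adversary classifiers on the basal latent (plus a gradient
    # penalty); on every adversary_steps-th step it trains the autoencoder and
    # dosers, subtracting the adversary losses so the basal latent becomes
    # invariant to drug and covariate information.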
    @classmethod
    def defaults(cls):
        """
        Returns the dictionary of default hyper-parameters for CPA.
        """
        return cls.set_hparams_(cls, "")
# --- end of file: CPA-main / cpa/model.py ---
import copy
import itertools
import os
import pprint
import time
from collections import defaultdict
from typing import Optional, Union, Tuple
import numpy as np
import pandas as pd
import scanpy as sc
import torch
from torch.distributions import (
NegativeBinomial,
Normal
)
from cpa.train import evaluate, prepare_cpa
from cpa.helper import _convert_mean_disp_to_counts_logits
from sklearn.metrics import r2_score
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from tqdm import tqdm
class API:
"""
API for CPA model to make it compatible with scanpy.
"""
def __init__(
self,
data,
perturbation_key="condition",
covariate_keys=["cell_type"],
split_key="split",
dose_key="dose_val",
control=None,
doser_type="mlp",
decoder_activation="linear",
loss_ae="gauss",
patience=200,
seed=0,
pretrained=None,
device="cuda",
save_dir="/tmp/", # directory to save the model
hparams={},
only_parameters=False,
):
"""
Parameters
----------
data : str or `AnnData`
AnndData object or a full path to the file in the .h5ad format.
covariate_keys : list (default: ['cell_type'])
List of names in the .obs of AnnData that should be used as
covariates.
split_key : str (default: 'split')
Name of the column in .obs of AnnData to use for splitting the
dataset into train, test and validation.
perturbation_key : str (default: 'condition')
Name of the column in .obs of AnnData to use for perturbation
variable.
        dose_key : str (default: 'dose_val')
            Name of the column in .obs of AnnData to use for the continuous
            covariate.
        doser_type : str (default: 'mlp')
            Type of the nonlinearity in the latent space for the continuous
            covariate encoding: sigm, logsigm, mlp.
decoder_activation : str (default: 'linear')
Last layer of the decoder.
loss_ae : str (default: 'gauss')
Loss (currently only gaussian loss is supported).
patience : int (default: 200)
Patience for early stopping.
seed : int (default: 0)
Random seed.
pretrained : str (default: None)
Full path to the pretrained model.
only_parameters : bool (default: False)
Whether to load only arguments or also weights from pretrained model.
save_dir : str (default: '/tmp/')
Folder to save the model.
        device : str (default: 'cuda')
            Device for model computations.
hparams : dict (default: {})
Parameters for the architecture of the CPA model.
        control : str
            Obs column with booleans that identifies control cells. If not
            provided, the model will look for them in adata.obs["control"].
"""
args = locals()
del args["self"]
if not (pretrained is None):
state, self.used_args, self.history = torch.load(
pretrained, map_location=torch.device(device)
)
self.args = self.used_args
self.args["data"] = data
self.args["covariate_keys"] = covariate_keys
self.args["device"] = device
self.args["control"] = control
if only_parameters:
state = None
print(f"Loaded ARGS of the model from:\t{pretrained}")
else:
print(f"Loaded pretrained model from:\t{pretrained}")
else:
state = None
self.args = args
self.model, self.datasets = prepare_cpa(self.args, state_dict=state)
if not (pretrained is None) and (not only_parameters):
self.model.history = self.history
self.args["save_dir"] = save_dir
self.args["hparams"] = self.model.hparams
if not (save_dir is None):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
dataset = self.datasets["training"]
self.perturbation_key = dataset.perturbation_key
self.dose_key = dataset.dose_key
self.covariate_keys = covariate_keys # very important, specifies the order of
# covariates during training
self.min_dose = dataset.drugs[dataset.drugs > 0].min().item()
self.max_dose = dataset.drugs[dataset.drugs > 0].max().item()
self.var_names = dataset.var_names
self.unique_perts = list(dataset.perts_dict.keys())
self.unique_covars = {}
for cov in dataset.covars_dict:
self.unique_covars[cov] = list(dataset.covars_dict[cov].keys())
self.num_drugs = dataset.num_drugs
self.perts_dict = dataset.perts_dict
self.covars_dict = dataset.covars_dict
self.drug_ohe = torch.Tensor(list(dataset.perts_dict.values()))
self.covars_ohe = {}
for cov in dataset.covars_dict:
self.covars_ohe[cov] = torch.LongTensor(
list(dataset.covars_dict[cov].values())
)
self.emb_covars = {}
for cov in dataset.covars_dict:
self.emb_covars[cov] = None
self.emb_perts = None
self.seen_covars_perts = None
self.comb_emb = None
self.control_cat = None
self.seen_covars_perts = {}
for k in self.datasets.keys():
self.seen_covars_perts[k] = np.unique(self.datasets[k].pert_categories)
self.measured_points = {}
self.num_measured_points = {}
for k in self.datasets.keys():
self.measured_points[k] = {}
self.num_measured_points[k] = {}
for pert in np.unique(self.datasets[k].pert_categories):
num_points = len(np.where(self.datasets[k].pert_categories == pert)[0])
self.num_measured_points[k][pert] = num_points
*cov_list, drug, dose = pert.split("_")
cov = "_".join(cov_list)
if not ("+" in dose):
dose = float(dose)
if cov in self.measured_points[k].keys():
if drug in self.measured_points[k][cov].keys():
self.measured_points[k][cov][drug].append(dose)
else:
self.measured_points[k][cov][drug] = [dose]
else:
self.measured_points[k][cov] = {drug: [dose]}
self.measured_points["all"] = copy.deepcopy(self.measured_points["training"])
for cov in self.measured_points["ood"].keys():
for pert in self.measured_points["ood"][cov].keys():
if pert in self.measured_points["training"][cov].keys():
self.measured_points["all"][cov][pert] = (
self.measured_points["training"][cov][pert].copy()
+ self.measured_points["ood"][cov][pert].copy()
)
else:
self.measured_points["all"][cov][pert] = self.measured_points[
"ood"
][cov][pert].copy()
def load_from_old(self, pretrained):
"""
Parameters
----------
pretrained : str
Full path to the pretrained model.
"""
print(f"Loaded pretrained model from:\t{pretrained}")
state, self.used_args, self.history = torch.load(
pretrained, map_location=torch.device(self.args["device"])
)
        self.model.load_state_dict(state)
self.model.history = self.history
def print_args(self):
pprint.pprint(self.args)
def load(self, pretrained):
"""
Parameters
----------
pretrained : str
Full path to the pretrained model.
""" # TODO fix compatibility
print(f"Loaded pretrained model from:\t{pretrained}")
state, self.used_args, self.history = torch.load(
pretrained, map_location=torch.device(self.args["device"])
)
        self.model.load_state_dict(state)
def train(
self,
max_epochs=1,
checkpoint_freq=20,
run_eval=False,
max_minutes=60,
filename="model.pt",
batch_size=None,
save_dir=None,
seed=0,
):
"""
Parameters
----------
        max_epochs : int (default: 1)
            Maximum number of epochs for training.
        checkpoint_freq : int (default: 20)
            Checkpoint frequency for saving intermediate results.
run_eval : bool (default: False)
Whether or not to run disentanglement and R2 evaluation during training.
max_minutes : int (default: 60)
Maximum computation time in minutes.
        filename : str (default: 'model.pt')
            Name of the file, without the directory path, to save the model.
            The name should have a .pt extension.
batch_size : int, optional (default: None)
Batch size for training. If None, uses default batch size specified
in hparams.
save_dir : str, optional (default: None)
Full path to the folder to save the model. If None, will use from
the path specified during init.
        seed : int (default: 0)
            Random seed.
"""
args = locals()
del args["self"]
if batch_size is None:
batch_size = self.model.hparams["batch_size"]
args["batch_size"] = batch_size
self.args["batch_size"] = batch_size
if save_dir is None:
save_dir = self.args["save_dir"]
print("Results will be saved to the folder:", save_dir)
self.datasets.update(
{
"loader_tr": torch.utils.data.DataLoader(
self.datasets["training"], batch_size=batch_size, shuffle=True
)
}
)
self.model.train()
start_time = time.time()
pbar = tqdm(range(max_epochs), ncols=80)
try:
for epoch in pbar:
epoch_training_stats = defaultdict(float)
for data in self.datasets["loader_tr"]:
genes, drugs, covariates = data[0], data[1], data[2:]
minibatch_training_stats = self.model.update(
genes, drugs, covariates
)
for key, val in minibatch_training_stats.items():
epoch_training_stats[key] += val
for key, val in epoch_training_stats.items():
epoch_training_stats[key] = val / len(self.datasets["loader_tr"])
if not (key in self.model.history.keys()):
self.model.history[key] = []
self.model.history[key].append(epoch_training_stats[key])
self.model.history["epoch"].append(epoch)
                elapsed_minutes = (time.time() - start_time) / 60
                self.model.history["elapsed_time_min"] = elapsed_minutes
                # decay learning rate if necessary
                # also check stopping condition: patience ran out OR
                # time ran out OR max epochs achieved
                stop = elapsed_minutes > max_minutes or (epoch == max_epochs - 1)
pbar.set_description(
f"Rec: {epoch_training_stats['loss_reconstruction']:.4f}, "
+ f"AdvPert: {epoch_training_stats['loss_adv_drugs']:.2f}, "
+ f"AdvCov: {epoch_training_stats['loss_adv_covariates']:.2f}"
)
if (epoch % checkpoint_freq) == 0 or stop:
                    if run_eval:
evaluation_stats = evaluate(self.model, self.datasets)
for key, val in evaluation_stats.items():
if not (key in self.model.history.keys()):
self.model.history[key] = []
self.model.history[key].append(val)
self.model.history["stats_epoch"].append(epoch)
stop = stop or self.model.early_stopping(
np.mean(evaluation_stats["test"])
)
else:
stop = stop or self.model.early_stopping(
np.mean(epoch_training_stats["test"])
)
evaluation_stats = None
if stop:
self.save(f"{save_dir}{filename}")
pprint.pprint(
{
"epoch": epoch,
"training_stats": epoch_training_stats,
"evaluation_stats": evaluation_stats,
"ellapsed_minutes": ellapsed_minutes,
}
)
print(f"Stop epoch: {epoch}")
break
except KeyboardInterrupt:
self.save(f"{save_dir}{filename}")
self.save(f"{save_dir}{filename}")
def save(self, filename):
"""
Parameters
----------
filename : str
Full path to save pretrained model.
"""
torch.save((self.model.state_dict(), self.args, self.model.history), filename)
self.history = self.model.history
print(f"Model saved to: {filename}")
def _init_pert_embeddings(self):
dose = 1.0
self.emb_perts = (
self.model.compute_drug_embeddings_(
dose * self.drug_ohe.to(self.model.device)
)
.cpu()
.clone()
.detach()
.numpy()
)
def get_drug_embeddings(self, dose=1.0, return_anndata=True):
"""
Parameters
----------
        dose : float (default: 1.0)
            Dose at which to evaluate latent embedding vector.
        return_anndata : bool, optional (default: True)
            Return embedding wrapped into anndata object.
        Returns
        -------
        If return_anndata is True, returns an anndata object. Otherwise, doesn't
        return anything. Always saves the embedding in self.emb_perts.
"""
self._init_pert_embeddings()
emb_perts = (
self.model.compute_drug_embeddings_(
dose * self.drug_ohe.to(self.model.device)
)
.cpu()
.clone()
.detach()
.numpy()
)
if return_anndata:
adata = sc.AnnData(emb_perts)
adata.obs[self.perturbation_key] = self.unique_perts
return adata
def _init_covars_embeddings(self):
combo_list = []
for covars_key in self.covariate_keys:
combo_list.append(self.unique_covars[covars_key])
if self.emb_covars[covars_key] is None:
i_cov = self.covariate_keys.index(covars_key)
self.emb_covars[covars_key] = dict(
zip(
self.unique_covars[covars_key],
self.model.covariates_embeddings[i_cov](
self.covars_ohe[covars_key].to(self.model.device).argmax(1)
)
.cpu()
.clone()
.detach()
.numpy(),
)
)
self.emb_covars_combined = {}
for combo in list(itertools.product(*combo_list)):
combo_name = "_".join(combo)
for i, cov in enumerate(combo):
covars_key = self.covariate_keys[i]
if i == 0:
emb = self.emb_covars[covars_key][cov]
else:
emb += self.emb_covars[covars_key][cov]
self.emb_covars_combined[combo_name] = emb
def get_covars_embeddings_combined(self, return_anndata=True):
"""
Parameters
----------
return_anndata : bool, optional (default: True)
Return embedding wrapped into anndata object.
Returns
-------
        If return_anndata is True, returns an anndata object. Otherwise, doesn't
        return anything. Always saves the embedding in self.emb_covars_combined.
"""
self._init_covars_embeddings()
if return_anndata:
adata = sc.AnnData(np.array(list(self.emb_covars_combined.values())))
adata.obs["covars"] = self.emb_covars_combined.keys()
return adata
def get_covars_embeddings(self, covars_tgt, return_anndata=True):
"""
Parameters
----------
covars_tgt : str
Name of covariate for which to return AnnData
return_anndata : bool, optional (default: True)
Return embedding wrapped into anndata object.
Returns
-------
        If return_anndata is True, returns an anndata object. Otherwise, doesn't
        return anything. Always saves the embedding in self.emb_covars.
"""
self._init_covars_embeddings()
if return_anndata:
adata = sc.AnnData(np.array(list(self.emb_covars[covars_tgt].values())))
adata.obs[covars_tgt] = self.emb_covars[covars_tgt].keys()
return adata
def _get_drug_encoding(self, drugs, doses=None):
"""
Parameters
----------
drugs : str
Drugs combination as a string, where individual drugs are separated
with a plus.
doses : str, optional (default: None)
Doses corresponding to the drugs combination as a string. Individual
drugs are separated with a plus.
        Returns
        -------
        One-hot encoding for a mixture of drugs.
"""
drug_mix = np.zeros([1, self.num_drugs])
atomic_drugs = drugs.split("+")
        # check for None before any string conversion, otherwise the default
        # dose of 1.0 per drug would never be applied
        if doses is None:
            doses_list = [1.0] * len(atomic_drugs)
        else:
            doses_list = [float(d) for d in str(doses).split("+")]
for j, drug in enumerate(atomic_drugs):
drug_mix += doses_list[j] * self.perts_dict[drug]
return drug_mix
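    # Editor's note (added comment): usage sketch, assuming a fitted API
    # instance `cpa_api`. A combination string like "drugA+drugB" with doses
    # "0.1+0.5" yields a single one-hot mixture row scaled by dose, e.g.
    #
    #   mix = cpa_api._get_drug_encoding("drugA+drugB", doses="0.1+0.5")
    #   # mix.shape == (1, cpa_api.num_drugs)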
def mix_drugs(self, drugs_list, doses_list=None, return_anndata=True):
"""
Gets a list of drugs combinations to mix, e.g. ['A+B', 'B+C'] and
corresponding doses.
Parameters
----------
drugs_list : list
List of drug combinations, where each drug combination is a string.
Individual drugs in the combination are separated with a plus.
        doses_list : list, optional (default: None)
            List of corresponding doses, where each dose combination is a string.
            Individual doses in the combination are separated with a plus.
return_anndata : bool, optional (default: True)
Return embedding wrapped into anndata object.
Returns
-------
If return_anndata is True, returns anndata structure of the combinations,
otherwise returns a np.array of corresponding embeddings.
"""
drug_mix = np.zeros([len(drugs_list), self.num_drugs])
for i, drug_combo in enumerate(drugs_list):
drug_mix[i] = self._get_drug_encoding(drug_combo, doses=doses_list[i])
emb = (
self.model.compute_drug_embeddings_(
torch.Tensor(drug_mix).to(self.model.device)
)
.cpu()
.clone()
.detach()
.numpy()
)
if return_anndata:
adata = sc.AnnData(emb)
adata.obs[self.perturbation_key] = drugs_list
adata.obs[self.dose_key] = doses_list
return adata
else:
return emb
def latent_dose_response(
self, perturbations=None, dose=None, contvar_min=0, contvar_max=1, n_points=100
):
"""
Parameters
----------
        perturbations : list (default: None)
            List of perturbations for which to return the latent dose-response.
            If None, all known perturbations are used.
        dose : np.array (default: None)
            Dose values. If None, default values will be generated on a grid:
            n_points in range [contvar_min, contvar_max].
        contvar_min : float (default: 0)
            Minimum dose value to generate for default option.
        contvar_max : float (default: 1)
            Maximum dose value to generate for default option.
n_points : int (default: 100)
Number of dose points to generate for default option.
Returns
-------
pd.DataFrame
"""
# dosers work only for atomic drugs. TODO add drug combinations
self.model.eval()
if perturbations is None:
perturbations = self.unique_perts
if dose is None:
dose = np.linspace(contvar_min, contvar_max, n_points)
n_points = len(dose)
df = pd.DataFrame(columns=[self.perturbation_key, self.dose_key, "response"])
for drug in perturbations:
d = np.where(self.perts_dict[drug] == 1)[0][0]
this_drug = torch.Tensor(dose).to(self.model.device).view(-1, 1)
if self.model.doser_type == "mlp":
response = (
(self.model.dosers[d](this_drug).sigmoid() * this_drug.gt(0))
.cpu()
.clone()
.detach()
.numpy()
.reshape(-1)
)
else:
response = (
self.model.dosers.one_drug(this_drug.view(-1), d)
.cpu()
.clone()
.detach()
.numpy()
.reshape(-1)
)
df_drug = pd.DataFrame(
list(zip([drug] * n_points, dose, list(response))),
columns=[self.perturbation_key, self.dose_key, "response"],
)
df = pd.concat([df, df_drug])
return df
def latent_dose_response2D(
self,
perturbations,
dose=None,
contvar_min=0,
contvar_max=1,
n_points=100,
):
"""
Parameters
----------
        perturbations : list
            List of exactly two atomic drugs for which to return the latent
            dose response. Drug combinations are not supported.
        dose : np.array (default: None)
            Dose values. If None, default values will be generated on a grid:
            n_points in range [contvar_min, contvar_max].
        contvar_min : float (default: 0)
            Minimum dose value to generate for default option.
        contvar_max : float (default: 1)
            Maximum dose value to generate for default option.
n_points : int (default: 100)
Number of dose points to generate for default option.
Returns
-------
pd.DataFrame
"""
# dosers work only for atomic drugs. TODO add drug combinations
assert len(perturbations) == 2, "You should provide a list of 2 perturbations."
self.model.eval()
if dose is None:
dose = np.linspace(contvar_min, contvar_max, n_points)
n_points = len(dose)
df = pd.DataFrame(columns=perturbations + ["response"])
response = {}
for drug in perturbations:
d = np.where(self.perts_dict[drug] == 1)[0][0]
this_drug = torch.Tensor(dose).to(self.model.device).view(-1, 1)
if self.model.doser_type == "mlp":
response[drug] = (
(self.model.dosers[d](this_drug).sigmoid() * this_drug.gt(0))
.cpu()
.clone()
.detach()
.numpy()
.reshape(-1)
)
else:
response[drug] = (
self.model.dosers.one_drug(this_drug.view(-1), d)
.cpu()
.clone()
.detach()
.numpy()
.reshape(-1)
)
        row = 0
        for i in range(len(dose)):
            for j in range(len(dose)):
                df.loc[row] = [
                    dose[i],
                    dose[j],
                    response[perturbations[0]][i] + response[perturbations[1]][j],
                ]
                row += 1
return df
def compute_comb_emb(self, thrh=30):
"""
Generates an AnnData object containing all the latent vectors of the
cov+dose*pert combinations seen during training.
Called in api.compute_uncertainty(), stores the AnnData in self.comb_emb.
Parameters
----------
Returns
-------
"""
if self.seen_covars_perts["training"] is None:
raise ValueError("Need to run parse_training_conditions() first!")
emb_covars = self.get_covars_embeddings_combined(return_anndata=True)
# Generate adata with all cov+pert latent vect combinations
tmp_ad_list = []
for cov_pert in self.seen_covars_perts["training"]:
if self.num_measured_points["training"][cov_pert] > thrh:
*cov_list, pert_loop, dose_loop = cov_pert.split("_")
cov_loop = "_".join(cov_list)
emb_perts_loop = []
if "+" in pert_loop:
pert_loop_list = pert_loop.split("+")
dose_loop_list = dose_loop.split("+")
for _dose in pd.Series(dose_loop_list).unique():
tmp_ad = self.get_drug_embeddings(dose=float(_dose))
tmp_ad.obs["pert_dose"] = tmp_ad.obs.condition + "_" + _dose
emb_perts_loop.append(tmp_ad)
emb_perts_loop = emb_perts_loop[0].concatenate(emb_perts_loop[1:])
X = emb_covars.X[
emb_covars.obs.covars == cov_loop
] + np.expand_dims(
emb_perts_loop.X[
emb_perts_loop.obs.pert_dose.isin(
[
pert_loop_list[i] + "_" + dose_loop_list[i]
for i in range(len(pert_loop_list))
]
)
].sum(axis=0),
axis=0,
)
if X.shape[0] > 1:
raise ValueError("Error with comb computation")
else:
emb_perts = self.get_drug_embeddings(dose=float(dose_loop))
X = (
emb_covars.X[emb_covars.obs.covars == cov_loop]
+ emb_perts.X[emb_perts.obs.condition == pert_loop]
)
tmp_ad = sc.AnnData(X=X)
tmp_ad.obs["cov_pert"] = "_".join([cov_loop, pert_loop, dose_loop])
tmp_ad_list.append(tmp_ad)
self.comb_emb = tmp_ad_list[0].concatenate(tmp_ad_list[1:])
def compute_uncertainty(self, cov, pert, dose, thrh=30):
"""
Compute uncertainties for the queried covariate+perturbation combination.
The distance from the closest condition in the training set is used as a
proxy for uncertainty.
Parameters
----------
        cov : dict
            Provide a value for each covariate (e.g. cell_type) as a dictionary
            for the queried uncertainty (e.g. cov_dict={'cell_type': 'A549'}).
        pert : string
            Perturbation for the queried uncertainty. In case of combinations the
            format has to be 'pertA+pertB'.
        dose : string
            String which contains the dose of the perturbation queried. In case
            of combinations the format has to be 'doseA+doseB'.
Returns
-------
min_cos_dist: float
Minimum cosine distance with the training set.
min_eucl_dist: float
Minimum euclidean distance with the training set.
closest_cond_cos: string
Closest training condition wrt cosine distances.
closest_cond_eucl: string
Closest training condition wrt euclidean distances.
"""
        if self.comb_emb is None:
            self.compute_comb_emb(thrh=thrh)
drug_ohe = torch.Tensor(self._get_drug_encoding(pert, doses=dose)).to(
self.model.device
)
pert = drug_ohe.expand([1, self.drug_ohe.shape[1]])
drug_emb = self.model.compute_drug_embeddings_(pert).detach().cpu().numpy()
cond_emb = drug_emb
for cov_key in cov:
cond_emb += self.emb_covars[cov_key][cov[cov_key]]
cos_dist = cosine_distances(cond_emb, self.comb_emb.X)[0]
min_cos_dist = np.min(cos_dist)
cos_idx = np.argmin(cos_dist)
closest_cond_cos = self.comb_emb.obs.cov_pert[cos_idx]
eucl_dist = euclidean_distances(cond_emb, self.comb_emb.X)[0]
min_eucl_dist = np.min(eucl_dist)
eucl_idx = np.argmin(eucl_dist)
closest_cond_eucl = self.comb_emb.obs.cov_pert[eucl_idx]
return min_cos_dist, min_eucl_dist, closest_cond_cos, closest_cond_eucl
def predict(
self,
genes,
cov,
pert,
dose,
uncertainty=True,
return_anndata=True,
sample=False,
n_samples=1,
):
"""Predict values of control 'genes' conditions specified in df.
Parameters
----------
genes : np.array
Control cells.
        cov : dict of lists
            Provide a value for each covariate (e.g. cell_type) as a dictionary
            for the queried uncertainty (e.g. cov_dict={'cell_type': 'A549'}).
        pert : list
            Perturbation for the queried uncertainty. In case of combinations the
            format has to be 'pertA+pertB'.
        dose : list
            String which contains the dose of the perturbation queried. In case
            of combinations the format has to be 'doseA+doseB'.
        uncertainty : bool (default: True)
            Compute uncertainties for the generated cells.
        return_anndata : bool, optional (default: True)
            Return embedding wrapped into anndata object.
        sample : bool (default: False)
            If sample is True, returns samples from a Gaussian distribution with
            mean and variance estimated by the model. Otherwise, returns just
            means and variances estimated by the model.
        n_samples : int (default: 1)
            Number of samples to draw if sample is True.
Returns
-------
If return_anndata is True, returns anndata structure. Otherwise, returns
np.arrays for gene_means, gene_vars and a data frame for the corresponding
conditions df_obs.
"""
assert len(dose) == len(pert), "Check the length of pert, dose"
for cov_key in cov:
assert len(cov[cov_key]) == len(pert), "Check the length of covariates"
df = pd.concat(
[
pd.DataFrame({self.perturbation_key: pert, self.dose_key: dose}),
pd.DataFrame(cov),
],
axis=1,
)
self.model.eval()
num = genes.shape[0]
dim = genes.shape[1]
genes = torch.Tensor(genes).to(self.model.device)
gene_means_list = []
gene_vars_list = []
df_list = []
for i in range(len(df)):
comb_name = pert[i]
dose_name = dose[i]
covar_name = {}
for cov_key in cov:
covar_name[cov_key] = cov[cov_key][i]
drug_ohe = torch.Tensor(
self._get_drug_encoding(comb_name, doses=dose_name)
).to(self.model.device)
drugs = drug_ohe.expand([num, self.drug_ohe.shape[1]])
covars = []
for cov_key in self.covariate_keys:
covar_ohe = torch.Tensor(
self.covars_dict[cov_key][covar_name[cov_key]]
).to(self.model.device)
covars.append(covar_ohe.expand([num, covar_ohe.shape[0]]).clone())
gene_reconstructions = (
self.model.predict(genes, drugs, covars).cpu().clone().detach().numpy()
)
if sample:
df_list.append(
pd.DataFrame(
[df.loc[i].values] * num * n_samples, columns=df.columns
)
)
if self.args['loss_ae'] == 'gauss':
                    # Normal expects a standard deviation, so take the square
                    # root of the predicted variances before sampling
                    dist = Normal(
                        torch.Tensor(gene_reconstructions[:, :dim]),
                        torch.Tensor(gene_reconstructions[:, dim:]).sqrt(),
                    )
elif self.args['loss_ae'] == 'nb':
counts, logits = _convert_mean_disp_to_counts_logits(
torch.clamp(
torch.Tensor(gene_reconstructions[:, :dim]),
min=1e-8,
max=1e8,
),
torch.clamp(
torch.Tensor(gene_reconstructions[:, dim:]),
min=1e-8,
max=1e8,
)
)
dist = NegativeBinomial(
total_count=counts,
logits=logits
)
sampled_gexp = (
dist.sample(torch.Size([n_samples]))
.cpu()
.detach()
.numpy()
.reshape(-1, dim)
)
                sampled_gexp[sampled_gexp < 0] = 0  # set negative values to 0, since gene expression can't be negative
gene_means_list.append(sampled_gexp)
else:
df_list.append(
pd.DataFrame([df.loc[i].values] * num, columns=df.columns)
)
gene_means_list.append(gene_reconstructions[:, :dim])
if uncertainty:
(
cos_dist,
eucl_dist,
closest_cond_cos,
closest_cond_eucl,
) = self.compute_uncertainty(
cov=covar_name, pert=comb_name, dose=dose_name
)
df_list[-1] = df_list[-1].assign(
uncertainty_cosine=cos_dist,
uncertainty_euclidean=eucl_dist,
closest_cond_cosine=closest_cond_cos,
closest_cond_euclidean=closest_cond_eucl,
)
gene_vars_list.append(gene_reconstructions[:, dim:])
gene_means = np.concatenate(gene_means_list)
gene_vars = np.concatenate(gene_vars_list)
df_obs = pd.concat(df_list)
del df_list, gene_means_list, gene_vars_list
if return_anndata:
adata = sc.AnnData(gene_means)
adata.var_names = self.var_names
adata.obs = df_obs
if not sample:
adata.layers["variance"] = gene_vars
adata.obs.index = adata.obs.index.astype(str) # type fix
del gene_means, gene_vars, df_obs
return adata
else:
return gene_means, gene_vars, df_obs
def get_latent(
self,
genes,
cov,
pert,
dose,
return_anndata=True,
):
"""Get latent values of control 'genes' with conditions specified in df.
Parameters
----------
genes : np.array
Control cells.
        cov : dict of lists
            Provide a value for each covariate (e.g. cell_type) as a dictionary
            for the queried uncertainty (e.g. cov_dict={'cell_type': 'A549'}).
        pert : list
            Perturbation for the queried uncertainty. In case of combinations the
            format has to be 'pertA+pertB'.
        dose : list
            String which contains the dose of the perturbation queried. In case
            of combinations the format has to be 'doseA+doseB'.
return_anndata : bool, optional (default: True)
Return embedding wrapped into anndata object.
Returns
-------
If return_anndata is True, returns anndata structure. Otherwise, returns
np.arrays for latent and a data frame for the corresponding
conditions df_obs.
"""
assert len(dose) == len(pert), "Check the length of pert, dose"
for cov_key in cov:
assert len(cov[cov_key]) == len(pert), "Check the length of covariates"
df = pd.concat(
[
pd.DataFrame({self.perturbation_key: pert, self.dose_key: dose}),
pd.DataFrame(cov),
],
axis=1,
)
self.model.eval()
num = genes.shape[0]
genes = torch.Tensor(genes).to(self.model.device)
latent_list = []
df_list = []
for i in range(len(df)):
comb_name = pert[i]
dose_name = dose[i]
covar_name = {}
for cov_key in cov:
covar_name[cov_key] = cov[cov_key][i]
drug_ohe = torch.Tensor(
self._get_drug_encoding(comb_name, doses=dose_name)
).to(self.model.device)
drugs = drug_ohe.expand([num, self.drug_ohe.shape[1]])
covars = []
for cov_key in self.covariate_keys:
covar_ohe = torch.Tensor(
self.covars_dict[cov_key][covar_name[cov_key]]
).to(self.model.device)
covars.append(covar_ohe.expand([num, covar_ohe.shape[0]]).clone())
_, latent_treated = self.model.predict(
genes,
drugs,
covars,
return_latent_treated=True,
)
latent_treated = latent_treated.cpu().clone().detach().numpy()
df_list.append(
pd.DataFrame([df.loc[i].values] * num, columns=df.columns)
)
latent_list.append(latent_treated)
latent = np.concatenate(latent_list)
df_obs = pd.concat(df_list)
del df_list
if return_anndata:
adata = sc.AnnData(latent)
adata.obs = df_obs
adata.obs.index = adata.obs.index.astype(str) # type fix
return adata
else:
return latent, df_obs
def get_response(
self,
genes_control=None,
doses=None,
contvar_min=None,
contvar_max=None,
n_points=10,
ncells_max=100,
perturbations=None,
control_name="test",
):
"""Decoded dose response data frame.
Parameters
----------
        genes_control : np.array (default: None)
            Genes for which to predict values. If None, take from the control
            cells of the 'test' split in datasets.
        doses : np.array (default: None)
            Dose values. If None, default values will be generated on a grid:
            n_points in range [contvar_min, contvar_max].
        contvar_min : float (default: None)
            Minimum dose value to generate for default option. If None, 0 is used.
        contvar_max : float (default: None)
            Maximum dose value to generate for default option. If None,
            self.max_dose is used.
        n_points : int (default: 10)
            Number of dose points to generate for default option.
perturbations : list (default: None)
List of perturbations for dose response
Returns
-------
pd.DataFrame
of decoded response values of genes and average response.
"""
if genes_control is None:
genes_control = self.datasets["test"].subset_condition(control=True).genes
if contvar_min is None:
contvar_min = 0
if contvar_max is None:
contvar_max = self.max_dose
self.model.eval()
if doses is None:
doses = np.linspace(contvar_min, contvar_max, n_points)
if perturbations is None:
perturbations = self.unique_perts
response = pd.DataFrame(
columns=self.covariate_keys
+ [self.perturbation_key, self.dose_key, "response"]
+ list(self.var_names)
)
        if ncells_max < len(genes_control):
            idx = torch.LongTensor(
                np.random.choice(range(len(genes_control)), ncells_max, replace=False)
            )
            genes_control = genes_control[idx]
j = 0
for covar_combo in self.emb_covars_combined:
cov_dict = {}
for i, cov_val in enumerate(covar_combo.split("_")):
cov_dict[self.covariate_keys[i]] = [cov_val]
print(cov_dict)
for _, drug in enumerate(perturbations):
if not (drug in self.datasets[control_name].subset_condition(control=True).ctrl_name):
for dose in doses:
# TODO handle covars
gene_means, _, _ = self.predict(
genes_control,
cov=cov_dict,
pert=[drug],
dose=[dose],
return_anndata=False,
)
predicted_data = np.mean(gene_means, axis=0).reshape(-1)
response.loc[j] = (
covar_combo.split("_")
+ [drug, dose, np.linalg.norm(predicted_data)]
+ list(predicted_data)
)
j += 1
return response
def get_response_reference(self, perturbations=None):
"""Computes reference values of the response.
Parameters
----------
        perturbations : list (default: None)
            List of perturbations for dose response.
Returns
-------
pd.DataFrame
of decoded response values of genes and average response.
"""
if perturbations is None:
perturbations = self.unique_perts
reference_response_curve = pd.DataFrame(
columns=self.covariate_keys
+ [self.perturbation_key, self.dose_key, "split", "num_cells", "response"]
+ list(self.var_names)
)
dataset_ctr = self.datasets["training"].subset_condition(control=True)
i = 0
for split in ["training", "ood"]:
if split == 'ood':
dataset = self.datasets[split]
else:
dataset = self.datasets["training"].subset_condition(control=False)
for pert in self.seen_covars_perts[split]:
*covars, drug, dose_val = pert.split("_")
if drug in perturbations:
if not ("+" in dose_val):
dose = float(dose_val)
else:
dose = dose_val
idx = np.where((dataset.pert_categories == pert))[0]
if len(idx):
y_true = dataset.genes[idx, :].numpy().mean(axis=0)
reference_response_curve.loc[i] = (
covars
+ [drug, dose, split, len(idx), np.linalg.norm(y_true)]
+ list(y_true)
)
i += 1
reference_response_curve = reference_response_curve.replace(
"training_treated", "train"
)
return reference_response_curve
def get_response2D(
self,
perturbations,
covar,
genes_control=None,
doses=None,
contvar_min=None,
contvar_max=None,
n_points=10,
ncells_max=100,
#fixed_drugs="",
#fixed_doses="",
):
"""Decoded dose response data frame.
Parameters
----------
perturbations : list
List of length 2 of perturbations for dose response.
        covar : dict
            Covariate values (one per covariate key) for which to compute the
            dose-response.
        genes_control : np.array (default: None)
            Genes for which to predict values. If None, take from the control
            cells of the 'test' split in datasets.
        doses : np.array (default: None)
            Dose values. If None, default values will be generated on a grid:
            n_points in range [contvar_min, contvar_max].
        contvar_min : float (default: None)
            Minimum dose value to generate for default option. If None,
            self.min_dose is used.
        contvar_max : float (default: None)
            Maximum dose value to generate for default option. If None,
            self.max_dose is used.
        n_points : int (default: 10)
            Number of dose points to generate for default option.
Returns
-------
pd.DataFrame
of decoded response values of genes and average response.
"""
assert len(perturbations) == 2, "You should provide a list of 2 perturbations."
if contvar_min is None:
contvar_min = self.min_dose
if contvar_max is None:
contvar_max = self.max_dose
self.model.eval()
# doses = torch.Tensor(np.linspace(contvar_min, contvar_max, n_points))
if doses is None:
doses = np.linspace(contvar_min, contvar_max, n_points)
# genes_control = dataset.genes[dataset.indices['control']]
if genes_control is None:
genes_control = self.datasets["test"].subset_condition(control=True).genes
ncells_max = min(ncells_max, len(genes_control))
        # sample cells without replacement, matching get_response above
        idx = torch.LongTensor(
            np.random.choice(range(len(genes_control)), ncells_max, replace=False)
        )
genes_control = genes_control[idx]
response = pd.DataFrame(
columns=perturbations+["response"]+list(self.var_names)
)
drug = perturbations[0] + "+" + perturbations[1]
dose_vals = [f"{d[0]}+{d[1]}" for d in itertools.product(*[doses, doses])]
dose_comb = [list(d) for d in itertools.product(*[doses, doses])]
i = 0
if not (drug in self.datasets['training'].subset_condition(control=True).ctrl_name):
for dose in dose_vals:
gene_means, _, _ = self.predict(
genes_control,
cov=covar,
pert=[drug],# + fixed_drugs],
dose=[dose],# + fixed_doses],
return_anndata=False,
)
predicted_data = np.mean(gene_means, axis=0).reshape(-1)
response.loc[i] = (
dose_comb[i]
+ [np.linalg.norm(predicted_data)]
+ list(predicted_data)
)
i += 1
return response
def evaluate_r2(self, dataset, genes_control, adata_random=None):
"""
        Measures different quality metrics of a CPA `autoencoder` when tasked
        to translate some `genes_control` into each of the drug/cell_type
        combinations described in `dataset`.
        Considered metrics are the R2 scores of means and variances for all
        genes, as well as for the differentially expressed (_de) genes.
"""
self.model.eval()
scores = pd.DataFrame(
columns=self.covariate_keys
+ [
self.perturbation_key,
self.dose_key,
"R2_mean",
"R2_mean_DE",
"R2_var",
"R2_var_DE",
"model",
"num_cells",
]
)
num, dim = genes_control.size(0), genes_control.size(1)
total_cells = len(dataset)
icond = 0
for pert_category in np.unique(dataset.pert_categories):
# pert_category category contains: 'celltype_perturbation_dose' info
de_idx = np.where(
dataset.var_names.isin(np.array(dataset.de_genes[pert_category]))
)[0]
idx = np.where(dataset.pert_categories == pert_category)[0]
*covars, pert, dose = pert_category.split("_")
cov_dict = {}
for i, cov_key in enumerate(self.covariate_keys):
cov_dict[cov_key] = [covars[i]]
if len(idx) > 0:
mean_predict, var_predict, _ = self.predict(
genes_control,
cov=cov_dict,
pert=[pert],
dose=[dose],
return_anndata=False,
sample=False,
)
# estimate metrics only for reasonably-sized drug/cell-type combos
y_true = dataset.genes[idx, :].numpy()
# true means and variances
yt_m = y_true.mean(axis=0)
yt_v = y_true.var(axis=0)
# predicted means and variances
yp_m = mean_predict.mean(0)
yp_v = var_predict.mean(0)
#yp_v = np.var(mean_predict, axis=0)
mean_score = r2_score(yt_m, yp_m)
var_score = r2_score(yt_v, yp_v)
mean_score_de = r2_score(yt_m[de_idx], yp_m[de_idx])
var_score_de = r2_score(yt_v[de_idx], yp_v[de_idx])
scores.loc[icond] = pert_category.split("_") + [
mean_score,
mean_score_de,
var_score,
var_score_de,
"cpa",
len(idx),
]
icond += 1
if adata_random is not None:
yp_m_bl = np.mean(adata_random, axis=0)
yp_v_bl = np.var(adata_random, axis=0)
mean_score_bl = r2_score(yt_m, yp_m_bl)
var_score_bl = r2_score(yt_v, yp_v_bl)
mean_score_de_bl = r2_score(yt_m[de_idx], yp_m_bl[de_idx])
var_score_de_bl = r2_score(yt_v[de_idx], yp_v_bl[de_idx])
scores.loc[icond] = pert_category.split("_") + [
mean_score_bl,
mean_score_de_bl,
var_score_bl,
var_score_de_bl,
"baseline",
len(idx),
]
icond += 1
return scores
def get_reference_from_combo(perturbations_list, datasets, splits=["training", "ood"]):
"""
A simple function that produces a pd.DataFrame of individual
drugs-doses combinations used among the splits (for a fixed covariate).
"""
df_list = []
for split_name in splits:
full_dataset = datasets[split_name]
ref = {"num_cells": []}
for pp in perturbations_list:
ref[pp] = []
ndrugs = len(perturbations_list)
for pert_cat in np.unique(full_dataset.pert_categories):
_, pert, dose = pert_cat.split("_")
pert_list = pert.split("+")
if set(pert_list) == set(perturbations_list):
dose_list = dose.split("+")
ncells = len(
full_dataset.pert_categories[
full_dataset.pert_categories == pert_cat
]
)
for j in range(ndrugs):
ref[pert_list[j]].append(float(dose_list[j]))
ref["num_cells"].append(ncells)
print(pert, dose, ncells)
df = pd.DataFrame.from_dict(ref)
df["split"] = split_name
df_list.append(df)
return pd.concat(df_list)
def linear_interp(y1, y2, x1, x2, x):
a = (y1 - y2) / (x1 - x2)
b = y1 - a * x1
y = a * x + b
return y
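# Editor's note: a quick sanity check for linear_interp (added; values are
# hypothetical). Interpolating between (x1, y1) = (0, 0) and (x2, y2) = (1, 10)
# at x = 0.5 gives the midpoint; the same line extrapolates outside [x1, x2].
def _demo_linear_interp():
    assert linear_interp(y1=0.0, y2=10.0, x1=0.0, x2=1.0, x=0.5) == 5.0
    return linear_interp(0.0, 10.0, 0.0, 1.0, 2.0)  # extrapolates to 20.0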
def evaluate_r2_benchmark(cpa_api, datasets, pert_category, pert_category_list):
    # the API exposes `covariate_keys` (a list); there is no `covars_key`
    scores = pd.DataFrame(
        columns=cpa_api.covariate_keys
        + [
            cpa_api.perturbation_key,
            cpa_api.dose_key,
            "R2_mean",
            "R2_mean_DE",
            "R2_var",
            "R2_var_DE",
            "num_cells",
            "benchmark",
            "method",
        ]
    )
de_idx = np.where(
datasets["ood"].var_names.isin(
np.array(datasets["ood"].de_genes[pert_category])
)
)[0]
idx = np.where(datasets["ood"].pert_categories == pert_category)[0]
y_true = datasets["ood"].genes[idx, :].numpy()
# true means and variances
yt_m = y_true.mean(axis=0)
yt_v = y_true.var(axis=0)
icond = 0
if len(idx) > 0:
for pert_category_predict in pert_category_list:
if "+" in pert_category_predict:
pert1, pert2 = pert_category_predict.split("+")
idx_pred1 = np.where(datasets["training"].pert_categories == pert1)[0]
idx_pred2 = np.where(datasets["training"].pert_categories == pert2)[0]
y_pred1 = datasets["training"].genes[idx_pred1, :].numpy()
y_pred2 = datasets["training"].genes[idx_pred2, :].numpy()
x1 = float(pert1.split("_")[2])
x2 = float(pert2.split("_")[2])
x = float(pert_category.split("_")[2])
yp_m1 = y_pred1.mean(axis=0)
yp_m2 = y_pred2.mean(axis=0)
yp_v1 = y_pred1.var(axis=0)
yp_v2 = y_pred2.var(axis=0)
yp_m = linear_interp(yp_m1, yp_m2, x1, x2, x)
yp_v = linear_interp(yp_v1, yp_v2, x1, x2, x)
# yp_m = (y_pred1.mean(axis=0) + y_pred2.mean(axis=0))/2
# yp_v = (y_pred1.var(axis=0) + y_pred2.var(axis=0))/2
else:
idx_pred = np.where(
datasets["training"].pert_categories == pert_category_predict
)[0]
print(pert_category_predict, len(idx_pred))
y_pred = datasets["training"].genes[idx_pred, :].numpy()
# predicted means and variances
yp_m = y_pred.mean(axis=0)
yp_v = y_pred.var(axis=0)
mean_score = r2_score(yt_m, yp_m)
var_score = r2_score(yt_v, yp_v)
mean_score_de = r2_score(yt_m[de_idx], yp_m[de_idx])
var_score_de = r2_score(yt_v[de_idx], yp_v[de_idx])
scores.loc[icond] = pert_category.split("_") + [
mean_score,
mean_score_de,
var_score,
var_score_de,
len(idx),
pert_category_predict,
"benchmark",
]
icond += 1
return scores
# --- end of file: CPA-main / cpa/api.py ---
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import warnings
import numpy as np
import pandas as pd
import scanpy as sc
from sklearn.metrics import r2_score
from scipy.sparse import issparse
from scipy.stats import wasserstein_distance
import torch
warnings.filterwarnings("ignore")
import sys
if not sys.warnoptions:
warnings.simplefilter("ignore")
warnings.simplefilter(action="ignore", category=FutureWarning)
def _convert_mean_disp_to_counts_logits(mu, theta, eps=1e-6):
r"""NB parameterizations conversion
Parameters
----------
mu :
mean of the NB distribution.
theta :
inverse overdispersion.
eps :
constant used for numerical log stability. (Default value = 1e-6)
Returns
-------
    tuple
        the number of failures until the experiment is stopped
        and the success logits.
"""
assert (mu is None) == (
theta is None
), "If using the mu/theta NB parameterization, both parameters must be specified"
logits = (mu + eps).log() - (theta + eps).log()
total_count = theta
return total_count, logits
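# Editor's note: a small numerical check (added; values are hypothetical).
# With total_count = theta and logits = log(mu) - log(theta), the resulting
# torch NegativeBinomial has mean theta * (mu / theta) = mu, up to eps.
def _demo_nb_conversion():
    from torch.distributions import NegativeBinomial
    mu = torch.tensor([4.0, 8.0])
    theta = torch.tensor([2.0, 2.0])
    total_count, logits = _convert_mean_disp_to_counts_logits(mu, theta)
    return NegativeBinomial(total_count=total_count, logits=logits).mean  # ~mu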
def rank_genes_groups_by_cov(
adata,
groupby,
control_group,
covariate,
pool_doses=False,
n_genes=50,
rankby_abs=True,
key_added="rank_genes_groups_cov",
return_dict=False,
):
"""
Function that generates a list of differentially expressed genes computed
separately for each covariate category, and using the respective control
cells as reference.
Usage example:
    rank_genes_groups_by_cov(
        adata,
        groupby='cov_product_dose',
        covariate='cell_type',
        control_group='Vehicle_0'
    )
Parameters
----------
adata : AnnData
AnnData dataset
groupby : str
Obs column that defines the groups, should be
cartesian product of covariate_perturbation_cont_var,
it is important that this format is followed.
control_group : str
String that defines the control group in the groupby obs
covariate : str
Obs column that defines the main covariate by which we
want to separate DEG computation (eg. cell type, species, etc.)
n_genes : int (default: 50)
Number of DEGs to include in the lists
rankby_abs : bool (default: True)
If True, rank genes by absolute values of the score, thus including
top downregulated genes in the top N genes. If False, the ranking will
have only upregulated genes at the top.
key_added : str (default: 'rank_genes_groups_cov')
Key used when adding the dictionary to adata.uns
return_dict : str (default: False)
Signals whether to return the dictionary or not
Returns
-------
Adds the DEG dictionary to adata.uns
If return_dict is True returns:
gene_dict : dict
Dictionary where groups are stored as keys, and the list of DEGs
are the corresponding values
"""
gene_dict = {}
cov_categories = adata.obs[covariate].unique()
for cov_cat in cov_categories:
print(cov_cat)
# name of the control group in the groupby obs column
control_group_cov = "_".join([cov_cat, control_group])
# subset adata to cells belonging to a covariate category
adata_cov = adata[adata.obs[covariate] == cov_cat]
# compute DEGs
sc.tl.rank_genes_groups(
adata_cov,
groupby=groupby,
reference=control_group_cov,
rankby_abs=rankby_abs,
n_genes=n_genes,
)
# add entries to dictionary of gene sets
de_genes = pd.DataFrame(adata_cov.uns["rank_genes_groups"]["names"])
for group in de_genes:
gene_dict[group] = de_genes[group].tolist()
adata.uns[key_added] = gene_dict
if return_dict:
return gene_dict
def rank_genes_groups(
adata,
groupby,
pool_doses=False,
n_genes=50,
rankby_abs=True,
key_added="rank_genes_groups_cov",
return_dict=False,
):
"""
Function that generates a list of differentially expressed genes computed
separately for each covariate category, and using the respective control
cells as reference.
Usage example:
rank_genes_groups_by_cov(
adata,
groupby='cov_product_dose',
covariate_key='cell_type',
control_group='Vehicle_0'
)
Parameters
----------
adata : AnnData
AnnData dataset
groupby : str
Obs column that defines the groups, should be
cartesian product of covariate_perturbation_cont_var,
it is important that this format is followed.
control_group : str
String that defines the control group in the groupby obs
covariate : str
Obs column that defines the main covariate by which we
want to separate DEG computation (eg. cell type, species, etc.)
n_genes : int (default: 50)
Number of DEGs to include in the lists
rankby_abs : bool (default: True)
If True, rank genes by absolute values of the score, thus including
top downregulated genes in the top N genes. If False, the ranking will
have only upregulated genes at the top.
key_added : str (default: 'rank_genes_groups_cov')
Key used when adding the dictionary to adata.uns
return_dict : str (default: False)
Signals whether to return the dictionary or not
Returns
-------
Adds the DEG dictionary to adata.uns
If return_dict is True returns:
gene_dict : dict
Dictionary where groups are stored as keys, and the list of DEGs
are the corresponding values
"""
covars_comb = []
for i in range(len(adata)):
cov = "_".join(adata.obs["cov_drug_dose_name"].values[i].split("_")[:-2])
covars_comb.append(cov)
adata.obs["covars_comb"] = covars_comb
gene_dict = {}
for cov_cat in np.unique(adata.obs["covars_comb"].values):
adata_cov = adata[adata.obs["covars_comb"] == cov_cat]
control_group_cov = (
adata_cov[adata_cov.obs["control"] == 1].obs[groupby].values[0]
)
# compute DEGs
sc.tl.rank_genes_groups(
adata_cov,
groupby=groupby,
reference=control_group_cov,
rankby_abs=rankby_abs,
n_genes=n_genes,
)
# add entries to dictionary of gene sets
de_genes = pd.DataFrame(adata_cov.uns["rank_genes_groups"]["names"])
for group in de_genes:
gene_dict[group] = de_genes[group].tolist()
adata.uns[key_added] = gene_dict
if return_dict:
return gene_dict
def evaluate_r2_(adata, pred_adata, condition_key, sampled=False, de_genes_dict=None):
r2_list = []
if issparse(adata.X):
adata.X = adata.X.A
if issparse(pred_adata.X):
pred_adata.X = pred_adata.X.A
for cond in pred_adata.obs[condition_key].unique():
adata_ = adata[adata.obs[condition_key] == cond]
pred_adata_ = pred_adata[pred_adata.obs[condition_key] == cond]
r2_mean = r2_score(adata_.X.mean(0), pred_adata_.X.mean(0))
if sampled:
r2_var = r2_score(adata_.X.var(0), pred_adata_.X.var(0))
else:
r2_var = r2_score(
adata_.X.var(0),
pred_adata_.layers['variance'].var(0)
)
r2_list.append(
{
'condition': cond,
'r2_mean': r2_mean,
'r2_var': r2_var,
}
)
if de_genes_dict:
de_genes = de_genes_dict[cond]
sub_adata_ = adata_[:, de_genes]
sub_pred_adata_ = pred_adata_[:, de_genes]
r2_mean_deg = r2_score(sub_adata_.X.mean(0), sub_pred_adata_.X.mean(0))
if sampled:
r2_var_deg = r2_score(sub_adata_.X.var(0), sub_pred_adata_.X.var(0))
else:
r2_var_deg = r2_score(
sub_adata_.X.var(0),
sub_pred_adata_.layers['variance'].var(0)
)
r2_list[-1]['r2_mean_deg'] = r2_mean_deg
r2_list[-1]['r2_var_deg'] = r2_var_deg
r2_df = pd.DataFrame(r2_list).set_index('condition')
return r2_df
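# Added usage sketch (assumptions: `anndata` is available, as it is a
# scanpy dependency; the data below is synthetic and purely illustrative).
def _example_evaluate_r2():
    import anndata
    X = np.random.rand(100, 20).astype(np.float32)
    obs = pd.DataFrame(
        {"condition": ["drugA"] * 50 + ["drugB"] * 50},
        index=[f"cell_{i}" for i in range(100)],
    )
    adata = anndata.AnnData(X=X, obs=obs)
    pred = anndata.AnnData(X=X + 0.01, obs=obs.copy())
    # `sampled=True` compares variances of X directly, so no 'variance'
    # layer is required on the prediction object
    return evaluate_r2_(adata, pred, condition_key="condition", sampled=True)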
def evaluate_mmd(adata, pred_adata, condition_key, de_genes_dict=None):
mmd_list = []
for cond in pred_adata.obs[condition_key].unique():
adata_ = adata[adata.obs[condition_key] == cond].copy()
pred_adata_ = pred_adata[pred_adata.obs[condition_key] == cond].copy()
if issparse(adata_.X):
adata_.X = adata_.X.A
if issparse(pred_adata_.X):
pred_adata_.X = pred_adata_.X.A
mmd = mmd_loss_calc(torch.Tensor(adata_.X), torch.Tensor(pred_adata_.X))
mmd_list.append(
{
'condition': cond,
'mmd': mmd.detach().cpu().numpy()
}
)
if de_genes_dict:
de_genes = de_genes_dict[cond]
sub_adata_ = adata_[:, de_genes]
sub_pred_adata_ = pred_adata_[:, de_genes]
mmd_deg = mmd_loss_calc(torch.Tensor(sub_adata_.X), torch.Tensor(sub_pred_adata_.X))
mmd_list[-1]['mmd_deg'] = mmd_deg.detach().cpu().numpy()
mmd_df = pd.DataFrame(mmd_list).set_index('condition')
return mmd_df
def evaluate_emd(adata, pred_adata, condition_key, de_genes_dict=None):
emd_list = []
for cond in pred_adata.obs[condition_key].unique():
adata_ = adata[adata.obs[condition_key] == cond].copy()
pred_adata_ = pred_adata[pred_adata.obs[condition_key] == cond].copy()
if issparse(adata_.X):
adata_.X = adata_.X.A
if issparse(pred_adata_.X):
pred_adata_.X = pred_adata_.X.A
wd = []
for i, _ in enumerate(adata_.var_names):
wd.append(
                wasserstein_distance(adata_.X[:, i], pred_adata_.X[:, i])
)
emd_list.append(
{
'condition': cond,
'emd': np.mean(wd)
}
)
if de_genes_dict:
de_genes = de_genes_dict[cond]
sub_adata_ = adata_[:, de_genes]
sub_pred_adata_ = pred_adata_[:, de_genes]
wd_deg = []
for i, _ in enumerate(sub_adata_.var_names):
wd_deg.append(
                wasserstein_distance(sub_adata_.X[:, i], sub_pred_adata_.X[:, i])
)
emd_list[-1]['emd_deg'] = np.mean(wd_deg)
emd_df = pd.DataFrame(emd_list).set_index('condition')
return emd_df
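# Added note: `wasserstein_distance` above is the 1-D earth mover's distance
# computed gene by gene. A minimal standalone example (values illustrative):
#     from scipy.stats import wasserstein_distance
#     wasserstein_distance([0.0, 1.0, 3.0], [5.0, 6.0, 8.0])  # -> 5.0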
def pairwise_distance(x, y):
x = x.view(x.shape[0], x.shape[1], 1)
y = torch.transpose(y, 0, 1)
output = torch.sum((x - y) ** 2, 1)
output = torch.transpose(output, 0, 1)
return output
def gaussian_kernel_matrix(x, y, alphas):
"""Computes multiscale-RBF kernel between x and y.
Parameters
----------
x: torch.Tensor
Tensor with shape [batch_size, z_dim].
y: torch.Tensor
Tensor with shape [batch_size, z_dim].
    alphas: torch.Tensor
        1-D tensor of RBF bandwidth parameters (one kernel per entry).
Returns
-------
Returns the computed multiscale-RBF kernel between x and y.
"""
dist = pairwise_distance(x, y).contiguous()
dist_ = dist.view(1, -1)
alphas = alphas.view(alphas.shape[0], 1)
beta = 1. / (2. * alphas)
s = torch.matmul(beta, dist_)
return torch.sum(torch.exp(-s), 0).view_as(dist)
def mmd_loss_calc(source_features, target_features):
"""Initializes Maximum Mean Discrepancy(MMD) between source_features and target_features.
- Gretton, Arthur, et al. "A Kernel Two-Sample Test". 2012.
Parameters
----------
source_features: torch.Tensor
Tensor with shape [batch_size, z_dim]
target_features: torch.Tensor
Tensor with shape [batch_size, z_dim]
Returns
-------
    Returns the computed MMD between source_features and target_features.
"""
alphas = [
1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
1e3, 1e4, 1e5, 1e6
]
    alphas = torch.tensor(alphas, dtype=torch.float32, device=source_features.device)
cost = torch.mean(gaussian_kernel_matrix(source_features, source_features, alphas))
cost += torch.mean(gaussian_kernel_matrix(target_features, target_features, alphas))
cost -= 2 * torch.mean(gaussian_kernel_matrix(source_features, target_features, alphas))
return cost
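# Added illustrative sketch (not part of the original file): MMD is zero for
# identical samples and grows with distribution shift. Shapes below are
# assumptions for demonstration.
def _example_mmd_loss_calc():
    x = torch.randn(64, 16)
    y = torch.randn(64, 16) + 1.0        # mean-shifted sample
    near = mmd_loss_calc(x, x.clone())   # zero for identical samples
    far = mmd_loss_calc(x, y)            # noticeably larger
    return near, far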
|
CPA-main
|
cpa/helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import json
import os
import time
from collections import defaultdict
import numpy as np
import torch
from cpa.data import load_dataset_splits
from cpa.model import CPA, MLP
from sklearn.metrics import r2_score
from torch.autograd import Variable
from torch.distributions import NegativeBinomial
from torch import nn
def pjson(s):
"""
Prints a string in JSON format and flushes stdout
"""
print(json.dumps(s), flush=True)
def _convert_mean_disp_to_counts_logits(mu, theta, eps=1e-6):
r"""NB parameterizations conversion
Parameters
----------
mu :
mean of the NB distribution.
theta :
inverse overdispersion.
eps :
constant used for numerical log stability. (Default value = 1e-6)
Returns
-------
type
the number of failures until the experiment is stopped
and the success probability.
"""
assert (mu is None) == (
theta is None
), "If using the mu/theta NB parameterization, both parameters must be specified"
logits = (mu + eps).log() - (theta + eps).log()
total_count = theta
return total_count, logits
def evaluate_disentanglement(autoencoder, dataset):
"""
Given a CPA model, this function measures the correlation between
its latent space and 1) a dataset's drug vectors 2) a datasets covariate
vectors.
"""
with torch.no_grad():
_, latent_basal = autoencoder.predict(
dataset.genes,
dataset.drugs,
dataset.covariates,
return_latent_basal=True,
)
mean = latent_basal.mean(dim=0, keepdim=True)
stddev = latent_basal.std(0, unbiased=False, keepdim=True)
normalized_basal = (latent_basal - mean) / stddev
criterion = nn.CrossEntropyLoss()
pert_scores, cov_scores = 0, []
def compute_score(labels):
if len(np.unique(labels)) > 1:
unique_labels = set(labels)
            label_to_idx = {label: idx for idx, label in enumerate(unique_labels)}
labels_tensor = torch.tensor(
[label_to_idx[label] for label in labels], dtype=torch.long, device=autoencoder.device
)
assert normalized_basal.size(0) == len(labels_tensor)
#might have to perform a train/test split here
dataset = torch.utils.data.TensorDataset(normalized_basal, labels_tensor)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=256, shuffle=True)
# 2 non-linear layers of size <input_dimension>
# followed by a linear layer.
disentanglement_classifier = MLP(
[normalized_basal.size(1)]
+ [normalized_basal.size(1) for _ in range(2)]
+ [len(unique_labels)]
).to(autoencoder.device)
optimizer = torch.optim.Adam(disentanglement_classifier.parameters(), lr=1e-2)
for epoch in range(50):
for X, y in data_loader:
pred = disentanglement_classifier(X)
                    loss = criterion(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
pred = disentanglement_classifier(normalized_basal).argmax(dim=1)
acc = torch.sum(pred == labels_tensor) / len(labels_tensor)
return acc.item()
else:
return 0
if dataset.perturbation_key is not None:
pert_scores = compute_score(dataset.drugs_names)
    cov_scores = []
    for cov in list(dataset.covariate_names):
        if len(np.unique(dataset.covariate_names[cov])) == 0:
            cov_scores = [0]
            break
        else:
            # one probe score per covariate
            cov_scores.append(compute_score(dataset.covariate_names[cov]))
return [np.mean(pert_scores), *[np.mean(cov_score) for cov_score in cov_scores]]
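# Added interpretation sketch: evaluate_disentanglement returns probe
# accuracies; values near chance level indicate the basal latent space
# carries little information about the attribute (good disentanglement).
def _example_disentanglement_interpretation(autoencoder, dataset):
    pert_score, *cov_scores = evaluate_disentanglement(autoencoder, dataset)
    chance = 1.0 / dataset.num_drugs if dataset.num_drugs > 0 else None
    return pert_score, chance  # pert_score close to `chance` is desirable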
def evaluate_r2(autoencoder, dataset, genes_control):
"""
    Measures different quality metrics about a CPA `autoencoder`, when
tasked to translate some `genes_control` into each of the drug/covariates
combinations described in `dataset`.
Considered metrics are R2 score about means and variances for all genes, as
well as R2 score about means and variances about differentially expressed
(_de) genes.
"""
mean_score, var_score, mean_score_de, var_score_de = [], [], [], []
num, dim = genes_control.size(0), genes_control.size(1)
total_cells = len(dataset)
for pert_category in np.unique(dataset.pert_categories):
        # pert_category contains 'celltype_perturbation_dose' info
de_idx = np.where(
dataset.var_names.isin(np.array(dataset.de_genes[pert_category]))
)[0]
idx = np.where(dataset.pert_categories == pert_category)[0]
        # estimate metrics only for reasonably-sized drug/cell-type combos
        if len(idx) > 30:
emb_drugs = dataset.drugs[idx][0].view(1, -1).repeat(num, 1).clone()
emb_covars = [
covar[idx][0].view(1, -1).repeat(num, 1).clone()
for covar in dataset.covariates
]
genes_predict = (
autoencoder.predict(genes_control, emb_drugs, emb_covars).detach().cpu()
)
mean_predict = genes_predict[:, :dim]
var_predict = genes_predict[:, dim:]
if autoencoder.loss_ae == 'nb':
counts, logits = _convert_mean_disp_to_counts_logits(
torch.clamp(
torch.Tensor(mean_predict),
min=1e-4,
max=1e4,
),
torch.clamp(
torch.Tensor(var_predict),
min=1e-4,
max=1e4,
)
)
dist = NegativeBinomial(
total_count=counts,
logits=logits
)
nb_sample = dist.sample().cpu().numpy()
yp_m = nb_sample.mean(0)
yp_v = nb_sample.var(0)
else:
# predicted means and variances
yp_m = mean_predict.mean(0)
yp_v = var_predict.mean(0)
y_true = dataset.genes[idx, :].numpy()
# true means and variances
yt_m = y_true.mean(axis=0)
yt_v = y_true.var(axis=0)
mean_score.append(r2_score(yt_m, yp_m))
var_score.append(r2_score(yt_v, yp_v))
mean_score_de.append(r2_score(yt_m[de_idx], yp_m[de_idx]))
var_score_de.append(r2_score(yt_v[de_idx], yp_v[de_idx]))
return [
np.mean(s) if len(s) else -1
for s in [mean_score, mean_score_de, var_score, var_score_de]
]
def evaluate(autoencoder, datasets):
"""
    Measure quality metrics using `evaluate_r2()` on the training, test, and
out-of-distribution (ood) splits.
"""
autoencoder.eval()
with torch.no_grad():
stats_test = evaluate_r2(
autoencoder,
datasets["test"].subset_condition(control=False),
datasets["test"].subset_condition(control=True).genes
)
disent_scores = evaluate_disentanglement(autoencoder, datasets["test"])
stats_disent_pert = disent_scores[0]
stats_disent_cov = disent_scores[1:]
evaluation_stats = {
"training": evaluate_r2(
autoencoder,
datasets["training"].subset_condition(control=False),
datasets["training"].subset_condition(control=True).genes,
),
"test": stats_test,
"ood": evaluate_r2(
autoencoder, datasets["ood"], datasets["test"].subset_condition(control=True).genes
),
"perturbation disentanglement": stats_disent_pert,
"optimal for perturbations": 1 / datasets["test"].num_drugs
if datasets["test"].num_drugs > 0
else None,
}
if len(stats_disent_cov) > 0:
for i in range(len(stats_disent_cov)):
evaluation_stats[
f"{list(datasets['test'].covariate_names)[i]} disentanglement"
] = stats_disent_cov[i]
evaluation_stats[
f"optimal for {list(datasets['test'].covariate_names)[i]}"
] = 1 / datasets["test"].num_covariates[i]
autoencoder.train()
return evaluation_stats
def prepare_cpa(args, state_dict=None):
"""
Instantiates autoencoder and dataset to run an experiment.
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
datasets = load_dataset_splits(
args["data"],
args["perturbation_key"],
args["dose_key"],
args["covariate_keys"],
args["split_key"],
args["control"],
)
autoencoder = CPA(
datasets["training"].num_genes,
datasets["training"].num_drugs,
datasets["training"].num_covariates,
device=device,
seed=args["seed"],
loss_ae=args["loss_ae"],
doser_type=args["doser_type"],
patience=args["patience"],
hparams=args["hparams"],
decoder_activation=args["decoder_activation"],
)
if state_dict is not None:
autoencoder.load_state_dict(state_dict)
return autoencoder, datasets
def train_cpa(args, return_model=False):
"""
Trains a CPA autoencoder
"""
autoencoder, datasets = prepare_cpa(args)
datasets.update(
{
"loader_tr": torch.utils.data.DataLoader(
datasets["training"],
batch_size=autoencoder.hparams["batch_size"],
shuffle=True,
)
}
)
pjson({"training_args": args})
pjson({"autoencoder_params": autoencoder.hparams})
args["hparams"] = autoencoder.hparams
start_time = time.time()
for epoch in range(args["max_epochs"]):
epoch_training_stats = defaultdict(float)
for data in datasets["loader_tr"]:
genes, drugs, covariates = data[0], data[1], data[2:]
minibatch_training_stats = autoencoder.update(genes, drugs, covariates)
for key, val in minibatch_training_stats.items():
epoch_training_stats[key] += val
for key, val in epoch_training_stats.items():
epoch_training_stats[key] = val / len(datasets["loader_tr"])
if not (key in autoencoder.history.keys()):
autoencoder.history[key] = []
autoencoder.history[key].append(epoch_training_stats[key])
autoencoder.history["epoch"].append(epoch)
        elapsed_minutes = (time.time() - start_time) / 60
        autoencoder.history["elapsed_time_min"] = elapsed_minutes
        # check stopping condition: time ran out OR max epochs achieved
        # (patience-based early stopping is applied after evaluation below)
        stop = elapsed_minutes > args["max_minutes"] or (
            epoch == args["max_epochs"] - 1
        )
if (epoch % args["checkpoint_freq"]) == 0 or stop:
evaluation_stats = evaluate(autoencoder, datasets)
for key, val in evaluation_stats.items():
if not (key in autoencoder.history.keys()):
autoencoder.history[key] = []
autoencoder.history[key].append(val)
autoencoder.history["stats_epoch"].append(epoch)
pjson(
{
"epoch": epoch,
"training_stats": epoch_training_stats,
"evaluation_stats": evaluation_stats,
"ellapsed_minutes": ellapsed_minutes,
}
)
torch.save(
(autoencoder.state_dict(), args, autoencoder.history),
os.path.join(
args["save_dir"],
"model_seed={}_epoch={}.pt".format(args["seed"], epoch),
),
)
pjson(
{
"model_saved": "model_seed={}_epoch={}.pt\n".format(
args["seed"], epoch
)
}
)
stop = stop or autoencoder.early_stopping(np.mean(evaluation_stats["test"]))
if stop:
pjson({"early_stop": epoch})
break
if return_model:
return autoencoder, datasets
def parse_arguments():
"""
Read arguments if this script is called from a terminal.
"""
parser = argparse.ArgumentParser(description="Drug combinations.")
# dataset arguments
parser.add_argument("--data", type=str, required=True)
parser.add_argument("--perturbation_key", type=str, default="condition")
parser.add_argument("--control", type=str, default=None)
parser.add_argument("--dose_key", type=str, default="dose_val")
parser.add_argument("--covariate_keys", nargs="*", type=str, default="cell_type")
parser.add_argument("--split_key", type=str, default="split")
parser.add_argument("--loss_ae", type=str, default="gauss")
parser.add_argument("--doser_type", type=str, default="sigm")
parser.add_argument("--decoder_activation", type=str, default="linear")
# CPA arguments (see set_hparams_() in cpa.model.CPA)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--hparams", type=str, default="")
# training arguments
parser.add_argument("--max_epochs", type=int, default=2000)
parser.add_argument("--max_minutes", type=int, default=300)
parser.add_argument("--patience", type=int, default=20)
parser.add_argument("--checkpoint_freq", type=int, default=20)
# output folder
parser.add_argument("--save_dir", type=str, required=True)
# number of trials when executing cpa.sweep
parser.add_argument("--sweep_seeds", type=int, default=200)
return dict(vars(parser.parse_args()))
if __name__ == "__main__":
train_cpa(parse_arguments())
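# Added example invocation (flag values are placeholders, not from the
# original repo):
#   python -m cpa.train --data datasets/my_adata.h5ad \
#       --save_dir /tmp/cpa_run --max_epochs 100 --covariate_keys cell_type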
|
CPA-main
|
cpa/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import warnings
import numpy as np
import torch
warnings.simplefilter(action="ignore", category=FutureWarning)
from typing import Union
import pandas as pd
import scanpy as sc
import scipy
from cpa.helper import rank_genes_groups
from sklearn.preprocessing import OneHotEncoder
def ranks_to_df(data, key="rank_genes_groups"):
"""Converts an `sc.tl.rank_genes_groups` result into a MultiIndex dataframe.
You can access various levels of the MultiIndex with `df.loc[[category]]`.
Params
------
data : `AnnData`
key : str (default: 'rank_genes_groups')
Field in `.uns` of data where `sc.tl.rank_genes_groups` result is
stored.
"""
d = data.uns[key]
dfs = []
for k in d.keys():
if k == "params":
continue
series = pd.DataFrame.from_records(d[k]).unstack()
series.name = k
dfs.append(series)
return pd.concat(dfs, axis=1)
def check_adata(adata, special_fields):
replaced = False
for sf in special_fields:
if sf in adata.obs:
flag = 0
for el in adata.obs[sf].values:
if "_" in str(el):
flag += 1
if flag:
print(
f"WARNING. Special characters ('_') were found in: '{sf}'.",
"They will be replaced with '-'.",
"Be careful, it may lead to errors downstream.",
)
adata.obs[sf] = [s.replace("_", "-") for s in adata.obs[sf].values]
replaced = True
return adata, replaced
indx = lambda a, i: a[i] if a is not None else None
class Dataset:
def __init__(
self,
data,
perturbation_key=None,
dose_key=None,
covariate_keys=None,
split_key="split",
control=None,
):
        if isinstance(data, str):
data = sc.read(data)
        # Assert that the required keys are present in the adata object
        assert perturbation_key in data.obs.columns, f"Perturbation {perturbation_key} is missing in the provided adata"
        if isinstance(covariate_keys, str):
            covariate_keys = [covariate_keys]
        for key in covariate_keys or []:
            assert key in data.obs.columns, f"Covariate {key} is missing in the provided adata"
        assert dose_key in data.obs.columns, f"Dose {dose_key} is missing in the provided adata"
        assert split_key in data.obs.columns, f"Split {split_key} is missing in the provided adata"
        assert not (split_key is None), "split_key can not be None"
        # If covariate_keys is empty or None, create a dummy covariate
        if not covariate_keys:
print("Adding a dummy covariate...")
data.obs['dummy_cov'] = 'dummy_cov'
covariate_keys = ['dummy_cov']
self.perturbation_key = perturbation_key
self.dose_key = dose_key
if scipy.sparse.issparse(data.X):
self.genes = torch.Tensor(data.X.A)
else:
self.genes = torch.Tensor(data.X)
self.var_names = data.var_names
if isinstance(covariate_keys, str):
covariate_keys = [covariate_keys]
self.covariate_keys = covariate_keys
data, replaced = check_adata(
data, [perturbation_key, dose_key] + covariate_keys
)
for cov in covariate_keys:
if not (cov in data.obs):
data.obs[cov] = "unknown"
if split_key in data.obs:
pass
else:
print("Performing automatic train-test split with 0.25 ratio.")
from sklearn.model_selection import train_test_split
data.obs[split_key] = "train"
idx = list(range(len(data)))
idx_train, idx_test = train_test_split(
data.obs_names, test_size=0.25, random_state=42
)
data.obs[split_key].loc[idx_train] = "train"
data.obs[split_key].loc[idx_test] = "test"
if "control" in data.obs:
self.ctrl = data.obs["control"].values
else:
print(f"Assigning control values for {control}")
assert_msg = "Please provide a name for control condition."
assert not (control is None), assert_msg
data.obs["control"] = 0
if dose_key in data.obs:
pert, dose = control.split("_")
data.obs.loc[
(data.obs[perturbation_key] == pert) & (data.obs[dose_key] == dose),
"control",
] = 1
else:
pert = control
data.obs.loc[(data.obs[perturbation_key] == pert), "control"] = 1
self.ctrl = data.obs["control"].values
assert_msg = "Cells to assign as control not found! Please check the name of control variable."
assert sum(self.ctrl), assert_msg
print(f"Assigned {sum(self.ctrl)} control cells")
if perturbation_key is not None:
if dose_key is None:
raise ValueError(
f"A 'dose_key' is required when provided a 'perturbation_key'({perturbation_key})."
)
if not (dose_key in data.obs):
print(
f"Creating a default entrance for dose_key {dose_key}:",
"1.0 per perturbation",
)
dose_val = []
for i in range(len(data)):
pert = data.obs[perturbation_key].values[i].split("+")
dose_val.append("+".join(["1.0"] * len(pert)))
data.obs[dose_key] = dose_val
if not ("cov_drug_dose_name" in data.obs) or replaced:
print("Creating 'cov_drug_dose_name' field.")
cov_drug_dose_name = []
for i in range(len(data)):
comb_name = ""
for cov_key in self.covariate_keys:
comb_name += f"{data.obs[cov_key].values[i]}_"
comb_name += f"{data.obs[perturbation_key].values[i]}_{data.obs[dose_key].values[i]}"
cov_drug_dose_name.append(comb_name)
data.obs["cov_drug_dose_name"] = cov_drug_dose_name
if not ("rank_genes_groups_cov" in data.uns) or replaced:
print("Ranking genes for DE genes.")
rank_genes_groups(data, groupby="cov_drug_dose_name")
self.pert_categories = np.array(data.obs["cov_drug_dose_name"].values)
self.de_genes = data.uns["rank_genes_groups_cov"]
self.drugs_names = np.array(data.obs[perturbation_key].values)
self.dose_names = np.array(data.obs[dose_key].values)
# get unique drugs
drugs_names_unique = set()
for d in self.drugs_names:
[drugs_names_unique.add(i) for i in d.split("+")]
self.drugs_names_unique = np.array(list(drugs_names_unique))
# save encoder for a comparison with Mo's model
# later we need to remove this part
            # note: in scikit-learn >= 1.2 this argument is named `sparse_output`
            encoder_drug = OneHotEncoder(sparse=False)
encoder_drug.fit(self.drugs_names_unique.reshape(-1, 1))
# Store as attribute for molecular featurisation
self.encoder_drug = encoder_drug
self.perts_dict = dict(
zip(
self.drugs_names_unique,
encoder_drug.transform(self.drugs_names_unique.reshape(-1, 1)),
)
)
# get drug combinations
drugs = []
for i, comb in enumerate(self.drugs_names):
drugs_combos = encoder_drug.transform(
np.array(comb.split("+")).reshape(-1, 1)
)
dose_combos = str(data.obs[dose_key].values[i]).split("+")
for j, d in enumerate(dose_combos):
if j == 0:
drug_ohe = float(d) * drugs_combos[j]
else:
drug_ohe += float(d) * drugs_combos[j]
drugs.append(drug_ohe)
self.drugs = torch.Tensor(drugs)
atomic_ohe = encoder_drug.transform(self.drugs_names_unique.reshape(-1, 1))
self.drug_dict = {}
for idrug, drug in enumerate(self.drugs_names_unique):
i = np.where(atomic_ohe[idrug] == 1)[0][0]
self.drug_dict[i] = drug
else:
self.pert_categories = None
self.de_genes = None
self.drugs_names = None
self.dose_names = None
self.drugs_names_unique = None
self.perts_dict = None
self.drug_dict = None
self.drugs = None
if isinstance(covariate_keys, list) and covariate_keys:
if not len(covariate_keys) == len(set(covariate_keys)):
raise ValueError(f"Duplicate keys were given in: {covariate_keys}")
self.covariate_names = {}
self.covariate_names_unique = {}
self.covars_dict = {}
self.covariates = []
for cov in covariate_keys:
self.covariate_names[cov] = np.array(data.obs[cov].values)
self.covariate_names_unique[cov] = np.unique(self.covariate_names[cov])
names = self.covariate_names_unique[cov]
encoder_cov = OneHotEncoder(sparse=False)
encoder_cov.fit(names.reshape(-1, 1))
self.covars_dict[cov] = dict(
zip(list(names), encoder_cov.transform(names.reshape(-1, 1)))
)
names = self.covariate_names[cov]
self.covariates.append(
torch.Tensor(encoder_cov.transform(names.reshape(-1, 1))).float()
)
else:
self.covariate_names = None
self.covariate_names_unique = None
self.covars_dict = None
self.covariates = None
if perturbation_key is not None:
self.ctrl_name = list(
np.unique(data[data.obs["control"] == 1].obs[self.perturbation_key])
)
else:
self.ctrl_name = None
if self.covariates is not None:
self.num_covariates = [
len(names) for names in self.covariate_names_unique.values()
]
else:
self.num_covariates = [0]
self.num_genes = self.genes.shape[1]
self.num_drugs = len(self.drugs_names_unique) if self.drugs is not None else 0
self.is_control = data.obs["control"].values.astype(bool)
self.indices = {
"all": list(range(len(self.genes))),
"control": np.where(data.obs["control"] == 1)[0].tolist(),
"treated": np.where(data.obs["control"] != 1)[0].tolist(),
"train": np.where(data.obs[split_key] == "train")[0].tolist(),
"test": np.where(data.obs[split_key] == "test")[0].tolist(),
"ood": np.where(data.obs[split_key] == "ood")[0].tolist(),
}
def subset(self, split, condition="all"):
idx = list(set(self.indices[split]) & set(self.indices[condition]))
return SubDataset(self, idx)
def __getitem__(self, i):
return (
self.genes[i],
indx(self.drugs, i),
*[indx(cov, i) for cov in self.covariates],
)
def __len__(self):
return len(self.genes)
class SubDataset:
"""
Subsets a `Dataset` by selecting the examples given by `indices`.
"""
def __init__(self, dataset, indices):
self.perturbation_key = dataset.perturbation_key
self.dose_key = dataset.dose_key
self.covariate_keys = dataset.covariate_keys
self.perts_dict = dataset.perts_dict
self.covars_dict = dataset.covars_dict
self.genes = dataset.genes[indices]
self.drugs = indx(dataset.drugs, indices)
self.covariates = [indx(cov, indices) for cov in dataset.covariates]
self.drugs_names = indx(dataset.drugs_names, indices)
self.pert_categories = indx(dataset.pert_categories, indices)
self.covariate_names = {}
for cov in self.covariate_keys:
self.covariate_names[cov] = indx(dataset.covariate_names[cov], indices)
self.var_names = dataset.var_names
self.de_genes = dataset.de_genes
self.ctrl_name = indx(dataset.ctrl_name, 0)
self.num_covariates = dataset.num_covariates
self.num_genes = dataset.num_genes
self.num_drugs = dataset.num_drugs
self.is_control = dataset.is_control[indices]
def __getitem__(self, i):
return (
self.genes[i],
indx(self.drugs, i),
*[indx(cov, i) for cov in self.covariates],
)
def subset_condition(self, control=True):
idx = np.where(self.is_control == control)[0].tolist()
return SubDataset(self, idx)
def __len__(self):
return len(self.genes)
def load_dataset_splits(
data: str,
perturbation_key: Union[str, None],
dose_key: Union[str, None],
covariate_keys: Union[list, str, None],
split_key: str,
control: Union[str, None],
return_dataset: bool = False,
):
dataset = Dataset(
data, perturbation_key, dose_key, covariate_keys, split_key, control
)
splits = {
"training": dataset.subset("train", "all"),
"test": dataset.subset("test", "all"),
"ood": dataset.subset("ood", "all"),
}
if return_dataset:
return splits, dataset
else:
return splits
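# Added usage sketch (paths and key names below are placeholders):
#   splits = load_dataset_splits(
#       "datasets/my_adata.h5ad",
#       perturbation_key="condition",
#       dose_key="dose_val",
#       covariate_keys=["cell_type"],
#       split_key="split",
#       control="control_1.0",
#   )
#   genes, drugs, *covariates = splits["training"][0]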
|
CPA-main
|
cpa/data.py
|
import sys
sys.path.append("../")
import cpa
import scanpy as sc
import scvi
from cpa.helper import rank_genes_groups_by_cov
def sim_adata():
adata = scvi.data.synthetic_iid(run_setup_anndata=False)
sc.pp.filter_cells(adata, min_counts=0)
sc.pp.log1p(adata)
adata.obs["condition"] = "drugA"
adata.obs["condition"].values[:100] = "control"
adata.obs["condition"].values[350:400] = "control"
adata.obs["condition"].values[100:200] = "drugB"
adata.obs["split"] = "train"
return adata
if __name__ == "__main__":
adata = sim_adata()
cpa_api = cpa.api.API(
adata,
pretrained=None,
perturbation_key="condition",
dose_key="dose_val",
covariate_keys=["batch"],
hparams={},
device="cuda:0",
control="control",
)
print("\nStart training")
cpa_api.train(max_epochs=1)
|
CPA-main
|
tests/test.py
|
"""For pip."""
from setuptools import setup
exec(open("unagi/_version.py").read())
setup(
name="unagi",
version=__version__,
description="Official repo for the paper 'Perfectly Balanced: Improving Transfer and Robustness of Supervised Contrastive Learning'",
long_description=open("README.md").read(),
packages=['unagi'],
scripts=["bin/unagi"],
install_requires=[
"cmake>=3.21.2, <4.0.0",
"datasets>=1.11.0, <2.0.0",
"einops>=0.3.2, <1.0.0",
"meerkat-ml",
"opt-einsum>=3.3.0, <4.0.0",
"pykeops>=1.5, <2.0",
"pytorch-lightning>=1.4.5, <1.4.9",
"torch",
"torchvision>=0.10.0, <2.0.0",
"transformers",
],
include_package_data=True,
url="https://github.com/HazyResearch/thanos-code",
classifiers=[ # https://pypi.python.org/pypi?:action=list_classifiers
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
],
python_requires=">=3.8",
author="HazyResearch Team",
)
|
thanos-code-main
|
setup.py
|
import logging
from typing import Any, Dict, Sequence
import hydra
import torch
from torch import nn
from unagi.trainer import MODULE_REGISTRY
from unagi.trainer.task import UnagiTask
logger = logging.getLogger(__name__)
def _instantiate_modules(module_config: Dict[str, Any], type: str):
# Assert type with useful error message
assert type in {
"preprocessors",
"embeddings",
"encoders",
"decoders",
"losses",
}, f"{type} is not a valid module type"
module_dict = {}
for uid, cfg in module_config.items():
module_name, _target_ = cfg["module"], cfg["_target_"]
module_kwargs = {
k: v
for k, v in cfg.items()
if k
not in ["uid", "module", "_target_", "path_to_checkpoint", "source_module"]
}
if isinstance(module_name, str):
try:
# TODO: add this import
module = MODULE_REGISTRY[type][module_name](**module_kwargs)
            except KeyError:
                raise KeyError(f"{module_name} is not a valid module for {type} type")
else:
if isinstance(_target_, str):
module = hydra.utils.get_method(path=_target_)(**module_kwargs)
# load in weights from pretrained state_dict
if "path_to_checkpoint" in cfg and cfg["path_to_checkpoint"] is not None:
ckpt = torch.load(cfg["path_to_checkpoint"])
source_module = uid
if "source_module" in cfg and cfg["source_module"] is not None:
source_module = cfg["source_module"]
module.load_state_dict(
{
".".join(k.split(".")[3:]): v
for k, v in ckpt["state_dict"].items()
if (
source_module in k
and "model" in k
and k.split(".")[2] == source_module
)
},
strict=True,
)
# Add to all modules for this type
module_dict[uid] = module
return module_dict
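# Added sketch of the per-module config shape this parser expects (the key
# names are inferred from the parsing code above; concrete values are
# assumptions):
#   module_config = {
#       "my_encoder": {
#           "module": "transformer",      # looked up in MODULE_REGISTRY
#           "_target_": None,             # or a dotted path resolved by hydra
#           "path_to_checkpoint": None,   # optional pretrained weights
#           "source_module": None,        # optional checkpoint sub-module
#           "d_model": 256,               # remaining keys become kwargs
#       },
#   }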
def instantiate_modules(
preprocessors_config: Sequence[Dict[str, Any]],
embeddings_config: Sequence[Dict[str, Any]],
encoders_config: Sequence[Dict[str, Any]],
decoders_config: Sequence[Dict[str, Any]],
losses_config: Sequence[Dict[str, Any]],
):
"""
Instantiate all modules for the model.
"""
module_dict, loss_dict = {}, {}
module_dict.update(_instantiate_modules(preprocessors_config, "preprocessors"))
module_dict.update(_instantiate_modules(embeddings_config, "embeddings"))
module_dict.update(_instantiate_modules(encoders_config, "encoders"))
module_dict.update(_instantiate_modules(decoders_config, "decoders"))
loss_dict.update(_instantiate_modules(losses_config, "losses"))
return nn.ModuleDict(module_dict), nn.ModuleDict(loss_dict)
def create_tasks(task_configs):
return {
name: UnagiTask(
name=name,
# module_dict=module_dict,
# loss_dict=loss_dict,
task_flow=task_config["task_flow"],
losses=task_config["losses"],
task_weight=task_config["task_weight"],
metrics=task_config["metrics"],
torchmetrics=task_config["torchmetrics"],
callbacks=task_config["callbacks"],
# weight=task_config["task_weight"],
)
for name, task_config in task_configs.items()
}
# def create_task(config):
# """
# Builds Unagi tasks.
# In Unagi, we define task as the learning criterea
# (e.g. supervised, contrastive, masked).
# # Inputs
# :param config: (dict) dictionary representation of experiment config file
# # Returns
# :return: list of Unagi tasks
# """
# # dataset_desc = vars(get_dataset_config(config["dataset"]["name"]))
# # model_params = get_model_params(config["model"], dataset_desc)
# # model_params = get_text_embed_layer(config, model_params)
# # task_metric = dataset_desc["TASK_METRIC"]
# """if "contrastive" in config["tasks"]:
# n_views = config["tasks"]["contrastive"]["contrastive_views"]
# else:
# n_views = 1"""
# # shared_modules = get_module_dict_v2(config["model"]["name"], model_params)
# # shared_modules = get_module_dict_v2(config["model"])
# # augmentation_modules = add_augmentation_modules(config["augmentations"])
# # shared_modules.update(augmentation_modules)
# """loss_fns, _ = get_loss_fns(
# config["tasks"],
# config["model"]["label_smoothing"],
# task_type=dataset_desc["TASK_TYPE"],
# )"""
# # loss_fns = get_loss_fns_v2(
# # config["tasks"],
# # config["model"]["label_smoothing"],
# # classification_type=,
# # # classification_type=dataset_desc["TASK_TYPE"],
# # )
# aug_type = None
# if config["augmentations"]["patch"]["type"] is not None:
# aug_type = "patch"
# elif config["augmentations"]["feature"]["type"] is not None:
# aug_type = "feature"
# all_tasks = []
# for task in config["tasks"]:
# task_module_dict = get_input_transform_layers()
# #task_module_dict.update(shared_modules)
# module_pool = nn.ModuleDict(task_module_dict)
# task_name = task["name"]
# task_flow = task["task_flow"]
# task_type = task["type"]
# loss_module = get_loss_module(task_type, loss_fn=task["loss_fn"])
# # output_func = get_output_layer(dataset_desc, task_type)
# # scorer = Scorer(
# # metrics=task_metric) if task_type == "supervised" else Scorer()
# n_views = task["contrastive_views"] if "contrastive_views"
# in task.keys() else 1
# encoder_module_names = task["embed_layers"]
# if task_type == "supervised":
# classification_module_name = task["classification_layers"]
# encoder_module_names = classification_module_name
# loss_func = partial(
# loss_module, loss_fns, aug_type, n_views, encoder_module_names,
# task_name
# )
# if "weight" in task.keys():
# weight = task["weight"]
# else:
# weight = 1
# """action_outputs = None
# if "action_outputs" in task.keys():
# action_outputs = task["action_outputs"]"""
# # if output_func is not None:
# # output_func = partial(output_func, classification_module_name[
# 0])
# # task = EmmentalTask(
# # name=task_name,
# # module_pool=module_pool,
# # task_flow=task_flow,
# # loss_func=loss_func,
# # output_func=output_func,
# # action_outputs=action_outputs,
# # scorer=scorer,
# # weight=weight,
# # )
# task = UnagiTask(
# name=task_name,
# module_pool=module_pool,
# task_flow=task_flow,
# loss_func=loss_func,
# weight=weight,
# )
# all_tasks.append(task)
# return all_tasks
|
thanos-code-main
|
unagi/task.py
|
import logging
from types import SimpleNamespace
from unagi.datasets import DATASET_CLASSES
logger = logging.getLogger(__name__)
def get_data(dataflow_config):
"""
Builds datasets and dataloaders from config file.
# Inputs
    :param dataflow_config: (dict) dataflow section of the experiment config
# Returns
:return: SimpleNamespace containing datasets and dataloaders (train, val, test).
A dataloader is built for every task / dataset type split.
"""
datasets = list(dataflow_config.dataset.keys())
assert len(datasets) == 1, "Only one dataset is supported."
dataset_name = datasets[0]
dataset = DATASET_CLASSES[dataset_name](
data_dir=dataflow_config.data_dir,
x_transforms=dataflow_config.x,
y_transforms=dataflow_config.y,
transform_pool=dataflow_config.transforms,
**dataflow_config.dataset[dataset_name],
)
train_dataloaders = dataset.train_dataloader(
batch_size=dataflow_config.batch_size,
num_workers=dataflow_config.num_workers,
drop_last=True,
)
val_dataloaders = dataset.val_dataloader(
batch_size=dataflow_config.batch_size,
num_workers=dataflow_config.num_workers,
drop_last=True,
)
test_dataloaders = dataset.test_dataloader(
batch_size=dataflow_config.batch_size,
num_workers=dataflow_config.num_workers,
drop_last=True,
)
return SimpleNamespace(
dataset=dataset,
train_dataloaders=train_dataloaders,
val_dataloaders=val_dataloaders,
test_dataloaders=test_dataloaders,
)
|
thanos-code-main
|
unagi/data_driver.py
|
"""Unagi version."""
__version__ = "0.0.1+dev"
|
thanos-code-main
|
unagi/_version.py
|
import logging
import pytorch_lightning as pl
from unagi.data_driver import get_data
from unagi.trainer.trainer import UnagiModule
logger = logging.getLogger(__name__)
def main(config):
# Create dataloaders
data = get_data(config.dataflow)
# set seed
if (
"random_seed" in config.dataflow.keys()
and config.dataflow.random_seed is not None
):
pl.seed_everything(seed=config.dataflow.random_seed)
if config.model.train:
unagi_module = UnagiModule(
config=config,
dataset=data.dataset,
train_dataloaders=data.train_dataloaders,
val_dataloaders=data.val_dataloaders,
test_dataloaders=data.test_dataloaders,
)
if "wandb" in config.keys():
logger = pl.loggers.WandbLogger(**{**config.wandb, "config": config})
# Create trainer
trainer = pl.Trainer(
**{
**config.trainer,
"logger": logger,
"callbacks": unagi_module.configure_callbacks(),
}
)
trainer.fit(unagi_module)
trainer.test(ckpt_path="best")
|
thanos-code-main
|
unagi/unagi.py
|
TEXT = "text"
TIMESERIES = "timeseries"
IMAGE = "image"
TYPE = "type"
AUGMENTATIONS = "augmentations"
CONTRASTIVE = "contrastive"
MASKED = "masked"
NAME = "name"
DATASET = "dataset"
TASKS = "tasks"
RAW = "raw"
PATCH = "patch"
FEATURE = "feature"
|
thanos-code-main
|
unagi/constants.py
|
import logging
from functools import partial
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
from torch import nn
logger = logging.getLogger(__name__)
def create_unagi_task(
model_name,
model,
dataset_name,
task_flow,
loss_module,
loss_fns,
output_classification,
task_metric,
n_views,
):
loss = loss_module
output = output_classification
logger.info(f"Built model: {model_name}")
return EmmentalTask(
name=dataset_name,
module_pool=nn.ModuleDict({"base_model": model}),
task_flow=task_flow,
loss_func=partial(loss, "base_model", model, loss_fns, n_views),
output_func=partial(output, "base_model"),
scorer=Scorer(metrics=task_metric),
)
|
thanos-code-main
|
unagi/tasks/unagi_task_template.py
|
from torch.nn import (
BCEWithLogitsLoss as BCELoss,
CrossEntropyLoss as CELoss,
L1Loss as L1Loss,
MSELoss as MSELoss,
)
from unagi.tasks.loss_fns.ce_loss import LabelSmoothing, SoftCrossEntropyLoss
from unagi.tasks.loss_fns.contrastive_loss import ContrastiveLoss
from unagi.tasks.loss_fns.mask_loss import BatchMask, BatchMaskDup
"""from unagi.tasks.loss_modules import (
ce_loss,
contrastive_loss,
contrastive_loss_clip,
mask_loss,
sce_loss,
)"""
from unagi.tasks.output_layer_modules import (
multiclass_classification,
multilabel_classification,
)
from unagi.tasks.task_preprocessing_layer import (
CLIPInputTransforms,
ContrastiveInputTransforms,
MaskInputTransforms,
SupervisedInputTransform,
ViewSelect,
)
LOSS_MODULE_REGISTRY = {
"sup_con": ContrastiveLoss,
"sim_clr": ContrastiveLoss,
"l_spread": ContrastiveLoss,
"l_attract": ContrastiveLoss,
"batch_mask": BatchMask,
"batch_mask_dup": BatchMaskDup,
"label_smoothing": LabelSmoothing,
"cross_entropy": CELoss,
"binary_cross_entropy": BCELoss,
"soft_cross_entropy": SoftCrossEntropyLoss,
"mask_regular": L1Loss,
"mse_loss": MSELoss,
}
"""LOSS_MODULE_REGISTRY = {
"masked": mask_loss,
"contrastive": contrastive_loss,
"clip": contrastive_loss_clip,
"cross_entropy": ce_loss,
"soft_cross_entropy": sce_loss,
}"""
OUTPUT_LAYER_REGISTRY = {
"multi_class": multiclass_classification,
"binary_class": multiclass_classification,
"multi_label": multilabel_classification,
}
TASK_PREPROCESSING_LAYER = {
"supervised": SupervisedInputTransform,
"masked": MaskInputTransforms,
"contrastive": ContrastiveInputTransforms,
"clip": CLIPInputTransforms,
}
INTERMEDIATE_TRANSFORM_LAYER = {"view_select": ViewSelect}
supervised_taskflow_default = [
{
"name": "supervised_task_preprocessing",
"module": "supervised_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [("supervised_task_preprocessing", 0)],
},
{"name": "encoder", "module": "encoder", "inputs": [("pre_encoder", 0)]},
{"name": "classifier", "module": "classifier", "inputs": [("encoder", 1)]},
]
supervised_taskflow_patchaug = [
{
"name": "supervised_task_preprocessing",
"module": "supervised_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [("supervised_task_preprocessing", 0)],
},
{
"name": "patch_augmentation",
"module": "patch_augmentation",
"inputs": [
("pre_encoder", 0),
("supervised_task_preprocessing", 1),
("supervised_task_preprocessing", 2),
],
},
{
"name": "encoder",
"module": "encoder",
"inputs": [("patch_augmentation", 0)],
},
{"name": "classifier", "module": "classifier", "inputs": [("encoder", 1)]},
]
supervised_taskflow_featureaug = [
{
"name": "supervised_task_preprocessing",
"module": "supervised_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [("supervised_task_preprocessing", 0)],
},
{"name": "encoder", "module": "encoder", "inputs": [("pre_encoder", 0)]},
{
"name": "feature_augmentation",
"module": "feature_augmentation",
"inputs": [
("encoder", 1),
("supervised_task_preprocessing", 1),
("supervised_task_preprocessing", 2),
],
},
{
"name": "classifier",
"module": "classifier",
"inputs": [("feature_augmentation", 0)],
},
]
masked_taskflow_default = [
{
"name": "masked_task_preprocessing",
"module": "masked_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [
("masked_task_preprocessing", 0),
("masked_task_preprocessing", 1),
],
},
{
"name": "encoder",
"module": "encoder",
"inputs": [("pre_encoder", 0)], # src_pre_enccoding
},
{
"name": "decoder",
"module": "decoder",
"inputs": [
("encoder", 1), # src_encoding_hidden
("pre_encoder", 1), # target_pre_encoding
("masked_task_preprocessing", 2), # mask
],
},
]
masked_taskflow_patchaug = [
{
"name": "masked_task_preprocessing",
"module": "masked_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [
("masked_task_preprocessing", 0),
("masked_task_preprocessing", 1),
],
},
{
"name": "patch_augmentation",
"module": "patch_augmentation",
"inputs": [("pre_encoder", 0), ("masked_task_preprocessing", 1)],
},
{
"name": "encoder",
"module": "encoder",
"inputs": [("patch_augmentation", 0)], # src_pre_enccoding
},
{
"name": "decoder",
"module": "decoder",
"inputs": [
("encoder", 1), # src_encoding_hidden
("pre_encoder", 1), # target_pre_encoding
("masked_task_preprocessing", 2), # mask
],
},
]
masked_taskflow_featureaug = [
{
"name": "masked_task_preprocessing",
"module": "masked_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [
("masked_task_preprocessing", 0),
("masked_task_preprocessing", 1),
],
},
{
"name": "encoder",
"module": "encoder",
"inputs": [("pre_encoder", 0)], # src_pre_enccoding
},
{
"name": "patch_augmentation",
"module": "patch_augmentation",
"inputs": [("encoder", 1), ("masked_task_preprocessing", 1)],
},
{
"name": "decoder",
"module": "decoder",
"inputs": [
("encoder", 1), # src_encoding_hidden
("pre_encoder", 1), # target_pre_encoding
("masked_task_preprocessing", 2), # mask
],
},
]
contrastive_taskflow_default = [
{
"name": "contrastive_task_preprocessing",
"module": "contrastive_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [("contrastive_task_preprocessing", 0)],
},
{"name": "encoder", "module": "encoder", "inputs": [("pre_encoder", 0)]},
]
contrastive_taskflow_patchaug = [
{
"name": "contrastive_task_preprocessing",
"module": "contrastive_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder",
"module": "pre_encoder",
"inputs": [("contrastive_task_preprocessing", 0)],
},
{
"name": "patch_augmentation",
"module": "patch_augmentation",
"inputs": [("pre_encoder", 0), ("contrastive_task_preprocessing", 1)],
},
{
"name": "encoder",
"module": "encoder",
"inputs": [("patch_augmentation", 0)],
},
]
clip_taskflow_default = [
{
"name": "clip_task_preprocessing",
"module": "clip_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder_img",
"module": "pre_encoder_img",
"inputs": [("clip_task_preprocessing", "image")],
},
{
"name": "pre_encoder_text",
"module": "pre_encoder_text",
"inputs": [("clip_task_preprocessing", "text")],
},
{
"name": "text_encoder",
"module": "encoder_text",
"inputs": [("pre_encoder_text", 0)],
},
{
"name": "image_encoder",
"module": "encoder_img",
"inputs": [("pre_encoder_image", 0)],
},
]
TASK_FLOWS = {
"supervised": {
"default": supervised_taskflow_default,
"patch_aug": supervised_taskflow_patchaug,
"feature_aug": supervised_taskflow_featureaug,
},
"contrastive": {
"default": contrastive_taskflow_default,
"patch_aug": contrastive_taskflow_patchaug,
},
"masked": {
"default": masked_taskflow_default,
"patch_aug": masked_taskflow_patchaug,
"feature_aug": masked_taskflow_featureaug,
},
"clip": {"default": clip_taskflow_default},
}
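# Added note: each task-flow step consumes outputs addressed as
# (producer_name, output_index). For instance, in
# `supervised_taskflow_default` the classifier reads the encoder's second
# output:
#   TASK_FLOWS["supervised"]["default"][-1]["inputs"]  # -> [("encoder", 1)]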
AUGMENTATION_AUGMODULE_MAP = {
"raw": "classifier",
"patchnet": "patchnet_augmentation",
"feature": "feature_augmentation",
}
|
thanos-code-main
|
unagi/tasks/__init__.py
|
import torch
from einops import rearrange
def mask_loss(
loss_fns,
aug_type,
n_views,
module_names,
task_name,
intermediate_output_dict,
Y,
):
# TODO: MODIFY THIS TO SUPPORT NEW FLOW
total_loss = 0
pre_encoding_embs = intermediate_output_dict["pre_encoder"][0]
    decoder_outputs = intermediate_output_dict["decoder"][0]
    for field_name, field_emb in pre_encoding_embs.items():
        total_loss += loss_fns["masked_loss"](decoder_outputs[field_name], field_emb)
return total_loss
def ce_loss(
loss_fns,
aug_type,
n_views,
module_names,
task_name,
intermediate_output_dict,
Y,
):
total_loss = 0
if aug_type is not None and aug_type != "raw":
Y = intermediate_output_dict[f"{aug_type}_augmentation"][-1]
# Y = rearrange(Y, "b v d -> (b v) d", v=n_views)
for mname in module_names:
output = intermediate_output_dict[mname][0]
# output = rearrange(output, "(b v) d -> b v d", v=n_views)
total_loss += loss_fns[task_name](output, Y)
return total_loss
def sce_loss(
loss_fns,
aug_type,
n_views,
module_names,
task_name,
intermediate_output_dict,
Y,
):
if aug_type is not None and aug_type != "raw":
Y = intermediate_output_dict[f"{aug_type}_augmentation"][-1]
if len(Y.size()) == 1:
label = intermediate_output_dict[module_names][0].new_zeros(
intermediate_output_dict[module_names][0].size()
)
label.scatter_(1, Y.view(Y.size()[0], 1), 1.0)
else:
label = Y
return loss_fns[task_name](intermediate_output_dict[module_names][0], label)
def contrastive_loss(
loss_fns,
aug_type,
n_views, # number of views passed to contrastive loss function
module_names,
task_name,
intermediate_output_dict,
Y,
):
total_loss = 0
if aug_type is not None and aug_type != "raw":
Y = intermediate_output_dict[f"{aug_type}_augmentation"][-1]
# tgts = Y.unsqueeze(1).repeat(1, n_views)
tgts = rearrange(Y, "(b v) ... -> b v ...", v=n_views)
assert n_views == len(module_names), "Please properly select views"
field_embs = [
intermediate_output_dict[embed_layer_name][0]
for embed_layer_name in module_names
]
embs = torch.stack(field_embs, dim=1)
total_loss += loss_fns[task_name](embs, tgts)
"""for embed_layer_name in module_names:
field_emb = intermediate_output_dict[embed_layer_name][0]
embs = rearrange(field_emb, "(b v) d -> b v d", b=n_views)
total_loss += loss_fns[task_name](embs, tgts)"""
return total_loss
def contrastive_loss_clip(
loss_fns,
aug_type,
n_views,
module_names,
task_name,
intermediate_output_dict,
Y,
):
total_loss = 0
if aug_type is not None and aug_type != "raw":
Y = intermediate_output_dict[f"{aug_type}_augmentation"][-1]
tgts = rearrange(Y, "(b v) ... -> b v ...", v=n_views)
assert n_views == len(module_names), "Please properly select views"
field_embs = [
intermediate_output_dict[embed_layer_name][0]
for embed_layer_name in module_names
]
embs = torch.stack(field_embs, dim=1)
total_loss += loss_fns[task_name](embs, tgts)
"""tgts = Y.unsqueeze(1).repeat(1, n_views)
for embed_layer_name in module_names:
field_emb = intermediate_output_dict[embed_layer_name][0]
# if len(field_emb.shape) == 3: field_emb = field_emb.squeeze(1)
embs = rearrange(field_emb, "(b v) d -> b v d", v=n_views)
total_loss += loss_fns[task_name](embs, tgts)"""
return total_loss
|
thanos-code-main
|
unagi/tasks/loss_modules.py
|
import torch
from torch.nn import functional as F
def multiclass_classification(module_name, immediate_output_dict):
    # softmax over the module's last output
    return F.softmax(immediate_output_dict[module_name][-1], dim=1)
def multilabel_classification(module_name, immediate_output_dict):
    # element-wise sigmoid over the module's last output
    return torch.sigmoid(immediate_output_dict[module_name][-1])
|
thanos-code-main
|
unagi/tasks/output_layer_modules.py
|
import copy
import torch
import torch.nn as nn
from einops import rearrange
class MaskInputTransforms(nn.Module):
def __init__(self):
super().__init__()
self.name = "masked_input_transform"
def forward(self, x_batch):
temp_x_batch = copy.deepcopy(x_batch)
is_train = temp_x_batch["is_train"]
field_names = copy.deepcopy(list(temp_x_batch.keys()))
new_x_batch = {"inputs": {}, "mask": {}}
for field_name in field_names:
if field_name not in [
"is_train",
]:
values = temp_x_batch[field_name]
input = tuple(map(torch.stack, zip(*values)))
inputs, mask = input
if is_train:
mask = rearrange(mask, "b v d -> (b v) d")
else:
mask = None
new_x_batch["inputs"][field_name] = {
"value": inputs,
}
new_x_batch["mask"][field_name] = mask
return (
new_x_batch["inputs"],
new_x_batch["inputs"],
new_x_batch["mask"],
temp_x_batch["labels"],
) # (src, tgt, mask)
class ContrastiveInputTransforms(nn.Module):
def __init__(self):
super().__init__()
self.name = "contrastive_input_transform"
def forward(self, x_batch):
"""temp_x_batch = copy.deepcopy(x_batch)
is_train = temp_x_batch["is_train"]
feature_type_map = temp_x_batch["feature_type_map"]
field_names = copy.deepcopy(list(temp_x_batch.keys()))
new_x_batch = {"inputs": {}}
for field_name in field_names:
if field_name not in ["is_train", "feature_type_map", "labels"]:
values = temp_x_batch[field_name]
if is_train:
values = rearrange(values, "b v c d -> (b v) c d")
new_x_batch["inputs"][field_name] = {
"value": values,
"type": feature_type_map[field_name],
}
return [new_x_batch["inputs"], temp_x_batch["labels"]]"""
temp_x_batch = copy.deepcopy(x_batch)
for param, values in temp_x_batch.items():
if param not in [
"is_train",
]:
values = temp_x_batch[param]
values = rearrange(values, "b v ... -> (b v) ...")
temp_x_batch[param] = values
del temp_x_batch["is_train"]
return temp_x_batch
class CLIPInputTransforms(nn.Module):
def __init__(self):
super().__init__()
self.name = "clip_input_transform"
def forward(self, x_batch):
"""temp_x_batch = copy.deepcopy(x_batch)
is_train = temp_x_batch["is_train"]
feature_type_map = temp_x_batch["feature_type_map"]
field_names = copy.deepcopy(list(temp_x_batch.keys()))
final_output = {}
for field_name in field_names:
new_x_batch = {field_name: {}}
if field_name not in ["is_train", "feature_type_map", "labels"]:
values = temp_x_batch[field_name]
if is_train:
values = rearrange(values, "b v c d -> (b v) c d")
new_x_batch[field_name] = {
"value": values,
"type": feature_type_map[field_name],
}
if feature_type_map[field_name] == "text":
final_output["text"] = new_x_batch
elif feature_type_map[field_name] == "image":
final_output["image"] = new_x_batch
final_output["labels"] = temp_x_batch["labels"]"""
temp_x_batch = copy.deepcopy(x_batch)
for param, values in temp_x_batch.items():
if param not in [
"is_train",
]:
values = temp_x_batch[param]
values = rearrange(values, "b v ... -> (b v) ...")
temp_x_batch[param] = values
del temp_x_batch["is_train"]
return temp_x_batch
class SupervisedInputTransform(nn.Module):
def __init__(self):
super().__init__()
self.name = "supervised_input_transform"
def forward(self, x_batch):
# is_train = False
"""temp_x_batch = copy.deepcopy(x_batch)
feature_type_map = temp_x_batch["feature_type_map"]
field_names = copy.deepcopy(list(temp_x_batch.keys()))
new_x_batch = {"inputs": {}}
for field_name in field_names:
if field_name not in ["is_train", "feature_type_map", "labels"]:
values = temp_x_batch[field_name]
if field_name in feature_type_map.keys():
new_x_batch["inputs"][field_name] = {
"value": values,
"type": feature_type_map[field_name],
}
if field_name == "is_train":
is_train = temp_x_batch[field_name]"""
# pass foward inputs and labels
# return [new_x_batch["inputs"], temp_x_batch["labels"], is_train]
temp_x_batch = copy.deepcopy(x_batch)
for param, values in temp_x_batch.items():
if param not in [
"is_train",
]:
values = temp_x_batch[param]
values = rearrange(values, "b v ... -> (b v) ...")
temp_x_batch[param] = values
del temp_x_batch["is_train"]
return temp_x_batch
class ViewSelect(nn.Module):
def __init__(self, view_idx, n_views, **kwargs):
super().__init__()
self.name = "view_select"
self.view_idx = view_idx
self.n_views = n_views
def forward(self, input):
embs = rearrange(input, "(b v) ... -> b v ...", v=self.n_views)
embs = embs[:, self.view_idx, ...]
return embs
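# Added illustrative sketch: ViewSelect splits a flattened (batch * views)
# tensor back into views and keeps one of them. Shapes are assumptions.
def _example_view_select():
    x = torch.randn(8, 3)  # 4 samples x 2 views, flattened
    first_view = ViewSelect(view_idx=0, n_views=2)(x)
    return first_view.shape  # torch.Size([4, 3])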
|
thanos-code-main
|
unagi/tasks/task_preprocessing_layer.py
|
import torch
import torch.nn.functional as F
from unagi.tasks.loss_fns.base_loss import UnagiLoss
class BatchMask(UnagiLoss):
def __init__(self):
super().__init__()
def forward(self, last_layer, embs):
# embs == output of embedding layers
# last_layer == output of the decoder
# Both embs and last_layer
# batch x sentence x dims
        # for each prediction in the last layer, we assume no duplicates for the moment.
a = torch.einsum("b s d, b t d -> b s t", last_layer, embs)
return -torch.diagonal(a.log_softmax(-1), dim1=1, dim2=2).mean()
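# Added illustrative sketch (assumed shapes): with the decoder output equal
# to the embeddings, each position tends to score highest against itself,
# so the in-batch cross-entropy below is small.
def _example_batch_mask():
    embs = torch.randn(2, 5, 8)  # batch x sentence x dims
    return BatchMask()(embs, embs)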
class BatchMaskDup(UnagiLoss):
def __init__(self, eps=1e-5):
super().__init__()
print("Using Batchmask")
self.eps = eps
def forward(self, last_layer, embs):
# embs == output of embedding layers
# last_layer == output of the decoder
#
# Both embs and last_layer
# batch x sentence x dims
        # for each prediction in the last layer, duplicates in `embs` are
        # masked out below so only the first copy is scored.
def _g(x, y):
return torch.einsum("b s d, b t d -> b s t", x, y)
def _dupe_check(x):
b, s, _ = x.size()
x = F.normalize(x, dim=-1)
mask = _g(x, x) > 1 - self.eps
mask = mask.masked_fill(
torch.triu(torch.ones(b, s, s, device=x.device)) > 0, False
)
# The mask is true, if there is a duplicate that comes before it in order.
# As a result, only the first duplicate is counted.
return mask.any(-1)
a = _g(last_layer, embs)
a = a.masked_fill(_dupe_check(embs).unsqueeze(1), -1e9)
return -torch.diagonal(a.log_softmax(-1), dim1=1, dim2=2).mean()
|
thanos-code-main
|
unagi/tasks/loss_fns/mask_loss.py
|
from typing import List
import torch
import torch.nn.functional as F
from torch import Tensor
from unagi.tasks.loss_fns.base_loss import UnagiLoss
class SoftCrossEntropyLoss(UnagiLoss):
"""Calculate the CrossEntropyLoss with soft targets.
:param weight: Weight to assign to each of the classes. Default: None
:type weight: list of float
:param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.
'none': no reduction,
'mean': the mean of the losses,
'sum': the sum of the losses.
:type reduction: str
"""
def __init__(self, weight: List[float] = None, reduction: str = "mean"):
super().__init__()
if weight is None:
self.weight = None
else:
self.register_buffer("weight", torch.Tensor(weight))
self.reduction = reduction
def forward(self, input: Tensor, target: Tensor) -> Tensor: # type:ignore
"""Calculate the loss.
:param input: prediction logits
:param target: target probabilities
:return: loss
"""
n, k = input.shape
losses = input.new_zeros(n)
for i in range(k):
cls_idx = input.new_full((n,), i, dtype=torch.long)
loss = F.cross_entropy(input, cls_idx, reduction="none")
if self.weight is not None:
loss = loss * self.weight[i]
losses += target[:, i].float() * loss
if self.reduction == "mean":
losses = losses.mean()
elif self.reduction == "sum":
losses = losses.sum()
elif self.reduction != "none":
raise ValueError(f"Unrecognized reduction: {self.reduction}")
return losses
class LabelSmoothing(UnagiLoss):
"""NLL loss with label smoothing."""
def __init__(self, smoothing=0.0):
"""Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super().__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
# nll_loss = -logprobs.gather(dim=-1, index=target)
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
# smooth_loss = smooth_loss.unsqueeze(-1) # added
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
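def _ce_loss_demo():
    # Usage sketch (illustrative): SoftCrossEntropyLoss expects a
    # row-stochastic target matrix; LabelSmoothing expects hard class indices
    # and mixes in a uniform prior over the classes.
    logits = torch.randn(4, 3)
    soft_targets = torch.softmax(torch.randn(4, 3), dim=-1)
    print(SoftCrossEntropyLoss()(logits, soft_targets).item())
    hard_targets = torch.tensor([0, 2, 1, 2])
    print(LabelSmoothing(smoothing=0.1)(logits, hard_targets).item())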
|
thanos-code-main
|
unagi/tasks/loss_fns/ce_loss.py
|
import numpy as np
import torch
from einops import rearrange
from unagi.tasks.loss_fns.base_loss import UnagiLoss
class UnagiContrastiveLoss(UnagiLoss):
def __init__(self, views):
super().__init__()
self.views = views
def combine_views(self, *views):
all_views = [view for view in views]
return torch.stack(all_views, dim=1)
def expand_target(self, Y):
return rearrange(Y, "(b v) ... -> b v ...", v=self.views)
def forward(self, *args):
raise NotImplementedError
def weighted_logsumexp(mat, axis, weights):
_max, _ = torch.max(mat, dim=axis, keepdim=True)
lse = ((torch.exp(mat - _max) * weights).sum(dim=axis, keepdim=True)).log() + _max
return lse.squeeze(axis)
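def _weighted_logsumexp_check():
    # Quick sanity check (added for illustration): with unit weights the
    # helper above reduces to torch.logsumexp.
    mat = torch.randn(3, 4)
    assert torch.allclose(
        weighted_logsumexp(mat, 1, torch.ones(3, 4)), torch.logsumexp(mat, 1)
    )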
class ContrastiveLoss(UnagiContrastiveLoss):
def __init__(
self,
views,
type="l_spread", # sup_con, sim_clr, l_attract, l_spread
temp=0.5,
pos_in_denom=False, # as per dan, false by default
log_first=True, # TODO (ASN): should this be true (false originally)
a_lc=1.0,
a_spread=1.0,
lc_norm=False,
use_labels=True,
clip_pos=1.0,
pos_in_denom_weight=1.0,
):
super().__init__(views)
self.temp = temp
self.log_first = log_first
self.a_lc = a_lc
self.a_spread = a_spread
self.pos_in_denom = pos_in_denom
self.lc_norm = lc_norm
self.use_labels = use_labels
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.clip_pos = clip_pos
self.pos_in_denom_weight = pos_in_denom_weight
if type == "sup_con":
print(f"Using {type} contrastive loss function")
self.a_spread = 0
# self.pos_in_denom = False # this isn't doing anything
# elif type == "l_attract":
# print(f"Using {type} contrastive loss function")
# self.a_spread = 0
# self.pos_in_denom = False # working
elif type == "l_repel":
print(f"Using {type} contrastive loss function")
self.a_spread = 1
self.a_lc = 0
elif type == "sim_clr":
print(f"Using {type} contrastive loss function")
self.a_spread = 0
self.a_lc = 1
self.use_labels = False
def forward(self, *args):
inputs = args[:-1]
Y = args[-1]
x, labels = self.combine_views(*inputs), self.expand_target(Y)
# x has shape batch * num views * dimension
# labels has shape batch * num views
b, nViews, d = x.size()
vs = torch.split(x, 1, dim=1) # images indexed by view
if not self.use_labels:
            labels = torch.full_like(labels, -1)
ts = torch.split(labels, 1, dim=1) # labels indexed by view
l = 0.0
pairs = nViews * (nViews - 1) // 2
for ii in range(nViews):
vi = vs[ii].squeeze()
ti = ts[ii].squeeze()
ti_np = np.array([int(label) for label in ti])
for jj in range(ii):
vj = vs[jj].squeeze()
# num[i,j] is f(xi) * f(xj) / tau, for i,j
if self.lc_norm:
num = (
torch.einsum("b d, c d -> b c", vi, vj)
.div(self.temp)
.div(torch.norm(vi, dim=1) * torch.norm(vj, dim=1))
)
else:
num = torch.einsum("b d, c d -> b c", vi, vj).div(self.temp)
# store the first positive (augmentation of the same view)
pos_ones = []
neg_ones = [] # store the first negative
M_indices = []
div_factor = []
for i, cls in enumerate(ti_np):
# fall back to SimCLR
pos_indices = torch.tensor([i]).to(ti.device)
if cls != -1:
pos_indices = torch.where(ti == cls)[0]
# fall back to SimCLR
neg_indices = torch.tensor(
[idx for idx in range(ti.shape[0]) if idx != i]
).to(ti.device)
if cls != -1:
neg_indices = torch.where(ti != cls)[0]
all_indices = torch.stack(
[
torch.cat(
(
pos_indices
if self.pos_in_denom
else pos_indices[j : j + 1],
neg_indices,
)
)
for j in range(len(pos_indices))
]
)
# store all the positive indices
pos_ones.append(pos_indices)
# store all the negative indices that go up to m
neg_ones.append(neg_indices)
M_indices.append(all_indices)
div_factor.append(len(pos_indices))
if self.pos_in_denom_weight == 1.0:
# denominator for each point in the batch
denominator = torch.stack(
[
# reshape num with an extra dimension, then take the
# sum over everything
torch.logsumexp(num[i][M_indices[i]], 1).sum()
for i in range(len(ti))
]
)
else:
                    # denominator for each point in the batch
denominator = torch.stack(
[
# reshape num with an extra dimension, then take the
# sum over everything
weighted_logsumexp(
num[i][M_indices[i]],
1,
torch.tensor(
np.concatenate(
[
np.full(
len(pos_ones[i]),
self.pos_in_denom_weight,
),
np.ones(len(neg_ones[i])),
]
)
).to(ti.device),
).sum()
for i in range(len(ti))
]
)
                if self.clip_pos != 1.0:
                    # numerator: clamp the positive similarities at clip_pos
                    numerator = torch.stack(
                        [
                            # sum over all the positives
                            torch.sum(
                                -1 * torch.clamp(num[i][pos_ones[i]], max=self.clip_pos)
                            )
                            # -1 * num[i][pos_ones[i]]
                            for i in range(len(ti))
                        ]
                    )
                else:
                    # numerator
                    numerator = torch.stack(
                        [
                            # sum over all the positives
                            torch.sum(-1 * num[i][pos_ones[i]])
                            # -1 * num[i][pos_ones[i]]
                            for i in range(len(ti))
                        ]
                    )
log_prob = numerator + denominator
if self.a_spread > 0.0:
assert self.a_lc + self.a_spread != 0
numerator_spread = -1 * torch.diagonal(num, 0)
denominator_spread = torch.stack(
[
# reshape num with an extra dimension,
# then take the sum over everything
torch.logsumexp(num[i][pos_ones[i]], 0).sum()
for i in range(len(ti))
]
)
log_prob_spread = numerator_spread + denominator_spread
a = (
self.a_lc * log_prob.div(torch.tensor(div_factor).to(ti.device))
+ self.a_spread * log_prob_spread
) / (self.a_lc + self.a_spread)
else:
log_prob = log_prob.to(ti.device)
a = torch.tensor(self.a_lc).to(ti.device) * log_prob.to(
ti.device
).div(torch.tensor(div_factor).to(ti.device))
l += a.mean()
out = l / pairs
return out
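def _contrastive_demo():
    # Usage sketch (illustrative; shapes, labels and hyperparameters are made
    # up). The loss takes one embedding tensor per view plus the flattened
    # "(b v)" label tensor as the final positional argument; labels must agree
    # across the views of each example.
    b, v, d = 4, 2, 8
    view_a, view_b = torch.randn(b, d), torch.randn(b, d)
    labels = torch.tensor([0, 0, 1, 1, 0, 0, 1, 1])  # shape (b * v,)
    loss_fn = ContrastiveLoss(views=v, type="sup_con", temp=0.5)
    print(loss_fn(view_a, view_b, labels))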
|
thanos-code-main
|
unagi/tasks/loss_fns/contrastive_loss.py
|
import torch.nn as nn
class UnagiLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self):
raise NotImplementedError
|
thanos-code-main
|
unagi/tasks/loss_fns/base_loss.py
|
import copy
from collections import defaultdict
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Sequence, Tuple, Union
import torch
from einops import rearrange
from unagi.data.data_utils.transform_util import get_transforms
from unagi.utils.misc import list_to_tensor
def is_list(x):
return isinstance(x, Sequence) and not isinstance(x, str)
class TBPTTDataLoader(torch.utils.data.DataLoader):
"""
Adapted from https://github.com/deepsound-project/samplernn-pytorch
"""
def __init__(self, dataset, batch_size, chunk_len, overlap_len, *args, **kwargs):
super().__init__(dataset, batch_size, *args, **kwargs)
# Zero padding value, given by the dataset
self.zero = dataset.zero if hasattr(dataset, "zero") else 0
# Size of the chunks to be fed into the model
self.chunk_len = chunk_len
# Keep `overlap_len` from the previous chunk (e.g. SampleRNN requires this)
self.overlap_len = overlap_len
def __iter__(self):
for batch in super().__iter__():
x, y, *z = batch
# Pad with self.overlap_len - 1 zeros
x = torch.cat(
[
torch.zeros((x.shape[0], self.overlap_len - 1, *x.shape[2:]))
.to(x.device)
.to(x.dtype)
+ self.zero,
x,
],
dim=1,
)
y = torch.cat(
[
torch.zeros((y.shape[0], self.overlap_len - 1, *y.shape[2:]))
.to(y.device)
.to(y.dtype)
+ self.zero,
y,
],
dim=1,
)
z = [
torch.cat(
[
torch.zeros(
(
z[i].shape[0],
self.overlap_len - 1,
*z[i].shape[2:],
)
)
.to(z[i].device)
.to(z[i].dtype),
z[i],
],
dim=1,
)
for i in range(len(z))
]
_, seq_len, *_ = x.shape
reset = True
for seq_begin in list(range(self.overlap_len - 1, seq_len, self.chunk_len))[
:-1
]:
from_index = seq_begin - self.overlap_len + 1
to_index = seq_begin + self.chunk_len
# TODO: check this
# Ensure divisible by overlap_len
if self.overlap_len > 0:
to_index = min(
to_index,
seq_len - ((seq_len - self.overlap_len + 1) % self.overlap_len),
)
x_chunk = x[:, from_index:to_index]
if len(y.shape) == 3:
y_chunk = y[:, seq_begin:to_index]
else:
y_chunk = y
z_chunk = [z_[:, from_index:to_index] for z_ in z]
yield (x_chunk, y_chunk, *z_chunk, reset)
reset = False
def __len__(self):
raise NotImplementedError()
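def _tbptt_demo():
    # Usage sketch (illustrative; synthetic shapes). Each yielded chunk
    # carries `overlap_len - 1` steps of left context, and `reset` is True
    # only for the first chunk of each batch.
    xs, ys = torch.randn(8, 100, 1), torch.randn(8, 100, 1)
    loader = TBPTTDataLoader(
        torch.utils.data.TensorDataset(xs, ys),
        batch_size=4,
        chunk_len=20,
        overlap_len=5,
    )
    for x_chunk, y_chunk, reset in loader:
        print(x_chunk.shape, y_chunk.shape, reset)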
class UnagiDatasetBuilder:
registry = {}
_name_ = NotImplementedError("Dataset must have shorthand name.")
# Important:
# - dataset should return x, y in the order in which the keys
# are defined in the dicts below
# - the shapes should always have channels first
input_shapes: dict = NotImplementedError("Dataset must have input shapes.")
output_shapes: dict = NotImplementedError("Dataset must have output shapes.")
uid = "index"
@property
def x_names(self):
self._x_names = list(self.input_shapes.keys())
return self._x_names
@property
def y_names(self):
self._y_names = list(self.output_shapes.keys())
return self._y_names
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.registry[cls._name_] = cls
@property
def init_defaults(self):
return {}
def __init__(
self,
data_dir=None,
tbptt=False,
chunk_len=None,
overlap_len=None,
x_transforms=None,
y_transforms=None,
transform_pool=None,
**dataset_cfg,
):
self.data_dir = Path(data_dir).absolute() if data_dir is not None else None
self.x_transforms = x_transforms
self.y_transforms = y_transforms
self.transform_pool = transform_pool
# Arguments for TBPTT: only used if tbptt is True and are passed to
# TBPTTDataLoader
self.tbptt = tbptt
self.chunk_len = chunk_len
self.overlap_len = overlap_len
self.subset_split_percent = None
self.subset_split_seed = 42
# Add all arguments to self
init_args = self.init_defaults
init_args.update(
dataset_cfg
) # TODO this overrides the default dict which is bad
for k, v in init_args.items():
setattr(self, k, v)
self.dataset_train = None
self.dataset_val = None
self.dataset_test = None
self.setup()
self.subsample_dataset()
self._wrap_datasets()
def setup(self):
"""
This method should set
self.dataset_train, self.dataset_val, and self.dataset_test.
"""
raise NotImplementedError
def subsample_dataset(self):
if self.subset_split_percent:
subset_size = int(len(self.dataset_train) * self.subset_split_percent)
dataset_train_subset, dataset_train_heldout = torch.utils.data.random_split(
self.dataset_train,
(subset_size, len(self.dataset_train) - subset_size),
generator=torch.Generator().manual_seed(
getattr(self, "seed", self.subset_split_seed)
),
)
self.dataset_train = dataset_train_subset
self.dataset_test = {
"original_testset": self.dataset_test,
"heldout_trainset": dataset_train_heldout,
}
def _wrap_datasets(self):
self._create_input_to_output_mapping()
# Create all the transforms
self.transforms_train = self.transforms("train")
self.transforms_eval = self.transforms("eval")
UnagiDatasetWrapped = partial(
UnagiDataset,
input_to_output_mapping=self.input_to_output_mapping,
x_names=self.x_names,
y_names=self.y_names,
uid=self.uid,
)
if isinstance(self.dataset_train, torch.utils.data.Dataset):
self.dataset_train = UnagiDatasetWrapped(
dataset=self.dataset_train,
feature_transforms=self.transforms_train,
split="train",
)
elif isinstance(self.dataset_train, dict):
self.dataset_train = {
k: UnagiDatasetWrapped(
dataset=v,
feature_transforms=self.transforms_train,
split=k,
)
for k, v in self.dataset_train.items()
}
else:
raise TypeError(
"dataset_train must be a torch.utils.data.Dataset or dict,"
f"got {type(self.dataset_train)}"
)
if isinstance(self.dataset_val, torch.utils.data.Dataset):
self.dataset_val = UnagiDatasetWrapped(
dataset=self.dataset_val,
feature_transforms=self.transforms_eval,
split="val",
)
elif isinstance(self.dataset_val, dict):
self.dataset_val = {
k: UnagiDatasetWrapped(
dataset=v,
feature_transforms=self.transforms_eval,
split=k,
)
for k, v in self.dataset_val.items()
}
else:
raise TypeError(
"dataset_val must be a torch.utils.data.Dataset or dict, "
f"got {type(self.dataset_val)}"
)
if isinstance(self.dataset_test, torch.utils.data.Dataset):
self.dataset_test = UnagiDatasetWrapped(
dataset=self.dataset_test,
feature_transforms=self.transforms_eval,
split="test",
)
elif isinstance(self.dataset_test, dict):
self.dataset_test = {
k: UnagiDatasetWrapped(
dataset=v,
feature_transforms=self.transforms_eval,
split=k,
)
for k, v in self.dataset_test.items()
}
else:
raise TypeError(
"dataset_test must be a torch.utils.data.Dataset or dict, "
f"got {type(self.dataset_test)}"
)
def split_train_val(self, val_split: float):
train_len = int(len(self.dataset_train) * (1.0 - val_split))
dataset_train, dataset_val = torch.utils.data.random_split(
self.dataset_train,
(train_len, len(self.dataset_train) - train_len),
generator=torch.Generator().manual_seed(getattr(self, "seed", 42)),
)
return dataset_train, dataset_val
def transforms(self, split: str):
# returns a Composed transform
return get_transforms(
input_features=self.x_transforms,
dataset_split=split,
augmentations=self.transform_pool,
)
def _create_input_to_output_mapping(self):
# for contrastive, keep track of which input features as are transformed
# with which output features
self.input_to_output_mapping = {}
for name, output_feat in self.y_transforms.items():
if "transform_with" in output_feat:
input_feature_map = output_feat.transform_with
if input_feature_map not in self.input_to_output_mapping:
self.input_to_output_mapping[input_feature_map] = [name]
else:
self.input_to_output_mapping[input_feature_map].append(name)
@staticmethod
def collate_fn(
batch: Union[
List[Tuple[Dict[str, Any], Dict[str, torch.Tensor]]], List[Dict[str, Any]]
],
resolution: int = 1,
is_train: bool = True,
) -> Union[Tuple[Dict[str, Any], Dict[str, torch.Tensor]], Dict[str, Any]]:
"""Collate function.
Args:
batch: The batch to collate.
min_data_len: The minimal data sequence length, defaults to 0.
max_data_len: The maximal data sequence length (0 means no limit),
defaults to 0.
Returns:
The collated batch.
"""
X_batch: defaultdict = defaultdict(list)
Y_batch: defaultdict = defaultdict(list)
for item in batch:
# Check if batch is (x_dict, y_dict) pair
if isinstance(item, dict):
x_dict = item
y_dict: Dict[str, Any] = dict()
else:
x_dict, y_dict = item
for field_name, value in x_dict.items():
if isinstance(value, list):
X_batch[field_name] += value
else:
X_batch[field_name].append(value)
for label_name, value in y_dict.items():
if isinstance(value, list):
Y_batch[label_name] += value
else:
Y_batch[label_name].append(value)
field_names = copy.deepcopy(list(X_batch.keys()))
for field_name in field_names:
values = X_batch[field_name]
# Only merge list of tensors
if isinstance(values[0], torch.Tensor):
item_tensor, item_mask_tensor = list_to_tensor(
values,
)
X_batch[field_name] = item_tensor
if item_mask_tensor is not None:
X_batch[f"{field_name}_mask"] = item_mask_tensor
# TODO: generalize this to handle the case where resolutions are
# different per field
X_batch[field_name] = X_batch[field_name][..., ::resolution]
# TODO: figure out how to handle the mask
# X_batch[f"{field_name}_mask"] = X_batch[f"{field_name}_mask"]
# [..., ::resolution]
for label_name, values in Y_batch.items():
Y_batch[label_name] = list_to_tensor(
values,
)[0]
X_batch = dict(X_batch)
X_batch["is_train"] = is_train
new_X_batch = {}
new_X_batch["index"] = X_batch["index"]
del X_batch["index"]
new_X_batch["inputs"] = X_batch
if len(Y_batch) != 0:
Y_batch = dict(Y_batch)
Y_batch = {
k: rearrange(v, "b v ... -> (b v) ...") for k, v in Y_batch.items()
}
return new_X_batch, Y_batch
return new_X_batch
def train_dataloader(self, train_resolution=None, **kwargs):
if train_resolution is None:
train_resolution = [1]
if not is_list(train_resolution):
train_resolution = [train_resolution]
assert len(train_resolution) == 1, "Only one train resolution supported for now"
return self._dataloader(
self.dataset_train,
is_train=True,
resolutions=train_resolution,
shuffle=True,
**kwargs,
)[0]
def val_dataloader(self, eval_resolutions=None, **kwargs):
if isinstance(self.dataset_val, dict):
val_dataloaders = {}
for prefix, dataset in self.dataset_val.items():
dls = self._eval_dataloader(
dataset,
prefix=f"val/{prefix}",
eval_resolutions=eval_resolutions,
**kwargs,
)
val_dataloaders = {**val_dataloaders, **dls}
return val_dataloaders
else:
return self._eval_dataloader(
self.dataset_val,
prefix="val",
eval_resolutions=eval_resolutions,
**kwargs,
)
def test_dataloader(self, eval_resolutions=None, **kwargs):
if isinstance(self.dataset_test, dict):
test_dataloaders = {}
for prefix, dataset in self.dataset_test.items():
dls = self._eval_dataloader(
dataset,
prefix=f"test/{prefix}",
eval_resolutions=eval_resolutions,
**kwargs,
)
test_dataloaders = {**test_dataloaders, **dls}
return test_dataloaders
else:
return self._eval_dataloader(
self.dataset_test,
prefix="test",
eval_resolutions=eval_resolutions,
**kwargs,
)
def _eval_dataloader(self, dataset, prefix, eval_resolutions=None, **kwargs):
if eval_resolutions is None:
eval_resolutions = [1]
if not is_list(eval_resolutions):
eval_resolutions = [eval_resolutions]
kwargs["shuffle"] = False if "shuffle" not in kwargs else kwargs["shuffle"]
dataloaders = self._dataloader(
dataset,
is_train=False,
resolutions=eval_resolutions,
**kwargs,
)
return (
{
f"{prefix}/{res}" if res > 1 else prefix: dl
for res, dl in zip(eval_resolutions, dataloaders)
}
if dataloaders is not None
else None
)
def _dataloader(self, dataset, is_train, resolutions, **loader_args):
if dataset is None:
return None
if self.tbptt:
DataLoader = partial(
TBPTTDataLoader,
chunk_len=self.chunk_len,
overlap_len=self.overlap_len,
)
else:
DataLoader = torch.utils.data.DataLoader
return [
DataLoader(
dataset=dataset,
collate_fn=partial(
self.collate_fn, resolution=resolution, is_train=is_train
)
if self.collate_fn is not None
else None,
**loader_args,
)
for resolution in resolutions
]
def __str__(self):
return self._name_
class UnagiDataset(torch.utils.data.Dataset):
def __init__(
self,
dataset: torch.utils.data.Dataset,
feature_transforms: dict,
input_to_output_mapping: dict,
x_names: list,
y_names: list,
uid: str,
split: str,
):
self.dataset = dataset
self.feature_transforms = feature_transforms
self.input_to_output_mapping = input_to_output_mapping
self.x_names = x_names
self.y_names = y_names
self.uid = uid
self.split = split
def __getitem__(self, index):
"""
Get item by index.
Args:
index(index): The index of the item.
Returns:
Tuple[Dict[str, Any], Dict[str, Tensor]]: Tuple of x_dict and y_dict
"""
x, y = self.dataset[index]
if not isinstance(y, torch.Tensor):
y = torch.tensor(y)
x_dict, y_dict = {}, {}
# add uid
x_dict[self.uid] = index
for idx, input_feature in enumerate(self.x_names):
if len(self.x_names) > 1:
feature = x[idx]
else:
feature = x
x_dict[input_feature] = []
transforms = self.feature_transforms[input_feature]
if input_feature in self.input_to_output_mapping:
output_targets = self.input_to_output_mapping[input_feature]
labels = torch.stack(
[
y[self.y_names.index(target)] if len(self.y_names) > 1 else y
for target in output_targets
]
)
feature, y = transforms(
feature, # input
labels, # label
)
else:
feature, _ = transforms(
feature, # input
None, # label
)
x_dict[input_feature].append(feature)
for index, output_feature in enumerate(self.y_names):
y_dict[output_feature] = y[index]
return x_dict, y_dict
def __len__(self):
return len(self.dataset)
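def _collate_demo():
    # Sketch (illustrative): each item is an (x_dict, y_dict) pair whose
    # tensors carry a leading view dimension. collate_fn stacks fields, moves
    # the uid out under "index", and flattens the labels' view dimension.
    batch = [
        (
            {"index": i, "image": torch.randn(2, 3, 8, 8)},
            {"label": torch.tensor([i % 2, i % 2])},
        )
        for i in range(4)
    ]
    X, Y = UnagiDatasetBuilder.collate_fn(batch)
    print(X["inputs"]["image"].shape)  # torch.Size([4, 2, 3, 8, 8])
    print(Y["label"].shape)  # torch.Size([8])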
if __name__ == "__main__":
    # Smoke test: these keyword arguments match UnagiDatasetBuilder rather
    # than UnagiDataset. Note the builder is abstract, so a concrete subclass
    # implementing setup() is required for this to actually run.
    dataset = UnagiDatasetBuilder(
        data_dir=".",
        tbptt=True,
        chunk_len=10,
        overlap_len=5,
        permute=False,
        n_classes=20,
    )
|
thanos-code-main
|
unagi/datasets/base_dataset.py
|
from unagi.datasets.celeba.celeba_dataset import CelebA
from unagi.datasets.cifar.cifar_dataset import CIFAR10, CIFAR100
from unagi.datasets.mnist.mnist_dataset import MNIST
from unagi.datasets.tiny_imagenet.tinyimagenet_dataset import TinyImageNet
DATASET_CLASSES = {
"cifar10": CIFAR10,
"cifar100": CIFAR100,
"cifar10_coarse": CIFAR10,
"cifar100_coarse": CIFAR100,
"tinyimagenet": TinyImageNet,
"tinyimagenet_coarse": TinyImageNet,
"mnist": MNIST,
"celeba": CelebA,
}
|
thanos-code-main
|
unagi/datasets/__init__.py
|
from torch.utils.data import Dataset
class MeerkatDataset(Dataset):
"""Torch dataset wrapper around meerkat dp"""
def __init__(self, datapanel, xs, ys):
self.dataset = datapanel
self.x_names = xs
self.y_names = ys
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# if self.x_names is single element, return single element
if len(self.x_names) > 1:
x = [self.dataset[idx][input_feat] for input_feat in self.x_names]
else:
x = self.dataset[idx][self.x_names[0]]
if len(self.y_names) > 1:
y = [self.dataset[idx][output_feat] for output_feat in self.y_names]
else:
y = self.dataset[idx][self.y_names[0]]
return (x, y)
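def _meerkat_dataset_demo():
    # Usage sketch (illustrative), mirroring how the dataset builders in this
    # repo construct panels from columns.
    import meerkat as mk
    import torch

    dp = mk.DataPanel(
        {
            "x": mk.TensorColumn(torch.randn(4, 3)),
            "label": mk.TensorColumn(torch.tensor([0, 1, 0, 1])),
        }
    )
    ds = MeerkatDataset(dp, xs=["x"], ys=["label"])
    x, y = ds[0]  # single-name xs/ys return bare values rather than lists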
|
thanos-code-main
|
unagi/datasets/meerkat_dataset.py
|
import meerkat as mk
import torchvision
from unagi.datasets.base_dataset import UnagiDatasetBuilder
from unagi.datasets.meerkat_dataset import MeerkatDataset
from unagi.datasets.mnist.utils import sparse2coarse
class MNIST(UnagiDatasetBuilder):
"""Dataset to load MNIST dataset."""
_name_ = "mnist"
# TODO: these can be modified by the transforms (e.g. grayscale)
# and need to be up to date
input_shapes = {
"image": (1, 28, 28),
}
output_shapes = {
"label": (10,),
}
@property
def init_defaults(self):
return {
"val_split": 0.1,
"seed": 42, # For validation split
"coarse_labels": False,
}
def setup(self):
self.dataset_train = torchvision.datasets.MNIST(
root=self.data_dir,
train=True,
download=True,
)
self.dataset_train, self.dataset_val = self.split_train_val(
val_split=self.val_split
)
self.dataset_test = torchvision.datasets.MNIST(
root=self.data_dir,
train=False,
download=True,
)
self.dataset_train = self.to_meerkat(self.dataset_train)
self.dataset_val = self.to_meerkat(self.dataset_val)
self.dataset_test = self.to_meerkat(self.dataset_test)
def to_meerkat(self, dataset):
if self.coarse_labels:
# TODO: split train and val
img_pil, label = [], []
for _, (x, y) in enumerate(dataset):
img_pil.append(x)
label.append(y)
coarse_label = sparse2coarse(label, dataset="mnist")
obj = {
"image": mk.ListColumn(img_pil),
"label": mk.TensorColumn(coarse_label),
}
dp = mk.DataPanel(obj)
# TODO: combine this with the UnagiDataset as an option
dataset = MeerkatDataset(dp, xs=["image"], ys=["label"])
self.output_shapes["label"] = (2,)
return dataset
|
thanos-code-main
|
unagi/datasets/mnist/mnist_dataset.py
|
import numpy as np
def sparse2coarse(targets, scramble=False, dataset="mnist"):
    """Convert PyTorch MNIST sparse targets to coarse (binary) targets:
    digits 0-4 map to class 0 and digits 5-9 map to class 1.
    Usage:
        trainset = torchvision.datasets.MNIST(path)
        trainset.targets = sparse2coarse(trainset.targets)
    """
    if dataset == "mnist":
        sparse_coarse_array = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
        targets = np.array(sparse_coarse_array)[targets]
    return targets.tolist()
|
thanos-code-main
|
unagi/datasets/mnist/utils.py
|
import os
import numpy as np
def sparse2coarse(targets, scramble=False):
    """Convert sparse TinyImageNet targets (200 classes) to coarse targets.
    Usage:
        trainset.targets = sparse2coarse(trainset.targets)
    """
    sparse_coarse_array = [
        14, 31, 16, 16, 8, 34, 35, 0, 0, 0,
        4, 3, 22, 14, 7, 23, 23, 23, 9, 9,
        3, 3, 3, 21, 11, 11, 11, 11, 11, 11,
        6, 6, 6, 39, 39, 2, 4, 4, 4, 4,
        4, 4, 4, 4, 5, 5, 12, 30, 19, 37,
        37, 37, 37, 37, 28, 28, 28, 37, 2, 61,
        46, 53, 46, 41, 53, 45, 45, 47, 43, 53,
        65, 45, 56, 52, 46, 62, 58, 41, 49, 62,
        62, 64, 45, 62, 66, 46, 61, 50, 62, 47,
        49, 45, 50, 45, 65, 65, 58, 53, 53, 55,
        62, 58, 58, 64, 48, 46, 41, 65, 44, 61,
        50, 65, 46, 49, 65, 44, 65, 62, 58, 46,
        46, 65, 62, 41, 45, 55, 55, 50, 50, 51,
        47, 62, 60, 65, 46, 52, 62, 66, 60, 61,
        53, 50, 53, 43, 46, 65, 60, 61, 60, 46,
        54, 50, 58, 65, 58, 50, 46, 58, 46, 62,
        48, 63, 45, 62, 65, 58, 65, 61, 41, 46,
        58, 43, 47, 58, 48, 48, 59, 48, 52, 52,
        52, 52, 52, 38, 38, 18, 17, 17, 17, 17,
        52, 52, 52, 52, 58, 58, 7, 58, 58, 27,
    ]
targets = np.array(sparse_coarse_array)[targets]
return targets.tolist()
def create_val_img_folder(path_to_dataset: str):
"""
This method is responsible for separating validation images into
separate sub folders
"""
val_dir = os.path.join(path_to_dataset, "val")
img_dir = os.path.join(val_dir, "images")
    with open(os.path.join(val_dir, "val_annotations.txt"), "r") as fp:
        data = fp.readlines()
    val_img_dict = {}
    for line in data:
        words = line.split("\t")
        val_img_dict[words[0]] = words[1]
# Create folder if not present and move images into proper folders
for img, folder in val_img_dict.items():
newpath = os.path.join(img_dir, folder)
if not os.path.exists(newpath):
os.makedirs(newpath)
if os.path.exists(os.path.join(img_dir, img)):
os.rename(os.path.join(img_dir, img), os.path.join(newpath, img))
|
thanos-code-main
|
unagi/datasets/tiny_imagenet/utils.py
|
import os
import meerkat as mk
import torchvision
from unagi.datasets.base_dataset import UnagiDatasetBuilder
from unagi.datasets.meerkat_dataset import MeerkatDataset
from unagi.datasets.tiny_imagenet.utils import create_val_img_folder, sparse2coarse
class TinyImageNet(UnagiDatasetBuilder):
"""Dataset to load TinyImageNet dataset."""
_name_ = "tinyimagenet"
# TODO: these can be modified by the transforms (e.g. grayscale)
# and need to be up to date
input_shapes = {
"image": (3, 64, 64),
}
output_shapes = {
"label": (200,),
}
@property
def init_defaults(self):
return {
"val_split": 0.1,
"seed": 42, # For validation split
"coarse_labels": False,
"root_folder": None,
}
def setup(self):
if self.root_folder is None:
raise Exception(
"Please specify the path to root folder containing " "TinyImageNet"
)
dp = {"train": {}, "val": {}}
for split in ["train", "val"]:
if split in ["val"]:
folder_path = os.path.join(self.root_folder, split, "images")
# make val image folder
if (
sum(
[
os.path.isdir(os.path.join(self.root_folder, x))
for x in os.listdir(folder_path)
]
)
== 0
):
print("create val folder")
create_val_img_folder(self.root_folder)
else:
folder_path = os.path.join(self.root_folder, split)
labels = sorted(os.listdir(folder_path))
class_to_idx = {cls: i for i, cls in enumerate(labels)}
# get image paths
img_paths, classes = zip(
*torchvision.datasets.DatasetFolder.make_dataset(
folder_path,
class_to_idx=class_to_idx,
extensions="jpeg",
)
)
if self.coarse_labels:
classes = sparse2coarse(list(classes))
self.output_shapes["label"] = (67,)
split_dp = mk.DataPanel(
{
"image": mk.ImageColumn.from_filepaths(list(img_paths)),
"label": mk.TensorColumn(classes),
}
)
dp[split] = split_dp
self.dataset_train = MeerkatDataset(
dp["train"], xs=list(self.input_shapes.keys()), ys=["label"]
)
self.dataset_val = MeerkatDataset(
dp["val"], xs=list(self.input_shapes.keys()), ys=["label"]
)
self.dataset_test = MeerkatDataset(
dp["val"], xs=list(self.input_shapes.keys()), ys=["label"]
)
|
thanos-code-main
|
unagi/datasets/tiny_imagenet/tinyimagenet_dataset.py
|
import os
import meerkat as mk
import numpy as np
import pandas as pd
import torch
from unagi.datasets.base_dataset import UnagiDatasetBuilder
from unagi.datasets.meerkat_dataset import MeerkatDataset
class CelebA(UnagiDatasetBuilder):
"""Dataset to load CelebA dataset."""
_name_ = "celeba"
# TODO: these can be modified by the transforms (e.g. grayscale)
# and need to be up to date
input_shapes = {
"image": (3, 224, 224),
}
output_shapes = {
"label": (10,),
}
@property
def init_defaults(self):
return {
"val_split": 0.1,
"seed": 42, # For validation split
"target_name": "Blond_Hair",
"confounder_names": ["Male"],
"root_dir": None,
}
def setup(self):
if not os.path.exists(self.root_dir):
raise ValueError(
f"{self.root_dir} does not exist yet. "
f"Please generate the dataset first."
)
        # Standard CelebA partition codes in list_eval_partition.csv
        # (assumed here: 0 = train, 1 = val, 2 = test).
        self.split_dict = {"train": 0, "val": 1, "test": 2}
        full_metadata_df = pd.read_csv(
            os.path.join(self.root_dir, "list_attr_celeba.csv"),
            delim_whitespace=True,
        )
        self.split_df = pd.read_csv(
            os.path.join(self.root_dir, "list_eval_partition.csv"),
            delim_whitespace=True,
        )
        full_metadata_df["partition"] = self.split_df["partition"]
        dp = {"train": [], "test": [], "val": []}
        for split in ["train", "test", "val"]:
            # Filter a fresh copy of the full metadata for each split.
            self.metadata_df = full_metadata_df[
                self.split_df["partition"] == self.split_dict[split]
            ].copy()
self.y_array = self.metadata_df[self.target_name].values
self.confounder_array = self.metadata_df[self.confounder_names].values
self.y_array[self.y_array == -1] = 0
self.confounder_array[self.confounder_array == -1] = 0
self.n_classes = len(np.unique(self.y_array))
self.n_confounders = len(self.confounder_names)
            self.output_shapes["label"] = (self.n_classes,)
            # Get sub_targets / group_idx; confounder columns are joined with
            # "_" so multiple confounders are supported.
            confounder_str = (
                self.metadata_df[self.confounder_names]
                .astype(str)
                .agg("_".join, axis=1)
            )
            self.metadata_df["sub_target"] = (
                self.metadata_df[self.target_name].astype(str) + "_" + confounder_str
            )
            # print('> Sub_target loaded!')
            # Get subclass map
            attributes = [self.target_name] + list(self.confounder_names)
            self.df_groups = (
                self.metadata_df[attributes].groupby(attributes).size().reset_index()
            )
            # print('> Groups loaded!')
            self.df_groups["group_id"] = (
                self.df_groups[self.target_name].astype(str)
                + "_"
                + self.df_groups[self.confounder_names].astype(str).agg("_".join, axis=1)
            )
# print('> Group IDs loaded!')
self.subclass_map = (
self.df_groups["group_id"]
.reset_index()
.set_index("group_id")
.to_dict()["index"]
)
self.group_array = (
self.metadata_df["sub_target"].map(self.subclass_map).values
)
groups, group_counts = np.unique(self.group_array, return_counts=True)
self.n_groups = len(groups)
# Extract filenames and splits
self.filename_array = self.metadata_df["image_id"].values
self.split_array = self.metadata_df["partition"].values
self.targets = torch.tensor(self.y_array)
self.targets_all = {
"target": np.array(self.y_array),
"group_idx": np.array(self.group_array),
"spurious": np.array(self.confounder_array),
"sub_target": np.array(list(zip(self.y_array, self.confounder_array))),
}
            # NOTE: `group_str` is not defined in this file; it is assumed to
            # be provided elsewhere in the original codebase.
            self.group_labels = [self.group_str(i) for i in range(self.n_groups)]
file_paths = [
os.path.join(self.root_dir, "img_align_celeba", fname)
for fname in self.filename_array
]
dp[split] = mk.DataPanel(
{
"image": mk.ImageColumn.from_filepaths(file_paths),
# "label": mk.TensorColumn(label),
"label": mk.TensorColumn(self.targets),
}
)
        self.dataset_train = MeerkatDataset(
            dp["train"], xs=list(self.input_shapes.keys()), ys=["label"]
        )
        self.dataset_val = MeerkatDataset(
            dp["val"], xs=list(self.input_shapes.keys()), ys=["label"]
        )
        self.dataset_test = MeerkatDataset(
            dp["test"], xs=list(self.input_shapes.keys()), ys=["label"]
        )
|
thanos-code-main
|
unagi/datasets/celeba/celeba_dataset.py
|
import meerkat as mk
import numpy as np
import torch
import torchvision
from unagi.datasets.base_dataset import UnagiDatasetBuilder
from unagi.datasets.cifar.utils import get_superclass_subclass_mapping, sparse2coarse
from unagi.datasets.meerkat_dataset import MeerkatDataset
class CIFAR10(UnagiDatasetBuilder):
"""Dataset to load CIFAR 10 dataset."""
_name_ = "cifar10"
# TODO: these can be modified by the transforms (e.g. grayscale)
# and need to be up to date
input_shapes = {
"image": (3, 32, 32),
}
output_shapes = {
"label": (10,),
}
@property
def init_defaults(self):
return {
"val_split": 0.1,
"seed": 42, # For validation split
"coarse_labels": False,
"return_train_as_test": False,
"subset_split_percent": None,
"subset_split_seed": 42,
}
def setup(self):
self.dataset_train = torchvision.datasets.CIFAR10(
root=self.data_dir,
train=True,
download=True,
)
self.dataset_train, self.dataset_val = self.split_train_val(
val_split=self.val_split
)
self.dataset_test = torchvision.datasets.CIFAR10(
root=self.data_dir,
train=False,
download=True,
)
        self.dataset_train = self.to_meerkat(self.dataset_train)
        self.dataset_val = self.to_meerkat(self.dataset_val)
        if self.return_train_as_test:
            # Reuse the already-converted train set as the test set (avoids
            # applying the coarse-label mapping a second time).
            self.dataset_test = self.dataset_train
        else:
            self.dataset_test = self.to_meerkat(self.dataset_test)
def to_meerkat(self, dataset):
if self.coarse_labels:
# TODO: split train and val
img_pil, label = [], []
for _, (x, y) in enumerate(dataset):
img_pil.append(x)
label.append(y)
coarse_label = sparse2coarse(label, dataset="cifar10")
obj = {
"image": mk.ListColumn(img_pil),
# "label": mk.TensorColumn(label),
"label": mk.TensorColumn(coarse_label),
}
dp = mk.DataPanel(obj)
# TODO: combine this with the UnagiDataset as an option
dataset = MeerkatDataset(dp, xs=["image"], ys=["label"])
self.output_shapes["label"] = (2,)
return dataset
class CIFAR10Subset(CIFAR10):
"""Dataset to load subset of CIFAR10 dataset"""
_name_ = "cifar10_subset"
@property
def init_defaults(self):
return {
"val_split": 0.1,
"val_split_seed": 42, # For validation split
"subset_split_seed": 42,
"subset_split_percent": 0.5,
"coarse_labels": False,
}
def setup(self):
super().setup()
# randomly split the train set
subset_size = int(len(self.dataset_train) * self.subset_split_percent)
(dataset_train_subset, dataset_train_heldout,) = torch.utils.data.random_split(
self.dataset_train,
(subset_size, len(self.dataset_train) - subset_size),
generator=torch.Generator().manual_seed(
getattr(self, "seed", self.subset_split_seed)
),
)
self.dataset_train = dataset_train_subset
self.dataset_test = {
"original_testset": self.dataset_test,
"heldout_trainset": dataset_train_heldout,
}
class CIFAR100(UnagiDatasetBuilder):
"""Dataset to load CIFAR 100 dataset."""
_name_ = "cifar100"
# TODO: these can be modified by the transforms (e.g. grayscale)
# and need to be up to date
input_shapes = {
"image": (3, 32, 32),
}
output_shapes = {
"label": (100,),
}
@property
def init_defaults(self):
return {
"val_split": 0.1,
"seed": 42, # For validation split
"coarse_labels": False,
"coarse_labels_u": False,
}
def setup(self):
self.dataset_train = torchvision.datasets.CIFAR100(
root=self.data_dir,
train=True,
download=True,
)
self.dataset_train, self.dataset_val = self.split_train_val(
val_split=self.val_split
)
self.dataset_test = torchvision.datasets.CIFAR100(
root=self.data_dir,
train=False,
download=True,
)
self.dataset_train = self.to_meerkat(self.dataset_train)
self.dataset_val = self.to_meerkat(self.dataset_val)
self.dataset_test = self.to_meerkat(self.dataset_test)
def to_meerkat(self, dataset):
if self.coarse_labels or self.coarse_labels_u:
# TODO: split train and val
img_pil, label = [], []
for _, (x, y) in enumerate(dataset):
img_pil.append(x)
label.append(y)
if self.coarse_labels_u:
# img_pil = np.array(img_pil)
label = np.array(label)
coarse_u_mapping = get_superclass_subclass_mapping()
indices = []
for coarse_label, subclass_labels in coarse_u_mapping.items():
total_samples = [500, 250, 100, 50, 50]
for subclass_label, samples in zip(subclass_labels, total_samples):
indices.extend(
np.random.choice(
np.argwhere(label == subclass_label).squeeze(-1),
size=samples,
)
)
temp_img_list = []
for idx in indices:
temp_img_list.append(img_pil[idx])
img_pil = temp_img_list
label = label[indices].tolist()
if self.coarse_labels:
coarse_label = sparse2coarse(label, dataset="cifar100")
self.output_shapes["label"] = (20,)
else:
coarse_label = label
obj = {
"image": mk.ListColumn(img_pil),
"label": mk.TensorColumn(coarse_label),
}
dp = mk.DataPanel(obj)
# TODO: combine this with the UnagiDataset as an option
dataset = MeerkatDataset(dp, xs=["image"], ys=["label"])
return dataset
|
thanos-code-main
|
unagi/datasets/cifar/cifar_dataset.py
|
import numpy as np
# https://github.com/ryanchankh/cifar100coarse/blob/master/sparse2coarse.py
def sparse2coarse(targets, scramble=False, dataset="cifar10"):
    """Convert PyTorch CIFAR sparse targets to coarse targets: 20 superclasses
    for CIFAR-100, 2 superclasses for CIFAR-10.
    Usage:
        trainset = torchvision.datasets.CIFAR100(path)
        trainset.targets = sparse2coarse(trainset.targets, dataset="cifar100")
    """
if dataset == "cifar100":
        sparse_coarse_array = [
            4, 1, 14, 8, 0, 6, 7, 7, 18, 3,
            3, 14, 9, 18, 7, 11, 3, 9, 7, 11,
            6, 11, 5, 10, 7, 6, 13, 15, 3, 15,
            0, 11, 1, 10, 12, 14, 16, 9, 11, 5,
            5, 19, 8, 8, 15, 13, 14, 17, 18, 10,
            16, 4, 17, 4, 2, 0, 17, 4, 18, 17,
            10, 3, 2, 12, 12, 16, 12, 1, 9, 19,
            2, 10, 0, 1, 16, 12, 9, 13, 15, 13,
            16, 19, 2, 4, 6, 19, 5, 5, 8, 19,
            18, 1, 2, 15, 6, 0, 17, 8, 14, 13,
        ]
else:
# index of original labels:
# [b'airplane', b'automobile', b'bird', b'cat', b'deer',
# b'dog', b'frog', b'horse', b'ship', b'truck']
sparse_coarse_array = [1, 1, 0, 0, 0, 0, 0, 0, 1, 1]
targets = np.array(sparse_coarse_array)[targets]
return targets.tolist()
def get_superclass_subclass_mapping():
return {
0: [4, 30, 55, 72, 95],
1: [1, 32, 67, 73, 91],
2: [54, 62, 70, 82, 92],
3: [9, 10, 16, 28, 61],
4: [0, 51, 53, 57, 83],
5: [22, 39, 40, 86, 87],
6: [5, 20, 25, 84, 94],
7: [6, 7, 14, 18, 24],
8: [3, 42, 43, 88, 97],
9: [12, 17, 37, 68, 76],
10: [23, 33, 49, 60, 71],
11: [15, 19, 21, 31, 38],
12: [34, 63, 64, 66, 75],
13: [26, 45, 77, 79, 99],
14: [2, 11, 35, 46, 98],
15: [27, 29, 44, 78, 93],
16: [36, 50, 65, 74, 80],
17: [47, 52, 56, 59, 96],
18: [8, 13, 48, 58, 90],
19: [41, 69, 81, 85, 89],
}
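def _coarse_mapping_check():
    # Consistency sketch (added for illustration): every fine label listed
    # under a superclass should map back to that superclass via
    # sparse2coarse(..., dataset="cifar100").
    for coarse, fine_labels in get_superclass_subclass_mapping().items():
        assert sparse2coarse(fine_labels, dataset="cifar100") == [coarse] * 5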
|
thanos-code-main
|
unagi/datasets/cifar/utils.py
|
"""Unagi utils. Credit: Emmental """
import json
import random
import string
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from numpy import ndarray
from torch import Tensor
def list_to_tensor(
item_list: List[Tensor], min_len: int = 0, max_len: int = 0
) -> Tuple[Tensor, Optional[Tensor]]:
"""Convert the list of torch.Tensor into a torch.Tensor.
Args:
item_list: The tensor for converting.
min_len: Min length of sequence of data, defaults to 0.
max_len: Max length of sequence of data, defaults to 0.
Returns:
The converted tensor and the corresponding mask tensor.
"""
item_mask_tensor = None
# Convert single value tensor
if all(item_list[i].dim() == 0 for i in range(len(item_list))):
item_tensor = torch.stack(item_list, dim=0)
# Convert 2 or more-D tensor with the same shape
elif all(
(item_list[i].size() == item_list[0].size()) and (len(item_list[i].size()) != 1)
for i in range(len(item_list))
):
item_tensor = torch.stack(item_list, dim=0)
# Convert reshape to 1-D tensor and then convert
else:
item_tensor, item_mask_tensor = pad_batch(
[item.view(-1) for item in item_list], min_len, max_len
)
return item_tensor, item_mask_tensor
def pad_batch(
batch: List[Tensor],
min_len: int = 0,
max_len: int = 0,
pad_value: int = 0,
left_padded: bool = False,
) -> Tuple[Tensor, Tensor]:
"""Convert the batch into a padded tensor and mask tensor.
Args:
batch: The tensor for padding.
min_len: Min length of sequence of padding, defaults to 0.
max_len: Max length of sequence of padding, defaults to 0.
pad_value: The value to use for padding, defaults to 0.
        left_padded: If True, pad on the left, otherwise on the right,
defaults to False.
Returns:
The padded tensor and corresponding mask tensor.
"""
batch_size = len(batch)
max_seq_len = int(np.max([item.size()[0] for item in batch]))
if max_len > 0 and max_len < max_seq_len:
max_seq_len = max_len
max_seq_len = max(max_seq_len, min_len)
padded_batch = batch[0].new_full((batch_size, max_seq_len), pad_value)
for i, item in enumerate(batch):
length = min(item.size()[0], max_seq_len)
if left_padded:
padded_batch[i, -length:] = item[-length:]
else:
padded_batch[i, :length] = item[:length]
mask_batch = torch.eq(padded_batch.clone().detach(), pad_value)
return padded_batch, mask_batch
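def _pad_batch_demo():
    # Sketch (illustrative): ragged 1-D tensors are padded to a common length;
    # the mask flags every position equal to pad_value, so genuine zeros in
    # the data are flagged as well.
    padded, mask = pad_batch([torch.tensor([1, 2, 3]), torch.tensor([4])])
    # padded -> [[1, 2, 3], [4, 0, 0]]
    # mask   -> [[False, False, False], [False, True, True]]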
def prob_to_pred(probs: Union[ndarray, List[ndarray]]) -> ndarray:
"""Identify the class with the maximum probability.
Args:
probs: predicted probabilities.
Returns:
predicted labels.
"""
if isinstance(probs, ndarray):
return np.array(np.argmax(probs, axis=-1))
elif isinstance(probs, list):
return np.array([np.argmax(prob, axis=-1) for prob in probs])
else:
raise ValueError(f"Unrecognized type {type(probs)}")
def pred_to_prob(preds: ndarray, n_classes: int) -> ndarray:
"""Convert predicted labels to probabilistic labels.
Args:
preds: Predicted labels.
n_classes: Total number of classes.
Returns:
predicted probabilities.
"""
preds = preds.reshape(-1)
probs = np.zeros((preds.shape[0], n_classes))
for idx, class_idx in enumerate(preds):
probs[idx, class_idx] = 1.0
return probs
def move_to_device(
obj: Any, device: Optional[Union[int, str, torch.device]] = -1
) -> Any:
"""Move object to specified device.
    Given a structure (possibly) containing Tensors on the CPU, move all the
    Tensors to the specified GPU (or do nothing, if they should be on the CPU).
device = -1 -> "cpu"
device = 0 -> "cuda:0"
Originally from:
https://github.com/HazyResearch/metal/blob/mmtl_clean/metal/utils.py
Args:
obj: The object to convert.
device: The device id, defaults to -1.
Returns:
The converted object.
"""
device = torch.device("cpu") if device == -1 else torch.device(device)
if not torch.cuda.is_available():
device = torch.device("cpu")
if isinstance(obj, torch.Tensor):
return obj.to(device)
elif isinstance(obj, dict):
return {key: move_to_device(value, device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, device) for item in obj])
else:
return obj
def array_to_numpy(
array: Union[ndarray, List[Any], Tensor], flatten: bool = False
) -> ndarray:
"""Covert an array to a numpy array.
Args:
array: An array to convert.
flatten: Whether to flatten or not.
Returns:
Converted array.
"""
if isinstance(array, np.ndarray):
pass
elif isinstance(array, list):
array = np.array(array)
elif isinstance(array, torch.Tensor):
array = array.cpu().numpy()
else:
raise ValueError(f"Unrecognized type {type(array)} to convert to ndarray")
if flatten:
array = array.reshape(-1) # type: ignore
return array # type: ignore
def merge(
x: Dict[str, Any], y: Dict[str, Any], specical_keys: Union[str, List[str]] = None
) -> Dict[str, Any]:
"""Merge two nested dictionaries. Overwrite values in x with values in y.
Args:
x: The original dict.
y: The new dict.
        specical_keys: The special keys to replace instead of merging, defaults to None.
    Returns:
        The updated dict.
"""
if x is None:
return y
if y is None:
return x
if isinstance(specical_keys, str):
specical_keys = [specical_keys]
merged = {**x, **y}
xkeys = x.keys()
for key in xkeys:
if specical_keys is not None and key in specical_keys and key in y:
merged[key] = y[key]
elif isinstance(x[key], dict) and key in y:
merged[key] = merge(x[key], y[key], specical_keys)
return merged
def str2bool(v: str) -> bool:
"""Parse str to bool.
Args:
v: The string to parse.
Returns:
The parsed value.
"""
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def str2dict(v: str) -> Dict[str, str]:
    """Parse str to dict.
    Args:
        v: The string to parse.
    Returns:
        The parsed dict.
    """
    parsed = {}
    for token in v.split(","):
        key, value = token.split(":")
        parsed[key] = value
    return parsed
def str2list(v: str, delim: str = ",") -> List[str]:
"""Parse str to list.
Args:
v: The string to parse.
delim: The delimiter used to split string.
Returns:
The parsed list.
"""
return [t.strip() for t in v.split(delim)]
def nullable_float(v: str) -> Optional[float]:
"""Parse string to nullable float.
Args:
v: The string to parse.
Returns:
The parsed value.
"""
if not v or v.lower() in ["none", "null"]:
return None
return float(v)
def nullable_int(v: str) -> Optional[int]:
"""Parse string to nullable int.
Args:
v: The string to parse.
Returns:
The parsed value.
"""
if not v or v.lower() in ["none", "null"]:
return None
return int(v)
def nullable_string(v: str) -> Optional[str]:
"""Parse string to nullable string.
Args:
v: The string to parse.
Returns:
The parsed value.
"""
if not v or v.lower() in ["none", "null"]:
return None
return v
def construct_identifier(
task_name: str, data_name: str, split_name: str, metric_name: Optional[str] = None
) -> str:
"""Construct identifier.
Args:
task_name: Task name.
data_name: Data set name.
split_name: Split name.
metric_name: Metric name, defaults to None.
Returns:
The identifier.
"""
if metric_name:
return f"{task_name}/{data_name}/{split_name}/{metric_name}"
else:
return f"{task_name}/{data_name}/{split_name}"
def random_string(length: int = 5) -> str:
"""Generate a random string of fixed length.
Args:
length: The length of random string, defaults to 5.
Returns:
The random string.
"""
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(length))
def convert_to_serializable_json(obj: Dict[str, Any]) -> Dict[str, Any]:
"""Covert a dict to a serializable json.
Args:
obj: A dict.
Returns:
Serializable json.
"""
try:
json.dumps(obj)
return obj
except TypeError:
# Convert function
if hasattr(obj, "__name__"):
return f"Function: {obj.__name__}" # type: ignore
# Convert partial function
if hasattr(obj, "func"):
if hasattr(obj.func, "__name__"): # type: ignore
return f"Function: {obj.func.__name__}" # type: ignore
# Convert dict
if isinstance(obj, dict):
for key in obj.keys():
obj[key] = convert_to_serializable_json(obj[key])
return obj
# Convert list
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = convert_to_serializable_json(obj[i])
return obj
# Convert tuple
if isinstance(obj, tuple):
return tuple([convert_to_serializable_json(item) for item in obj])
# Convert class
if hasattr(type(obj), "__name__"):
return f"Class: {type(obj).__name__}" # type: ignore
return None
|
thanos-code-main
|
unagi/utils/misc.py
|
import numpy as np
from PIL import Image
def relu_model(X, E, W, classification=False, delta=0.1):
    """One-hidden-layer ReLU model: Y = W @ max(E^T @ X^T, 0).
    In classification mode, samples with |Y| < delta are dropped and the sign
    of Y is returned as a binary label."""
    Y = W.dot(np.maximum((E.T @ X.T), 0.0)).reshape(-1)
if classification:
idx = abs(Y) >= delta
X = X[idx]
Y = Y[idx]
return X, ((Y < 0).astype(int)).reshape(-1)
else:
return X, Y
def pil_loader(path):
# open path as file to avoid ResourceWarning
# (https://github.com/python-pillow/Pillow/issues/835)
with open(path, "rb") as f:
img = Image.open(f)
return img.convert("RGB")
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == "accimage":
return accimage_loader(path)
else:
return pil_loader(path)
|
thanos-code-main
|
unagi/utils/image_utils.py
|
thanos-code-main
|
unagi/utils/__init__.py
|
|
import json
import os
import numpy as np
import yaml
def load_yaml(file_path):
    with open(file_path, "r") as f:
        return yaml.safe_load(f)
def write_to_file(file_path, value):
    """Write value to file."""
    directory = os.path.dirname(file_path)
    os.makedirs(directory, exist_ok=True)
    if not isinstance(value, str):
        value = str(value)
    with open(file_path, "w") as fout:
        fout.write(value + "\n")
def write_to_json_file(file_path, dict):
    """Write dict to json file."""
    directory = os.path.dirname(file_path)
    os.makedirs(directory, exist_ok=True)
    for k in dict.keys():
        if isinstance(dict[k], (np.float32, np.float64)):
            dict[k] = dict[k].item()
    json_obj = json.dumps(dict)
    with open(file_path, "w") as fout:
        fout.write(json_obj)
def load_json(file_path):
with open(file_path) as json_file:
data = json.load(json_file)
return data
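def _json_roundtrip_demo():
    # Sketch (illustrative; the /tmp path is made up): numpy floats are cast
    # to native Python floats before serialization, so a write/load round
    # trip preserves the values.
    metrics = {"acc": np.float32(0.91), "loss": 0.35}
    write_to_json_file("/tmp/unagi_demo/metrics.json", metrics)
    print(load_json("/tmp/unagi_demo/metrics.json"))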
|
thanos-code-main
|
unagi/utils/file_utils.py
|
import copy
import importlib
import logging
from collections.abc import Mapping
from transformers import AutoModel, AutoTokenizer
# from unagi.configs import MODEL_DEFAULT_PATHS
from unagi.datasets import DATASET_CONFIG_REGISTRY
from unagi.models import AUGMENTATION_LAYERS, MODULE_DICTS
from unagi.tasks import ( # LOSS_FN_REGISTRY,
LOSS_MODULE_REGISTRY,
TASK_FLOWS,
TASK_PREPROCESSING_LAYER,
)
# from unagi.utils.file_utils import load_yaml
logger = logging.getLogger(__name__)
def get_dataset_config(dataset_name):
config = importlib.import_module(DATASET_CONFIG_REGISTRY[dataset_name])
return config
def merge_dict(dct, merge_dct):
"""Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
    :param merge_dct: dict merged into ``dct``
    :return: The merged dict.
"""
dct = copy.deepcopy(dct)
for k, v in merge_dct.items():
if k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], Mapping):
dct[k] = merge_dict(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
return dct
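def _merge_dict_demo():
    # Sketch (illustrative): nested keys are merged recursively, with values
    # from the second dict winning on conflicts.
    base = {"model": {"dim": 64, "depth": 4}, "seed": 0}
    override = {"model": {"dim": 128}}
    merged = merge_dict(base, override)
    assert merged == {"model": {"dim": 128, "depth": 4}, "seed": 0}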
def get_module_dict_v2(model_config):
module_dict = {}
# root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
for module_layer in ["embeddings", "encoders", "decoders"]:
if module_layer in model_config.keys():
for layer in model_config[module_layer]:
name = layer["name"]
type = layer["type"]
"""base_model = load_yaml(
os.path.join(root_path, MODEL_DEFAULT_PATHS[model])
)
layer = merge_dict(base_model["model"], layer)"""
# md = copy.deepcopy(MODULE_DICTS[model][module_layer])
md = copy.deepcopy(MODULE_DICTS[module_layer][type])
del layer["name"]
del layer["type"]
module = md(**layer)
module_dict[name] = module
return module_dict
def collect_input_feature_views(task_flow):
feat_to_view = {}
for module in task_flow:
for input in module["inputs"]:
if isinstance(input[1], list):
feat_to_view[input[1][0]] = input[1][0]
return feat_to_view
def get_model_params(model_config, dataset_desc):
"""
if args.use_cat:
model_params.update({
'cat': True,
'd_cat': 64,
})
"""
# TODO (ASN): Clean this up
assert not (
model_config["use_cls_token"] and model_config["use_all_tokens"]
), "can't have both use_cls_token and use_all_tokens assigned to True"
model_params = {
# Set nClasses for LM model later
"num_classes": dataset_desc["TASK_NUM_CLASS"],
"num_features": dataset_desc["TASK_TOTAL_INPUT_FEATURES"],
}
model_params.update(model_config)
if model_params["learn_pos"] and model_params["name"] in [
"mixer",
"hippo",
]:
raise ValueError(
"learn_pos cannot be set to True when training with hippo or mixer."
" Please update your config file."
)
if model_params["use_cls_token"] and model_params["name"] in ["mixer"]:
raise ValueError(
"use_cls_token cannot be set to True when training with mixer."
" Please update your config file."
)
if model_params["use_all_tokens"] and model_params["name"] in ["hippo"]:
raise ValueError(
"use_all_tokens cannot be set to True when training with hippo."
" Please update your config file."
)
if model_config["use_cls_token"] and model_config["use_all_tokens"]:
raise ValueError(
"can't have both use_cls_token and use_all_tokens assigned to True."
" Please update your config file."
)
return model_params
def get_mask_length(config, model_params=None):
# TODO (ASN): fix to support new config
model_config = config["model"]
    use_cls_token = (
        model_config["name"] != "mixer" and not model_config["use_all_tokens"]
    )
use_mask = False
if "masked" in config["tasks"].keys():
use_mask = True
mask_length = 0
if use_mask:
# TODO (ASN): check 1024 literal
if model_config["patch_emb_type"] == "linear":
mask_length = 1024 // model_config["patch_size"]
if model_config["patch_emb_type"] == "square":
mask_length = 1024 // model_config["patch_size"] ** 2
if use_cls_token:
mask_length += 1
if model_params:
model_params.update({"mask_length": mask_length})
config["tasks"]["masked"]["mask_length"] = mask_length
return model_params
def get_text_embed_layer(dataset_config, model_params):
pretrained_lm_name = None
contains_text_feature = False
for input_feat in dataset_config["dataset"]["input_features"]:
        if input_feat["type"] == "text":
            contains_text_feature = True
            default_text_transform = input_feat["default_transformation"]
            default_text_transforms = dataset_config["augmentations"]["raw"][
                default_text_transform
            ]
            for augmentation in default_text_transforms:
                if augmentation["type"] == "PretrainedLMTokenize":
                    pretrained_lm_name = augmentation["params"]["model"]
    if contains_text_feature and not pretrained_lm_name:
        raise ValueError(
            "Dataset contains a text feature but no pretrained language model "
            "is specified in augmentations for tokenization. "
            "Please specify a PretrainedLMTokenize augmentation in your config."
        )
if not pretrained_lm_name:
return model_params
else:
model_params["text_encoder"] = AutoModel.from_pretrained(
pretrained_lm_name
).embeddings
tokenizer = AutoTokenizer.from_pretrained(pretrained_lm_name)
model_params["emb_mask_id"] = tokenizer.mask_token_id
model_params["text_dim"] = 768 if "base" in pretrained_lm_name else 1024
return model_params
"""def get_loss_fns_v2(
learning_task_config,
label_smoothing=False,
classification_type="multi_class",
):
# loss_fns = {"masked_loss": None, "contrastive_loss": None, "ce_loss": None}
loss_fns = {}
for task in learning_task_config:
task_name, type, loss_fn = task["name"], task["type"], task["loss_fn"]
loss_fns[task_name] = LOSS_FN_REGISTRY[loss_fn]()
if type == "supervised":
if label_smoothing and loss_fn == "cross_entropy":
loss_fns[task_name] = LOSS_FN_REGISTRY["label_smoothing"](0.1)
if classification_type == "multi_label":
loss_fns[task_name] = LOSS_FN_REGISTRY["binary_cross_entropy_loss"]()
return loss_fns"""
# def get_loss_fns(learning_task_config, label_smoothing=False,
# task_type="multi_class"):
# loss_fns = {"masked_loss": None, "contrastive_loss": None, "ce_loss": None}
# if "masked" in learning_task_config:
# loss_fns["masked_loss"] = LOSS_FN_REGISTRY[
# learning_task_config["masked"]["loss_fn"]
# ]()
# loss_module = LOSS_MODULE_REGISTRY["masked"]
# if "contrastive" in learning_task_config:
# params = learning_task_config["contrastive"]["loss_fn_params"]
# params["type"] = learning_task_config["contrastive"]["loss_fn"]
# loss_fns["contrastive_loss"] = LOSS_FN_REGISTRY[params["type"]](
# **params)
# loss_module = LOSS_MODULE_REGISTRY["contrastive"]
# if "clip" in learning_task_config:
# loss_fns["contrastive_loss"] = LOSS_FN_REGISTRY[
# learning_task_config["clip"]["loss_fn"]
# ]()
# loss_module = LOSS_MODULE_REGISTRY["contrastive"]
# if "supervised" in learning_task_config:
# loss_fns["ce_loss"] = LOSS_FN_REGISTRY[
# learning_task_config["supervised"]["loss_fn"]
# ]()
# if loss_fns["ce_loss"] == "soft_cross_entropy":
# loss_module = LOSS_MODULE_REGISTRY["soft_cross_entropy"]
# else:
# loss_module = LOSS_MODULE_REGISTRY["cross_entropy"]
# if (
# label_smoothing
# and learning_task_config["supervised"]["loss_fn"] == "cross_entropy"
# ):
# loss_fns["ce_loss"] = LOSS_FN_REGISTRY["label_smoothing"](0.1)
# if task_type == "multi_label":
# loss_fns["ce_loss"] = LOSS_FN_REGISTRY["binary_cross_entropy_loss"]()
# return loss_fns, loss_module
# def get_output_layer(dataset_desc, task_name):
# # Only add an output layer if task is supervised task
# if task_name == "supervised":
# return OUTPUT_LAYER_REGISTRY[dataset_desc["TASK_TYPE"]]
# else:
# return None
def get_module_dict(model_name, model_params):
if model_name == "clip":
md = copy.deepcopy(MODULE_DICTS[model_name])
for md_name, module in md.items():
enc_params = model_params[md_name]
if "text_encoder" in model_params:
enc_params["text_encoder"] = model_params["text_encoder"]
enc_params["emb_mask_id"] = model_params["emb_mask_id"]
enc_params["text_dim"] = model_params["text_dim"]
md[md_name] = module(**enc_params)
return md
else:
if model_name in MODULE_DICTS.keys():
md = copy.deepcopy(MODULE_DICTS[model_name])
# instantiate dict
for md_name, module in md.items():
md[md_name] = module(**model_params)
return md
else:
            # TODO (ASN): move all config sanity checks to a separate Config
            # Preprocessing Step
            raise ValueError(f"No available modules for the {model_name} model type")
clip_taskflow_default = [
{
"name": "clip_task_preprocessing",
"module": "clip_task_preprocessing",
"inputs": [("_input_", "inputs")],
},
{
"name": "pre_encoder_img",
"module": "pre_encoder_img",
"inputs": [("clip_task_preprocessing", "image")],
},
{
"name": "pre_encoder_text",
"module": "pre_encoder_text",
"inputs": [("clip_task_preprocessing", "text")],
},
{
"name": "text_encoder",
"module": "encoder_text",
"inputs": [("pre_encoder_text", 0)],
},
{
"name": "image_encoder",
"module": "encoder_img",
"inputs": [("pre_encoder_image", 0)],
},
]
def get_task_flow(task_name, model_name, aug_type=None):
if aug_type == "patch":
task_flow = TASK_FLOWS[task_name]["patch_aug"]
elif aug_type == "feature":
task_flow = TASK_FLOWS[task_name]["feature_aug"]
else:
task_flow = TASK_FLOWS[task_name]["default"]
# TODO (ASN): FIX UGLY LOGIC
if task_name == "masked" and model_name in ["mixer", "hippo"]:
modified_task_flow = []
for task_module in task_flow:
if task_module["name"] == "encoder":
modified_task_module = copy.deepcopy(task_module)
modified_task_module["inputs"] = [
("pre_encoder", 0),
("masked_task_preprocessing", 2),
]
modified_task_flow.append(modified_task_module)
elif task_module["name"] == "decoder":
modified_task_module = copy.deepcopy(task_module)
modified_task_module["inputs"] = [
("encoder", 0),
("encoder", 1),
]
modified_task_flow.append(modified_task_module)
else:
modified_task_flow.append(task_module)
return modified_task_flow
else:
return task_flow
def get_loss_module(task_name, loss_fn):
if task_name == "masked":
return LOSS_MODULE_REGISTRY["masked"]
elif task_name == "contrastive":
return LOSS_MODULE_REGISTRY["contrastive"]
elif task_name == "clip":
return LOSS_MODULE_REGISTRY["clip"]
elif task_name == "supervised":
if loss_fn == "soft_cross_entropy":
return LOSS_MODULE_REGISTRY["soft_cross_entropy"]
else:
return LOSS_MODULE_REGISTRY["cross_entropy"]
else:
raise ValueError(f"{task_name} has no associated loss module")
def get_input_transform_layers():
task_preprocessing_layers = {}
for task_name, task_module in TASK_PREPROCESSING_LAYER.items():
task_preprocessing_layers[f"{task_name}_task_preprocessing"] = task_module()
return task_preprocessing_layers
def add_augmentation_modules(augmentation_config):
# patch_augmentation, feature_augmentation
keys = ["patch", "feature"]
augmentation_modules = {}
for layer_type in keys:
aug_type = augmentation_config[layer_type]["type"]
if aug_type is not None:
params = augmentation_config[layer_type]["params"]
layer_module = AUGMENTATION_LAYERS[layer_type][aug_type]
module = layer_module(**params)
module_name = (
"patch_augmentation"
if layer_type == "patch"
else "feature_augmentation"
)
augmentation_modules[module_name] = module
return augmentation_modules
|
thanos-code-main
|
unagi/utils/task_utils.py
|
import copy
import os
from collections.abc import Mapping
from unagi.configs import (
BASE_CONFIG_PATH,
BASE_DATASET_PATH,
BASE_INPUT_FEATURE_PATH,
BASE_OUTPUT_FEATURE_PATH,
DATASET_DEFAULT_PATHS,
)
from unagi.utils.file_utils import load_yaml
def get_feature_to_type(config):
dataset_config = config["dataset"]
input_features = dataset_config["input_features"]
mapping = {}
for feat in input_features:
mapping[feat["name"]] = feat["type"]
return mapping
def merge_dict(dct, merge_dct):
"""Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
dct = copy.deepcopy(dct)
for k, v in merge_dct.items():
if k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], Mapping):
dct[k] = merge_dict(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
return dct
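# Usage sketch (illustrative, not part of the original module): merge_dict
# recurses into nested dicts, so overriding one nested key keeps its siblings.
# >>> merge_dict({"model": {"d_model": 256, "dropout": 0.1}},
# ...            {"model": {"dropout": 0.3}})
# {'model': {'d_model': 256, 'dropout': 0.3}}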
# TODO (ASN): modularize to have functions for each
def config_preprocessing(config: dict):
# model_section = config["model"]
# tasks_section = config["tasks"]
return
def build_config(user_config: dict) -> dict:
"""
Merges user specified config file with default config file and performs
section pre-processing
# Inputs
:param root_path: (str) filepath to unagi directory
:param user_config (dict) dictionary representationn of use config
# Returns
:return: prepcessed config
"""
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
base_cf = load_yaml(os.path.join(root_path, BASE_CONFIG_PATH))
base_cf = merge_dict(base_cf, user_config)
# load the dataset defaults
default_dataset = load_yaml(os.path.join(root_path, BASE_DATASET_PATH))
base_input_feature = load_yaml(os.path.join(root_path, BASE_INPUT_FEATURE_PATH))
base_output_feature = load_yaml(os.path.join(root_path, BASE_OUTPUT_FEATURE_PATH))
# merge model config
# TODO (ASN): replace w/generic modules
"""
base_model = load_yaml(
os.path.join(root_path, MODEL_DEFAULT_PATHS[user_config["model"]["name"]])
)
base_cf["model"] = merge_dict(base_model["model"], base_cf["model"])
"""
# merge dataset config
base_dataset = load_yaml(
os.path.join(root_path, DATASET_DEFAULT_PATHS[user_config["dataset"]["name"]])
)
base_dataset = merge_dict(default_dataset["dataset"], base_dataset["dataset"])
base_cf["dataset"] = merge_dict(base_dataset, base_cf["dataset"])
for key, input_feature in base_cf["dataset"]["input_features"].items():
base_cf["dataset"]["input_features"][key] = merge_dict(
base_input_feature, input_feature
)
for key, output_feature in base_cf["dataset"]["output_features"].items():
base_cf["dataset"]["output_features"][key] = merge_dict(
base_output_feature, output_feature
)
# pre_processed_config = config_preprocessing(base_cf)
# TODO (ASN): ADD SECTION PRE-PROCESSING
# MODEL : --> correct for all conflicting params (use_all_toksn vs use_cls_token)
# TASKS :
# check that all tasks that are specified are valid
# check that all loss functions are valid
# compute any task specific computations (mask length)
#
return base_cf
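# Usage sketch (illustrative; the config file name and its keys are assumptions,
# not a verified schema -- the user config must name a dataset known to
# DATASET_DEFAULT_PATHS):
# user_cfg = load_yaml("my_experiment.yaml")
# cfg = build_config(user_cfg)   # defaults merged with user overrides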
|
thanos-code-main
|
unagi/utils/config_utils.py
|
from unagi.models.decoders.classifier import ClassificationDecoder
from unagi.models.decoders.image.resnet import ResnetDecoder
from unagi.models.decoders.image.resnet_autoencoder import (
Resnet18Decoder,
Resnet50Decoder,
)
from unagi.models.decoders.sequence.mixer import MixerDecoder
from unagi.models.decoders.sequence.transformer import TransformerDecoder
from unagi.models.embeddings.embeddings import (
CategoricalEmbed,
Conv2DEmbed,
ConvEmbed,
IdentityEmbed,
LinearPatchEmbed,
PretrainedLMEmbed,
SquarePatchEmbed,
)
from unagi.models.encoders.image.resnet.resnet import ResnetEncoder
from unagi.models.encoders.sequence.bert.bert import BertEncoder
from unagi.models.encoders.sequence.mixer.mixer import MixerEncoder
from unagi.models.encoders.sequence.transformer.transformer import TransformerEncoder
from unagi.models.layers.patch_augmentations import (
BrightnessLayer,
CutoutLayer,
InvertLayer,
MixUpLayer,
RotateLayer,
SolarizeLayer,
)
from unagi.models.ops.grayscale import Grayscale
from unagi.models.ops.image_reshape import ImageReshape
from unagi.models.ops.linear_proj import LinearProj
from unagi.models.ops.pool import PoolDecoder
from unagi.models.ops.sequence_concat import SequenceConcat
from unagi.models.ops.view_concat import ViewConcat
from unagi.models.ops.view_select import ViewSelect
MODULE_DICTS = {
"embeddings": {
"square_patch": SquarePatchEmbed,
"linear_patch": LinearPatchEmbed,
"categorical": CategoricalEmbed,
"conv2d": Conv2DEmbed,
"conv1d": ConvEmbed,
"pretrained_lm": PretrainedLMEmbed,
"identity": IdentityEmbed,
"sequence_concat": SequenceConcat,
},
"encoders": {
"mixer": MixerEncoder,
"transformer": TransformerEncoder,
"resnet": ResnetEncoder,
"bert": BertEncoder,
},
"decoders": {
"classifier": ClassificationDecoder,
"pool": PoolDecoder,
"view_select": ViewSelect,
"view_concat": ViewConcat,
"transformer": TransformerDecoder,
"mixer": MixerDecoder,
"resnet": ResnetDecoder,
"resnet18decoder": Resnet18Decoder,
"resnet50decoder": Resnet50Decoder,
"image_reshape": ImageReshape,
"sequence_concat": SequenceConcat,
"linear_proj": LinearProj,
"grayscale": Grayscale,
},
}
AUGMENTATION_LAYERS = {
"patch": {
"mixup": MixUpLayer,
"invert": InvertLayer,
"cutout": CutoutLayer,
"solarize": SolarizeLayer,
"brightness": BrightnessLayer,
"rotate": RotateLayer,
},
"feature": {
"mixup": MixUpLayer,
"invert": InvertLayer,
"cutout": CutoutLayer,
"solarize": SolarizeLayer,
"brightness": BrightnessLayer,
"rotate": RotateLayer,
},
}
|
thanos-code-main
|
unagi/models/__init__.py
|
from math import sqrt
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
from transformers import AutoModel
from unagi.models.embeddings.base_embedding import EmbeddingModule
from unagi.models.layers.blocks import Transpose
class SquarePatchEmbed(EmbeddingModule):
def __init__(
self,
d_input: int,
d_model: int,
patch_size: int,
dropout=0.1,
layernorm=True,
):
super().__init__(d_input=d_input, d_model=d_model)
self.patch_side = patch_size
self.patch_size = patch_size ** 2 * d_input
layers = [nn.Linear(self.patch_size, d_model)]
if layernorm:
layers.append(nn.LayerNorm(d_model))
if dropout > 1e-3:
layers.append(nn.Dropout(dropout))
self.emb = nn.Sequential(*layers)
def forward(self, x):
"""
input: (B, C, S)
output: (B, S // patch_size, d_model)
"""
b, c, s = x.size()
h_dim = int(sqrt(s))
        assert h_dim ** 2 == s, f"SquarePatchEmbed only works on square inputs (S={s}, sqrt(S)={h_dim})"
x = rearrange(x, "b c (h w) -> b c h w", b=b, c=c, h=h_dim, w=h_dim)
# assert c == self.d_input, f"Patchsize expected {self.d_input} channels
# got {c} channels"
patches = rearrange(
x,
"b c (h p) (w q) -> b (h w) (p q c)",
p=self.patch_side,
q=self.patch_side,
)
return self.emb(patches)
@staticmethod
def init_weight(m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
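# Shape sketch (illustrative): a flattened 32x32 RGB image split into 8x8
# patches yields (32/8)**2 = 16 tokens of dimension d_model.
# >>> emb = SquarePatchEmbed(d_input=3, d_model=128, patch_size=8)
# >>> emb(torch.randn(2, 3, 32 * 32)).shape
# torch.Size([2, 16, 128])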
class LinearPatchEmbed(EmbeddingModule):
def __init__(
self,
d_input,
d_model,
patch_size,
dropout=0.1,
layernorm=True,
):
        super().__init__(d_input=d_input, d_model=d_model)
self.patch_size = patch_size * d_input
layers = [nn.Linear(self.patch_size, d_model)]
if layernorm:
layers.append(nn.LayerNorm(d_model))
if dropout > 1e-3:
layers.append(nn.Dropout(dropout))
self.emb = nn.Sequential(*layers)
def forward(self, x):
"""
input: (B, C, S)
output: (B, S // patch_size, d_model)
"""
b, c, s = x.size()
# assert c == self.d_input, f"Patchsize expected {self.d_input}
# channels got {c} channels"
x = x.view(b, c * s // self.patch_size, self.patch_size)
return self.emb(x)
class ConvEmbed(EmbeddingModule):
def __init__(self, d_input, d_model, patch_size, n_layers=1, dropout=0.1):
"""
input: (B, C, S)
output: (B, (S - patch_size ) // (patch_size)^(n_layers+1), d_model)
"""
        super().__init__(d_input=d_input, d_model=d_model)
layers = [
nn.Conv1d(d_input, d_model, kernel_size=patch_size, stride=2),
nn.Dropout(dropout),
] + [
nn.Conv1d(d_model, d_model, kernel_size=patch_size, stride=2)
for k in range(n_layers)
]
self.emb = nn.Sequential(*layers)
def forward(self, x):
return rearrange(self.emb(x), "b c s -> b s c")
class Conv2DEmbed(EmbeddingModule):
def __init__(
self,
d_input,
d_model,
patch_size,
n_layers=1,
):
"""
input: (B, C, S)
output: (B, (((sqrt(S) - patch_size ) // (patch_size))+ 1)^2, d_model)
"""
        super().__init__(d_input=d_input, d_model=d_model)
layers = [
nn.Conv2d(
d_input,
d_model,
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
padding=0,
bias=False,
),
nn.Flatten(2, 3),
Transpose(-2, -1),
]
self.emb = nn.Sequential(*layers)
def forward(self, x):
b, c, s = x.size()
h_dim = int(sqrt(s))
assert h_dim ** 2 == s, f"Only works on square images ({s} v {h_dim})"
x = rearrange(x, "b c (h w) -> b c h w", b=b, c=c, h=h_dim, w=h_dim)
return self.emb(x)
# https://github.com/SHI-Labs/Compact-Transformers/blob/main/src/cct.py
@staticmethod
def init_weight(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
class CategoricalEmbed(EmbeddingModule):
def __init__(
self,
d_input,
d_model,
embed_size,
patch_size=1,
n_layers=1,
dropout=0.1,
layernorm=False,
):
"""
input: (B, 1)
output: (B, 1, d_model)
"""
        super().__init__(d_input=d_input, d_model=d_model)
self.embed_size = embed_size
self.emb = nn.Linear(embed_size, d_model)
def forward(self, x):
        # convert to one-hot, then project; .float() keeps the tensor on its device
        input_one_hot = F.one_hot(x.to(torch.int64), num_classes=self.embed_size)
        return self.emb(input_one_hot.float())
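# Shape sketch (illustrative): each integer id is one-hot encoded over
# embed_size classes and projected to d_model.
# >>> emb = CategoricalEmbed(d_input=1, d_model=64, embed_size=10)
# >>> emb(torch.tensor([[3], [7]])).shape
# torch.Size([2, 1, 64])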
class NumericalEmbed(EmbeddingModule):
def __init__(
self,
d_input,
d_model,
patch_size,
n_layers=1,
dropout=0.1,
layernorm=False,
):
"""
input: (B, 1)
output: (B, 1, d_model)
"""
        super().__init__(d_input=d_input, d_model=d_model)
self.emb = nn.Linear(1, d_model)
def forward(self, x):
x = self.emb(x)
x = x.unsqueeze(-2)
return x
class IdentityEmbed(EmbeddingModule):
def __init__(
self,
d_input,
d_model,
patch_size,
n_layers=1,
dropout=0.1,
layernorm=False,
):
super().__init__(d_input=d_input, d_model=d_model)
def forward(self, x):
return x
class PretrainedLMEmbed(EmbeddingModule):
def __init__(
self,
d_input,
d_model,
patch_size,
pretrained_lm_name: str = "bert-base-uncased",
):
""" "
input: (B, C, S) where C=1
output: (B, S, d_model)
"""
super().__init__(d_input=d_input, d_model=d_model)
self.d_input = d_input
self.patch_size = patch_size
self.text_encoder = AutoModel.from_pretrained(pretrained_lm_name).embeddings
self.embedding_dim = self.text_encoder.word_embeddings.embedding_dim
self.projection_layer = nn.Linear(self.embedding_dim, d_model)
self.emb = nn.Sequential(self.text_encoder, self.projection_layer)
def forward(self, x):
# TODO (ASN): add patching logic
# b, c, s = x.size()
# x = rearrange(x, "b c s -> (b c) s") # get rid of single channel dim
return self.emb(x)
# TODO: mean, sum, concat embeddings
|
thanos-code-main
|
unagi/models/embeddings/embeddings.py
|
from torch import nn
class EmbeddingModule(nn.Module):
def __init__(
self,
d_input: int,
d_model: int,
):
super().__init__()
self.d_input = d_input
self.d_model = d_model
|
thanos-code-main
|
unagi/models/embeddings/base_embedding.py
|
from math import sqrt
from einops import rearrange
from torch import nn
class Transpose(nn.Module):
def __init__(self, i, j):
super().__init__()
self.i = i
self.j = j
def forward(self, x):
return x.transpose(self.i, self.j)
class SquareEmb(nn.Module):
def __init__(self, in_d, d, _patch_size, dropout=0.1, layernorm=True):
super(SquareEmb, self).__init__()
self.in_d = in_d
self.patch_side = _patch_size
self.patch_size = _patch_size ** 2 * in_d
layers = []
layers.append(nn.Linear(self.patch_size, d))
if layernorm:
layers.append(nn.LayerNorm(d))
if dropout > 1e-3:
layers.append(nn.Dropout(dropout))
self.emb = nn.Sequential(*layers)
def forward(self, x):
b, c, n = x.size()
rt_n = int(sqrt(n))
# assert rt_n**2 == n, f"Only works on square images ({n} v {rt_n})"
x = rearrange(x, "b c (h w) -> b c h w", b=b, c=c, h=rt_n, w=rt_n)
# assert c == self.in_d, f"Patchsize expected {self.in_d} channels
# got {c} channels"
patches = rearrange(
x,
"b c (h p) (w q) -> b (h w) (p q c)",
p=self.patch_side,
q=self.patch_side,
)
return self.emb(patches)
@staticmethod
def init_weight(m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
class LinearEmb(nn.Module):
def __init__(self, in_d, d, _patch_size, dropout=0.1):
super().__init__()
self.in_d = in_d
self.patch_size = _patch_size * in_d
self.emb = nn.Sequential(nn.Linear(self.patch_size, d), nn.LayerNorm(d))
def forward(self, x):
b, c, n = x.size()
# assert c == self.in_d, f"Patchsize expected {self.in_d}
# channels got {c} channels"
xx = x.view(b, c * n // self.patch_size, self.patch_size)
return self.emb(xx)
class ConvEmb(nn.Module):
def __init__(self, in_d, d, patch_size, nLayers=1, dropout=0.1):
super(ConvEmb, self).__init__()
_f = [
nn.Conv1d(in_d, d, kernel_size=patch_size, stride=patch_size),
nn.Dropout(dropout),
] + [
nn.Conv1d(d, d, kernel_size=patch_size, stride=patch_size)
for k in range(nLayers)
]
self.f = nn.Sequential(*_f)
def forward(self, x):
return rearrange(self.f(x), "b c s -> b s c")
class Conv2DEmb(nn.Module):
def __init__(self, in_d, d, patch_size):
super().__init__()
emb = nn.Conv2d(
in_d,
d,
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
padding=0,
bias=False,
)
# Add batch batch or layernorm?
self.f = nn.Sequential(emb, nn.Flatten(2, 3), Transpose(-2, -1))
def forward(self, x):
b, c, n = x.size()
rt_n = int(sqrt(n))
assert rt_n ** 2 == n, f"Only works on square images ({n} v {rt_n})"
x = rearrange(x, "b c (h w) -> b c h w", b=b, c=c, h=rt_n, w=rt_n)
return self.f(x)
# https://github.com/SHI-Labs/Compact-Transformers/blob/main/src/cct.py
@staticmethod
def init_weight(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
emb_type = {
"square": SquareEmb,
"linear": LinearEmb,
"conv": ConvEmb,
"conv2d": Conv2DEmb,
}
|
thanos-code-main
|
unagi/models/layers/embeds.py
|
import torch
import torch.nn.functional as F
from torch import nn
class Transpose(nn.Module):
def __init__(self, i, j):
super().__init__()
self.i = i
self.j = j
def forward(self, x):
return x.transpose(self.i, self.j)
class Truncate(nn.Module):
def __init__(self, max_sequence_length):
super().__init__()
self.max_sequence_length = max_sequence_length
def forward(self, input):
if self.max_sequence_length is not None:
# NOTE: assumes input is of form (batch, seq_length, hidden_dim)
# or (batch, seq_length)
if input.size(1) > self.max_sequence_length:
input = input[:, 0 : self.max_sequence_length, :]
elif input.size(1) < self.max_sequence_length:
if len(input.size()) == 2:
pad = (0, self.max_sequence_length - input.size(1))
elif len(input.size()) == 3:
pad = (0, 0, self.max_sequence_length - input.size(1), 0)
input = F.pad(input, pad, mode="constant", value=0)
return input
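# Shape sketch (illustrative): Truncate trims long sequences and zero-pads
# short ones to max_sequence_length.
# >>> trunc = Truncate(max_sequence_length=16)
# >>> trunc(torch.randn(4, 20, 32)).shape, trunc(torch.randn(4, 10, 32)).shape
# (torch.Size([4, 16, 32]), torch.Size([4, 16, 32]))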
class PreNorm(nn.Module):
def __init__(self, d, f):
super().__init__()
self.f = f
self.norm = nn.LayerNorm(d)
def forward(self, x, **kwargs):
return self.f(self.norm(x), **kwargs)
class FFN(nn.Module):
def __init__(self, d, mlp_dim, out_dim=None, dropout=0.1):
super().__init__()
out_dim = d if out_dim is None else out_dim
self.f = nn.Sequential(
nn.Linear(d, mlp_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(mlp_dim, out_dim),
nn.Dropout(dropout),
)
def forward(self, x, **kwargs):
return self.f(x, **kwargs)
# https://github.com/SHI-Labs/Compact-Transformers/blob/f6d43e50ece006b933eeb27b087a0c3cad3bc635/src/transformers.py#L90
class DropPath(nn.Module):
def __init__(self, drop_prob):
super().__init__()
self.keep_prob = 1 - drop_prob
def forward(self, x):
if self.keep_prob >= 1.0 or not self.training:
return x
# work with diff dim tensors, not just 2D ConvNets
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = self.keep_prob + torch.rand(
shape, dtype=x.dtype, device=x.device
)
random_tensor.floor_() # binarize
return x.div(self.keep_prob) * random_tensor
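# Behavior sketch (illustrative): in training mode DropPath zeroes a sample's
# entire residual branch with probability drop_prob and rescales survivors by
# 1/keep_prob, so the expected output is preserved.
# >>> dp = DropPath(0.5).train()
# >>> dp(torch.ones(8, 4))   # each row is either all 0.0 or all 2.0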
class Residual(nn.Module):
def __init__(self, d, f, trainable=False, per_channel=False, drop_path=0.0):
super().__init__()
_init = [1.0] * d if per_channel else [1.0]
self.scalar = nn.Parameter(torch.tensor(_init)) if trainable else 1.0
self.f = f
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x, **kwargs):
return self.drop_path(self.f(x, **kwargs)) + x * self.scalar
class Cat(nn.Module):
def __init__(self, f, drop_path=0.0):
super().__init__()
self.f = f
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x, **kwargs):
y = self.drop_path(self.f(x, **kwargs))
return torch.cat([x, y], dim=-1)
class Classifier(nn.Module):
def __init__(self, input_dim, target_dim):
super().__init__()
self.classification_layer = nn.Linear(input_dim, target_dim)
def forward(self, x, **kwargs):
return self.classification_layer(x)
|
thanos-code-main
|
unagi/models/layers/blocks.py
|
import random
from typing import Dict
import numpy as np
import torch
import torchvision.transforms.functional as TF
from torch import nn
from torch.functional import Tensor
class MixUpAugmentation(nn.Module):
"""
Inter-image augmentation: Computes augmentations on an individual sample
"""
def __init__(self, p=0.3, prob_label=False, alpha=1): # is label a float?
super().__init__()
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.prob = p
self.alpha = alpha
self.prob_label = prob_label
def forward(self, x, y):
if self.alpha > 0.0:
mix_ratio = np.random.beta(self.alpha, self.alpha)
else:
mix_ratio = 1.0
x_aug, y_aug = x, y
if random.random() <= self.prob:
perm_idxs = torch.randperm(x.shape[0])
x_perm, y_perm = x[perm_idxs], y[perm_idxs]
# input augmentation
x_aug = mix_ratio * x + (1 - mix_ratio) * x_perm
# label augmentation
if self.prob_label:
y_aug = mix_ratio * y + (1 - mix_ratio) * y_perm
else:
y_aug = torch.tensor(
[
y[idx] if np.random.random() < mix_ratio else y_perm[idx]
for idx in range(int(y.size(0)))
]
).to(self.device)
return x_aug, y_aug
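# Usage sketch (illustrative): with p=1.0 the batch is always mixed; lambda is
# drawn from Beta(alpha, alpha) and applied to the inputs (and to the labels
# when prob_label=True, which assumes float / one-hot labels).
# >>> aug = MixUpAugmentation(p=1.0, prob_label=True, alpha=0.4)
# >>> x_aug, y_aug = aug(torch.randn(8, 16, 32), torch.rand(8, 10))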
class CutoutAugmentation(nn.Module):
"""
Intra-image augmentation: masks out a random patch from image
"""
def __init__(self, p=0.3, prob_label=False): # is label a float?
super().__init__()
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.prob = p
self.prob_label = prob_label
def forward(self, x, y):
x_aug, y_aug = x, y
if random.random() <= self.prob:
seq_dim = x.size(1)
# randomly select patch to mask out. chooses a random patch for
# each sample in batch
for batch_idx in range(x.size(0)):
patch_idx = random.sample(range(seq_dim), 1)[0]
x_aug[batch_idx][patch_idx] = 0
return x_aug, y_aug
class SolarizeAugmentation(nn.Module):
"""
Pixel Level Augmentation: Inverts all tensor values above a threshold.
Assumes input is normalized.
"""
def __init__(self, p=0.3, threshold=0.5, prob_label=False): # is label a float?
super().__init__()
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.prob = p
self.prob_label = prob_label
self.threshold = threshold
def forward(self, x, y):
x_aug, y_aug = x, y
if random.random() <= self.prob:
x_aug = torch.where(abs(x_aug) > self.threshold, 1 - x_aug, x_aug)
return x_aug, y_aug
class BrightnessAugmentation(nn.Module):
"""
Pixel Level Augmentation: Modifies brightness by increasing (or decreasing)
the tensor value evenly across all pixels. Values are modified using the
factor value
"""
def __init__(self, p=0.3, factor=1.0, prob_label=False): # is label a float?
super().__init__()
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.prob = p
self.prob_label = prob_label
self.factor = factor
def forward(self, x, y):
x_aug, y_aug = x, y
if random.random() <= self.prob:
x_aug = x_aug * self.factor
return x_aug, y_aug
class InvertAugmentation(nn.Module):
"""
Pixel Level Augmentation: Inverts tensor values -- assumes that
input is normalized
"""
def __init__(self, p=0.3, prob_label=False): # is label a float?
super().__init__()
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.prob = p
self.prob_label = prob_label
def forward(self, x, y):
x_aug, y_aug = x, y
if random.random() <= self.prob:
x_aug = 1 - x_aug
return x_aug, y_aug
class RotateAugmentation(nn.Module):
"""
Pixel Level Augmentation: Inverts tensor values -- assumes that input
is normalized
"""
def __init__(self, p=0.3, degree=90, prob_label=False): # is label a float?
super().__init__()
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.prob = p
self.prob_label = prob_label
self.degree = degree
def forward(self, x, y):
x_aug, y_aug = x, y
if random.random() <= self.prob:
            bs = x.size(0)
            rot_samples = []
            for sample_idx in range(bs):
                sample = x_aug[sample_idx]  # (seq_len, h_dim)
                # rotate each sample as a single-channel image
                rot = TF.rotate(sample.unsqueeze(0), self.degree)
                rot_samples.append(rot.squeeze(0))
            x_aug = torch.stack(rot_samples)  # preserves (bs, seq_len, h_dim)
return x_aug, y_aug
class MixUpLayer(nn.Module):
def __init__(self, prob=0.3, prob_label=False, alpha=1): # is label a float?
super().__init__()
self.patch_aug = MixUpAugmentation(p=prob, prob_label=prob_label, alpha=alpha)
def forward(self, patch_feature_dict: Dict[str, Tensor], Y: Tensor, is_train=False):
"""
patch_aug (Dict[str, Tensor]): mapping between input feature name,
and corresponding augmentated patch representation
Y (Tensor): augmentated label
"""
if is_train:
aug_patches = {}
for feat_name, feat_value in patch_feature_dict.items():
feat_aug, Y_aug = self.patch_aug(feat_value, Y)
aug_patches[feat_name] = feat_aug
return aug_patches, Y_aug
else:
return patch_feature_dict, Y
class InvertLayer(nn.Module):
def __init__(self, prob=0.3, prob_label=False): # is label a float?
super().__init__()
self.patch_aug = InvertAugmentation(p=prob, prob_label=prob_label)
def forward(self, patch_feature_dict: Dict[str, Tensor], Y: Tensor, is_train=False):
"""
patch_aug (Dict[str, Tensor]): mapping between input feature name,
and corresponding augmentated patch representation
Y (Tensor): augmentated label
"""
if is_train:
aug_patches = {}
for feat_name, feat_value in patch_feature_dict.items():
feat_aug, Y_aug = self.patch_aug(feat_value, Y)
aug_patches[feat_name] = feat_aug
return aug_patches, Y_aug
else:
return patch_feature_dict, Y
class CutoutLayer(nn.Module):
def __init__(self, prob=0.3, prob_label=False): # is label a float?
super().__init__()
self.patch_aug = CutoutAugmentation(p=prob, prob_label=prob_label)
def forward(self, patch_feature_dict: Dict[str, Tensor], Y: Tensor, is_train=False):
"""
patch_aug (Dict[str, Tensor]): mapping between input feature name,
and corresponding augmentated patch representation
Y (Tensor): augmentated label
"""
if is_train:
aug_patches = {}
for feat_name, feat_value in patch_feature_dict.items():
feat_aug, Y_aug = self.patch_aug(feat_value, Y)
aug_patches[feat_name] = feat_aug
return aug_patches, Y_aug
else:
return patch_feature_dict, Y
class BrightnessLayer(nn.Module):
def __init__(self, prob=0.3, factor=1.0, prob_label=False): # is label a float?
super().__init__()
self.patch_aug = BrightnessAugmentation(prob, factor, prob_label)
def forward(self, patch_feature_dict: Dict[str, Tensor], Y: Tensor, is_train=False):
"""
patch_aug (Dict[str, Tensor]): mapping between input feature name,
and corresponding augmentated patch representation
Y (Tensor): augmentated label
"""
if is_train:
aug_patches = {}
for feat_name, feat_value in patch_feature_dict.items():
feat_aug, Y_aug = self.patch_aug(feat_value, Y)
aug_patches[feat_name] = feat_aug
return aug_patches, Y_aug
else:
return patch_feature_dict, Y
class SolarizeLayer(nn.Module):
def __init__(self, prob=0.3, threshold=1.0, prob_label=False): # is label a float?
super().__init__()
self.patch_aug = SolarizeAugmentation(prob, threshold, prob_label)
def forward(self, patch_feature_dict: Dict[str, Tensor], Y: Tensor, is_train=False):
"""
patch_aug (Dict[str, Tensor]): mapping between input feature name,
and corresponding augmentated patch representation
Y (Tensor): augmentated label
"""
if is_train:
aug_patches = {}
for feat_name, feat_value in patch_feature_dict.items():
feat_aug, Y_aug = self.patch_aug(feat_value, Y)
aug_patches[feat_name] = feat_aug
return aug_patches, Y_aug
else:
return patch_feature_dict, Y
class RotateLayer(nn.Module):
def __init__(self, prob=0.3, degree=90, prob_label=False): # is label a float?
super().__init__()
self.patch_aug = RotateAugmentation(prob, degree=degree, prob_label=prob_label)
def forward(self, patch_feature_dict: Dict[str, Tensor], Y: Tensor, is_train=False):
"""
patch_aug (Dict[str, Tensor]): mapping between input feature name,
and corresponding augmentated patch representation
Y (Tensor): augmentated label
"""
if is_train:
aug_patches = {}
for feat_name, feat_value in patch_feature_dict.items():
feat_aug, Y_aug = self.patch_aug(feat_value, Y)
aug_patches[feat_name] = feat_aug
return aug_patches, Y_aug
else:
return patch_feature_dict, Y
|
thanos-code-main
|
unagi/models/layers/patch_augmentations.py
|
from torch import nn
class SequenceModule(nn.Module):
"""Abstract sequence model class. All layers that the backbones
use must adhere to this
A sequence model is a layer that transforms an input of shape
(n_batch, l_sequence, d_input) to (n_batch, l_sequence, d_output)
Additionally, it returns a "state" which can be any additional information
For example, RNN and SSM layers may return their hidden state,
while some types of transformer layers (e.g. Transformer-XL) may want to pass
through state as well
- default_state receives a batch_shape with device and returns an initial state
- step simulates a single step of the sequence (e.g. one unroll for an RNN).
It receives a state and single input (n_batch, d_input) and returns a state
and output (n_batch, d_output)
- forward is a sequence-to-sequence transformation that receives an optional state
"""
# def __init__(self, transposed=False, *args, **kwargs):
# """ model should support regular (B, L, H) and transposed (B, H, L)
# axes ordering """
# self.transposed = transposed
@property
def d_output(self):
return self._d_output
@d_output.setter
def d_output(self, d):
self._d_output = d
@property
def state_to_tensor(self):
"""Returns a function mapping a state to a single tensor,
in case one wants to use the hidden state instead of the output
for final prediction"""
return lambda _: None
@property
def d_state(self):
"""Returns dimension of output of self.state_to_tensor"""
return None
@property
def transposed(self):
return self._transposed
@transposed.setter
def transposed(self, x):
self._transposed = x
def default_state(self, *batch_shape, device=None):
# TODO device shouldn't be needed; models should store their own
# initial state at initialization
return None
def step(self, x, state=None, *args, **kwargs):
return x, state
def forward(self, x, state=None, *args, **kwargs):
return x, state
|
thanos-code-main
|
unagi/models/encoders/base_sequence.py
|
import torch
import torch.nn as nn
from torchvision.models import resnet18, resnet34, resnet50 # noqa: F401
class ResnetEncoder(nn.Module):
def __init__(
self,
model="resnet18",
use_pretrained=True,
**kwargs,
):
super().__init__()
encoder = eval(model)(pretrained=use_pretrained)
self.f = []
"""for name, module in encoder.named_children():
if name == "conv1":
module = nn.Conv2d(
3, 64, kernel_size=3, stride=1, padding=1, bias=False
)
if not isinstance(module, nn.Linear) and not isinstance(
module, nn.MaxPool2d
):
self.f.append(module)"""
for name, module in encoder.named_children():
if not isinstance(module, nn.Linear):
self.f.append(module)
self.f = nn.Sequential(*self.f)
self.feature_size = encoder.fc.in_features
self.d_model = encoder.fc.in_features
def forward(self, x):
x = self.f(x)
x = torch.flatten(x, start_dim=1)
return x
|
thanos-code-main
|
unagi/models/encoders/image/resnet/resnet.py
|
import torch
from torch import nn
from unagi.models.encoders.base_sequence import SequenceModule
from unagi.models.encoders.sequence.transformer.transformer_modules import (
MHA_Encoder,
MHA_Encoder_Cat,
)
class TransformerEncoder(SequenceModule):
def __init__(
self,
d_model,
n_heads,
l_max=512,
n_layers=4,
dropout=0.1,
head_dropout=0.1,
mlp_dim=None,
tie_them_all=False,
cat=False,
d_cat=None,
att_dim=None,
learn_pos=True,
use_cls_token=True,
use_all_tokens=False,
ret_cls_token=True,
**kwargs
):
super().__init__()
if cat:
assert d_cat is not None
_f = []
dim = d_model
for _ in range(n_layers):
layer = MHA_Encoder_Cat(
dim,
n_heads,
mlp_dim=mlp_dim,
att_dim=att_dim,
d_out=d_cat,
head_dropout=head_dropout,
drop_path=dropout,
dropout=dropout,
)
_f += [layer]
dim += 2 * d_cat
_f += [nn.LayerNorm(dim)]
_f += [nn.Linear(dim, d_model)]
else:
def _block(drop_path=0.0):
return MHA_Encoder(
d_model,
n_heads,
mlp_dim=mlp_dim,
head_dropout=head_dropout,
drop_path=drop_path,
dropout=dropout,
)
if tie_them_all:
_f = [_block()] * n_layers
else:
_f = [
_block(
drop_path=k * dropout / (n_layers - 1) if n_layers > 1 else 0
)
for k in range(n_layers)
]
_f += [nn.LayerNorm(d_model)]
self.f = nn.Sequential(*_f)
self.use_cls_token = use_cls_token
self.use_all_tokens = use_all_tokens
self.ret_cls_token = ret_cls_token
self.learn_pos = learn_pos
self.pe = nn.Parameter(1e-1 * torch.randn(l_max + 2, d_model).clamp(-1, 1))
self.cls_token = (
nn.Parameter(1e-1 * torch.randn(1, 1, d_model).clamp(-1, 1))
if self.use_cls_token
else None
)
def add_tokens(self, x):
b, _, d = x.size()
if self.use_cls_token:
x = torch.cat(
[self.cls_token.expand(b, self.cls_token.size(1), d), x],
dim=1,
)
if self.learn_pos:
x += self.pe[0 : x.size(1)]
return x
def forward(self, x, state=None, *args, **kwargs):
x = self.add_tokens(x)
x = self.f(x)
if self.ret_cls_token:
x = x[:, 0]
return x
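# Shape sketch (illustrative): with the default use_cls_token=True and
# ret_cls_token=True, the encoder prepends a learned CLS token and returns
# only its final embedding.
# >>> enc = TransformerEncoder(d_model=128, n_heads=4, l_max=64, n_layers=2)
# >>> enc(torch.randn(2, 64, 128)).shape
# torch.Size([2, 128])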
|
thanos-code-main
|
unagi/models/encoders/sequence/transformer/transformer.py
|
import torch
from einops import rearrange
from torch import nn
from unagi.models.layers.blocks import FFN, Cat, PreNorm, Residual
class MHA(nn.Module):
def __init__(self, d_model, n_heads, dropout=0.1, head_dropout=None):
super().__init__()
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.scale = self.head_dim ** (-0.5)
# All the dropouts
self.out_dropout = nn.Dropout(dropout)
self.attn_dropout = nn.Dropout(dropout)
self.head_dropout = (
nn.Dropout2d(head_dropout) if head_dropout is not None else None
)
def forward(self, q, k, v, mask=None):
attn = torch.einsum("b s h d, b t h d -> b s h t", q, k).mul(self.scale)
if self.head_dropout is not None:
attn = self.head_dropout(attn)
if mask is not None:
attn[~mask] = -1e9
attn = attn.softmax(dim=-1)
# attn is batch x sentence x head x sentence
# v is batch x sentence x head x dim
ret = torch.einsum("b s h d, b t h s -> b t h d", v, self.attn_dropout(attn))
return self.out_dropout(rearrange(ret, "b t h d -> b t (h d)"))
# Initialization from
# https://github.com/SHI-Labs/Compact-Transformers/blob/main/src/cct.py
@staticmethod
def init_weight(m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
# An optimization to fuse keys-and-values
class MHA_fused(nn.Module):
def __init__(
self, d_model, n_heads, dropout=0.1, head_dropout=None, d_att=None, d_out=None
):
"""d_att: dimension of Q, K, V matrices (must be multiple of n_heads)
d_out: output dimension of module"""
super().__init__()
if d_att is None:
d_att = d_model
if d_out is not None:
self.proj = nn.Linear(d_att, d_out)
else:
self.proj = nn.Identity()
self.qkv = nn.Linear(d_model, 3 * d_att, bias=False)
self.f = MHA(d_att, n_heads, dropout=dropout, head_dropout=head_dropout)
self.n_heads = n_heads
def forward(self, x, mask=None):
# Now batch x sentence x 3*dim
qkv = rearrange(self.qkv(x), "b s (k h d) -> b s k h d", k=3, h=self.n_heads)
return self.proj(self.f(*torch.unbind(qkv, dim=2), mask=mask))
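# Shape sketch (illustrative): the fused projection produces Q, K and V from a
# single Linear, so attention preserves the (B, S, d_model) shape when d_att
# and d_out are left at their defaults.
# >>> mha = MHA_fused(d_model=64, n_heads=4)
# >>> mha(torch.randn(2, 10, 64)).shape
# torch.Size([2, 10, 64])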
class MHA_split(nn.Module):
def __init__(self, d_model, n_heads, out_d=None, dropout=0.1, head_dropout=None):
super().__init__()
out_d = d_model if out_d is None else out_d
        assert (d_model % n_heads == 0) and (out_d % n_heads == 0), (
            f"The input {d_model} and output {out_d} dimensions must be multiples"
            f" of {n_heads}"
        )
self.q = nn.Linear(d_model, d_model, bias=False)
self.k = nn.Linear(d_model, d_model, bias=False)
self.v = nn.Linear(d_model, out_d, bias=False)
self.f = MHA(d_model, n_heads, dropout=dropout, head_dropout=head_dropout)
self.n_heads = n_heads
def forward(self, q, k, v, mask=None):
def _r(f, x):
return rearrange(f(x), "b s (h d) -> b s h d", h=self.n_heads)
return self.f(_r(self.q, q), _r(self.k, k), _r(self.v, v), mask=mask)
#
# Encoder and decoder blocks
#
def _prenorm(d_model, x, drop_path=0.0):
return Residual(d_model, PreNorm(d_model, x), drop_path=drop_path)
class MHA_Encoder(nn.Module):
def __init__(
self,
d_model,
n_heads,
mlp_dim=None,
dropout=0.1,
drop_path=0.0,
head_dropout=None,
):
super().__init__()
def _pre(x):
return _prenorm(d_model, x, drop_path=drop_path)
# out_dim = d_model if out_dim is None else out_dim
self.mha = _pre(
MHA_fused(d_model, n_heads, dropout=dropout, head_dropout=head_dropout)
)
mlp_dim = d_model << 1 if mlp_dim is None else mlp_dim
self.ffn = _pre(FFN(d_model, mlp_dim, dropout=dropout))
def forward(self, x, mask=None):
x = self.mha(x, mask=mask)
return self.ffn(x)
def _cat(d_model, x, drop_path=0.0):
return Cat(PreNorm(d_model, x), drop_path=drop_path)
class MHA_Encoder_Cat(nn.Module):
def __init__(
self,
d_model,
n_heads,
mlp_dim=None,
dropout=0.1,
drop_path=0.0,
head_dropout=None,
d_out=None,
att_dim=None,
):
super().__init__()
def _pre(x):
return _cat(d_model, x, drop_path=drop_path)
d_out = d_model if d_out is None else d_out
d_att = 4 * d_out if att_dim is None else att_dim
mha = MHA_fused(
d_model,
n_heads,
dropout=dropout,
head_dropout=head_dropout,
d_att=d_att,
d_out=d_out,
)
self.mha = _cat(d_model, mha, drop_path)
# self.mha = _pre(
# MHA_fused(d_model, n_heads, dropout=dropout,
# head_dropout=head_dropout, d_att=4*d_out, d_out=d_out))
#
# self.mha = Cat(
# PreNorm(d_model, MHA_fused(
# d_model, n_heads, dropout=dropout, head_dropout=head_dropout,
# d_att=4*d_out, d_out=d_out)), drop_path=drop_path)
mlp_dim = 2 * d_out if mlp_dim is None else mlp_dim
mlp = FFN(d_model + d_out, mlp_dim, out_dim=d_out, dropout=dropout)
self.ffn = _cat(d_model + d_out, mlp, drop_path)
def forward(self, x, mask=None):
x = self.mha(x, mask=mask)
return self.ffn(x)
class MHA_Decoder(nn.Module):
def __init__(
self,
d_model,
n_heads,
mlp_dim=None,
dropout=0.1,
drop_path=0.0,
head_dropout=None,
):
super().__init__()
def _pre(x):
return _prenorm(d_model, x, drop_path=drop_path)
self._mha1 = _pre(
MHA_fused(d_model, n_heads, dropout=dropout, head_dropout=head_dropout)
)
self._mha2 = _pre(
MHA_split(d_model, n_heads, dropout=dropout, head_dropout=head_dropout)
)
mlp_dim = d_model << 1 if mlp_dim is None else mlp_dim
self.ffn = _pre(FFN(d_model, mlp_dim, dropout=dropout))
def forward(self, x, e_outputs, src_mask=None, tgt_mask=None):
x = self._mha1(x, mask=tgt_mask)
x = self._mha2(x, k=e_outputs, v=e_outputs, mask=src_mask)
return self.ffn(x)
|
thanos-code-main
|
unagi/models/encoders/sequence/transformer/transformer_modules.py
|
import torch
from transformers import AutoTokenizer, BertModel
from unagi.models.encoders.base_sequence import SequenceModule
class BertEncoder(SequenceModule):
def __init__(
self,
freeze_layers=True,
pretrained_lm_name="bert-base-uncased",
use_cls_token=True,
use_all_tokens=False,
pretrained_weights=None,
**kwargs,
):
super().__init__()
self.f = BertModel.from_pretrained(pretrained_lm_name)
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_lm_name)
self.f = self.f.train()
self.use_cls_token = use_cls_token
self.use_all_tokens = use_all_tokens
"""if freeze_layers:
for param in self.f.parameters():
param.requires_grad = False"""
self.d_model = self.f.encoder.layer[-1].output.dense.out_features
self.padding = "max_length"
self.truncation = True
self.max_length = 128
def forward(self, x):
# tok_out = self.tokenizer(
# x,
# padding=self.padding,
# truncation=self.truncation,
# max_length=self.max_length,
# )
# input_ids = torch.LongTensor(tok_out["input_ids"])
# attention_mask = torch.LongTensor(tok_out["attention_mask"])
input_ids = x
attention_mask = (x != 0).long()
token_type_ids = torch.zeros_like(input_ids)
# output = self.f(inputs_embeds=x, return_dict=True)
output = self.f(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
return_dict=True,
)
if self.use_cls_token:
# return output["pooler_output"]
return output["last_hidden_state"][:, 0, :].squeeze(dim=1)
else:
return output["last_hidden_state"]
|
thanos-code-main
|
unagi/models/encoders/sequence/bert/bert.py
|
from torch import nn
from unagi.models.layers.blocks import FFN, PreNorm, Residual
class mixer(nn.Module):
def __init__(self, d, n=64, dropout=0.0):
super().__init__()
self.f = FFN(n, n << 1)
def forward(self, x):
# b x p x c
return self.f(x.transpose(1, 2)).transpose(1, 2)
# Encoder and decoder blocks
def _prenorm(d, x, drop_path=0.0):
return Residual(d, PreNorm(d, x), drop_path=drop_path)
class mixer_encoder(nn.Module):
def __init__(
self,
d,
num_heads,
l_max, # should be equal to the sequence length
mlp_dim=None,
dropout=0.1,
drop_path=0.0,
head_dropout=None,
):
super().__init__()
def _pre(x):
return _prenorm(d, x, drop_path=drop_path)
self.mlp = _pre(mixer(d, n=l_max, dropout=dropout))
mlp_dim = d << 1 if mlp_dim is None else mlp_dim
self.ffn = _pre(FFN(d, mlp_dim, dropout=dropout))
def forward(self, x, mask=None):
x = self.mlp(x)
return self.ffn(x)
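# Shape sketch (illustrative): the mixer block first mixes across tokens (via
# the transpose in `mixer`), then across channels; l_max must equal the actual
# sequence length for the token-mixing FFN to fit.
# >>> blk = mixer_encoder(d=64, num_heads=1, l_max=16)
# >>> blk(torch.randn(2, 16, 64)).shape
# torch.Size([2, 16, 64])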
|
thanos-code-main
|
unagi/models/encoders/sequence/mixer/mixer_modules.py
|
from torch import nn
from unagi.models.encoders.base_sequence import SequenceModule
from unagi.models.encoders.sequence.mixer.mixer_modules import mixer_encoder
class MixerEncoder(SequenceModule):
def __init__(
self,
d_model,
n_heads,
l_max, # can be computed based on embedding
n_layers=4,
dropout=0.1,
head_dropout=0.1,
mlp_dim=None,
tie_them_all=False,
**kwargs,
):
super().__init__()
def _block():
return mixer_encoder(
d_model,
n_heads,
l_max=l_max,
mlp_dim=mlp_dim,
head_dropout=head_dropout,
dropout=dropout,
)
_f = (
[_block()] * n_layers
if tie_them_all
else [_block() for k in range(n_layers)]
)
_f += [nn.LayerNorm(d_model)]
self.f = nn.Sequential(*_f)
self.d_model = d_model
self.n_heads = n_heads
self.mlp_dim = mlp_dim
self.head_dropout = head_dropout
self.dropout = dropout
self.n_layers = n_layers
self.tie_them_all = tie_them_all
    def forward(self, x, state=None, mask=None, *args, **kwargs):
        if mask is not None:
            # zero out masked positions; mask is (B, S) with True for kept tokens
            x = x.masked_fill(~mask.unsqueeze(-1), 0)
        x = self.f(x)
        return x, state
|
thanos-code-main
|
unagi/models/encoders/sequence/mixer/mixer.py
|
import torch
from torch import nn
class ClassificationDecoder(nn.Module):
def __init__(self, d_input, d_output, **kwargs):
super().__init__()
# NOTE: compute d_input as module instantiation time
# d_input = sum(d_model of all encoders being fed to Classifier)
self.classification_layer = nn.Linear(d_input, d_output)
def forward(
self,
*final_outs,
):
"""
final_outs List[Tensor]: intermediate outputs from encoder. shape: (B, S, H)
"""
fx = torch.cat(final_outs, dim=-1)
return self.classification_layer(fx)
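# Shape sketch (illustrative): outputs from multiple encoders are concatenated
# on the feature axis, so d_input must equal the sum of their dimensions.
# >>> dec = ClassificationDecoder(d_input=128 + 64, d_output=10)
# >>> dec(torch.randn(2, 128), torch.randn(2, 64)).shape
# torch.Size([2, 10])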
|
thanos-code-main
|
unagi/models/decoders/classifier.py
|
import torch
from torch import nn
class ViewConcat(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.name = "view_concat"
def forward(self, *args):
return torch.stack(args, dim=1)
|
thanos-code-main
|
unagi/models/decoders/view_concat.py
|
"""
Modified from PyTorch Lightning Bolts implementation.
https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/models/autoencoders/components.py
"""
import torch
from torch import nn
from torch.nn import functional as F
class Interpolate(nn.Module):
"""nn.Module wrapper for F.interpolate."""
def __init__(self, size=None, scale_factor=None):
super().__init__()
self.size, self.scale_factor = size, scale_factor
def forward(self, x):
return F.interpolate(x, size=self.size, scale_factor=self.scale_factor)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution."""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def resize_conv3x3(in_planes, out_planes, scale=1):
"""upsample + 3x3 convolution with padding to avoid checkerboard artifact."""
if scale == 1:
return conv3x3(in_planes, out_planes)
return nn.Sequential(
Interpolate(scale_factor=scale), conv3x3(in_planes, out_planes)
)
def resize_conv1x1(in_planes, out_planes, scale=1):
"""upsample + 1x1 convolution with padding to avoid checkerboard artifact."""
if scale == 1:
return conv1x1(in_planes, out_planes)
return nn.Sequential(
Interpolate(scale_factor=scale), conv1x1(in_planes, out_planes)
)
class EncoderBlock(nn.Module):
"""ResNet block, copied from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L35."""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class EncoderBottleneck(nn.Module):
"""ResNet bottleneck, copied from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L75."""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
width = planes # this needs to change if we want wide resnets
self.conv1 = conv1x1(inplanes, width)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = conv3x3(width, width, stride)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class DecoderBlock(nn.Module):
"""ResNet block, but convs replaced with resize convs, and channel
increase is in second conv, not first."""
expansion = 1
def __init__(self, inplanes, planes, scale=1, upsample=None):
super().__init__()
self.conv1 = resize_conv3x3(inplanes, inplanes)
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = resize_conv3x3(inplanes, planes, scale)
self.bn2 = nn.BatchNorm2d(planes)
self.upsample = upsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.upsample is not None:
identity = self.upsample(x)
out += identity
out = self.relu(out)
return out
class DecoderBottleneck(nn.Module):
"""ResNet bottleneck, but convs replaced with resize convs."""
expansion = 4
def __init__(self, inplanes, planes, scale=1, upsample=None):
super().__init__()
width = planes # this needs to change if we want wide resnets
self.conv1 = resize_conv1x1(inplanes, width)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = resize_conv3x3(width, width, scale)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.upsample = upsample
self.scale = scale
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.upsample is not None:
identity = self.upsample(x)
out += identity
out = self.relu(out)
return out
class ResNetEncoder(nn.Module):
def __init__(self, block, layers, first_conv=False, maxpool1=False):
super().__init__()
self.inplanes = 64
self.first_conv = first_conv
self.maxpool1 = maxpool1
if self.first_conv:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
else:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
if self.maxpool1:
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
self.maxpool = nn.MaxPool2d(kernel_size=1, stride=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
class ResNetDecoder(nn.Module):
"""Resnet in reverse order."""
def __init__(
self,
block,
layers,
d_input,
input_height,
first_conv=False,
maxpool1=False,
**kwargs
):
super().__init__()
latent_dim = d_input
self.expansion = block.expansion
self.inplanes = 512 * block.expansion
self.first_conv = first_conv
self.maxpool1 = maxpool1
self.input_height = input_height
self.upscale_factor = 8
self.linear = nn.Linear(latent_dim, self.inplanes * 4 * 4)
self.layer1 = self._make_layer(block, 256, layers[0], scale=2)
self.layer2 = self._make_layer(block, 128, layers[1], scale=2)
self.layer3 = self._make_layer(block, 64, layers[2], scale=2)
if self.maxpool1:
self.layer4 = self._make_layer(block, 64, layers[3], scale=2)
self.upscale_factor *= 2
else:
self.layer4 = self._make_layer(block, 64, layers[3])
if self.first_conv:
self.upscale = Interpolate(scale_factor=2)
self.upscale_factor *= 2
else:
self.upscale = Interpolate(scale_factor=1)
# interpolate after linear layer using scale factor
self.upscale1 = Interpolate(size=input_height // self.upscale_factor)
self.conv1 = nn.Conv2d(
64 * block.expansion, 3, kernel_size=3, stride=1, padding=1, bias=False
)
def _make_layer(self, block, planes, blocks, scale=1):
upsample = None
if scale != 1 or self.inplanes != planes * block.expansion:
upsample = nn.Sequential(
resize_conv1x1(self.inplanes, planes * block.expansion, scale),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, scale, upsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.linear(x)
        # NOTE: interpolation replaced by Linear(latent_dim, 512 * expansion * 4 * 4)
# x = F.interpolate(x, scale_factor=4)
x = x.view(x.size(0), 512 * self.expansion, 4, 4)
x = self.upscale1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.upscale(x)
x = self.conv1(x)
return x
# def resnet18_encoder(first_conv, maxpool1):
# return ResNetEncoder(EncoderBlock, [2, 2, 2, 2], first_conv, maxpool1)
def resnet18_decoder(latent_dim, input_height, first_conv, maxpool1):
return ResNetDecoder(
DecoderBlock, [2, 2, 2, 2], latent_dim, input_height, first_conv, maxpool1
)
# def resnet50_encoder(first_conv, maxpool1):
# return ResNetEncoder(EncoderBottleneck, [3, 4, 6, 3], first_conv, maxpool1)
def resnet50_decoder(latent_dim, input_height, first_conv, maxpool1):
return ResNetDecoder(
DecoderBottleneck, [3, 4, 6, 3], latent_dim, input_height, first_conv, maxpool1
)
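# Shape sketch (illustrative): the decoder maps a latent vector back to a
# 3 x input_height x input_height image.
# >>> dec = resnet18_decoder(latent_dim=512, input_height=32,
# ...                        first_conv=False, maxpool1=False)
# >>> dec(torch.randn(2, 512)).shape
# torch.Size([2, 3, 32, 32])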
class Resnet18Decoder(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.decoder = ResNetDecoder(DecoderBlock, [2, 2, 2, 2], **kwargs)
def forward(self, x):
return self.decoder(x)
class Resnet50Decoder(nn.Module):
def __init__(self, **kwargs):
super().__init__()
        self.decoder = ResNetDecoder(DecoderBottleneck, [3, 4, 6, 3], **kwargs)
def forward(self, x):
return self.decoder(x)
|
thanos-code-main
|
unagi/models/decoders/image/resnet_autoencoder.py
|
import torch.nn as nn
from torchvision.models import resnet18, resnet34, resnet50 # noqa: F401
class ResnetDecoder(nn.Module):
def __init__(
self,
decoder_hidden_dim,
decoder_projection_dim,
model="resnet18",
d_model=None,
**kwargs,
):
super().__init__()
        self.d_model = d_model
        if not self.d_model:
            encoder = eval(model)()
            self.d_model = encoder.fc.in_features
self.decoder = nn.Sequential(
nn.Linear(self.d_model, decoder_hidden_dim),
nn.ReLU(),
nn.Linear(decoder_hidden_dim, decoder_projection_dim),
)
def forward(self, x):
return self.decoder(x)
|
thanos-code-main
|
unagi/models/decoders/image/resnet.py
|
from torch import nn
from unagi.models.encoders.base_sequence import SequenceModule
from unagi.models.encoders.sequence.transformer.transformer_modules import MHA_Decoder
class TransformerDecoder(SequenceModule):
def __init__(self, d_model, n_heads, dropout=0.1, head_dropout=0.1, **kwargs):
super().__init__()
self.blocks = nn.ModuleList(
[MHA_Decoder(d_model, n_heads, dropout=dropout, head_dropout=head_dropout)]
)
def forward(self, x, target, state=None, mask=None, *args, **kwargs):
for b in self.blocks:
tgt = b(target, x, src_mask=mask, tgt_mask=mask)
return tgt
|
thanos-code-main
|
unagi/models/decoders/sequence/transformer.py
|
import torch
from einops import rearrange
from torch import nn
from unagi.models.encoders.base_sequence import SequenceModule
from unagi.models.encoders.sequence.mixer.mixer_modules import mixer_encoder
class MixerDecoder(SequenceModule):
def __init__(
self,
d_model,
n_heads,
l_max, # can be computed based on embedding
n_layers=4,
dropout=0.1,
head_dropout=0.1,
mlp_dim=None,
tie_them_all=False,
**kwargs,
):
super().__init__()
def _block():
return mixer_encoder(
d_model,
n_heads,
l_max=l_max,
mlp_dim=mlp_dim,
head_dropout=head_dropout,
dropout=dropout,
)
_f = (
[_block()] * n_layers
if tie_them_all
else [_block() for k in range(n_layers)]
)
_f += [nn.LayerNorm(d_model)]
self.decoder = nn.Sequential(*_f)
self.d_model = d_model
self.n_heads = n_heads
self.mlp_dim = mlp_dim
self.head_dropout = head_dropout
self.dropout = dropout
self.n_layers = n_layers
self.tie_them_all = tie_them_all
self.device = torch.device("cpu")
if torch.cuda.is_available():
self.device = torch.device("cuda")
def forward(self, x, state=None, mask=None, *args, **kwargs):
# print(f"px={px.size()} mask={mask.size()}")
pooled_output = x.mean(-2)
if not self.expand:
self.expand = nn.Linear(self.d_model, self.d_model * x.size(1)).to(
self.device
)
temp_enc = rearrange(self.expand(pooled_output), "b (s d) -> b s d", d=self.d)
decoded_outputs = self.decoder(temp_enc, encoding=True)
return decoded_outputs
|
thanos-code-main
|
unagi/models/decoders/sequence/mixer.py
|
import torch
from torch import nn
class EinsumReduceDecoder(nn.Module):
def __init__(self, d_model, **kwargs):
super().__init__()
# NOTE: compute d_input as module instantiation time
# d_input = sum(d_model of all encoders being fed to Classifier)
self.attend = nn.Linear(d_model, 1)
    def forward(self, x):
        """
        x: intermediate outputs from encoder. shape: (B, S, H)
        """
        # attention weights over the sequence dimension, then a weighted sum
        weights = self.attend(x).softmax(dim=1)  # (B, S, 1)
        return torch.einsum("b s o, b s d -> b d", weights, x)
|
thanos-code-main
|
unagi/models/ops/einsum_reduce.py
|
import torch
from torch import nn
from torchvision import transforms as transforms
class Grayscale(nn.Module):
def __init__(self, dim=1, resize=None, **kwargs):
super().__init__()
self.dim = dim
self.resize = resize
if self.resize:
self.resize_func = transforms.Resize(
self.resize, transforms.InterpolationMode.BILINEAR
)
def forward(self, x):
grayscale_image = torch.mean(x, dim=self.dim, keepdim=True)
if self.resize:
return self.resize_func(grayscale_image)
return grayscale_image
|
thanos-code-main
|
unagi/models/ops/grayscale.py
|
import torch
from torch import nn
class SequenceConcat(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.name = "sequence_concat"
def forward(self, *args):
return torch.cat(args, dim=1)
|
thanos-code-main
|
unagi/models/ops/sequence_concat.py
|
from einops import rearrange
from torch import nn
class ViewSelect(nn.Module):
def __init__(self, view_idx, n_views, **kwargs):
super().__init__()
self.name = "view_select"
self.view_idx = view_idx
self.n_views = n_views
def forward(self, input):
embs = rearrange(input, "(b v) ... -> b v ...", v=self.n_views)
embs = embs[:, self.view_idx, ...]
return embs
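# Shape sketch (illustrative): inputs are assumed to interleave views along the
# batch axis as (b v), so selecting one view shrinks the batch by n_views.
# >>> vs = ViewSelect(view_idx=0, n_views=2)
# >>> vs(torch.randn(8, 32)).shape
# torch.Size([4, 32])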
|
thanos-code-main
|
unagi/models/ops/view_select.py
|
import torch
from torch import nn
class ViewConcat(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.name = "view_concat"
def forward(self, *args):
return torch.stack(args, dim=1)
|
thanos-code-main
|
unagi/models/ops/view_concat.py
|
from torch import nn
class LinearProj(nn.Module):
def __init__(self, d_input, d_output, **kwargs):
super().__init__()
self.linear_proj = nn.Linear(d_input, d_output)
def forward(self, x):
return self.linear_proj(x)
|
thanos-code-main
|
unagi/models/ops/linear_proj.py
|
from torch import nn
class PoolDecoder(nn.Module):
def __init__(self, **kwargs):
super().__init__()
        # NOTE: compute d_input at module instantiation time
        # d_input = sum(d_model of all encoders being fed to Classifier)
    def forward(self, x):
        """
        x: intermediate outputs from the encoder. shape: (B, S, H)
        """
        return x.mean(-2)
|
thanos-code-main
|
unagi/models/ops/pool.py
|
from einops import rearrange
from torch import nn
class ImageReshape(nn.Module):
def __init__(self, d_input, output_height, output_width, **kwargs):
super().__init__()
self.name = "view_select"
self.d_input = d_input
self.output_height = output_height
self.output_width = output_width
def forward(self, input):
embs = rearrange(
input, "... (h w) -> ... h w", h=self.output_height, w=self.output_width
)
return embs
|
thanos-code-main
|
unagi/models/ops/image_reshape.py
|
from copy import deepcopy
from unagi.data.transforms import ALL_TRANSFORMS
from unagi.data.transforms.image.compose import Compose
def get_transforms(
input_features: dict,
dataset_split: str,
augmentations: dict,
default_transforms: dict = {},
):
"""
Gets list of transforms for each input feature.
# Inputs
:param input_features: (dict) contains all imput feature metadata including
transform information used by this module.
# Returns
:return: returns a dict mapping input feature name to relevamt transforms.
"""
ifeat_to_transforms = {}
for name, inpt_feat in input_features.items():
transforms_list = []
feat_type = inpt_feat["type"]
# key that has corresponding mapping in augmentations.raw section
if dataset_split == "train":
augmentation_key = (
inpt_feat["transform"] if "transform" in inpt_feat.keys() else None
)
if augmentation_key is not None and augmentations is not None:
augmentation_list = augmentations[augmentation_key]
for aug in augmentation_list:
type = aug["type"]
aug = deepcopy(aug)
del aug["type"]
if type in ALL_TRANSFORMS[feat_type]:
transforms_list.append(ALL_TRANSFORMS[feat_type][type](**aug))
else:
raise ValueError(
f"Unknown transform type: {type} for feature: {name}"
)
# check if default transformation is specified in experiment config
# if yes, overwrite preset dataset transformation
default_transforms_key = (
inpt_feat["default_transform"]
if "default_transform" in inpt_feat.keys()
else None
)
if default_transforms_key is not None and augmentations is not None:
augmentation_list = augmentations[default_transforms_key]
for aug in augmentation_list:
type = "" + aug["type"]
aug = deepcopy(aug)
del aug["type"]
if type in ALL_TRANSFORMS[feat_type]:
transforms_list.append(ALL_TRANSFORMS[feat_type][type](**aug))
else:
raise ValueError(
f"Unknown transform type: {type} for feature: {name}"
)
else:
# use dataset preset transform
if feat_type in default_transforms:
transforms_list.extend(default_transforms[feat_type])
composed_transforms = Compose(transforms_list)
if inpt_feat["views"] >= 1:
contrastive_transform = ALL_TRANSFORMS["task"]["Contrastive"]
composed_transforms = contrastive_transform(
composed_transforms,
inpt_feat["views"] if dataset_split == "train" else 1,
)
if inpt_feat["mask"]:
tuple_transform = ALL_TRANSFORMS["task"]["Mask"]
mask_gen = ALL_TRANSFORMS["task"]["MaskGenerator"]
composed_transforms = tuple_transform(
composed_transforms,
mask_gen(
1, # task_config["contrastive"]["contrastive_views"],
inpt_feat["mask_length"],
),
)
ifeat_to_transforms[name] = composed_transforms
return ifeat_to_transforms
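# Hedged usage sketch -- the exact experiment-config schema is an assumption
# inferred from the key lookups above:
#
#   input_features = {
#       "img": {"type": "image", "transform": "train_aug",
#               "views": 1, "mask": False},
#   }
#   augmentations = {"train_aug": [{"type": "HorizontalFlip", "prob": 0.5}]}
#   t = get_transforms(input_features, "train", augmentations)
#   # t["img"] is a Compose, wrapped in the Contrastive task transform
#   # because "views" >= 1.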
|
thanos-code-main
|
unagi/data/data_utils/transform_util.py
|
thanos-code-main
|
unagi/data/data_utils/__init__.py
|
|
# flake8: noqa
# from __future__ import annotations
import logging
from typing import Collection, List, Sequence, Tuple
import meerkat as mk
import pandas as pd
import torch
from meerkat.columns.lambda_column import LambdaColumn
from meerkat.tools.lazy_loader import LazyLoader
from unagi.data.transforms.task import GroupTransform, TupleTransform
folder = LazyLoader("torchvision.datasets.folder")
logger = logging.getLogger(__name__)
"""
class MultiImageColumn(LambdaColumn):
def __init__(
self,
        data: Sequence[Tuple[str, str]] = None,
transform: List[callable] = None,
loader: callable = None,
*args,
**kwargs,
):
super(MultiImageColumn, self).__init__(
mk.PandasSeriesColumn.from_data(data), *args, **kwargs
)
self.loader = self.default_loader if loader is None else loader
self.transform = transform
    def fn(self, filepaths: Tuple[str, str]):
image_0, image_1 = self.loader(filepaths[0]), self.loader(filepaths[1])
image_0, image_1 = self.transform[0](image_0), self.transform[1](image_1)
image_cat = torch.cat((image_0, image_1), 1)
return self.transform[2](image_cat)
@classmethod
def from_filepaths(
cls,
filepaths: List[Sequence[str]],
loader: callable = None,
transform: List[callable] = None,
*args,
**kwargs,
):
return cls(data=filepaths, loader=loader, transform=transform, *args, **kwargs)
@classmethod
def default_loader(cls, *args, **kwargs):
return folder.default_loader(*args, **kwargs)
@classmethod
def _state_keys(cls) -> Collection:
return (super()._state_keys() | {"transform", "loader"}) - {"fn"}
def _repr_pandas_(self) -> pd.Series:
return "ImageCell(" + self.data.data.reset_index(drop=True) + ")"
"""
class TextTransformCell(mk.AbstractCell):
    def __init__(self, input_text: str, transforms):
        super().__init__()
        self.input = input_text
        self.transforms = transforms
        self._token_ids = None
def get(self):
if self._token_ids is None:
token_ids = self.transforms(self.input, None)[0]
self._token_ids = token_ids
return self._token_ids
def data(self):
return self.input
def __repr__(self):
return "TextTransformCell"
class PILImgTransformCell(mk.AbstractCell):
    def __init__(self, pil_image, transforms):
        super().__init__()
        self.pil_image = pil_image
        self.transforms = transforms
def get(self):
if self.transforms is None:
return self.pil_image
else:
transformed_img = self.transforms(self.pil_image, None)
if not isinstance(self.transforms, TupleTransform) and not isinstance(
self.transforms, GroupTransform
):
transformed_img = transformed_img[0]
return transformed_img
def data(self):
return self.pil_image
    # NOTE: a transforms() accessor is omitted here because it would be
    # shadowed by the `self.transforms` attribute set in __init__.
def __repr__(self):
return "PILImgTransformCell"
|
thanos-code-main
|
unagi/data/data_utils/meerkat_processors.py
|
from typing import Any, Dict, List, Tuple, Union
from einops import rearrange
from torch import Tensor
from unagi.trainer.data import default_unagi_collate_fn
def unagi_collate_fn(
# is_train,
# feature_type_map,
# feature_view_map,
batch: Union[List[Tuple[Dict[str, Any], Dict[str, Tensor]]], List[Dict[str, Any]]],
):
(x_dict, y_dict) = default_unagi_collate_fn(batch)
# x_dict["is_train"] = is_train
# x_dict["feature_type_map"] = feature_type_map
# x_dict["labels"] = y_dict["labels"]
"""x_dict.update(
y_dict
) # ADD THIS LINE, AND IN YOUR DATALOADER ADD MORE LABELES
"""
new_x_dict = {}
new_x_dict["index"] = x_dict["index"]
del x_dict["index"]
new_x_dict["inputs"] = x_dict
new_y_dict = {k: rearrange(v, "b v ... -> (b v) ...") for k, v in y_dict.items()}
return (new_x_dict, new_y_dict)
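# Hedged shape note: labels arrive as (batch, views, ...) and are flattened to
# (batch * views, ...) so they line up with view-flattened model inputs, e.g.
#   rearrange(torch.zeros(4, 2, 10), "b v ... -> (b v) ...")  # -> (8, 10)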
|
thanos-code-main
|
unagi/data/data_utils/collate_fns.py
|
from unagi.data.transforms.image import ALL_TRANSFORMS as ALL_IMAGE_TRANSFORMS
from unagi.data.transforms.task import ALL_TRANSFORMS as ALL_TASK_TRANSFORMS
from unagi.data.transforms.text import ALL_TRANSFORMS as ALL_TEXT_TRANSFORMS
ALL_TRANSFORMS = {
"text": ALL_TEXT_TRANSFORMS,
"image": ALL_IMAGE_TRANSFORMS,
"task": ALL_TASK_TRANSFORMS,
}
|
thanos-code-main
|
unagi/data/transforms/__init__.py
|
from unagi.data.transforms.image.transform import UnagiTransform
class Reshape2D(UnagiTransform):
def __init__(self, h_dim, w_dim, name=None, prob=1.0, level=0):
self.h_dim = h_dim
self.w_dim = w_dim
super().__init__(name, prob, level)
    def transform(self, pil_img, label, **kwargs):
        # Expects a flattened tensor (e.g. after ToTensor), not a PIL image.
        return pil_img.view(self.h_dim, self.w_dim), label
|
thanos-code-main
|
unagi/data/transforms/image/reshape2d.py
|
from PIL import ImageOps
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Solarize(UnagiTransform):
value_range = (0, 256)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
return ImageOps.solarize(pil_img, degree), label
|
thanos-code-main
|
unagi/data/transforms/image/solarize.py
|
from PIL import ImageOps
from unagi.data.transforms.image.transform import UnagiTransform
class AutoContrast(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return ImageOps.autocontrast(pil_img), label
|
thanos-code-main
|
unagi/data/transforms/image/auto_contrast.py
|
from torchvision import transforms
from unagi.data.transforms.image.transform import UnagiTransform
class GaussianBlur(UnagiTransform):
def __init__(self, kernel_size, sigma=(0.1, 2.0), name=None, prob=1.0, level=0):
self.kernel_size = kernel_size
self.sigma = sigma
self.transform_func = transforms.GaussianBlur(self.kernel_size, self.sigma)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
|
thanos-code-main
|
unagi/data/transforms/image/gaussian_blur.py
|
from PIL import ImageEnhance
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Color(UnagiTransform):
value_range = (0.1, 1.9)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
return ImageEnhance.Color(pil_img).enhance(degree), label
|
thanos-code-main
|
unagi/data/transforms/image/color.py
|
from PIL import Image
from unagi.data.transforms.image.transform import UnagiTransform
class HorizontalFlip(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return pil_img.transpose(Image.FLIP_LEFT_RIGHT), label
|
thanos-code-main
|
unagi/data/transforms/image/horizontal_filp.py
|
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class Grayscale(UnagiTransform):
def __init__(self, num_output_channels=1):
self.num_output_channels = num_output_channels
self.transform_func = transforms.Grayscale(self.num_output_channels)
super().__init__(name="Grayscale", prob=1.0, level=0)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
def __repr__(self):
return (
f"<Transform ({self.name}), num_output_channels={self.num_output_channels}>"
)
|
thanos-code-main
|
unagi/data/transforms/image/grayscale.py
|
class Compose(object):
"""Composes several transforms together.
Originally from:
https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#Compose
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, label, **kwargs):
for idx, t in enumerate(self.transforms):
kwargs["idx"] = idx
img, label = t(img, label, **kwargs)
return img, label
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
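# Hedged example: unlike torchvision's Compose, each transform here threads
# the label through and receives its pipeline position as kwargs["idx"]:
#   pipeline = Compose([HorizontalFlip(prob=0.5), ToTensor()])  # assumed imports
#   img, label = pipeline(pil_img, label)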
|
thanos-code-main
|
unagi/data/transforms/image/compose.py
|
from torchvision import transforms
from unagi.data.transforms.image.transform import UnagiTransform
class ColorDistortion(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0, strength=0.5):
super().__init__(name, prob, level)
self.strength = strength
self.color_jitter = transforms.ColorJitter(
0.8 * self.strength,
0.8 * self.strength,
0.8 * self.strength,
0.2 * self.strength,
)
self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8)
self.rnd_gray = transforms.RandomGrayscale(p=0.2)
self.color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray])
def transform(self, pil_img, label, **kwargs):
return self.color_distort(pil_img), label
    def __repr__(self):
        return (
            f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
            f"strength={self.strength}>"
        )
|
thanos-code-main
|
unagi/data/transforms/image/color_distortion.py
|
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class RandomGrayscale(UnagiTransform):
def __init__(self, p=0.1, name=None, prob=1.0, level=0):
self.p = p
self.transform_func = transforms.RandomGrayscale(self.p)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
    def __repr__(self):
        return (
            f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
            f"p={self.p}>"
        )
|
thanos-code-main
|
unagi/data/transforms/image/random_grayscale.py
|
import numpy as np
from PIL import ImageDraw
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Cutout(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0, max_pixel=20, color=None):
self.max_pixel = max_pixel
self.value_range = (0, self.max_pixel)
self.color = color
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
pil_img = pil_img.copy()
degree = categorize_value(self.level, self.value_range, "int")
width, height = pil_img.size
x0 = np.random.uniform(width)
y0 = np.random.uniform(height)
x0 = int(max(0, x0 - degree / 2.0))
y0 = int(max(0, y0 - degree / 2.0))
x1 = min(width, x0 + degree)
y1 = min(height, y0 + degree)
xy = (x0, y0, x1, y1)
if self.color is not None:
color = self.color
elif pil_img.mode == "RGB":
color = (125, 123, 114)
elif pil_img.mode == "L":
color = 121
else:
raise ValueError(f"Unspported image mode {pil_img.mode}")
ImageDraw.Draw(pil_img).rectangle(xy, color)
return pil_img, label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"max_pixel={self.max_pixel}>"
)
|
thanos-code-main
|
unagi/data/transforms/image/cutout.py
|
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class Normalize(UnagiTransform):
def __init__(self, mean, std, name=None, prob=1.0, level=0):
self.mean = mean
self.std = std
self.transform_func = transforms.Normalize(mean, std)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
|
thanos-code-main
|
unagi/data/transforms/image/normalize.py
|
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class CenterCrop(UnagiTransform):
def __init__(self, size, name=None, prob=1.0, level=0):
self.size = size
self.transform_func = transforms.CenterCrop(self.size)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"size={self.size}>"
)
|
thanos-code-main
|
unagi/data/transforms/image/center_crop.py
|
from unagi.data.transforms.image.auto_contrast import AutoContrast
from unagi.data.transforms.image.blur import Blur
from unagi.data.transforms.image.brightness import Brightness
from unagi.data.transforms.image.center_crop import CenterCrop
from unagi.data.transforms.image.color import Color
from unagi.data.transforms.image.color_distortion import ColorDistortion
from unagi.data.transforms.image.color_jitter import ColorJitter
from unagi.data.transforms.image.contrast import Contrast
from unagi.data.transforms.image.cutout import Cutout
from unagi.data.transforms.image.equalize import Equalize
from unagi.data.transforms.image.gaussian_blur import GaussianBlur
from unagi.data.transforms.image.grayscale import Grayscale
from unagi.data.transforms.image.horizontal_filp import HorizontalFlip
from unagi.data.transforms.image.identity import Identity
from unagi.data.transforms.image.invert import Invert
from unagi.data.transforms.image.normalize import Normalize
from unagi.data.transforms.image.posterize import Posterize
from unagi.data.transforms.image.random_crop import RandomCrop
from unagi.data.transforms.image.random_grayscale import RandomGrayscale
from unagi.data.transforms.image.random_horizontal_flip import RandomHorizontalFlip
from unagi.data.transforms.image.random_resize_crop import RandomResizedCrop
from unagi.data.transforms.image.reshape2d import Reshape2D
from unagi.data.transforms.image.resize import Resize
from unagi.data.transforms.image.resize_and_pad import ResizeAndPad
from unagi.data.transforms.image.rotate import Rotate
from unagi.data.transforms.image.sharpness import Sharpness
from unagi.data.transforms.image.shear_x import ShearX
from unagi.data.transforms.image.shear_y import ShearY
from unagi.data.transforms.image.smooth import Smooth
from unagi.data.transforms.image.solarize import Solarize
from unagi.data.transforms.image.to_tensor import ToTensor
from unagi.data.transforms.image.translate_x import TranslateX
from unagi.data.transforms.image.translate_y import TranslateY
from unagi.data.transforms.image.vertical_flip import VerticalFlip
ALL_TRANSFORMS = {
"AutoContrast": AutoContrast,
"Blur": Blur,
"Brightness": Brightness,
"GaussianBlur": GaussianBlur,
"CenterCrop": CenterCrop,
"Color": Color,
"Contrast": Contrast,
"Cutout": Cutout,
"Equalize": Equalize,
"GaussianBlur": GaussianBlur,
"Grayscale": Grayscale,
"ColorDistortion": ColorDistortion,
"HorizontalFlip": HorizontalFlip,
"Identity": Identity,
"Invert": Invert,
"Posterize": Posterize,
"RandomCrop": RandomCrop,
"RandomResizedCrop": RandomResizedCrop,
"Resize": Resize,
"Rotate": Rotate,
"Sharpness": Sharpness,
"ShearX": ShearX,
"ShearY": ShearY,
"Smooth": Smooth,
"Solarize": Solarize,
"TranslateX": TranslateX,
"TranslateY": TranslateY,
"VerticalFlip": VerticalFlip,
"ToTensor": ToTensor,
"Normalize": Normalize,
"Reshape2D": Reshape2D,
"RandomHorizontalFlip": RandomHorizontalFlip,
"ResizeAndPad": ResizeAndPad,
"ColorJitter": ColorJitter,
"RandomGrayscale": RandomGrayscale,
}
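# Hedged example: the registry lets transforms be built from config dicts,
# mirroring the loop in unagi/data/data_utils/transform_util.py:
#   cfg = {"type": "Rotate", "prob": 0.8, "level": 0.5}
#   t = ALL_TRANSFORMS[cfg.pop("type")](**cfg)  # Rotate(prob=0.8, level=0.5)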
|
thanos-code-main
|
unagi/data/transforms/image/__init__.py
|
from PIL import ImageOps
from unagi.data.transforms.image.transform import UnagiTransform
class Equalize(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return ImageOps.equalize(pil_img), label
|
thanos-code-main
|
unagi/data/transforms/image/equalize.py
|
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class ColorJitter(UnagiTransform):
def __init__(
self,
brightness=0.0,
contrast=0.0,
saturation=0.0,
hue=0.0,
name=None,
prob=1.0,
level=0,
):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
self.transform_func = transforms.ColorJitter(
brightness=self.brightness,
contrast=self.contrast,
saturation=self.saturation,
hue=self.hue,
)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
    def __repr__(self):
        return (
            f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
            f"brightness={self.brightness}, contrast={self.contrast}, "
            f"saturation={self.saturation}, hue={self.hue}>"
        )
|
thanos-code-main
|
unagi/data/transforms/image/color_jitter.py
|
from PIL import ImageFilter
from unagi.data.transforms.image.transform import UnagiTransform
class Blur(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return pil_img.filter(ImageFilter.BLUR), label
|
thanos-code-main
|
unagi/data/transforms/image/blur.py
|
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class Resize(UnagiTransform):
def __init__(
self,
size,
name=None,
prob=1.0,
level=0,
interpolation=transforms.InterpolationMode.BILINEAR,
):
self.size = size
self.interpolation = interpolation
self.transform_func = transforms.Resize(self.size, self.interpolation)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"size={self.size}, interpolation={self.interpolation}>"
)
|
thanos-code-main
|
unagi/data/transforms/image/resize.py
|
import random
from PIL import Image
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class TranslateX(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0, max_degree=10):
self.max_degree = max_degree
self.value_range = (0, self.max_degree)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
if random.random() > 0.5:
degree = -degree
return (
pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, degree, 0, 1, 0)),
label,
)
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"max_degree={self.max_degree}>"
)
|
thanos-code-main
|
unagi/data/transforms/image/translate_x.py
|
def categorize_value(level, value_range, type="int"):
val = value_range[0] + level * (value_range[1] - value_range[0])
return int(val) if type == "int" else float(val)
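# Worked examples: `level` linearly interpolates the value range.
#   categorize_value(0.5, (0, 30), "float")  # -> 15.0 (cf. Rotate.value_range)
#   categorize_value(0.5, (0, 4), "int")     # -> 2 (truncated toward zero)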
|
thanos-code-main
|
unagi/data/transforms/image/utils.py
|
import random
class UnagiTransform(object):
"""Base UnagiTransform transfrom class.
Args:
name(str): Transformation name.
prob(float): Transformation probability.
level(int): Transformation level.
"""
def __init__(self, name=None, prob=1.0, level=0):
self.name = name if name is not None else type(self).__name__
self.prob = prob
assert 0 <= level <= 1.0, "Invalid level, level must be in [0, 1.0]."
self.level = level
def transform(self, pil_img, label, **kwargs):
return pil_img, label
def __call__(self, pil_img, label, **kwargs):
if random.random() <= self.prob:
return self.transform(pil_img, label, **kwargs)
else:
return pil_img, label
def __repr__(self):
return f"<Transform ({self.name}), prob={self.prob}, level={self.level}>"
|
thanos-code-main
|
unagi/data/transforms/image/transform.py
|
import random
from PIL import Image
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class TranslateY(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0, max_degree=10):
self.max_degree = max_degree
self.value_range = (0, self.max_degree)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
if random.random() > 0.5:
degree = -degree
return (
pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, 0, 1, degree)),
label,
)
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"max_degree={self.max_degree}>"
)
|
thanos-code-main
|
unagi/data/transforms/image/translate_y.py
|
from PIL import ImageFilter
from unagi.data.transforms.image.transform import UnagiTransform
class Smooth(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return pil_img.filter(ImageFilter.SMOOTH), label
|
thanos-code-main
|
unagi/data/transforms/image/smooth.py
|
from PIL import ImageOps
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Posterize(UnagiTransform):
value_range = (0, 4)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "int")
return ImageOps.posterize(pil_img, degree), label
|
thanos-code-main
|
unagi/data/transforms/image/posterize.py
|
from PIL import ImageEnhance
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Sharpness(UnagiTransform):
value_range = (0.1, 1.9)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
return ImageEnhance.Sharpness(pil_img).enhance(degree), label
|
thanos-code-main
|
unagi/data/transforms/image/sharpness.py
|
from PIL import ImageOps
from unagi.data.transforms.image.transform import UnagiTransform
class Invert(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return ImageOps.invert(pil_img), label
|
thanos-code-main
|
unagi/data/transforms/image/invert.py
|
from PIL import Image, ImageOps
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class ResizeAndPad(UnagiTransform):
    def __init__(
        self,
        resized_width,
        resized_height,
        name=None,
        prob=1.0,
        level=0,
        ratio=(0.75, 1.333_333_333_333_333_3),
        interpolation=transforms.InterpolationMode.BILINEAR,
    ):
        self.resized_height = resized_height
        self.resized_width = resized_width
        self.ratio = ratio
        self.interpolation = interpolation
        super().__init__(name, prob, level)
    def transform(self, pil_img, label, **kwargs):
        original_size = pil_img.size
        # Scale the original dimensions so the longer side fits the target
        # width, preserving the aspect ratio.
        scale = float(self.resized_width) / max(original_size)
        new_size = tuple(int(dim * scale) for dim in original_size)
        pil_img = pil_img.resize(new_size, Image.LANCZOS)  # ANTIALIAS is deprecated
        delta_w = self.resized_width - new_size[0]
        delta_h = self.resized_height - new_size[1]
        padding = (
            delta_w // 2,
            delta_h // 2,
            delta_w - (delta_w // 2),
            delta_h - (delta_h // 2),
        )
        resized_img = ImageOps.expand(pil_img, padding)
        # Return the (image, label) pair like every other UnagiTransform.
        return resized_img, label
    def __repr__(self):
        return (
            f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
            f"resized_width={self.resized_width}, "
            f"resized_height={self.resized_height}, ratio={self.ratio}, "
            f"interpolation={self.interpolation}>"
        )
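# Worked example (with the aspect-ratio fix above): a 200x100 input with
# resized_width = resized_height = 128 gives scale = 128/200 = 0.64, a
# resize to 128x64, and padding (0, 32, 0, 32) to center it in a 128x128
# frame.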
|
thanos-code-main
|
unagi/data/transforms/image/resize_and_pad.py
|
from PIL import ImageEnhance
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Brightness(UnagiTransform):
value_range = (0.1, 1.9)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
return ImageEnhance.Brightness(pil_img).enhance(degree), label
|
thanos-code-main
|
unagi/data/transforms/image/brightness.py
|
import random
from PIL import Image
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class ShearY(UnagiTransform):
value_range = (0.0, 0.3)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
if random.random() > 0.5:
degree = -degree
return (
pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, degree, 1, 0)),
label,
)
|
thanos-code-main
|
unagi/data/transforms/image/shear_y.py
|
import random
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Rotate(UnagiTransform):
value_range = (0, 30)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
if random.random() > 0.5:
degree = -degree
return pil_img.rotate(degree), label
|
thanos-code-main
|
unagi/data/transforms/image/rotate.py
|
from unagi.data.transforms.image.transform import UnagiTransform
class Identity(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return pil_img, label
|
thanos-code-main
|
unagi/data/transforms/image/identity.py
|