python_code (stringlengths 0-992k) | repo_name (stringlengths 8-46) | file_path (stringlengths 5-162) |
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import logging
import os
from omegaconf import OmegaConf
import dinov2.distributed as distributed
from dinov2.logging import setup_logging
from dinov2.utils import utils
from dinov2.configs import dinov2_default_config
logger = logging.getLogger("dinov2")
def apply_scaling_rules_to_cfg(cfg): # to fix
if cfg.optim.scaling_rule == "sqrt_wrt_1024":
base_lr = cfg.optim.base_lr
cfg.optim.lr = base_lr
cfg.optim.lr *= math.sqrt(cfg.train.batch_size_per_gpu * distributed.get_global_size() / 1024.0)
logger.info(f"sqrt scaling learning rate; base: {base_lr}, new: {cfg.optim.lr}")
else:
raise NotImplementedError
return cfg
def write_config(cfg, output_dir, name="config.yaml"):
logger.info(OmegaConf.to_yaml(cfg))
saved_cfg_path = os.path.join(output_dir, name)
with open(saved_cfg_path, "w") as f:
OmegaConf.save(config=cfg, f=f)
return saved_cfg_path
def get_cfg_from_args(args):
args.output_dir = os.path.abspath(args.output_dir)
args.opts += [f"train.output_dir={args.output_dir}"]
default_cfg = OmegaConf.create(dinov2_default_config)
cfg = OmegaConf.load(args.config_file)
cfg = OmegaConf.merge(default_cfg, cfg, OmegaConf.from_cli(args.opts))
return cfg
def default_setup(args):
distributed.enable(overwrite=True)
seed = getattr(args, "seed", 0)
rank = distributed.get_global_rank()
global logger
setup_logging(output=args.output_dir, level=logging.INFO)
logger = logging.getLogger("dinov2")
utils.fix_random_seeds(seed + rank)
logger.info("git:\n {}\n".format(utils.get_sha()))
logger.info("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg_from_args(args)
os.makedirs(args.output_dir, exist_ok=True)
default_setup(args)
apply_scaling_rules_to_cfg(cfg)
write_config(cfg, args.output_dir)
return cfg
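# Hedged worked example (not part of the original file): the "sqrt_wrt_1024" rule above
# scales the base learning rate by sqrt(global_batch_size / 1024). The numbers below are
# illustrative only.
def _example_scaled_lr(base_lr: float, batch_size_per_gpu: int, world_size: int) -> float:
    return base_lr * math.sqrt(batch_size_per_gpu * world_size / 1024.0)
# With a hypothetical base_lr of 0.004: 32 GPUs x 32 images per GPU (global batch 1024) keeps
# the lr at 0.004, while a global batch of 2048 gives 0.004 * sqrt(2) ~= 0.00566.
assert abs(_example_scaled_lr(0.004, 32, 32) - 0.004) < 1e-12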
| EXA-1-master | exa/models/dinov2/dinov2/utils/config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Union
import numpy as np
import torch
TypeSpec = Union[str, np.dtype, torch.dtype]
_NUMPY_TO_TORCH_DTYPE: Dict[np.dtype, torch.dtype] = {
np.dtype("bool"): torch.bool,
np.dtype("uint8"): torch.uint8,
np.dtype("int8"): torch.int8,
np.dtype("int16"): torch.int16,
np.dtype("int32"): torch.int32,
np.dtype("int64"): torch.int64,
np.dtype("float16"): torch.float16,
np.dtype("float32"): torch.float32,
np.dtype("float64"): torch.float64,
np.dtype("complex64"): torch.complex64,
np.dtype("complex128"): torch.complex128,
}
def as_torch_dtype(dtype: TypeSpec) -> torch.dtype:
if isinstance(dtype, torch.dtype):
return dtype
if isinstance(dtype, str):
dtype = np.dtype(dtype)
assert isinstance(dtype, np.dtype), f"Expected an instance of numpy dtype, got {type(dtype)}"
return _NUMPY_TO_TORCH_DTYPE[dtype]
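# Hedged usage sketch (not part of the original file): as_torch_dtype accepts a string, a
# numpy dtype, or a torch dtype and always returns a torch dtype; torch dtypes pass through
# unchanged. The calls below are illustrative only.
assert as_torch_dtype("float32") is torch.float32
assert as_torch_dtype(np.dtype("int64")) is torch.int64
assert as_torch_dtype(torch.bfloat16) is torch.bfloat16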
| EXA-1-master | exa/models/dinov2/dinov2/utils/dtype.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/dinov2/dinov2/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import subprocess
from urllib.parse import urlparse
import numpy as np
import torch
from torch import nn
logger = logging.getLogger("dinov2")
def load_pretrained_weights(model, pretrained_weights, checkpoint_key):
if urlparse(pretrained_weights).scheme: # If it looks like a URL
state_dict = torch.hub.load_state_dict_from_url(pretrained_weights, map_location="cpu")
else:
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
logger.info(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
logger.info("Pretrained weights found at {} and loaded with msg: {}".format(pretrained_weights, msg))
def fix_random_seeds(seed=31):
"""
Fix random seeds.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()
sha = "N/A"
diff = "clean"
branch = "N/A"
try:
sha = _run(["git", "rev-parse", "HEAD"])
subprocess.check_output(["git", "diff"], cwd=cwd)
diff = _run(["git", "diff-index", "HEAD"])
diff = "has uncommited changes" if diff else "clean"
branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
class CosineScheduler(object):
def __init__(self, base_value, final_value, total_iters, warmup_iters=0, start_warmup_value=0, freeze_iters=0):
super().__init__()
self.final_value = final_value
self.total_iters = total_iters
freeze_schedule = np.zeros((freeze_iters))
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(total_iters - warmup_iters - freeze_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
self.schedule = np.concatenate((freeze_schedule, warmup_schedule, schedule))
assert len(self.schedule) == self.total_iters
def __getitem__(self, it):
if it >= self.total_iters:
return self.final_value
else:
return self.schedule[it]
def has_batchnorms(model):
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
for name, module in model.named_modules():
if isinstance(module, bn_types):
return True
return False
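# Hedged usage sketch (not part of the original file): CosineScheduler precomputes one value
# per iteration -- an optional frozen phase, a linear warmup, then a cosine decay -- and is
# simply indexed with the current iteration. The numbers below are illustrative only.
_example_sched = CosineScheduler(base_value=1e-3, final_value=1e-6, total_iters=1000, warmup_iters=100)
assert _example_sched[0] == 0.0                  # warmup starts at start_warmup_value (default 0)
assert abs(_example_sched[100] - 1e-3) < 1e-9    # peak value right after warmup
assert _example_sched[5000] == 1e-6              # past total_iters the final value is returned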
| EXA-1-master | exa/models/dinov2/dinov2/utils/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import os
from pathlib import Path
from typing import Any, Dict, Optional
class ClusterType(Enum):
AWS = "aws"
FAIR = "fair"
RSC = "rsc"
def _guess_cluster_type() -> ClusterType:
uname = os.uname()
if uname.sysname == "Linux":
if uname.release.endswith("-aws"):
# Linux kernel versions on AWS instances are of the form "5.4.0-1051-aws"
return ClusterType.AWS
elif uname.nodename.startswith("rsc"):
# Linux kernel versions on RSC instances are standard ones but hostnames start with "rsc"
return ClusterType.RSC
return ClusterType.FAIR
def get_cluster_type(cluster_type: Optional[ClusterType] = None) -> Optional[ClusterType]:
if cluster_type is None:
return _guess_cluster_type()
return cluster_type
def get_checkpoint_path(cluster_type: Optional[ClusterType] = None) -> Optional[Path]:
cluster_type = get_cluster_type(cluster_type)
if cluster_type is None:
return None
CHECKPOINT_DIRNAMES = {
ClusterType.AWS: "checkpoints",
ClusterType.FAIR: "checkpoint",
ClusterType.RSC: "checkpoint/dino",
}
return Path("/") / CHECKPOINT_DIRNAMES[cluster_type]
def get_user_checkpoint_path(cluster_type: Optional[ClusterType] = None) -> Optional[Path]:
checkpoint_path = get_checkpoint_path(cluster_type)
if checkpoint_path is None:
return None
username = os.environ.get("USER")
assert username is not None
return checkpoint_path / username
def get_slurm_partition(cluster_type: Optional[ClusterType] = None) -> Optional[str]:
cluster_type = get_cluster_type(cluster_type)
if cluster_type is None:
return None
SLURM_PARTITIONS = {
ClusterType.AWS: "learnlab",
ClusterType.FAIR: "learnlab",
ClusterType.RSC: "learn",
}
return SLURM_PARTITIONS[cluster_type]
def get_slurm_executor_parameters(
nodes: int, num_gpus_per_node: int, cluster_type: Optional[ClusterType] = None, **kwargs
) -> Dict[str, Any]:
# create default parameters
params = {
"mem_gb": 0, # Requests all memory on a node, see https://slurm.schedmd.com/sbatch.html
"gpus_per_node": num_gpus_per_node,
"tasks_per_node": num_gpus_per_node, # one task per GPU
"cpus_per_task": 10,
"nodes": nodes,
"slurm_partition": get_slurm_partition(cluster_type),
}
# apply cluster-specific adjustments
cluster_type = get_cluster_type(cluster_type)
if cluster_type == ClusterType.AWS:
params["cpus_per_task"] = 12
del params["mem_gb"]
elif cluster_type == ClusterType.RSC:
params["cpus_per_task"] = 12
# set additional parameters / apply overrides
params.update(kwargs)
return params
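# Hedged usage sketch (not part of the original file): a submitit-style parameter dict for a
# hypothetical 2-node, 8-GPU-per-node job. The exact values depend on the detected cluster;
# on a machine that falls back to the FAIR default this yields something like
# {"mem_gb": 0, "gpus_per_node": 8, "tasks_per_node": 8, "cpus_per_task": 10, "nodes": 2,
#  "slurm_partition": "learnlab"}, while AWS drops "mem_gb" and bumps cpus_per_task to 12.
_example_params = get_slurm_executor_parameters(nodes=2, num_gpus_per_node=8)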
| EXA-1-master | exa/models/dinov2/dinov2/utils/cluster.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import logging
logger = logging.getLogger("dinov2")
def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12, force_is_backbone=False, chunked_blocks=False):
"""
Calculate lr decay rate for different ViT blocks.
Args:
name (string): parameter name.
lr_decay_rate (float): base lr decay rate.
num_layers (int): number of ViT blocks.
Returns:
lr decay rate for the given parameter.
"""
layer_id = num_layers + 1
if name.startswith("backbone") or force_is_backbone:
if ".pos_embed" in name or ".patch_embed" in name or ".mask_token" in name or ".cls_token" in name:
layer_id = 0
elif force_is_backbone and (
"pos_embed" in name or "patch_embed" in name or "mask_token" in name or "cls_token" in name
):
layer_id = 0
elif ".blocks." in name and ".residual." not in name:
layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1
elif chunked_blocks and "blocks." in name and "residual." not in name:
layer_id = int(name[name.find("blocks.") :].split(".")[2]) + 1
elif "blocks." in name and "residual." not in name:
layer_id = int(name[name.find("blocks.") :].split(".")[1]) + 1
return lr_decay_rate ** (num_layers + 1 - layer_id)
def get_params_groups_with_decay(model, lr_decay_rate=1.0, patch_embed_lr_mult=1.0):
chunked_blocks = False
if hasattr(model, "n_blocks"):
logger.info("chunked fsdp")
n_blocks = model.n_blocks
chunked_blocks = model.chunked_blocks
elif hasattr(model, "blocks"):
logger.info("first code branch")
n_blocks = len(model.blocks)
elif hasattr(model, "backbone"):
logger.info("second code branch")
n_blocks = len(model.backbone.blocks)
else:
logger.info("else code branch")
n_blocks = 0
all_param_groups = []
for name, param in model.named_parameters():
name = name.replace("_fsdp_wrapped_module.", "")
if not param.requires_grad:
continue
decay_rate = get_vit_lr_decay_rate(
name, lr_decay_rate, num_layers=n_blocks, force_is_backbone=n_blocks > 0, chunked_blocks=chunked_blocks
)
d = {"params": param, "is_last_layer": False, "lr_multiplier": decay_rate, "wd_multiplier": 1.0, "name": name}
if "last_layer" in name:
d.update({"is_last_layer": True})
if name.endswith(".bias") or "norm" in name or "gamma" in name:
d.update({"wd_multiplier": 0.0})
if "patch_embed" in name:
d.update({"lr_multiplier": d["lr_multiplier"] * patch_embed_lr_mult})
all_param_groups.append(d)
logger.info(f"""{name}: lr_multiplier: {d["lr_multiplier"]}, wd_multiplier: {d["wd_multiplier"]}""")
return all_param_groups
def fuse_params_groups(all_params_groups, keys=("lr_multiplier", "wd_multiplier", "is_last_layer")):
fused_params_groups = defaultdict(lambda: {"params": []})
for d in all_params_groups:
identifier = ""
for k in keys:
identifier += k + str(d[k]) + "_"
for k in keys:
fused_params_groups[identifier][k] = d[k]
fused_params_groups[identifier]["params"].append(d["params"])
return fused_params_groups.values()
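# Hedged worked example (not part of the original file): with lr_decay_rate=0.9 and a 12-block
# ViT, a parameter gets multiplier 0.9 ** (13 - layer_id): patch/pos embeddings (layer_id 0)
# are decayed the most, the last block (layer_id 12) the least, and parameters outside the
# blocks keep multiplier 1.0. The parameter names below are illustrative only.
assert abs(get_vit_lr_decay_rate("backbone.patch_embed.proj.weight", 0.9, num_layers=12) - 0.9**13) < 1e-12
assert abs(get_vit_lr_decay_rate("backbone.blocks.11.mlp.fc1.weight", 0.9, num_layers=12) - 0.9) < 1e-12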
| EXA-1-master | exa/models/dinov2/dinov2/utils/param_groups.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
from functools import partial
import math
import logging
from typing import Sequence, Tuple, Union, Callable
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn.init import trunc_normal_
from dinov2.layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
logger = logging.getLogger("dinov2")
def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = ".".join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
class BlockChunk(nn.ModuleList):
def forward(self, x):
for b in self:
x = b(x)
return x
class DinoVisionTransformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
ffn_bias=True,
proj_bias=True,
drop_path_rate=0.0,
drop_path_uniform=False,
init_values=None, # for layerscale: None or 0 => no layerscale
embed_layer=PatchEmbed,
act_layer=nn.GELU,
block_fn=Block,
ffn_layer="mlp",
block_chunks=1,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
proj_bias (bool): enable bias for proj in attn if True
ffn_bias (bool): enable bias for ffn if True
drop_path_rate (float): stochastic depth rate
drop_path_uniform (bool): apply uniform drop rate across blocks
weight_init (str): weight init scheme
init_values (float): layer-scale init values
embed_layer (nn.Module): patch embedding layer
act_layer (nn.Module): MLP activation layer
block_fn (nn.Module): transformer block class
ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
"""
super().__init__()
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 1
self.n_blocks = depth
self.num_heads = num_heads
self.patch_size = patch_size
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
if drop_path_uniform is True:
dpr = [drop_path_rate] * depth
else:
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
if ffn_layer == "mlp":
logger.info("using MLP layer as FFN")
ffn_layer = Mlp
elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
logger.info("using SwiGLU layer as FFN")
ffn_layer = SwiGLUFFNFused
elif ffn_layer == "identity":
logger.info("using Identity layer as FFN")
def f(*args, **kwargs):
return nn.Identity()
ffn_layer = f
else:
raise NotImplementedError
blocks_list = [
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_bias=proj_bias,
ffn_bias=ffn_bias,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
ffn_layer=ffn_layer,
init_values=init_values,
)
for i in range(depth)
]
if block_chunks > 0:
self.chunked_blocks = True
chunked_blocks = []
chunksize = depth // block_chunks
for i in range(0, depth, chunksize):
# this is to keep the block index consistent if we chunk the block list
chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
else:
self.chunked_blocks = False
self.blocks = nn.ModuleList(blocks_list)
self.norm = norm_layer(embed_dim)
self.head = nn.Identity()
self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
self.init_weights()
def init_weights(self):
trunc_normal_(self.pos_embed, std=0.02)
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(init_weights_vit_timm, self)
def interpolate_pos_encoding(self, x, w, h):
previous_dtype = x.dtype
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
pos_embed = self.pos_embed.float()
class_pos_embed = pos_embed[:, 0]
patch_pos_embed = pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_size
h0 = h // self.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode="bicubic",
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
def prepare_tokens_with_masks(self, x, masks=None):
B, nc, w, h = x.shape
x = self.patch_embed(x)
if masks is not None:
x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
x = x + self.interpolate_pos_encoding(x, w, h)
return x
def forward_features_list(self, x_list, masks_list):
x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
for blk in self.blocks:
x = blk(x)
all_x = x
output = []
for x, masks in zip(all_x, masks_list):
x_norm = self.norm(x)
output.append(
{
"x_norm_clstoken": x_norm[:, 0],
"x_norm_patchtokens": x_norm[:, 1:],
"x_prenorm": x,
"masks": masks,
}
)
return output
def forward_features(self, x, masks=None):
if isinstance(x, list):
return self.forward_features_list(x, masks)
x = self.prepare_tokens_with_masks(x, masks)
for blk in self.blocks:
x = blk(x)
x_norm = self.norm(x)
return {
"x_norm_clstoken": x_norm[:, 0],
"x_norm_patchtokens": x_norm[:, 1:],
"x_prenorm": x,
"masks": masks,
}
def _get_intermediate_layers_not_chunked(self, x, n=1):
x = self.prepare_tokens_with_masks(x)
# If n is an int, take the n last blocks. If it's a list, take them
output, total_block_len = [], len(self.blocks)
blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
for i, blk in enumerate(self.blocks):
x = blk(x)
if i in blocks_to_take:
output.append(x)
assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
return output
def _get_intermediate_layers_chunked(self, x, n=1):
x = self.prepare_tokens_with_masks(x)
output, i, total_block_len = [], 0, len(self.blocks[-1])
# If n is an int, take the n last blocks. If it's a list, take them
blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
for block_chunk in self.blocks:
for blk in block_chunk[i:]: # Passing the nn.Identity()
x = blk(x)
if i in blocks_to_take:
output.append(x)
i += 1
assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
return output
def get_intermediate_layers(
self,
x: torch.Tensor,
n: Union[int, Sequence] = 1, # Layers or n last layers to take
reshape: bool = False,
return_class_token: bool = False,
norm=True,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
if self.chunked_blocks:
outputs = self._get_intermediate_layers_chunked(x, n)
else:
outputs = self._get_intermediate_layers_not_chunked(x, n)
if norm:
outputs = [self.norm(out) for out in outputs]
class_tokens = [out[:, 0] for out in outputs]
outputs = [out[:, 1:] for out in outputs]
if reshape:
B, _, w, h = x.shape
outputs = [
out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
for out in outputs
]
if return_class_token:
return tuple(zip(outputs, class_tokens))
return tuple(outputs)
def forward(self, *args, is_training=False, **kwargs):
ret = self.forward_features(*args, **kwargs)
if is_training:
return ret
else:
return self.head(ret["x_norm_clstoken"])
def init_weights_vit_timm(module: nn.Module, name: str = ""):
"""ViT weight initialization, original timm impl (for reproducibility)"""
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=0.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
def vit_small(patch_size=16, **kwargs):
model = DinoVisionTransformer(
patch_size=patch_size,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4,
block_fn=partial(Block, attn_class=MemEffAttention),
**kwargs,
)
return model
def vit_base(patch_size=16, **kwargs):
model = DinoVisionTransformer(
patch_size=patch_size,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
block_fn=partial(Block, attn_class=MemEffAttention),
**kwargs,
)
return model
def vit_large(patch_size=16, **kwargs):
model = DinoVisionTransformer(
patch_size=patch_size,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
block_fn=partial(Block, attn_class=MemEffAttention),
**kwargs,
)
return model
def vit_giant2(patch_size=16, **kwargs):
"""
Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
"""
model = DinoVisionTransformer(
patch_size=patch_size,
embed_dim=1536,
depth=40,
num_heads=24,
mlp_ratio=4,
block_fn=partial(Block, attn_class=MemEffAttention),
**kwargs,
)
return model
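# Hedged usage sketch (not part of the original file): building a small backbone and pulling a
# patch-token feature map from the last block, assuming dinov2.layers (and its optional
# xformers dependency) import cleanly. block_chunks=0 keeps a plain block list; a 224x224
# input with patch_size=14 gives a 16x16 token grid. Sizes below are illustrative only.
if __name__ == "__main__":
    _model = vit_small(patch_size=14, img_size=224, block_chunks=0)
    _x = torch.randn(2, 3, 224, 224)
    (_feat,) = _model.get_intermediate_layers(_x, n=1, reshape=True)
    print(_feat.shape)  # expected: torch.Size([2, 384, 16, 16])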
| EXA-1-master | exa/models/dinov2/dinov2/models/vision_transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from . import vision_transformer as vits
logger = logging.getLogger("dinov2")
def build_model(args, only_teacher=False, img_size=224):
args.arch = args.arch.removesuffix("_memeff")
if "vit" in args.arch:
vit_kwargs = dict(
img_size=img_size,
patch_size=args.patch_size,
init_values=args.layerscale,
ffn_layer=args.ffn_layer,
block_chunks=args.block_chunks,
qkv_bias=args.qkv_bias,
proj_bias=args.proj_bias,
ffn_bias=args.ffn_bias,
)
teacher = vits.__dict__[args.arch](**vit_kwargs)
if only_teacher:
return teacher, teacher.embed_dim
student = vits.__dict__[args.arch](
**vit_kwargs,
drop_path_rate=args.drop_path_rate,
drop_path_uniform=args.drop_path_uniform,
)
embed_dim = student.embed_dim
return student, teacher, embed_dim
def build_model_from_cfg(cfg, only_teacher=False):
return build_model(cfg.student, only_teacher=only_teacher, img_size=cfg.crops.global_crops_size)
| EXA-1-master | exa/models/dinov2/dinov2/models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any
import torch
import dinov2.distributed as distributed
from functools import partial
from fvcore.common.checkpoint import Checkpointer
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp import StateDictType
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.distributed.fsdp._runtime_utils import _reshard
def get_fsdp_wrapper(model_cfg, modules_to_wrap=set()):
sharding_strategy_dict = {
"NO_SHARD": ShardingStrategy.NO_SHARD,
"SHARD_GRAD_OP": ShardingStrategy.SHARD_GRAD_OP,
"FULL_SHARD": ShardingStrategy.FULL_SHARD,
}
dtype_dict = {
"fp32": torch.float32,
"fp16": torch.float16,
"bf16": torch.bfloat16,
}
mixed_precision_config = MixedPrecision(
param_dtype=dtype_dict[model_cfg.mixed_precision.param_dtype],
reduce_dtype=dtype_dict[model_cfg.mixed_precision.reduce_dtype],
buffer_dtype=dtype_dict[model_cfg.mixed_precision.buffer_dtype],
)
sharding_strategy_config = sharding_strategy_dict[model_cfg.sharding_strategy]
local_rank = distributed.get_local_rank()
fsdp_wrapper = partial(
FSDP,
sharding_strategy=sharding_strategy_config,
mixed_precision=mixed_precision_config,
device_id=local_rank,
sync_module_states=True,
use_orig_params=True,
auto_wrap_policy=ModuleWrapPolicy(modules_to_wrap),
)
return fsdp_wrapper
def is_fsdp(x):
return isinstance(x, FSDP)
def is_sharded_fsdp(x):
return is_fsdp(x) and x.sharding_strategy is not ShardingStrategy.NO_SHARD
def free_if_fsdp(x):
if is_sharded_fsdp(x):
handles = x._handles
true_list = [True for h in handles]
_reshard(x, handles, true_list)
def get_fsdp_modules(x):
return FSDP.fsdp_modules(x)
def reshard_fsdp_model(x):
for m in get_fsdp_modules(x):
free_if_fsdp(m)
def rankstr():
return f"rank_{distributed.get_global_rank()}"
class FSDPCheckpointer(Checkpointer):
def save(self, name: str, **kwargs: Any) -> None:
"""
Dump model and checkpointables to a file.
Args:
name (str): name of the file.
kwargs (dict): extra arbitrary data to save.
"""
if not self.save_dir or not self.save_to_disk:
return
data = {}
with FSDP.state_dict_type(self.model, StateDictType.LOCAL_STATE_DICT):
data["model"] = self.model.state_dict()
# data["model"] = self.model.state_dict()
for key, obj in self.checkpointables.items():
data[key] = obj.state_dict()
data.update(kwargs)
basename = f"{name}.{rankstr()}.pth"
save_file = os.path.join(self.save_dir, basename)
assert os.path.basename(save_file) == basename, basename
self.logger.info("Saving checkpoint to {}".format(save_file))
with self.path_manager.open(save_file, "wb") as f:
torch.save(data, f)
self.tag_last_checkpoint(basename)
def load(self, *args, **kwargs):
with FSDP.state_dict_type(self.model, StateDictType.LOCAL_STATE_DICT):
return super().load(*args, **kwargs)
def has_checkpoint(self) -> bool:
"""
Returns:
bool: whether a checkpoint exists in the target directory.
"""
save_file = os.path.join(self.save_dir, f"last_checkpoint.{rankstr()}")
return self.path_manager.exists(save_file)
def get_checkpoint_file(self) -> str:
"""
Returns:
str: The latest checkpoint file in target directory.
"""
save_file = os.path.join(self.save_dir, f"last_checkpoint.{rankstr()}")
try:
with self.path_manager.open(save_file, "r") as f:
last_saved = f.read().strip()
except IOError:
# if file doesn't exist, maybe because it has just been
# deleted by a separate process
return ""
# pyre-fixme[6]: For 2nd param expected `Union[PathLike[str], str]` but got
# `Union[bytes, str]`.
return os.path.join(self.save_dir, last_saved)
def tag_last_checkpoint(self, last_filename_basename: str) -> None:
"""
Tag the last checkpoint.
Args:
last_filename_basename (str): the basename of the last filename.
"""
if distributed.is_enabled():
torch.distributed.barrier()
save_file = os.path.join(self.save_dir, f"last_checkpoint.{rankstr()}")
with self.path_manager.open(save_file, "w") as f:
f.write(last_filename_basename) # pyre-ignore
ShardedGradScaler = ShardedGradScaler
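# Hedged usage sketch (not part of the original file): get_fsdp_wrapper turns a
# compute-precision sub-config into a partial FSDP constructor. The config shape below mirrors
# the fields the function reads; the wrapping calls are left commented out because they need
# an initialized process group and a CUDA device.
from omegaconf import OmegaConf
_example_model_cfg = OmegaConf.create(
    {
        "sharding_strategy": "SHARD_GRAD_OP",
        "mixed_precision": {"param_dtype": "fp16", "reduce_dtype": "fp32", "buffer_dtype": "fp32"},
    }
)
# fsdp_ctor = get_fsdp_wrapper(_example_model_cfg, modules_to_wrap={torch.nn.Linear})
# wrapped = fsdp_ctor(torch.nn.Linear(8, 8).cuda())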
| EXA-1-master | exa/models/dinov2/dinov2/fsdp/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
from omegaconf import OmegaConf
def load_config(config_name: str):
config_filename = config_name + ".yaml"
return OmegaConf.load(pathlib.Path(__file__).parent.resolve() / config_filename)
dinov2_default_config = load_config("ssl_default_config")
def load_and_merge_config(config_name: str):
default_config = OmegaConf.create(dinov2_default_config)
loaded_config = load_config(config_name)
return OmegaConf.merge(default_config, loaded_config)
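# Hedged usage sketch (not part of the original file): load_and_merge_config layers a named
# yaml file from this directory on top of the packaged defaults. The config name below is
# hypothetical; any yaml living next to this module works.
# cfg = load_and_merge_config("train/vitl16_short")
# print(OmegaConf.to_yaml(cfg))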
| EXA-1-master | exa/models/dinov2/dinov2/configs/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .train import get_args_parser, main
from .ssl_meta_arch import SSLMetaArch
| EXA-1-master | exa/models/dinov2/dinov2/train/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import logging
import torch
from torch import nn
from dinov2.loss import DINOLoss, iBOTPatchLoss, KoLeoLoss
from dinov2.models import build_model_from_cfg
from dinov2.layers import DINOHead
from dinov2.utils.utils import has_batchnorms
from dinov2.utils.param_groups import get_params_groups_with_decay, fuse_params_groups
from dinov2.fsdp import get_fsdp_wrapper, ShardedGradScaler, get_fsdp_modules, reshard_fsdp_model
from dinov2.models.vision_transformer import BlockChunk
try:
from xformers.ops import fmha
XFORMERS_AVAILABLE = True
except ImportError:
XFORMERS_AVAILABLE = False
assert XFORMERS_AVAILABLE, "xFormers is required for DINOv2 training"
logger = logging.getLogger("dinov2")
class SSLMetaArch(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.fp16_scaler = ShardedGradScaler() if cfg.compute_precision.grad_scaler else None
student_model_dict = dict()
teacher_model_dict = dict()
student_backbone, teacher_backbone, embed_dim = build_model_from_cfg(cfg)
student_model_dict["backbone"] = student_backbone
teacher_model_dict["backbone"] = teacher_backbone
logger.info(f"OPTIONS -- architecture : embed_dim: {embed_dim}")
if cfg.student.pretrained_weights:
chkpt = torch.load(cfg.student.pretrained_weights)
logger.info(f"OPTIONS -- pretrained weights: loading from {cfg.student.pretrained_weights}")
student_backbone.load_state_dict(chkpt["model"], strict=False)
self.embed_dim = embed_dim
self.dino_out_dim = cfg.dino.head_n_prototypes
self.do_dino = cfg.dino.loss_weight > 0
self.do_koleo = cfg.dino.koleo_loss_weight > 0
self.do_ibot = cfg.ibot.loss_weight > 0
self.ibot_separate_head = cfg.ibot.separate_head
logger.info("OPTIONS -- DINO")
if self.do_dino:
logger.info(f"OPTIONS -- DINO -- loss_weight: {cfg.dino.loss_weight}")
logger.info(f"OPTIONS -- DINO -- head_n_prototypes: {cfg.dino.head_n_prototypes}")
logger.info(f"OPTIONS -- DINO -- head_bottleneck_dim: {cfg.dino.head_bottleneck_dim}")
logger.info(f"OPTIONS -- DINO -- head_hidden_dim: {cfg.dino.head_hidden_dim}")
self.dino_loss_weight = cfg.dino.loss_weight
dino_head = partial(
DINOHead,
in_dim=embed_dim,
out_dim=cfg.dino.head_n_prototypes,
hidden_dim=cfg.dino.head_hidden_dim,
bottleneck_dim=cfg.dino.head_bottleneck_dim,
nlayers=cfg.dino.head_nlayers,
)
self.dino_loss = DINOLoss(self.dino_out_dim)
if self.do_koleo:
logger.info("OPTIONS -- DINO -- applying KOLEO regularization")
self.koleo_loss = KoLeoLoss()
else:
logger.info("OPTIONS -- DINO -- not using DINO")
if self.do_dino or self.do_ibot:
student_model_dict["dino_head"] = dino_head()
teacher_model_dict["dino_head"] = dino_head()
logger.info("OPTIONS -- IBOT")
logger.info(f"OPTIONS -- IBOT -- loss_weight: {cfg.ibot.loss_weight}")
logger.info(f"OPTIONS -- IBOT masking -- ibot_mask_ratio_tuple: {cfg.ibot.mask_ratio_min_max}")
logger.info(f"OPTIONS -- IBOT masking -- ibot_mask_sample_probability: {cfg.ibot.mask_sample_probability}")
if self.do_ibot:
self.ibot_loss_weight = cfg.ibot.loss_weight
assert max(cfg.ibot.mask_ratio_min_max) > 0, "please provide a positive mask ratio tuple for ibot"
assert cfg.ibot.mask_sample_probability > 0, "please provide a positive mask probability for ibot"
self.ibot_out_dim = cfg.ibot.head_n_prototypes if self.ibot_separate_head else cfg.dino.head_n_prototypes
self.ibot_patch_loss = iBOTPatchLoss(self.ibot_out_dim)
if self.ibot_separate_head:
logger.info(f"OPTIONS -- IBOT -- loss_weight: {cfg.ibot.loss_weight}")
logger.info(f"OPTIONS -- IBOT -- head_n_prototypes: {cfg.ibot.head_n_prototypes}")
logger.info(f"OPTIONS -- IBOT -- head_bottleneck_dim: {cfg.ibot.head_bottleneck_dim}")
logger.info(f"OPTIONS -- IBOT -- head_hidden_dim: {cfg.ibot.head_hidden_dim}")
ibot_head = partial(
DINOHead,
in_dim=embed_dim,
out_dim=cfg.ibot.head_n_prototypes,
hidden_dim=cfg.ibot.head_hidden_dim,
bottleneck_dim=cfg.ibot.head_bottleneck_dim,
nlayers=cfg.ibot.head_nlayers,
)
student_model_dict["ibot_head"] = ibot_head()
teacher_model_dict["ibot_head"] = ibot_head()
else:
logger.info("OPTIONS -- IBOT -- head shared with DINO")
self.need_to_synchronize_fsdp_streams = True
self.student = nn.ModuleDict(student_model_dict)
self.teacher = nn.ModuleDict(teacher_model_dict)
# there is no backpropagation through the teacher, so no need for gradients
for p in self.teacher.parameters():
p.requires_grad = False
logger.info(f"Student and Teacher are built: they are both {cfg.student.arch} network.")
def forward(self, inputs):
raise NotImplementedError
def backprop_loss(self, loss):
if self.fp16_scaler is not None:
self.fp16_scaler.scale(loss).backward()
else:
loss.backward()
def forward_backward(self, images, teacher_temp):
n_global_crops = 2
assert n_global_crops == 2
n_local_crops = self.cfg.crops.local_crops_number
global_crops = images["collated_global_crops"].cuda(non_blocking=True)
local_crops = images["collated_local_crops"].cuda(non_blocking=True)
masks = images["collated_masks"].cuda(non_blocking=True)
mask_indices_list = images["mask_indices_list"].cuda(non_blocking=True)
n_masked_patches_tensor = images["n_masked_patches"].cuda(non_blocking=True)
n_masked_patches = mask_indices_list.shape[0]
upperbound = images["upperbound"]
masks_weight = images["masks_weight"].cuda(non_blocking=True)
n_local_crops_loss_terms = max(n_local_crops * n_global_crops, 1)
n_global_crops_loss_terms = (n_global_crops - 1) * n_global_crops
do_dino = self.do_dino
do_ibot = self.do_ibot
# loss scales
ibot_loss_scale = 1.0 / n_global_crops
# teacher output
@torch.no_grad()
def get_teacher_output():
x, n_global_crops_teacher = global_crops, n_global_crops
teacher_backbone_output_dict = self.teacher.backbone(x, is_training=True)
teacher_cls_tokens = teacher_backbone_output_dict["x_norm_clstoken"]
teacher_cls_tokens = teacher_cls_tokens.chunk(n_global_crops_teacher)
# watch out: these are chunked and cat'd in reverse so A is matched to B in the global crops dino loss
teacher_cls_tokens = torch.cat((teacher_cls_tokens[1], teacher_cls_tokens[0]))
ibot_teacher_patch_tokens = teacher_backbone_output_dict["x_norm_patchtokens"]
_dim = ibot_teacher_patch_tokens.shape[-1]
n_cls_tokens = teacher_cls_tokens.shape[0]
if do_ibot and not self.ibot_separate_head:
buffer_tensor_teacher = ibot_teacher_patch_tokens.new_zeros(upperbound + n_cls_tokens, _dim)
buffer_tensor_teacher[:n_cls_tokens].copy_(teacher_cls_tokens)
torch.index_select(
ibot_teacher_patch_tokens.flatten(0, 1),
dim=0,
index=mask_indices_list,
out=buffer_tensor_teacher[n_cls_tokens : n_cls_tokens + n_masked_patches],
)
tokens_after_head = self.teacher.dino_head(buffer_tensor_teacher)
teacher_cls_tokens_after_head = tokens_after_head[:n_cls_tokens]
masked_teacher_patch_tokens_after_head = tokens_after_head[
n_cls_tokens : n_cls_tokens + n_masked_patches
]
elif do_ibot and self.ibot_separate_head:
buffer_tensor_teacher = ibot_teacher_patch_tokens.new_zeros(upperbound, _dim)
torch.index_select(
ibot_teacher_patch_tokens.flatten(0, 1),
dim=0,
index=mask_indices_list,
out=buffer_tensor_teacher[:n_masked_patches],
)
teacher_cls_tokens_after_head = self.teacher.dino_head(teacher_cls_tokens)
masked_teacher_patch_tokens_after_head = self.teacher.ibot_head(buffer_tensor_teacher)[
:n_masked_patches
]
else:
teacher_cls_tokens_after_head = self.teacher.dino_head(teacher_cls_tokens)
masked_teacher_ibot_softmaxed_centered = None
if self.cfg.train.centering == "centering":
teacher_dino_softmaxed_centered_list = self.dino_loss.softmax_center_teacher(
teacher_cls_tokens_after_head, teacher_temp=teacher_temp
).view(n_global_crops_teacher, -1, *teacher_cls_tokens_after_head.shape[1:])
self.dino_loss.update_center(teacher_cls_tokens_after_head)
if do_ibot:
masked_teacher_patch_tokens_after_head = masked_teacher_patch_tokens_after_head.unsqueeze(0)
masked_teacher_ibot_softmaxed_centered = self.ibot_patch_loss.softmax_center_teacher(
masked_teacher_patch_tokens_after_head[:, :n_masked_patches], teacher_temp=teacher_temp
)
masked_teacher_ibot_softmaxed_centered = masked_teacher_ibot_softmaxed_centered.squeeze(0)
self.ibot_patch_loss.update_center(masked_teacher_patch_tokens_after_head[:n_masked_patches])
elif self.cfg.train.centering == "sinkhorn_knopp":
teacher_dino_softmaxed_centered_list = self.dino_loss.sinkhorn_knopp_teacher(
teacher_cls_tokens_after_head, teacher_temp=teacher_temp
).view(n_global_crops_teacher, -1, *teacher_cls_tokens_after_head.shape[1:])
if do_ibot:
masked_teacher_ibot_softmaxed_centered = self.ibot_patch_loss.sinkhorn_knopp_teacher(
masked_teacher_patch_tokens_after_head,
teacher_temp=teacher_temp,
n_masked_patches_tensor=n_masked_patches_tensor,
)
else:
raise NotImplementedError
return teacher_dino_softmaxed_centered_list, masked_teacher_ibot_softmaxed_centered
teacher_dino_softmaxed_centered_list, masked_teacher_ibot_softmaxed_centered = get_teacher_output()
reshard_fsdp_model(self.teacher)
loss_dict = {}
loss_accumulator = 0 # for backprop
student_global_backbone_output_dict, student_local_backbone_output_dict = self.student.backbone(
[global_crops, local_crops], masks=[masks, None], is_training=True
)
inputs_for_student_head_list = []
# 1a: local crops cls tokens
student_local_cls_tokens = student_local_backbone_output_dict["x_norm_clstoken"]
inputs_for_student_head_list.append(student_local_cls_tokens.unsqueeze(0))
# 1b: global crops cls tokens
student_global_cls_tokens = student_global_backbone_output_dict["x_norm_clstoken"]
inputs_for_student_head_list.append(student_global_cls_tokens.unsqueeze(0))
# 1c: global crops patch tokens
if do_ibot:
_dim = student_global_backbone_output_dict["x_norm_clstoken"].shape[-1]
ibot_student_patch_tokens = student_global_backbone_output_dict["x_norm_patchtokens"]
buffer_tensor_patch_tokens = ibot_student_patch_tokens.new_zeros(upperbound, _dim)
buffer_tensor_patch_tokens[:n_masked_patches].copy_(
torch.index_select(ibot_student_patch_tokens.flatten(0, 1), dim=0, index=mask_indices_list)
)
if not self.ibot_separate_head:
inputs_for_student_head_list.append(buffer_tensor_patch_tokens.unsqueeze(0))
else:
student_global_masked_patch_tokens_after_head = self.student.ibot_head(buffer_tensor_patch_tokens)[
:n_masked_patches
]
# 2: run
_attn_bias, cat_inputs = fmha.BlockDiagonalMask.from_tensor_list(inputs_for_student_head_list)
outputs_list = _attn_bias.split(self.student.dino_head(cat_inputs))
# 3a: local crops cls tokens
student_local_cls_tokens_after_head = outputs_list.pop(0).squeeze(0)
# 3b: global crops cls tokens
student_global_cls_tokens_after_head = outputs_list.pop(0).squeeze(0)
# 3c: global crops patch tokens
if do_ibot and not self.ibot_separate_head:
student_global_masked_patch_tokens_after_head = outputs_list.pop(0).squeeze(0)[:n_masked_patches]
if n_local_crops > 0:
dino_local_crops_loss = self.dino_loss(
student_output_list=student_local_cls_tokens_after_head.chunk(n_local_crops),
teacher_out_softmaxed_centered_list=teacher_dino_softmaxed_centered_list,
) / (n_global_crops_loss_terms + n_local_crops_loss_terms)
# store for display
loss_dict["dino_local_crops_loss"] = dino_local_crops_loss
# accumulate loss
loss_accumulator += self.dino_loss_weight * dino_local_crops_loss
# process global crops
loss_scales = 2 # this is here since we process global crops together
if do_dino:
# compute loss
dino_global_crops_loss = (
self.dino_loss(
student_output_list=[student_global_cls_tokens_after_head],
teacher_out_softmaxed_centered_list=[
teacher_dino_softmaxed_centered_list.flatten(0, 1)
], # these were chunked and stacked in reverse so A is matched to B
)
* loss_scales
/ (n_global_crops_loss_terms + n_local_crops_loss_terms)
)
loss_dict["dino_global_crops_loss"] = dino_global_crops_loss
# accumulate loss
loss_accumulator += self.dino_loss_weight * dino_global_crops_loss
student_cls_tokens = student_global_cls_tokens
if self.do_koleo:
koleo_loss = self.cfg.dino.koleo_loss_weight * sum(
self.koleo_loss(p) for p in student_cls_tokens.chunk(2)
) # we don't apply koleo loss between cls tokens of the same image
loss_accumulator += koleo_loss
loss_dict["koleo_loss"] = (
koleo_loss / loss_scales
) # this is to display the same losses as before but we can remove eventually
if do_ibot:
# compute loss
ibot_patch_loss = (
self.ibot_patch_loss.forward_masked(
student_global_masked_patch_tokens_after_head,
masked_teacher_ibot_softmaxed_centered,
student_masks_flat=masks,
n_masked_patches=n_masked_patches,
masks_weight=masks_weight,
)
* loss_scales
* ibot_loss_scale
)
# store for display
loss_dict["ibot_loss"] = ibot_patch_loss / 2
# accumulate loss
loss_accumulator += self.ibot_loss_weight * ibot_patch_loss
self.backprop_loss(loss_accumulator)
self.fsdp_synchronize_streams()
return loss_dict
def fsdp_synchronize_streams(self):
if self.need_to_synchronize_fsdp_streams:
torch.cuda.synchronize()
self.student.dino_head._streams = (
self.teacher.dino_head._streams
) = self.student.backbone._streams = self.teacher.backbone._streams
self.need_to_synchronize_fsdp_streams = False
def update_teacher(self, m):
student_param_list = []
teacher_param_list = []
with torch.no_grad():
for k in self.student.keys():
for ms, mt in zip(get_fsdp_modules(self.student[k]), get_fsdp_modules(self.teacher[k])):
student_param_list += ms.params
teacher_param_list += mt.params
torch._foreach_mul_(teacher_param_list, m)
torch._foreach_add_(teacher_param_list, student_param_list, alpha=1 - m)
def train(self):
super().train()
self.teacher.eval()
def get_maybe_fused_params_for_submodel(self, m):
params_groups = get_params_groups_with_decay(
model=m,
lr_decay_rate=self.cfg.optim.layerwise_decay,
patch_embed_lr_mult=self.cfg.optim.patch_embed_lr_mult,
)
fused_params_groups = fuse_params_groups(params_groups)
logger.info("fusing param groups")
for g in fused_params_groups:
g["foreach"] = True
return fused_params_groups
def get_params_groups(self):
all_params_groups = []
for m in self.student.values():
all_params_groups += self.get_maybe_fused_params_for_submodel(m)
return all_params_groups
def prepare_for_distributed_training(self):
logger.info("DISTRIBUTED FSDP -- preparing model for distributed training")
if has_batchnorms(self.student):
raise NotImplementedError
# below will synchronize all student subnetworks across gpus:
for k, v in self.student.items():
self.teacher[k].load_state_dict(self.student[k].state_dict())
student_model_cfg = self.cfg.compute_precision.student[k]
self.student[k] = get_fsdp_wrapper(student_model_cfg, modules_to_wrap={BlockChunk})(self.student[k])
teacher_model_cfg = self.cfg.compute_precision.teacher[k]
self.teacher[k] = get_fsdp_wrapper(teacher_model_cfg, modules_to_wrap={BlockChunk})(self.teacher[k])
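# Hedged worked example (not part of the original file): with the 2 global crops asserted in
# forward_backward and, say, 8 local crops, the DINO terms are normalized by
# n_global_crops_loss_terms + n_local_crops_loss_terms. The local-crop count is illustrative.
_n_global_crops, _n_local_crops = 2, 8
_n_global_terms = (_n_global_crops - 1) * _n_global_crops        # 2
_n_local_terms = max(_n_local_crops * _n_global_crops, 1)        # 16
assert _n_global_terms + _n_local_terms == 18                    # divisor of both DINO losses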
| EXA-1-master | exa/models/dinov2/dinov2/train/ssl_meta_arch.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import math
import os
from functools import partial
from fvcore.common.checkpoint import PeriodicCheckpointer
import torch
from dinov2.data import SamplerType, make_data_loader, make_dataset
from dinov2.data import collate_data_and_cast, DataAugmentationDINO, MaskingGenerator
import dinov2.distributed as distributed
from dinov2.fsdp import FSDPCheckpointer
from dinov2.logging import MetricLogger
from dinov2.utils.config import setup
from dinov2.utils.utils import CosineScheduler
from dinov2.train.ssl_meta_arch import SSLMetaArch
torch.backends.cuda.matmul.allow_tf32 = True # PyTorch 1.12 sets this to False by default
logger = logging.getLogger("dinov2")
def get_args_parser(add_help: bool = True):
parser = argparse.ArgumentParser("DINOv2 training", add_help=add_help)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--no-resume",
action="store_true",
help="Whether to not attempt to resume from the checkpoint directory. ",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--eval", type=str, default="", help="Eval type to perform")
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
parser.add_argument(
"--output-dir",
"--output_dir",
default="",
type=str,
help="Output directory to save logs and checkpoints",
)
return parser
def build_optimizer(cfg, params_groups):
return torch.optim.AdamW(params_groups, betas=(cfg.optim.adamw_beta1, cfg.optim.adamw_beta2))
def build_schedulers(cfg):
OFFICIAL_EPOCH_LENGTH = cfg.train.OFFICIAL_EPOCH_LENGTH
lr = dict(
base_value=cfg.optim["lr"],
final_value=cfg.optim["min_lr"],
total_iters=cfg.optim["epochs"] * OFFICIAL_EPOCH_LENGTH,
warmup_iters=cfg.optim["warmup_epochs"] * OFFICIAL_EPOCH_LENGTH,
start_warmup_value=0,
)
wd = dict(
base_value=cfg.optim["weight_decay"],
final_value=cfg.optim["weight_decay_end"],
total_iters=cfg.optim["epochs"] * OFFICIAL_EPOCH_LENGTH,
)
momentum = dict(
base_value=cfg.teacher["momentum_teacher"],
final_value=cfg.teacher["final_momentum_teacher"],
total_iters=cfg.optim["epochs"] * OFFICIAL_EPOCH_LENGTH,
)
teacher_temp = dict(
base_value=cfg.teacher["teacher_temp"],
final_value=cfg.teacher["teacher_temp"],
total_iters=cfg.teacher["warmup_teacher_temp_epochs"] * OFFICIAL_EPOCH_LENGTH,
warmup_iters=cfg.teacher["warmup_teacher_temp_epochs"] * OFFICIAL_EPOCH_LENGTH,
start_warmup_value=cfg.teacher["warmup_teacher_temp"],
)
lr_schedule = CosineScheduler(**lr)
wd_schedule = CosineScheduler(**wd)
momentum_schedule = CosineScheduler(**momentum)
teacher_temp_schedule = CosineScheduler(**teacher_temp)
last_layer_lr_schedule = CosineScheduler(**lr)
last_layer_lr_schedule.schedule[
: cfg.optim["freeze_last_layer_epochs"] * OFFICIAL_EPOCH_LENGTH
] = 0 # mimicking the original schedules
logger.info("Schedulers ready.")
return (
lr_schedule,
wd_schedule,
momentum_schedule,
teacher_temp_schedule,
last_layer_lr_schedule,
)
def apply_optim_scheduler(optimizer, lr, wd, last_layer_lr):
for param_group in optimizer.param_groups:
is_last_layer = param_group["is_last_layer"]
lr_multiplier = param_group["lr_multiplier"]
wd_multiplier = param_group["wd_multiplier"]
param_group["weight_decay"] = wd * wd_multiplier
param_group["lr"] = (last_layer_lr if is_last_layer else lr) * lr_multiplier
def do_test(cfg, model, iteration):
new_state_dict = model.teacher.state_dict()
if distributed.is_main_process():
iterstring = str(iteration)
eval_dir = os.path.join(cfg.train.output_dir, "eval", iterstring)
os.makedirs(eval_dir, exist_ok=True)
# save teacher checkpoint
teacher_ckp_path = os.path.join(eval_dir, "teacher_checkpoint.pth")
torch.save({"teacher": new_state_dict}, teacher_ckp_path)
def do_train(cfg, model, resume=False):
model.train()
inputs_dtype = torch.half
fp16_scaler = model.fp16_scaler # for mixed precision training
# setup optimizer
optimizer = build_optimizer(cfg, model.get_params_groups())
(
lr_schedule,
wd_schedule,
momentum_schedule,
teacher_temp_schedule,
last_layer_lr_schedule,
) = build_schedulers(cfg)
# checkpointer
checkpointer = FSDPCheckpointer(model, cfg.train.output_dir, optimizer=optimizer, save_to_disk=True)
start_iter = checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
OFFICIAL_EPOCH_LENGTH = cfg.train.OFFICIAL_EPOCH_LENGTH
max_iter = cfg.optim.epochs * OFFICIAL_EPOCH_LENGTH
periodic_checkpointer = PeriodicCheckpointer(
checkpointer,
period=3 * OFFICIAL_EPOCH_LENGTH,
max_iter=max_iter,
max_to_keep=3,
)
# setup data preprocessing
img_size = cfg.crops.global_crops_size
patch_size = cfg.student.patch_size
n_tokens = (img_size // patch_size) ** 2
mask_generator = MaskingGenerator(
input_size=(img_size // patch_size, img_size // patch_size),
max_num_patches=0.5 * img_size // patch_size * img_size // patch_size,
)
data_transform = DataAugmentationDINO(
cfg.crops.global_crops_scale,
cfg.crops.local_crops_scale,
cfg.crops.local_crops_number,
global_crops_size=cfg.crops.global_crops_size,
local_crops_size=cfg.crops.local_crops_size,
)
collate_fn = partial(
collate_data_and_cast,
mask_ratio_tuple=cfg.ibot.mask_ratio_min_max,
mask_probability=cfg.ibot.mask_sample_probability,
n_tokens=n_tokens,
mask_generator=mask_generator,
dtype=inputs_dtype,
)
# setup data loader
dataset = make_dataset(
dataset_str=cfg.train.dataset_path,
transform=data_transform,
target_transform=lambda _: (),
)
# sampler_type = SamplerType.INFINITE
sampler_type = SamplerType.SHARDED_INFINITE
data_loader = make_data_loader(
dataset=dataset,
batch_size=cfg.train.batch_size_per_gpu,
num_workers=cfg.train.num_workers,
shuffle=True,
seed=start_iter, # TODO: Fix this -- cfg.train.seed
sampler_type=sampler_type,
sampler_advance=0, # TODO(qas): fix this -- start_iter * cfg.train.batch_size_per_gpu,
drop_last=True,
collate_fn=collate_fn,
)
# training loop
iteration = start_iter
logger.info("Starting training from iteration {}".format(start_iter))
metrics_file = os.path.join(cfg.train.output_dir, "training_metrics.json")
metric_logger = MetricLogger(delimiter=" ", output_file=metrics_file)
header = "Training"
for data in metric_logger.log_every(
data_loader,
10,
header,
max_iter,
start_iter,
):
current_batch_size = data["collated_global_crops"].shape[0] / 2
if iteration > max_iter:
return
# apply schedules
lr = lr_schedule[iteration]
wd = wd_schedule[iteration]
mom = momentum_schedule[iteration]
teacher_temp = teacher_temp_schedule[iteration]
last_layer_lr = last_layer_lr_schedule[iteration]
apply_optim_scheduler(optimizer, lr, wd, last_layer_lr)
# compute losses
optimizer.zero_grad(set_to_none=True)
loss_dict = model.forward_backward(data, teacher_temp=teacher_temp)
# clip gradients
if fp16_scaler is not None:
if cfg.optim.clip_grad:
fp16_scaler.unscale_(optimizer)
for v in model.student.values():
v.clip_grad_norm_(cfg.optim.clip_grad)
fp16_scaler.step(optimizer)
fp16_scaler.update()
else:
if cfg.optim.clip_grad:
for v in model.student.values():
v.clip_grad_norm_(cfg.optim.clip_grad)
optimizer.step()
# perform teacher EMA update
model.update_teacher(mom)
# logging
if distributed.get_global_size() > 1:
for v in loss_dict.values():
torch.distributed.all_reduce(v)
loss_dict_reduced = {k: v.item() / distributed.get_global_size() for k, v in loss_dict.items()}
if math.isnan(sum(loss_dict_reduced.values())):
logger.info("NaN detected")
raise AssertionError
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
metric_logger.update(lr=lr)
metric_logger.update(wd=wd)
metric_logger.update(mom=mom)
metric_logger.update(last_layer_lr=last_layer_lr)
metric_logger.update(current_batch_size=current_batch_size)
metric_logger.update(total_loss=losses_reduced, **loss_dict_reduced)
# checkpointing and testing
if cfg.evaluation.eval_period_iterations > 0 and (iteration + 1) % cfg.evaluation.eval_period_iterations == 0:
do_test(cfg, model, f"training_{iteration}")
torch.cuda.synchronize()
periodic_checkpointer.step(iteration)
iteration = iteration + 1
metric_logger.synchronize_between_processes()
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def main(args):
cfg = setup(args)
model = SSLMetaArch(cfg).to(torch.device("cuda"))
model.prepare_for_distributed_training()
logger.info("Model:\n{}".format(model))
if args.eval_only:
iteration = (
FSDPCheckpointer(model, save_dir=cfg.train.output_dir)
.resume_or_load(cfg.MODEL.WEIGHTS, resume=not args.no_resume)
.get("iteration", -1)
+ 1
)
return do_test(cfg, model, f"manual_{iteration}")
do_train(cfg, model, resume=not args.no_resume)
if __name__ == "__main__":
args = get_args_parser(add_help=True).parse_args()
main(args)
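# Hedged worked example (not part of the original file): schedules from build_schedulers are
# indexed per iteration, with epochs converted through OFFICIAL_EPOCH_LENGTH. For a
# hypothetical run of 100 epochs at 1250 iterations per epoch, max_iter is 125000 and the
# periodic checkpointer fires every 3 * 1250 = 3750 iterations.
_OFFICIAL_EPOCH_LENGTH, _epochs = 1250, 100
assert _epochs * _OFFICIAL_EPOCH_LENGTH == 125_000
assert 3 * _OFFICIAL_EPOCH_LENGTH == 3_750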
| EXA-1-master | exa/models/dinov2/dinov2/train/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import logging
from typing import Any, Dict, Optional
import torch
from torch import Tensor
from torchmetrics import Metric, MetricCollection
from torchmetrics.classification import MulticlassAccuracy
from torchmetrics.utilities.data import dim_zero_cat, select_topk
logger = logging.getLogger("dinov2")
class MetricType(Enum):
MEAN_ACCURACY = "mean_accuracy"
MEAN_PER_CLASS_ACCURACY = "mean_per_class_accuracy"
PER_CLASS_ACCURACY = "per_class_accuracy"
IMAGENET_REAL_ACCURACY = "imagenet_real_accuracy"
@property
def accuracy_averaging(self):
return getattr(AccuracyAveraging, self.name, None)
def __str__(self):
return self.value
class AccuracyAveraging(Enum):
MEAN_ACCURACY = "micro"
MEAN_PER_CLASS_ACCURACY = "macro"
PER_CLASS_ACCURACY = "none"
def __str__(self):
return self.value
def build_metric(metric_type: MetricType, *, num_classes: int, ks: Optional[tuple] = None):
if metric_type.accuracy_averaging is not None:
return build_topk_accuracy_metric(
average_type=metric_type.accuracy_averaging,
num_classes=num_classes,
ks=(1, 5) if ks is None else ks,
)
elif metric_type == MetricType.IMAGENET_REAL_ACCURACY:
return build_topk_imagenet_real_accuracy_metric(
num_classes=num_classes,
ks=(1, 5) if ks is None else ks,
)
raise ValueError(f"Unknown metric type {metric_type}")
def build_topk_accuracy_metric(average_type: AccuracyAveraging, num_classes: int, ks: tuple = (1, 5)):
metrics: Dict[str, Metric] = {
f"top-{k}": MulticlassAccuracy(top_k=k, num_classes=int(num_classes), average=average_type.value) for k in ks
}
return MetricCollection(metrics)
def build_topk_imagenet_real_accuracy_metric(num_classes: int, ks: tuple = (1, 5)):
metrics: Dict[str, Metric] = {f"top-{k}": ImageNetReaLAccuracy(top_k=k, num_classes=int(num_classes)) for k in ks}
return MetricCollection(metrics)
class ImageNetReaLAccuracy(Metric):
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
def __init__(
self,
num_classes: int,
top_k: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_classes = num_classes
self.top_k = top_k
self.add_state("tp", [], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
# preds [B, D]
# target [B, A]
# preds_oh [B, D] with 0 and 1
# select top K highest probabilities, use one hot representation
preds_oh = select_topk(preds, self.top_k)
# target_oh [B, D + 1] with 0 and 1
target_oh = torch.zeros((preds_oh.shape[0], preds_oh.shape[1] + 1), device=target.device, dtype=torch.int32)
target = target.long()
# for undefined targets (-1) use a fake value `num_classes`
target[target == -1] = self.num_classes
# fill targets, use one hot representation
target_oh.scatter_(1, target, 1)
# target_oh [B, D] (remove the fake target at index `num_classes`)
target_oh = target_oh[:, :-1]
# tp [B] with 0 and 1
tp = (preds_oh * target_oh == 1).sum(dim=1)
# at least one match between prediction and target
tp.clip_(max=1)
# ignore instances where no targets are defined
mask = target_oh.sum(dim=1) > 0
tp = tp[mask]
self.tp.append(tp) # type: ignore
def compute(self) -> Tensor:
tp = dim_zero_cat(self.tp) # type: ignore
return tp.float().mean()
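# Hedged usage sketch (not part of the original file): building a top-1/top-5 accuracy
# collection and feeding it a random batch; shapes and numbers are illustrative only.
if __name__ == "__main__":
    _metric = build_metric(MetricType.MEAN_ACCURACY, num_classes=10)
    _preds = torch.randn(4, 10).softmax(dim=-1)
    _target = torch.randint(0, 10, (4,))
    _metric.update(_preds, _target)
    print(_metric.compute())  # e.g. {"top-1": tensor(...), "top-5": tensor(...)}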
| EXA-1-master | exa/models/dinov2/dinov2/eval/metrics.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from functools import partial
import json
import logging
import os
import sys
from typing import List, Optional
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
from dinov2.data import SamplerType, make_data_loader, make_dataset
from dinov2.data.transforms import make_classification_eval_transform, make_classification_train_transform
import dinov2.distributed as distributed
from dinov2.eval.metrics import MetricType, build_metric
from dinov2.eval.setup import get_args_parser as get_setup_args_parser
from dinov2.eval.setup import setup_and_build_model
from dinov2.eval.utils import ModelWithIntermediateLayers, evaluate
from dinov2.logging import MetricLogger
logger = logging.getLogger("dinov2")
def get_args_parser(
description: Optional[str] = None,
parents: Optional[List[argparse.ArgumentParser]] = [],
add_help: bool = True,
):
setup_args_parser = get_setup_args_parser(parents=parents, add_help=False)
parents = [setup_args_parser]
parser = argparse.ArgumentParser(
description=description,
parents=parents,
add_help=add_help,
)
parser.add_argument(
"--train-dataset",
dest="train_dataset_str",
type=str,
help="Training dataset",
)
parser.add_argument(
"--val-dataset",
dest="val_dataset_str",
type=str,
help="Validation dataset",
)
parser.add_argument(
"--test-datasets",
dest="test_dataset_strs",
type=str,
nargs="+",
help="Test datasets, none to reuse the validation dataset",
)
parser.add_argument(
"--epochs",
type=int,
help="Number of training epochs",
)
parser.add_argument(
"--batch-size",
type=int,
help="Batch Size (per GPU)",
)
parser.add_argument(
"--num-workers",
type=int,
help="Number de Workers",
)
parser.add_argument(
"--epoch-length",
type=int,
help="Length of an epoch in number of iterations",
)
parser.add_argument(
"--save-checkpoint-frequency",
type=int,
help="Number of epochs between two named checkpoint saves.",
)
parser.add_argument(
"--eval-period-iterations",
type=int,
help="Number of iterations between two evaluations.",
)
parser.add_argument(
"--learning-rates",
nargs="+",
type=float,
help="Learning rates to grid search.",
)
parser.add_argument(
"--no-resume",
action="store_true",
help="Whether to not resume from existing checkpoints",
)
parser.add_argument(
"--val-metric-type",
type=MetricType,
choices=list(MetricType),
help="Validation metric",
)
parser.add_argument(
"--test-metric-types",
type=MetricType,
choices=list(MetricType),
nargs="+",
help="Evaluation metric",
)
parser.add_argument(
"--classifier-fpath",
type=str,
help="Path to a file containing pretrained linear classifiers",
)
parser.add_argument(
"--val-class-mapping-fpath",
type=str,
help="Path to a file containing a mapping to adjust classifier outputs",
)
parser.add_argument(
"--test-class-mapping-fpaths",
nargs="+",
type=str,
help="Path to a file containing a mapping to adjust classifier outputs",
)
parser.set_defaults(
train_dataset_str="ImageNet:split=TRAIN",
val_dataset_str="ImageNet:split=VAL",
test_dataset_strs=None,
epochs=10,
batch_size=128,
num_workers=8,
epoch_length=1250,
save_checkpoint_frequency=20,
eval_period_iterations=1250,
learning_rates=[1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 1e-2, 2e-2, 5e-2, 0.1],
val_metric_type=MetricType.MEAN_ACCURACY,
test_metric_types=None,
classifier_fpath=None,
val_class_mapping_fpath=None,
test_class_mapping_fpaths=[None],
)
return parser
def has_ddp_wrapper(m: nn.Module) -> bool:
return isinstance(m, DistributedDataParallel)
def remove_ddp_wrapper(m: nn.Module) -> nn.Module:
return m.module if has_ddp_wrapper(m) else m
def _pad_and_collate(batch):
maxlen = max(len(targets) for image, targets in batch)
padded_batch = [
(image, np.pad(targets, (0, maxlen - len(targets)), constant_values=-1)) for image, targets in batch
]
return torch.utils.data.default_collate(padded_batch)
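# Hedged example (illustrative only): `_pad_and_collate` pads variable-length label lists
# with -1 so ImageNet-ReaL style targets can be stacked into a single batch tensor.
# The shapes and values below are made up.
def _example_pad_and_collate():
    batch = [
        (torch.zeros(3, 4, 4), np.array([1, 3])),
        (torch.zeros(3, 4, 4), np.array([2])),
    ]
    images, targets = _pad_and_collate(batch)
    # images: [2, 3, 4, 4]; targets: [[1, 3], [2, -1]]
    return images.shape, targets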
def create_linear_input(x_tokens_list, use_n_blocks, use_avgpool):
intermediate_output = x_tokens_list[-use_n_blocks:]
output = torch.cat([class_token for _, class_token in intermediate_output], dim=-1)
if use_avgpool:
output = torch.cat(
(
output,
torch.mean(intermediate_output[-1][0], dim=1), # patch tokens
),
dim=-1,
)
output = output.reshape(output.shape[0], -1)
return output.float()
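# Hedged sketch (not in the original file): `create_linear_input` expects a list of
# (patch_tokens, class_token) pairs, one per block. With `use_n_blocks=n`, the class tokens
# of the last n blocks are concatenated (n * embed_dim features); `use_avgpool` appends the
# mean patch token of the last block (one extra embed_dim). The sizes below are arbitrary.
def _example_create_linear_input(batch_size=2, embed_dim=8, num_blocks=4, num_patches=16):
    x_tokens_list = [
        (torch.randn(batch_size, num_patches, embed_dim), torch.randn(batch_size, embed_dim))
        for _ in range(num_blocks)
    ]
    out = create_linear_input(x_tokens_list, use_n_blocks=4, use_avgpool=True)
    # 4 class tokens + 1 averaged patch token -> out.shape == (2, 5 * embed_dim) == (2, 40)
    return out.shape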
class LinearClassifier(nn.Module):
"""Linear layer to train on top of frozen features"""
def __init__(self, out_dim, use_n_blocks, use_avgpool, num_classes=1000):
super().__init__()
self.out_dim = out_dim
self.use_n_blocks = use_n_blocks
self.use_avgpool = use_avgpool
self.num_classes = num_classes
self.linear = nn.Linear(out_dim, num_classes)
self.linear.weight.data.normal_(mean=0.0, std=0.01)
self.linear.bias.data.zero_()
def forward(self, x_tokens_list):
output = create_linear_input(x_tokens_list, self.use_n_blocks, self.use_avgpool)
return self.linear(output)
class AllClassifiers(nn.Module):
def __init__(self, classifiers_dict):
super().__init__()
self.classifiers_dict = nn.ModuleDict()
self.classifiers_dict.update(classifiers_dict)
def forward(self, inputs):
return {k: v.forward(inputs) for k, v in self.classifiers_dict.items()}
def __len__(self):
return len(self.classifiers_dict)
class LinearPostprocessor(nn.Module):
def __init__(self, linear_classifier, class_mapping=None):
super().__init__()
self.linear_classifier = linear_classifier
self.register_buffer("class_mapping", None if class_mapping is None else torch.LongTensor(class_mapping))
def forward(self, samples, targets):
preds = self.linear_classifier(samples)
return {
"preds": preds[:, self.class_mapping] if self.class_mapping is not None else preds,
"target": targets,
}
def scale_lr(learning_rates, batch_size):
return learning_rates * (batch_size * distributed.get_global_size()) / 256.0
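# Hedged worked example (added for clarity): with a base lr of 1e-3, batch_size=128 per GPU
# and 4 GPUs, scale_lr returns 1e-3 * (128 * 4) / 256 = 2e-3, i.e. linear scaling relative
# to a reference batch size of 256.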
def setup_linear_classifiers(sample_output, n_last_blocks_list, learning_rates, batch_size, num_classes=1000):
linear_classifiers_dict = nn.ModuleDict()
optim_param_groups = []
for n in n_last_blocks_list:
for avgpool in [False, True]:
for _lr in learning_rates:
lr = scale_lr(_lr, batch_size)
out_dim = create_linear_input(sample_output, use_n_blocks=n, use_avgpool=avgpool).shape[1]
linear_classifier = LinearClassifier(
out_dim, use_n_blocks=n, use_avgpool=avgpool, num_classes=num_classes
)
linear_classifier = linear_classifier.cuda()
linear_classifiers_dict[
f"classifier_{n}_blocks_avgpool_{avgpool}_lr_{lr:.5f}".replace(".", "_")
] = linear_classifier
optim_param_groups.append({"params": linear_classifier.parameters(), "lr": lr})
linear_classifiers = AllClassifiers(linear_classifiers_dict)
if distributed.is_enabled():
linear_classifiers = nn.parallel.DistributedDataParallel(linear_classifiers)
return linear_classifiers, optim_param_groups
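# Hedged note (added for clarity, numbers assume the defaults used elsewhere in this file):
# with n_last_blocks_list=[1, 4], avgpool in {False, True} and the 13 default learning rates,
# this builds a grid of 2 * 2 * 13 = 52 linear heads, all trained jointly on the same
# frozen features.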
@torch.no_grad()
def evaluate_linear_classifiers(
feature_model,
linear_classifiers,
data_loader,
metric_type,
metrics_file_path,
training_num_classes,
iteration,
prefixstring="",
class_mapping=None,
best_classifier_on_val=None,
):
logger.info("running validation !")
num_classes = len(class_mapping) if class_mapping is not None else training_num_classes
metric = build_metric(metric_type, num_classes=num_classes)
postprocessors = {k: LinearPostprocessor(v, class_mapping) for k, v in linear_classifiers.classifiers_dict.items()}
metrics = {k: metric.clone() for k in linear_classifiers.classifiers_dict}
_, results_dict_temp = evaluate(
feature_model,
data_loader,
postprocessors,
metrics,
torch.cuda.current_device(),
)
logger.info("")
results_dict = {}
max_accuracy = 0
best_classifier = ""
for i, (classifier_string, metric) in enumerate(results_dict_temp.items()):
logger.info(f"{prefixstring} -- Classifier: {classifier_string} * {metric}")
if (
best_classifier_on_val is None and metric["top-1"].item() > max_accuracy
) or classifier_string == best_classifier_on_val:
max_accuracy = metric["top-1"].item()
best_classifier = classifier_string
results_dict["best_classifier"] = {"name": best_classifier, "accuracy": max_accuracy}
logger.info(f"best classifier: {results_dict['best_classifier']}")
if distributed.is_main_process():
with open(metrics_file_path, "a") as f:
f.write(f"iter: {iteration}\n")
for k, v in results_dict.items():
f.write(json.dumps({k: v}) + "\n")
f.write("\n")
return results_dict
def eval_linear(
*,
feature_model,
linear_classifiers,
train_data_loader,
val_data_loader,
metrics_file_path,
optimizer,
scheduler,
output_dir,
max_iter,
checkpoint_period, # In number of iter, creates a new file every period
running_checkpoint_period, # Period to update main checkpoint file
eval_period,
metric_type,
training_num_classes,
resume=True,
classifier_fpath=None,
val_class_mapping=None,
):
checkpointer = Checkpointer(linear_classifiers, output_dir, optimizer=optimizer, scheduler=scheduler)
start_iter = checkpointer.resume_or_load(classifier_fpath or "", resume=resume).get("iteration", -1) + 1
periodic_checkpointer = PeriodicCheckpointer(checkpointer, checkpoint_period, max_iter=max_iter)
iteration = start_iter
logger.info("Starting training from iteration {}".format(start_iter))
metric_logger = MetricLogger(delimiter=" ")
header = "Training"
for data, labels in metric_logger.log_every(
train_data_loader,
10,
header,
max_iter,
start_iter,
):
data = data.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
features = feature_model(data)
outputs = linear_classifiers(features)
losses = {f"loss_{k}": nn.CrossEntropyLoss()(v, labels) for k, v in outputs.items()}
loss = sum(losses.values())
# compute the gradients
optimizer.zero_grad()
loss.backward()
# step
optimizer.step()
scheduler.step()
# log
if iteration % 10 == 0:
torch.cuda.synchronize()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
print("lr", optimizer.param_groups[0]["lr"])
if iteration - start_iter > 5:
if iteration % running_checkpoint_period == 0:
torch.cuda.synchronize()
if distributed.is_main_process():
logger.info("Checkpointing running_checkpoint")
periodic_checkpointer.save("running_checkpoint_linear_eval", iteration=iteration)
torch.cuda.synchronize()
periodic_checkpointer.step(iteration)
if eval_period > 0 and (iteration + 1) % eval_period == 0 and iteration != max_iter - 1:
_ = evaluate_linear_classifiers(
feature_model=feature_model,
linear_classifiers=remove_ddp_wrapper(linear_classifiers),
data_loader=val_data_loader,
metrics_file_path=metrics_file_path,
prefixstring=f"ITER: {iteration}",
metric_type=metric_type,
training_num_classes=training_num_classes,
iteration=iteration,
class_mapping=val_class_mapping,
)
torch.cuda.synchronize()
iteration = iteration + 1
val_results_dict = evaluate_linear_classifiers(
feature_model=feature_model,
linear_classifiers=remove_ddp_wrapper(linear_classifiers),
data_loader=val_data_loader,
metrics_file_path=metrics_file_path,
metric_type=metric_type,
training_num_classes=training_num_classes,
iteration=iteration,
class_mapping=val_class_mapping,
)
return val_results_dict, feature_model, linear_classifiers, iteration
def make_eval_data_loader(test_dataset_str, batch_size, num_workers, metric_type):
test_dataset = make_dataset(
dataset_str=test_dataset_str,
transform=make_classification_eval_transform(),
)
test_data_loader = make_data_loader(
dataset=test_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler_type=SamplerType.DISTRIBUTED,
drop_last=False,
shuffle=False,
persistent_workers=False,
collate_fn=_pad_and_collate if metric_type == MetricType.IMAGENET_REAL_ACCURACY else None,
)
return test_data_loader
def test_on_datasets(
feature_model,
linear_classifiers,
test_dataset_strs,
batch_size,
num_workers,
test_metric_types,
metrics_file_path,
training_num_classes,
iteration,
best_classifier_on_val,
prefixstring="",
test_class_mappings=[None],
):
results_dict = {}
for test_dataset_str, class_mapping, metric_type in zip(test_dataset_strs, test_class_mappings, test_metric_types):
logger.info(f"Testing on {test_dataset_str}")
test_data_loader = make_eval_data_loader(test_dataset_str, batch_size, num_workers, metric_type)
dataset_results_dict = evaluate_linear_classifiers(
feature_model,
remove_ddp_wrapper(linear_classifiers),
test_data_loader,
metric_type,
metrics_file_path,
training_num_classes,
iteration,
prefixstring="",
class_mapping=class_mapping,
best_classifier_on_val=best_classifier_on_val,
)
results_dict[f"{test_dataset_str}_accuracy"] = 100.0 * dataset_results_dict["best_classifier"]["accuracy"]
return results_dict
def run_eval_linear(
model,
output_dir,
train_dataset_str,
val_dataset_str,
batch_size,
epochs,
epoch_length,
num_workers,
save_checkpoint_frequency,
eval_period_iterations,
learning_rates,
autocast_dtype,
test_dataset_strs=None,
resume=True,
classifier_fpath=None,
val_class_mapping_fpath=None,
test_class_mapping_fpaths=[None],
val_metric_type=MetricType.MEAN_ACCURACY,
test_metric_types=None,
):
seed = 0
if test_dataset_strs is None:
test_dataset_strs = [val_dataset_str]
if test_metric_types is None:
test_metric_types = [val_metric_type] * len(test_dataset_strs)
else:
assert len(test_metric_types) == len(test_dataset_strs)
assert len(test_dataset_strs) == len(test_class_mapping_fpaths)
train_transform = make_classification_train_transform()
train_dataset = make_dataset(
dataset_str=train_dataset_str,
transform=train_transform,
)
training_num_classes = len(torch.unique(torch.Tensor(train_dataset.get_targets().astype(int))))
sampler_type = SamplerType.SHARDED_INFINITE
# sampler_type = SamplerType.INFINITE
n_last_blocks_list = [1, 4]
n_last_blocks = max(n_last_blocks_list)
autocast_ctx = partial(torch.cuda.amp.autocast, enabled=True, dtype=autocast_dtype)
feature_model = ModelWithIntermediateLayers(model, n_last_blocks, autocast_ctx)
sample_output = feature_model(train_dataset[0][0].unsqueeze(0).cuda())
linear_classifiers, optim_param_groups = setup_linear_classifiers(
sample_output,
n_last_blocks_list,
learning_rates,
batch_size,
training_num_classes,
)
optimizer = torch.optim.SGD(optim_param_groups, momentum=0.9, weight_decay=0)
max_iter = epochs * epoch_length
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_iter, eta_min=0)
checkpointer = Checkpointer(linear_classifiers, output_dir, optimizer=optimizer, scheduler=scheduler)
start_iter = checkpointer.resume_or_load(classifier_fpath or "", resume=resume).get("iteration", -1) + 1
train_data_loader = make_data_loader(
dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=True,
seed=seed,
sampler_type=sampler_type,
sampler_advance=start_iter,
drop_last=True,
persistent_workers=True,
)
val_data_loader = make_eval_data_loader(val_dataset_str, batch_size, num_workers, val_metric_type)
checkpoint_period = save_checkpoint_frequency * epoch_length
if val_class_mapping_fpath is not None:
logger.info(f"Using class mapping from {val_class_mapping_fpath}")
val_class_mapping = np.load(val_class_mapping_fpath)
else:
val_class_mapping = None
test_class_mappings = []
for class_mapping_fpath in test_class_mapping_fpaths:
if class_mapping_fpath is not None and class_mapping_fpath != "None":
logger.info(f"Using class mapping from {class_mapping_fpath}")
class_mapping = np.load(class_mapping_fpath)
else:
class_mapping = None
test_class_mappings.append(class_mapping)
metrics_file_path = os.path.join(output_dir, "results_eval_linear.json")
val_results_dict, feature_model, linear_classifiers, iteration = eval_linear(
feature_model=feature_model,
linear_classifiers=linear_classifiers,
train_data_loader=train_data_loader,
val_data_loader=val_data_loader,
metrics_file_path=metrics_file_path,
optimizer=optimizer,
scheduler=scheduler,
output_dir=output_dir,
max_iter=max_iter,
checkpoint_period=checkpoint_period,
running_checkpoint_period=epoch_length,
eval_period=eval_period_iterations,
metric_type=val_metric_type,
training_num_classes=training_num_classes,
resume=resume,
val_class_mapping=val_class_mapping,
classifier_fpath=classifier_fpath,
)
results_dict = {}
if len(test_dataset_strs) > 1 or test_dataset_strs[0] != val_dataset_str:
results_dict = test_on_datasets(
feature_model,
linear_classifiers,
test_dataset_strs,
batch_size,
0, # num_workers,
test_metric_types,
metrics_file_path,
training_num_classes,
iteration,
val_results_dict["best_classifier"]["name"],
prefixstring="",
test_class_mappings=test_class_mappings,
)
results_dict["best_classifier"] = val_results_dict["best_classifier"]["name"]
results_dict[f"{val_dataset_str}_accuracy"] = 100.0 * val_results_dict["best_classifier"]["accuracy"]
logger.info("Test Results Dict " + str(results_dict))
return results_dict
def main(args):
model, autocast_dtype = setup_and_build_model(args)
run_eval_linear(
model=model,
output_dir=args.output_dir,
train_dataset_str=args.train_dataset_str,
val_dataset_str=args.val_dataset_str,
test_dataset_strs=args.test_dataset_strs,
batch_size=args.batch_size,
epochs=args.epochs,
epoch_length=args.epoch_length,
num_workers=args.num_workers,
save_checkpoint_frequency=args.save_checkpoint_frequency,
eval_period_iterations=args.eval_period_iterations,
learning_rates=args.learning_rates,
autocast_dtype=autocast_dtype,
resume=not args.no_resume,
classifier_fpath=args.classifier_fpath,
val_metric_type=args.val_metric_type,
test_metric_types=args.test_metric_types,
val_class_mapping_fpath=args.val_class_mapping_fpath,
test_class_mapping_fpaths=args.test_class_mapping_fpaths,
)
return 0
if __name__ == "__main__":
description = "DINOv2 linear evaluation"
args_parser = get_args_parser(description=description)
args = args_parser.parse_args()
sys.exit(main(args))
| EXA-1-master | exa/models/dinov2/dinov2/eval/linear.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import logging
import sys
import time
from typing import List, Optional
from cuml.linear_model import LogisticRegression
import torch
import torch.backends.cudnn as cudnn
import torch.distributed
from torch import nn
from torch.utils.data import TensorDataset
from torchmetrics import MetricTracker
from dinov2.data import make_dataset
from dinov2.data.transforms import make_classification_eval_transform
from dinov2.distributed import get_global_rank, get_global_size
from dinov2.eval.metrics import MetricType, build_metric
from dinov2.eval.setup import get_args_parser as get_setup_args_parser
from dinov2.eval.setup import setup_and_build_model
from dinov2.eval.utils import evaluate, extract_features
from dinov2.utils.dtype import as_torch_dtype
logger = logging.getLogger("dinov2")
DEFAULT_MAX_ITER = 1_000
C_POWER_RANGE = torch.linspace(-6, 5, 45)
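# Hedged note (added for clarity): `sweep_C_values` below evaluates C = 10 ** C_POWER_RANGE,
# i.e. 45 log-spaced values between 1e-6 and 1e5, distributed round-robin across ranks.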
_CPU_DEVICE = torch.device("cpu")
def get_args_parser(
description: Optional[str] = None,
parents: Optional[List[argparse.ArgumentParser]] = [],
add_help: bool = True,
):
setup_args_parser = get_setup_args_parser(parents=parents, add_help=False)
parents = [setup_args_parser]
parser = argparse.ArgumentParser(
description=description,
parents=parents,
add_help=add_help,
)
parser.add_argument(
"--train-dataset",
dest="train_dataset_str",
type=str,
help="Training dataset",
)
parser.add_argument(
"--val-dataset",
dest="val_dataset_str",
type=str,
help="Validation dataset",
)
parser.add_argument(
"--finetune-dataset-str",
dest="finetune_dataset_str",
type=str,
help="Fine-tuning dataset",
)
parser.add_argument(
"--finetune-on-val",
action="store_true",
help="If there is no finetune dataset, whether to choose the "
"hyperparameters on the val set instead of 10%% of the train dataset",
)
parser.add_argument(
"--metric-type",
type=MetricType,
choices=list(MetricType),
help="Metric type",
)
parser.add_argument(
"--train-features-device",
type=str,
help="Device to gather train features (cpu, cuda, cuda:0, etc.), default: %(default)s",
)
parser.add_argument(
"--train-dtype",
type=str,
help="Data type to convert the train features to (default: %(default)s)",
)
parser.add_argument(
"--max-train-iters",
type=int,
help="Maximum number of train iterations (default: %(default)s)",
)
parser.set_defaults(
train_dataset_str="ImageNet:split=TRAIN",
val_dataset_str="ImageNet:split=VAL",
finetune_dataset_str=None,
metric_type=MetricType.MEAN_ACCURACY,
train_features_device="cpu",
train_dtype="float64",
max_train_iters=DEFAULT_MAX_ITER,
finetune_on_val=False,
)
return parser
class LogRegModule(nn.Module):
def __init__(
self,
C,
max_iter=DEFAULT_MAX_ITER,
dtype=torch.float64,
device=_CPU_DEVICE,
):
super().__init__()
self.dtype = dtype
self.device = device
self.estimator = LogisticRegression(
penalty="l2",
C=C,
max_iter=max_iter,
output_type="numpy",
tol=1e-12,
linesearch_max_iter=50,
)
def forward(self, samples, targets):
samples_device = samples.device
samples = samples.to(dtype=self.dtype, device=self.device)
if self.device == _CPU_DEVICE:
samples = samples.numpy()
probas = self.estimator.predict_proba(samples)
return {"preds": torch.from_numpy(probas).to(samples_device), "target": targets}
def fit(self, train_features, train_labels):
train_features = train_features.to(dtype=self.dtype, device=self.device)
train_labels = train_labels.to(dtype=self.dtype, device=self.device)
if self.device == _CPU_DEVICE:
# both cuML and sklearn only work with numpy arrays on CPU
train_features = train_features.numpy()
train_labels = train_labels.numpy()
self.estimator.fit(train_features, train_labels)
def evaluate_model(*, logreg_model, logreg_metric, test_data_loader, device):
postprocessors = {"metrics": logreg_model}
metrics = {"metrics": logreg_metric}
return evaluate(nn.Identity(), test_data_loader, postprocessors, metrics, device)
def train_for_C(*, C, max_iter, train_features, train_labels, dtype=torch.float64, device=_CPU_DEVICE):
logreg_model = LogRegModule(C, max_iter=max_iter, dtype=dtype, device=device)
logreg_model.fit(train_features, train_labels)
return logreg_model
def train_and_evaluate(
*,
C,
max_iter,
train_features,
train_labels,
logreg_metric,
test_data_loader,
train_dtype=torch.float64,
train_features_device,
eval_device,
):
logreg_model = train_for_C(
C=C,
max_iter=max_iter,
train_features=train_features,
train_labels=train_labels,
dtype=train_dtype,
device=train_features_device,
)
return evaluate_model(
logreg_model=logreg_model,
logreg_metric=logreg_metric,
test_data_loader=test_data_loader,
device=eval_device,
)
def sweep_C_values(
*,
train_features,
train_labels,
test_data_loader,
metric_type,
num_classes,
train_dtype=torch.float64,
train_features_device=_CPU_DEVICE,
max_train_iters=DEFAULT_MAX_ITER,
):
if metric_type == MetricType.PER_CLASS_ACCURACY:
        # If we want to output per-class accuracy, we select the hyperparameters using mean per-class accuracy
metric_type = MetricType.MEAN_PER_CLASS_ACCURACY
logreg_metric = build_metric(metric_type, num_classes=num_classes)
metric_tracker = MetricTracker(logreg_metric, maximize=True)
ALL_C = 10**C_POWER_RANGE
logreg_models = {}
train_features = train_features.to(dtype=train_dtype, device=train_features_device)
train_labels = train_labels.to(device=train_features_device)
for i in range(get_global_rank(), len(ALL_C), get_global_size()):
C = ALL_C[i].item()
logger.info(
f"Training for C = {C:.5f}, dtype={train_dtype}, "
f"features: {train_features.shape}, {train_features.dtype}, "
f"labels: {train_labels.shape}, {train_labels.dtype}"
)
logreg_models[C] = train_for_C(
C=C,
max_iter=max_train_iters,
train_features=train_features,
train_labels=train_labels,
dtype=train_dtype,
device=train_features_device,
)
gather_list = [None for _ in range(get_global_size())]
torch.distributed.all_gather_object(gather_list, logreg_models)
logreg_models_gathered = {}
for logreg_dict in gather_list:
logreg_models_gathered.update(logreg_dict)
for i in range(len(ALL_C)):
metric_tracker.increment()
C = ALL_C[i].item()
evals = evaluate_model(
logreg_model=logreg_models_gathered[C],
logreg_metric=metric_tracker,
test_data_loader=test_data_loader,
device=torch.cuda.current_device(),
)
logger.info(f"Trained for C = {C:.5f}, accuracies = {evals}")
best_stats, which_epoch = metric_tracker.best_metric(return_step=True)
best_stats_100 = {k: 100.0 * v for k, v in best_stats.items()}
if which_epoch["top-1"] == i:
best_C = C
logger.info(f"Sweep best {best_stats_100}, best C = {best_C:.6f}")
return best_stats, best_C
def eval_log_regression(
*,
model,
train_dataset,
val_dataset,
finetune_dataset,
metric_type,
batch_size,
num_workers,
finetune_on_val=False,
train_dtype=torch.float64,
train_features_device=_CPU_DEVICE,
max_train_iters=DEFAULT_MAX_ITER,
):
"""
Implements the "standard" process for log regression evaluation:
The value of C is chosen by training on train_dataset and evaluating on
finetune_dataset. Then, the final model is trained on a concatenation of
train_dataset and finetune_dataset, and is evaluated on val_dataset.
If there is no finetune_dataset, the value of C is the one that yields
the best results on a random 10% subset of the train dataset
"""
start = time.time()
train_features, train_labels = extract_features(
model, train_dataset, batch_size, num_workers, gather_on_cpu=(train_features_device == _CPU_DEVICE)
)
val_features, val_labels = extract_features(
model, val_dataset, batch_size, num_workers, gather_on_cpu=(train_features_device == _CPU_DEVICE)
)
val_data_loader = torch.utils.data.DataLoader(
TensorDataset(val_features, val_labels),
batch_size=batch_size,
drop_last=False,
num_workers=0,
persistent_workers=False,
)
if finetune_dataset is None and finetune_on_val:
logger.info("Choosing hyperparameters on the val dataset")
finetune_features, finetune_labels = val_features, val_labels
elif finetune_dataset is None and not finetune_on_val:
logger.info("Choosing hyperparameters on 10% of the train dataset")
torch.manual_seed(0)
indices = torch.randperm(len(train_features), device=train_features.device)
finetune_index = indices[: len(train_features) // 10]
train_index = indices[len(train_features) // 10 :]
finetune_features, finetune_labels = train_features[finetune_index], train_labels[finetune_index]
train_features, train_labels = train_features[train_index], train_labels[train_index]
else:
logger.info("Choosing hyperparameters on the finetune dataset")
finetune_features, finetune_labels = extract_features(
model, finetune_dataset, batch_size, num_workers, gather_on_cpu=(train_features_device == _CPU_DEVICE)
)
# release the model - free GPU memory
del model
gc.collect()
torch.cuda.empty_cache()
finetune_data_loader = torch.utils.data.DataLoader(
TensorDataset(finetune_features, finetune_labels),
batch_size=batch_size,
drop_last=False,
)
if len(train_labels.shape) > 1:
num_classes = train_labels.shape[1]
else:
num_classes = train_labels.max() + 1
logger.info("Using cuML for logistic regression")
best_stats, best_C = sweep_C_values(
train_features=train_features,
train_labels=train_labels,
test_data_loader=finetune_data_loader,
metric_type=metric_type,
num_classes=num_classes,
train_dtype=train_dtype,
train_features_device=train_features_device,
max_train_iters=max_train_iters,
)
if not finetune_on_val:
logger.info("Best parameter found, concatenating features")
train_features = torch.cat((train_features, finetune_features))
train_labels = torch.cat((train_labels, finetune_labels))
logger.info("Training final model")
logreg_metric = build_metric(metric_type, num_classes=num_classes)
evals = train_and_evaluate(
C=best_C,
max_iter=max_train_iters,
train_features=train_features,
train_labels=train_labels,
logreg_metric=logreg_metric.clone(),
test_data_loader=val_data_loader,
eval_device=torch.cuda.current_device(),
train_dtype=train_dtype,
train_features_device=train_features_device,
)
best_stats = evals[1]["metrics"]
best_stats["best_C"] = best_C
logger.info(f"Log regression evaluation done in {int(time.time() - start)}s")
return best_stats
def eval_log_regression_with_model(
model,
train_dataset_str="ImageNet:split=TRAIN",
val_dataset_str="ImageNet:split=VAL",
finetune_dataset_str=None,
autocast_dtype=torch.float,
finetune_on_val=False,
metric_type=MetricType.MEAN_ACCURACY,
train_dtype=torch.float64,
train_features_device=_CPU_DEVICE,
max_train_iters=DEFAULT_MAX_ITER,
):
cudnn.benchmark = True
transform = make_classification_eval_transform(resize_size=224)
target_transform = None
train_dataset = make_dataset(dataset_str=train_dataset_str, transform=transform, target_transform=target_transform)
val_dataset = make_dataset(dataset_str=val_dataset_str, transform=transform, target_transform=target_transform)
if finetune_dataset_str is not None:
finetune_dataset = make_dataset(
dataset_str=finetune_dataset_str, transform=transform, target_transform=target_transform
)
else:
finetune_dataset = None
with torch.cuda.amp.autocast(dtype=autocast_dtype):
results_dict_logreg = eval_log_regression(
model=model,
train_dataset=train_dataset,
val_dataset=val_dataset,
finetune_dataset=finetune_dataset,
metric_type=metric_type,
batch_size=256,
num_workers=0, # 5,
finetune_on_val=finetune_on_val,
train_dtype=train_dtype,
train_features_device=train_features_device,
max_train_iters=max_train_iters,
)
results_dict = {
"top-1": results_dict_logreg["top-1"].cpu().numpy() * 100.0,
"top-5": results_dict_logreg.get("top-5", torch.tensor(0.0)).cpu().numpy() * 100.0,
"best_C": results_dict_logreg["best_C"],
}
logger.info(
"\n".join(
[
"Training of the supervised logistic regression on frozen features completed.\n"
"Top-1 test accuracy: {acc:.1f}".format(acc=results_dict["top-1"]),
"Top-5 test accuracy: {acc:.1f}".format(acc=results_dict["top-5"]),
"obtained for C = {c:.6f}".format(c=results_dict["best_C"]),
]
)
)
torch.distributed.barrier()
return results_dict
def main(args):
model, autocast_dtype = setup_and_build_model(args)
eval_log_regression_with_model(
model=model,
train_dataset_str=args.train_dataset_str,
val_dataset_str=args.val_dataset_str,
finetune_dataset_str=args.finetune_dataset_str,
autocast_dtype=autocast_dtype,
finetune_on_val=args.finetune_on_val,
metric_type=args.metric_type,
train_dtype=as_torch_dtype(args.train_dtype),
train_features_device=torch.device(args.train_features_device),
max_train_iters=args.max_train_iters,
)
return 0
if __name__ == "__main__":
description = "DINOv2 logistic regression evaluation"
args_parser = get_args_parser(description=description)
args = args_parser.parse_args()
sys.exit(main(args))
| EXA-1-master | exa/models/dinov2/dinov2/eval/log_regression.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/dinov2/dinov2/eval/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Any, List, Optional, Tuple
import torch
import torch.backends.cudnn as cudnn
from dinov2.models import build_model_from_cfg
from dinov2.utils.config import setup
import dinov2.utils.utils as dinov2_utils
def get_args_parser(
description: Optional[str] = None,
parents: Optional[List[argparse.ArgumentParser]] = [],
add_help: bool = True,
):
parser = argparse.ArgumentParser(
description=description,
parents=parents,
add_help=add_help,
)
parser.add_argument(
"--config-file",
type=str,
help="Model configuration file",
)
parser.add_argument(
"--pretrained-weights",
type=str,
help="Pretrained model weights",
)
parser.add_argument(
"--output-dir",
default="",
type=str,
help="Output directory to write results and logs",
)
parser.add_argument(
"--opts",
help="Extra configuration options",
default=[],
nargs="+",
)
return parser
def get_autocast_dtype(config):
teacher_dtype_str = config.compute_precision.teacher.backbone.mixed_precision.param_dtype
if teacher_dtype_str == "fp16":
return torch.half
elif teacher_dtype_str == "bf16":
return torch.bfloat16
else:
return torch.float
def build_model_for_eval(config, pretrained_weights):
model, _ = build_model_from_cfg(config, only_teacher=True)
dinov2_utils.load_pretrained_weights(model, pretrained_weights, "teacher")
model.eval()
model.cuda()
return model
def setup_and_build_model(args) -> Tuple[Any, torch.dtype]:
cudnn.benchmark = True
config = setup(args)
model = build_model_for_eval(config, args.pretrained_weights)
autocast_dtype = get_autocast_dtype(config)
return model, autocast_dtype
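# Hedged usage sketch (not part of the original file): how an eval script typically obtains a
# frozen backbone. The paths below are hypothetical placeholders, and a CUDA + distributed
# environment is assumed since `setup` and `build_model_for_eval` rely on it.
def _example_setup_and_build_model():
    parser = get_args_parser(description="example")
    args = parser.parse_args(
        [
            "--config-file", "path/to/config.yaml",         # hypothetical path
            "--pretrained-weights", "path/to/teacher.pth",  # hypothetical path
            "--output-dir", "path/to/output",               # hypothetical path
        ]
    )
    model, autocast_dtype = setup_and_build_model(args)
    return model, autocast_dtype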
| EXA-1-master | exa/models/dinov2/dinov2/eval/setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, Optional
import torch
from torch import nn
from torchmetrics import MetricCollection
from dinov2.data import DatasetWithEnumeratedTargets, SamplerType, make_data_loader
import dinov2.distributed as distributed
from dinov2.logging import MetricLogger
logger = logging.getLogger("dinov2")
class ModelWithNormalize(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, samples):
return nn.functional.normalize(self.model(samples), dim=1, p=2)
class ModelWithIntermediateLayers(nn.Module):
def __init__(self, feature_model, n_last_blocks, autocast_ctx):
super().__init__()
self.feature_model = feature_model
self.feature_model.eval()
self.n_last_blocks = n_last_blocks
self.autocast_ctx = autocast_ctx
def forward(self, images):
with torch.inference_mode():
with self.autocast_ctx():
features = self.feature_model.get_intermediate_layers(
images, self.n_last_blocks, return_class_token=True
)
return features
@torch.inference_mode()
def evaluate(
model: nn.Module,
data_loader,
postprocessors: Dict[str, nn.Module],
metrics: Dict[str, MetricCollection],
device: torch.device,
criterion: Optional[nn.Module] = None,
):
model.eval()
if criterion is not None:
criterion.eval()
for metric in metrics.values():
metric = metric.to(device)
metric_logger = MetricLogger(delimiter=" ")
header = "Test:"
for samples, targets, *_ in metric_logger.log_every(data_loader, 10, header):
outputs = model(samples.to(device))
targets = targets.to(device)
if criterion is not None:
loss = criterion(outputs, targets)
metric_logger.update(loss=loss.item())
for k, metric in metrics.items():
metric_inputs = postprocessors[k](outputs, targets)
metric.update(**metric_inputs)
metric_logger.synchronize_between_processes()
logger.info(f"Averaged stats: {metric_logger}")
stats = {k: metric.compute() for k, metric in metrics.items()}
metric_logger_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
return metric_logger_stats, stats
def all_gather_and_flatten(tensor_rank):
tensor_all_ranks = torch.empty(
distributed.get_global_size(),
*tensor_rank.shape,
dtype=tensor_rank.dtype,
device=tensor_rank.device,
)
tensor_list = list(tensor_all_ranks.unbind(0))
torch.distributed.all_gather(tensor_list, tensor_rank.contiguous())
return tensor_all_ranks.flatten(end_dim=1)
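# Hedged note (added for clarity): with R ranks and a per-rank tensor of shape [B, ...],
# `all_gather_and_flatten` returns a tensor of shape [R * B, ...] holding every rank's chunk,
# ordered by rank. `extract_features_with_dataloader` then uses the gathered sample indices
# with `index_copy_` to write each feature row to its global position.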
def extract_features(model, dataset, batch_size, num_workers, gather_on_cpu=False):
dataset_with_enumerated_targets = DatasetWithEnumeratedTargets(dataset)
sample_count = len(dataset_with_enumerated_targets)
data_loader = make_data_loader(
dataset=dataset_with_enumerated_targets,
batch_size=batch_size,
num_workers=num_workers,
sampler_type=SamplerType.DISTRIBUTED,
drop_last=False,
shuffle=False,
)
return extract_features_with_dataloader(model, data_loader, sample_count, gather_on_cpu)
@torch.inference_mode()
def extract_features_with_dataloader(model, data_loader, sample_count, gather_on_cpu=False):
gather_device = torch.device("cpu") if gather_on_cpu else torch.device("cuda")
metric_logger = MetricLogger(delimiter=" ")
features, all_labels = None, None
for samples, (index, labels_rank) in metric_logger.log_every(data_loader, 10):
samples = samples.cuda(non_blocking=True)
labels_rank = labels_rank.cuda(non_blocking=True)
index = index.cuda(non_blocking=True)
features_rank = model(samples).float()
# init storage feature matrix
if features is None:
features = torch.zeros(sample_count, features_rank.shape[-1], device=gather_device)
labels_shape = list(labels_rank.shape)
labels_shape[0] = sample_count
all_labels = torch.full(labels_shape, fill_value=-1, device=gather_device)
logger.info(f"Storing features into tensor of shape {features.shape}")
# share indexes, features and labels between processes
index_all = all_gather_and_flatten(index).to(gather_device)
features_all_ranks = all_gather_and_flatten(features_rank).to(gather_device)
labels_all_ranks = all_gather_and_flatten(labels_rank).to(gather_device)
# update storage feature matrix
if len(index_all) > 0:
features.index_copy_(0, index_all, features_all_ranks)
all_labels.index_copy_(0, index_all, labels_all_ranks)
logger.info(f"Features shape: {tuple(features.shape)}")
logger.info(f"Labels shape: {tuple(all_labels.shape)}")
assert torch.all(all_labels > -1)
return features, all_labels
| EXA-1-master | exa/models/dinov2/dinov2/eval/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from functools import partial
import json
import logging
import os
import sys
from typing import List, Optional
import torch
from torch.nn.functional import one_hot, softmax
import dinov2.distributed as distributed
from dinov2.data import SamplerType, make_data_loader, make_dataset
from dinov2.data.transforms import make_classification_eval_transform
from dinov2.eval.metrics import AccuracyAveraging, build_topk_accuracy_metric
from dinov2.eval.setup import get_args_parser as get_setup_args_parser
from dinov2.eval.setup import setup_and_build_model
from dinov2.eval.utils import ModelWithNormalize, evaluate, extract_features
logger = logging.getLogger("dinov2")
def get_args_parser(
description: Optional[str] = None,
parents: Optional[List[argparse.ArgumentParser]] = [],
add_help: bool = True,
):
setup_args_parser = get_setup_args_parser(parents=parents, add_help=False)
parents = [setup_args_parser]
parser = argparse.ArgumentParser(
description=description,
parents=parents,
add_help=add_help,
)
parser.add_argument(
"--train-dataset",
dest="train_dataset_str",
type=str,
help="Training dataset",
)
parser.add_argument(
"--val-dataset",
dest="val_dataset_str",
type=str,
help="Validation dataset",
)
parser.add_argument(
"--nb_knn",
nargs="+",
type=int,
help="Number of NN to use. 20 is usually working the best.",
)
parser.add_argument(
"--temperature",
type=float,
help="Temperature used in the voting coefficient",
)
parser.add_argument(
"--gather-on-cpu",
action="store_true",
help="Whether to gather the train features on cpu, slower"
"but useful to avoid OOM for large datasets (e.g. ImageNet22k).",
)
parser.add_argument(
"--batch-size",
type=int,
help="Batch size.",
)
parser.add_argument(
"--n-per-class-list",
nargs="+",
type=int,
help="Number to take per class",
)
parser.add_argument(
"--n-tries",
type=int,
help="Number of tries",
)
parser.set_defaults(
train_dataset_str="ImageNet:split=TRAIN",
val_dataset_str="ImageNet:split=VAL",
nb_knn=[10, 20, 100, 200],
temperature=0.07,
batch_size=256,
n_per_class_list=[-1],
n_tries=1,
)
return parser
class KnnModule(torch.nn.Module):
"""
Gets knn of test features from all processes on a chunk of the train features
Each rank gets a chunk of the train features as well as a chunk of the test features.
In `compute_neighbors`, for each rank one after the other, its chunk of test features
is sent to all devices, partial knns are computed with each chunk of train features
then collated back on the original device.
"""
def __init__(self, train_features, train_labels, nb_knn, T, device, num_classes=1000):
super().__init__()
self.global_rank = distributed.get_global_rank()
self.global_size = distributed.get_global_size()
self.device = device
self.train_features_rank_T = train_features.chunk(self.global_size)[self.global_rank].T.to(self.device)
self.candidates = train_labels.chunk(self.global_size)[self.global_rank].view(1, -1).to(self.device)
self.nb_knn = nb_knn
self.max_k = max(self.nb_knn)
self.T = T
self.num_classes = num_classes
def _get_knn_sims_and_labels(self, similarity, train_labels):
topk_sims, indices = similarity.topk(self.max_k, largest=True, sorted=True)
neighbors_labels = torch.gather(train_labels, 1, indices)
return topk_sims, neighbors_labels
def _similarity_for_rank(self, features_rank, source_rank):
# Send the features from `source_rank` to all ranks
broadcast_shape = torch.tensor(features_rank.shape).to(self.device)
torch.distributed.broadcast(broadcast_shape, source_rank)
broadcasted = features_rank
if self.global_rank != source_rank:
broadcasted = torch.zeros(*broadcast_shape, dtype=features_rank.dtype, device=self.device)
torch.distributed.broadcast(broadcasted, source_rank)
# Compute the neighbors for `source_rank` among `train_features_rank_T`
similarity_rank = torch.mm(broadcasted, self.train_features_rank_T)
candidate_labels = self.candidates.expand(len(similarity_rank), -1)
return self._get_knn_sims_and_labels(similarity_rank, candidate_labels)
def _gather_all_knn_for_rank(self, topk_sims, neighbors_labels, target_rank):
# Gather all neighbors for `target_rank`
topk_sims_rank = retrieved_rank = None
if self.global_rank == target_rank:
topk_sims_rank = [torch.zeros_like(topk_sims) for _ in range(self.global_size)]
retrieved_rank = [torch.zeros_like(neighbors_labels) for _ in range(self.global_size)]
torch.distributed.gather(topk_sims, topk_sims_rank, dst=target_rank)
torch.distributed.gather(neighbors_labels, retrieved_rank, dst=target_rank)
if self.global_rank == target_rank:
# Perform a second top-k on the k * global_size retrieved neighbors
topk_sims_rank = torch.cat(topk_sims_rank, dim=1)
retrieved_rank = torch.cat(retrieved_rank, dim=1)
results = self._get_knn_sims_and_labels(topk_sims_rank, retrieved_rank)
return results
return None
def compute_neighbors(self, features_rank):
for rank in range(self.global_size):
topk_sims, neighbors_labels = self._similarity_for_rank(features_rank, rank)
results = self._gather_all_knn_for_rank(topk_sims, neighbors_labels, rank)
if results is not None:
topk_sims_rank, neighbors_labels_rank = results
return topk_sims_rank, neighbors_labels_rank
def forward(self, features_rank):
"""
Compute the results on all values of `self.nb_knn` neighbors from the full `self.max_k`
"""
assert all(k <= self.max_k for k in self.nb_knn)
topk_sims, neighbors_labels = self.compute_neighbors(features_rank)
batch_size = neighbors_labels.shape[0]
topk_sims_transform = softmax(topk_sims / self.T, 1)
matmul = torch.mul(
one_hot(neighbors_labels, num_classes=self.num_classes),
topk_sims_transform.view(batch_size, -1, 1),
)
probas_for_k = {k: torch.sum(matmul[:, :k, :], 1) for k in self.nb_knn}
return probas_for_k
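# Hedged sketch (not part of the original file): the voting rule used in `KnnModule.forward`,
# reproduced on a single process with made-up similarities and labels to show the math.
def _example_knn_vote(T=0.07, num_classes=3, k=2):
    topk_sims = torch.tensor([[0.9, 0.8, 0.1]])    # similarities of the 3 nearest neighbors
    neighbors_labels = torch.tensor([[2, 2, 0]])   # labels of those neighbors
    weights = softmax(topk_sims / T, 1)            # temperature-scaled voting weights
    votes = torch.mul(one_hot(neighbors_labels, num_classes=num_classes), weights.view(1, -1, 1))
    probas_k = torch.sum(votes[:, :k, :], 1)       # only the first k neighbors vote
    # class 2 receives the two largest weights -> predicted class is 2
    return probas_k.argmax(dim=1)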
class DictKeysModule(torch.nn.Module):
def __init__(self, keys):
super().__init__()
self.keys = keys
def forward(self, features_dict, targets):
for k in self.keys:
features_dict = features_dict[k]
return {"preds": features_dict, "target": targets}
def create_module_dict(*, module, n_per_class_list, n_tries, nb_knn, train_features, train_labels):
modules = {}
mapping = create_class_indices_mapping(train_labels)
for npc in n_per_class_list:
if npc < 0: # Only one try needed when using the full data
full_module = module(
train_features=train_features,
train_labels=train_labels,
nb_knn=nb_knn,
)
modules["full"] = ModuleDictWithForward({"1": full_module})
continue
all_tries = {}
for t in range(n_tries):
final_indices = filter_train(mapping, npc, seed=t)
k_list = list(set(nb_knn + [npc]))
k_list = sorted([el for el in k_list if el <= npc])
all_tries[str(t)] = module(
train_features=train_features[final_indices],
train_labels=train_labels[final_indices],
nb_knn=k_list,
)
modules[f"{npc} per class"] = ModuleDictWithForward(all_tries)
return ModuleDictWithForward(modules)
def filter_train(mapping, n_per_class, seed):
torch.manual_seed(seed)
final_indices = []
for k in mapping.keys():
index = torch.randperm(len(mapping[k]))[:n_per_class]
final_indices.append(mapping[k][index])
return torch.cat(final_indices).squeeze()
def create_class_indices_mapping(labels):
unique_labels, inverse = torch.unique(labels, return_inverse=True)
mapping = {unique_labels[i]: (inverse == i).nonzero() for i in range(len(unique_labels))}
return mapping
class ModuleDictWithForward(torch.nn.ModuleDict):
def forward(self, *args, **kwargs):
return {k: module(*args, **kwargs) for k, module in self._modules.items()}
def eval_knn(
model,
train_dataset,
val_dataset,
accuracy_averaging,
nb_knn,
temperature,
batch_size,
num_workers,
gather_on_cpu,
n_per_class_list=[-1],
n_tries=1,
):
model = ModelWithNormalize(model)
logger.info("Extracting features for train set...")
train_features, train_labels = extract_features(
model, train_dataset, batch_size, num_workers, gather_on_cpu=gather_on_cpu
)
logger.info(f"Train features created, shape {train_features.shape}.")
val_dataloader = make_data_loader(
dataset=val_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler_type=SamplerType.DISTRIBUTED,
drop_last=False,
shuffle=False,
persistent_workers=True,
)
num_classes = train_labels.max() + 1
metric_collection = build_topk_accuracy_metric(accuracy_averaging, num_classes=num_classes)
device = torch.cuda.current_device()
partial_module = partial(KnnModule, T=temperature, device=device, num_classes=num_classes)
knn_module_dict = create_module_dict(
module=partial_module,
n_per_class_list=n_per_class_list,
n_tries=n_tries,
nb_knn=nb_knn,
train_features=train_features,
train_labels=train_labels,
)
postprocessors, metrics = {}, {}
for n_per_class, knn_module in knn_module_dict.items():
for t, knn_try in knn_module.items():
postprocessors = {
**postprocessors,
**{(n_per_class, t, k): DictKeysModule([n_per_class, t, k]) for k in knn_try.nb_knn},
}
metrics = {**metrics, **{(n_per_class, t, k): metric_collection.clone() for k in knn_try.nb_knn}}
model_with_knn = torch.nn.Sequential(model, knn_module_dict)
# ============ evaluation ... ============
logger.info("Start the k-NN classification.")
_, results_dict = evaluate(model_with_knn, val_dataloader, postprocessors, metrics, device)
# Averaging the results over the n tries for each value of n_per_class
for n_per_class, knn_module in knn_module_dict.items():
first_try = list(knn_module.keys())[0]
k_list = knn_module[first_try].nb_knn
for k in k_list:
keys = results_dict[(n_per_class, first_try, k)].keys() # keys are e.g. `top-1` and `top-5`
results_dict[(n_per_class, k)] = {
key: torch.mean(torch.stack([results_dict[(n_per_class, t, k)][key] for t in knn_module.keys()]))
for key in keys
}
for t in knn_module.keys():
del results_dict[(n_per_class, t, k)]
return results_dict
def eval_knn_with_model(
model,
output_dir,
train_dataset_str="ImageNet:split=TRAIN",
val_dataset_str="ImageNet:split=VAL",
nb_knn=(10, 20, 100, 200),
temperature=0.07,
autocast_dtype=torch.float,
accuracy_averaging=AccuracyAveraging.MEAN_ACCURACY,
transform=None,
gather_on_cpu=False,
batch_size=256,
num_workers=5,
n_per_class_list=[-1],
n_tries=1,
):
transform = transform or make_classification_eval_transform()
train_dataset = make_dataset(
dataset_str=train_dataset_str,
transform=transform,
)
val_dataset = make_dataset(
dataset_str=val_dataset_str,
transform=transform,
)
with torch.cuda.amp.autocast(dtype=autocast_dtype):
results_dict_knn = eval_knn(
model=model,
train_dataset=train_dataset,
val_dataset=val_dataset,
accuracy_averaging=accuracy_averaging,
nb_knn=nb_knn,
temperature=temperature,
batch_size=batch_size,
num_workers=num_workers,
gather_on_cpu=gather_on_cpu,
n_per_class_list=n_per_class_list,
n_tries=n_tries,
)
results_dict = {}
if distributed.is_main_process():
for knn_ in results_dict_knn.keys():
top1 = results_dict_knn[knn_]["top-1"].item() * 100.0
top5 = results_dict_knn[knn_]["top-5"].item() * 100.0
results_dict[f"{knn_} Top 1"] = top1
results_dict[f"{knn_} Top 5"] = top5
logger.info(f"{knn_} classifier result: Top1: {top1:.2f} Top5: {top5:.2f}")
metrics_file_path = os.path.join(output_dir, "results_eval_knn.json")
with open(metrics_file_path, "a") as f:
for k, v in results_dict.items():
f.write(json.dumps({k: v}) + "\n")
if distributed.is_enabled():
torch.distributed.barrier()
return results_dict
def main(args):
model, autocast_dtype = setup_and_build_model(args)
eval_knn_with_model(
model=model,
output_dir=args.output_dir,
train_dataset_str=args.train_dataset_str,
val_dataset_str=args.val_dataset_str,
nb_knn=args.nb_knn,
temperature=args.temperature,
autocast_dtype=autocast_dtype,
accuracy_averaging=AccuracyAveraging.MEAN_ACCURACY,
transform=None,
gather_on_cpu=args.gather_on_cpu,
batch_size=args.batch_size,
num_workers=5,
n_per_class_list=args.n_per_class_list,
n_tries=args.n_tries,
)
return 0
if __name__ == "__main__":
description = "DINOv2 k-NN evaluation"
args_parser = get_args_parser(description=description)
args = args_parser.parse_args()
sys.exit(main(args))
| EXA-1-master | exa/models/dinov2/dinov2/eval/knn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
from typing import List, Optional
import submitit
from dinov2.utils.cluster import (
get_slurm_executor_parameters,
get_slurm_partition,
get_user_checkpoint_path,
)
logger = logging.getLogger("dinov2")
def get_args_parser(
description: Optional[str] = None,
parents: Optional[List[argparse.ArgumentParser]] = [],
add_help: bool = True,
) -> argparse.ArgumentParser:
slurm_partition = get_slurm_partition()
parser = argparse.ArgumentParser(
description=description,
parents=parents,
add_help=add_help,
)
parser.add_argument(
"--ngpus",
"--gpus",
"--gpus-per-node",
default=8,
type=int,
help="Number of GPUs to request on each node",
)
parser.add_argument(
"--nodes",
"--nnodes",
default=2,
type=int,
help="Number of nodes to request",
)
parser.add_argument(
"--timeout",
default=2800,
type=int,
help="Duration of the job",
)
parser.add_argument(
"--partition",
default=slurm_partition,
type=str,
help="Partition where to submit",
)
parser.add_argument(
"--use-volta32",
action="store_true",
help="Request V100-32GB GPUs",
)
parser.add_argument(
"--comment",
default="",
type=str,
help="Comment to pass to scheduler, e.g. priority message",
)
parser.add_argument(
"--exclude",
default="",
type=str,
help="Nodes to exclude",
)
return parser
def get_shared_folder() -> Path:
user_checkpoint_path = get_user_checkpoint_path()
if user_checkpoint_path is None:
raise RuntimeError("Path to user checkpoint cannot be determined")
path = user_checkpoint_path / "experiments"
path.mkdir(exist_ok=True)
return path
def submit_jobs(task_class, args, name: str):
if not args.output_dir:
args.output_dir = str(get_shared_folder() / "%j")
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
executor = submitit.AutoExecutor(folder=args.output_dir, slurm_max_num_timeout=30)
kwargs = {}
if args.use_volta32:
kwargs["slurm_constraint"] = "volta32gb"
if args.comment:
kwargs["slurm_comment"] = args.comment
if args.exclude:
kwargs["slurm_exclude"] = args.exclude
executor_params = get_slurm_executor_parameters(
nodes=args.nodes,
num_gpus_per_node=args.ngpus,
timeout_min=args.timeout, # max is 60 * 72
slurm_signal_delay_s=120,
slurm_partition=args.partition,
**kwargs,
)
executor.update_parameters(name=name, **executor_params)
task = task_class(args)
job = executor.submit(task)
logger.info(f"Submitted job_id: {job.job_id}")
str_output_dir = os.path.abspath(args.output_dir).replace("%j", str(job.job_id))
logger.info(f"Logs and checkpoints will be saved at: {str_output_dir}")
| EXA-1-master | exa/models/dinov2/dinov2/run/submit.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/dinov2/dinov2/run/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.logging import setup_logging
from dinov2.train import get_args_parser as get_train_args_parser
from dinov2.run.submit import get_args_parser, submit_jobs
logger = logging.getLogger("dinov2")
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
from dinov2.train import main as train_main
self._setup_args()
train_main(self.args)
def checkpoint(self):
import submitit
logger.info(f"Requeuing {self.args}")
empty = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty)
def _setup_args(self):
import submitit
job_env = submitit.JobEnvironment()
self.args.output_dir = self.args.output_dir.replace("%j", str(job_env.job_id))
logger.info(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
logger.info(f"Args: {self.args}")
def main():
description = "Submitit launcher for DINOv2 training"
train_args_parser = get_train_args_parser(add_help=False)
parents = [train_args_parser]
args_parser = get_args_parser(description=description, parents=parents)
args = args_parser.parse_args()
setup_logging()
assert os.path.exists(args.config_file), "Configuration file does not exist!"
submit_jobs(Trainer, args, name="dinov2:train")
return 0
if __name__ == "__main__":
sys.exit(main())
| EXA-1-master | exa/models/dinov2/dinov2/run/train/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.eval.linear import get_args_parser as get_linear_args_parser
from dinov2.logging import setup_logging
from dinov2.run.submit import get_args_parser, submit_jobs
logger = logging.getLogger("dinov2")
class Evaluator:
def __init__(self, args):
self.args = args
def __call__(self):
from dinov2.eval.linear import main as linear_main
self._setup_args()
linear_main(self.args)
def checkpoint(self):
import submitit
logger.info(f"Requeuing {self.args}")
empty = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty)
def _setup_args(self):
import submitit
job_env = submitit.JobEnvironment()
self.args.output_dir = self.args.output_dir.replace("%j", str(job_env.job_id))
logger.info(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
logger.info(f"Args: {self.args}")
def main():
description = "Submitit launcher for DINOv2 linear evaluation"
linear_args_parser = get_linear_args_parser(add_help=False)
parents = [linear_args_parser]
args_parser = get_args_parser(description=description, parents=parents)
args = args_parser.parse_args()
setup_logging()
assert os.path.exists(args.config_file), "Configuration file does not exist!"
submit_jobs(Evaluator, args, name="dinov2:linear")
return 0
if __name__ == "__main__":
sys.exit(main())
| EXA-1-master | exa/models/dinov2/dinov2/run/eval/linear.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.eval.log_regression import get_args_parser as get_log_regression_args_parser
from dinov2.logging import setup_logging
from dinov2.run.submit import get_args_parser, submit_jobs
logger = logging.getLogger("dinov2")
class Evaluator:
def __init__(self, args):
self.args = args
def __call__(self):
from dinov2.eval.log_regression import main as log_regression_main
self._setup_args()
log_regression_main(self.args)
def checkpoint(self):
import submitit
logger.info(f"Requeuing {self.args}")
empty = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty)
def _setup_args(self):
import submitit
job_env = submitit.JobEnvironment()
self.args.output_dir = self.args.output_dir.replace("%j", str(job_env.job_id))
logger.info(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
logger.info(f"Args: {self.args}")
def main():
description = "Submitit launcher for DINOv2 logistic evaluation"
log_regression_args_parser = get_log_regression_args_parser(add_help=False)
parents = [log_regression_args_parser]
args_parser = get_args_parser(description=description, parents=parents)
args = args_parser.parse_args()
setup_logging()
assert os.path.exists(args.config_file), "Configuration file does not exist!"
submit_jobs(Evaluator, args, name="dinov2:logreg")
return 0
if __name__ == "__main__":
sys.exit(main())
| EXA-1-master | exa/models/dinov2/dinov2/run/eval/log_regression.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.eval.knn import get_args_parser as get_knn_args_parser
from dinov2.logging import setup_logging
from dinov2.run.submit import get_args_parser, submit_jobs
logger = logging.getLogger("dinov2")
class Evaluator:
def __init__(self, args):
self.args = args
def __call__(self):
from dinov2.eval.knn import main as knn_main
self._setup_args()
knn_main(self.args)
def checkpoint(self):
import submitit
logger.info(f"Requeuing {self.args}")
empty = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty)
def _setup_args(self):
import submitit
job_env = submitit.JobEnvironment()
self.args.output_dir = self.args.output_dir.replace("%j", str(job_env.job_id))
logger.info(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
logger.info(f"Args: {self.args}")
def main():
description = "Submitit launcher for DINOv2 k-NN evaluation"
knn_args_parser = get_knn_args_parser(add_help=False)
parents = [knn_args_parser]
args_parser = get_args_parser(description=description, parents=parents)
args = args_parser.parse_args()
setup_logging()
assert os.path.exists(args.config_file), "Configuration file does not exist!"
submit_jobs(Evaluator, args, name="dinov2:knn")
return 0
if __name__ == "__main__":
sys.exit(main())
| EXA-1-master | exa/models/dinov2/dinov2/run/eval/knn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch
from torchvision import transforms
class GaussianBlur(transforms.RandomApply):
"""
Apply Gaussian Blur to the PIL image.
"""
def __init__(self, *, p: float = 0.5, radius_min: float = 0.1, radius_max: float = 2.0):
# NOTE: torchvision is applying 1 - probability to return the original image
keep_p = 1 - p
transform = transforms.GaussianBlur(kernel_size=9, sigma=(radius_min, radius_max))
super().__init__(transforms=[transform], p=keep_p)
class MaybeToTensor(transforms.ToTensor):
"""
Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor, or keep as is if already a tensor.
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image, numpy.ndarray or torch.tensor): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if isinstance(pic, torch.Tensor):
return pic
return super().__call__(pic)
# Use timm's names
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def make_normalize_transform(
mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
std: Sequence[float] = IMAGENET_DEFAULT_STD,
) -> transforms.Normalize:
return transforms.Normalize(mean=mean, std=std)
# This roughly matches torchvision's preset for classification training:
# https://github.com/pytorch/vision/blob/main/references/classification/presets.py#L6-L44
def make_classification_train_transform(
*,
crop_size: int = 224,
interpolation=transforms.InterpolationMode.BICUBIC,
hflip_prob: float = 0.5,
mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
std: Sequence[float] = IMAGENET_DEFAULT_STD,
):
transforms_list = [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
if hflip_prob > 0.0:
transforms_list.append(transforms.RandomHorizontalFlip(hflip_prob))
transforms_list.extend(
[
MaybeToTensor(),
make_normalize_transform(mean=mean, std=std),
]
)
return transforms.Compose(transforms_list)
# This matches (roughly) torchvision's preset for classification evaluation:
# https://github.com/pytorch/vision/blob/main/references/classification/presets.py#L47-L69
def make_classification_eval_transform(
*,
resize_size: int = 256,
interpolation=transforms.InterpolationMode.BICUBIC,
crop_size: int = 224,
mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
std: Sequence[float] = IMAGENET_DEFAULT_STD,
) -> transforms.Compose:
transforms_list = [
transforms.Resize(resize_size, interpolation=interpolation),
transforms.CenterCrop(crop_size),
MaybeToTensor(),
make_normalize_transform(mean=mean, std=std),
]
return transforms.Compose(transforms_list)
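# Usage sketch: the eval preset maps any PIL image to a normalized (3, 224, 224) float
# tensor; the input size below is arbitrary.
if __name__ == "__main__":
    from PIL import Image
    transform = make_classification_eval_transform()
    tensor = transform(Image.new("RGB", (320, 480)))
    print(tensor.shape)  # torch.Size([3, 224, 224])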
| EXA-1-master | exa/models/dinov2/dinov2/data/transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import random
def collate_data_and_cast(samples_list, mask_ratio_tuple, mask_probability, dtype, n_tokens=None, mask_generator=None):
# dtype = torch.half # TODO: Remove
n_global_crops = len(samples_list[0][0]["global_crops"])
n_local_crops = len(samples_list[0][0]["local_crops"])
collated_global_crops = torch.stack([s[0]["global_crops"][i] for i in range(n_global_crops) for s in samples_list])
collated_local_crops = torch.stack([s[0]["local_crops"][i] for i in range(n_local_crops) for s in samples_list])
B = len(collated_global_crops)
N = n_tokens
n_samples_masked = int(B * mask_probability)
probs = torch.linspace(*mask_ratio_tuple, n_samples_masked + 1)
upperbound = 0
masks_list = []
for i in range(0, n_samples_masked):
prob_min = probs[i]
prob_max = probs[i + 1]
masks_list.append(torch.BoolTensor(mask_generator(int(N * random.uniform(prob_min, prob_max)))))
upperbound += int(N * prob_max)
for i in range(n_samples_masked, B):
masks_list.append(torch.BoolTensor(mask_generator(0)))
random.shuffle(masks_list)
collated_masks = torch.stack(masks_list).flatten(1)
mask_indices_list = collated_masks.flatten().nonzero().flatten()
masks_weight = (1 / collated_masks.sum(-1).clamp(min=1.0)).unsqueeze(-1).expand_as(collated_masks)[collated_masks]
return {
"collated_global_crops": collated_global_crops.to(dtype),
"collated_local_crops": collated_local_crops.to(dtype),
"collated_masks": collated_masks,
"mask_indices_list": mask_indices_list,
"masks_weight": masks_weight,
"upperbound": upperbound,
"n_masked_patches": torch.full((1,), fill_value=mask_indices_list.shape[0], dtype=torch.long),
}
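# Usage sketch with synthetic samples shaped like (crops_dict, target); the crop sizes, the
# 14x14 patch grid and the mask ratios are illustrative, and the absolute import assumes the
# dinov2 package is importable.
if __name__ == "__main__":
    from dinov2.data.masking import MaskingGenerator
    patch_grid = 14  # e.g. a 224x224 global crop with 16x16 patches
    mask_generator = MaskingGenerator(input_size=patch_grid, max_num_patches=patch_grid * patch_grid // 2)
    sample = ({"global_crops": [torch.randn(3, 224, 224)] * 2,
               "local_crops": [torch.randn(3, 96, 96)] * 8}, 0)
    batch = collate_data_and_cast(
        [sample] * 4,
        mask_ratio_tuple=(0.1, 0.5),
        mask_probability=0.5,
        dtype=torch.float32,
        n_tokens=patch_grid * patch_grid,
        mask_generator=mask_generator,
    )
    print(batch["collated_global_crops"].shape, batch["collated_masks"].shape, batch["n_masked_patches"].item())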
| EXA-1-master | exa/models/dinov2/dinov2/data/collate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
from typing import Any, Callable, List, Optional, TypeVar
import torch
from torch.utils.data import Sampler
from .datasets import ImageNet, ImageNet22k
from .samplers import EpochSampler, InfiniteSampler, ShardedInfiniteSampler
logger = logging.getLogger("dinov2")
class SamplerType(Enum):
DISTRIBUTED = 0
EPOCH = 1
INFINITE = 2
SHARDED_INFINITE = 3
SHARDED_INFINITE_NEW = 4
def _make_bool_str(b: bool) -> str:
return "yes" if b else "no"
def _make_sample_transform(image_transform: Optional[Callable] = None, target_transform: Optional[Callable] = None):
def transform(sample):
image, target = sample
if image_transform is not None:
image = image_transform(image)
if target_transform is not None:
target = target_transform(target)
return image, target
return transform
def _parse_dataset_str(dataset_str: str):
tokens = dataset_str.split(":")
name = tokens[0]
kwargs = {}
for token in tokens[1:]:
key, value = token.split("=")
assert key in ("root", "extra", "split")
kwargs[key] = value
if name == "ImageNet":
class_ = ImageNet
if "split" in kwargs:
kwargs["split"] = ImageNet.Split[kwargs["split"]]
elif name == "ImageNet22k":
class_ = ImageNet22k
else:
raise ValueError(f'Unsupported dataset "{name}"')
return class_, kwargs
def make_dataset(
*,
dataset_str: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
):
"""
Creates a dataset with the specified parameters.
Args:
dataset_str: A dataset string description (e.g. ImageNet:split=TRAIN).
transform: A transform to apply to images.
target_transform: A transform to apply to targets.
Returns:
The created dataset.
"""
logger.info(f'using dataset: "{dataset_str}"')
class_, kwargs = _parse_dataset_str(dataset_str)
dataset = class_(transform=transform, target_transform=target_transform, **kwargs)
logger.info(f"# of dataset samples: {len(dataset):,d}")
# Aggregated datasets do not expose (yet) these attributes, so add them.
if not hasattr(dataset, "transform"):
setattr(dataset, "transform", transform)
if not hasattr(dataset, "target_transform"):
setattr(dataset, "target_transform", target_transform)
return dataset
def _make_sampler(
*,
dataset,
type: Optional[SamplerType] = None,
shuffle: bool = False,
seed: int = 0,
size: int = -1,
advance: int = 0,
) -> Optional[Sampler]:
sample_count = len(dataset)
if type == SamplerType.INFINITE:
logger.info("sampler: infinite")
if size > 0:
raise ValueError("sampler size > 0 is invalid")
return InfiniteSampler(
sample_count=sample_count,
shuffle=shuffle,
seed=seed,
advance=advance,
)
elif type in (SamplerType.SHARDED_INFINITE, SamplerType.SHARDED_INFINITE_NEW):
logger.info("sampler: sharded infinite")
if size > 0:
raise ValueError("sampler size > 0 is invalid")
# TODO: Remove support for old shuffling
use_new_shuffle_tensor_slice = type == SamplerType.SHARDED_INFINITE_NEW
return ShardedInfiniteSampler(
sample_count=sample_count,
shuffle=shuffle,
seed=seed,
advance=advance,
use_new_shuffle_tensor_slice=use_new_shuffle_tensor_slice,
)
elif type == SamplerType.EPOCH:
logger.info("sampler: epoch")
if advance > 0:
raise NotImplementedError("sampler advance > 0 is not supported")
size = size if size > 0 else sample_count
logger.info(f"# of samples / epoch: {size:,d}")
return EpochSampler(
size=size,
sample_count=sample_count,
shuffle=shuffle,
seed=seed,
)
elif type == SamplerType.DISTRIBUTED:
logger.info("sampler: distributed")
if size > 0:
raise ValueError("sampler size > 0 is invalid")
if advance > 0:
raise ValueError("sampler advance > 0 is invalid")
return torch.utils.data.DistributedSampler(
dataset=dataset,
shuffle=shuffle,
seed=seed,
drop_last=False,
)
logger.info("sampler: none")
return None
T = TypeVar("T")
def make_data_loader(
*,
dataset,
batch_size: int,
num_workers: int,
shuffle: bool = True,
seed: int = 0,
sampler_type: Optional[SamplerType] = SamplerType.INFINITE,
sampler_size: int = -1,
sampler_advance: int = 0,
drop_last: bool = True,
persistent_workers: bool = False,
collate_fn: Optional[Callable[[List[T]], Any]] = None,
):
"""
Creates a data loader with the specified parameters.
Args:
dataset: A dataset (third party, LaViDa or WebDataset).
batch_size: The size of batches to generate.
num_workers: The number of workers to use.
shuffle: Whether to shuffle samples.
seed: The random seed to use.
sampler_type: Which sampler to use: EPOCH, INFINITE, SHARDED_INFINITE, SHARDED_INFINITE_NEW, DISTRIBUTED or None.
sampler_size: The number of images per epoch (when applicable) or -1 for the entire dataset.
sampler_advance: How many samples to skip (when applicable).
drop_last: Whether the last non-full batch of data should be dropped.
persistent_workers: maintain the workers Dataset instances alive after a dataset has been consumed once.
collate_fn: Function that performs batch collation
"""
sampler = _make_sampler(
dataset=dataset,
type=sampler_type,
shuffle=shuffle,
seed=seed,
size=sampler_size,
advance=sampler_advance,
)
logger.info("using PyTorch data loader")
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=drop_last,
persistent_workers=persistent_workers,
collate_fn=collate_fn,
)
try:
logger.info(f"# of batches: {len(data_loader):,d}")
except TypeError: # data loader has no length
logger.info("infinite data loader")
return data_loader
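# Usage sketch with an in-memory dataset and the default infinite sampler; real runs pass a
# dataset built with make_dataset(dataset_str="ImageNet:split=TRAIN:root=<ROOT>:extra=<EXTRA>"),
# which requires the ImageNet images and their dumped metadata on disk.
if __name__ == "__main__":
    import itertools
    from torch.utils.data import TensorDataset
    toy_dataset = TensorDataset(torch.randn(32, 3), torch.arange(32))
    data_loader = make_data_loader(dataset=toy_dataset, batch_size=8, num_workers=0, seed=0)
    for features, labels in itertools.islice(data_loader, 2):  # infinite loader: take only two batches
        print(features.shape, labels.shape)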
| EXA-1-master | exa/models/dinov2/dinov2/data/loaders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .adapters import DatasetWithEnumeratedTargets
from .loaders import make_data_loader, make_dataset, SamplerType
from .collate import collate_data_and_cast
from .masking import MaskingGenerator
from .augmentations import DataAugmentationDINO
| EXA-1-master | exa/models/dinov2/dinov2/data/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import math
import numpy as np
class MaskingGenerator:
def __init__(
self,
input_size,
num_masking_patches=None,
min_num_patches=4,
max_num_patches=None,
min_aspect=0.3,
max_aspect=None,
):
if not isinstance(input_size, tuple):
input_size = (input_size,) * 2
self.height, self.width = input_size
self.num_patches = self.height * self.width
self.num_masking_patches = num_masking_patches
self.min_num_patches = min_num_patches
self.max_num_patches = num_masking_patches if max_num_patches is None else max_num_patches
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
def __repr__(self):
repr_str = "Generator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
self.height,
self.width,
self.min_num_patches,
self.max_num_patches,
self.num_masking_patches,
self.log_aspect_ratio[0],
self.log_aspect_ratio[1],
)
return repr_str
def get_shape(self):
return self.height, self.width
def _mask(self, mask, max_mask_patches):
delta = 0
for _ in range(10):
target_area = random.uniform(self.min_num_patches, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < self.width and h < self.height:
top = random.randint(0, self.height - h)
left = random.randint(0, self.width - w)
num_masked = mask[top : top + h, left : left + w].sum()
# Overlap
if 0 < h * w - num_masked <= max_mask_patches:
for i in range(top, top + h):
for j in range(left, left + w):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
def __call__(self, num_masking_patches=0):
mask = np.zeros(shape=self.get_shape(), dtype=bool)
mask_count = 0
while mask_count < num_masking_patches:
max_mask_patches = num_masking_patches - mask_count
max_mask_patches = min(max_mask_patches, self.max_num_patches)
delta = self._mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return mask
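# Usage sketch: a 14x14 patch grid where each call draws block-shaped regions totalling at
# most the requested number of patches (it can stop early if no further block fits).
if __name__ == "__main__":
    generator = MaskingGenerator(input_size=14, max_num_patches=98)
    mask = generator(num_masking_patches=60)
    print(mask.shape, int(mask.sum()))  # (14, 14) boolean array, at most 60 patches set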
| EXA-1-master | exa/models/dinov2/dinov2/data/masking.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import Any, Optional
import warnings
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
import dinov2.distributed as distributed
class EpochSampler(Sampler):
def __init__(
self,
*,
size: int,
sample_count: int,
shuffle: bool = False,
seed: int = 0,
start: Optional[int] = None,
step: Optional[int] = None,
):
self._size = size
self._sample_count = sample_count
self._shuffle = shuffle
self._seed = seed
self._start = distributed.get_global_rank() if start is None else start
self._step = distributed.get_global_size() if step is None else step
self._epoch = 0
def __iter__(self):
count = (self._size + self._sample_count - 1) // self._sample_count
tiled_indices = np.tile(np.arange(self._sample_count), count)
if self._shuffle:
seed = self._seed * self._epoch if self._seed != 0 else self._epoch
rng = np.random.default_rng(seed)
iterable = rng.choice(tiled_indices, self._size, replace=False)
else:
iterable = tiled_indices[: self._size]
yield from itertools.islice(iterable, self._start, None, self._step)
def __len__(self):
return (self._size - self._start + self._step - 1) // self._step
def set_epoch(self, epoch):
self._epoch = epoch
def _get_numpy_dtype(size: int) -> Any:
return np.int32 if size <= 2**31 else np.int64
def _get_torch_dtype(size: int) -> Any:
return torch.int32 if size <= 2**31 else torch.int64
def _generate_randperm_indices(*, size: int, generator: torch.Generator):
"""Generate the indices of a random permutation."""
dtype = _get_torch_dtype(size)
# This is actually matching PyTorch's CPU implementation, see: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/TensorFactories.cpp#L900-L921
perm = torch.arange(size, dtype=dtype)
for i in range(size):
j = torch.randint(i, size, size=(1,), generator=generator).item()
# Always swap even if no-op
value = perm[j].item()
perm[j] = perm[i].item()
perm[i] = value
yield value
class InfiniteSampler(Sampler):
def __init__(
self,
*,
sample_count: int,
shuffle: bool = False,
seed: int = 0,
start: Optional[int] = None,
step: Optional[int] = None,
advance: int = 0,
):
self._sample_count = sample_count
self._seed = seed
self._shuffle = shuffle
self._start = distributed.get_global_rank() if start is None else start
self._step = distributed.get_global_size() if step is None else step
self._advance = advance
def __iter__(self):
if self._shuffle:
iterator = self._shuffled_iterator()
else:
iterator = self._iterator()
yield from itertools.islice(iterator, self._advance, None)
def _iterator(self):
assert not self._shuffle
while True:
iterable = range(self._sample_count)
yield from itertools.islice(iterable, self._start, None, self._step)
def _shuffled_iterator(self):
assert self._shuffle
# Instantiate a generator here (rather than in the ctor) to keep the class
# picklable (requirement of mp.spawn)
generator = torch.Generator().manual_seed(self._seed)
while True:
iterable = _generate_randperm_indices(size=self._sample_count, generator=generator)
yield from itertools.islice(iterable, self._start, None, self._step)
# The following function is somewhat equivalent to _new_shuffle_tensor_slice below,
# but avoids a full in-place random permutation generation.
def _shuffle_tensor_slice(
*, tensor: torch.Tensor, start: int = 0, step: int = 1, generator: torch.Generator
) -> np.ndarray:
stop = len(tensor)
count = stop // step
drop_count = stop - step * count
if drop_count:
warnings.warn(f"# of dropped samples: {drop_count}")
dtype = _get_numpy_dtype(stop)
result = np.empty(count, dtype=dtype)
for i in range(count):
j = torch.randint(0, i + 1, size=(1,), generator=generator).item() if i > 0 else 0
result[i] = result[j]
result[j] = tensor[start + i * step].item()
return result
def _new_shuffle_tensor_slice(
*, tensor: torch.Tensor, start: int = 0, step: int = 1, generator: torch.Generator
) -> np.ndarray:
stop = len(tensor)
count = stop // step
dtype = torch.int64 # Needed for using randperm result as indices
drop_count = stop - step * count
if drop_count:
warnings.warn(f"# of dropped samples: {drop_count}")
indices = torch.randperm(count, dtype=dtype, generator=generator)
return tensor[start::step][indices].numpy()
def _make_seed(seed: int, start: int, iter_count: int) -> int:
# NOTE: Tried a few variants (including iter_count << 32), this one worked best.
return seed + start + (iter_count << 24)
class ShardedInfiniteSampler(Sampler):
def __init__(
self,
*,
sample_count: int,
shuffle: bool = False,
seed: int = 0,
start: Optional[int] = None,
step: Optional[int] = None,
advance: int = 0,
use_new_shuffle_tensor_slice: bool = False,
):
self._sample_count = sample_count
self._seed = seed
self._shuffle = shuffle
self._start = distributed.get_global_rank() if start is None else start
self._step = distributed.get_global_size() if step is None else step
self._advance = advance
self._iter_count = 0
self._shuffle_tensor_slice_fn = (
_new_shuffle_tensor_slice if use_new_shuffle_tensor_slice else _shuffle_tensor_slice
)
def __iter__(self):
iter_count = self._advance // self._sample_count
if iter_count > 0:
self._advance -= iter_count * self._sample_count
self._iter_count += iter_count
if self._shuffle:
iterator = self._shuffled_iterator()
else:
iterator = self._iterator()
yield from itertools.islice(iterator, self._advance, None)
def _iterator(self):
assert not self._shuffle
while True:
iterable = range(self._sample_count)
yield from itertools.islice(iterable, self._start, None, self._step)
def _shuffled_iterator(self):
assert self._shuffle
        # Instantiate a generator here (rather than in the ctor) to keep the class
# picklable (requirement of mp.spawn)
generator = torch.Generator()
# Always shuffle everything first
generator.manual_seed(self._seed)
dtype = _get_torch_dtype(self._sample_count)
perm = torch.randperm(self._sample_count, dtype=dtype, generator=generator)
while True:
# Re-seed on each iteration to allow skipping whole permutations
seed = _make_seed(self._seed, self._start, self._iter_count)
generator.manual_seed(seed)
iterable = self._shuffle_tensor_slice_fn(
tensor=perm, start=self._start, step=self._step, generator=generator
)
yield from iterable
self._iter_count += 1
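# Usage sketch (single process, no torch.distributed initialization): infinite samplers never
# raise StopIteration, so the consumer bounds the iteration itself.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset
    dataset = TensorDataset(torch.arange(10))
    sampler = InfiniteSampler(sample_count=len(dataset), shuffle=True, seed=0)
    loader = DataLoader(dataset, sampler=sampler, batch_size=4)
    for batch in itertools.islice(loader, 3):
        print(batch[0].tolist())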
| EXA-1-master | exa/models/dinov2/dinov2/data/samplers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from torchvision import transforms
from .transforms import (
GaussianBlur,
make_normalize_transform,
)
logger = logging.getLogger("dinov2")
class DataAugmentationDINO(object):
def __init__(
self,
global_crops_scale,
local_crops_scale,
local_crops_number,
global_crops_size=224,
local_crops_size=96,
):
self.global_crops_scale = global_crops_scale
self.local_crops_scale = local_crops_scale
self.local_crops_number = local_crops_number
self.global_crops_size = global_crops_size
self.local_crops_size = local_crops_size
logger.info("###################################")
logger.info("Using data augmentation parameters:")
logger.info(f"global_crops_scale: {global_crops_scale}")
logger.info(f"local_crops_scale: {local_crops_scale}")
logger.info(f"local_crops_number: {local_crops_number}")
logger.info(f"global_crops_size: {global_crops_size}")
logger.info(f"local_crops_size: {local_crops_size}")
logger.info("###################################")
# random resized crop and flip
self.geometric_augmentation_global = transforms.Compose(
[
transforms.RandomResizedCrop(
global_crops_size, scale=global_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.RandomHorizontalFlip(p=0.5),
]
)
self.geometric_augmentation_local = transforms.Compose(
[
transforms.RandomResizedCrop(
local_crops_size, scale=local_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.RandomHorizontalFlip(p=0.5),
]
)
        # color distortions / blurring
color_jittering = transforms.Compose(
[
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
]
)
global_transfo1_extra = GaussianBlur(p=1.0)
global_transfo2_extra = transforms.Compose(
[
GaussianBlur(p=0.1),
transforms.RandomSolarize(threshold=128, p=0.2),
]
)
local_transfo_extra = GaussianBlur(p=0.5)
# normalization
self.normalize = transforms.Compose(
[
transforms.ToTensor(),
make_normalize_transform(),
]
)
self.global_transfo1 = transforms.Compose([color_jittering, global_transfo1_extra, self.normalize])
self.global_transfo2 = transforms.Compose([color_jittering, global_transfo2_extra, self.normalize])
self.local_transfo = transforms.Compose([color_jittering, local_transfo_extra, self.normalize])
def __call__(self, image):
output = {}
# global crops:
im1_base = self.geometric_augmentation_global(image)
global_crop_1 = self.global_transfo1(im1_base)
im2_base = self.geometric_augmentation_global(image)
global_crop_2 = self.global_transfo2(im2_base)
output["global_crops"] = [global_crop_1, global_crop_2]
# global crops for teacher:
output["global_crops_teacher"] = [global_crop_1, global_crop_2]
# local crops:
local_crops = [
self.local_transfo(self.geometric_augmentation_local(image)) for _ in range(self.local_crops_number)
]
output["local_crops"] = local_crops
output["offsets"] = ()
return output
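# Usage sketch: the crop scales and counts below are illustrative values, not taken from a
# training config; the callable turns one PIL image into a dict of tensor crops.
if __name__ == "__main__":
    from PIL import Image
    augmentation = DataAugmentationDINO(
        global_crops_scale=(0.32, 1.0),
        local_crops_scale=(0.05, 0.32),
        local_crops_number=8,
    )
    crops = augmentation(Image.new("RGB", (256, 256)))
    print(len(crops["global_crops"]), len(crops["local_crops"]), crops["global_crops"][0].shape)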
| EXA-1-master | exa/models/dinov2/dinov2/data/augmentations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple
from torch.utils.data import Dataset
class DatasetWithEnumeratedTargets(Dataset):
def __init__(self, dataset):
self._dataset = dataset
def get_image_data(self, index: int) -> bytes:
return self._dataset.get_image_data(index)
def get_target(self, index: int) -> Tuple[Any, int]:
target = self._dataset.get_target(index)
return (index, target)
def get_sample_decoder(self, index: int) -> Any:
return self._dataset.get_sample_decoder(index)
def __getitem__(self, index: int) -> Tuple[Any, Tuple[Any, int]]:
image, target = self._dataset[index]
target = index if target is None else target
return image, (index, target)
def __len__(self) -> int:
return len(self._dataset)
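# Usage sketch: wrapping any (image, target) style dataset so each target also carries the
# sample index, which downstream code can use to restore sample ordering.
if __name__ == "__main__":
    import torch
    from torch.utils.data import TensorDataset
    wrapped = DatasetWithEnumeratedTargets(TensorDataset(torch.randn(4, 3), torch.tensor([0, 1, 0, 1])))
    image, (index, target) = wrapped[2]
    print(index, int(target), image.shape)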
| EXA-1-master | exa/models/dinov2/dinov2/data/adapters.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import csv
from enum import Enum
import os
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from .extended import ExtendedVisionDataset
_Labels = int
class _Split(Enum):
TRAIN = "train"
VAL = "val"
TEST = "test" # NOTE: torchvision does not support the test split
@property
def length(self) -> int:
split_lengths = {
_Split.TRAIN: 1_281_167,
_Split.VAL: 50_000,
_Split.TEST: 100_000,
}
return split_lengths[self]
def get_dirname(self, class_id: Optional[str] = None) -> str:
return self.value if class_id is None else os.path.join(self.value, class_id)
def get_image_relpath(self, actual_index: int, class_id: Optional[str] = None) -> str:
dirname = self.get_dirname(class_id)
if self == _Split.TRAIN:
basename = f"{class_id}_{actual_index}"
else: # self in (_Split.VAL, _Split.TEST):
basename = f"ILSVRC2012_{self.value}_{actual_index:08d}"
return os.path.join(dirname, basename + ".JPEG")
def parse_image_relpath(self, image_relpath: str) -> Tuple[str, int]:
assert self != _Split.TEST
dirname, filename = os.path.split(image_relpath)
class_id = os.path.split(dirname)[-1]
basename, _ = os.path.splitext(filename)
actual_index = int(basename.split("_")[-1])
return class_id, actual_index
class ImageNet(ExtendedVisionDataset):
Labels = Union[_Labels]
Split = Union[_Split]
def __init__(
self,
*,
split: "ImageNet.Split",
root: str,
extra: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
self._extra_root = extra
self._split = split
entries_path = self._get_entries_path(split, root)
self._entries = self._load_extra(entries_path)
self._class_ids = None
self._class_names = None
if split == _Split.TEST:
return
class_ids_path = self._get_class_ids_path(split, root)
self._class_ids = self._load_extra(class_ids_path)
class_names_path = self._get_class_names_path(split, root)
self._class_names = self._load_extra(class_names_path)
@property
def split(self) -> "ImageNet.Split":
return self._split
def _load_extra(self, extra_path: str) -> np.ndarray:
extra_root = self._extra_root
extra_full_path = os.path.join(extra_root, extra_path)
return np.load(extra_full_path, mmap_mode="r")
def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
extra_root = self._extra_root
extra_full_path = os.path.join(extra_root, extra_path)
os.makedirs(extra_root, exist_ok=True)
np.save(extra_full_path, extra_array)
def _get_entries_path(self, split: "ImageNet.Split", root: Optional[str] = None) -> str:
return f"entries-{split.value.upper()}.npy"
def _get_class_ids_path(self, split: "ImageNet.Split", root: Optional[str] = None) -> str:
return f"class-ids-{split.value.upper()}.npy"
def _get_class_names_path(self, split: "ImageNet.Split", root: Optional[str] = None) -> str:
return f"class-names-{split.value.upper()}.npy"
def find_class_id(self, class_index: int) -> str:
assert self._class_ids is not None
return str(self._class_ids[class_index])
def find_class_name(self, class_index: int) -> str:
assert self._class_names is not None
return str(self._class_names[class_index])
def get_image_data(self, index: int) -> bytes:
actual_index = self._entries[index]["actual_index"]
class_id = self.get_class_id(index)
image_relpath = self.split.get_image_relpath(actual_index, class_id)
image_full_path = os.path.join(self.root, image_relpath)
with open(image_full_path, mode="rb") as f:
image_data = f.read()
return image_data
def get_target(self, index: int) -> Optional[_Labels]:
class_index = self._entries[index]["class_index"]
return None if self.split == _Split.TEST else int(class_index)
def get_targets(self) -> Optional[np.ndarray]:
return None if self.split == _Split.TEST else self._entries["class_index"]
def get_class_id(self, index: int) -> Optional[str]:
class_id = self._entries[index]["class_id"]
return None if self.split == _Split.TEST else str(class_id)
def get_class_name(self, index: int) -> Optional[str]:
class_name = self._entries[index]["class_name"]
return None if self.split == _Split.TEST else str(class_name)
def __len__(self) -> int:
assert len(self._entries) == self.split.length
return len(self._entries)
def _load_labels(self, root: str) -> List[Tuple[str, str]]:
path = os.path.join(root, "labels.txt")
labels = []
try:
with open(path, "r") as f:
reader = csv.reader(f)
for row in reader:
class_id, class_name = row
labels.append((class_id, class_name))
except OSError as e:
raise RuntimeError(f'can not read labels file "{path}"') from e
return labels
def _dump_entries(self, split: "ImageNet.Split", root: Optional[str] = None) -> None:
# NOTE: Using torchvision ImageFolder for consistency
from torchvision.datasets import ImageFolder
root = self.root
labels = self._load_labels(root)
if split == ImageNet.Split.TEST:
dataset = None
sample_count = split.length
max_class_id_length, max_class_name_length = 0, 0
else:
dataset_root = os.path.join(root, split.get_dirname())
dataset = ImageFolder(dataset_root)
sample_count = len(dataset)
max_class_id_length, max_class_name_length = -1, -1
for sample in dataset.samples:
_, class_index = sample
class_id, class_name = labels[class_index]
max_class_id_length = max(len(class_id), max_class_id_length)
max_class_name_length = max(len(class_name), max_class_name_length)
dtype = np.dtype(
[
("actual_index", "<u4"),
("class_index", "<u4"),
("class_id", f"U{max_class_id_length}"),
("class_name", f"U{max_class_name_length}"),
]
)
entries_array = np.empty(sample_count, dtype=dtype)
if split == ImageNet.Split.TEST:
for index in range(sample_count):
entries_array[index] = (index + 1, np.uint32(-1), "", "")
else:
class_names = {class_id: class_name for class_id, class_name in labels}
assert dataset
for index, _ in enumerate(dataset):
image_full_path, class_index = dataset.samples[index]
image_relpath = os.path.relpath(image_full_path, root)
class_id, actual_index = split.parse_image_relpath(image_relpath)
class_name = class_names[class_id]
entries_array[index] = (actual_index, class_index, class_id, class_name)
entries_path = self._get_entries_path(split, root)
self._save_extra(entries_array, entries_path)
def _dump_class_ids_and_names(self, split: "ImageNet.Split", root: Optional[str] = None) -> None:
if split == ImageNet.Split.TEST:
return
        root = self.root if root is None else root
entries_path = self._get_entries_path(split, root)
entries_array = self._load_extra(entries_path)
max_class_id_length, max_class_name_length, max_class_index = -1, -1, -1
for entry in entries_array:
class_index, class_id, class_name = (
entry["class_index"],
entry["class_id"],
entry["class_name"],
)
max_class_index = max(int(class_index), max_class_index)
max_class_id_length = max(len(str(class_id)), max_class_id_length)
max_class_name_length = max(len(str(class_name)), max_class_name_length)
class_count = max_class_index + 1
class_ids_array = np.empty(class_count, dtype=f"U{max_class_id_length}")
class_names_array = np.empty(class_count, dtype=f"U{max_class_name_length}")
for entry in entries_array:
class_index, class_id, class_name = (
entry["class_index"],
entry["class_id"],
entry["class_name"],
)
class_ids_array[class_index] = class_id
class_names_array[class_index] = class_name
class_ids_path = self._get_class_ids_path(split, root)
self._save_extra(class_ids_array, class_ids_path)
class_names_path = self._get_class_names_path(split, root)
self._save_extra(class_names_array, class_names_path)
def dump_extra(self, split: "ImageNet.Split", root: Optional[str] = None) -> None:
self._dump_entries(split, root)
self._dump_class_ids_and_names(split, root)
| EXA-1-master | exa/models/dinov2/dinov2/data/datasets/image_net.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
from typing import Any, Tuple
from PIL import Image
class Decoder:
def decode(self) -> Any:
raise NotImplementedError
class ImageDataDecoder(Decoder):
def __init__(self, image_data: bytes) -> None:
self._image_data = image_data
def decode(self) -> Image:
f = BytesIO(self._image_data)
return Image.open(f).convert(mode="RGB")
class TargetDecoder(Decoder):
def __init__(self, target: Any):
self._target = target
def decode(self) -> Any:
return self._target
class TupleDecoder(Decoder):
def __init__(self, *decoders: Decoder):
self._decoders: Tuple[Decoder, ...] = decoders
def decode(self) -> Any:
return (decoder.decode() for decoder in self._decoders)
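# Usage sketch: round-tripping an in-memory PNG through ImageDataDecoder.
if __name__ == "__main__":
    buffer = BytesIO()
    Image.new("RGB", (8, 8)).save(buffer, format="PNG")
    decoded = ImageDataDecoder(buffer.getvalue()).decode()
    print(decoded.size, decoded.mode)  # (8, 8) RGB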
| EXA-1-master | exa/models/dinov2/dinov2/data/datasets/decoders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple
from torchvision.datasets import VisionDataset
from .decoders import Decoder, TargetDecoder, ImageDataDecoder, TupleDecoder
class ExtendedVisionDataset(VisionDataset):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs) # type: ignore
def get_image_data(self, index: int) -> bytes:
raise NotImplementedError
def get_target(self, index: int) -> Any:
raise NotImplementedError
def __getitem__(self, index: int) -> Tuple[Any, Any]:
try:
image_data = self.get_image_data(index)
image = ImageDataDecoder(image_data).decode()
except Exception as e:
raise RuntimeError(f"can not read image for sample {index}") from e
target = self.get_target(index)
target = TargetDecoder(target).decode()
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def get_sample_decoder(self, index: int) -> Decoder:
image_data = self.get_image_data(index)
target = self.get_target(index)
return TupleDecoder(
ImageDataDecoder(image_data),
TargetDecoder(target),
)
def __len__(self) -> int:
raise NotImplementedError
| EXA-1-master | exa/models/dinov2/dinov2/data/datasets/extended.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .image_net import ImageNet
from .image_net_22k import ImageNet22k
| EXA-1-master | exa/models/dinov2/dinov2/data/datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum
from functools import lru_cache
from gzip import GzipFile
from io import BytesIO
from mmap import ACCESS_READ, mmap
import os
from typing import Any, Callable, List, Optional, Set, Tuple
import warnings
import numpy as np
from .extended import ExtendedVisionDataset
_Labels = int
_DEFAULT_MMAP_CACHE_SIZE = 16 # Warning: This can exhaust file descriptors
_IMAGES_SUBDIR_IMAGENET_21KP = "062717"
@dataclass
class _ClassEntry:
block_offset: int
maybe_filename: Optional[str] = None
@dataclass
class _Entry:
class_index: int # noqa: E701
start_offset: int
end_offset: int
filename: str
class _Split(Enum):
TRAIN = "train"
VAL = "val"
@property
def length(self) -> int:
return {
_Split.TRAIN: 11_797_647,
_Split.VAL: 561_050,
}[self]
def entries_path(self):
return f"imagenet21kp_{self.value}.txt"
def _get_tarball_path(class_id: str) -> str:
return f"{class_id}.tar"
def _make_mmap_tarball(tarballs_root: str, mmap_cache_size: int):
@lru_cache(maxsize=mmap_cache_size)
def _mmap_tarball(class_id: str) -> mmap:
tarball_path = _get_tarball_path(class_id)
tarball_full_path = os.path.join(tarballs_root, tarball_path)
with open(tarball_full_path) as f:
return mmap(fileno=f.fileno(), length=0, access=ACCESS_READ)
return _mmap_tarball
class ImageNet22k(ExtendedVisionDataset):
_GZIPPED_INDICES: Set[int] = {
841_545,
1_304_131,
2_437_921,
2_672_079,
2_795_676,
2_969_786,
6_902_965,
6_903_550,
6_903_628,
7_432_557,
7_432_589,
7_813_809,
8_329_633,
10_296_990,
10_417_652,
10_492_265,
10_598_078,
10_782_398,
10_902_612,
11_203_736,
11_342_890,
11_397_596,
11_589_762,
11_705_103,
12_936_875,
13_289_782,
}
Labels = _Labels
def __init__(
self,
*,
root: str,
extra: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
mmap_cache_size: int = _DEFAULT_MMAP_CACHE_SIZE,
) -> None:
super().__init__(root, transforms, transform, target_transform)
self._extra_root = extra
entries_path = self._get_entries_path(root)
self._entries = self._load_extra(entries_path)
class_ids_path = self._get_class_ids_path(root)
self._class_ids = self._load_extra(class_ids_path)
self._gzipped_indices = ImageNet22k._GZIPPED_INDICES
self._mmap_tarball = _make_mmap_tarball(self._tarballs_root, mmap_cache_size)
def _get_entries_path(self, root: Optional[str] = None) -> str:
return "entries.npy"
def _get_class_ids_path(self, root: Optional[str] = None) -> str:
return "class-ids.npy"
def _find_class_ids(self, path: str) -> List[str]:
class_ids = []
with os.scandir(path) as entries:
for entry in entries:
root, ext = os.path.splitext(entry.name)
if ext != ".tar":
continue
class_ids.append(root)
return sorted(class_ids)
def _load_entries_class_ids(self, root: Optional[str] = None) -> Tuple[List[_Entry], List[str]]:
        root = self.root if root is None else root
entries: List[_Entry] = []
class_ids = self._find_class_ids(root)
for class_index, class_id in enumerate(class_ids):
path = os.path.join(root, "blocks", f"{class_id}.log")
class_entries = []
try:
with open(path) as f:
for line in f:
line = line.rstrip()
block, filename = line.split(":")
block_offset = int(block[6:])
filename = filename[1:]
maybe_filename = None
if filename != "** Block of NULs **":
maybe_filename = filename
_, ext = os.path.splitext(filename)
# assert ext == ".JPEG"
class_entry = _ClassEntry(block_offset, maybe_filename)
class_entries.append(class_entry)
except OSError as e:
raise RuntimeError(f'can not read blocks file "{path}"') from e
assert class_entries[-1].maybe_filename is None
for class_entry1, class_entry2 in zip(class_entries, class_entries[1:]):
assert class_entry1.block_offset <= class_entry2.block_offset
start_offset = 512 * class_entry1.block_offset
end_offset = 512 * class_entry2.block_offset
assert class_entry1.maybe_filename is not None
filename = class_entry1.maybe_filename
entry = _Entry(class_index, start_offset, end_offset, filename)
# Skip invalid image files (PIL throws UnidentifiedImageError)
if filename == "n06470073_47249.JPEG":
continue
entries.append(entry)
return entries, class_ids
def _load_extra(self, extra_path: str) -> np.ndarray:
extra_root = self._extra_root
extra_full_path = os.path.join(extra_root, extra_path)
return np.load(extra_full_path, mmap_mode="r")
def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
extra_root = self._extra_root
extra_full_path = os.path.join(extra_root, extra_path)
os.makedirs(extra_root, exist_ok=True)
np.save(extra_full_path, extra_array)
@property
def _tarballs_root(self) -> str:
return self.root
def find_class_id(self, class_index: int) -> str:
return str(self._class_ids[class_index])
def get_image_data(self, index: int) -> bytes:
entry = self._entries[index]
class_id = entry["class_id"]
class_mmap = self._mmap_tarball(class_id)
start_offset, end_offset = entry["start_offset"], entry["end_offset"]
try:
mapped_data = class_mmap[start_offset:end_offset]
data = mapped_data[512:] # Skip entry header block
if len(data) >= 2 and tuple(data[:2]) == (0x1F, 0x8B):
assert index in self._gzipped_indices, f"unexpected gzip header for sample {index}"
with GzipFile(fileobj=BytesIO(data)) as g:
data = g.read()
except Exception as e:
raise RuntimeError(f"can not retrieve image data for sample {index} " f'from "{class_id}" tarball') from e
return data
def get_target(self, index: int) -> Any:
return int(self._entries[index]["class_index"])
def get_targets(self) -> np.ndarray:
return self._entries["class_index"]
def get_class_id(self, index: int) -> str:
return str(self._entries[index]["class_id"])
def get_class_ids(self) -> np.ndarray:
return self._entries["class_id"]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return super().__getitem__(index)
def __len__(self) -> int:
return len(self._entries)
def _dump_entries(self, *args, **kwargs) -> None:
entries, class_ids = self._load_entries_class_ids(*args, **kwargs)
max_class_id_length, max_filename_length, max_class_index = -1, -1, -1
for entry in entries:
class_id = class_ids[entry.class_index]
max_class_index = max(entry.class_index, max_class_index)
max_class_id_length = max(len(class_id), max_class_id_length)
max_filename_length = max(len(entry.filename), max_filename_length)
dtype = np.dtype(
[
("class_index", "<u4"),
("class_id", f"U{max_class_id_length}"),
("start_offset", "<u4"),
("end_offset", "<u4"),
("filename", f"U{max_filename_length}"),
]
)
sample_count = len(entries)
entries_array = np.empty(sample_count, dtype=dtype)
for i, entry in enumerate(entries):
class_index = entry.class_index
class_id = class_ids[class_index]
start_offset = entry.start_offset
end_offset = entry.end_offset
filename = entry.filename
entries_array[i] = (
class_index,
class_id,
start_offset,
end_offset,
filename,
)
entries_path = self._get_entries_path(*args, **kwargs)
self._save_extra(entries_array, entries_path)
def _dump_class_ids(self, *args, **kwargs) -> None:
entries_path = self._get_entries_path(*args, **kwargs)
entries_array = self._load_extra(entries_path)
max_class_id_length, max_class_index = -1, -1
for entry in entries_array:
class_index, class_id = entry["class_index"], entry["class_id"]
max_class_index = max(int(class_index), max_class_index)
max_class_id_length = max(len(str(class_id)), max_class_id_length)
class_ids_array = np.empty(max_class_index + 1, dtype=f"U{max_class_id_length}")
for entry in entries_array:
class_index, class_id = entry["class_index"], entry["class_id"]
class_ids_array[class_index] = class_id
class_ids_path = self._get_class_ids_path(*args, **kwargs)
self._save_extra(class_ids_array, class_ids_path)
def _dump_extra(self, *args, **kwargs) -> None:
        self._dump_entries(*args, **kwargs)
        self._dump_class_ids(*args, **kwargs)
def dump_extra(self, root: Optional[str] = None) -> None:
return self._dump_extra(root)
| EXA-1-master | exa/models/dinov2/dinov2/data/datasets/image_net_22k.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import os
import sys
from typing import Optional
import dinov2.distributed as distributed
from .helpers import MetricLogger, SmoothedValue
# So that calling _configure_logger multiple times won't add many handlers
@functools.lru_cache()
def _configure_logger(
name: Optional[str] = None,
*,
level: int = logging.DEBUG,
output: Optional[str] = None,
):
"""
Configure a logger.
Adapted from Detectron2.
Args:
name: The name of the logger to configure.
level: The logging level to use.
output: A file name or a directory to save log. If None, will not save log file.
If ends with ".txt" or ".log", assumed to be a file name.
Otherwise, logs will be saved to `output/log.txt`.
Returns:
The configured logger.
"""
logger = logging.getLogger(name)
logger.setLevel(level)
logger.propagate = False
# Loosely match Google glog format:
# [IWEF]yyyymmdd hh:mm:ss.uuuuuu threadid file:line] msg
# but use a shorter timestamp and include the logger name:
# [IWEF]yyyymmdd hh:mm:ss logger threadid file:line] msg
fmt_prefix = "%(levelname).1s%(asctime)s %(process)s %(name)s %(filename)s:%(lineno)s] "
fmt_message = "%(message)s"
fmt = fmt_prefix + fmt_message
datefmt = "%Y%m%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
# stdout logging for main worker only
if distributed.is_main_process():
handler = logging.StreamHandler(stream=sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
# file logging for all workers
if output:
if os.path.splitext(output)[-1] in (".txt", ".log"):
filename = output
else:
filename = os.path.join(output, "logs", "log.txt")
if not distributed.is_main_process():
global_rank = distributed.get_global_rank()
filename = filename + ".rank{}".format(global_rank)
os.makedirs(os.path.dirname(filename), exist_ok=True)
handler = logging.StreamHandler(open(filename, "a"))
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def setup_logging(
output: Optional[str] = None,
*,
name: Optional[str] = None,
level: int = logging.DEBUG,
capture_warnings: bool = True,
) -> None:
"""
Setup logging.
Args:
output: A file name or a directory to save log files. If None, log
files will not be saved. If output ends with ".txt" or ".log", it
is assumed to be a file name.
Otherwise, logs will be saved to `output/log.txt`.
name: The name of the logger to configure, by default the root logger.
level: The logging level to use.
capture_warnings: Whether warnings should be captured as logs.
"""
logging.captureWarnings(capture_warnings)
_configure_logger(name, level=level, output=output)
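# Usage sketch: configure stdout logging for the "dinov2" logger; passing a directory or a
# *.log / *.txt path as `output` also writes per-rank log files.
if __name__ == "__main__":
    setup_logging(name="dinov2", level=logging.INFO)
    logging.getLogger("dinov2").info("logging configured")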
| EXA-1-master | exa/models/dinov2/dinov2/logging/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict, deque
import datetime
import json
import logging
import time
import torch
import dinov2.distributed as distributed
logger = logging.getLogger("dinov2")
class MetricLogger(object):
def __init__(self, delimiter="\t", output_file=None):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
self.output_file = output_file
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def dump_in_output_file(self, iteration, iter_time, data_time):
if self.output_file is None or not distributed.is_main_process():
return
dict_to_dump = dict(
iteration=iteration,
iter_time=iter_time,
data_time=data_time,
)
dict_to_dump.update({k: v.median for k, v in self.meters.items()})
with open(self.output_file, "a") as f:
f.write(json.dumps(dict_to_dump) + "\n")
def log_every(self, iterable, print_freq, header=None, n_iterations=None, start_iteration=0):
i = start_iteration
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.6f}")
data_time = SmoothedValue(fmt="{avg:.6f}")
if n_iterations is None:
n_iterations = len(iterable)
space_fmt = ":" + str(len(str(n_iterations))) + "d"
log_list = [
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
if torch.cuda.is_available():
log_list += ["max mem: {memory:.0f}"]
log_msg = self.delimiter.join(log_list)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == n_iterations - 1:
self.dump_in_output_file(iteration=i, iter_time=iter_time.avg, data_time=data_time.avg)
eta_seconds = iter_time.global_avg * (n_iterations - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
logger.info(
log_msg.format(
i,
n_iterations,
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
logger.info(
log_msg.format(
i,
n_iterations,
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
if i >= n_iterations:
break
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info("{} Total time: {} ({:.6f} s / it)".format(header, total_time_str, total_time / n_iterations))
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, num=1):
self.deque.append(value)
self.count += num
self.total += value * num
def synchronize_between_processes(self):
"""
Distributed synchronization of the metric
Warning: does not synchronize the deque!
"""
if not distributed.is_enabled():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
torch.distributed.barrier()
torch.distributed.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
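# Usage sketch: log_every wraps any sized iterable and periodically reports smoothed meter
# values, iteration timings and an ETA through the "dinov2" logger.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    metric_logger = MetricLogger(delimiter="  ")
    for step in metric_logger.log_every(range(20), print_freq=10, header="demo:"):
        metric_logger.update(loss=1.0 / (step + 1))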
| EXA-1-master | exa/models/dinov2/dinov2/logging/helpers.py |
import torch
import torch.nn as nn
import math
from scipy.special import gamma as scipy_gamma
import functools
import torch.jit
def gamma(n):
return math.gamma(n)
# NOTE: kept as an eager function; TorchScript cannot compile the python callable argument below.
# def caputo_approximation(x, base_activation, derivative_order, h, n):
# k = torch.arange(n).float()
# x_expanded = x.view(-1, 1)
# h_expanded = h.view(-1, 1)
# term = ((-1)**k) * scipy_gamma(derivative_order + k + 1) / (torch.tensor([math.factorial(int(k_i)) for k_i in k]) * scipy_gamma(derivative_order + 1)) * (base_activation(x_expanded - k * h_expanded) - base_activation(x_expanded - (k + 1) * h_expanded))
# sum_terms = torch.sum(term, dim=-1)
# return sum_terms / h
# def caputo_approximation(x, base_activation, derivative_order, h, n):
# k = torch.arange(n).float().view(1, -1)
# x_expanded = x.view(-1, 1)
# h_expanded = h.view(-1, 1)
# term = ((-1)**k) * scipy_gamma(derivative_order + k + 1) / (torch.tensor([math.factorial(int(k_i)) for k_i in k]) * scipy_gamma(derivative_order + 1)) * (base_activation(x_expanded - k * h_expanded) - base_activation(x_expanded - (k + 1) * h_expanded))
# sum_terms = torch.sum(term, dim=-1)
# return sum_terms / h
def caputo_approximation(x, base_activation, derivative_order, h, n):
    # Weighted sum of backward differences of base_activation approximating the fractional
    # derivative; lgamma keeps the Gamma-function coefficients in torch (Gamma(k + 1) == k!).
    k = torch.arange(n).float().view(1, -1)
    x_expanded = x.view(-1, 1)
    h_expanded = h.view(-1, 1)
    coefficients = ((-1) ** k) * torch.exp(torch.lgamma(derivative_order + k + 1) - torch.lgamma(k + 1) - math.lgamma(derivative_order + 1))
    term = coefficients * (base_activation(x_expanded - k * h_expanded) - base_activation(x_expanded - (k + 1) * h_expanded))
    sum_terms = torch.sum(term, dim=-1)
    return sum_terms / h
class CaputoFractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order, n=5):
super(CaputoFractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = derivative_order
self.n = n
@functools.lru_cache(maxsize=None)
def memoized_base_activation(self, x):
return self.base_activation(x)
    def adaptive_step_size(self, x, min_step=1e-6, max_step=1e-3):
        # Scale the step with how far each input sits from the batch mean, then clamp so the
        # finite-difference step stays strictly positive and bounded.
        x_mean = torch.mean(x)
        x_std = torch.std(x) + 1e-12
        step_size = min_step + (x - x_mean) / x_std * (max_step - min_step)
        return torch.clamp(step_size, min=min_step, max=max_step)
# def forward(self, x):
# h = self.adaptive_step_size(x)
# # Compute the base activation function
# base = self.memoized_base_activation(x)
# # Compute the Caputo approximate fractional derivative
# fractional_derivative = caputo_approximation(x, self.memoized_base_activation, self.derivative_order, h.view(-1), self.n)
# # Combine the base activation function with its fractional derivative (e.g., addition)
# output = base + fractional_derivative
# return output.view_as(x)
def forward(self, x):
h = self.adaptive_step_size(x)
# Compute the base activation function
base = self.memoized_base_activation(x)
# Compute the Caputo approximate fractional derivative
fractional_derivative = caputo_approximation(x, self.memoized_base_activation, self.derivative_order, h.view(-1), self.n)
# Combine the base activation function with its fractional derivative (e.g., addition)
output = base.view_as(fractional_derivative) + fractional_derivative
return output.view_as(x)
| EXA-1-master | exa/modular_components/activations/neox/neox5.py |
import torch
import torch.nn as nn
import math
from scipy.special import gamma as scipy_gamma
import functools
# @functools.lru_cache(maxsize=None)
# def memoized_base_activation(x):
# return base_activation(x)
def gamma(n):
return math.gamma(n)
# def caputo_approximation(x, base_activation, derivative_order, h, n):
# k = torch.arange(n).float()
# term = ((-1)**k) * scipy_gamma(derivative_order + k + 1) / (torch.tensor([math.factorial(int(k_i)) for k_i in k]) * scipy_gamma(derivative_order + 1)) * (base_activation(x - k * h) - base_activation(x - (k + 1) * h))
# sum_terms = torch.sum(term, dim=-1)
# return sum_terms / h
def caputo_approximation(x, base_activation, derivative_order, h, n):
k = torch.arange(n).float()
x_expanded = x.view(-1, 1)
h_expanded = h.view(-1, 1)
term = ((-1)**k) * scipy_gamma(derivative_order + k + 1) / (torch.tensor([math.factorial(int(k_i)) for k_i in k]) * scipy_gamma(derivative_order + 1)) * (base_activation(x_expanded - k * h_expanded) - base_activation(x_expanded - (k + 1) * h_expanded))
sum_terms = torch.sum(term, dim=-1)
return sum_terms / h
class CaputoFractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order, n=10):
super(CaputoFractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = derivative_order
self.n = n
self.base_cache = {}
def adaptive_step_size(self, x, min_step=1e-6, max_step=1e-3):
x_normalized = (x - torch.min(x)) / (torch.max(x) - torch.min(x))
step_size = min_step + x_normalized * (max_step - min_step)
return step_size
# def forward(self, x):
# h = self.adaptive_step_size(x)
# # Compute the base activation function
# base = torch.zeros_like(x)
# for i, x_i in enumerate(x.view(-1)):
# x_i_item = x_i.item()
# if torch.isnan(x_i):
# base.view(-1)[i] = x_i
# continue
# if x_i_item not in self.base_cache:
# self.base_cache[x_i_item] = self.base_activation(x_i)
# base.view(-1)[i] = self.base_cache[x_i_item]
# # Compute the Caputo approximate fractional derivative
# fractional_derivative = caputo_approximation(x, self.base_activation, self.derivative_order, h.view(-1), self.n)
# # Combine the base activation function with its fractional derivative (e.g., addition)
# output = base + fractional_derivative
# return output.view_as(x)
def forward(self, x):
h = self.adaptive_step_size(x)
# Compute the base activation function
base = torch.zeros_like(x)
for i, x_i in enumerate(x.view(-1)):
x_i_item = x_i.item()
if torch.isnan(x_i):
base.view(-1)[i] = x_i
continue
if x_i_item not in self.base_cache:
self.base_cache[x_i_item] = self.base_activation(x_i)
base.view(-1)[i] = self.base_cache[x_i_item]
# Compute the Caputo approximate fractional derivative
fractional_derivative = caputo_approximation(x, self.base_activation, self.derivative_order, h.view(-1), self.n)
# Combine the base activation function with its fractional derivative (e.g., addition)
output = base.view(-1) + fractional_derivative
return output.view_as(x) | EXA-1-master | exa/modular_components/activations/neox/neox4.py |
import torch
import torch.nn as nn
from neox2 import SimplifiedOptimizedFractionalActivation
# import torch
# import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, random_split
# from neox3 import CaputoFractionalActivation
# from neox4 import CaputoFractionalActivation
# from neox5 import CaputoFractionalActivation
# from neox6 import CaputoFractionalActivation
from neo7 import CaputoFractionalActivation
from torch.utils.data import DataLoader
#test neo7's CaputoFractionalActivation (the other variants above are kept for reference)
class NovelActivationModel(nn.Module):
def __init__(self):
super(NovelActivationModel, self).__init__()
self.fc1 = nn.Linear(20, 128)
self.act1 = CaputoFractionalActivation(nn.ReLU(), 0.5)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.fc1(x)
x = self.act1(x)
x = self.fc2(x)
return x
class ReLUModel(nn.Module):
def __init__(self):
super(ReLUModel, self).__init__()
self.fc1 = nn.Linear(20, 128)
self.act1 = nn.ReLU()
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.fc1(x)
x = self.act1(x)
x = self.fc2(x)
return x
novel_activation_model = NovelActivationModel()
relu_model = ReLUModel()
# 1. Generate synthetic dataset
num_samples = 1000
input_features = 20
output_classes = 2
# 1a. Generate random input features
X = torch.randn(num_samples, input_features)
# 1b. Generate corresponding output labels based on a predefined rule or function
y = (X.sum(dim=1) > 0).long()
# 2. Split the dataset into training and validation sets
dataset = TensorDataset(X, y)
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
# 3. Create the neural network models
novel_activation_model = NovelActivationModel()
relu_model = ReLUModel()
# 4. Define the loss function, optimizer, and other hyperparameters
criterion = nn.CrossEntropyLoss()
novel_optimizer = optim.Adam(novel_activation_model.parameters(), lr=0.001)
relu_optimizer = optim.Adam(relu_model.parameters(), lr=0.001)
epochs = 50
batch_size = 64
# # 5. Train the models
# def train_model(model, optimizer, train_dataset, val_dataset, criterion, epochs, batch_size):
# train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# val_loader = DataLoader(val_dataset, batch_size=batch_size)
# for epoch in range(epochs):
# model.train()
# for inputs, labels in train_loader:
# optimizer.zero_grad()
# outputs = model(inputs)
# loss = criterion(outputs, labels)
# loss.backward()
# optimizer.step()
# model.eval()
# val_loss = 0
# correct = 0
# total = 0
# with torch.no_grad():
# for inputs, labels in val_loader:
# outputs = model(inputs)
# loss = criterion(outputs, labels)
# val_loss += loss.item()
# _, predicted = torch.max(outputs, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
# accuracy = correct / total
# print(f'Epoch {epoch + 1}/{epochs}, Validation Loss: {val_loss / len(val_loader):.4f}, Accuracy: {accuracy:.4f}')
from torch.utils.data import DataLoader
def train_model(model, optimizer, train_dataset, val_dataset, criterion, epochs, batch_size):
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
for epoch in range(epochs):
model.train()
for inputs, targets in train_loader:
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward(retain_graph=True)
optimizer.step()
# Validation
model.eval()
with torch.no_grad():
val_loss = 0.0
correct = 0
total = 0
for inputs, targets in val_loader:
outputs = model(inputs)
loss = criterion(outputs, targets)
val_loss += loss.item()
_, predicted = torch.max(outputs, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
val_loss /= len(val_loader)
accuracy = correct / total
print(f'Epoch {epoch + 1}/{epochs}, Validation Loss: {val_loss:.4f}, Accuracy: {accuracy:.4f}')
train_model(novel_activation_model, novel_optimizer, train_dataset, val_dataset, criterion, epochs, batch_size)
train_model(relu_model, relu_optimizer, train_dataset, val_dataset, criterion, epochs, batch_size)
# 6. Evaluate the models on the validation set and compare their performance
# The evaluation is done during the training process, and the performance is printed at each epoch. | EXA-1-master | exa/modular_components/activations/neox/test.py |
import torch
import torch.nn as nn
def fractional_derivative(x, base_activation, derivative_order, h=1e-5):
# Apply base activation function on x
base = base_activation(x)
# Apply base activation function on x + h
base_plus_h = base_activation(x + h)
# Compute the fractional derivative using Gruenwald-Letnikov definition
fractional_derivative = ((base_plus_h - base) / h) ** derivative_order
return fractional_derivative
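# Minimal sanity-check sketch (added for illustration; not part of the original module).
# With the identity function and derivative_order = 1 the Gruenwald-Letnikov-style
# expression above reduces to an ordinary forward difference, so the result should be
# close to 1.0 for any input.
if __name__ == "__main__":
    _x = torch.linspace(-1.0, 1.0, 5)
    print(fractional_derivative(_x, lambda t: t, 1.0))  # expected: values close to 1.0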
class FractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order):
super(FractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = derivative_order
def forward(self, x):
# Compute the base activation function
base = self.base_activation(x)
        # Compute the fractional derivative (use a separate local name so the module-level
        # fractional_derivative function is not shadowed and can actually be called)
        frac_derivative = fractional_derivative(x, self.base_activation, self.derivative_order)
        # Combine the base activation function with its fractional derivative (e.g., addition)
        output = base + frac_derivative
return output
class SimpleNetwork(nn.Module):
def __init__(self, input_size, hidden_size, output_size, base_activation, derivative_order):
super(SimpleNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.activation = FractionalActivation(base_activation, derivative_order)
self.fc2 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.fc1(x)
x = self.activation(x)
x = self.fc2(x)
return x
input_size = 784 # Example for MNIST dataset
hidden_size = 128
output_size = 10
base_activation = torch.relu
derivative_order = 0.5 # Example fractional order
network = SimpleNetwork(input_size, hidden_size, output_size, base_activation, derivative_order) | EXA-1-master | exa/modular_components/activations/neox/neox.py |
import torch
import torch.nn as nn
import math
import torch.jit
def relu_activation(x):
return torch.relu(x)
@torch.jit.script
def caputo_approximation(x, derivative_order, h, n):
k = torch.arange(n).float().view(1, -1)
x_expanded = x.view(-1, 1)
h_expanded = h.view(-1, 1)
factorial_k = torch.empty_like(k)
for i in range(k.shape[1]):
factorial_k[0, i] = math.factorial(int(k[0, i]))
term = ((-1)**k) * torch.exp(torch.lgamma(derivative_order + k + 1)) / (factorial_k * torch.exp(torch.lgamma(derivative_order + 1))) * (relu_activation(x_expanded - k * h_expanded) - relu_activation(x_expanded - (k + 1) * h_expanded))
sum_terms = torch.sum(term, dim=-1)
return sum_terms / h
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
self.beta = nn.Parameter(torch.tensor(1.0))
def forward(self, x):
return x * torch.sigmoid(self.beta * x)
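# Quick usage sketch (illustrative addition, not referenced elsewhere in this file):
# Swish is x * sigmoid(beta * x) with a learnable beta, so beta is trained by backprop
# like any other nn.Parameter.
if __name__ == "__main__":
    _swish = Swish()
    print(_swish(torch.tensor([-1.0, 0.0, 1.0])))  # with beta = 1 this is plain SiLU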
class CaputoFractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order, n=20):
super(CaputoFractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = torch.tensor(float(derivative_order))
self.n = torch.tensor(n)
self.alpha = nn.Parameter(torch.tensor(1.0))
def adaptive_step_size(self, x, min_step=1e-6, max_step=1e-3):
x_mean = torch.mean(x)
x_std = torch.std(x)
step_size = min_step + self.alpha * (x - x_mean) / x_std * (max_step - min_step)
return step_size
def forward(self, x):
h = self.adaptive_step_size(x)
base = self.base_activation(x)
fractional_derivative = caputo_approximation(x, self.derivative_order, h.view(-1), self.n)
output = base.view_as(fractional_derivative) + fractional_derivative
return output.view_as(x) | EXA-1-master | exa/modular_components/activations/neox/neo7.py |
import torch
import torch.nn as nn
import math
def gamma(n):
return math.gamma(n)
def caputo_approximation(x, base_activation, derivative_order, h, n):
sum_terms = 0.0
for k in range(n):
term = ((-1)**k) * gamma(derivative_order + k + 1) / (math.factorial(k) * gamma(derivative_order + 1)) * (base_activation(x - k * h) - base_activation(x - (k + 1) * h))
sum_terms += term
return sum_terms / h
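# Smoke-test sketch (added for illustration; the original file defines no demo):
# the truncated Caputo sum above accepts a tensor x, a base activation, a fractional
# order, a scalar step h and a truncation length n.
if __name__ == "__main__":
    _x = torch.tensor([-0.5, 0.0, 0.5])
    print(caputo_approximation(_x, torch.relu, 0.5, h=1e-5, n=10))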
class CaputoFractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order, n=10):
super(CaputoFractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = derivative_order
self.n = n
def forward(self, x):
h = 1e-5 # You can use the adaptive step size function from the previous examples to get h
# Compute the base activation function
base = self.base_activation(x)
# Compute the Caputo approximate fractional derivative
fractional_derivative = caputo_approximation(x, self.base_activation, self.derivative_order, h, self.n)
# Combine the base activation function with its fractional derivative (e.g., addition)
output = base + fractional_derivative
return output
| EXA-1-master | exa/modular_components/activations/neox/neox3.py |
import torch
import torch.nn as nn
import math
from scipy.special import gamma as scipy_gamma
import functools
import torch.jit
def gamma(n):
return math.gamma(n)
def relu_activation(x):
return torch.relu(x)
def factorial_tensor(tensor):
return torch.tensor([math.factorial(int(k_i)) for k_i in tensor])
@torch.jit.script
def caputo_approximation(x, derivative_order, h, n):
k = torch.arange(n).float().view(1, -1)
x_expanded = x.view(-1, 1)
h_expanded = h.view(-1, 1)
# Compute the factorial of each element in k
factorial_k = torch.empty_like(k)
for i in range(k.shape[1]):
factorial_k[0, i] = math.factorial(int(k[0, i]))
term = ((-1)**k) * torch.exp(torch.lgamma(derivative_order + k + 1)) / (factorial_k * torch.exp(torch.lgamma(derivative_order + 1))) * (relu_activation(x_expanded - k * h_expanded) - relu_activation(x_expanded - (k + 1) * h_expanded))
sum_terms = torch.sum(term, dim=-1)
return sum_terms / h
class CaputoFractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order, n=2):
super(CaputoFractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = torch.tensor(float(derivative_order)) # Convert derivative_order to a tensor
self.n = torch.tensor(n) # Convert n to a tensor
def adaptive_step_size(self, x, min_step=1e-6, max_step=1e-3):
x_mean = torch.mean(x)
x_std = torch.std(x)
step_size = min_step + (x - x_mean) / x_std * (max_step - min_step)
return step_size
def forward(self, x):
h = self.adaptive_step_size(x)
# Compute the base activation function
base = self.base_activation(x)
# Compute the Caputo approximate fractional derivative
fractional_derivative = caputo_approximation(x, self.derivative_order, h.view(-1), self.n)
# Combine the base activation function with its fractional derivative (e.g., addition)
output = base.view_as(fractional_derivative) + fractional_derivative
return output.view_as(x) | EXA-1-master | exa/modular_components/activations/neox/neox6.py |
import torch
import torch.nn as nn
def fractional_derivative(x, base_activation, derivative_order, h):
base = base_activation(x)
base_plus_h = base_activation(x + h)
fractional_derivative = ((base_plus_h - base) / h) ** derivative_order
return fractional_derivative
class SimplifiedOptimizedFractionalActivation(nn.Module):
def __init__(self, base_activation, derivative_order):
super(SimplifiedOptimizedFractionalActivation, self).__init__()
self.base_activation = base_activation
self.derivative_order = derivative_order
self.base_cache = {}
# def adaptive_step_size(x, min_step=1e-6, max_step=1e-3):
# # Normalize the input x
# x_normalized = (x - x.min()) / (x.max() - x.min())
# # Calculate the desired step size based on the normalized input
# step_size = min_step + x_normalized * (max_step - min_step)
# return step_size
#v2 TypeError: min(): argument 'input' (position 1) must be Tensor, not SimplifiedOptimizedFractionalActivation
# def adaptive_step_size(x, min_step=1e-6, max_step=1e-3):
# # Normalize the input x
# x_normalized = (x - torch.min(x)) / (torch.max(x) - torch.min(x))
# # Calculate the desired step size based on the normalized input
# step_size = min_step + x_normalized * (max_step - min_step)
# return step_size
#v3
def adaptive_step_size(self, x, min_step=1e-6, max_step=1e-3):
# Normalize the input x
x_normalized = (x - torch.min(x)) / (torch.max(x) - torch.min(x))
# Calculate the desired step size based on the normalized input
step_size = min_step + x_normalized * (max_step - min_step)
return step_size
#v1 -RuntimeError: a Tensor with 8192 elements cannot be converted to Scalar
# def forward(self, x):
# h = self.adaptive_step_size(x)
# # Caching base activation
# if x.item() not in self.base_cache:
# self.base_cache[x.item()] = self.base_activation(x)
# base = self.base_cache[x.item()]
# # Approximate fractional derivative
# fractional_derivative = fractional_derivative(x, self.base_activation, self.derivative_order, h)
# # Combine the base activation function with its fractional derivative (e.g., addition)
# output = base + fractional_derivative
# return output
#v2 -> UnboundLocalError: cannot access local variable 'fractional_derivative' where it is not associated with a value
# def forward(self, x):
# h = self.adaptive_step_size(x)
# # Caching base activation
# output = torch.zeros_like(x)
# for i, x_i in enumerate(x.view(-1)):
# if x_i.item() not in self.base_cache:
# self.base_cache[x_i.item()] = self.base_activation(x_i)
# base = self.base_cache[x_i.item()]
# # Approximate fractional derivative
# fractional_derivative = fractional_derivative(x_i, self.base_activation, self.derivative_order, h[i])
# # Combine the base activation function with its fractional derivative (e.g., addition)
# output[i] = base + fractional_derivative
# return output.view_as(x)
#v3
# def forward(self, x):
# h = self.adaptive_step_size(x)
# # Caching base activation
# output = torch.zeros_like(x)
# for i, x_i in enumerate(x.view(-1)):
# if x_i.item() not in self.base_cache:
# self.base_cache[x_i.item()] = self.base_activation(x_i)
# base = self.base_cache[x_i.item()]
# # Approximate fractional derivative
# frac_derivative = fractional_derivative(x_i, self.base_activation, self.derivative_order, h[i])
# # Combine the base activation function with its fractional derivative (e.g., addition)
# output[i] = base + frac_derivative
# return output.view_as(x)
#v4 -> IndexError: index 64 is out of bounds for dimension 0 with size 64
#IndexError: index 64 is out of bounds for dimension 0 with size 64
# def forward(self, x):
# h = self.adaptive_step_size(x)
# # Caching base activation
# output = torch.zeros_like(x)
# for i, x_i in enumerate(x.view(-1)):
# if x_i.item() not in self.base_cache:
# self.base_cache[x_i.item()] = self.base_activation(x_i)
# base = self.base_cache[x_i.item()]
# # Approximate fractional derivative
# frac_derivative = fractional_derivative(x_i, self.base_activation, self.derivative_order, h.view(-1)[i])
# # Combine the base activation function with its fractional derivative (e.g., addition)
# output[i] = base + frac_derivative
# return output.view_as(x)
#v5 - KeyError: nan
# def forward(self, x):
#     h = self.adaptive_step_size(x)
#     # Caching base activation
#     output = torch.zeros_like(x)
#     for i, x_i in enumerate(x.view(-1)):
#         if x_i.item() not in self.base_cache:
#             self.base_cache[x_i.item()] = self.base_activation(x_i)
#         base = self.base_cache[x_i.item()]
#         # Approximate fractional derivative
#         frac_derivative = fractional_derivative(x_i, self.base_activation, self.derivative_order, h.view(-1)[i])
#         # Combine the base activation function with its fractional derivative (e.g., addition)
#         output.view(-1)[i] = base + frac_derivative
#     return output.view_as(x)
#v6
def forward(self, x):
h = self.adaptive_step_size(x)
# Caching base activation
output = torch.zeros_like(x)
for i, x_i in enumerate(x.view(-1)):
x_i_item = x_i.item()
if torch.isnan(x_i):
output.view(-1)[i] = x_i
continue
if x_i_item not in self.base_cache:
self.base_cache[x_i_item] = self.base_activation(x_i)
base = self.base_cache[x_i_item]
# Approximate fractional derivative
frac_derivative = fractional_derivative(x_i, self.base_activation, self.derivative_order, h.view(-1)[i])
# Combine the base activation function with its fractional derivative (e.g., addition)
output.view(-1)[i] = base + frac_derivative
return output.view_as(x) | EXA-1-master | exa/modular_components/activations/neox/neox2.py |
import cmath
import math
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
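# Illustrative check (an addition; the training code below does not call this): for the
# (2, 3) torus knot (the trefoil) the closed form above agrees with the classical Jones
# polynomial -t**4 + t**3 + t (up to mirror/convention), which evaluates to -6 at t = 2.
if __name__ == "__main__":
    print(jones_polynomial_torus_knot(2, 3).subs('t', 2))  # approximately -6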
# def knot_invariant(x):
# #convert the input value into a knot representation (m, n ) for a torus knot
# m, n = int(x), int(x + 1)
# #calculate the jones polynomial for the torus knot
# jones_poly = jones_polynomial_torus_knot(m, n)
# #eval the jones polynomial at a specific point
# knot_inv = jones_poly.subs('t', 2)
# return float(knot_inv)
# def knot_invariant(x):
# z = complex(x, 0)
# return (z * cmath.exp(-z * z / 2)).real
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
return knot_representation
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
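# Minimal end-to-end sketch of the pipeline above (illustrative addition): a scalar input is
# mapped to a torus-knot pair (m, n), and that pair plus z0 = 0 seeds a Lorenz trajectory
# whose x-coordinate at t = 1 becomes the modulation value.
if __name__ == "__main__":
    _knot = knot_invariant(0.7)  # e.g. 0.7 -> (1, 2)
    print(_knot, dynamical_systems_modeling(_knot))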
# def knot_gelu(x):
# knot_inv = [knot_invariant(val.item()) for val in x]
# dyn_sys_mod = [dynamical_systems_modeling(knot_inv_val) for knot_inv_val in knot_inv]
# dyn_sys_mod_tensor = torch.tensor(dyn_sys_mod, dtype=torch.float, device=x.device)
# knot_gelu_output = 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / np.pi, device=x.device)) * (x + 0.044715 * torch.pow(x, 3))) * dyn_sys_mod_tensor)
# return knot_gelu_output
# def knot_gelu(x):
# knot_inv = x.apply_(knot_invariant)
# dyn_sys_mod = torch.tensor([dynamical_systems_modeling(knot_inv_val.item()) for knot_inv_val in knot_inv], dtype=torch.float, device=x.device)
# knot_gelu_output = 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / np.pi, device=x.device)) * (x + 0.044715 * torch.pow(x, 3))) * dyn_sys_mod)
# return knot_gelu_output
# def knot_gelu(x):
# def knot_invariant_grad(grad):
# return grad.apply_(knot_invariant)
# knot_inv = x.detach().apply_(knot_invariant)
# dyn_sys_mod = torch.tensor([dynamical_systems_modeling(knot_inv_val.item()) for knot_inv_val in knot_inv], dtype=torch.float, device=x.device)
# knot_gelu_output = 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / np.pi, device=x.device)) * (x + 0.044715 * torch.pow(x, 3))) * dyn_sys_mod)
# knot_gelu_output.register_hook(knot_invariant_grad)
# return knot_gelu_output
# def knot_gelu(x):
# knot_inv = x.detach().cpu().apply_(knot_invariant)
# knot_inv = torch.tensor(knot_inv, dtype=torch.float32, device=x.device)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv**3)))
def knot_gelu(x):
    # knot_invariant returns a torus-knot pair (m, n) here, so take the product m * n as a
    # scalar invariant and flatten x so .item() always sees a single element
    x_flat = x.view(-1)
    knot_inv = torch.tensor([float(np.prod(knot_invariant(val.item()))) for val in x_flat], dtype=torch.float32, device=x.device).view_as(x_flat)
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv.view_as(x)**3)))
# Custom Activation Layer
class CustomActivation(nn.Module):
def __init__(self, activation_type):
super(CustomActivation, self).__init__()
self.activation_type = activation_type
def forward(self, x):
if self.activation_type == 'gelu':
return 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / math.pi)) * (x + 0.044715 * torch.pow(x, 3))))
elif self.activation_type == 'knot_gelu':
return knot_gelu(x)
else:
raise ValueError("Invalid activation type")
# Simple Neural Network
class SimpleNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, activation_type='gelu'):
super(SimpleNN, self).__init__()
self.input_layer = nn.Linear(input_size, hidden_size)
self.activation = CustomActivation(activation_type)
self.output_layer = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.input_layer(x)
x = self.activation(x)
x = self.output_layer(x)
return x
# Define input_size, hidden_size, and output_size based on your data and problem
input_size = 10
hidden_size = 20
output_size = 2
# Initialize Simple Neural Networks with GELU and KnotGELU activations
nn_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='gelu')
nn_knot_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='knot_gelu')
# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer_gelu = optim.SGD(nn_gelu.parameters(), lr=0.01)
optimizer_knot_gelu = optim.SGD(nn_knot_gelu.parameters(), lr=0.01)
# Train the networks and compare their performance on your dataset
# Generate synthetic dataset
num_samples = 1000
X = torch.randn(num_samples, input_size)
Y = torch.randint(0, output_size, (num_samples,))
# Split dataset into training and testing sets
train_ratio = 0.8
train_size = int(train_ratio * num_samples)
test_size = num_samples - train_size
X_train, X_test = torch.split(X, [train_size, test_size])
Y_train, Y_test = torch.split(Y, [train_size, test_size])
# Create DataLoaders
train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)
test_dataset = torch.utils.data.TensorDataset(X_test, Y_test)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)
# Training loop
def train(network, dataloader, optimizer, criterion, device):
network.train()
running_loss = 0.0
correct = 0
total = 0
for inputs, targets in dataloader:
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = network(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
running_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return running_loss / len(dataloader), correct / total
# Training settings
epochs = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
nn_gelu.to(device)
nn_knot_gelu.to(device)
# Train and log progress
for epoch in range(epochs):
gelu_loss, gelu_acc = train(nn_gelu, train_loader, optimizer_gelu, criterion, device)
knot_gelu_loss, knot_gelu_acc = train(nn_knot_gelu, train_loader, optimizer_knot_gelu, criterion, device)
print(f'Epoch {epoch+1}/{epochs}:')
print(f' GELU | Loss: {gelu_loss:.4f} | Accuracy: {gelu_acc:.4f}')
print(f' KnotGELU| Loss: {knot_gelu_loss:.4f} | Accuracy: {knot_gelu_acc:.4f}') | EXA-1-master | exa/modular_components/activations/KNOTX/v4.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import asyncio
import concurrent.futures
# Set the default device to use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def lorenz_ode(x0, y0, z0, sigma=10, rho=28, beta=8/3, dt=0.01, steps=1000):
x, y, z = x0, y0, z0
for _ in range(steps):
dx = sigma * (y - x) * dt
dy = (x * (rho - z) - y) * dt
dz = (x * y - beta * z) * dt
x, y, z = x + dx, y + dy, z + dz
return x, y, z
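# Usage sketch (illustrative, not used by the benchmark below): forward-Euler integration of
# the Lorenz system from (1, 1, 1); with the default dt=0.01 and 1000 steps this covers
# roughly t in [0, 10] and returns the final (x, y, z) state.
if __name__ == "__main__":
    print(lorenz_ode(1.0, 1.0, 1.0))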
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def convert_to_knot_representation(x):
m = int(np.ceil(x))
n = m + 1
return (m, n)
def vectorized_knot_invariant(x):
m = np.ceil(x)
n = m + 1
return m * n
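# Illustrative check (an addition): the vectorised invariant maps each entry to
# ceil(x) * (ceil(x) + 1), e.g. [0.3, 2.7] -> [2., 12.].
if __name__ == "__main__":
    print(vectorized_knot_invariant(np.array([0.3, 2.7])))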
def fast_tanh(x):
return torch.tanh(x)
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0]
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True, method='BDF')
output_value = sol.sol(1)[0]
return output_value
async def parallel_lorenz_solver(initial_states):
loop = asyncio.get_event_loop()
with concurrent.futures.ThreadPoolExecutor() as executor:
tasks = [loop.run_in_executor(executor, dynamical_systems_modeling, state) for state in initial_states]
results = await asyncio.gather(*tasks)
return results
# def knotx(x, device):
# x_flat = x.view(-1)
# x_flat = x_flat.to(device)
# knot_representation = np.array([convert_to_knot_representation(val.item()) for val in x_flat])
# lorenz_output = asyncio.run(parallel_lorenz_solver(knot_representation))
# lorenz_output = torch.tensor(lorenz_output, dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * (1 + lorenz_output)
def knotx(x, device):
output = torch.empty_like(x)
for i in range(x.shape[0]):
x0, y0, z0 = x[i], x[i] + 1, x[i] + 2
output[i] = lorenz_ode(x0, y0, z0)[-1]
return output
from torch.profiler import profile, record_function
def profile_knotx(x):
    with profile(activities=[torch.profiler.ProfilerActivity.CPU], record_shapes=True) as prof:
        with record_function("knotx"):
            result = knotx(x, device)  # knotx needs the target device as its second argument
    print(prof.key_averages().table(sort_by="self_cpu_time_total"))
    return result
# def optimized_knotx(x: torch.Tensor, device: torch.device) -> torch.Tensor:
# x_flat = x.view(-1)
# x_flat = x_flat.to(device)
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# knot_inv_tensor = torch.tensor(knot_inv, dtype=torch.float32, device=x.device)
# lorenz_output = 1 + fast_tanh(knot_inv_tensor**3).view_as(x_flat)
# return x * lorenz_output.view_as(x)
def optimized_knotx(x, device):
x0, y0, z0 = x, x + 1, x + 2
x, y, z = lorenz_ode(x0, y0, z0)
return z
import timeit
import psutil
import os
x = torch.randn(1000, device=device) # Create a random tensor of shape (1000,) for testing
def measure_time_and_memory(func, x, device, num_runs=100):
start_time = timeit.default_timer()
start_memory = psutil.Process(os.getpid()).memory_info().rss
for _ in range(num_runs):
result = func(x.clone(), device)
end_time = timeit.default_timer()
end_memory = psutil.Process(os.getpid()).memory_info().rss
time_elapsed = (end_time - start_time) / num_runs
memory_used = end_memory - start_memory
return time_elapsed, memory_used
initial_states = [convert_to_knot_representation(x) for x in [0.5, 1.0, 1.5]]
results = asyncio.run(parallel_lorenz_solver(initial_states))
print(results)
time_elapsed, memory_used = measure_time_and_memory(knotx, x, device)
print(f"Original function: Time elapsed = {time_elapsed:.6f} s, Memory used = {memory_used / 1024} KiB")
time_elapsed, memory_used = measure_time_and_memory(optimized_knotx, x, device)
print(f"Optimized function: Time elapsed = {time_elapsed:.6f} s, Memory used = {memory_used / 1024} KiB")
# Check if the optimized function produces the same output as the original function
x_test = torch.randn(1000, device=device)
original_output = knotx(x_test.clone(), device)
optimized_output = optimized_knotx(x_test.clone(), device)
assert torch.allclose(original_output, optimized_output), "The outputs of the original and optimized functions do not match"
# Profile the optimized function
profiled_output = profile_knotx(x_test.clone())
assert torch.allclose(original_output, profiled_output), "The outputs of the original and profiled functions do not match"
# Save the optimized function as a TorchScript module
optimized_knotx_script = torch.jit.script(optimized_knotx)
torch.jit.save(optimized_knotx_script, "optimized_knotx.pt")
# Load the saved TorchScript module and test it
loaded_optimized_knotx = torch.jit.load("optimized_knotx.pt")
loaded_output = loaded_optimized_knotx(x_test.clone(), device)
assert torch.allclose(original_output, loaded_output), "The outputs of the original and loaded functions do not match"
print("All tests passed!") | EXA-1-master | exa/modular_components/activations/KNOTX/clean.py |
import cmath
import math
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
# def knot_invariant(x):
# knot_representation = convert_to_knot_representation(x)
# return knot_representation
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
m, n = knot_representation
return m * n
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
# def knot_gelu(x):
# knot_inv = torch.tensor([knot_invariant(val.item()) for val in x], dtype=torch.float32, device=x.device)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv**3)))
# def knot_gelu(x):
# knot_inv_list = [knot_invariant(val.item()) for val in x.view(-1)]
# knot_inv = torch.tensor(knot_inv_list, dtype=torch.float32, device=x.device).view_as(x)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv**3)))
# def knot_gelu(x):
# knot_inv_list = [knot_invariant(val.item()) for val in x.view(-1)]
# knot_inv = torch.tensor(knot_inv_list, dtype=torch.float32, device=x.device).view(*x.shape)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv**3)))
# def knot_gelu(x):
# x_flat = x.view(-1)
# knot_inv_list = [knot_invariant(val.item()) for val in x_flat]
# knot_inv = torch.tensor(knot_inv_list, dtype=torch.float32, device=x.device).view_as(x_flat)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv**3))).view_as(x)
# def knot_gelu(x):
# x_flat = x.view(-1)
# knot_inv_list = [knot_invariant(val.item()) for val in x_flat]
# knot_inv = torch.tensor(knot_inv_list, dtype=torch.float32, device=x.device).view_as(x_flat)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv**3))).view_as(x)
#with gelu
# def knot_gelu(x):
# x_flat = x.view(-1)
# knot_inv_list = [knot_invariant(val.item()) for val in x_flat]
# knot_inv = torch.tensor(knot_inv_list, dtype=torch.float32, device=x.device).view_as(x_flat)
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * knot_inv.view_as(x)**3)))
#without gelu
def knot_gelu(x):
x_flat = x.view(-1)
knot_inv_list = [knot_invariant(val.item()) for val in x_flat]
knot_inv = torch.tensor(knot_inv_list, dtype=torch.float32, device=x.device).view_as(x_flat)
lorenz_output = 1 + torch.tanh(knot_inv.view_as(x)**3)
return x * lorenz_output
# # Custom Activation Layer
class CustomActivation(nn.Module):
def __init__(self, activation_type):
super(CustomActivation, self).__init__()
self.activation_type = activation_type
def forward(self, x):
if self.activation_type == 'gelu':
return 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / math.pi)) * (x + 0.044715 * torch.pow(x, 3))))
elif self.activation_type == 'knot_gelu':
return knot_gelu(x)
else:
raise ValueError("Invalid activation type")
# Simple Neural Network
class SimpleNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, activation_type='gelu'):
super(SimpleNN, self).__init__()
self.input_layer = nn.Linear(input_size, hidden_size)
self.activation = CustomActivation(activation_type)
self.output_layer = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.input_layer(x)
x = self.activation(x)
x = self.output_layer(x)
return x
# Define input_size, hidden_size, and output_size based on your data and problem
input_size = 10
hidden_size = 20
output_size = 2
# Initialize Simple Neural Networks with GELU and KnotGELU activations
nn_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='gelu')
nn_knot_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='knot_gelu')
# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer_gelu = optim.SGD(nn_gelu.parameters(), lr=0.01)
optimizer_knot_gelu = optim.SGD(nn_knot_gelu.parameters(), lr=0.01)
# Train the networks and compare their performance on your dataset
# Generate synthetic dataset
num_samples = 1000
X = torch.randn(num_samples, input_size)
Y = torch.randint(0, output_size, (num_samples,))
# Split dataset into training and testing sets
train_ratio = 0.8
train_size = int(train_ratio * num_samples)
test_size = num_samples - train_size
X_train, X_test = torch.split(X, [train_size, test_size])
Y_train, Y_test = torch.split(Y, [train_size, test_size])
# Create DataLoaders
train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)
test_dataset = torch.utils.data.TensorDataset(X_test, Y_test)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)
# Training loop
def train(network, dataloader, optimizer, criterion, device):
network.train()
running_loss = 0.0
correct = 0
total = 0
for inputs, targets in dataloader:
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = network(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
running_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return running_loss / len(dataloader), correct / total
# Training settings
epochs = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
nn_gelu.to(device)
nn_knot_gelu.to(device)
# Train and log progress
for epoch in range(epochs):
gelu_loss, gelu_acc = train(nn_gelu, train_loader, optimizer_gelu, criterion, device)
knot_gelu_loss, knot_gelu_acc = train(nn_knot_gelu, train_loader, optimizer_knot_gelu, criterion, device)
print(f'Epoch {epoch+1}/{epochs}:')
print(f' GELU | Loss: {gelu_loss:.4f} | Accuracy: {gelu_acc:.4f}')
print(f' KnotGELU| Loss: {knot_gelu_loss:.4f} | Accuracy: {knot_gelu_acc:.4f}') | EXA-1-master | exa/modular_components/activations/KNOTX/v5.py |
#who fucking knows if this code will work
import torch
import numpy as np
from scipy.integrate import solve_ivp
# Define the knot_invariant function
def knot_invariant(x):
# Convert the input value x into a knot representation
def knot_representation(x):
return x * 2
# Calculate the knot invariant using a specific knot invariant algorithm (e.g., Jones polynomial)
def jones_polynomial(knot_repr):
return knot_repr ** 2
knot_repr = knot_representation(x)
knot_inv = jones_polynomial(knot_repr)
return knot_inv
# Define the Lorenz system
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
# Define the dynamical_systems_modeling function
def dynamical_systems_modeling(x, model='lorenz', params=None):
if model == 'lorenz':
# Define the Lorenz system parameters
if params is None:
sigma, rho, beta = 10, 28, 8/3
else:
sigma, rho, beta = params
# Set initial state and time span
initial_state = [x, x, x]
t_span = (0, 1)
# Solve the Lorenz system
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
# Calculate the output value based on the applied dynamical system model
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
# Define the KnotGELU activation function
def knot_gelu(x):
knot_inv = knot_invariant(x.item())
dyn_sys_mod = dynamical_systems_modeling(knot_inv)
# Create the hyper-efficient version of GELU
    knot_gelu_output = 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / np.pi)) * (x + 0.044715 * torch.pow(x, 3))) * dyn_sys_mod)
return knot_gelu_output
# Test the KnotGELU activation function with sample input
input_values = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float64)
output_values = torch.tensor([knot_gelu(x) for x in input_values])  # apply element-wise since knot_invariant expects a scalar
print("Output values after applying KnotGELU activation function:", output_values) | EXA-1-master | exa/modular_components/activations/KNOTX/knotx.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
#define the knot invariant function
def knot_invariant(x):
#convert the input value x into a knot representation
def knot_representation(x):
return x * 2
#calculate the knot invariant using a specific knot invariant algorithm [jones polynomial]
def jones_polynomial(knot_repr):
return knot_repr ** 2
knot_repr = knot_representation(x)
knot_inv = jones_polynomial(knot_repr)
return knot_inv
#define the Lorenz system
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
#define the dynamical systems modeling function
def dynamical_systems_modeling(x, model="lorenz", params=None):
if model == 'lorenz':
#define the lorenz systems parameters
if params is None:
sigma, rho, beta = 10, 28, 8/3
else:
sigma, rho, beta = params
#set initial state and time span
initial_state = [x, x, x]
t_span = (0, 1)
#solve lorenz system
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
#calculate the output value based on the applied dynamical system model
output_value = sol.sol(1)[0]
return output_value
#define the KNOTX activation function
def knotx(x):
knot_inv = knot_invariant(x)
dyn_sys_mod = dynamical_systems_modeling(knot_inv)
#create the hyper efficient version of gelu
knotx_output = 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / np.pi)) * (x + 0.044715 * torch.pow(x, 3))) * dyn_sys_mod)
return knotx_output
#test the knotx activation function
input_values = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float64)
output_values = torch.tensor([knotx(x) for x in input_values])
print(f"output values after applying knotx activation function {output_values}")
| EXA-1-master | exa/modular_components/activations/KNOTX/knotxv2.py |
# import torch
# import numpy as np
# from scipy.integrate import solve_ivp
# import torch.optim
# import sympy as sp
# import torch.nn as nn
# import torch.utils.data
# import concurrent.futures
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# def lorenz_system(t, state, sigma, rho, beta):
# x, y, z = state
# dx_dt = sigma * (y - x)
# dy_dt = x * (rho - z) - y
# dz_dt = x * y - beta * z
# return [dx_dt, dy_dt, dz_dt]
# def jones_polynomial_torus_knot(m, n):
# t = sp.symbols('t')
# numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
# denominator = 1 - t **2
# return numerator / denominator
# def convert_to_knot_representation(x):
# # Convert x to a suitable knot representation, for example, a torus knot (m, n)
# m = int(np.ceil(x))
# n = m + 1
# return (m, n)
# def knot_invariant(x):
# knot_representation = convert_to_knot_representation(x)
# m, n = knot_representation
# return m * n
# def dynamical_systems_modeling(knot_representation):
# sigma, rho, beta = 10, 28, 8/3
# initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
# t_span = (0, 1)
# sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
# output_value = sol.sol(1)[0] # Get the x value at t=1
# return output_value
# def vectorized_knot_invariant(x):
# m = np.ceil(x)
# n = m + 1
# return m * n
# def fast_tanh(x):
# return x / (1 + np.abs(x))
# def parallel_lorenz_solver(initial_states):
# with concurrent.futures.ThreadPoolExecutor() as executor:
# results = list(executor.map(dynamical_systems_modeling, initial_states))
# return results
# #v1
# # def knotx(x):
# # x_flat = x.view(-1)
# # knot_inv = vectorized_knot_invariant(x_flat.numpy())
# # lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# # return x * lorenz_output.view_as(x)
# # error occurs because we're trying to call numpy() on a tensor that requires gradient computation. To fix this issue, we can use detach().numpy() instead of numpy().
# #v2
# def knotx(x):
# x_flat = x.view(-1)
# x_flat = x_flat.to(device) # Move the tensor to the GPU if available
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
# Set the default device to use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
m, n = knot_representation
return m * n
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
def vectorized_knot_invariant(x):
m = np.ceil(x)
n = m + 1
return m * n
def fast_tanh(x):
return x / (1 + np.abs(x))
def parallel_lorenz_solver(initial_states):
with concurrent.futures.ThreadPoolExecutor() as executor:
results = list(executor.map(dynamical_systems_modeling, initial_states))
return results
def knotx(x):
x_flat = x.view(-1)
x_flat = x_flat.to(device) # Move the tensor to the GPU if available
knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
return x * lorenz_output.view_as(x) | EXA-1-master | exa/modular_components/activations/KNOTX/working.py |
import numpy as np
import torch
from scipy.integrate import solve_ivp
def convert_to_knot_representation(x):
m = int(np.ceil(x))
n = m + 1
return (m, n)
def lorenz_system(t, y, sigma=10, rho=28, beta=8/3):
x, y, z = y
dxdt = sigma * (y - x)
dydt = x * (rho - z) - y
dzdt = x * y - beta * z
return [dxdt, dydt, dzdt]
def lorenz_ode(x0, y0, z0, t_span=(0, 10), t_eval=None):
if t_eval is None:
t_eval = np.linspace(t_span[0], t_span[1], 1000)
sol = solve_ivp(
lorenz_system,
t_span,
(x0, y0, z0),
t_eval=t_eval,
method="RK45",
args=(10, 28, 8/3)
)
return sol.y[:, -1]
def knotx(x, device):
x_flat = x.view(-1)
x_flat = x_flat.to(device)
knot_representation = np.array([convert_to_knot_representation(val.item()) for val in x_flat])
lorenz_output = []
for m, n in knot_representation:
x0, y0, z0 = m, n, n + 1
lorenz_output.append(lorenz_ode(x0, y0, z0)[-1])
lorenz_output = torch.tensor(lorenz_output, dtype=torch.float32, device=x.device).view_as(x_flat)
return x * (1 + lorenz_output)
# def knotx(x, device):
# output = torch.empty_like(x)
# for i in range(x.shape[0]):
# x0, y0, z0 = x[i], x[i] + 1, x[i] + 2
# output[i] = lorenz_ode(x0, y0, z0)[-1]
# return output
# def test_knotx():
# device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
# #create a random input tensor with the shape (batch size 1)
# batch_size = 5
# x = torch.rand(batch_size, 1) * 10
# #run the knotx function on the input token
# output = knotx(x, device)
# #check if the output tensor has the same shape as the input tensor
# assert output.shape == x.shape, f"output shape {output.shape} does not match input shape {x.shape}"
# #check if the output values are updated as expected
# for i in range(batch_size):
# x_val = x[i].item()
# expected_output = x_val * (1 + lorenz_ode(*convert_to_knot_representation(x_val), x_val + 1, x_val + 2)[-1])
# assert np.isclose(output[i].item(), expected_output, rtol=1e-5), f"Output value {output[i].item()} does not match expected value {expected.output}"
# print("knotx test passed")
# test_knotx()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Generate initial conditions for the Lorenz system
x0, y0, z0 = 1, 1, 1
# Solve the Lorenz system and get the output trajectory
t_span = (0, 100)
sol = solve_ivp(lorenz_system, t_span, (x0, y0, z0), t_eval=np.linspace(*t_span, 10000))
x, y, z = sol.y
# Create a 3D plot of the trajectory
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(x, y, z)
plt.show()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#====================> knotx visualization
# Define the range of input values for x
x_range = np.linspace(-10, 10, 100)
# Generate the knotx output for each input value
knotx_output = knotx(torch.tensor(x_range, dtype=torch.float32, device=device), device)
# Use the knotx output to generate x, y, and z coordinates for the Lorenz system
x_coords = knotx_output.detach().cpu().numpy()
y_coords = knotx_output.detach().cpu().numpy() - 1
z_coords = knotx_output.detach().cpu().numpy() + 1
# Initialize a 3D plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot the Lorenz system trajectory
ax.plot(x_coords, y_coords, z_coords)
# Set the axis labels
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Show the plot
plt.show() | EXA-1-master | exa/modular_components/activations/KNOTX/knotODE.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
import asyncio
import torch.jit
# Set the default device to use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
m, n = knot_representation
return m * n
# ===========================+>
#use a more efficient ode solver like the backward differentiation formula solver
# async def dynamical_systems_modeling(knot_representation):
# sigma, rho, beta = 10, 28, 8/3
# initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
# t_span = (0, 1)
# # sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
# sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True, method='BDF')
# output_value = sol.sol(1)[0] # Get the x value at t=1
# return output_value
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0]
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True, method='BDF')
output_value = sol.sol(1)[0]
return output_value
def vectorized_knot_invariant(x):
m = np.ceil(x)
n = m + 1
return m * n
#torch built in vectorized function for faster computation
def fast_tanh(x):
return torch.tanh(x)
# ===========================+>
#batch processing of multiple inputs
def parallel_lorenz_solver_batch(initial_states, batch_size=10):
    # ceil division so a trailing partial batch is not silently dropped
    num_batches = (len(initial_states) + batch_size - 1) // batch_size
results = []
for i in range(num_batches):
batch = initial_states[i * batch_size: (i + 1) * batch_size]
batch_results = parallel_lorenz_solver(batch)
results.extend(batch_results)
return results
# ===========================+>
# async def parallel_lorenz_solver(initial_states):
# async with concurrent.futures.ThreadPoolExecutor() as executor:
# results = list(executor.map(dynamical_systems_modeling, initial_states))
# return results
# coroutine version: await the gathered futures so asyncio.run(...) below receives a real coroutine
async def parallel_lorenz_solver(initial_states):
    loop = asyncio.get_event_loop()
    with concurrent.futures.ThreadPoolExecutor() as executor:
        tasks = [loop.run_in_executor(executor, dynamical_systems_modeling, state) for state in initial_states]
        results = await asyncio.gather(*tasks)
    return results
#v1
# def knotx(x, device):
# x_flat = x.view(-1)
# x_flat = x_flat.to(device) # Move the tensor to the GPU if available
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
#v2 --> converts a numpy array into a torch tensor
# def knotx(x, device):
# x_flat = x.view(-1)
# x_flat = x_flat.to(device) # Move the tensor to the GPU if available
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# knot_inv_tensor = torch.tensor(knot_inv, dtype=torch.float32, device=x.device) # Convert the NumPy array to a tensor
# lorenz_output = 1 + fast_tanh(knot_inv_tensor**3).view_as(x_flat) # Use the tensor in fast_tanh
# return x * lorenz_output.view_as(x)
#v3 -> call lorenz ode solution
def knotx(x, device):
x_flat = x.view(-1)
x_flat = x_flat.to(device) # Move the tensor to the GPU if available
knot_representation = np.array([convert_to_knot_representation(val.item()) for val in x_flat])
    lorenz_output = asyncio.run(parallel_lorenz_solver(knot_representation))  # run the coroutine to completion
lorenz_output = torch.tensor(lorenz_output, dtype=torch.float32, device=x.device).view_as(x_flat)
return x * (1 + lorenz_output)
from torch.profiler import profile, record_function
def profile_knotx(x):
with profile(activities=[torch.profiler.ProfilerActivity.CPU], record_shapes=True) as prof:
with record_function("knotx"):
result = knotx(x)
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
return result
#v1
# def optimized_knotx(x):
# x_flat = x.view(-1).to(device)
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = torch.tesnor(fast_tanh(knot_inv **3), dtype=torch.float32, device=x.device).view_as(x_flat)
# #in place multiplication
# x.mul_(lorenz_output.view_as(x))
# return x
#v2
# def optimized_knotx(x):
# x_flat = x.view(-1)
# x_flat = x_flat.to(device) # Move the tensor to the GPU if available
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
#v3
#optimized knotx for torch jit
# @torch.jit.script
# def optimized_knotx(x: torch.Tensor, device: torch.device) -> torch.Tensor:
# x_flat = x.view(-1)
# x_flat = x_flat.to(device) # Move the tensor to the GPU if available
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
#v4
# def optimized_knotx(x, device):
# x_flat = x.view(-1)
# x_flat = x_flat.to(device) # Move the tensor to the GPU if available
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
#v35--> transforming numpy array into torch tensor
def optimized_knotx(x: torch.Tensor, device: torch.device) -> torch.Tensor:
x_flat = x.view(-1)
x_flat = x_flat.to(device) # Move the tensor to the GPU if available
knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
knot_inv_tensor = torch.tensor(knot_inv, dtype=torch.float32, device=x.device) # Convert the NumPy array to a tensor
lorenz_output = 1 + fast_tanh(knot_inv_tensor**3).view_as(x_flat) # Use the tensor in fast_tanh
return x * lorenz_output.view_as(x)
initial_states = [convert_to_knot_representation(x) for x in [0.5, 1.0, 1.5]]
results = asyncio.run(parallel_lorenz_solver(initial_states))
print(results)
import timeit
import psutil
import os
x = torch.randn(1000, device=device) # Create a random tensor of shape (1000,) for testing
# Update the measure_time_and_memory function to pass the device as an argument
def measure_time_and_memory(func, x, device, num_runs=100):
start_time = timeit.default_timer()
start_memory = psutil.Process(os.getpid()).memory_info().rss
for _ in range(num_runs):
result = func(x.clone(), device) # Pass the device as an argument
end_time = timeit.default_timer()
end_memory = psutil.Process(os.getpid()).memory_info().rss
time_elapsed = (end_time - start_time) / num_runs
memory_used = end_memory - start_memory
return time_elapsed, memory_used
# ...
time_elapsed, memory_used = measure_time_and_memory(knotx, x, device)
print(f"Original function: Time elapsed = {time_elapsed:.6f} s, Memory used = {memory_used / 1024} KiB")
time_elapsed, memory_used = measure_time_and_memory(optimized_knotx, x, device)
print(f"Optimized function: Time elapsed = {time_elapsed:.6f} s, Memory used = {memory_used / 1024} KiB")
| EXA-1-master | exa/modular_components/activations/KNOTX/experimental_async.py |
#vectorized operations
#cache knot invariants (a small memoisation sketch follows the imports below)
#faster approximation
#parallelize the lorenz system solver
#use a lower order ode solver
import math
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
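# Illustrative sketch of the "cache knot invariants" idea listed at the top of this file
# (an assumption about how the caching could look; nothing below calls it): memoise the
# integer invariant so repeated activations with the same value skip the recomputation.
import functools

@functools.lru_cache(maxsize=None)
def cached_knot_invariant(x_value: float) -> float:
    m = math.ceil(x_value)
    return float(m * (m + 1))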
# def vectorized_knot_invariant(x):
# m = np.ceil(x)
# n = m + 1
# return m * n
# def fast_tanh(x):
# return x / (1 + np.abs(x))
# def lorenz_system(t, state, sigma, rho, beta):
# x, y, z = state
# dx_dt = sigma * (y - x)
# dy_dt = x * (rho - z) - y
# dz_dt = x * y - beta * z
# return [dx_dt, dy_dt, dz_dt]
# def jones_polynomial_torus_knot(m, n):
# t = sp.symbols('t')
# numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
# denominator = 1 - t ** 2
# return numerator /denominator
# def convert_to_knot_representation(x):
# #convert x to a suitable knot representation for example a torus knot(m, n)
# m = int(np.ceil(x))
# n = m + 1
# return (m, n)
# def knot_invariant(x):
# knot_representation = convert_to_knot_representation(x)
# m, n = knot_representation
# return m * n
# def parallel_lorenz_solver(initial_states):
# with concurrent.futures.ThreadPoolExecutor() as executor:
# results = list(executor.map(dynamical_systems_modeling, initial_state))
# def dynamical_systems_modeling(knot_representation):
# sigma, rho, beta = 10, 28, 8/3
# initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
# t_span = (0, 1)
# sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
# output_value = sol.sol(1)[0] # Get the x value at t=1
# return output_value
# def knotx(x):
# x_flat = x.view(-1)
# knot_inv = vectorized_knot_invariant(x_flat.numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device)
# return x * lorenz_output.view_as(x)
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
m, n = knot_representation
return m * n
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
def vectorized_knot_invariant(x):
m = np.ceil(x)
n = m + 1
return m * n
def fast_tanh(x):
return x / (1 + np.abs(x))
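# A minimal sketch (assumption, not used by the code below) of the "cache knot invariants" idea
# from the notes at the top of this file: memoise the per-scalar invariant, since ceil(x) takes
# relatively few distinct values across activations.
from functools import lru_cache
@lru_cache(maxsize=None)
def cached_knot_invariant(value):
    # same computation as vectorized_knot_invariant, for a single Python float
    m = math.ceil(value)
    return m * (m + 1)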
def parallel_lorenz_solver(initial_states):
with concurrent.futures.ThreadPoolExecutor() as executor:
results = list(executor.map(dynamical_systems_modeling, initial_states))
return results
#v1
# def knotx(x):
# x_flat = x.view(-1)
# knot_inv = vectorized_knot_invariant(x_flat.numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
# error occurs because we're trying to call numpy() on a tensor that requires gradient computation. To fix this issue, we can use detach().numpy() instead of numpy().
#v2
def knotx(x):
x_flat = x.view(-1)
    knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())  # move to CPU before converting to NumPy (needed when x lives on the GPU)
lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
return x * lorenz_output.view_as(x)
#v3
#simple test
# class SimpleNN(nn.Module):
# def __init__(self):
# super(SimpleNN, self).__init__()
# self.linear = nn.Linear(10, 10)
# self.activation = knotx
# def forward(self, x):
# x = self.linear(x)
# x = self.activation(x)
# return x
# # Create a random input tensor of size (batch_size, 10)
# input_tensor = torch.randn(5, 10)
# # Initialize the simple neural network
# model = SimpleNN()
# # Forward pass the input tensor through the model
# output_tensor = model(input_tensor)
# # Print the output tensor
# print("Output tensor:")
# print(output_tensor)
# # Custom Activation Layer
class CustomActivation(nn.Module):
def __init__(self, activation_type):
super(CustomActivation, self).__init__()
self.activation_type = activation_type
def forward(self, x):
if self.activation_type == 'gelu':
return 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / math.pi)) * (x + 0.044715 * torch.pow(x, 3))))
elif self.activation_type == 'knot_gelu':
return knotx(x)
else:
raise ValueError("Invalid activation type")
# Simple Neural Network
class SimpleNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, activation_type='gelu'):
super(SimpleNN, self).__init__()
self.input_layer = nn.Linear(input_size, hidden_size)
self.activation = CustomActivation(activation_type)
self.output_layer = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.input_layer(x)
x = self.activation(x)
x = self.output_layer(x)
return x
# Define input_size, hidden_size, and output_size based on your data and problem
input_size = 10
hidden_size = 20
output_size = 2
# Initialize Simple Neural Networks with GELU and KnotGELU activations
nn_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='gelu')
nn_knot_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='knot_gelu')
# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer_gelu = optim.SGD(nn_gelu.parameters(), lr=0.01)
optimizer_knot_gelu = optim.SGD(nn_knot_gelu.parameters(), lr=0.01)
# Train the networks and compare their performance on your dataset
# Generate synthetic dataset
num_samples = 1000
X = torch.randn(num_samples, input_size)
Y = torch.randint(0, output_size, (num_samples,))
# Split dataset into training and testing sets
train_ratio = 0.8
train_size = int(train_ratio * num_samples)
test_size = num_samples - train_size
X_train, X_test = torch.split(X, [train_size, test_size])
Y_train, Y_test = torch.split(Y, [train_size, test_size])
# Create DataLoaders
train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)
test_dataset = torch.utils.data.TensorDataset(X_test, Y_test)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)
# Training loop
def train(network, dataloader, optimizer, criterion, device):
network.train()
running_loss = 0.0
correct = 0
total = 0
for inputs, targets in dataloader:
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = network(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
running_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return running_loss / len(dataloader), correct / total
# Training settings
epochs = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'
nn_gelu.to(device)
nn_knot_gelu.to(device)
# Train and log progress
for epoch in range(epochs):
gelu_loss, gelu_acc = train(nn_gelu, train_loader, optimizer_gelu, criterion, device)
knot_gelu_loss, knot_gelu_acc = train(nn_knot_gelu, train_loader, optimizer_knot_gelu, criterion, device)
print(f'Epoch {epoch+1}/{epochs}:')
print(f' GELU | Loss: {gelu_loss:.4f} | Accuracy: {gelu_acc:.4f}')
print(f' KnotGELU| Loss: {knot_gelu_loss:.4f} | Accuracy: {knot_gelu_acc:.4f}') | EXA-1-master | exa/modular_components/activations/KNOTX/v6.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
import asyncio
# Set the default device to use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
m, n = knot_representation
return m * n
def dynamical_systems_modeling(knot_representation):  # synchronous: solve_ivp blocks, so it is dispatched to a thread pool below
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
def vectorized_knot_invariant(x):
m = np.ceil(x)
n = m + 1
return m * n
def fast_tanh(x):
return x / (1 + np.abs(x))
async def parallel_lorenz_solver(initial_states):
    # ThreadPoolExecutor is not an async context manager; run the blocking ODE solves in the pool
    loop = asyncio.get_running_loop()
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [loop.run_in_executor(executor, dynamical_systems_modeling, state) for state in initial_states]
        results = await asyncio.gather(*futures)
    return results
def knotx(x):
x_flat = x.view(-1)
x_flat = x_flat.to(device) # Move the tensor to the GPU if available
knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
return x * lorenz_output.view_as(x)
from torch.profiler import profile, record_function
def profile_knotx(x):
with profile(activities=[torch.profiler.ProfilerActivity.CPU], record_shapes=True) as prof:
with record_function("knotx"):
result = knotx(x)
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
return result
# def optimized_knotx(x):
# x_flat = x.view(-1).to(device)
# knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
# lorenz_output = torch.tesnor(fast_tanh(knot_inv **3), dtype=torch.float32, device=x.device).view_as(x_flat)
# #in place multiplication
# x.mul_(lorenz_output.view_as(x))
# return x
def optimized_knotx(x):
x_flat = x.view(-1)
x_flat = x_flat.to(device) # Move the tensor to the GPU if available
knot_inv = vectorized_knot_invariant(x_flat.detach().cpu().numpy())
lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
return x * lorenz_output.view_as(x)
import timeit
import psutil
import os
def measure_time_and_memory(func, x, num_runs=100):
start_time = timeit.default_timer()
start_memory = psutil.Process(os.getpid()).memory_info().rss
for _ in range(num_runs):
result = func(x.clone()) # Use a clone of x to avoid modifying the original tensor
end_time = timeit.default_timer()
end_memory = psutil.Process(os.getpid()).memory_info().rss
time_elapsed = (end_time - start_time) / num_runs
memory_used = end_memory - start_memory
return time_elapsed, memory_used
x = torch.tensor([0.5, 1.0, 1.5], device=device)
# Test original function
time_elapsed, memory_used = measure_time_and_memory(knotx, x)
print(f"Original function: Time elapsed = {time_elapsed:.6f} s, Memory used = {memory_used / 1024} KiB")
# Test optimized function
time_elapsed, memory_used = measure_time_and_memory(optimized_knotx, x)
print(f"Optimized function: Time elapsed = {time_elapsed:.6f} s, Memory used = {memory_used / 1024} KiB") | EXA-1-master | exa/modular_components/activations/KNOTX/async.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
knot_representation = convert_to_knot_representation(x)
m, n = knot_representation
return m * n
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
def vectorized_knot_invariant(x):
m = np.ceil(x)
n = m + 1
return m * n
def fast_tanh(x):
return x / (1 + np.abs(x))
def parallel_lorenz_solver(initial_states):
with concurrent.futures.ThreadPoolExecutor() as executor:
results = list(executor.map(dynamical_systems_modeling, initial_states))
return results
#v1
# def knotx(x):
# x_flat = x.view(-1)
# knot_inv = vectorized_knot_invariant(x_flat.numpy())
# lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
# return x * lorenz_output.view_as(x)
# error occurs because we're trying to call numpy() on a tensor that requires gradient computation. To fix this issue, we can use detach().numpy() instead of numpy().
#v2
def knotx(x):
x_flat = x.view(-1)
knot_inv = vectorized_knot_invariant(x_flat.detach().numpy())
lorenz_output = 1 + torch.tensor(fast_tanh(knot_inv**3), dtype=torch.float32, device=x.device).view_as(x_flat)
return x * lorenz_output.view_as(x)
| EXA-1-master | exa/modular_components/activations/KNOTX/main.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def knot_invariant(x):
#convert the input value into a knot representation (m, n ) for a torus knot
m, n = int(x), int(x + 1)
#calculate the jones polynomial for the torus knot
jones_poly = jones_polynomial_torus_knot(m, n)
#eval the jones polynomial at a specific point
knot_inv = jones_poly.subs('t', 2)
return float(knot_inv)
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (rho - z) - y
dz_dt = x * y - beta * z
return [dx_dt, dy_dt, dz_dt]
def convert_to_knot_representation(x):
# Convert x to a suitable knot representation, for example, a torus knot (m, n)
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knot_invariant(x):
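    # NOTE: this second definition overrides the Jones-polynomial version above; it returns the
    # (m, n) tuple that dynamical_systems_modeling expects as its initial state.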
knot_representation = convert_to_knot_representation(x)
return knot_representation
def dynamical_systems_modeling(knot_representation):
sigma, rho, beta = 10, 28, 8/3
initial_state = list(knot_representation) + [0] # Use knot_representation as the initial state
t_span = (0, 1)
sol = solve_ivp(lorenz_system, t_span, initial_state, args=(sigma, rho, beta), dense_output=True)
output_value = sol.sol(1)[0] # Get the x value at t=1
return output_value
def knot_gelu(x):
knot_inv = knot_invariant(x.item())
dyn_sys_mod = dynamical_systems_modeling(knot_inv)
knot_gelu_output = 0.5 * x * (1 + torch.tanh(torch.sqrt(torch.tensor(2 / np.pi)) * (x + 0.044715 * torch.pow(x, 3))) * dyn_sys_mod)
return knot_gelu_output
#input_values = torch.tensor([-1.0, 0.0, 1.0])
#output_values = torch.tensor([knot_gelu(x) for x in input_values])
#print("Output values after applying KnotGELU activation function:", output_values)
# Simple Neural Network
class SimpleNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, activation_type='gelu'):
super(SimpleNN, self).__init__()
self.input_layer = nn.Linear(input_size, hidden_size)
        # select the activation callable; note that knot_gelu above expects a scalar tensor (it calls x.item())
        self.activation = knot_gelu if activation_type == 'knot_gelu' else nn.GELU()
self.output_layer = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.input_layer(x)
x = self.activation(x)
x = self.output_layer(x)
return x
# Define input_size, hidden_size, and output_size based on your data and problem
input_size = 10
hidden_size = 20
output_size = 2
# Initialize Simple Neural Networks with GELU and KnotGELU activations
nn_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='gelu')
nn_knot_gelu = SimpleNN(input_size, hidden_size, output_size, activation_type='knot_gelu')
# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer_gelu = optim.SGD(nn_gelu.parameters(), lr=0.01)
optimizer_knot_gelu = optim.SGD(nn_knot_gelu.parameters(), lr=0.01)
# Train the networks and compare their performance on your dataset | EXA-1-master | exa/modular_components/activations/KNOTX/v3.py |
import sympy as sp
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
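# Worked check (can be verified with sympy): for the trefoil, i.e. the (2, 3) torus knot, the
# expression above simplifies to V(t) = t + t**3 - t**4, the standard Jones polynomial of the
# right-handed trefoil, e.g. sp.simplify(jones_polynomial_torus_knot(2, 3)) -> -t**4 + t**3 + t.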
def knot_invariant(x):
#convert the input value into a knot representation (m, n ) for a torus knot
m, n = int(x), int(x + 1)
#calculate the jones polynomial for the torus knot
jones_poly = jones_polynomial_torus_knot(m, n)
#eval the jones polynomial at a specific point
knot_inv = jones_poly.subs('t', 2)
return float(knot_inv)
#test the knot invariant function with sample input
input_value = 1.0
knot_inv_value = knot_invariant(input_value)
print(f"Knot invariant value after applying knot invariant function {knot_inv_value}") | EXA-1-master | exa/modular_components/activations/KNOTX/knot/jones.py |
from functools import partial
from jax import jit
from jax.experimental.ode import odeint
import jax.numpy as jnp
@jit
def lorenz(sigma, beta, rho, X, t):
x, y, z = X
xdot = sigma * (y - x)
    ydot = x * (rho - z) - y
zdot = x * y - beta * z
return jnp.array([xdot, ydot, zdot])
# since the parameters are fixed, use a partial to create a new function that no longer requires them
g = partial(lorenz, 10., 8. / 3., 28.)
g = jit(g)
#inital condition
x_0 = jnp.ones(3)
#time intervals
t_vals = jnp.linspace(0., 450., 45000)
#integrate the function to get the data
sol = odeint(g, x_0, t_vals)
X = sol[500:] | EXA-1-master | exa/modular_components/activations/KNOTX/visualizations/lorenz.py |
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import solve_ivp
def convert_to_knot_representation(x):
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knotx(x, device):
x_flat = x.view(-1)
x_flat = x_flat.to(device)
knot_representation = np.array([convert_to_knot_representation(val.item()) for val in x_flat])
lorenz_output = []
for m, n in knot_representation:
x0, y0, z0 = m, n, n + 1
lorenz_output.append(lorenz_ode(x0, y0, z0)[-1])
lorenz_output = torch.tensor(lorenz_output, dtype=torch.float32, device=x.device).view_as(x_flat)
    return x * (1 + lorenz_output.view_as(x))  # reshape back to the input shape before broadcasting
def visualize_knot_representation(x):
    x_flat = x.detach().flatten()
    knot_reps = [convert_to_knot_representation(val.item()) for val in x_flat]
# Calculate Lorenz system output for each knot representation
lorenz_output = []
for m, n in knot_reps:
x0, y0, z0 = m, n, n + 1
lorenz_output.append(lorenz_ode(x0, y0, z0)[-1])
    x_coords = x_flat.cpu().numpy()
    y_coords = x_flat.cpu().numpy() - 1
    z_coords = x_flat.cpu().numpy() + 1
# Shift coordinates by Lorenz system output
y_coords -= np.array(lorenz_output)
z_coords += np.array(lorenz_output)
# Plot the knot representation
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(x_coords, y_coords, z_coords)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
def lorenz_system(t, y, sigma=10, rho=28, beta=8/3):
x, y, z = y
dxdt = sigma * (y - x)
dydt = x * (rho - z) - y
dzdt = x * y - beta * z
return [dxdt, dydt, dzdt]
def lorenz_ode(x0, y0, z0, t_span=(0, 10), t_eval=None):
if t_eval is None:
t_eval = np.linspace(t_span[0], t_span[1], 1000)
sol = solve_ivp(
lorenz_system,
t_span,
(x0, y0, z0),
t_eval=t_eval,
method="RK45",
args=(10, 28, 8/3)
)
return sol.y[:, -1]
# Example usage
device = torch.device('cpu')
x = torch.tensor([[-10, -5, 0, 5], [1, 2, 3, 4], [5, 6, 7, 8]], dtype=torch.float32, device=device)
knotx_output = knotx(x, device)
visualize_knot_representation(knotx_output) | EXA-1-master | exa/modular_components/activations/KNOTX/visualizations/knot.py |
import torch.multiprocessing as mp
import time
import copy
class InfinityWarp:
def __init__(self, model, train_data, train_labels, infer_data, train_fn, infer_fn):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
self.train_fn = train_fn
self.infer_fn = infer_fn
def train_model(self):
self.train_fn(self.model, self.train_data, self.train_labels)
def perform_inference(self):
while True:
#perform a deep copy of the model parameters to avoid any conflict
model_copy = copy.deepcopy(self.model)
preds = self.infer_fn(model_copy, self.infer_data)
print(f"Inference result: {preds}")
time.sleep(0.5)
def start(self):
        train_process = mp.Process(target=self.train_model)
        infer_process = mp.Process(target=self.perform_inference)
train_process.start()
infer_process.start()
train_process.join()
infer_process.join()
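# Minimal usage sketch (commented out; MyModel, my_train_fn and my_infer_fn are hypothetical,
# not part of this file). train_fn(model, train_data, train_labels) should run the training
# loop and infer_fn(model, infer_data) should return predictions:
#
# model = MyModel()
# iw = InfinityWarp(model, train_data, train_labels, infer_data,
#                   train_fn=my_train_fn, infer_fn=my_infer_fn)
# iw.start()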
| EXA-1-master | exa/modular_components/infinityWarp/InfinityWarp.py |
import torch.multiprocessing as mp
import time
import copy
class InfinityWarp:
def __init__(self, model, train_data, train_labels, infer_data):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
def train_model(self):
for data, labels in zip(self.train_data, self.train_labels):
self.model.train_step(data, labels)
time.sleep(0.1)
def perform_inference(self):
while True:
#perform a deep copy of the models params to avoid any conflict
model_copy = copy.deepcopy(self.model)
preds = model_copy.infer(self.infer_data)
print(f"Inference result: {preds}")
time.sleep(0.5)
def start(self):
train_process = mp.Process(target=self.train_model)
        infer_process = mp.Process(target=self.perform_inference)
train_process.start()
infer_process.start()
train_process.join()
infer_process.join()
| EXA-1-master | exa/modular_components/infinityWarp/experimental/InfinityWarp3.py |
import multiprocessing
import time
import copy
import torch
class ConcurrentTrainInference:
def __init__(self, model, train_data, train_labels, infer_data):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
def train_model(self, lock, model_params):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(device)
for data, labels in zip(self.train_data, self.train_labels):
with lock:
                self.model.train_step(data, labels)
                torch.save(self.model.state_dict(), model_params.value)
time.sleep(0.1) # simulate training time
def perform_inference(self, lock, model_params):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(device)
while True:
with lock:
model_copy = copy.deepcopy(self.model)
                model_copy.load_state_dict(torch.load(model_params.value))
preds = model_copy.infer(self.infer_data)
print(f"Inference results {preds}")
time.sleep(0.5)
def start(self):
manager = multiprocessing.Manager()
lock = manager.Lock()
        model_params = manager.Value(str, "model_params.pt")  # shared checkpoint path (illustrative default filename)
train_process = multiprocessing.Process(target=self.train_model, args=(lock, model_params))
infer_process = multiprocessing.Process(target=self.perform_inference, args=(lock, model_params))
train_process.start()
infer_process.start()
train_process.join()
infer_process.terminate()
| EXA-1-master | exa/modular_components/infinityWarp/experimental/InfinityWarp2.py |
import threading
import time
import copy
class ConcurrentTrainInference:
def __init__(self, model, train_data, train_labels, infer_data):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
def train_model(self):
for data, labels in zip(self.train_data, self.train_labels):
self.model.train_step(data, labels)
            time.sleep(0.1)  # simulate training time
def perform_inference(self):
while True:
#perform a deep copy of the models params to avoid any conflicts
model_copy = copy.deepcopy(self.model)
preds = model_copy.infer(self.infer_data)
print(f"Inference results: {preds}")
time.sleep(0.5)
def start(self):
train_thread = threading.Thread(target=self.train_model)
infer_thread = threading.Thread(target=self.perform_inference)
train_thread.start()
infer_thread.start()
train_thread.join()
infer_thread.join()
# to use
# # Define your neural network model here
# class MyNeuralNet:
# # ... (as before)
# # Initialize model and training/inference data
# model = MyNeuralNet()
# train_data, train_labels = load_training_data() # Implement data loading function
# infer_data = load_inference_data() # Implement data loading function
# # Create and start the concurrent training and inference component
# concurrent_component = ConcurrentTrainInference(model, train_data, train_labels, infer_data)
# concurrent_component.start()
| EXA-1-master | exa/modular_components/infinityWarp/experimental/infinitywarp.py |
# import torch.multiprocessing as mp
# import time
# import copy
# import
# # Modify the InfinityWarp class
# class InfinityWarp:
# def __init__(self, accelerator, model, train_data, train_labels, infer_data, train_fn, infer_fn):
# self.accelerator = accelerator
# self.model = model
# self.train_data = train_data
# self.train_labels = train_labels
# self.infer_data = infer_data
# self.train_fn = train_fn
# self.infer_fn = infer_fn
# def train_model(self):
# # Wrap the training function with the accelerator
# self.train_fn(self.accelerator, self.model, self.train_data, self.train_labels)
# def perform_inference(self):
# # Wrap the inference function with the accelerator
# while True:
# preds = self.infer_fn(self.accelerator, self.model, self.infer_data)
# print(f"Inference result: {preds}")
# time.sleep(0.5)
# def start(self):
# # Use accelerator.launch() to start the train_model and perform_inference functions
# self.accelerator.launch(self.train_model)
# self.accelerator.launch(self.perform_inference)
# # In the train function:
# def train(args):
# # Instantiate the accelerator (Hugging Face Accelerate or PyTorch Distributed)
# accelerator = ...
# # Modify the custom_train_fn and custom_infer_fn to use the accelerator
# def custom_train_fn(accelerator, model, train_data, train_labels):
# # Your training logic using 'train_dataloader' in place of 'train_data', wrapped with the accelerator
# def custom_infer_fn(accelerator, model, infer_data):
# # Your custom inference logic, wrapped with the accelerator
# # Instantiate the InfinityWarp class
# iw = InfinityWarp(accelerator, model, train_dataloader, None, infer_data, custom_train_fn, custom_infer_fn)
# # Start the training and inference processes
# iw.start()
| EXA-1-master | exa/modular_components/infinityWarp/distributed/InfinityWarp.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class AudioEmbedding(nn.Module):
    def __init__(self, in_channels, embed_dim):
        super().__init__()
        # Conv1d: the audio features below arrive as (batch, channels, time), not as 4-D images
        self.conv = nn.Conv1d(in_channels, embed_dim, kernel_size=1)
    def forward(self, x, **kwargs):
        # (batch, channels, time) -> (batch, time, embed_dim)
        return self.conv(x).transpose(1, 2)
class OmniModalityEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed, audio_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
self.audio_embed = audio_embed
def forward(self, input_data, modality_type, **kwargs):
if modality_type == "text":
return self.text_embed(input_data, **kwargs)
elif modality_type == "vision":
return self.vision_embed(input_data, **kwargs)
elif modality_type == "audio":
return self.audio_embed(input_data, **kwargs)
else:
raise ValueError(f"Unsupported modality type {modality_type}")
#instantiate the embedding module
text_embed = TextEmbedding(num_embeddings=10000, embedding_dim=768)
vision_embed = VisionEmbedding(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
audio_embed = AudioEmbedding(in_channels=128, embed_dim=768)
#create the omnimodality embedding instance
OmniMorph = OmniModalityEmbedding(text_embed, vision_embed, audio_embed)
#example usage for different modalities
text_input = torch.randint(0, 10000, (1, 50))
vision_input = torch.randn(1, 3, 224, 224)
audio_input = torch.randn(1, 128, 100)
text_embedding = OmniMorph(text_input, "text")
vision_embedding = OmniMorph(vision_input, 'vision')
audio_embedding = OmniMorph(audio_input, 'audio')
| EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI.py |
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class AudioEmbedding(nn.Module):
def __init__(self, in_channels, embed_dim):
super().__init__()
self.conv = nn.Conv2d(in_channels, embed_dim, kernel_size=1)
def forward(self, x, **kwargs):
return self.conv(x)
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
self._fusion_techniques = {}
        # Pre-register and instantiate the embedding functions
self.register_and_instantiate('text', TextEmbedding, num_embeddings=10000, embedding_dim=768)
self.register_and_instantiate('vision', VisionEmbedding, img_size=224, patch_size=16, in_chans=3, embed_dim=768)
self.register_and_instantiate('audio', AudioEmbedding, in_channels=128, embed_dim=768)
# self.register_and_instantiate('video', VideoEmbedding, num_channels=3, time_dim=10, height=224, width=224, embed_dim=768)
# Instantiate VisionLanguageEmbedding with visionembeddings and textembeddings instances
vision_embed_instance = self._embedding_instances.get('vision')
text_embed_instance = self._embedding_instances.get('text')
self.vision_language_embedding = VisionLanguageEmbedding(text_embed_instance, vision_embed_instance)
def register_and_instantiate(self, modality_type, embedding_class, **kwargs):
self.register_embedding(modality_type, embedding_class)
self.instantiate_embedding(modality_type, **kwargs)
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, embedding_class=None, *args, **kwargs):
if embedding_class is None:
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, fusion_technique=None, file_extension=None, user_defined_modality=None, custom_modality_fn=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data, file_extension=file_extension, user_defined_modality=user_defined_modality, custom_modality_fn=custom_modality_fn)
print(modality_type)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
embedding = embedding_instance(input_data, **kwargs)
print(embedding)
if fusion_technique:
fusion_fn = self._fusion_techniques.get(fusion_technique)
if fusion_fn:
embedding = fusion_fn(embedding)
print(embedding)
else:
raise ValueError(f"Unsupported fusion technique: {fusion_technique}")
return embedding
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated")
def detect_modality(self, input_data, file_extension=None, user_defined_modality=None, custom_modality_fn=None):
if user_defined_modality:
return user_defined_modality
if custom_modality_fn:
return custom_modality_fn(input_data)
if file_extension:
extension_mapping = {
'.txt': 'text', '.json': 'text',
'.jpg': 'vision', '.png': 'vision',
'.mp3': 'audio', '.wav': 'audio',
'.mp4': 'video', '.avi': 'video',
}
return extension_mapping.get(file_extension.lower())
# Existing modality detection logic
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
elif len(input_data.shape) == 5:
return 'video'
else:
raise ValueError("Unable to detect input data modality")
def register_fusion_technique(self, technique_name, fusion_fn):
self._fusion_techniques[technique_name] = fusion_fn
omni_morph = OmniMorph()
text_input = torch.randint(0, 10000, (1, 50))
# vision_input = torch.randn(1, 3, 224, 224)
# audio_input = torch.randn(1, 128, 100)
# audio_input = audio_input.unsqueeze(1) # Add a new dimension for channels
text_embedding = omni_morph(text_input, user_defined_modality='text') # modality_type is automatically detected
# vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
# audio_embedding = omni_morph(audio_input) # modality_type is automatically detected
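# Further usage sketches (commented out, illustrative): the modality can also be resolved from a
# file extension or a custom detector, and a fusion function can be registered and applied by name.
# text_embedding = omni_morph(text_input, file_extension='.txt')
# text_embedding = omni_morph(text_input, custom_modality_fn=lambda data: 'text')
# omni_morph.register_fusion_technique('mean_pool', lambda emb: emb.mean(dim=1))
# pooled = omni_morph(text_input, user_defined_modality='text', fusion_technique='mean_pool')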
| EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI5.py |
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class AudioEmbedding(nn.Module):
def __init__(self, in_channels, embed_dim):
super().__init__()
self.conv = nn.Conv2d(in_channels, embed_dim, kernel_size=1)
def forward(self, x, **kwargs):
return self.conv(x)
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
self._fusion_techniques = {}
        # Pre-register and instantiate the embedding functions
self.register_and_instantiate('text', TextEmbedding, num_embeddings=10000, embedding_dim=768)
self.register_and_instantiate('vision', VisionEmbedding, img_size=224, patch_size=16, in_chans=3, embed_dim=768)
self.register_and_instantiate('audio', AudioEmbedding, in_channels=128, embed_dim=768)
# self.register_and_instantiate('video', VideoEmbedding, num_channels=3, time_dim=10, height=224, width=224, embed_dim=768)
# Instantiate VisionLanguageEmbedding with visionembeddings and textembeddings instances
vision_embed_instance = self._embedding_instances.get('vision')
text_embed_instance = self._embedding_instances.get('text')
self.vision_language_embedding = VisionLanguageEmbedding(text_embed_instance, vision_embed_instance)
def register_and_instantiate(self, modality_type, embedding_class, **kwargs):
self.register_embedding(modality_type, embedding_class)
self.instantiate_embedding(modality_type, **kwargs)
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, embedding_class=None, *args, **kwargs):
if embedding_class is None:
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, fusion_technique=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data)
print(modality_type)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
embedding = embedding_instance(input_data, **kwargs)
print(embedding)
if fusion_technique:
fusion_fn = self._fusion_techniques.get(fusion_technique)
if fusion_fn:
embedding = fusion_fn(embedding)
print(embedding)
else:
raise ValueError(f"Unsupported fusion technique: {fusion_technique}")
return embedding
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated")
def detect_modality(self, input_data):
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
elif len(input_data.shape) == 5:
return 'video'
else:
raise ValueError("Unable to detect input data modality")
def register_fusion_technique(self, technique_name, fusion_fn):
self._fusion_techniques[technique_name] = fusion_fn
omni_morph = OmniMorph()
text_input = torch.randint(0, 10000, (1, 50))
# vision_input = torch.randn(1, 3, 224, 224)
# audio_input = torch.randn(1, 128, 100)
# audio_input = audio_input.unsqueeze(1) # Add a new dimension for channels
text_embedding = omni_morph(text_input) # modality_type is automatically detected
# vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
# audio_embedding = omni_morph(audio_input) # modality_type is automatically detected
| EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI4.py |
import torch
import torch.nn as nn
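# NOTE: this file references TextEmbedding, VisionEmbedding, AudioEmbedding and VideoEmbedding
# below without defining or importing them; they are assumed to come from a sibling module
# (see e.g. the definitions in OMNI2.py), so adjust the import path before running this script.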
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, embedding_class=None, *args, **kwargs):
if embedding_class is None:
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
return embedding_instance(input_data, **kwargs)
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated")\
def detect_modality(self, input_data):
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
elif len(input_data.shape) == 5:
return 'video'
else:
raise ValueError("Unable to detect input data modality")
omni_morph = OmniMorph()
# Register and instantiate embeddings
omni_morph.register_embedding('text', TextEmbedding)
omni_morph.instantiate_embedding('text', num_embeddings=10000, embedding_dim=768)
omni_morph.register_embedding('vision', VisionEmbedding)
omni_morph.instantiate_embedding('vision', img_size=224, patch_size=16, in_chans=3, embed_dim=768)
omni_morph.register_embedding('audio', AudioEmbedding)
omni_morph.instantiate_embedding('audio', in_channels=128, embed_dim=768)
omni_morph.register_embedding('video', VideoEmbedding)
omni_morph.instantiate_embedding('video', num_channels=3, time_dim=10, height=224, width=224, embed_dim=768)
# Example usage for different modalities
text_input = torch.randint(0, 10000, (1, 50))
vision_input = torch.randn(1, 3, 224, 224)
audio_input = torch.randn(1, 128, 100)
video_input = torch.randn(1, 3, 10, 224, 224)
text_embedding = omni_morph(text_input) # modality_type is automatically detected
vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
audio_embedding = omni_morph(audio_input) # modality_type is automatically detected
video_embedding = omni_morph(video_input) # modality_type is automatically detected
| EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI3.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_type] = embedding_class
def instantiate_embedding(self, modality_type, *args, **kwargs):
embedding_class = self._embedding_registry.get(modality_type)
if embedding_class is not None:
self._embedding_instances[modality_type] = embedding_class(*args, **kwargs)
self.add_module(f"{modality_type}_embedding", self._embedding_instances[modality_type])
else:
raise ValueError(f"Unsupported modality type: {modality_type}")
def forward(self, input_data, modality_type=None, **kwargs):
if modality_type is None:
modality_type = self.detect_modality(input_data)
embedding_instance = self._embedding_instances.get(modality_type)
if embedding_instance is not None:
return embedding_instance(input_data, **kwargs)
else:
raise ValueError(f"Embedding for modality type {modality_type} not instantiated.")
def detect_modality(self, input_data):
# Implement heuristics to automatically detect input data modality
# For example:
if len(input_data.shape) == 2 and input_data.dtype == torch.int64:
return 'text'
elif len(input_data.shape) == 4:
return 'vision'
elif len(input_data.shape) == 3:
return 'audio'
else:
raise ValueError("Unable to detect input data modality.")
class AudioEmbedding(nn.Module):
    def __init__(self, in_channels, embed_dim):
        super().__init__()
        # Conv1d: the audio features below arrive as (batch, channels, time), not as 4-D images
        self.conv = nn.Conv1d(in_channels, embed_dim, kernel_size=1)
    def forward(self, x, **kwargs):
        # (batch, channels, time) -> (batch, time, embed_dim)
        return self.conv(x).transpose(1, 2)
# Instantiate OmniMorph
omni_morph = OmniMorph()
# Register and instantiate embeddings
omni_morph.register_embedding('text', TextEmbedding)
omni_morph.instantiate_embedding('text', num_embeddings=10000, embedding_dim=768)
omni_morph.register_embedding('vision', VisionEmbedding)
omni_morph.instantiate_embedding('vision', img_size=224, patch_size=16, in_chans=3, embed_dim=768)
omni_morph.register_embedding('audio', AudioEmbedding)
omni_morph.instantiate_embedding('audio', in_channels=128, embed_dim=768)
# Example usage for different modalities
text_input = torch.randint(0, 10000, (1, 50))
vision_input = torch.randn(1, 3, 224, 224)
audio_input = torch.randn(1, 128, 100)
text_embedding = omni_morph(text_input) # modality_type is automatically detected
vision_embedding = omni_morph(vision_input) # modality_type is automatically detected
audio_embedding = omni_morph(audio_input) # modality_type is automatically detected | EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI2.py |
from setuptools import setup, find_packages
setup(
name = 'lion-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.7',
license='MIT',
description = 'Lion Optimizer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/lion-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers'
],
install_requires=[
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| EXA-1-master | exa/modular_components/optimizers/lion-pytorch/setup.py |
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
@triton.autotune(configs = [
triton.Config({'BLOCK_SIZE': 128}, num_warps = 4),
triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8),
], key = ['n_elements'])
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
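# Usage note (sketch): this fused Triton update path is selected by constructing the optimizer
# with Lion(..., use_triton = True); as asserted above, p, grad and exp_avg must be CUDA tensors.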
| EXA-1-master | exa/modular_components/optimizers/lion-pytorch/lion_pytorch/triton.py |
from typing import Tuple, Optional, Callable
import torch
from torch.optim.optimizer import Optimizer
# functions
def exists(val):
return val is not None
# update functions
def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
# stepweight decay
p.data.mul_(1 - lr * wd)
# weight update
update = exp_avg.clone().mul_(beta1).add(grad, alpha = 1 - beta1).sign_()
p.add_(update, alpha = -lr)
# decay the momentum running average coefficient
exp_avg.mul_(beta2).add_(grad, alpha = 1 - beta2)
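# In equation form, one update step above is:
#   c_t = beta1 * m_{t-1} + (1 - beta1) * g_t          (interpolated update direction)
#   p_t = (1 - lr * wd) * p_{t-1} - lr * sign(c_t)     (decoupled weight decay + sign update)
#   m_t = beta2 * m_{t-1} + (1 - beta2) * g_t          (momentum / EMA of the gradients)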
# class
class Lion(Optimizer):
def __init__(
self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
use_triton: bool = False
):
assert lr > 0.
assert all([0. <= beta <= 1. for beta in betas])
defaults = dict(
lr = lr,
betas = betas,
weight_decay = weight_decay
)
super().__init__(params, defaults)
self.update_fn = update_fn
if use_triton:
from lion_pytorch.triton import update_fn as triton_update_fn
self.update_fn = triton_update_fn
@torch.no_grad()
def step(
self,
closure: Optional[Callable] = None
):
loss = None
if exists(closure):
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: exists(p.grad), group['params']):
grad, lr, wd, beta1, beta2, state = p.grad, group['lr'], group['weight_decay'], *group['betas'], self.state[p]
# init state - exponential moving average of gradient values
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
exp_avg = state['exp_avg']
self.update_fn(
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2
)
return loss
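# Minimal usage sketch (commented out; the toy model and data are illustrative assumptions,
# not part of this file) - Lion is a drop-in replacement for other torch.optim optimizers:
#
# model = torch.nn.Linear(10, 2)
# opt = Lion(model.parameters(), lr = 1e-4, weight_decay = 1e-2)
# loss = model(torch.randn(8, 10)).sum()
# loss.backward()
# opt.step()
# opt.zero_grad()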
| EXA-1-master | exa/modular_components/optimizers/lion-pytorch/lion_pytorch/lion_pytorch.py |
from lion_pytorch.lion_pytorch import Lion
| EXA-1-master | exa/modular_components/optimizers/lion-pytorch/lion_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'CoLT5-attention',
packages = find_packages(),
version = '0.3.4',
license='MIT',
description = 'Conditionally Routed Attention',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/CoLT5-attention',
keywords = [
'artificial intelligence',
'attention mechanism',
'dynamic routing'
],
install_requires=[
'einops>=0.6.1',
'local-attention>=1.8.5',
'torch>=1.10'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/setup.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_to_multiple(tensor, multiple, dim=-1, value=0):
seq_len = tensor.shape[dim]
m = seq_len / multiple
if m.is_integer():
return tensor, seq_len
remainder = math.ceil(m) * multiple - seq_len
pad_offset = (0,) * (-1 - dim) * 2
padded_tensor = F.pad(tensor, (*pad_offset, 0, remainder), value = value)
return padded_tensor, seq_len
def batched_gather(x, indices):
batch_range = create_batch_range(indices, indices.ndim - 1)
return x[batch_range, indices]
# tensor helpers
def create_batch_range(t, right_pad_dims = 1):
b, device = t.shape[0], t.device
batch_range = torch.arange(b, device = device)
pad_dims = ((1,) * right_pad_dims)
return batch_range.reshape(-1, *pad_dims)
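# Shape sketch (illustrative): with x of shape (b, n, d) and integer indices of shape (b, k),
# batched_gather(x, indices) returns shape (b, k, d), where row i gathers x[i, indices[i]].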
# normalization
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
normed = F.normalize(x, dim = -1)
return normed * self.scale * self.gamma
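    # i.e. the forward pass computes gamma * sqrt(dim) * x / ||x||_2 along the last dimension,
    # a scale-only (RMS) alternative to LayerNorm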
# modules
def FeedForward(dim, mult = 4):
dim_hidden = int(dim * mult)
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Linear(dim_hidden, dim)
)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
multiply_keys_by_score = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_hidden = dim_head * heads
self.multiply_keys_by_score = multiply_keys_by_score
self.norm = RMSNorm(dim)
self.null_kv = nn.Parameter(torch.randn(2, heads, dim_head))
self.to_q = nn.Linear(dim, dim_hidden, bias = False)
self.to_kv = nn.Linear(dim, dim_hidden * 2, bias = False)
self.to_out = nn.Linear(dim_hidden, dim, bias = False)
def forward(
self,
x,
context = None,
mask = None,
normalized_scores_kv = None,
normalized_scores_q = None
):
"""
einops:
b - batch
h - heads, or number of heads per route
r - routing dimension, for routing different sets of key / values - should be more expressive
n - sequence dimension
d - head dimension
i - input model dimension
"""
batch, h = x.shape[0], self.heads
x = self.norm(x)
if exists(context):
context = self.norm(context)
context = default(context, x)
# if routing dimension is not there, unsqueeze for 1 routing dimension
if context.ndim == 3:
context = rearrange(context, 'b n d -> b 1 n d')
if exists(normalized_scores_kv) and isinstance(normalized_scores_kv, torch.Tensor):
if normalized_scores_kv.ndim == 2:
normalized_scores_kv = rearrange(normalized_scores_kv, 'b n -> b 1 n')
normalized_scores_kv = rearrange(normalized_scores_kv, 'b r n -> b r 1 n 1')
num_kv_routes = context.shape[1]
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if exists(normalized_scores_q) and isinstance(normalized_scores_q, torch.Tensor):
q = q * rearrange(normalized_scores_q, 'b n -> b 1 n 1')
# handle key / values, with the routing dimension, dividing the number of heads in between the routes
assert divisible_by(h, num_kv_routes), 'number of heads must be divisible by the number of key / value routes'
heads_per_route = h // num_kv_routes
kv_weight = rearrange(self.to_kv.weight, '(r h d) i -> r h d i', h = heads_per_route, r = num_kv_routes)
kv = einsum('r h d i, b r n i -> b r h n d', kv_weight, context)
k, v = kv.chunk(2, dim = -1)
if exists(normalized_scores_kv):
# in the paper, it is unclear how the signal from heavy attention is passed back to the normalized key / value scores. for now, just multiply the values by the normalized kv scores
v = v * normalized_scores_kv
if self.multiply_keys_by_score:
k = k * normalized_scores_kv
k, v = map(lambda t: rearrange(t, 'b r h n d -> b (r h) n d'), (k, v))
# null key values
nk, nv = map(lambda t: repeat(t, 'h d -> b h 1 d', b = batch), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# scale and get similarity
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# masking
if exists(mask):
if mask.ndim == 3:
mask = repeat(mask, 'b r j -> b (r h) 1 j', h = heads_per_route)
else:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = F.pad(mask, (1, 0), value = True)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
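# a hedged shape-check sketch (illustrative, not from the original source) for the
# Attention module above, assuming inputs shaped (batch, seq, dim) and an optional
# routed context shaped (batch, routes, kv_seq, dim) per the einops legend in forward;
# all sizes are placeholder assumptions.
def _example_attention_shapes():
    attn = Attention(dim = 512, dim_head = 64, heads = 8)
    x = torch.randn(2, 128, 512)
    context = torch.randn(2, 2, 32, 512)  # two routed sets of key / values
    out = attn(x, context = context)      # the 8 heads are split across the 2 kv routes
    assert out.shape == (2, 128, 512)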
# routing related logic
class DifferentiableTopKRouter(nn.Module):
""" differentiable topk using cumulative softmax """
def __init__(
self,
dim,
straight_through = True,
temperature = 1.,
num_routing_tokens = 1
):
super().__init__()
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim))
self.straight_through = straight_through
self.temperature = temperature
def route_back(self, src, routed_tokens, indices):
batch_range = create_batch_range(routed_tokens)
src[batch_range, indices] = routed_tokens
return src
def forward(
self,
x,
*,
num_tokens,
mask = None
):
num_routes = self.num_routing_tokens
# eventual normalized score
scores = einsum('b n d, r d -> b r n', x, self.routing_token)
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
scores, ps = pack_one(scores, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
scores = scores / self.temperature
if exists(mask):
mask_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~mask, mask_value)
scores, indices = scores.sort(dim = -1)
scores = scores - scores.amax(dim = -1, keepdim = True).detach()
exp_scores = scores.exp()
cum_softmax = exp_scores / exp_scores.cumsum(dim = -1).clamp(min = 1e-6)
selected_scores, selected_indices = map(lambda t: t[:, -num_tokens:], (cum_softmax, indices))
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if not self.is_one_routing_token:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
return selected_scores, selected_indices
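# a hedged usage sketch (illustrative, not from the original source): the router above
# scores tokens shaped (batch, seq, dim) and returns normalized scores plus the indices
# of the selected tokens, which can be gathered with batched_gather; sizes are
# placeholder assumptions.
def _example_topk_router():
    router = DifferentiableTopKRouter(dim = 512)
    x = torch.randn(2, 1024, 512)
    scores, indices = router(x, num_tokens = 256)
    routed = batched_gather(x, indices)
    assert scores.shape == (2, 256) and routed.shape == (2, 256, 512)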
# sinkhorn type routing, with ties to optimal transport
from colt5_attention.sinkhorn import gumbel_sinkhorn, scatter_mean, log
class SinkhornRouter(nn.Module):
""" gumbel sinkhorn router """
""" ex. https://arxiv.org/abs/1910.09036 """
def __init__(
self,
dim,
straight_through = True,
n_iters = 8,
temperature = 0.7,
num_routing_tokens = 1
):
super().__init__()
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim))
self.straight_through = straight_through
self.gumbel_sinkhorn_fn = partial(gumbel_sinkhorn, temperature = temperature, n_iters = n_iters)
def route_back(self, src, routed_tokens, indices):
return scatter_mean(src, routed_tokens, indices, dim = 1)
def forward(
self,
x,
*,
num_tokens,
mask = None
):
n, num_routes = x.shape[-2], self.num_routing_tokens
num_tokens = min(n, num_tokens)
# eventual normalized score
scores = einsum('b n d, r d -> b r n', x, self.routing_token)
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
scores, ps = pack_one(scores, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
# calculate scores
scores = repeat(scores, '... j -> ... i j', i = num_tokens)
if exists(mask):
mask_value = -torch.finfo(scores.dtype).max
sinkhorn_mask = rearrange(mask, 'b j -> b 1 j')
scores = scores.masked_fill(~sinkhorn_mask, mask_value)
# sinkhorn
scores = self.gumbel_sinkhorn_fn(scores)
# mask again just in case
if exists(mask):
scores = scores.masked_fill(~sinkhorn_mask, mask_value)
selected_scores, selected_indices = scores.topk(1, dim = -1)
selected_scores, selected_indices = map(lambda t: rearrange(t, '... 1 -> ...'), (selected_scores, selected_indices))
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if not self.is_one_routing_token:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
return selected_scores, selected_indices
from colt5_attention.coor_descent import coor_descent
class CoordinateDescentRouter(nn.Module):
"""
from Wright et al. https://arxiv.org/abs/1502.04759
then adopted by https://arxiv.org/abs/2211.01267 for multi-vector document retrieval by Qian et al
finally, used successfully by this paper for routing to heavy branch attention / feedforward
"""
def __init__(
self,
dim,
straight_through = True,
n_iters = 50, # 50 iterations in the paper
fetch_k_ratio = 9 / 8, # in the paper, they fetch a slightly higher k (multiplied by this ratio) for better learning
eps = 1., # the epsilon for coordinate descent; the CoLT5 paper apparently used 1.
num_routing_tokens = 1
):
super().__init__()
assert fetch_k_ratio >= 1.
self.eps = eps
self.n_iters = n_iters
self.fetch_k_ratio = fetch_k_ratio
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim))
self.straight_through = straight_through
def route_back(self, src, routed_tokens, indices):
batch_range = create_batch_range(routed_tokens)
src[batch_range, indices] = routed_tokens
return src
def forward(
self,
x,
*,
num_tokens,
mask = None
):
n, device, eps, num_routes = x.shape[-2], x.device, self.eps, self.num_routing_tokens
# s stands for eventual normalized score
s = einsum('b n d, r d -> b r n', x, self.routing_token)
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
s, ps = pack_one(s, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
# k, which controls the sparsity of the outputted scores from iterative coordinate descent
effective_k = min(num_tokens * self.fetch_k_ratio, n)
k = torch.tensor([effective_k], device = device)
# coordinate descent
scores = coor_descent(s, n_iters = self.n_iters, mask = mask, k = k, eps = eps)
# get the topk scores and indices from the sparse matrix
selected_scores, selected_indices = scores.topk(num_tokens, dim = -1)
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if not self.is_one_routing_token:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
return selected_scores, selected_indices
# all router types
ROUTERS = dict(
cum_softmax = DifferentiableTopKRouter,
sinkhorn = SinkhornRouter,
coor_descent = CoordinateDescentRouter
)
# main classes
class ConditionalRoutedFeedForward(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens,
light_ff_mult = 0.5,
heavy_ff_mult = 4,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {}
):
super().__init__()
assert router_type in ROUTERS.keys()
self.num_heavy_tokens = num_heavy_tokens
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.light_ff = FeedForward(dim, light_ff_mult)
self.heavy_ff = FeedForward(dim, heavy_ff_mult)
def forward(
self,
x,
mask = None,
num_heavy_tokens = None
):
device, num_heavy_tokens = x.device, default(num_heavy_tokens, self.num_heavy_tokens)
# light feedforward sees all the tokens (hidden dimension is only 1/2 of model dimensions)
light_out = self.light_ff(x)
# route tokens appropriately for heavy branch
normalized_scores, indices = self.router(x, num_tokens = num_heavy_tokens, mask = mask)
# select the tokens to be routed to heavier feedforward (hidden dimension is 4 times model dimensions)
routed_tokens = batched_gather(x, indices)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_ff(routed_tokens) * rearrange(normalized_scores, '... -> ... 1')
# scatter back the output of the heavy feedforward branch
heavy_out = torch.zeros_like(x)
heavy_out = self.router.route_back(heavy_out, routed_tokens_out, indices)
# sum light and heavy branches
return light_out + heavy_out
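# a hedged usage sketch (illustrative, not from the original source) for the routed
# feedforward above; it assumes inputs shaped (batch, seq, dim) and placeholder sizes.
def _example_routed_feedforward():
    ff = ConditionalRoutedFeedForward(dim = 512, num_heavy_tokens = 256)
    x = torch.randn(2, 1024, 512)
    out = ff(x)  # light branch over all tokens + heavy branch over 256 routed tokens
    assert out.shape == x.shape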
class ConditionalRoutedAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128, # each token would see ~ 64 tokens either way to left or right
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False
):
super().__init__()
assert router_type in ROUTERS.keys()
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.light_attn = LocalMHA(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
window_size = light_window_size // 2,
prenorm = True,
causal = False,
use_rotary_pos_emb = False,
look_backward = 1,
look_forward = 1
)
self.q_router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = router_klass(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score
)
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None,
mask = None
):
batch, device = x.shape[0], x.device
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_out = self.light_attn(x, mask = mask)
# route tokens appropriately for heavy branch
normalized_scores_q, indices_q = self.q_router(x, num_tokens = num_heavy_tokens_q, mask = mask)
normalized_scores_kv, indices_kv = self.kv_router(x, num_tokens = num_heavy_tokens_kv, mask = mask)
# select the tokens to be routed to full attention
routed_tokens_q = batched_gather(x, indices_q)
kv_batch_range = create_batch_range(x, right_pad_dims = indices_kv.ndim - 1)
routed_tokens_kv = batched_gather(x, indices_kv)
# calculate key padding mask
routed_tokens_kv_mask = None
if exists(mask):
routed_tokens_kv_mask = mask[kv_batch_range, indices_kv]
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
heavy_out = torch.zeros_like(x)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
# sum light and heavy branches
return light_out + heavy_out
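# a hedged usage sketch (illustrative, not from the original source) for the routed
# (non-causal) attention above; the sequence length is chosen to divide evenly by the
# local attention window, and all sizes are placeholder assumptions.
def _example_routed_attention():
    attn = ConditionalRoutedAttention(
        dim = 512,
        num_heavy_tokens_q = 128,
        num_heavy_tokens_kv = 256,
        light_window_size = 128
    )
    x = torch.randn(2, 1024, 512)
    out = attn(x)
    assert out.shape == x.shape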
# improvised conditionally routed autoregressive attention
class ConditionalRoutedAutoregressiveAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128, # each token would see ~ 64 tokens either way to left or right
heavy_window_size = None,
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False
):
super().__init__()
assert router_type in ROUTERS.keys()
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.heavy_window_size = default(heavy_window_size, light_window_size)
self.light_attn = LocalMHA(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
window_size = light_window_size,
prenorm = True,
causal = True,
use_rotary_pos_emb = False
)
self.q_router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = router_klass(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score
)
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None
):
batch, device = x.shape[0], x.device
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_out = self.light_attn(x)
# pad sequence to multiple of the heavy window size
# routing will take place within each heavy window block size
window_size = self.heavy_window_size
x, seq_len = pad_to_multiple(x, window_size, dim = -2)
padded_seq_len = x.shape[-2]
# construct mask, and make sure not to attend to padding
q_mask = torch.ones((batch, seq_len), dtype = torch.bool, device = device)
q_mask = F.pad(q_mask, (0, padded_seq_len - seq_len), value = False)
# block the sequence and mask into windows for the queries
q = rearrange(x, 'b (n w) d -> (b n) w d', w = window_size)
q_mask = rearrange(q_mask, 'b (n w) -> (b n) w', w = window_size)
# each block of queries attend to sequences that are causally masked out appropriately
windows = padded_seq_len // window_size
kv = repeat(x, 'b n d -> (b m) n d', m = windows)
kv_mask = torch.ones((windows, windows), dtype = torch.bool, device = device).tril(-1)
kv_mask = repeat(kv_mask, 'm n -> (b m) (n w)', b = batch, w = window_size)
# route tokens appropriately for heavy branch, if need be
should_route_q = q.shape[-2] > num_heavy_tokens_q
should_route_kv = kv.shape[-2] > num_heavy_tokens_kv
if should_route_q:
normalized_scores_q, indices_q = self.q_router(q, num_tokens = num_heavy_tokens_q, mask = q_mask)
routed_tokens_q = batched_gather(q, indices_q)
else:
normalized_scores_q = 1.
routed_tokens_q = q
if should_route_kv:
normalized_scores_kv, indices_kv = self.kv_router(kv, num_tokens = num_heavy_tokens_kv, mask = kv_mask)
routed_tokens_kv = batched_gather(kv, indices_kv)
routed_tokens_kv_mask = batched_gather(kv_mask, indices_kv)
else:
normalized_scores_kv = 1.
routed_tokens_kv = kv
routed_tokens_kv_mask = kv_mask
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
if should_route_q:
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
heavy_out = torch.zeros_like(q)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
else:
heavy_out = routed_tokens_out
# un-window and slice out original sequence
heavy_out = rearrange(heavy_out, '(b n) w d -> b (n w) d', b = batch)
heavy_out = heavy_out[:, :seq_len]
# sum light and heavy branches
return light_out + heavy_out
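# a hedged usage sketch (illustrative, not from the original source) for the improvised
# autoregressive variant above; the sequence length is a multiple of the window size,
# and the token counts are placeholder assumptions.
def _example_routed_autoregressive_attention():
    attn = ConditionalRoutedAutoregressiveAttention(
        dim = 512,
        num_heavy_tokens_q = 32,
        num_heavy_tokens_kv = 256,
        light_window_size = 128
    )
    x = torch.randn(2, 512, 512)
    out = attn(x)
    assert out.shape == x.shape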
# adapting the conditional routed self attention to cross attention
class ConditionalRoutedCrossAttention(nn.Module):
def __init__(
self,
dim,
*,
num_tokens_q,
num_tokens_kv,
num_sets_kv = 1, # setting this greater than 1 would route multiple sets of key / values, each of size num_tokens_kv, using this many routing tokens
dim_head = 64,
heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {},
kv_routing_tokens = 1,
multiply_keys_by_score = False
):
super().__init__()
assert router_type in ROUTERS.keys()
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.num_tokens_q = num_tokens_q
self.num_tokens_kv = num_tokens_kv
self.q_router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = router_klass(
dim = dim,
straight_through = router_straight_through,
num_routing_tokens = kv_routing_tokens,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
multiply_keys_by_score = multiply_keys_by_score
)
def forward(
self,
x,
context,
*,
num_tokens_q = None,
num_tokens_kv = None,
mask = None,
context_mask = None
):
batch, device = x.shape[0], x.device
# route the queries
query_length = x.shape[-2]
num_tokens_q = default(num_tokens_q, self.num_tokens_q)
routed_tokens_q = x
should_route_queries = query_length > num_tokens_q
if should_route_queries:
normalized_scores_q, indices_q = self.q_router(x, num_tokens = num_tokens_q, mask = mask)
routed_tokens_q = batched_gather(x, indices_q)
# route the long contexts
key_value_length = context.shape[-2]
num_tokens_kv = default(num_tokens_kv, self.num_tokens_kv)
routed_tokens_kv = context
routed_tokens_kv_mask = context_mask
normalized_scores_kv = None
should_route_kv = key_value_length > num_tokens_kv
if should_route_kv:
normalized_scores_kv, indices_kv = self.kv_router(context, num_tokens = num_tokens_kv, mask = context_mask)
routed_tokens_kv = batched_gather(context, indices_kv) # gather the routed key / values from the context, not the queries
routed_tokens_kv_mask = None
if exists(context_mask):
routed_tokens_kv_mask = batched_gather(context_mask, indices_kv)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv
)
if should_route_queries:
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# early return if queries did not undergo routing
if not should_route_queries:
return routed_tokens_out
# otherwise, scatter back the query outputs
out = torch.zeros_like(x)
out = self.q_router.route_back(out, routed_tokens_out, indices_q)
return out
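# a hedged usage sketch (illustrative, not from the original source) for the cross
# attention variant above, routing a long query sequence and a longer context down to
# small token budgets; all sizes are placeholder assumptions.
def _example_routed_cross_attention():
    cross_attn = ConditionalRoutedCrossAttention(dim = 512, num_tokens_q = 128, num_tokens_kv = 256)
    x = torch.randn(2, 1024, 512)
    context = torch.randn(2, 4096, 512)
    out = cross_attn(x, context)
    assert out.shape == x.shape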
# block
class ConditionalRoutedTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_attn_tokens_q,
num_heavy_attn_tokens_kv,
num_routed_kv = 1,
num_heavy_ff_tokens,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128,
heavy_dim_head = 64,
heavy_heads = 8,
light_ff_mult = 0.5,
heavy_ff_mult = 4,
router_straight_through = True,
router_type = 'coor_descent',
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False
):
super().__init__()
self.conditional_ff = ConditionalRoutedFeedForward(
dim,
num_heavy_tokens = num_heavy_ff_tokens,
light_ff_mult = light_ff_mult,
heavy_ff_mult = heavy_ff_mult,
router_straight_through = router_straight_through,
router_type = router_type,
router_kwargs = router_kwargs
)
self.conditional_attn = ConditionalRoutedAttention(
dim,
light_dim_head = light_dim_head,
light_heads = light_heads,
light_window_size = light_window_size,
heavy_dim_head = heavy_dim_head,
heavy_heads = heavy_heads,
num_heavy_tokens_q = num_heavy_attn_tokens_q,
num_heavy_tokens_kv = num_heavy_attn_tokens_kv,
num_routed_kv = num_routed_kv,
router_straight_through = router_straight_through,
router_type = router_type,
router_kwargs = router_kwargs,
multiply_keys_by_score = multiply_keys_by_score,
multiply_queries_by_score = multiply_queries_by_score
)
def forward(
self,
x,
mask = None,
num_heavy_attn_tokens_q = None,
num_heavy_attn_tokens_kv = None,
num_heavy_ff_tokens = None
):
x = self.conditional_attn(x, mask = mask, num_heavy_tokens_q = num_heavy_attn_tokens_q, num_heavy_tokens_kv = num_heavy_attn_tokens_kv) + x
x = self.conditional_ff(x, mask = mask, num_heavy_tokens = num_heavy_ff_tokens) + x
return x
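# a hedged usage sketch (illustrative, not from the original source) tying the routed
# attention and feedforward together in one block; hyperparameters are placeholder
# assumptions.
def _example_routed_transformer_block():
    block = ConditionalRoutedTransformerBlock(
        dim = 512,
        num_heavy_attn_tokens_q = 128,
        num_heavy_attn_tokens_kv = 256,
        num_heavy_ff_tokens = 256
    )
    x = torch.randn(2, 1024, 512)
    mask = torch.ones(2, 1024, dtype = torch.bool)
    out = block(x, mask = mask)
    assert out.shape == x.shape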
| EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/transformer_block.py |
from colt5_attention.transformer_block import (
ConditionalRoutedFeedForward,
ConditionalRoutedAttention,
ConditionalRoutedAutoregressiveAttention,
ConditionalRoutedCrossAttention,
ConditionalRoutedTransformerBlock,
DifferentiableTopKRouter,
SinkhornRouter,
CoordinateDescentRouter
)
from colt5_attention.coor_descent import coor_descent
| EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/__init__.py |
import torch
import torch.nn.functional as F
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def coor_descent(
s,
*,
n_iters,
k,
eps = 1e-1,
clamp_fn = F.relu,
mask = None,
):
mask_value = -torch.finfo(s.dtype).max
constant = eps * log(k)
b = -clamp_fn(s)
for _ in range(n_iters):
if exists(mask):
s = s.masked_fill(~mask, mask_value)
a = constant - eps * ((s + b) / eps).logsumexp(dim = -1, keepdim = True)
b = -clamp_fn(s + a)
if exists(mask):
s = s.masked_fill(~mask, mask_value)
scores = ((s + a + b) / eps).exp()
return scores
| EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/coor_descent.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_to_multiple(tensor, multiple, dim=-1, value=0):
seq_len = tensor.shape[dim]
m = seq_len / multiple
if m.is_integer():
return tensor, seq_len
remainder = math.ceil(m) * multiple - seq_len
pad_offset = (0,) * (-1 - dim) * 2
padded_tensor = F.pad(tensor, (*pad_offset, 0, remainder), value = value)
return padded_tensor, seq_len
def batched_gather(x, indices):
batch_range = create_batch_range(indices, indices.ndim - 1)
return x[batch_range, indices]
# tensor helpers
def create_batch_range(t, right_pad_dims = 1):
b, device = t.shape[0], t.device
batch_range = torch.arange(b, device = device)
pad_dims = ((1,) * right_pad_dims)
return batch_range.reshape(-1, *pad_dims)
# normalization
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
normed = F.normalize(x, dim = -1)
return normed * self.scale * self.gamma
# modules
def FeedForward(dim, mult = 4):
dim_hidden = int(dim * mult)
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Linear(dim_hidden, dim)
)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
multiply_keys_by_score = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_hidden = dim_head * heads
self.multiply_keys_by_score = multiply_keys_by_score
self.norm = RMSNorm(dim)
self.null_kv = nn.Parameter(torch.randn(2, heads, dim_head))
self.to_q = nn.Linear(dim, dim_hidden, bias = False)
self.to_kv = nn.Linear(dim, dim_hidden * 2, bias = False)
self.to_out = nn.Linear(dim_hidden, dim, bias = False)
def forward(
self,
x,
context = None,
mask = None,
normalized_scores_kv = None,
normalized_scores_q = None
):
"""
einops:
b - batch
h - heads, or number of heads per route
r - routing dimension, for routing different sets of key / values - should be more expressive
n - sequence dimension
d - head dimension
i - input model dimension
"""
batch, h = x.shape[0], self.heads
x = self.norm(x)
if exists(context):
context = self.norm(context)
context = default(context, x)
# if routing dimension is not there, unsqueeze for 1 routing dimension
if context.ndim == 3:
context = rearrange(context, 'b n d -> b 1 n d')
if exists(normalized_scores_kv) and isinstance(normalized_scores_kv, torch.Tensor):
if normalized_scores_kv.ndim == 2:
normalized_scores_kv = rearrange(normalized_scores_kv, 'b n -> b 1 n')
normalized_scores_kv = rearrange(normalized_scores_kv, 'b r n -> b r 1 n 1')
num_kv_routes = context.shape[1]
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if exists(normalized_scores_q) and isinstance(normalized_scores_q, torch.Tensor):
q = q * rearrange(normalized_scores_q, 'b n -> b 1 n 1')
# handle key / values, with the routing dimension, dividing the number of heads in between the routes
assert divisible_by(h, num_kv_routes), 'number of heads must be divisible by the number of key / value routes'
heads_per_route = h // num_kv_routes
kv_weight = rearrange(self.to_kv.weight, '(r h d) i -> r h d i', h = heads_per_route, r = num_kv_routes)
kv = einsum('r h d i, b r n i -> b r h n d', kv_weight, context)
k, v = kv.chunk(2, dim = -1)
if exists(normalized_scores_kv):
# in the paper, it is unclear how the signal from heavy attention is passed back to the normalized key / value scores. for now, just multiply the values by the normalized kv scores
v = v * normalized_scores_kv
if self.multiply_keys_by_score:
k = k * normalized_scores_kv
k, v = map(lambda t: rearrange(t, 'b r h n d -> b (r h) n d'), (k, v))
# null key values
nk, nv = map(lambda t: repeat(t, 'h d -> b h 1 d', b = batch), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# scale and get similarity
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# masking
if exists(mask):
if mask.ndim == 3:
mask = repeat(mask, 'b r j -> b (r h) 1 j', h = heads_per_route)
else:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = F.pad(mask, (1, 0), value = True)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# routing related logic
class DifferentiableTopKRouter(nn.Module):
""" differentiable topk using cumulative softmax """
def __init__(
self,
dim,
straight_through = True,
temperature = 1.,
num_routing_tokens = 1
):
super().__init__()
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim))
self.straight_through = straight_through
self.temperature = temperature
def route_back(self, src, routed_tokens, indices):
batch_range = create_batch_range(routed_tokens)
src[batch_range, indices] = routed_tokens
return src
def forward(
self,
x,
*,
num_tokens,
mask = None
):
num_routes = self.num_routing_tokens
# eventual normalized score
scores = einsum('b n d, r d -> b r n', x, self.routing_token)
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
scores, ps = pack_one(scores, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
scores = scores / self.temperature
if exists(mask):
mask_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~mask, mask_value)
scores, indices = scores.sort(dim = -1)
scores = scores - scores.amax(dim = -1, keepdim = True).detach()
exp_scores = scores.exp()
cum_softmax = exp_scores / exp_scores.cumsum(dim = -1).clamp(min = 1e-6)
selected_scores, selected_indices = map(lambda t: t[:, -num_tokens:], (cum_softmax, indices))
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if not self.is_one_routing_token:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
return selected_scores, selected_indices
# sinkhorn type routing, with ties to optimal transport
from colt5_attention.sinkhorn import gumbel_sinkhorn, scatter_mean, log
class SinkhornRouter(nn.Module):
""" gumbel sinkhorn router """
""" ex. https://arxiv.org/abs/1910.09036 """
def __init__(
self,
dim,
straight_through = True,
n_iters = 8,
temperature = 0.7,
num_routing_tokens = 1
):
super().__init__()
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim))
self.straight_through = straight_through
self.gumbel_sinkhorn_fn = partial(gumbel_sinkhorn, temperature = temperature, n_iters = n_iters)
def route_back(self, src, routed_tokens, indices):
return scatter_mean(src, routed_tokens, indices, dim = 1)
def forward(
self,
x,
*,
num_tokens,
mask = None
):
n, num_routes = x.shape[-2], self.num_routing_tokens
num_tokens = min(n, num_tokens)
# eventual normalized score
scores = einsum('b n d, r d -> b r n', x, self.routing_token)
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
scores, ps = pack_one(scores, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
# calculate scores
scores = repeat(scores, '... j -> ... i j', i = num_tokens)
if exists(mask):
mask_value = -torch.finfo(scores.dtype).max
sinkhorn_mask = rearrange(mask, 'b j -> b 1 j')
scores = scores.masked_fill(~sinkhorn_mask, mask_value)
# sinkhorn
scores = self.gumbel_sinkhorn_fn(scores)
# mask again just in case
if exists(mask):
scores = scores.masked_fill(~sinkhorn_mask, mask_value)
selected_scores, selected_indices = scores.topk(1, dim = -1)
selected_scores, selected_indices = map(lambda t: rearrange(t, '... 1 -> ...'), (selected_scores, selected_indices))
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if not self.is_one_routing_token:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
return selected_scores, selected_indices
from colt5_attention.coor_descent import coor_descent
class CoordinateDescentRouter(nn.Module):
"""
from Wright et al. https://arxiv.org/abs/1502.04759
then adopted by https://arxiv.org/abs/2211.01267 for multi-vector document retrieval by Qian et al
finally, used successfully by this paper for routing to heavy branch attention / feedforward
"""
def __init__(
self,
dim,
straight_through = True,
n_iters = 50, # 50 iterations in the paper
fetch_k_ratio = 9 / 8, # in the paper, they fetch a slightly higher k (multiplied by this ratio) for better learning
eps = 1., # the epsilon for coordinate descent; the CoLT5 paper apparently used 1.
num_routing_tokens = 1
):
super().__init__()
assert fetch_k_ratio >= 1.
self.eps = eps
self.n_iters = n_iters
self.fetch_k_ratio = fetch_k_ratio
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim))
self.straight_through = straight_through
def route_back(self, src, routed_tokens, indices):
batch_range = create_batch_range(routed_tokens)
src[batch_range, indices] = routed_tokens
return src
def forward(
self,
x,
*,
num_tokens,
mask = None
):
n, device, eps, num_routes = x.shape[-2], x.device, self.eps, self.num_routing_tokens
# s stands for eventual normalized score
s = einsum('b n d, r d -> b r n', x, self.routing_token)
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
s, ps = pack_one(s, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
# k, which controls the sparsity of the outputted scores from iterative coordinate descent
effective_k = min(num_tokens * self.fetch_k_ratio, n)
k = torch.tensor([effective_k], device = device)
# coordinate descent
scores = coor_descent(s, n_iters = self.n_iters, mask = mask, k = k, eps = eps)
# get the topk scores and indices from the sparse matrix
selected_scores, selected_indices = scores.topk(num_tokens, dim = -1)
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if not self.is_one_routing_token:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
return selected_scores, selected_indices
# all router types
ROUTERS = dict(
cum_softmax = DifferentiableTopKRouter,
sinkhorn = SinkhornRouter,
coor_descent = CoordinateDescentRouter
)
# main classes
class ConditionalRoutedFeedForward(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens,
light_ff_mult = 0.5,
heavy_ff_mult = 4,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {}
):
super().__init__()
assert router_type in ROUTERS.keys()
self.num_heavy_tokens = num_heavy_tokens
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.light_ff = FeedForward(dim, light_ff_mult)
self.heavy_ff = FeedForward(dim, heavy_ff_mult)
def forward(
self,
x,
mask = None,
num_heavy_tokens = None
):
device, num_heavy_tokens = x.device, default(num_heavy_tokens, self.num_heavy_tokens)
# light feedforward sees all the tokens (hidden dimension is only 1/2 of model dimensions)
light_out = self.light_ff(x)
# route tokens appropriately for heavy branch
normalized_scores, indices = self.router(x, num_tokens = num_heavy_tokens, mask = mask)
# select the tokens to be routed to heavier feedforward (hidden dimension is 4 times model dimensions)
routed_tokens = batched_gather(x, indices)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_ff(routed_tokens) * rearrange(normalized_scores, '... -> ... 1')
# scatter back the output of the heavy feedforward branch
heavy_out = torch.zeros_like(x)
heavy_out = self.router.route_back(heavy_out, routed_tokens_out, indices)
# sum light and heavy branches
return light_out + heavy_out
class ConditionalRoutedAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128, # each token would see ~ 64 tokens either way to left or right
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False
):
super().__init__()
assert router_type in ROUTERS.keys()
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.light_attn = LocalMHA(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
window_size = light_window_size // 2,
prenorm = True,
causal = False,
use_rotary_pos_emb = False,
look_backward = 1,
look_forward = 1
)
self.q_router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = router_klass(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score
)
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None,
mask = None
):
batch, device = x.shape[0], x.device
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_out = self.light_attn(x, mask = mask)
# route tokens appropriately for heavy branch
normalized_scores_q, indices_q = self.q_router(x, num_tokens = num_heavy_tokens_q, mask = mask)
normalized_scores_kv, indices_kv = self.kv_router(x, num_tokens = num_heavy_tokens_kv, mask = mask)
# select the tokens to be routed to full attention
routed_tokens_q = batched_gather(x, indices_q)
kv_batch_range = create_batch_range(x, right_pad_dims = indices_kv.ndim - 1)
routed_tokens_kv = batched_gather(x, indices_kv)
# calculate key padding mask
routed_tokens_kv_mask = None
if exists(mask):
routed_tokens_kv_mask = mask[kv_batch_range, indices_kv]
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
heavy_out = torch.zeros_like(x)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
# sum light and heavy branches
return light_out + heavy_out
# improvised conditionally routed autoregressive attention
class ConditionalRoutedAutoregressiveAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128, # each token would see ~ 64 tokens either way to left or right
heavy_window_size = None,
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False
):
super().__init__()
assert router_type in ROUTERS.keys()
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.heavy_window_size = default(heavy_window_size, light_window_size)
self.light_attn = LocalMHA(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
window_size = light_window_size,
prenorm = True,
causal = True,
use_rotary_pos_emb = False
)
self.q_router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = router_klass(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score
)
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None
):
batch, device = x.shape[0], x.device
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_out = self.light_attn(x)
# pad sequence to multiple of the heavy window size
# routing will take place within each heavy window block size
window_size = self.heavy_window_size
x, seq_len = pad_to_multiple(x, window_size, dim = -2)
padded_seq_len = x.shape[-2]
# construct mask, and make sure not to attend to padding
q_mask = torch.ones((batch, seq_len), dtype = torch.bool, device = device)
q_mask = F.pad(q_mask, (0, padded_seq_len - seq_len), value = False)
# block the sequence and mask into windows for the queries
q = rearrange(x, 'b (n w) d -> (b n) w d', w = window_size)
q_mask = rearrange(q_mask, 'b (n w) -> (b n) w', w = window_size)
# each block of queries attend to sequences that are causally masked out appropriately
windows = padded_seq_len // window_size
kv = repeat(x, 'b n d -> (b m) n d', m = windows)
kv_mask = torch.ones((windows, windows), dtype = torch.bool, device = device).tril(-1)
kv_mask = repeat(kv_mask, 'm n -> (b m) (n w)', b = batch, w = window_size)
# route tokens appropriately for heavy branch, if need be
should_route_q = q.shape[-2] > num_heavy_tokens_q
should_route_kv = kv.shape[-2] > num_heavy_tokens_kv
if should_route_q:
normalized_scores_q, indices_q = self.q_router(q, num_tokens = num_heavy_tokens_q, mask = q_mask)
routed_tokens_q = batched_gather(q, indices_q)
else:
normalized_scores_q = 1.
routed_tokens_q = q
if should_route_kv:
normalized_scores_kv, indices_kv = self.kv_router(kv, num_tokens = num_heavy_tokens_kv, mask = kv_mask)
routed_tokens_kv = batched_gather(kv, indices_kv)
routed_tokens_kv_mask = batched_gather(kv_mask, indices_kv)
else:
normalized_scores_kv = 1.
routed_tokens_kv = kv
routed_tokens_kv_mask = kv_mask
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
if should_route_q:
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
heavy_out = torch.zeros_like(q)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
else:
heavy_out = routed_tokens_out
# un-window and slice out original sequence
heavy_out = rearrange(heavy_out, '(b n) w d -> b (n w) d', b = batch)
heavy_out = heavy_out[:, :seq_len]
# sum light and heavy branches
return light_out + heavy_out
# adapting the conditional routed self attention to cross attention
class ConditionalRoutedCrossAttention(nn.Module):
def __init__(
self,
dim,
*,
num_tokens_q,
num_tokens_kv,
num_sets_kv = 1, # setting this greater than 1 would route multiple sets of key / values, each of size num_tokens_kv, using this many routing tokens
dim_head = 64,
heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_type = 'coor_descent',
router_kwargs: dict = {},
kv_routing_tokens = 1,
multiply_keys_by_score = False
):
super().__init__()
assert router_type in ROUTERS.keys()
self.router_type = router_type
router_klass = ROUTERS.get(router_type)
self.num_tokens_q = num_tokens_q
self.num_tokens_kv = num_tokens_kv
self.q_router = router_klass(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = router_klass(
dim = dim,
straight_through = router_straight_through,
num_routing_tokens = kv_routing_tokens,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
multiply_keys_by_score = multiply_keys_by_score
)
def forward(
self,
x,
context,
*,
num_tokens_q = None,
num_tokens_kv = None,
mask = None,
context_mask = None
):
batch, device = x.shape[0], x.device
# route the queries
query_length = x.shape[-2]
num_tokens_q = default(num_tokens_q, self.num_tokens_q)
routed_tokens_q = x
should_route_queries = query_length > num_tokens_q
if should_route_queries:
normalized_scores_q, indices_q = self.q_router(x, num_tokens = num_tokens_q, mask = mask)
routed_tokens_q = batched_gather(x, indices_q)
# route the long contexts
key_value_length = context.shape[-2]
num_tokens_kv = default(num_tokens_kv, self.num_tokens_kv)
routed_tokens_kv = context
routed_tokens_kv_mask = context_mask
normalized_scores_kv = None
should_route_kv = key_value_length > num_tokens_kv
if should_route_kv:
normalized_scores_kv, indices_kv = self.kv_router(context, num_tokens = num_tokens_kv, mask = context_mask)
routed_tokens_kv = batched_gather(context, indices_kv) # gather the routed key / values from the context, not the queries
routed_tokens_kv_mask = None
if exists(context_mask):
routed_tokens_kv_mask = batched_gather(context_mask, indices_kv)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv
)
if should_route_queries:
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# early return if queries did not undergo routing
if not should_route_queries:
return routed_tokens_out
# otherwise, scatter back the query outputs
out = torch.zeros_like(x)
out = self.q_router.route_back(out, routed_tokens_out, indices_q)
return out
# block
class ConditionalRoutedTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_attn_tokens_q,
num_heavy_attn_tokens_kv,
num_routed_kv = 1,
num_heavy_ff_tokens,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128,
heavy_dim_head = 64,
heavy_heads = 8,
light_ff_mult = 0.5,
heavy_ff_mult = 4,
router_straight_through = True,
router_type = 'coor_descent',
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False
):
super().__init__()
self.conditional_ff = ConditionalRoutedFeedForward(
dim,
num_heavy_tokens = num_heavy_ff_tokens,
light_ff_mult = light_ff_mult,
heavy_ff_mult = heavy_ff_mult,
router_straight_through = router_straight_through,
router_type = router_type,
router_kwargs = router_kwargs
)
self.conditional_attn = ConditionalRoutedAttention(
dim,
light_dim_head = light_dim_head,
light_heads = light_heads,
light_window_size = light_window_size,
heavy_dim_head = heavy_dim_head,
heavy_heads = heavy_heads,
num_heavy_tokens_q = num_heavy_attn_tokens_q,
num_heavy_tokens_kv = num_heavy_attn_tokens_kv,
num_routed_kv = num_routed_kv,
router_straight_through = router_straight_through,
router_type = router_type,
router_kwargs = router_kwargs,
multiply_keys_by_score = multiply_keys_by_score,
multiply_queries_by_score = multiply_queries_by_score
)
def forward(
self,
x,
mask = None,
num_heavy_attn_tokens_q = None,
num_heavy_attn_tokens_kv = None,
num_heavy_ff_tokens = None
):
x = self.conditional_attn(x, mask = mask, num_heavy_tokens_q = num_heavy_attn_tokens_q, num_heavy_tokens_kv = num_heavy_attn_tokens_kv) + x
x = self.conditional_ff(x, mask = mask, num_heavy_tokens = num_heavy_ff_tokens) + x
return x
| EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/flash.py |
import torch
from einops import repeat
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def scatter_mean(src, t, index, dim, eps = 1e-5):
index = repeat(index, '... -> ... d', d = t.shape[-1])
numer = src.scatter_add(dim, index, t)
denom = src.scatter_add(dim, index, torch.ones_like(t))
return numer / denom.clamp(min = eps)
def sample_gumbel(shape, device, dtype):
u = torch.empty(shape, device = device, dtype = dtype).uniform_(0, 1)
return -log(-log(u))
def sinkhorn(r, n_iters = 8):
n = r.shape[1]
for _ in range(n_iters):
r = r - torch.logsumexp(r, dim = 2, keepdim = True)
r = r - torch.logsumexp(r, dim = 1, keepdim = True)
return torch.exp(r)
def gumbel_sinkhorn(r, n_iters = 8, temperature = 0.7):
r = log(r)
gumbel = sample_gumbel(r.shape, r.device, r.dtype)
r = (r + gumbel) / temperature
return sinkhorn(r, n_iters)
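# a hedged usage sketch (illustrative, not from the original source): gumbel_sinkhorn
# above expects a batch of positive score matrices shaped (batch, n, n) and returns a
# relaxed permutation whose rows and columns each sum to roughly 1.
def _example_gumbel_sinkhorn():
    r = torch.rand(2, 4, 4) + 1e-3
    out = gumbel_sinkhorn(r, n_iters = 16, temperature = 0.7)
    print(out.sum(dim = -1))  # rows ~ 1
    print(out.sum(dim = -2))  # columns ~ 1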
| EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/sinkhorn.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
# flash attention
def _query_chunk_flash_attention(q_range_chunk, k_range, q, k, v):
q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1]
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
key_chunk_idx, out, row_sum, row_max = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim))
v_chunk = lax.dynamic_slice(v, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim))
k_range_chunk = lax.dynamic_slice(k_range, (0, key_chunk_idx), slice_sizes=(1, k_chunk_sizes))
causal_mask = q_range_chunk < k_range_chunk
attn_weights = q_scaled @ k_chunk.transpose()
attn_weights = jnp.where(causal_mask, MASK_VALUE, attn_weights)
block_row_max = jnp.max(attn_weights, axis = -1, keepdims = True)
exp_weights = jnp.exp(attn_weights - block_row_max)
exp_weights = jnp.where(causal_mask, 0., exp_weights)
block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) + EPSILON
exp_values = exp_weights @ v_chunk
new_row_max = jnp.maximum(block_row_max, row_max)
exp_row_max_diff = jnp.exp(row_max - new_row_max)
exp_block_row_max_diff = jnp.exp(block_row_max - new_row_max)
new_row_sum = exp_row_max_diff * row_sum + exp_block_row_max_diff * block_row_sum
out = (row_sum / new_row_sum) * exp_row_max_diff * out + \
(exp_block_row_max_diff / new_row_sum) * exp_values
return (key_chunk_idx + k_chunk_sizes, out, new_row_sum, new_row_max), None
out = jnp.zeros((q_len, v_dim)) # accumulated output lives in the value head dimension
row_sum = jnp.zeros((q_len, 1))
row_max = jnp.ones((q_len, 1)) * -1e6
(_, out, row_sum, row_max), _ = lax.scan(chunk_scanner, init = (0, out, row_sum, row_max), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
out = out.reshape(q_len, v_dim)
row_sum = row_sum.reshape(q_len)
row_max = row_max.reshape(q_len)
return out, row_sum, row_max
def _causal_flash_attention(q, k, v):
q_len, dim, k_len, v_dim = *q.shape, *v.shape
q_range = jnp.arange(q_len).reshape(q_len, 1) + (k_len - q_len)
k_range = jnp.arange(k_len).reshape(1, k_len)
def chunk_scanner(chunk_idx, _):
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, dim))
q_range_chunk = lax.dynamic_slice(q_range, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(q_range_chunk, k_range, q_chunk, k, v))
_, (out, row_sum, row_max) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
out = out.reshape(q_len, v_dim)
row_sum = row_sum.reshape(q_len)
row_max = row_max.reshape(q_len)
return out, (row_sum, row_max)
@custom_vjp
def causal_flash_attention(q, k, v):
out, _ = _causal_flash_attention(q, k, v)
return out
@jit
def flash_attention_forward(q, k, v):
out, (row_sum, row_max) = _causal_flash_attention(q, k, v)
return out, (q, k, v, out, row_sum, row_max)
def _query_chunk_flash_attention_backward(query_range_chunk, key_range, q, k, v, o, do, l, m):
q_len, dim, k_len, v_dim = *q.shape, *v.shape
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
key_chunk_idx, dq = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim))
v_chunk = lax.dynamic_slice(v, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim))
key_range_chunk = lax.dynamic_slice(key_range, (0, key_chunk_idx), slice_sizes=(1, k_chunk_sizes))
causal_mask = query_range_chunk < key_range_chunk
attn_weights = q_scaled @ k_chunk.transpose()
attn_weights = jnp.where(causal_mask, MASK_VALUE, attn_weights)
exp_attn_weights = jnp.exp(attn_weights - m)
exp_attn_weights = jnp.where(causal_mask, 0., exp_attn_weights)
p = exp_attn_weights / l
dv_chunk = p.transpose() @ do
dp = do @ v_chunk.transpose()
D = jnp.sum(do * o, axis = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = ds @ k_chunk
dk_chunk = ds.transpose() @ q
return (key_chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk)
dq = jnp.zeros_like(q)
(_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
dq = dq.reshape(q_len, dim)
dk = dk.reshape(k_len, dim)
dv = dv.reshape(k_len, v_dim)
return dq, dk, dv
@jit
def flash_attention_backward(res, do):
q, k, v, o, l, m = res
q_len, dim, k_len, v_dim = *q.shape, *v.shape
dk = jnp.zeros_like(k)
dv = jnp.zeros_like(v)
m = m.reshape(q_len, 1)
l = l.reshape(q_len, 1)
q_range = jnp.arange(q_len).reshape(q_len, 1) + (k_len - q_len)
k_range = jnp.arange(k_len).reshape(1, k_len)
def chunk_scanner(carries, _):
chunk_idx, dk, dv = carries
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, q.shape[-1]))
q_range_chunk = lax.dynamic_slice(q_range, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
m_chunk = lax.dynamic_slice(m, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
l_chunk = lax.dynamic_slice(l, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
o_chunk = lax.dynamic_slice(o, (chunk_idx, 0), slice_sizes = (chunk_sizes, o.shape[-1]))
do_chunk = lax.dynamic_slice(do, (chunk_idx, 0), slice_sizes = (chunk_sizes, do.shape[-1]))
dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_range_chunk, k_range, q_chunk, k, v, o_chunk, do_chunk, l_chunk, m_chunk)
return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk
(_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
dq = dq.reshape(q_len, dim)
return dq, dk, dv
causal_flash_attention.defvjp(flash_attention_forward, flash_attention_backward)
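# Usage sketch (illustrative addition, not part of the original module): the public
# causal_flash_attention above expects single-head 2D arrays of shape (seq_len, dim);
# the key/value length may exceed the query length, in which case the (k_len - q_len)
# offset in q_range aligns the causal mask between the two sequences.
if __name__ == '__main__':
    from jax import random
    key = random.PRNGKey(0)
    q = random.normal(key, (1024, 64))
    k = random.normal(key, (2048, 64))
    v = random.normal(key, (2048, 64))
    out = causal_flash_attention(q, k, v)
    print(out.shape)  # (1024, 64)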
| EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/casual_flash_attention.py |
import jax
from jax import nn
from jax import jit, numpy as jnp
from jax.numpy import einsum
from einops import rearrange
EPSILON = 1e-10
MASK_VALUE = -1e10
COSINE_SIM_SCALE = 10
@jit
def attention(q, k, v, key_mask):
dim, k_len = q.shape[-1], k.shape[-2]
scale = 1/ jnp.sqrt(dim)
q = q * scale
sim = einsum('... i d, ... j d -> ... i j', q, k)
key_mask = rearrange(key_mask, 'b j -> b 1 1 j')
sim = jnp.where(key_mask, sim, MASK_VALUE)
attn = nn.softmax(sim, axis=-1)
return attn @ v
@jit
def causal_attention(q, k, v):
q_len, dim, k_len = *q.shape[-2:], k.shape[-2]
scale = 1 / jnp.sqrt(dim)
q = q * scale
sim = einsum('... i d, ... j d -> ... i j', q, k)
causal_mask = jnp.triu(jnp.ones((q_len, k_len)), k_len - q_len + 1)
sim = jnp.where(causal_mask, MASK_VALUE, sim)
attn = nn.softmax(sim, axis=-1)
return einsum('... i j, ... j d -> ... i d', attn, v)
# cosine sim attention (reference)
@jit
def l2norm(t):
return t / (jnp.linalg.norm(t) + EPSILON)
@jit
def cosine_sim_attention(q, k, v, key_mask):
dim, k_len = q.shape[-1], k.shape[-2]
q, k = map(l2norm, (q, k))
sim = einsum('...i d, ... j d -> ... i j', q, k) * COSINE_SIM_SCALE
key_mask = rearrange(key_mask, 'b j -> b 1 1 j')
sim = jnp.where(key_mask, sim, MASK_VALUE)
attn = nn.softmax(sim, axis=-1)
return einsum('... i j, ... j d -> ... i d', attn, v)
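# Usage sketch (illustrative addition, not part of the original module): all three reference
# attentions operate on (batch, heads, seq_len, dim) arrays; key_mask is a boolean
# (batch, k_len) array marking the valid key positions.
if __name__ == '__main__':
    from jax import random
    key = random.PRNGKey(0)
    q = random.normal(key, (2, 4, 128, 64))
    k = random.normal(key, (2, 4, 256, 64))
    v = random.normal(key, (2, 4, 256, 64))
    key_mask = random.randint(key, (2, 256), 0, 2) == 1
    out_plain = attention(q, k, v, key_mask)              # (2, 4, 128, 64)
    out_causal = causal_attention(q, k, v)                # (2, 4, 128, 64)
    out_cosine = cosine_sim_attention(q, k, v, key_mask)
    print(out_plain.shape, out_causal.shape, out_cosine.shape)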
| EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/attention.py |
import math
from functools import partial
import jax
from jax import lax, numpy as jnp, jit
# constants
HIGHEST_PRECISION = jax.lax.Precision.HIGHEST
einsum = partial(jnp.einsum, precision = HIGHEST_PRECISION)
# Figure 1 from https://arxiv.org/abs/2112.05682
# cleaned up
def _query_chunk_attention(q, k, v, k_chunk_size = 4096):
q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1]
k_chunk_size = min(k_chunk_size, k_len)
q = q / jnp.sqrt(dim)
@partial(jax.checkpoint, prevent_cse = False)
def summarize_chunk(q, k, v):
attn_weights = einsum('qd, kd -> qk', q, k)
max_score = jnp.max(attn_weights, axis = -1, keepdims = True)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = einsum('vf, qv -> qf', v, exp_weights)
return (exp_values, exp_weights.sum(axis = -1), max_score.reshape((q_len,)))
def chunk_scanner(chunk_idx):
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_size, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_size, v_dim))
return summarize_chunk(q, k_chunk, v_chunk)
chunk_values, chunk_weights, chunk_max = jax.lax.map(chunk_scanner, xs = jnp.arange(0, k_len, k_chunk_size))
global_max = jnp.max(chunk_max, axis = 0, keepdims = True)
max_diffs = jnp.exp(chunk_max - global_max)
chunk_values *= jnp.expand_dims(max_diffs, axis=-1)
chunk_weights *= max_diffs
all_values = chunk_values.sum(axis = 0)
all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis = 0)
return all_values / all_weights
@jit
def rabe_attention(q, k, v, q_chunk_size = 1024, k_chunk_size = 4096):
q_len, dim, v_dim = *q.shape, v.shape[-1]
def chunk_scanner(chunk_idx, _):
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (min(q_chunk_size, q_len), dim))
return (chunk_idx + q_chunk_size, _query_chunk_attention(q_chunk, k, v, k_chunk_size = k_chunk_size))
_, res = jax.lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / q_chunk_size))
return res.reshape(q_len, v_dim) | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/rabe_attention.py |
from flash_attention import flash_attention
from cosine_sim_flash_attention import cosine_sim_flash_attention
from casual_flash_attention import causal_flash_attention
from rabe_attention import rabe_attention
from attention import attention, causal_attention, cosine_sim_attention
from utils import value_and_grad_difference, PRNGKeyGenerator
plain_attention = attention | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/__init__.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
#CONSTANTS
EPSILON = 1e-10
MASK_VALUE= -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
COSINE_SIM_SCALE = 10 # this may need to be a function of log(sequence length), but 15 was sufficient for 2048 and 4096
#flash attention
def _query_chunk_flash_attention(chunk_idx, q, k, v, key_mask):
q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1]
def chunk_scanner(carries, _):
chunk_idx, out, row_sum = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx,), slice_sizes=(k_chunk_sizes, ))
attn_weights = (q @ k_chunk.transpose() * COSINE_SIM_SCALE) - COSINE_SIM_SCALE # the output of this will range from [-2 * scale, 0], and the row sums are now bounded by key/values sequence length - you can also shift this more if you wish to tailor the normalization constant in the case of extreme sequence length
attn_weights = jnp.where(key_mask_chunk, attn_weights, MASK_VALUE)
exp_weights = jnp.exp(attn_weights)
exp_weights = jnp.where(key_mask_chunk, exp_weights, 0.)
block_row_sum = jnp.sum(exp_weights, axis=-1, keepdims=True)
exp_values = exp_weights @ v_chunk
chunk_out = exp_values / k_len
return (chunk_idx + k_chunk_sizes, out + chunk_out, row_sum + block_row_sum), None
out = jnp.zeros((q_len, dim))
row_sum = jnp.zeros((q_len, 1))
(_, out, row_sum), _ = lax.scan(chunk_scanner, init= (0, out, row_sum), xs=None, length=math.ceil(k_len / K_CHUNK_SIZE))
out = out * (k_len / (row_sum + EPSILON)) # RENORMALIZE AFTER ACQUIRING ALL THE CORRECT ROW Sums
out = out.reshape(q_len, v_dim)
row_sum = row_sum.reshape(q_len)
return out, row_sum
@jit
def l2norm(t):
return t / (jnp.linalg.norm(t) + EPSILON)
@jit
def cosine_sim_flash_attention(q, k, v, key_mask):
q, k = map(l2norm, (q, k))
return cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask)
def _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask):
q_len, dim, v_dim = *q.shape, v.shape[-1]
def chunk_scanner(chunk_idx, _):
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, dim))
return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(chunk_idx, q_chunk, k, v, key_mask))
_, (out, row_sum) = lax.scan(chunk_scanner, init=0, xs=None, length =math.ceil(q_len / Q_CHUNK_SIZE))
out = out.reshape(q_len, v_dim)
row_sum = row_sum.reshape(q_len)
return out, (row_sum,)
@custom_vjp
def cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask):
out, _ = _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask)
return out
@jit
def flash_attention_forward(q, k, v, key_mask):
out, (row_sum,) = _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask)
return out, (q, k, v, key_mask, out, row_sum)
def _query_chunk_flash_attention_backward(q, k, v, key_mask, o, do, l):
q_len, dim, k_len, v_dim = *q.shape, *v.shape
def chunk_scanner(carries, _):
chunk_idx, dq = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx,), slice_sizes=(k_chunk_sizes,))
attn_weights = q @ k_chunk.transpose() * COSINE_SIM_SCALE - COSINE_SIM_SCALE
exp_attn_weights = jnp.exp(attn_weights)
exp_attn_weights = jnp.where(key_mask_chunk, exp_attn_weights, 0.)
p = exp_attn_weights / (l + EPSILON)
dv_chunk = p.transpose() @ do
dp = do @ v_chunk.transpose()
D = jnp.sum(do * o, axis=-1, keepdims=True)
ds = p * COSINE_SIM_SCALE * (dp - D)
dq_chunk = ds @ k_chunk
dk_chunk = ds.transpose() @ q
return (chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk)
dq = jnp.zeros_like(q)
(_, dq), (dk, dv) = lax.scan(chunk_scanner, init=(0, dq), xs=None, length=math.ceil(k_len / K_CHUNK_SIZE))
dq = dq.reshape(q_len, dim)
dk = dk.reshape(k_len, v_dim)
dv = dv.reshape(k_len, v_dim)
return dq, dk, dv
@jit
def flash_attention_backward(res, do):
q, k, v, key_mask, o, l = res
q_len, dim = q.shape
dk = jnp.zeros_like(k)
dv = jnp.zeros_like(v)
l = l.reshape(q_len, 1)
def chunk_scanner(carries, _):
chunk_idx, dk, dv = carries
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, q.shape[-1]))
l_chunk = lax.dynamic_slice(l, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
o_chunk = lax.dynamic_slice(o, (chunk_idx, 0), slice_sizes = (chunk_sizes, o.shape[1]))
do_chunk = lax.dynamic_slice(do, (chunk_idx, 0), slice_sizes = (chunk_sizes, do.shape[-1]))
dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_chunk, k, v, key_mask, o_chunk, do_chunk, l_chunk)
return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk
(_, dk, dv), dq = lax.scan(chunk_scanner, init=(0, dk, dv), xs=None, length=math.ceil(q_len / Q_CHUNK_SIZE))
dq = dq.reshape(q_len, dim)
return dq, dk, dv, None
cosine_sim_flash_attention_after_l2norm.defvjp(flash_attention_forward, flash_attention_backward)
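# Usage sketch (illustrative addition, not part of the original module): this variant expects
# single-head 2D arrays of shape (seq_len, dim) and a boolean key mask of shape (k_len,).
# Queries and keys are l2-normalised inside cosine_sim_flash_attention, so the caller does not
# apply any extra scaling.
if __name__ == '__main__':
    from jax import random
    key = random.PRNGKey(0)
    q = random.normal(key, (1024, 64))
    k = random.normal(key, (2048, 64))
    v = random.normal(key, (2048, 64))
    key_mask = random.randint(key, (2048,), 0, 2) == 1
    out = cosine_sim_flash_attention(q, k, v, key_mask)
    print(out.shape)  # (1024, 64)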
| EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/cosine_sim_flash_attention.py |
import jax
from functools import partial
import jax.numpy as jnp
from jax import random
from jax import value_and_grad
def value_and_grad_wrapper(fn, **kwargs):
@partial(value_and_grad, **kwargs)
def inner(*args, **kwargs):
return jnp.sum(fn(*args, **kwargs))
return inner
def diff(t1, t2):
return jnp.max(jnp.abs(t1 - t2))
def PRNGKeyGenerator(seed = 42):
key = random.PRNGKey(seed)
while True:
sub_key, key = random.split(key)
yield sub_key
def value_and_grad_difference(
fn1,
fn2,
seed = 42,
batch = 2,
heads = 4,
q_seq_len = 4096,
k_seq_len = 8192,
add_key_mask = True,
dim = 512
):
key_gen = PRNGKeyGenerator(seed)
q = random.normal(next(key_gen), (batch, heads, q_seq_len, dim))
k = random.normal(next(key_gen), (batch, heads, k_seq_len, dim))
v = random.normal(next(key_gen), (batch, heads, k_seq_len, dim))
key_mask = random.randint(next(key_gen), (batch, k_seq_len), 0, 2) == 1
fn1_value_and_grad, fn2_value_and_grad = map(partial(value_and_grad_wrapper, argnums = (0, 1, 2)), (fn1, fn2))
args = (q, k, v)
if add_key_mask:
args = (*args, key_mask)
o1, grads1 = fn1_value_and_grad(*args)
o2, grads2 = fn2_value_and_grad(*args)
return diff(o1, o2), [diff(*args) for args in zip(grads1, grads2)] | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/utils.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
from jax.numpy import einsum
from einops import rearrange
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
# flash attention
def _query_chunk_flash_attention(chunk_idx, q, k, v, key_mask):
q_len, batch, heads, dim, k_len, v_dim = *q.shape, k.shape[0], v.shape[-1]
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
chunk_idx, out, row_sum, row_max = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0, 0, 0), slice_sizes=(k_chunk_sizes, batch, heads, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0, 0, 0), slice_sizes=(k_chunk_sizes, batch, heads, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, batch))
attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk)
key_mask_chunk = rearrange(key_mask_chunk, 'j b -> 1 b 1 j')
attn_weights = jnp.where(key_mask_chunk, attn_weights, MASK_VALUE)
block_row_max = jnp.max(attn_weights, axis = -1, keepdims = True)
exp_weights = jnp.exp(attn_weights - block_row_max)
exp_weights = jnp.where(key_mask_chunk, exp_weights, 0.)
block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) + EPSILON
exp_values = einsum('i ... j, j ... d -> i ... d', exp_weights, v_chunk)
new_row_max = jnp.maximum(block_row_max, row_max)
exp_row_max_diff = jnp.exp(row_max - new_row_max)
exp_block_row_max_diff = jnp.exp(block_row_max - new_row_max)
new_row_sum = exp_row_max_diff * row_sum + exp_block_row_max_diff * block_row_sum
out = (row_sum / new_row_sum) * exp_row_max_diff * out + \
(exp_block_row_max_diff / new_row_sum) * exp_values
return (chunk_idx + k_chunk_sizes, out, new_row_sum, new_row_max), None
out = jnp.zeros((q_len, batch, heads, dim))
row_sum = jnp.zeros((q_len, batch, heads, 1))
row_max = jnp.ones((q_len, batch, heads, 1)) * -1e6
(_, out, row_sum, row_max), _ = lax.scan(chunk_scanner, init = (0, out, row_sum, row_max), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
row_sum = rearrange(row_sum, 'n ... 1 -> n ...')
row_max = rearrange(row_max, 'n ... 1 -> n ...')
lse = jnp.log(row_sum) + row_max
return out, lse
def _flash_attention(q, k, v, key_mask):
batch, heads, q_len, dim, v_dim = *q.shape, v.shape[-1]
def chunk_scanner(chunk_idx, _):
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0, 0, 0), slice_sizes = (chunk_sizes, batch, heads, dim))
return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(chunk_idx, q_chunk, k, v, key_mask))
q, k, v = map(lambda t: rearrange(t, 'b h n d -> n b h d'), (q, k, v))
key_mask = rearrange(key_mask, 'b j -> j b')
_, (out, lse) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
out = rearrange(out, 'c n b h d -> b h (c n) d')
lse = rearrange(lse, 'c n b h -> b h (c n)')
return out, lse
@custom_vjp
@jit
def flash_attention(q, k, v, key_mask):
out, _ = _flash_attention(q, k, v, key_mask)
return out
@jit
def flash_attention_forward(q, k, v, key_mask):
out, lse = _flash_attention(q, k, v, key_mask)
return out, (q, k, v, key_mask, out, lse)
def _query_chunk_flash_attention_backward(q, k, v, key_mask, o, do, lse):
q_len, batch, heads, dim, k_len, v_dim = *q.shape, v.shape[0], v.shape[-1]
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
chunk_idx, dq = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, batch, heads, 0), slice_sizes=(k_chunk_sizes, batch, heads, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, batch, heads, 0), slice_sizes=(k_chunk_sizes, batch, heads, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx, batch), slice_sizes=(k_chunk_sizes, batch))
attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk)
p = jnp.exp(attn_weights - lse)
key_mask_chunk = rearrange(key_mask_chunk, 'j b -> 1 b 1 j')
p = jnp.where(key_mask_chunk, p, 0.)
dv_chunk = einsum('i ... j, i ... d -> j ... d', p, do)
dp = einsum('i ... d, j ... d -> i ... j', do, v_chunk)
D = jnp.sum(do * o, axis = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('i ... j, j ... d -> i ... d', ds, k_chunk)
dk_chunk = einsum('i ... j, i ... d -> j ... d', ds, q)
return (chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk)
dq = jnp.zeros_like(q)
(_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
dk = rearrange(dk, 'c n ... -> (c n) ...')
dv = rearrange(dv, 'c n ... -> (c n) ...')
return dq, dk, dv
@jit
def flash_attention_backward(res, do):
q, k, v, key_mask, o, lse = res
batch, heads, q_len, dim = q.shape
lse = rearrange(lse, 'b h n -> n b h 1')
q, k, v, o, do = map(lambda t: rearrange(t, 'b h n d -> n b h d'), (q, k, v, o, do))
key_mask = rearrange(key_mask, 'b j -> j b')
dk = jnp.zeros_like(k)
dv = jnp.zeros_like(v)
def chunk_scanner(carries, _):
chunk_idx, dk, dv = carries
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, q.shape[-1]))
lse_chunk = lax.dynamic_slice(lse, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, 1))
o_chunk = lax.dynamic_slice(o, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, o.shape[-1]))
do_chunk = lax.dynamic_slice(do, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, do.shape[-1]))
dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_chunk, k, v, key_mask, o_chunk, do_chunk, lse_chunk)
return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk
(_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
dq = rearrange(dq, 'c n b h d -> b h (c n) d')
dk, dv = map(lambda t: rearrange(t, 'n b h d -> b h n d'), (dk, dv))
return dq, dk, dv, None
flash_attention.defvjp(flash_attention_forward, flash_attention_backward)
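# Verification sketch (illustrative addition, not part of the original module; assumes the
# sibling attention.py and utils.py modules from this folder are importable). It compares the
# flash kernel against the plain reference attention on both outputs and (dq, dk, dv) gradients.
if __name__ == '__main__':
    from attention import attention
    from utils import value_and_grad_difference
    out_diff, grad_diffs = value_and_grad_difference(
        attention, flash_attention,
        q_seq_len=1024, k_seq_len=2048, dim=64,
    )
    print('max output difference:', out_diff)
    print('max gradient differences (dq, dk, dv):', grad_diffs)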
| EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/flash_attention.py |
import torch
import torch.nn as nn
from torch.nn import LayerNorm
from einops import rearrange
class KernelAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, kernel="gaussian", sigma=1.0):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim ** -0.5
self.sigma = sigma
self.kernel = kernel
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.dropout_module = torch.nn.Dropout(dropout)
def forward(self, q, k, v, attn_mask=None):
bsz, tgt_len, embed_dim = q.size()
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
q *= self.scaling
q = rearrange(q, 'b t (h d) -> b h t d', h=self.num_heads)
k = rearrange(k, 'b s (h d) -> b h s d', h=self.num_heads)
v = rearrange(v, 'b s (h d) -> b h s d', h=self.num_heads)
if self.kernel == "gaussian":
kernel_attn = torch.exp(-((q.unsqueeze(3) - k.unsqueeze(2)) ** 2).sum(-1) / (2 * self.sigma ** 2))
else:
raise ValueError(f"Unsupported kernel type: {self.kernel}")
kernel_attn = kernel_attn / kernel_attn.sum(dim=-1, keepdim=True)
if attn_mask is not None:
kernel_attn = kernel_attn * attn_mask.unsqueeze(1)
attn_probs = self.dropout_module(kernel_attn)
attn = torch.einsum('b h t s, b h s d -> b h t d', attn_probs, v)
attn = rearrange(attn, 'b h t d -> b t (h d)', h=self.num_heads)
attn = self.out_proj(attn)
return attn, kernel_attn
class OptimizedAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0):
super().__init__()
self.kernel_attn = KernelAttention(embed_dim, num_heads, dropout)
self.layer_norm = LayerNorm(embed_dim)
def forward(self, q, k, v, attn_mask=None):
attn, attn_weights = self.kernel_attn(q, k, v, attn_mask)
attn = self.layer_norm(attn)
attn_weights = attn_weights.to(torch.float16)
return attn, attn_weights
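# Usage sketch (illustrative addition, not part of the original module): both modules take
# separate q/k/v tensors of shape (batch, seq_len, embed_dim); attn_mask, if given, is
# broadcast against the (batch, heads, tgt_len, src_len) kernel weights.
if __name__ == '__main__':
    batch, seq_len, embed_dim, num_heads = 2, 16, 64, 4
    x = torch.randn(batch, seq_len, embed_dim)
    layer = OptimizedAttention(embed_dim, num_heads, dropout=0.1)
    out, weights = layer(x, x, x)
    print(out.shape, weights.shape)  # torch.Size([2, 16, 64]) torch.Size([2, 4, 16, 16])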
| EXA-1-master | exa/modular_components/attentions/KernelizedAttention/ka.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from pathlib import Path
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
torch_binary_version = parse(torch.version.cuda)
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_version != torch_binary_version):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.2"):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
elif bare_metal_version >= Version("11.1"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
elif bare_metal_version == Version("11.0"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("flash_attn")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
raise RuntimeError("FlashAttention is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
subprocess.run(["git", "submodule", "update", "--init", "csrc/flash_attn/cutlass"])
ext_modules.append(
CUDAExtension(
name="flash_attn_cuda",
sources=[
"csrc/flash_attn/fmha_api.cpp",
"csrc/flash_attn/src/fmha_fwd_hdim32.cu",
"csrc/flash_attn/src/fmha_fwd_hdim64.cu",
"csrc/flash_attn/src/fmha_fwd_hdim128.cu",
"csrc/flash_attn/src/fmha_bwd_hdim32.cu",
"csrc/flash_attn/src/fmha_bwd_hdim64.cu",
"csrc/flash_attn/src/fmha_bwd_hdim128.cu",
"csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
"csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
"-lineinfo"
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[
Path(this_dir) / 'csrc' / 'flash_attn',
Path(this_dir) / 'csrc' / 'flash_attn' / 'src',
Path(this_dir) / 'csrc' / 'flash_attn' / 'cutlass' / 'include',
],
)
)
setup(
name="flash_attn",
version="1.0.1",
packages=find_packages(
exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
),
author="Tri Dao",
author_email="[email protected]",
description="Flash Attention: Fast and Memory-Efficient Exact Attention",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/HazyResearch/flash-attention",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: Unix",
],
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
python_requires=">=3.7",
install_requires=[
"torch",
"einops",
"packaging",
],
)
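# Build note (illustrative addition, not part of the original file): the extension is normally
# built via `pip install .` (or an editable install), which runs BuildExtension/ninja, needs
# nvcc from a CUDA 11+ toolkit on PATH, and relies on the cutlass submodule checked out by the
# `git submodule update --init` call above.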
| EXA-1-master | exa/modular_components/attentions/flash-attention/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
torch_binary_version = parse(torch.version.cuda)
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_version != torch_binary_version):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.2"):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
elif bare_metal_version >= Version("11.1"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
elif bare_metal_version == Version("11.0"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--ft_attention")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
raise RuntimeError("ft_attention is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
name="ft_attention",
sources=[
"ft_attention.cpp",
"decoder_masked_multihead_attention.cu",
],
extra_compile_args={
"cxx": ["-O3", "-DENABLE_BF16"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-DENABLE_BF16", # TODO
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="ft_attention",
version="0.1",
description="Attention for single query from FasterTransformer",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/ft_attention/setup.py |
# Copied from https://github.com/NVIDIA/apex/tree/master/csrc/megatron
# We add the case where seqlen = 4k and seqlen = 8k
import os
import subprocess
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
setup(
name='fused_softmax_lib',
ext_modules=[
CUDAExtension(
name='fused_softmax_lib',
sources=['fused_softmax.cpp', 'scaled_masked_softmax_cuda.cu', 'scaled_upper_triang_masked_softmax_cuda.cu'],
extra_compile_args={
'cxx': ['-O3',],
'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + cc_flag)
}
)
],
cmdclass={
'build_ext': BuildExtension
})
| EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/fused_softmax/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
torch_binary_version = parse(torch.version.cuda)
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_version != torch_binary_version):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.2"):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
elif bare_metal_version >= Version("11.1"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
elif bare_metal_version == Version("11.0"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--xentropy")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
raise RuntimeError("xentropy is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
name="xentropy_cuda_lib",
sources=[
"interface.cpp",
"xentropy_kernel.cu"
],
extra_compile_args={
"cxx": ["-O3"] + generator_flag,
"nvcc": append_nvcc_threads(
["-O3"]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="xentropy_cuda_lib",
version="0.1",
description="Cross-entropy loss",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/xentropy/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
torch_binary_version = parse(torch.version.cuda)
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_version != torch_binary_version):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.2"):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
elif bare_metal_version >= Version("11.1"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
elif bare_metal_version == Version("11.0"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--fast_layer_norm")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
raise RuntimeError("dropout_layer_norm is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
name="dropout_layer_norm",
sources=[
"ln_api.cpp",
"ln_fwd_256.cu",
"ln_bwd_256.cu",
"ln_fwd_512.cu",
"ln_bwd_512.cu",
"ln_fwd_768.cu",
"ln_bwd_768.cu",
"ln_fwd_1024.cu",
"ln_bwd_1024.cu",
"ln_fwd_1280.cu",
"ln_bwd_1280.cu",
"ln_fwd_1536.cu",
"ln_bwd_1536.cu",
"ln_fwd_2048.cu",
"ln_bwd_2048.cu",
"ln_fwd_2560.cu",
"ln_bwd_2560.cu",
"ln_fwd_3072.cu",
"ln_bwd_3072.cu",
"ln_fwd_4096.cu",
"ln_bwd_4096.cu",
"ln_fwd_5120.cu",
"ln_bwd_5120.cu",
"ln_fwd_6144.cu",
"ln_bwd_6144.cu",
"ln_fwd_7168.cu",
"ln_bwd_7168.cu",
"ln_fwd_8192.cu",
"ln_bwd_8192.cu",
"ln_parallel_fwd_256.cu",
"ln_parallel_bwd_256.cu",
"ln_parallel_fwd_512.cu",
"ln_parallel_bwd_512.cu",
"ln_parallel_fwd_768.cu",
"ln_parallel_bwd_768.cu",
"ln_parallel_fwd_1024.cu",
"ln_parallel_bwd_1024.cu",
"ln_parallel_fwd_1280.cu",
"ln_parallel_bwd_1280.cu",
"ln_parallel_fwd_1536.cu",
"ln_parallel_bwd_1536.cu",
"ln_parallel_fwd_2048.cu",
"ln_parallel_bwd_2048.cu",
"ln_parallel_fwd_2560.cu",
"ln_parallel_bwd_2560.cu",
"ln_parallel_fwd_3072.cu",
"ln_parallel_bwd_3072.cu",
"ln_parallel_fwd_4096.cu",
"ln_parallel_bwd_4096.cu",
"ln_parallel_fwd_5120.cu",
"ln_parallel_bwd_5120.cu",
"ln_parallel_fwd_6144.cu",
"ln_parallel_bwd_6144.cu",
"ln_parallel_fwd_7168.cu",
"ln_parallel_bwd_7168.cu",
"ln_parallel_fwd_8192.cu",
"ln_parallel_bwd_8192.cu",
],
extra_compile_args={
"cxx": ["-O3"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="dropout_layer_norm",
version="0.1",
description="Fused dropout + add + layer norm",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/layer_norm/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
torch_binary_version = parse(torch.version.cuda)
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_version != torch_binary_version):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.2"):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
elif bare_metal_version >= Version("11.1"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
elif bare_metal_version == Version("11.0"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("rotary_emb")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
raise RuntimeError("rotary_emb is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
'rotary_emb', [
'rotary.cpp',
'rotary_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': append_nvcc_threads([
'-O3', '--use_fast_math', '--expt-extended-lambda'
] + cc_flag)
}
)
)
setup(
name="rotary_emb",
version="0.1",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/rotary/setup.py |
import os
import subprocess
from packaging.version import parse, Version
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.2"):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
setup(
name='fused_dense_lib',
ext_modules=[
CUDAExtension(
name='fused_dense_lib',
sources=['fused_dense.cpp', 'fused_dense_cuda.cu'],
extra_compile_args={
'cxx': ['-O3',],
'nvcc': append_nvcc_threads(['-O3'])
}
)
],
cmdclass={
'build_ext': BuildExtension
})
| EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/fused_dense_lib/setup.py |
from typing import Callable
import dotenv
import hydra
from omegaconf import OmegaConf, DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
OmegaConf.register_new_resolver('eval', eval)
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
# Delay the evaluation until we have the datamodule
# So we want the resolver to yield the same string.
OmegaConf.register_new_resolver('datamodule', lambda attr: '${datamodule:' + str(attr) + '}')
# Turn on TensorFloat32
import torch.backends
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def dictconfig_filter_key(d: DictConfig, fn: Callable) -> DictConfig:
"""Only keep keys where fn(key) is True. Support nested DictConfig.
"""
# Using d.items_ex(resolve=False) instead of d.items() since we want to keep the
# ${datamodule:foo} unresolved for now.
return DictConfig({k: dictconfig_filter_key(v, fn) if isinstance(v, DictConfig) else v
# for k, v in d.items_ex(resolve=False) if fn(k)})
for k, v in d.items() if fn(k)})
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
# Remove config keys that start with '__'. These are meant to be used only in computing
# other entries in the config.
config = dictconfig_filter_key(config, lambda k: not k.startswith('__'))
# Imports should be nested inside @hydra.main to optimize tab completion
# Read more here: https://github.com/facebookresearch/hydra/issues/934
from src.train import train
from src.eval import evaluate
from src.utils import utils
# A couple of optional utilities:
# - disabling python warnings
# - forcing debug-friendly configuration
# - verifying experiment name is set when running in experiment mode
# You can safely get rid of this line if you don't want those
utils.extras(config)
# Pretty print config using Rich library
if config.get("print_config"):
utils.print_config(config, resolve=True)
# Train model
mode = config.get('mode', 'train')
if mode not in ['train', 'eval']:
raise NotImplementedError(f'mode {mode} not supported')
if mode == 'train':
return train(config)
elif mode == 'eval':
return evaluate(config)
if __name__ == "__main__":
main()
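# Invocation sketch (illustrative addition; the exact override keys depend on the files under
# configs/, which are not shown here). Hydra loads configs/config.yaml and accepts standard
# command-line overrides, e.g.:
#   python run.py mode=train
#   python run.py mode=eval
# Keys starting with '__' are stripped by dictconfig_filter_key before train()/evaluate() run.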
| EXA-1-master | exa/modular_components/attentions/flash-attention/training/run.py |