| RadFM-main | Quick_demo/Model/RadFM/__init__.py |
"""
Code modified from the DETR transformer:
https://github.com/facebookresearch/detr
Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
import copy
from typing import Optional, List
import pickle as cp
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
T,B,C = memory.shape
intermediate = []
atten_layers = []
for n,layer in enumerate(self.layers):
residual=True
output,ws = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos,residual=residual)
atten_layers.append(ws)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output,atten_layers
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
residual=True):
q = k = self.with_pos_embed(tgt, query_pos)
        tgt2, ws = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)
        # NOTE: unlike the original DETR layer, the self-attention output (tgt2) is not added
        # back to tgt here; self.dropout1 and the `residual` flag are effectively unused.
        tgt = self.norm1(tgt)
tgt2,ws = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
# attn_weights [B,NUM_Q,T]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt,ws
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2,ws = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2,attn_weights = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt,attn_weights
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
residual=True):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos,residual)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
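# Usage sketch (illustrative, not from the upstream RadFM code): a minimal smoke test that
# wires the layer and decoder together on random tensors. All sizes are placeholders; inputs
# follow the (sequence, batch, channel) layout expected by nn.MultiheadAttention.
if __name__ == "__main__":
    layer = TransformerDecoderLayer(d_model=256, nhead=8)
    decoder = TransformerDecoder(layer, num_layers=2, norm=nn.LayerNorm(256))
    queries = torch.zeros(32, 2, 256)    # 32 decoder queries, batch of 2
    memory = torch.randn(100, 2, 256)    # 100 encoder tokens, batch of 2
    out, attn_maps = decoder(queries, memory)
    print(out.shape, len(attn_maps))     # torch.Size([32, 2, 256]) 2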
| RadFM-main | Quick_demo/Model/RadFM/transformer_decoder.py |
from torch import nn
from transformers.models.llama import LlamaForCausalLM
from transformers import AutoConfig
from .my_embedding_layer import MyEmbedding
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import tqdm.auto as tqdm
import torch.nn as nn
import torch
from torch.utils.checkpoint import checkpoint
from torch.autograd import Variable
import numpy as np
class MultiLLaMAForCausalLM(nn.Module):
def __init__(self, lang_model_path):
super(MultiLLaMAForCausalLM, self).__init__()
try:
self.lang_model = LlamaForCausalLM.from_pretrained(
lang_model_path,
)
        except Exception:
            # fall back to a randomly initialized LLaMA if the pretrained weights cannot be loaded
config = AutoConfig.from_pretrained(lang_model_path)
self.lang_model = LlamaForCausalLM(config)
self.lang_model.gradient_checkpointing_enable()
self.lang_model.enable_input_require_grads()
# self.lang_model.requires_grad_(False)
self.embedding_layer = MyEmbedding()
self.embedding_layer.weight = self.lang_model.get_input_embeddings().weight
self.hidden_dim = 5120
self.voc_size = 32000
def forward(self,lang_x, vision_x, attention_mask, labels, loss_reweight,key_words_query):
if labels.shape == lang_x.shape:
self.embedding_layer.flag = 'Text'
# lang_x = lang_x.to(vision_x.dtype)
# lang_x = lang_x + torch.zeros(1, dtype=lang_x.dtype, device=lang_x.device, requires_grad=True)
# vision_x = vision_x + torch.zeros(1, dtype=vision_x.dtype, device=vision_x.device, requires_grad=True)
# input_embedding = checkpoint(self.embedding_layer, lang_x, vision_x)
input_embedding,loss_match= self.embedding_layer(lang_x, vision_x,key_words_query) # ,loss_matching
output = self.lang_model(inputs_embeds = input_embedding,attention_mask = attention_mask, labels = labels)
logits = output['logits']
loss_reg = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_loss_reweight = loss_reweight[...,1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction = 'none')
shift_logits = shift_logits.view(-1, self.voc_size)
shift_labels = shift_labels.view(-1)
shift_loss_reweight = shift_loss_reweight.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
shift_loss_reweight = shift_loss_reweight.to(shift_logits.device)
loss_reg = loss_fct(shift_logits, shift_labels)
loss_reg = torch.sum(shift_loss_reweight*loss_reg)/torch.sum(shift_loss_reweight)
loss = loss_reg
if loss_match!= None:
loss = 0.8*loss + 0.2*loss_match
logits = output['logits'][..., :-1, :].contiguous().detach()
total = len(labels)
predictions = torch.argmax(logits, dim=-1)
labels = labels[..., 1:].contiguous()
Acc = torch.sum(torch.all(torch.logical_or(predictions == labels, labels == -100),dim = -1))
Accuracy = Acc /total
return dict(
# loss_reg = loss_reg,
# loss_matching = loss_matching,
logits = Accuracy,
loss = output['loss'],
)
        ### unused for now; ignore the following code ###
# if labels.shape == vision_x.shape:
# self.embedding_layer.flag = 'Seg'
# input_embedding = self.embedding_layer(lang_x, vision_x)
def generate(self, lang_x,vision_x):
self.embedding_layer.flag = 'Text'
with torch.no_grad():
input_embedding,_ = self.embedding_layer(lang_x, vision_x)
generation = self.lang_model.generate(inputs_embeds = input_embedding, max_new_tokens =200,top_k=50)
return generation
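# Usage sketch (illustrative; the path and the 6-D vision tensor layout below are assumptions
# inferred from the data collators, not values prescribed by this file):
#
# model = MultiLLaMAForCausalLM(lang_model_path='/path/to/llama-13b')   # hypothetical path
# lang_x = tokenizer(question_with_image_tokens, return_tensors='pt')['input_ids']
# vision_x = torch.randn(1, 1, 3, 512, 512, 4)   # (batch, num_images, channel, H, W, depth)
# tokens = model.generate(lang_x, vision_x)
# print(tokenizer.batch_decode(tokens, skip_special_tokens=True))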
| RadFM-main | Quick_demo/Model/RadFM/multimodality_model.py |
from .blocks import ModifiedResNet,PMC_CLIP_cfg
import torch
from torchvision import transforms
from PIL import Image
import torch.nn as nn
def extend_instance(obj, mixin):
"""Apply mixins to a class instance after creation"""
base_cls = obj.__class__
base_cls_name = obj.__class__.__name__
obj.__class__ = type(
base_cls_name, (mixin, base_cls), {}
) # mixin needs to go first for our forward() logic to work
def getattr_recursive(obj, att):
"""
Return nested attribute of obj
Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
"""
if att == "":
return obj
i = att.find(".")
if i < 0:
return getattr(obj, att)
else:
return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])
def setattr_recursive(obj, att, val):
"""
Set nested attribute of obj
Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
"""
if "." in att:
obj = getattr_recursive(obj, ".".join(att.split(".")[:-1]))
setattr(obj, att.split(".")[-1], val)
def get_visual_encoder(model_str):
"""
Args:
        model_str (str): path to the pretrained visual-encoder checkpoint.
    Returns:
vision_model, visual_dim, img_preprocessor
"""
normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
img_preprocessor = transforms.Compose([
transforms.Resize((512,512), interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
if 'PMC-CLIP' in model_str:
#vision_cfg = json.load(open(model_args.visual_model_config,'r'))['vision_cfg']
vision_cfg = PMC_CLIP_cfg()
vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
vision_model = ModifiedResNet(
layers=vision_cfg.layers,
heads=vision_heads,
output_dim = 768,
image_size=vision_cfg.image_size,
width=vision_cfg.width
)
vision_model = vision_load_pretrain(vision_model,model_str)
vision_model = nn.Sequential(*list(vision_model.children())[:-2])
visual_dim = 1024
return vision_model,visual_dim,img_preprocessor
def vision_load_pretrain(resnet,model_path):
checkpoint = torch.load(model_path, map_location='cpu')
state_dict = checkpoint['state_dict']
state_dict = {k.replace('module.visual.',''): v for k, v in state_dict.items() if '.visual' in k}
resnet.load_state_dict(state_dict)
return resnet
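# Usage sketch (illustrative; the checkpoint path is hypothetical): get_visual_encoder keeps the
# PMC-CLIP ResNet up to layer3, so a preprocessed 512x512 image yields a spatial feature map of
# roughly [1, 1024, 32, 32], matching the returned visual_dim of 1024.
#
# vision_model, visual_dim, preprocess = get_visual_encoder('/path/to/PMC-CLIP/checkpoint.pt')
# img = preprocess(Image.open('example.png').convert('RGB')).unsqueeze(0)   # [1, 3, 512, 512]
# feature_map = vision_model(img)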
| RadFM-main | Quick_demo/Model/RadFM/utils.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from .position_encoding import PositionEmbeddingLearned3d
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ViT(nn.Module):
def __init__(self, *, image_size, image_patch_size, frames, frame_patch_size, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(image_patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
assert frames % frame_patch_size == 0, 'Frames must be divisible by frame patch size'
self.patch_height = patch_height
self.patch_width = patch_width
self.frame_patch_size = frame_patch_size
num_patches = (image_height // patch_height) * (image_width // patch_width) * (frames // frame_patch_size)
patch_dim = channels * patch_height * patch_width * frame_patch_size
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) (f pf) -> b (h w f) (p1 p2 pf c)', p1 = patch_height, p2 = patch_width, pf = frame_patch_size),
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim),
)
self.pos_embedding = PositionEmbeddingLearned3d(dim // 3,(image_height // patch_height), (image_width // patch_width), (frames // frame_patch_size))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
def forward(self, video):
B, C, H, W, D = video.shape
x = self.to_patch_embedding(video)
b, n, _ = x.shape
pos = self.pos_embedding(B, H // self.patch_height, W // self.patch_width, D // self.frame_patch_size,x)
x += pos
x = self.dropout(x)
x = self.transformer(x)
return x,pos
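# Smoke test (illustrative sizes, not from the original file): patch-embed a random 3D volume.
# `dim` is kept divisible by 3 because the learned 3D position embedding is built from dim // 3
# features per axis (assumed behaviour of PositionEmbeddingLearned3d).
if __name__ == "__main__":
    vit = ViT(image_size=64, image_patch_size=16, frames=8, frame_patch_size=4,
              dim=96, depth=2, heads=4, mlp_dim=192)
    volume = torch.randn(2, 3, 64, 64, 8)    # (batch, channel, H, W, depth)
    tokens, pos = vit(volume)
    print(tokens.shape)                      # torch.Size([2, 32, 96]): 4 * 4 * 2 = 32 patches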
| RadFM-main | Quick_demo/Model/RadFM/vit_3d.py |
from collections import OrderedDict
from typing import Tuple, Union, Callable, Optional
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint
class PMC_CLIP_cfg:
backbone: str = 'ModifiedRN50' # ['RN50', 'ModifiedRN50', 'MAE']
layers: Union[Tuple[int, int, int, int], int] = [3,4,6,3]
width: int = 64
head_width: int = 64
mlp_ratio: float = 4.0
patch_size: int = 16
image_size: Union[Tuple[int, int], int] = 224
timm_model_name: str = None # a valid model name overrides layers, width, patch_size
timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
patch_dropout: float = 0.0 # patch dropout rate, no dropout by default
drop_attention_rate: float = 0. # Transformer Dropout
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ResNet(nn.Module):
"""
RN50
"""
def __init__(
self, layers, output_dim, heads, image_size=224, width=64,
block=Bottleneck,
):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 1-layer stem
self.conv1 = nn.Conv2d(3, width, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.relu1 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.head = nn.Linear(512 * 6, output_dim)
self.head = nn.Linear(512 * block.expansion, output_dim)
# embed_dim = width * 32 # the ResNet feature dimension
# self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(
self,
planes, blocks, stride=1,
block=Bottleneck,
):
layers = [block(self._inplanes, planes, stride)]
self._inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.maxpool(x)
return x
def forward(self, x):
# x[0]: [batch_size, 3, 224, 224]
# x[1]: [batch_size, 1]
x = self.stem(x) # [batch_size, 64, 56, 56]
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x) # [batch_size, 2048, 7, 7]
x = self.avgpool(x) # [batch_size, 2048, 1, 1]
x = torch.flatten(x, 1) # [batch_size, 2048*1*1]
x = self.head(x) # [batch_size, 1024]
visual_output = dict.fromkeys(["image_features", "mim_loss"], None)
visual_output.update({
'image_features': x,
})
return visual_output
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.relu3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
visual_output = dict.fromkeys(["image_features", "mim_loss"], None)
visual_output.update({
'image_features': x,
})
return visual_output
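# Usage sketch (illustrative, not part of the upstream PMC-CLIP code): with the default
# PMC_CLIP_cfg above (width=64, layers=[3, 4, 6, 3], head_width=64, so heads = 64 * 32 // 64 = 32),
# the attention-pooled backbone maps a 224x224 image to a single pooled feature vector.
#
# model = ModifiedResNet(layers=[3, 4, 6, 3], output_dim=768, heads=32, image_size=224, width=64)
# out = model(torch.randn(1, 3, 224, 224))
# print(out['image_features'].shape)   # torch.Size([1, 768])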
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class QuickGELU(nn.Module):
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model: int, n_head: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU,
drop_attention_rate: float = 0.,
):
super().__init__()
self.attn = nn.MultiheadAttention(
embed_dim=d_model,
num_heads=n_head,
dropout=drop_attention_rate,
)
self.ln_1 = LayerNorm(d_model)
mlp_width = int(d_model * mlp_ratio)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, mlp_width)),
("gelu", act_layer()),
("c_proj", nn.Linear(mlp_width, d_model))
]))
self.ln_2 = LayerNorm(d_model)
def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
x = x + self.mlp(self.ln_2(x))
return x
class PatchDropout(nn.Module):
"""
https://arxiv.org/abs/2212.00794
"""
def __init__(self, prob, exclude_first_token=True):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.exclude_first_token = exclude_first_token # exclude CLS token
def forward(self, x):
if not self.training or self.prob == 0.:
return x
if self.exclude_first_token:
cls_tokens, x = x[:, :1], x[:, 1:]
else:
cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
batch = x.size()[0]
num_tokens = x.size()[1]
batch_indices = torch.arange(batch)
batch_indices = batch_indices[..., None]
keep_prob = 1 - self.prob
num_patches_keep = max(1, int(num_tokens * keep_prob))
rand = torch.randn(batch, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
x = x[batch_indices, patch_indices_keep]
if self.exclude_first_token:
x = torch.cat((cls_tokens, x), dim=1)
return x
class Transformer(nn.Module):
def __init__(
self, width: int, layers: int, heads: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU,
drop_attention_rate: float = 0.,
):
super().__init__()
self.width = width
self.layers = layers
self.grad_checkpointing = False
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, mlp_ratio, act_layer=act_layer, drop_attention_rate=drop_attention_rate)
for _ in range(layers)
])
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
for r in self.resblocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(r, x, attn_mask)
else:
x = r(x, attn_mask=attn_mask)
        return x
| RadFM-main | Quick_demo/Model/RadFM/blocks.py |
"""
Taken from https://github.com/lucidrains/flamingo-pytorch
"""
import torch
from einops import rearrange, repeat
from einops_exts import rearrange_many
from torch import einsum, nn
def exists(val):
return val is not None
def FeedForward(dim, mult=4):
inner_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim, bias=False),
nn.GELU(),
nn.Linear(inner_dim, dim, bias=False),
)
class PerceiverAttention(nn.Module):
def __init__(self, *, dim, dim_head=64, heads=8):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm_media = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
def forward(self, x, latents):
"""
Args:
x (torch.Tensor): image features
shape (b, T, n1, D)
latent (torch.Tensor): latent features
shape (b, T, n2, D)
"""
x = self.norm_media(x)
latents = self.norm_latents(latents)
h = self.heads
q = self.to_q(latents)
kv_input = torch.cat((x, latents), dim=-2)
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
q, k, v = rearrange_many((q, k, v), "b t n (h d) -> b h t n d", h=h)
q = q * self.scale
# attention
sim = einsum("... i d, ... j d -> ... i j", q, k)
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
out = einsum("... i j, ... j d -> ... i d", attn, v)
out = rearrange(out, "b h t n d -> b t n (h d)", h=h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth=6,
dim_head=64,
heads=8,
num_latents=64,
max_num_media=None,
max_num_frames=None,
ff_mult=4,
):
super().__init__()
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.frame_embs = (
nn.Parameter(torch.randn(max_num_frames, dim))
if exists(max_num_frames)
else None
)
self.media_time_embs = (
nn.Parameter(torch.randn(max_num_media, 1, dim))
if exists(max_num_media)
else None
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
FeedForward(dim=dim, mult=ff_mult),
]
)
)
self.norm = nn.LayerNorm(dim)
def forward(self, x):
"""
Args:
x (torch.Tensor): image features
shape (b, T, F, v, D)
Returns:
shape (b, T, n, D) where n is self.num_latents
"""
b, T, F, v = x.shape[:4]
# frame and media time embeddings
if exists(self.frame_embs):
frame_embs = repeat(self.frame_embs[:F], "F d -> b T F v d", b=b, T=T, v=v)
x = x + frame_embs
x = rearrange(
x, "b T F v d -> b T (F v) d"
) # flatten the frame and spatial dimensions
if exists(self.media_time_embs):
x = x + self.media_time_embs[:T]
# blocks
latents = repeat(self.latents, "n d -> b T n d", b=b, T=T)
for attn, ff in self.layers:
latents = attn(x, latents) + latents
latents = ff(latents) + latents
return self.norm(latents)
# gated cross attention
class MaskedCrossAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
only_attend_immediate_media=True,
):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim_visual, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether for text to only attend to immediate preceding image, or all previous images
self.only_attend_immediate_media = only_attend_immediate_media
def forward(self, x, media, media_locations=None, attend_previous=True):
"""
Args:
x (torch.Tensor): text features
shape (B, T_txt, D_txt)
media (torch.Tensor): image features
shape (B, T_img, n, D_img) where n is the dim of the latents
media_locations: boolean mask identifying the media tokens in x
shape (B, T_txt)
attend_previous: bool
If false, ignores immediately preceding image and starts attending when following image
"""
_, T_img, n = media.shape[:3]
h = self.heads
x = self.norm(x)
q = self.to_q(x)
media = rearrange(media, "b t n d -> b (t n) d")
k, v = self.to_kv(media).chunk(2, dim=-1)
q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=h)
q = q * self.scale
sim = einsum("... i d, ... j d -> ... i j", q, k)
if exists(media_locations):
# at each boolean of True, increment the time counter (relative to media time)
text_time = media_locations.cumsum(dim=-1)
media_time = torch.arange(T_img, device=x.device) + 1
if not attend_previous:
text_time[~media_locations] += 1
# make sure max is still the number of images in the sequence
text_time[
text_time
> repeat(
torch.count_nonzero(media_locations, dim=1),
"b -> b i",
i=text_time.shape[1],
)
] = 0
# text time must equal media time if only attending to most immediate image
# otherwise, as long as text time is greater than media time (if attending to all previous images / media)
mask_op = torch.eq if self.only_attend_immediate_media else torch.ge
text_to_media_mask = mask_op(
rearrange(text_time, "b i -> b 1 i 1"),
repeat(media_time, "j -> 1 1 1 (j n)", n=n),
)
sim = sim.masked_fill(~text_to_media_mask, -torch.finfo(sim.dtype).max)
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
if exists(media_locations) and self.only_attend_immediate_media:
# any text without a preceding media needs to have attention zeroed out
text_without_media_mask = text_time == 0
text_without_media_mask = rearrange(
text_without_media_mask, "b i -> b 1 i 1"
)
attn = attn.masked_fill(text_without_media_mask, 0.0)
out = einsum("... i j, ... j d -> ... i d", attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class GatedCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
ff_mult=4,
only_attend_immediate_media=True,
):
super().__init__()
self.attn = MaskedCrossAttention(
dim=dim,
dim_visual=dim_visual,
dim_head=dim_head,
heads=heads,
only_attend_immediate_media=only_attend_immediate_media,
)
self.attn_gate = nn.Parameter(torch.tensor([0.0]))
self.ff = FeedForward(dim, mult=ff_mult)
self.ff_gate = nn.Parameter(torch.tensor([0.0]))
def forward(
self,
x,
media,
media_locations=None,
attend_previous=True,
):
x = (
self.attn(
x,
media,
media_locations=media_locations,
attend_previous=attend_previous,
)
* self.attn_gate.tanh()
+ x
)
x = self.ff(x) * self.ff_gate.tanh() + x
return x
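# Smoke test (illustrative sizes, not from the upstream flamingo-pytorch code): the resampler
# compresses a variable number of visual tokens into a fixed set of latents, and the gated
# cross-attention block injects them into a text stream; since both gates start at tanh(0) = 0,
# the block is an identity mapping at initialization.
if __name__ == "__main__":
    resampler = PerceiverResampler(dim=128, depth=2)
    feats = torch.randn(2, 3, 1, 196, 128)    # (batch, T_img, frames, tokens, dim)
    latents = resampler(feats)
    print(latents.shape)                      # torch.Size([2, 3, 64, 128])
    block = GatedCrossAttentionBlock(dim=128, dim_visual=128)
    text = torch.randn(2, 10, 128)            # (batch, text_len, dim)
    out = block(text, latents)                # no media_locations -> attend to all latents
    print(torch.allclose(out, text))          # True: both gates are zero at init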
| RadFM-main | Quick_demo/Model/RadFM/helpers.py |
import tqdm.auto as tqdm
import torch.nn.functional as F
from typing import Optional, Dict, Sequence
from typing import List, Optional, Tuple, Union
import transformers
from My_Trainer.trainer import Trainer
from dataclasses import dataclass, field
from Dataset.multi_dataset_test import multi_dataset
from Model.RadFM.multimodality_model import MultiLLaMAForCausalLM
from datasampler import My_DistributedBatchSampler
import torch
from torch.utils.data import DataLoader
import csv
import random
import numpy as np
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
# set the random seed
setup_seed(20)
# preprocess the data and run the model
@dataclass
class ModelArguments:
lang_encoder_path: Optional[str] = field(default="/home/cs/leijiayu/wuchaoyi/book_pretrain/Results/Book_mix_2048_13B_full/checkpoint-45800")
tokenizer_path: str = field(default='/home/cs/leijiayu/wuchaoyi/Finetune_LLAMA/LLAMA_Model/tokenizer', metadata={"help": "Path to the tokenizer data."})
#vision_encoder_path: str = field(default='/home/cs/leijiayu/wuchaoyi/multi_modal/src/PMC-CLIP/checkpoint.pt', metadata={"help": "Path to the vision_encoder."})
@dataclass
class DataArguments:
Mode: Optional[str] = field(default="Train")
test_split: Optional[str] = field(default="open")
@dataclass
class TrainingArguments(transformers.TrainingArguments):
remove_unused_columns: bool = field(default = False)
batch_size_2D: int = field(default = 4)
batch_size_3D: int = field(default = 1)
output_dir: Optional[str] = field(default="/home/cs/leijiayu/wuchaoyi/multi_modal/src/Results/BLIP_overfit/")
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
@dataclass
class DataCollator(object):
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
#print(instances)
vision_xs, lang_xs, attention_masks, labels = tuple([instance[key] for instance in instances] for key in ('vision_x','lang_x', 'attention_mask', 'labels'))
lang_xs = torch.cat([_.unsqueeze(0) for _ in lang_xs],dim = 0)
attention_masks = torch.cat([_.unsqueeze(0) for _ in attention_masks],dim = 0)
labels = torch.cat([_.unsqueeze(0) for _ in labels],dim = 0)
#print(lang_xs.shape,attention_masks.shape,labels.shape)
target_H = 512
target_W = 512
target_D = 4
MAX_D = 0
if len(vision_xs) == 1:
target_H = 256
target_W = 256
D_list = list(range(4,65,4))
if len(vision_xs) == 1:
if vision_xs[0].shape[0] >6:
D_list = list(range(4,33,4))
for ii in vision_xs:
try:
D = ii.shape[-1]
if D > MAX_D:
MAX_D = D
except:
continue
for temp_D in D_list:
if abs(temp_D - MAX_D)< abs(target_D - MAX_D):
target_D = temp_D
vision_xs = [torch.nn.functional.interpolate(s, size = (target_H,target_W,target_D)) for s in vision_xs]
vision_xs = torch.nn.utils.rnn.pad_sequence(
vision_xs, batch_first=True, padding_value=0
)
print(vision_xs.shape)
return dict(
lang_x=lang_xs,
vision_x=vision_xs,
attention_mask=attention_masks,
labels = labels,
)
def main():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
training_args.data_sampler = My_DistributedBatchSampler
print("Setup Data")
Test_dataset = multi_dataset(text_tokenizer = model_args.tokenizer_path,test_split = data_args.test_split)
Test_dataloader = DataLoader(
Test_dataset,
batch_size=1,
num_workers=1,
pin_memory=True,
sampler=None,
shuffle=True,
collate_fn=None,
drop_last=False,
)
print("Setup Model")
model = MultiLLaMAForCausalLM(
lang_model_path=model_args.lang_encoder_path,
)
ckpt = torch.load('/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/Results/backup/checkpoint-17600/pytorch_model.bin',map_location ='cpu')
# ckpt.pop('embedding_layer.figure_token_weight')
model.load_state_dict(ckpt,strict=False)
model = model.to('cuda')
model.eval()
with open('output_whole_2_epoch' + data_args.test_split+'.csv', mode='w') as outfile:
writer = csv.writer(outfile)
writer.writerow(["Question", "Ground Truth","Pred",'belong_to'])
cc = 0
for sample in tqdm.tqdm(Test_dataloader):
question = sample["question"]
belong_to = sample['belong_to']
# img_pp = sample['img_path']
lang_x = Test_dataset.text_tokenizer(
question, max_length=2048, truncation=True, return_tensors="pt"
)['input_ids'].to('cuda')
vision_x = sample["vision_x"].to('cuda')
answer = sample['answer']
try:
generation = model.generate(lang_x,vision_x)
generated_texts = Test_dataset.text_tokenizer.batch_decode(generation, skip_special_tokens=True)
writer.writerow([question,answer,generated_texts,belong_to])
cc = cc+1
# if cc>=10000:
# break
except:
continue
if __name__ == "__main__":
main()
| RadFM-main | src/test.py |
import tqdm.auto as tqdm
import torch.nn.functional as F
from typing import Optional, Dict, Sequence
from typing import List, Optional, Tuple, Union
import transformers
from My_Trainer.trainer import Trainer
from dataclasses import dataclass, field
from Dataset.multi_dataset import multi_dataset
from Model.RadFM.multimodality_model import MultiLLaMAForCausalLM
from datasampler import My_DistributedBatchSampler
from datasets import load_metric
from Dataset.multi_dataset_test_for_close import multi_dataset_close
import numpy as np
import torch
def compute_metrics(eval_preds):
# metric = load_metric("glue", "mrpc")
ACCs = eval_preds.predictions
# print(ACCs)
return {"accuracy": np.mean(ACCs,axis=-1)}
@dataclass
class ModelArguments:
lang_encoder_path: Optional[str] = field(default="/home/cs/leijiayu/wuchaoyi/book_pretrain/Results/Book_mix_2048_13B_full/checkpoint-45800")
tokenizer_path: str = field(default='/home/cs/leijiayu/wuchaoyi/Finetune_LLAMA/LLAMA_Model/tokenizer', metadata={"help": "Path to the tokenizer data."})
@dataclass
class DataArguments:
Mode: Optional[str] = field(default="Train")
@dataclass
class TrainingArguments(transformers.TrainingArguments):
remove_unused_columns: bool = field(default = False)
batch_size_2D: int = field(default = 4)
batch_size_3D: int = field(default = 1)
output_dir: Optional[str] = field(default="/home/cs/leijiayu/wuchaoyi/multi_modal/src/Results/BLIP_overfit/")
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
@dataclass
class DataCollator(object):
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        # each instance also carries 'loss_reweight' (reweight tensor) and 'key_words_query' (emphasized words)
vision_xs, lang_xs, attention_masks, labels,loss_reweight,key_words_query = tuple([instance[key] for instance in instances] for key in ('vision_x','lang_x', 'attention_mask', 'labels', 'loss_reweight','key_words_query'))
lang_xs = torch.cat([_.unsqueeze(0) for _ in lang_xs],dim = 0)
attention_masks = torch.cat([_.unsqueeze(0) for _ in attention_masks],dim = 0)
labels = torch.cat([_.unsqueeze(0) for _ in labels],dim = 0)
loss_reweight = torch.cat([_.unsqueeze(0) for _ in loss_reweight],dim = 0)
#print(lang_xs.shape,attention_masks.shape,labels.shape)
target_H = 512
target_W = 512
target_D = 4
MAX_D = 0
D_list = list(range(4,65,4))
if len(vision_xs) == 1:
if vision_xs[0].shape[0] >6:
D_list = list(range(4,33,4))
for ii in vision_xs:
try:
D = ii.shape[-1]
if D > MAX_D:
MAX_D = D
except:
continue
for temp_D in D_list:
if abs(temp_D - MAX_D)< abs(target_D - MAX_D):
target_D = temp_D
if len(vision_xs) == 1 and target_D > 4:
target_H = 256
target_W = 256
vision_xs = [torch.nn.functional.interpolate(s, size = (target_H,target_W,target_D)) for s in vision_xs]
vision_xs = torch.nn.utils.rnn.pad_sequence(
vision_xs, batch_first=True, padding_value=0
)
print(vision_xs.shape,vision_xs.dtype)
return dict(
lang_x=lang_xs,
vision_x=vision_xs,
attention_mask=attention_masks,
labels = labels,
loss_reweight = loss_reweight,
key_words_query = key_words_query
)
def main():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
training_args.data_sampler = My_DistributedBatchSampler
print("Setup Data")
Train_dataset = multi_dataset(text_tokenizer = model_args.tokenizer_path)
Eval_dataset = multi_dataset_close(text_tokenizer = model_args.tokenizer_path)
print("Setup Model")
model = MultiLLaMAForCausalLM(
lang_model_path=model_args.lang_encoder_path,
)
trainer = Trainer(model=model,
train_dataset = Train_dataset,
eval_dataset = Eval_dataset,
args = training_args,
data_collator = DataCollator(),
compute_metrics= compute_metrics
)
trainer.train()
trainer.save_state()
if __name__ == "__main__":
main()
| RadFM-main | src/train.py |
import torch.distributed as dist
import math
from torch.utils.data.sampler import Sampler
from torch.utils.data.sampler import Sampler
from torch.utils.data import DataLoader
import random
import torch
from New_Dataset.multi_dataset import multi_dataset
def make_batch(index_list, batch_size, drop_last):
if drop_last:
batches = []
whole_batch_num = len(index_list)//batch_size
for _ in range(whole_batch_num):
batches.append(index_list[batch_size*_:(batch_size*(_+1))])
else:
batches = []
whole_batch_num = math.ceil(len(index_list)/batch_size)
for _ in range(whole_batch_num):
batches.append(index_list[batch_size*_:(batch_size*(_+1))])
return batches
def batch_generation(dataset,batch_size_2D, batch_size_3D,drop_last=False,shuffle = True):
len_2D = len(dataset.data_whole_2D)
len_3D = len(dataset.data_whole_3D)
index_2D = list(range(len_2D))
index_3D = list(range(len_2D,(len_2D+len_3D)))
assert len(index_2D) + len(index_3D) == len(dataset.data_whole)
if shuffle:
random.shuffle(index_2D)
random.shuffle(index_3D)
batch_2D = make_batch(index_2D, batch_size_2D, drop_last)
batch_3D = make_batch(index_3D, batch_size_3D, drop_last)
batch_chunk = batch_2D + batch_3D
return batch_chunk
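# Worked example (illustrative): batch_generation never mixes 2D and 3D samples. With 6 2D
# samples, 2 3D samples, batch_size_2D=4, batch_size_3D=1, drop_last=False and shuffle=False,
# the generated batches are [[0, 1, 2, 3], [4, 5], [6], [7]] (3D indices follow the 2D ones
# in dataset.data_whole).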
class My_DistributedBatchSampler(Sampler):
""" Iterable wrapper that distributes data across multiple workers.
Args:
iterable (iterable)
num_replicas (int, optional): Number of processes participating in distributed training.
rank (int, optional): Rank of the current process within ``num_replicas``.
Example:
>>> list(DistributedSampler(range(10), num_replicas=2, rank=0))
[0, 2, 4, 6, 8]
>>> list(DistributedSampler(range(10), num_replicas=2, rank=1))
[1, 3, 5, 7, 9]
"""
def __init__(self, dataset, num_replicas=None, rank=None, batch_size_2D = 4, batch_size_3D = 1, drop_last = False, shuffle = True):
self.num_replicas = num_replicas
self.rank = rank
self.drop_last = drop_last
self.shuffle = shuffle
self.dataset = dataset
self.batch_size_2D = batch_size_2D
self.batch_size_3D = batch_size_3D
if num_replicas is None or rank is None: # pragma: no cover
if not torch.distributed.is_initialized():
raise RuntimeError('Requires `torch.distributed` to be initialized.')
self.num_replicas = (
torch.distributed.get_world_size() if num_replicas is None else num_replicas)
self.rank = torch.distributed.get_rank() if rank is None else rank
indices = batch_generation(self.dataset,self.batch_size_2D,self.batch_size_3D,self.drop_last,self.shuffle)
if self.rank >= self.num_replicas:
raise IndexError('`rank` must be smaller than the `num_replicas`.')
if self.drop_last and len(indices) % self.num_replicas != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible.
# This is to ensure each rank receives the same amount of data when
# using this Sampler.
self.num_samples = math.ceil(
(len(indices) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
)
else:
self.num_samples = math.ceil(len(indices) / self.num_replicas) # type: ignore[arg-type]
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = batch_generation(self.dataset,self.batch_size_2D,self.batch_size_3D,self.drop_last,self.shuffle)
# print(indices)
if self.shuffle:
random.shuffle(indices)
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
# print(My_DistributedBatchSampler)
# Train_dataset = multi_dataset(text_tokenizer = '/home/cs/leijiayu/wuchaoyi/Finetune_LLAMA/LLAMA_Model/tokenizer')
# DDP_sample_0 = list(My_DistributedBatchSampler(dataset= Train_dataset , num_replicas = 32, rank = 0,))
# for ii in DDP_sample_0:
# print(ii)
| RadFM-main | src/datasampler.py |
from torch.utils.data import Dataset
import numpy as np
import transformers
import pandas as pd
import copy
import random
import os
import numpy as np
import tqdm
import torch
import json
from PIL import Image
import math
import torchvision
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer
from .dataset import *
def stack_images(images):
target_H = 512
target_W = 512
target_D = 4
if len(images) == 0:
return torch.zeros((1,3,target_H,target_W,target_D))
MAX_D = 4
D_list = list(range(4,65,4))
for ii in images:
try:
D = ii.shape[3]
if D > MAX_D:
MAX_D = D
except:
continue
for temp_D in D_list:
if abs(temp_D - MAX_D)< abs(target_D - MAX_D):
target_D = temp_D
stack_images = []
for s in images:
s = torch.tensor(s)
if len(s.shape) == 3:
#print(s.shape)
stack_images.append(torch.nn.functional.interpolate(s.unsqueeze(0).unsqueeze(-1), size = (target_H,target_W,target_D)))
else:
stack_images.append(torch.nn.functional.interpolate(s.unsqueeze(0), size = (target_H,target_W,target_D)))
images = torch.cat(stack_images, dim=0)
return images
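# Usage sketch (illustrative): stack_images resizes a mixed list of 2D images ([3, H, W]) and
# 3D volumes ([3, H, W, D]) onto a common (512, 512, target_D) grid and stacks them, e.g.
#
# imgs = [np.random.rand(3, 224, 224), np.random.rand(3, 64, 64, 12)]
# print(stack_images(imgs).shape)   # torch.Size([2, 3, 512, 512, 12])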
class multi_dataset(Dataset):
def __init__(self, text_tokenizer, test_split = 'close', max_seq = 2048, max_img_size = 10, image_num=32,voc_size =32000):
self.text_tokenizer = text_tokenizer
self.max_img_size = max_img_size
self.image_num = image_num
self.max_seq = max_seq
self.voc_size = voc_size
self.H = 512
self.W = 512
self.image_padding_tokens = []
if isinstance(self.text_tokenizer,str):
self.text_tokenizer = LlamaTokenizer.from_pretrained(
self.text_tokenizer,
)
special_token = {"additional_special_tokens": ["<image>","</image>"]}
for i in range(max_img_size):
image_padding_token = ""
for j in range(image_num):
image_token = "<image"+str(i*image_num+j)+">"
image_padding_token = image_padding_token + image_token
special_token["additional_special_tokens"].append("<image"+str(i*image_num+j)+">")
self.image_padding_tokens.append(image_padding_token)
self.text_tokenizer.add_special_tokens(
special_token
)
self.text_tokenizer.pad_token_id = 0
self.text_tokenizer.bos_token_id = 1
self.text_tokenizer.eos_token_id = 2
self.data_whole_2D = []
self.data_whole_3D = []
self.dataset_reflect = {}
self.test_split = test_split
### closed ###
if self.test_split == 'diagnosis':
chestxray_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/chestxray_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['chestxray_dataset_bn'] = chestxray_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'chestxray_dataset_bn':i} for i in range(len(chestxray_dataset_bn))]
print('chestxray_dataset_bn loaded')
pcxr_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pcxr_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['pcxr_dataset_bn'] = pcxr_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'pcxr_dataset_bn':i} for i in range(len(pcxr_dataset_bn))]
print('pcxr_dataset_bn loaded')
mammo_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mammo_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['mammo_dataset_bn'] = mammo_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'mammo_dataset_bn':i} for i in range(len(mammo_dataset_bn))]
print('mammo_dataset_bn loaded')
spinexr_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/spinexr_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['spinexr_dataset_bn'] = spinexr_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'spinexr_dataset_bn':i} for i in range(len(spinexr_dataset_bn))]
print('spinexr_dataset_bn loaded')
### multi-label ###
chestxray_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/chestxray_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/cls_prompt.json')
self.dataset_reflect['chestxray_dataset'] = chestxray_dataset
self.data_whole_2D = self.data_whole_2D + [{'chestxray_dataset':i} for i in range(len(chestxray_dataset))]
print('chestxray_dataset loaded')
pcxr_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pcxr_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/cls_prompt.json')
self.dataset_reflect['pcxr_dataset'] = pcxr_dataset
self.data_whole_2D = self.data_whole_2D + [{'pcxr_dataset':i} for i in range(len(pcxr_dataset))]
print('pcxr_dataset loaded')
mammo_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mammo_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/mammo_prompt.json')
self.dataset_reflect['mammo_dataset'] = mammo_dataset
self.data_whole_2D = self.data_whole_2D + [{'mammo_dataset':i} for i in range(len(mammo_dataset))]
print('mammo_dataset loaded')
spinexr_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/spinexr_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/spinexr_prompt.json')
self.dataset_reflect['spinexr_dataset'] = spinexr_dataset
self.data_whole_2D = self.data_whole_2D + [{'spinexr_dataset':i} for i in range(len(spinexr_dataset))]
print('spinexr_dataset loaded')
if self.test_split == 'modality':
radiomodality_dataset = Radio_Modality_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radio_modality_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/modality_prompt.json',
modality_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/modality_set.json',down_sample_ratio = 1)
self.dataset_reflect['radiomodality_dataset'] = radiomodality_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiomodality_dataset':i} for i in range(len(radiomodality_dataset))]
print('radiomodality_dataset loaded')
# medpix_single_dataset = MedPix_Single_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_single_test_close.csv')
# self.dataset_reflect['medpix_single_dataset'] = medpix_single_dataset
# self.data_whole_2D = self.data_whole_2D + [{'medpix_single_dataset':i} for i in range(len(medpix_single_dataset))]
# print('medpix_single_dataset loaded')
if self.test_split == 'vqa':
# medpix_qa_dataset = MedPix_QA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_questions_test.csv')
# self.dataset_reflect['medpix_qa_dataset'] = medpix_qa_dataset
# self.data_whole_2D = self.data_whole_2D + [{'medpix_qa_dataset':i} for i in range(len(medpix_qa_dataset))]
# print('medpix_qa_dataset loaded')
pmcvqa_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pmcvqa_test.csv')
self.dataset_reflect['pmcvqa_dataset'] = pmcvqa_dataset
self.data_whole_2D = self.data_whole_2D + [{'pmcvqa_dataset':i} for i in range(len(pmcvqa_dataset))]
print('pmcvqa_dataset loaded')
casereport_dataset = CaseReport_dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/filtered_case_report_test.csv',
img_path = '/home/cs/leijiayu/data/all_images/figures/')
self.dataset_reflect['casereport_dataset'] = casereport_dataset
self.data_whole_2D = self.data_whole_2D + [{'casereport_dataset':i} for i in range(len(casereport_dataset))]
print('casereport_dataset loaded')
vqarad_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/vqarad_test.csv')
self.dataset_reflect['vqarad_dataset'] = vqarad_dataset
self.data_whole_2D = self.data_whole_2D + [{'vqarad_dataset':i} for i in range(len(vqarad_dataset))]
print('vqarad_dataset loaded')
slake_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/slakevqa_test.csv')
self.dataset_reflect['slake_dataset'] = slake_dataset
self.data_whole_2D = self.data_whole_2D + [{'slake_dataset':i} for i in range(len(slake_dataset))]
print('slake_dataset loaded')
## 3D
radiovqa_dataset = RadioVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radiology_vqa_test.csv')
self.dataset_reflect['radiovqa_dataset'] = radiovqa_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiovqa_dataset':i} for i in range(len(radiovqa_dataset))]
print('radiovqa_dataset loaded')
if self.test_split == 'caption':
## open ###
# medpix_multi_dataset = MedPix_Multi_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_multi_test.csv')
# self.dataset_reflect['medpix_multi_dataset'] = medpix_multi_dataset
# self.data_whole_2D = self.data_whole_2D + [{'medpix_multi_dataset':i} for i in range(len(medpix_multi_dataset))]
# print('medpix_multi_dataset loaded')
chestxray_caption_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mimic_caption_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/report_prompt.json')
self.dataset_reflect['chestxray_caption_dataset'] = chestxray_caption_dataset
self.data_whole_2D = self.data_whole_2D + [{'chestxray_caption_dataset':i} for i in range(len(chestxray_caption_dataset))]
print('chestxray_caption_dataset loaded')
# medpix_single_dataset = MedPix_Single_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_single_test.csv')
# self.dataset_reflect['medpix_single_dataset'] = medpix_single_dataset
# self.data_whole_2D = self.data_whole_2D + [{'medpix_single_dataset':i} for i in range(len(medpix_single_dataset))]
# print('medpix_single_dataset loaded')
radiocaption_dataset = RadioCaption_Dataset(json_path='/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radiology_article_npy_test.json',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/caption_prompt.json'
)
self.dataset_reflect['radiocaption_dataset'] = radiocaption_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiocaption_dataset':i} for i in range(len(radiocaption_dataset))]
print('radiocaption_dataset loaded')
if self.test_split == 'feature':
radiofeatures_dataset = Radiofeatures_Dataset(json_path='/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radiology_article_npy_test.json',
prompt_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/radiology_feature_prompt.json',
disease_prompt_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json',
article_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/articles_resave.json')
self.dataset_reflect['radiofeatures_dataset'] = radiofeatures_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiofeatures_dataset':i} for i in range(len(radiofeatures_dataset))]
print('radiofeatures_dataset loaded')
self.data_whole = self.data_whole_2D + self.data_whole_3D
def __len__(self):
return len(self.data_whole)
def __getitem__(self, idx):
# vision_x, lang_x, attention_mask, labels
sample = list(self.data_whole[idx].items())[0]
#print(sample)
belong_to = sample[0]
sample = self.dataset_reflect[sample[0]][sample[1]]
'''
Dict: {
"image_dict": [
{"image": image, # image is a tensor of shape [c,w,h,d], c is channel=3, w is width, h is height, d is depth(1 for chestxray,pmcoa,pmcvqa)
"position": {"question": 0}}, position is a dict, random choice of 0 or len(question)
]
"question": question,
"answer":answer,
}
'''
images = sample["image_dict"]
if len(images) > 8:
images = random.sample(images,8)
question = str(sample["question"])
answer = str(sample["answer"])
images, question, answer = self.text_add_image(images,question,answer)
#print(question,answer)
### make vision_x
try:
vision_x = stack_images(images)
except:
print(self.data_whole[idx].items())
input()
#print(vision_x.shape,question,answer)
return {'vision_x': vision_x,'question':question, 'answer':answer, 'belong_to':belong_to,}
def text_add_image(self,images,question,answer):
ref_image = []
question_list = [[] for _ in range(len(str(question)))]
answer_list = [[] for _ in range(len(str(answer)))]
for index, image in enumerate(images):
ref_image.append(image["image"])
position = image["position"]
position = list(position.items())[0]
if position[0] == 'question':
insert_loc = position[1] -1
if insert_loc < 0:
insert_loc = 0
question_list[insert_loc].append(index)
if position[0] == 'answer':
insert_loc = position[1] -1
if insert_loc < 0:
insert_loc = 0
answer_list[insert_loc].append(index)
new_question = ''
new_answer = ''
for char_i in range(len(question)):
if question_list[char_i] == []:
new_question = new_question + question[char_i]
if question_list[char_i] != []:
for img_index in question_list[char_i]:
try:
new_question = new_question + '<image>' + self.image_padding_tokens[img_index] + '</image>'
except:
print("Error: out of max image input size")
new_question = new_question + question[char_i]
for char_i in range(len(answer)):
if answer_list[char_i] == []:
new_answer = new_answer + answer[char_i]
if answer_list[char_i] != []:
for img_index in answer_list[char_i]:
try:
new_answer = new_answer + '<image>' + self.image_padding_tokens[img_index] + '</image>'
except:
print("Error: out of max image input size")
new_answer = new_answer + answer[char_i]
new_answer = new_answer.replace('•','')
return ref_image,new_question,new_answer
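# Illustrative sketch (values made up) of how text_add_image splices image placeholders
# into the text: for an image anchored at {"question": 0} with image_num = 32 slots,
#   question = "What is shown?"  ->  "<image><image0><image1>...<image31></image>What is shown?"
#   answer   = "Pneumonia."      ->  unchanged (no image anchored to the answer)
# Each image therefore occupies a fixed budget of `image_num` placeholder tokens, which the
# model presumably later fills with that image's visual embeddings.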
# torch.set_printoptions(profile="full")
# text_tokenizer = '/home/cs/leijiayu/wuchaoyi/Finetune_LLAMA/LLAMA_Model/tokenizer'
# dataset = multi_dataset(text_tokenizer = text_tokenizer)
# print(len(dataset))
# for i in range(10):
# dataset[i]
# input() | RadFM-main | src/Dataset/multi_dataset_test.py |
from torch.utils.data import Dataset
import numpy as np
import transformers
import pandas as pd
import copy
import random
import os
import tqdm
import torch
import json
from PIL import Image
import math
import torchvision
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer
from .dataset import *
def find_position(label, key_embeddings):
loss_reweight = torch.ones(label.shape)
for i in range(len(label)):
if label[i] == -100:
loss_reweight[i] = 0
else:
for key_embedding in key_embeddings:
if torch.equal(label[i:i+len(key_embedding)], key_embedding):
loss_reweight[i:i+len(key_embedding)] = 3
return loss_reweight
def stack_images(images):
target_H = 512
target_W = 512
target_D = 4
if len(images) == 0:
return torch.zeros((1,3,target_H,target_W,target_D))
MAX_D = 4
D_list = list(range(4,65,4))
for ii in images:
try:
D = ii.shape[3]
if D > MAX_D:
MAX_D = D
except:
continue
for temp_D in D_list:
if abs(temp_D - MAX_D)< abs(target_D - MAX_D):
target_D = temp_D
stack_images = []
for s in images:
if len(s.shape) == 3:
#print(s.shape)
stack_images.append(torch.nn.functional.interpolate(s.unsqueeze(0).unsqueeze(-1), size = (target_H,target_W,target_D)))
else:
stack_images.append(torch.nn.functional.interpolate(s.unsqueeze(0), size = (target_H,target_W,target_D)))
images = torch.cat(stack_images, dim=0)
return images
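# Rough usage sketch for stack_images (illustrative tensors): every input is resized to
# 512x512 in-plane, and the shared depth becomes the multiple of 4 (between 4 and 64)
# closest to the deepest input; 2D images get a singleton depth that is then interpolated.
#   imgs = [torch.rand(3, 256, 256),        # 2D image, [c, h, w]
#           torch.rand(3, 512, 512, 24)]    # 3D volume, [c, h, w, d]
#   stack_images(imgs).shape  ->  torch.Size([2, 3, 512, 512, 24])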
class multi_dataset_close(Dataset):
def __init__(self, text_tokenizer, test_split = 'close', max_seq = 2048, max_img_size = 10, image_num=32,voc_size =32000, down_sample_ratio = 100):
self.down_sample_ratio = down_sample_ratio
self.text_tokenizer = text_tokenizer
self.max_img_size = max_img_size
self.image_num = image_num
self.max_seq = max_seq
self.voc_size = voc_size
self.H = 512
self.W = 512
self.image_padding_tokens = []
if isinstance(self.text_tokenizer,str):
self.text_tokenizer = LlamaTokenizer.from_pretrained(
self.text_tokenizer,
)
special_token = {"additional_special_tokens": ["<image>","</image>"]}
for i in range(max_img_size):
image_padding_token = ""
for j in range(image_num):
image_token = "<image"+str(i*image_num+j)+">"
image_padding_token = image_padding_token + image_token
special_token["additional_special_tokens"].append("<image"+str(i*image_num+j)+">")
self.image_padding_tokens.append(image_padding_token)
self.text_tokenizer.add_special_tokens(
special_token
)
self.text_tokenizer.pad_token_id = 0
self.text_tokenizer.bos_token_id = 1
self.text_tokenizer.eos_token_id = 2
self.data_whole_2D = []
self.data_whole_3D = []
self.dataset_reflect = {}
self.test_split = test_split
### closed ###
if self.test_split == 'close':
chestxray_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/chestxray_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['chestxray_dataset_bn'] = chestxray_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'chestxray_dataset_bn':i} for i in range(len(chestxray_dataset_bn))]
print('chestxray_dataset_bn loaded')
pcxr_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pcxr_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['pcxr_dataset_bn'] = pcxr_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'pcxr_dataset_bn':i} for i in range(len(pcxr_dataset_bn))]
print('pcxr_dataset_bn loaded')
mammo_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mammo_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['mammo_dataset_bn'] = mammo_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'mammo_dataset_bn':i} for i in range(len(mammo_dataset_bn))]
print('mammo_dataset_bn loaded')
spinexr_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/spinexr_balance_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['spinexr_dataset_bn'] = spinexr_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'spinexr_dataset_bn':i} for i in range(len(spinexr_dataset_bn))]
print('spinexr_dataset_bn loaded')
radiomodality_dataset = Radio_Modality_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radio_modality_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/modality_prompt.json',
modality_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/modality_set.json')
self.dataset_reflect['radiomodality_dataset'] = radiomodality_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiomodality_dataset':i} for i in range(len(radiomodality_dataset))]
print('radiomodality_dataset loaded')
### multi-label ###
chestxray_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/chestxray_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/cls_prompt.json')
self.dataset_reflect['chestxray_dataset'] = chestxray_dataset
self.data_whole_2D = self.data_whole_2D + [{'chestxray_dataset':i} for i in range(len(chestxray_dataset))]
print('chestxray_dataset loaded')
pcxr_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pcxr_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/cls_prompt.json')
self.dataset_reflect['pcxr_dataset'] = pcxr_dataset
self.data_whole_2D = self.data_whole_2D + [{'pcxr_dataset':i} for i in range(len(pcxr_dataset))]
print('pcxr_dataset loaded')
mammo_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mammo_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/mammo_prompt.json')
self.dataset_reflect['mammo_dataset'] = mammo_dataset
self.data_whole_2D = self.data_whole_2D + [{'mammo_dataset':i} for i in range(len(mammo_dataset))]
print('mammo_dataset loaded')
spinexr_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/spinexr_test.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/spinexr_prompt.json')
self.dataset_reflect['spinexr_dataset'] = spinexr_dataset
self.data_whole_2D = self.data_whole_2D + [{'spinexr_dataset':i} for i in range(len(spinexr_dataset))]
print('spinexr_dataset loaded')
medpix_single_dataset = MedPix_Single_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_single_test_close.csv')
self.dataset_reflect['medpix_single_dataset'] = medpix_single_dataset
self.data_whole_2D = self.data_whole_2D + [{'medpix_single_dataset':i} for i in range(len(medpix_single_dataset))]
print('medpix_single_dataset loaded')
vqarad_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/vqarad_test.csv')
self.dataset_reflect['vqarad_dataset'] = vqarad_dataset
self.data_whole_2D = self.data_whole_2D + [{'vqarad_dataset':i} for i in range(len(vqarad_dataset))]
print('vqarad_dataset loaded')
slake_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/slakevqa_test.csv')
self.dataset_reflect['slake_dataset'] = slake_dataset
self.data_whole_2D = self.data_whole_2D + [{'slake_dataset':i} for i in range(len(slake_dataset))]
print('slake_dataset loaded')
self.data_whole = self.data_whole_2D + self.data_whole_3D
random.shuffle(self.data_whole)
def __len__(self):
return math.ceil(len(self.data_whole)/self.down_sample_ratio)
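    # Down-sampling sketch (illustrative numbers): with down_sample_ratio = 100 and
    # 10,000 underlying samples, __len__ returns 100 and __getitem__(idx) draws one random
    # element from the idx-th block of 100 (e.g. idx = 3 -> one of samples 300..399), so each
    # evaluation pass sees a different 1/100 slice of the shuffled test pool.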
def __getitem__(self, idx):
idx = (self.down_sample_ratio*idx +random.randint(0,self.down_sample_ratio-1))%len(self.data_whole)
# vision_x, lang_x, attention_mask, labels
sample = list(self.data_whole[idx].items())[0]
dataset_index = sample[0]
#print(sample)
sample = self.dataset_reflect[sample[0]][sample[1]]
'''
Dict: {
"image_dict": [
{"image": image, # image is a tensor of shape [c,w,h,d], c is channel=3, w is width, h is height, d is depth(1 for chestxray,pmcoa,pmcvqa)
"position": {"question": 0}}, position is a dict, random choice of 0 or len(question)
]
"question": question,
"answer":answer,
}
'''
images = sample["image_dict"]
question = str(sample["question"])
answer = str(sample["answer"])
images, question, answer = self.text_add_image(images,question,answer)
# print(question,answer)
### make vision_x
try:
vision_x = stack_images(images)
except:
print(self.data_whole[idx].items())
#print(vision_x.shape,question,answer)
### make lang_x ###
self.text_tokenizer.padding_side = "right"
text_tensor = self.text_tokenizer(
question + ' ' + answer, max_length=self.max_seq, truncation=True, padding="max_length", return_tensors="pt"
)
lang_x = text_tensor["input_ids"][0]
attention_mask = text_tensor["attention_mask"][0]
try:
lang_x[torch.sum(attention_mask)] = self.text_tokenizer.eos_token_id
except:
pass
### make label ###
emphasize_words = []
# emphasize_words = [str(_) for _ in self.words_extract.extract(answer)]
if emphasize_words != []:
emphasize_words_tensor = self.text_tokenizer(
emphasize_words , max_length=self.max_seq
)
key_embeddings = [torch.tensor(_[1:]) for _ in emphasize_words_tensor['input_ids']]
else:
key_embeddings = []
question_tensor = self.text_tokenizer(
question, max_length=self.max_seq, truncation=True, padding="max_length", return_tensors="pt"
)
question_length = torch.sum(question_tensor["attention_mask"][0])
labels = lang_x.clone()
labels[labels == self.text_tokenizer.pad_token_id] = -100
labels[labels >= self.voc_size] = -100
labels[:question_length] = -100
reweight_tensor = find_position(labels, key_embeddings)
if dataset_index == 'paper_inline_dataset':
emphasize_words = []
# print(labels,key_embeddings,reweight_tensor)
return {'vision_x': vision_x,'lang_x':lang_x, 'attention_mask': attention_mask, 'labels':labels, 'loss_reweight': reweight_tensor, 'key_words_query': emphasize_words}
def text_add_image(self,images,question,answer):
question = str(question)
answer = str(answer)
ref_image = []
question_list = [[] for _ in range(len(str(question)))]
answer_list = [[] for _ in range(len(str(answer)))]
for index, image in enumerate(images):
ref_image.append(image["image"])
position = image["position"]
position = list(position.items())[0]
if position[0] == 'question':
insert_loc = position[1] -1
if insert_loc < 0:
insert_loc = 0
question_list[insert_loc].append(index)
if position[0] == 'answer':
insert_loc = position[1] -1
if insert_loc < 0:
insert_loc = 0
answer_list[insert_loc].append(index)
new_question = ''
new_answer = ''
for char_i in range(len(question)):
if question_list[char_i] == []:
new_question = new_question + question[char_i]
if question_list[char_i] != []:
for img_index in question_list[char_i]:
try:
new_question = new_question + '<image>' + self.image_padding_tokens[img_index] + '</image>'
except:
print("Error: out of max image input size")
new_question = new_question + question[char_i]
for char_i in range(len(answer)):
if answer_list[char_i] == []:
new_answer = new_answer + answer[char_i]
if answer_list[char_i] != []:
for img_index in answer_list[char_i]:
try:
new_answer = new_answer + '<image>' + self.image_padding_tokens[img_index] + '</image>'
except:
print("Error: out of max image input size")
new_answer = new_answer + answer[char_i]
new_answer = new_answer.replace('•','')
return ref_image,new_question,new_answer
# torch.set_printoptions(profile="full")
# text_tokenizer = '/home/cs/leijiayu/wuchaoyi/Finetune_LLAMA/LLAMA_Model/tokenizer'
# dataset = multi_dataset_close(text_tokenizer = text_tokenizer)
# print(len(dataset))
# for i in range(10):
# dataset[i]
# input() | RadFM-main | src/Dataset/multi_dataset_test_for_close.py |
from torch.utils.data import Dataset
import numpy as np
import transformers
import pandas as pd
import copy
import random
import os
import tqdm
import torch
import json
from PIL import Image
import math
import torchvision
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer
from .dataset import *
import spacy
from spacy.tokens import Span
from scispacy.abbreviation import AbbreviationDetector
from scispacy.umls_linking import UmlsEntityLinker
class umls_extractor:
def __init__(self):
nlp = spacy.load("en_core_sci_lg")
nlp.add_pipe("abbreviation_detector")
nlp.add_pipe("scispacy_linker", config={"resolve_abbreviations": True, "linker_name": "umls"})
self.nlp = nlp
def extract(self,text):
doc = self.nlp(text)
ent_set = doc.ents
return ent_set
def find_position(label, key_embeddings):
loss_reweight = torch.ones(label.shape)
for i in range(len(label)):
if label[i] == -100:
loss_reweight[i] = 0
else:
for key_embedding in key_embeddings:
if torch.equal(label[i:i+len(key_embedding)], key_embedding):
loss_reweight[i:i+len(key_embedding)] = 3
return loss_reweight
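# Illustrative sketch (values made up) of the key-word loss re-weighting: umls_extractor
# pulls medical terms from the answer, those terms are tokenized into key_embeddings, and
# find_position assigns a per-token weight over the label sequence:
#   answer          = "There is a left pleural effusion."
#   emphasize_words = ["pleural effusion"]          # from words_extract.extract(answer)
#   key_embeddings  = [tensor([...])]               # token ids of "pleural effusion", BOS stripped
#   loss_reweight   = 0 where label == -100 (question/padding),
#                     3 on the token span matching "pleural effusion",
#                     1 on the remaining answer tokens.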
def stack_images(images):
target_H = 512
target_W = 512
target_D = 4
if len(images) == 0:
return torch.zeros((1,3,target_H,target_W,target_D))
MAX_D = 4
D_list = list(range(4,65,4))
for ii in images:
try:
D = ii.shape[3]
if D > MAX_D:
MAX_D = D
except:
continue
for temp_D in D_list:
if abs(temp_D - MAX_D)< abs(target_D - MAX_D):
target_D = temp_D
stack_images = []
for s in images:
if len(s.shape) == 3:
#print(s.shape)
stack_images.append(torch.nn.functional.interpolate(s.unsqueeze(0).unsqueeze(-1), size = (target_H,target_W,target_D)))
else:
stack_images.append(torch.nn.functional.interpolate(s.unsqueeze(0), size = (target_H,target_W,target_D)))
images = torch.cat(stack_images, dim=0)
return images
class multi_dataset(Dataset):
def __init__(self, text_tokenizer, max_seq = 2048, max_img_size = 100, image_num=32,voc_size =32000):
self.text_tokenizer = text_tokenizer
self.max_img_size = max_img_size
self.image_num = image_num
self.max_seq = max_seq
self.voc_size = voc_size
self.H = 512
self.W = 512
self.image_padding_tokens = []
self.words_extract = umls_extractor()
if isinstance(self.text_tokenizer,str):
self.text_tokenizer = LlamaTokenizer.from_pretrained(
self.text_tokenizer,
)
special_token = {"additional_special_tokens": ["<image>","</image>"]}
for i in range(max_img_size):
image_padding_token = ""
for j in range(image_num):
image_token = "<image"+str(i*image_num+j)+">"
image_padding_token = image_padding_token + image_token
special_token["additional_special_tokens"].append("<image"+str(i*image_num+j)+">")
self.image_padding_tokens.append(image_padding_token)
self.text_tokenizer.add_special_tokens(
special_token
)
self.text_tokenizer.pad_token_id = 0
self.text_tokenizer.bos_token_id = 1
self.text_tokenizer.eos_token_id = 2
self.data_whole_2D = []
self.data_whole_3D = []
self.dataset_reflect = {}
### 2D
### pretrain ###
# paper_inline_dataset = Paper_Inline_dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/multi_modal/Data/paper_train.csv',
# img_path = '/home/cs/leijiayu/data/all_images/figures/')
# self.dataset_reflect['paper_inline_dataset'] = paper_inline_dataset
# self.data_whole_2D = self.data_whole_2D + [{'paper_inline_dataset':i} for i in range(len(paper_inline_dataset))]
# print('paper_inline_dataset loaded')
# pmcoa_dataset = PMCOA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pmcoa_image_caption_train.csv',
# img_root_dir = '/home/cs/leijiayu/data/PMCVQA/caption_T060_filtered_top4_sep_v0_subfigures',
# prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/caption_prompt.json')
# self.dataset_reflect['pmcoa_dataset'] = pmcoa_dataset
# self.data_whole_2D = self.data_whole_2D + [{'pmcoa_dataset':i} for i in range(len(pmcoa_dataset))]
# print('pmcoa_dataset loaded')
### sft ###
### medpix ###
medpix_multi_dataset = MedPix_Multi_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_multi_train.csv')
self.dataset_reflect['medpix_multi_dataset'] = medpix_multi_dataset
self.data_whole_2D = self.data_whole_2D + [{'medpix_multi_dataset':i} for i in range(len(medpix_multi_dataset))]
print('medpix_multi_dataset loaded')
medpix_single_dataset = MedPix_Single_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_single_train.csv')
self.dataset_reflect['medpix_single_dataset'] = medpix_single_dataset
self.data_whole_2D = self.data_whole_2D + [{'medpix_single_dataset':i} for i in range(len(medpix_single_dataset))]
print('medpix_single_dataset loaded')
medpix_qa_dataset = MedPix_QA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/MedPix_questions_train.csv')
self.dataset_reflect['medpix_qa_dataset'] = medpix_qa_dataset
self.data_whole_2D = self.data_whole_2D + [{'medpix_qa_dataset':i} for i in range(len(medpix_qa_dataset))]
print('medpix_qa_dataset loaded')
### CXR ###
### caption ###
chestxray_caption_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mimic_caption_train.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/report_prompt.json')
self.dataset_reflect['chestxray_caption_dataset'] = chestxray_caption_dataset
self.data_whole_2D = self.data_whole_2D + [{'chestxray_caption_dataset':i} for i in range(len(chestxray_caption_dataset))]
print('chestxray_caption_dataset loaded')
### binary ###
chestxray_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/chestxray_balance_train_new.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['chestxray_dataset_bn'] = chestxray_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'chestxray_dataset_bn':i} for i in range(len(chestxray_dataset_bn))]
print('chestxray_dataset_bn loaded')
pcxr_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pcxr_balance_train.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['pcxr_dataset_bn'] = pcxr_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'pcxr_dataset_bn':i} for i in range(len(pcxr_dataset_bn))]
print('pcxr_dataset_bn loaded')
mammo_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mammo_balance_train.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['mammo_dataset_bn'] = mammo_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'mammo_dataset_bn':i} for i in range(len(mammo_dataset_bn))]
print('mammo_dataset_bn loaded')
spinexr_dataset_bn = Binary_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/spinexr_balance_train.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json')
self.dataset_reflect['spinexr_dataset_bn'] = spinexr_dataset_bn
self.data_whole_2D = self.data_whole_2D + [{'spinexr_dataset_bn':i} for i in range(len(spinexr_dataset_bn))]
print('spinexr_dataset_bn loaded')
### multi-label ###
chestxray_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/chestxray_new.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/cls_prompt.json')
self.dataset_reflect['chestxray_dataset'] = chestxray_dataset
self.data_whole_2D = self.data_whole_2D + [{'chestxray_dataset':i} for i in range(len(chestxray_dataset))]
print('chestxray_dataset loaded')
pcxr_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pcxr_train_new.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/cls_prompt.json')
self.dataset_reflect['pcxr_dataset'] = pcxr_dataset
self.data_whole_2D = self.data_whole_2D + [{'pcxr_dataset':i} for i in range(len(pcxr_dataset))]
print('pcxr_dataset loaded')
mammo_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/mammo_train_new.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/mammo_prompt.json')
self.dataset_reflect['mammo_dataset'] = mammo_dataset
self.data_whole_2D = self.data_whole_2D + [{'mammo_dataset':i} for i in range(len(mammo_dataset))]
print('mammo_dataset loaded')
spinexr_dataset = ChestXray_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/spinexr_train_new.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/spinexr_prompt.json')
self.dataset_reflect['spinexr_dataset'] = spinexr_dataset
self.data_whole_2D = self.data_whole_2D + [{'spinexr_dataset':i} for i in range(len(spinexr_dataset))]
print('spinexr_dataset loaded')
### VQA ###
pmcvqa_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/pmcvqa_train.csv')
self.dataset_reflect['pmcvqa_dataset'] = pmcvqa_dataset
self.data_whole_2D = self.data_whole_2D + [{'pmcvqa_dataset':i} for i in range(len(pmcvqa_dataset))]
print('pmcvqa_dataset loaded')
casereport_dataset = CaseReport_dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/filtered_case_report_train.csv',
img_path = '/home/cs/leijiayu/data/all_images/figures/')
self.dataset_reflect['casereport_dataset'] = casereport_dataset
self.data_whole_2D = self.data_whole_2D + [{'casereport_dataset':i} for i in range(len(casereport_dataset))]
print('casereport_dataset loaded')
vqarad_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/vqarad_train.csv')
self.dataset_reflect['vqarad_dataset'] = vqarad_dataset
self.data_whole_2D = self.data_whole_2D + [{'vqarad_dataset':i} for i in range(len(vqarad_dataset))]
print('vqarad_dataset loaded')
slake_dataset = PMCVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/slakevqa_train.csv')
self.dataset_reflect['slake_dataset'] = slake_dataset
self.data_whole_2D = self.data_whole_2D + [{'slake_dataset':i} for i in range(len(slake_dataset))]
print('slake_dataset loaded')
### 3D
radiovqa_dataset = RadioVQA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radiology_vqa_train.csv')
self.dataset_reflect['radiovqa_dataset'] = radiovqa_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiovqa_dataset':i} for i in range(len(radiovqa_dataset))]
print('radiovqa_dataset loaded')
radiomodality_dataset = Radio_Modality_Dataset(csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radio_modality_train.csv',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/modality_prompt.json',
modality_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/modality_set.json')
self.dataset_reflect['radiomodality_dataset'] = radiomodality_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiomodality_dataset':i} for i in range(len(radiomodality_dataset))]
print('radiomodality_dataset loaded')
radiocaption_dataset = RadioCaption_Dataset(json_path='/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radiology_article_npy_train.json',
prompt_json_file = '/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/caption_prompt.json',
)
self.dataset_reflect['radiocaption_dataset'] = radiocaption_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiocaption_dataset':i} for i in range(len(radiocaption_dataset))]
print('radiocaption_dataset loaded')
radiofeatures_dataset = Radiofeatures_Dataset(json_path='/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/radiology_article_npy_train.json',
prompt_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/radiology_feature_prompt.json',
disease_prompt_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/dataset/yes_no_prompt.json',
article_json_file = '/gpfs/home/cs/leijiayu/wuchaoyi/wangyingjie/src/New_Dataset/data_csv/articles_resave.json')
self.dataset_reflect['radiofeatures_dataset'] = radiofeatures_dataset
self.data_whole_3D = self.data_whole_3D + [{'radiofeatures_dataset':i} for i in range(len(radiofeatures_dataset))]
print('radiofeatures_dataset loaded')
self.data_whole = self.data_whole_2D + self.data_whole_3D
def __len__(self):
return len(self.data_whole)
def __getitem__(self, idx):
# vision_x, lang_x, attention_mask, labels
sample = list(self.data_whole[idx].items())[0]
#print(sample)
dataset_index = sample[0]
sample = self.dataset_reflect[sample[0]][sample[1]]
'''
Dict: {
"image_dict": [
{"image": image, # image is a tensor of shape [c,w,h,d], c is channel=3, w is width, h is height, d is depth(1 for chestxray,pmcoa,pmcvqa)
"position": {"question": 0}}, position is a dict, random choice of 0 or len(question)
]
"question": question,
"answer":answer,
}
'''
images = sample["image_dict"]
question = sample["question"]
answer = sample["answer"]
images, question, answer = self.text_add_image(images,question,answer)
# print(question,answer)
### make vision_x
try:
vision_x = stack_images(images)
except:
print(self.data_whole[idx].items())
#print(vision_x.shape,question,answer)
### make lang_x ###
self.text_tokenizer.padding_side = "right"
text_tensor = self.text_tokenizer(
question + ' ' + answer, max_length=self.max_seq, truncation=True, padding="max_length", return_tensors="pt"
)
lang_x = text_tensor["input_ids"][0]
attention_mask = text_tensor["attention_mask"][0]
try:
lang_x[torch.sum(attention_mask)] = self.text_tokenizer.eos_token_id
except:
pass
### make label ###
emphasize_words = []
emphasize_words = [str(_) for _ in self.words_extract.extract(answer)]
if emphasize_words != []:
emphasize_words_tensor = self.text_tokenizer(
emphasize_words , max_length=self.max_seq
)
key_embeddings = [torch.tensor(_[1:]) for _ in emphasize_words_tensor['input_ids']]
else:
key_embeddings = []
question_tensor = self.text_tokenizer(
question, max_length=self.max_seq, truncation=True, padding="max_length", return_tensors="pt"
)
question_length = torch.sum(question_tensor["attention_mask"][0])
labels = lang_x.clone()
labels[labels == self.text_tokenizer.pad_token_id] = -100
labels[labels >= self.voc_size] = -100
labels[:question_length] = -100
reweight_tensor = find_position(labels, key_embeddings)
if dataset_index == 'paper_inline_dataset':
emphasize_words = []
# print(labels,key_embeddings,reweight_tensor)
return {'vision_x': vision_x,'lang_x':lang_x, 'attention_mask': attention_mask, 'labels':labels, 'loss_reweight': reweight_tensor, 'key_words_query': emphasize_words}
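    # Label construction sketch (illustrative): question and answer are tokenized together,
    # an EOS id is written into the first padding slot, and supervision is restricted to the
    # answer span:
    #   lang_x = [Q0 Q1 Q2 A0 A1 A2 EOS PAD PAD ...]
    #   labels = [-100 -100 -100 A0 A1 A2 EOS -100 -100 ...]   # question, padding and image tokens masked
    #   loss_reweight = 3 on extracted UMLS terms, 1 on other answer tokens, 0 where labels == -100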
def text_add_image(self,images,question,answer):
ref_image = []
question = str(question)
answer = str(answer)
question_list = [[] for _ in range(len(str(question)))]
answer_list = [[] for _ in range(len(str(answer)))]
for index, image in enumerate(images):
ref_image.append(image["image"])
position = image["position"]
position = list(position.items())[0]
if position[0] == 'question':
insert_loc = position[1] -1
if insert_loc < 0:
insert_loc = 0
question_list[insert_loc].append(index)
if position[0] == 'answer':
insert_loc = position[1] -1
if insert_loc < 0:
insert_loc = 0
answer_list[insert_loc].append(index)
new_question = ''
new_answer = ''
question = str(question)
for char_i in range(len(question)):
if question_list[char_i] == []:
new_question = new_question + question[char_i]
if question_list[char_i] != []:
for img_index in question_list[char_i]:
try:
new_question = new_question + '<image>' + self.image_padding_tokens[img_index] + '</image>'
except:
print("Error: out of max image input size")
new_question = new_question + question[char_i]
answer = str(answer)
for char_i in range(len(str(answer))):
if answer_list[char_i] == []:
new_answer = new_answer + answer[char_i]
if answer_list[char_i] != []:
for img_index in answer_list[char_i]:
try:
new_answer = new_answer + '<image>' + self.image_padding_tokens[img_index] + '</image>'
except:
print("Error: out of max image input size")
new_answer = new_answer + answer[char_i]
new_answer = new_answer.replace('•','')
return ref_image,new_question,new_answer
# torch.set_printoptions(profile="full")
# text_tokenizer = '/home/cs/leijiayu/wuchaoyi/Finetune_LLAMA/LLAMA_Model/tokenizer'
# dataset = multi_dataset(text_tokenizer = text_tokenizer)
# print(len(dataset))
# for i in range(10):
# dataset[i]
# input() | RadFM-main | src/Dataset/multi_dataset.py |
from torch.utils.data import Dataset
import numpy as np
import transformers
import pandas as pd
import copy
import random
import os
import tqdm
import torch
import json
from PIL import Image
import torchvision
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer
from torchvision import transforms
class Paper_Inline_dataset(Dataset):
def __init__(self, csv_path,img_path, sample_sentence_length = 50,max_img_size = 3):
self.max_img_size = max_img_size
self.sample_sentence_length = sample_sentence_length
self.img_path = img_path
self.paper_path = np.array(pd.read_csv(csv_path)['PMC_path'])
#normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
#normalize,
])
def __len__(self):
return self.paper_path.shape[0]
def __getitem__(self, idx):
# lang_x, vision_x, attention_mask, labels
paper_json = self.paper_path[idx]
PMC_name = paper_json.rsplit('/',2)[-1].split('.')[0]
sentences_list = json.load(open(paper_json,'r'))
image_dict, question, answer = self.random_sample_sentence(sentences_list,PMC_name)
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
def random_sample_sentence(self, sentences_list, PMC_name):
sentences_length = len(sentences_list)
p = random.random()
if p>=0.5:
if len(sentences_list) > self.sample_sentence_length:
start = random.randint(0, sentences_length-self.sample_sentence_length)
sentences_list = sentences_list[start:(start + self.sample_sentence_length)]
else:
if len(sentences_list) > self.sample_sentence_length:
sample_start = []
for sentence_id in range(len(sentences_list)):
if sentences_list[sentence_id]['img_ref'] != []:
if sentence_id-10 < 0:
sample_start.append(0)
else:
if sentence_id-10 > sentences_length-self.sample_sentence_length:
sample_start.append(sentences_length-self.sample_sentence_length)
else:
sample_start.append(sentence_id-10)
if sample_start == []:
start = random.randint(0, sentences_length-self.sample_sentence_length)
sentences_list = sentences_list[start:(start + self.sample_sentence_length)]
else:
start = sample_start[random.randint(0, len(sample_start)-1)]
sentences_list = sentences_list[start:(start + self.sample_sentence_length)]
text = ''
images = []
for ix in sentences_list:
sentence = ix
if sentence["img_ref"] == []:
text = text + sentence['text']
else:
if len(images)+len(sentence["img_ref"]) > self.max_img_size:
break
for img_id in sentence["img_ref"]:
img_path = self.img_path + '/' + PMC_name+'_'+ img_id + '.jpg'
if os.path.exists(img_path):
try:
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
images.append({'image':image, "position": {"answer":len(text)}})
except:
continue
text = text + sentence['text']
question = ''
answer = text
return images,question,answer
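# Sampling sketch for random_sample_sentence (illustrative): half of the time a window of
# sample_sentence_length consecutive sentences is taken uniformly at random; the other half,
# the window start is chosen roughly 10 sentences before a sentence that references a figure,
# so inline images are more likely to fall inside the sampled passage. At most max_img_size
# figures are attached, each anchored in the answer at the character offset where its
# referencing sentence begins.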
# csv_path = '/home/cs/leijiayu/wuchaoyi/multi_modal/Data/train_paper.csv'
# img_path = '/home/cs/leijiayu/data/all_images/figures/'
# dataset = multi_paper_dataset(csv_path, img_path)
# print(dataset[0])
| RadFM-main | src/Dataset/dataset/paper_inline.py |
import csv
import json
import logging
import os
import re
import difflib
import sys
import torch
import random
from abc import abstractmethod
from itertools import islice
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from collections.abc import Mapping
from torch.utils.data import DataLoader
import PIL
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
from collections import defaultdict
from PIL import Image
class Binary_Dataset(Dataset):
"""_summary_
Args:
Dataset (_type_): caption task formulated as vqa task for Chestxray classification dataset
csv_path (_type_): path to csv file
prompt_json_file (_type_): path to json file containing binary cls prompts, the answer is yes/no
Output:
Dict: {
"image_dict": {"image": image, "position": {"question": 0}}, # image is a tensor of shape [c,w,h,d] [3,512,512,1], position is a dict, random choice of 0 or len(question)
"question": question, # random choice of caption prompts
"answer":answer, # caption
}
"""
def __init__(self,csv_path,prompt_json_file):
data_info = pd.read_csv(csv_path)
self.img_path_list = np.asarray(data_info['image_path'])
self.disease_list = np.asarray(data_info['disease'])
self.answer_list = np.asarray(data_info['label'])
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
])
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
self.map_answer = {0:'no',1:'yes'}
def __len__(self):
return len(self.img_path_list)
def __getitem__(self, index):
img_path = self.img_path_list[index]
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
image = image.unsqueeze(-1) # c,w,h,d
answer = self.map_answer[self.answer_list[index]]
question = random.choice(self.caption_prompts).replace('disease',self.disease_list[index])
image_dict = [{
"image": image,
"position": {
"question": len(question)
}
}]
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
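# Minimal smoke test, mirroring pmcoa.py / pmcvqa.py; the csv and prompt paths below are
# placeholders for illustration, not files shipped with the repository.
if __name__ == "__main__":
    test_dataset = Binary_Dataset(csv_path = '../data_csv/chestxray_balance_test.csv',
                                  prompt_json_file = './yes_no_prompt.json')
    for i in range(10):
        test_data = test_dataset[i]
        print(test_data['image_dict'][0]['image'].shape, test_data['question'], test_data['answer'])  # [3,512,512,1], prompt, yes/no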
| RadFM-main | src/Dataset/dataset/binary.py |
from torch.utils.data import Dataset
import numpy as np
import transformers
import pandas as pd
import copy
import random
import os
import tqdm
import torch
import json
from PIL import Image
import torchvision
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer
from torchvision import transforms
from ast import literal_eval
import re
import math
class MedPix_Single_Dataset(Dataset):
def __init__(self, csv_path, img_root = "/gpfs/home/cs/leijiayu/data/MedPix/images/",down_sample_ratio = 5):
self.case_list = pd.read_csv(csv_path)
self.img_root = img_root
#normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
#normalize,
])
self.down_sample_ratio = down_sample_ratio
self.promt = {
"caption": [
"Describe this input image.",
"Help captioning the image.",
"What can be inflected from the scan?",
"Can you give a caption for this image?",
"Can you provide a brief summary of the radiology image?",
"Please write a report about the image?",
"Can you provide an analysis of this image?",
"Can you explain what is shown in this image?",
"What can be indicated from the radiologic scans?",
"What can you infer from this photograph?",
],
"modality": [
"What is the modality of the image?",
"What type of imaging technique was utilized?",
"What imaging technology was used?",
"Please tell me the modality of the image.",
"Describe the modality of the image.",
"Tell me the imaging technology used.",
"Can you specify the imaging modality used?",
"What kind of imaging modality was applied?",
"Which imaging technique was used for this image?",
"Could you identify the imaging modality of this picture?",
"What type of image modality was used here?",
"Can you describe the imaging technique used?"
],
"plane": [
"Please distinguish the plane of the image",
"Which view does this scan take from?",
"Describe the position.",
"What angle is this image taken from?",
"Can you explain the orientation of this picture?",
"From which direction was this shot taken?",
"Can you specify the plane of this picture?",
"From which standpoint is this image taken?",
"Tell me which plane is the image.",
"From what angle is this picture captured?",
"Can you determine the shot direction of this image?",
"Can you describe the plane of this image?",
],
"modality_yes_no": [
"Is this image shot in {object}?",
"Is this image in {object}?",
"Is {object} used fro this image?",
"Was this picture taken in {object}?",
"Was this photo captured in {object}?",
"Did they use {object} for this image?",
"Is this picture from {object}?",
"Is this scan shot in {object}?"
],
"plane_yes_no": [
"Is this image shot from {object} view?",
"Is this image in the view of {object}?",
"Was this scan in {object} view?",
"Is this photo shot in {object} position?",
"Was this picture taken from the perspective of {object}?",
"Is this image captured from {object} viewpoint?",
"Is this photograph from the angle of {object}?",
"Is this snapshot from the view of {object}?",
],
}
self.sample_list = {
'modality': ['HE - High Power (>200X)', 'MR - FLAIR', 'Mammograph', 'SPECT',
'MR - FLAIR w/Gd', 'UGI - Upper GI', 'OPHTH - Fundoscopy', 'SBFT - Small Bowel',
'Special Stain (specify in caption)', 'EM - Electron Microscopic',
'MR T2* gradient GRE', 'CT - Montage', 'ECG EKG', 'MR - T2 FLAIR w/Contrast',
'CT - noncontrast', 'MR - ADC Map (App Diff Coeff)', 'Interventional Procedure',
'BE - Barium Enema', 'HE - Low Power (<50x)', 'MR - T2 weighted', 'MR - T1W w/Gd (fat suppressed)',
'AN - Angiogram', 'OR - Operative photograph', 'Montage of Images', 'XR - Plain Film',
'MR - T1W - noncontrast', 'BAS - Barium Swallow', 'US - Ultrasound', 'LOGO',
'HE - Med Power (~50-200x)', 'NM - Nuclear Medicine', 'GR - Gross photograph',
'MR - Other Pulse Seq.', 'Dermatology', 'IVP/IVU - Intravenous Urogram/Pyelogram',
'VCUG - Voiding Cystourethrogram', 'CT - GI Contrast', 'MRS - Spectroscopy', 'MR - Montage',
'Photograph', 'MRA - MR Angiography/Venography', 'MR - T1W w/Gadolinium', 'HSG - Hysterosalpingogram',
'MR T2* gradient,GRE,MPGR,SWAN,SWI', 'Histology - Special Stain (specify in caption)', 'Venogram',
'Arthrogram', 'CT - Myelogram', 'US-D - Doppler Ultrasound', 'CT - GI & IV Contrast',
'CP - Clinical photograph', 'Histology (NOS)', 'Not Assigned', 'MR - PDW Proton Density',
'CT w/contrast (IV)', 'OPHTH - Slit-Lamp', 'CTA - CT Angiography', 'AN - Angiogram (Catheter)',
'MR - T1W SPGR', 'Tomography', 'EP - Endoscopy', 'PET-CT Fusion', 'MR - DWI Diffusion Weighted',
'Drawing', 'PET - Positron Emission', 'SPECT - Single Photon', 'RU - Retrograde Urogram',
'Myelogram', 'Fundoscopy', 'Virtual Colonoscopy', 'Photographs',
'Interventional Procedure (specify in caption)', 'MR - STIR', 'MR - FIESTA'],
'plane': ['Other View (see caption)',
'Mammo - CC', 'Sagittal', 'Image Plane', 'Mammo - XCC', 'Lateral', 'Longitudinal',
'Mammo - Mag CC', 'Frontal', 'Mammo - MLO', 'Transverse', 'Gross Pathology', 'Dermatology',
'3D Reconstruction', 'Photograph', 'Histology', 'PA', 'Decubitus', 'Multiple or Montage',
'Oblique', 'AP', 'Drawing', 'Axial', 'Coronal'],
}
def __len__(self):
return math.ceil(len(self.case_list)/self.down_sample_ratio)
def get_image(self, img_path):
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
image = image.unsqueeze(-1)
return image
def __getitem__(self, idx):
idx = (self.down_sample_ratio*idx +random.randint(0,self.down_sample_ratio-1))%len(self.case_list)
sample = self.case_list.iloc[idx]
answer = sample['context']
if sample['type'] == "modality" or sample['type'] == "plane":
pp = random.random()
if pp>0.5:
question = random.sample(self.promt[sample['type']],1)[0]
else:
question = random.sample(self.promt[sample['type']+'_yes_no'],1)[0]
ppp = random.random()
if ppp> 0.5:
question = question.format(object = answer)
answer = 'yes'
else:
                    sample_list = list(self.sample_list[sample['type']])  # copy, so the shared option list is not mutated across samples
try:
sample_list.remove(answer)
except:
pass
answer = random.sample(sample_list,1)[0]
question = question.format(object = answer)
answer = 'no'
else:
question = random.sample(self.promt[sample['type']],1)[0]
p = random.random()
images = []
if p>0.5:
try:
images.append(
{
"image": self.get_image(self.img_root+sample['name']),
"position": {
"question": len(question)
}
}
)
except:
pass
else:
try:
images.append(
{
"image": self.get_image(self.img_root+sample['name']),
"position": {
"question": 0
}
}
)
except:
pass
return {
"image_dict": images,
"question": str(question),
"answer": str(answer),
}
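# Question-construction sketch for MedPix_Single_Dataset (illustrative values): a "modality"
# or "plane" row is turned, with probability 0.5, into a yes/no question instead of an open
# one; the yes/no variant keeps the true label half of the time and otherwise swaps in a
# random distractor from sample_list, e.g. for context = "MR - T2 weighted":
#   open   -> ("What is the modality of the image?",       "MR - T2 weighted")
#   yes/no -> ("Is this image shot in MR - T2 weighted?",  "yes")
#   yes/no -> ("Is this image shot in XR - Plain Film?",   "no")
# "caption" rows always use an open captioning prompt, and the single image is anchored at
# the start or the end of the question with equal probability.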
class MedPix_Multi_Dataset(Dataset):
def __init__(self, csv_path, img_root = "/gpfs/home/cs/leijiayu/data/MedPix/images/"):
self.case_list = pd.read_csv(csv_path)
self.img_root = img_root
#normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
#normalize,
])
self.promt = {
"txFollowup": [
"What treatment should the patient take?",
"Please give me some treatment advise.",
"What is the recommended treatment for this condition?",
"What kind of treatment is necessary for this patient?",
"Can you suggest a suitable treatment for this case?",
"What treatment options are available for this patient?",
"What is the best course of treatment for this condition?",
"How to follow up with the patient?",
"What treatment should be administered for this illness?",
"What is the most effective treatment for this disease?"
],
"ddx": [
"What illness can you diagnose from this images?",
"What disease is shown in the scans?",
"Please make diagnosis with the input images?",
"What health issue can be inferred from these photos?",
"What is the diagnosis based on these medical scans?",
"Based on these scans, what is the patient suffering from?",
"What ailment can be deduced from these medical images?",
"Can you determine the illness from these medical photos?",
"Can you identify the disease from these scans?",
"What is the medical diagnosis based on these images?",
],
"dxHow": [
"What imaging technology is used for diagnosis?",
"What imaging tests are shown in the images?",
"What type of imaging technique is used in medical diagnosis?",
"What kind of imaging technology is used for medical purposes?",
"Which imaging tests are depicted in these pictures?",
"Can you identify the imaging tests in these images?",
"What kind of imaging technology is used in healthcare?",
"What imaging procedures are used for diagnosing diseases?",
"Can you name the imaging tests shown in these photographs?",
"Please distinguish the imaging type in these images",
],
# "diagnosis_yes_no":[
# "Does the patient have {object}?",
# "Is the patient infected with {object}?",
# "Does the patient test positive for {object}?",
# "Is the patient suffering from {object}?",
# "Has the patient contracted {object}?",
# "Is the patient diagnosed with {object}?",
# "Is the patient affected by {object}?",
# "Is the patient carrying the {object} virus?",
# "Is the patient stricken with {object}?",
# ],
"diagnosis":[
"What condition can be diagnosed from these pictures?",
"Can you interpret the disease from these medical scans?",
"What medical condition is depicted in these images?",
"Based on these images, what could be the potential diagnosis?",
"What health condition can be identified from these scans?",
"Can you diagnose the disease from these medical images?",
"What is the patient's condition according to these scans?",
"What medical issue can be determined from these photos?",
"Can you infer the illness from these medical scans?",
"What is the probable diagnosis based on these medical images?",
"What illness can you diagnose from this images?",
"What disease is shown in the scans?",
"Please make diagnosis with the input images?",
"What health issue can be inferred from these photos?",
"What is the diagnosis based on these medical scans?",
"Based on these scans, what is the patient suffering from?",
"What ailment can be deduced from these medical images?",
],
"findings":[
"Caption the case.",
"Describe your findings for this patient.",
"What is shown in the case?",
"Please help me write a report about the patient.",
"Can you provide a summary of the case?",
"What are the key points in this case?",
"Could you explain the details of the case?",
"What are your observations about the case?",
"Can you give an overview of the case?",
"How would you interpret this case?",
"What is your analysis of the patient?",
"Can you provide a brief on the patient?"
],
"exam":[
"Make a conclusion for this patient.",
"What are the exam results for this patient?",
"What is the diagnosis for this patient?",
"What are the symptoms presented by this patient?",
"Please make diagnosis with the input case.",
"Is there any abnormality with the presented case?",
"What can be reflected from the input images?",
"Please provide me with some diagnosis advise.",
"Can you provide a summary of the patient's condition?",
"Can you provide a detailed analysis of the patient's condition?"
],
"discussion":[
"Discuss about the case more.",
"Tell more about the patient's illness.",
"What image patterns or knowledge can help you make diagnosis?",
"Could you provide more details about the situation?",
"What additional information can you provide about the issue?",
"Can you explain more about the subject matter?",
"What other factors should be considered in this scenario?",
"Can you provide more context or background information?",
"What other relevant details can you share about this case?",
"Can you expand on your initial explanation?" ,
"What other insights can you provide on this matter?" ,
"Can you delve deeper into the specifics of the situation?",
],
}
def __len__(self):
return len(self.case_list)
def get_image(self, img_path):
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
image = image.unsqueeze(-1)
return image
def __getitem__(self, idx):
sample = self.case_list.iloc[idx]
answer = str(sample['context']).replace('• ','')
question = random.sample(self.promt[sample['type']],1)[0]
#question = random.sample(self.promt[sample['type']],1)[0]
history = sample['history']
if history is not None:
p = random.random()
if p>0.5:
try:
question = history + ' ' + question
except:
pass
image_names = sample['name'].split(',')
p = random.random()
images = []
if p>0.5:
for pp in image_names:
try:
images.append(
{
"image": self.get_image(self.img_root+pp),
"position": {
"question": len(question)
}
}
)
except:
pass
else:
for pp in image_names:
try:
images.append(
{
"image": self.get_image(self.img_root+pp),
"position": {
"question": 0
}
}
)
except:
pass
if sample['type'] =="findings":
pattern = r"\d+(\.\d+)?\s*(mm|cm|x\d+\s*cm)"
answer = re.sub(pattern, "", answer)
if len(images) > 10:
images = random.sample(images,10)
return {
"image_dict": images,
"question": str(question),
"answer":str(answer),
}
class MedPix_QA_Dataset(Dataset):
def __init__(self, csv_path, img_root = "/gpfs/home/cs/leijiayu/data/MedPix/images/"):
self.case_list = pd.read_csv(csv_path)
self.img_root = img_root
#normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
#normalize,
])
def __len__(self):
return len(self.case_list)
def get_image(self, img_path):
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
image = image.unsqueeze(-1)
return image
def __getitem__(self, idx):
sample = self.case_list.iloc[idx]
answer = sample['answer']
question = sample['question']
explanation = sample['explanation']
try:
answer = answer + '. '+ explanation
except:
pass
p = random.random()
images = []
if p>0.5:
try:
images.append(
{
"image": self.get_image(self.img_root+sample['name']),
"position": {
"question": len(question)
}
}
)
except:
pass
else:
try:
images.append(
{
"image": self.get_image(self.img_root+sample['name']),
"position": {
"question": 0
}
}
)
except:
pass
if len(images) > 10:
images = random.sample(images,10)
return {
"image_dict": images,
"question": str(question),
"answer": str(answer),
}
# dataset = MedPix_Single_Dataset(csv_path = '/gpfs/home/cs/leijiayu/data/MedPix/Preprocessor/MedPix_single_train.csv')
# for i in tqdm.tqdm(range(len(dataset))):
# sample = dataset[i]
# print(len(sample['image_dict']),sample['image_dict'][0]["image"].shape,sample['question'],sample['answer'])
# input()
# dataset = MedPix_Multi_Dataset(csv_path = '/gpfs/home/cs/leijiayu/data/MedPix/Preprocessor/MedPix_multi_train.csv')
# for i in tqdm.tqdm(range(len(dataset))):
# sample = dataset[i]
# print(len(sample['image_dict']),sample['image_dict'][0]["image"].shape,sample['question'],sample['answer'])
# input()
# dataset = MedPix_QA_Dataset(csv_path = '/gpfs/home/cs/leijiayu/data/MedPix/Preprocessor/MedPix_questions_train.csv')
# for i in tqdm.tqdm(range(len(dataset))):
# sample = dataset[i]
# print(len(sample['image_dict']),sample['image_dict'][0]["image"].shape,sample['question'],sample['answer'])
# input()
| RadFM-main | src/Dataset/dataset/MedPix_dataset.py |
from .radiopaedia import RadioVQA_Dataset,Radio_Modality_Dataset,Radiofeatures_Dataset,RadioCaption_Dataset
from .binary import Binary_Dataset
from .chestxray import ChestXray_Dataset
from .pmcvqa import PMCVQA_Dataset
from .pmcoa import PMCOA_Dataset
from .paper_inline import Paper_Inline_dataset
from .case_report import CaseReport_dataset
from .MedPix_dataset import MedPix_Multi_Dataset,MedPix_Single_Dataset,MedPix_QA_Dataset
| RadFM-main | src/Dataset/dataset/__init__.py |
import csv
import json
import logging
import os
import re
import difflib
import sys
import torch
import random
from abc import abstractmethod
from itertools import islice
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from collections.abc import Mapping
from torch.utils.data import DataLoader
import PIL
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
from collections import defaultdict
from PIL import Image
class PMCOA_Dataset(Dataset):
"""_summary_
Args:
Dataset (_type_): caption task formulated as vqa task for PMC-OA dataset
csv_path (_type_): path to csv file
img_root_dir (_type_): path to image root directory, with columns [PMC_ID,Figure_path,Caption]
prompt_json_file (_type_): path to json file containing caption prompts
Output:
Dict: {
"image_dict": {"image": image, "position": {"question": 0}}, # image is a tensor of shape [c,w,h,d] [3,512,512,1], position is a dict, random choice of 0 or len(question)
"question": question, # random choice of caption prompts
"answer":answer, # caption
}
"""
def __init__(self,csv_path,img_root_dir,prompt_json_file):
self.img_root_dir = img_root_dir
data_info = pd.read_csv(csv_path)
self.img_path_list = np.asarray(data_info['Figure_path'])
self.caption_list = np.asarray(data_info['Caption'])
# PMC_ID,Figure_path,Caption
# normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
# normalize,
])
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
def __len__(self):
return len(self.img_path_list)
def __getitem__(self, index):
file_name = self.img_path_list[index]
img_path = os.path.join(self.img_root_dir,file_name)
image = Image.open(img_path).convert('RGB')
image = self.transform(image) # normalize to [0,1]
image = image.unsqueeze(-1) # expand a dimension
answer = self.caption_list[index]
question = random.choice(self.caption_prompts)
if random.random() < 0.5:
image_dict = {
"image": image,
"position": {
"question": 0
}
}
else:
image_dict = {
"image": image,
"position": {
"question": len(question)
}
}
return {
"image_dict": [image_dict],
"question": question,
"answer":answer,
}
if __name__ == "__main__":
test_dataset = PMCOA_Dataset(csv_path = '../data_csv/pmcoa_image_caption_train.csv',
img_root_dir = '/home/cs/leijiayu/data/PMCVQA/caption_T060_filtered_top4_sep_v0_subfigures',
prompt_json_file = './caption_prompt.json')
for i in range(10):
test_data = test_dataset[i]
print(test_data['image_dict'][0]['image'].shape) # [3,512,512,1]
| RadFM-main | src/Dataset/dataset/pmcoa.py |
import csv
import json
import logging
import os
import re
import difflib
import sys
import torch
import random
from abc import abstractmethod
from itertools import islice
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from collections.abc import Mapping
from torch.utils.data import DataLoader
import PIL
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
from collections import defaultdict
from PIL import Image
class PMCVQA_Dataset(Dataset):
"""_summary_
Args:
Dataset (_type_):
csv_path (_type_): path to csv file
Output:
Dict: {
"image_dict": {"image": image, "position": {"question": 0}}, # image is a tensor of shape [c,w,h,d] [3,512,512,1], position is a dict, random choice of 0 or len(question)
"question": question, # random choice of caption prompts
"answer":answer, # caption
}
"""
def __init__(self,csv_path):
data_info = pd.read_csv(csv_path)
self.img_root_dir_list = np.asarray(data_info['img_root_dir'])
self.img_path_list = np.asarray(data_info['Figure_path'])
self.question_list = np.asarray(data_info['Question'])
self.answer_list = np.asarray(data_info['Answer'])
# PMC_ID,Figure_path,Question,Answer
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
])
def __len__(self):
return len(self.img_path_list)
def __getitem__(self, index):
file_name = self.img_path_list[index]
img_root_dir = self.img_root_dir_list[index]
img_path = os.path.join(img_root_dir,file_name)
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
image = image.unsqueeze(-1)
answer = self.answer_list[index]
question = str(self.question_list[index])
if random.random() < 0.5:
image_dict = {
"image": image,
"position": {
"question": 0
}
}
else:
image_dict = {
"image": image,
"position": {
"question": len(question)
}
}
return {
"image_dict": [image_dict],
"question": question,
"answer":answer,
}
if __name__ == "__main__":
test_dataset = PMCVQA_Dataset(csv_path = '../data_csv/pmcvqa_train.csv')
for i in range(10):
test_data = test_dataset[i]
print(test_data['image_dict'][0]['image'].shape) # [3,512,512,1]
| RadFM-main | src/Dataset/dataset/pmcvqa.py |
from torch.utils.data import Dataset
import numpy as np
import transformers
import pandas as pd
import copy
import random
import os
import numpy as np
import tqdm
import torch
import json
from PIL import Image
import torchvision
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer
from torchvision import transforms
from ast import literal_eval
class CaseReport_dataset(Dataset):
def __init__(self, csv_path,img_path):
self.img_path = img_path
self.question_list = pd.read_csv(csv_path)
#normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
#normalize,
])
def __len__(self):
return len(self.question_list)
def __getitem__(self, idx):
# lang_x, vision_x, attention_mask, labels
sample = self.question_list.iloc[idx]
PMC_id = sample['PMC_id']
img_ref = literal_eval(sample['img_ref'])
context = str(sample['context'])
sentences = context.split('.')
if len(sentences) > 5:
first_sentence = sentences[0]
last_sentences = ". ".join(context.split('.')[-4:])
context = first_sentence + '. ' + last_sentences
question = str(context) + '\n' + str(sample['question']).replace('Q:','')
answer = str(sample['answer']).replace('A:','')
images = []
for img_id in img_ref:
#print(img_ref)
img_path = self.img_path + '/' + PMC_id+'_'+ img_id + '.jpg'
try:
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
                p = random.random()
                if p > 0.5:
images.append({'image':image, "position": {"question":len(question)}})
else:
images.append({'image':image, "position": {"question":len(context)}})
except:
continue
return {
"image_dict": images,
"question": question,
"answer":answer,
}
# csv_path = '/gpfs/home/cs/leijiayu/wuchaoyi/multi_modal/Data/GPT_realdata/casa_report_train.csv'
# img_path = '/home/cs/leijiayu/data/all_images/figures/'
# dataset = CaseReport_dataset(csv_path, img_path)
# print(dataset[0])
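# Added note (a sketch, with illustrative values only): the 'img_ref' csv column is parsed
# with ast.literal_eval, so each cell is expected to hold a Python-style list literal of
# figure ids that are joined with PMC_id into '<PMC_id>_<img_id>.jpg'.
# example_row = {'PMC_id': 'PMC123456', 'img_ref': "['Fig1', 'Fig2']",
#                'context': '...', 'question': 'Q: ...', 'answer': 'A: ...'}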
| RadFM-main | src/Dataset/dataset/case_report.py |
import csv
import json
import logging
import os
import re
import difflib
import sys
import cv2
import torch
import random
from abc import abstractmethod
from itertools import islice
from scipy import ndimage
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from collections.abc import Mapping
from torch.utils.data import DataLoader
import PIL
import SimpleITK as sitk
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
from collections import defaultdict
from PIL import Image
import math
class Radio_Modality_Dataset(Dataset):
"""_summary_
Args:
        Dataset (_type_): modality recognition task formulated as a vqa task for the Radiopaedia dataset
csv_path (_type_): path to csv file
prompt_json_file (_type_): path to json file containing caption prompts
Output:
Dict: {
"image_dict": {"image": image, "position": {"question": 0}}, # image is a tensor of shape [s,c,w,h,d] like, [1,3,512,512,1], position is a dict, random choice of 0 or len(question)
"question": question, # random choice of caption prompts
"answer":answer, # caption
}
"""
def __init__(self,csv_path,prompt_json_file,modality_json_file,down_sample_ratio = 5):
data_info = pd.read_csv(csv_path)
self.down_sample_ratio = down_sample_ratio
self.img_path_list = np.asarray(data_info['image_path'])
self.caption_list = np.asarray(data_info['answer'])
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
with open(prompt_json_file, 'r') as f:
self.modality_prompts = json.load(f)['modality_prompt']
with open(modality_json_file, 'r') as f:
self.modality_sets = json.load(f)['modality']
def resize_image(self, image):
if len(image.shape) == 3:
if image.shape[0] > image.shape[2]:
image = image.transpose(2,0,1)
# print('before resize',image.shape)
image = cv2.resize(image,(512,512),interpolation = cv2.INTER_LINEAR)
# print('after resize',image.shape)
image = image[np.newaxis,:,:,:]
image = np.concatenate([image,image,image],axis=0)
if image.shape[-1] > 64:
image = ndimage.zoom(image, (3/image.shape[0],512/image.shape[1],512/image.shape[2],64/image.shape[3]), order=0)
else:
image = ndimage.zoom(image, (3/image.shape[0],512/image.shape[1],512/image.shape[2],1), order=0)
return image
def __len__(self):
return math.ceil(len(self.img_path_list)/self.down_sample_ratio)
def __getitem__(self, index):
index = (self.down_sample_ratio*index +random.randint(0,self.down_sample_ratio-1))%len(self.img_path_list)
img_path = self.img_path_list[index]
try:
itk_image = sitk.ReadImage(img_path)
image = sitk.GetArrayFromImage(itk_image)
image = self.resize_image(image)
except:
image = np.random.randn(3,512,512,4)
# image = np.load(img_path) # c,w,h,d
image = (image-image.min())/(image.max()-image.min())
contain_nan = (True in np.isnan(image))
if contain_nan:
image = np.random.randn(3,512,512,4)
image = torch.from_numpy(image).float()
if random.random() < 0.5:
            # directly ask for the modality (caption-style prompt)
answer = self.caption_list[index]
question = random.choice(self.caption_prompts)
else:
modality = self.caption_list[index]
if random.random() < 0.5:
                # ask about the true modality, so the answer is 'yes'
question = random.choice(self.modality_prompts).replace('modality',modality)
answer = 'yes'
else:
select_modality = modality
while select_modality == modality:
select_modality = random.choice(list(self.modality_sets))
                # ask about the mismatched modality, so the answer is 'no'
                question = random.choice(self.modality_prompts).replace('modality',select_modality)
answer = 'no'
if random.random() < 0.5:
image_dict = {
"image": image,
"position": {
"question": 0
}
}
else:
image_dict = {
"image": image,
"position": {
"question": len(question)
}
}
return {
"image_dict": [image_dict],
"question": question,
"answer":answer,
}
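# Added sketch of the json layouts this class assumes (example strings are illustrative;
# only the top-level keys are taken from the code above):
# prompt_json_file -> {"caption_prompt": ["What is the modality of this scan?", ...],
#                      "modality_prompt": ["Is this a modality scan?", ...]}
# modality_json_file -> {"modality": ["CT", "MRI", "X-ray", "Ultrasound", ...]}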
class RadioVQA_Dataset(Dataset):
"""_summary_
Args:
        Dataset (_type_): vqa task for the Radiopaedia dataset, with questions and answers taken from the csv
csv_path (_type_): path to csv file
prompt_json_file (_type_): path to json file containing caption prompts
Output:
Dict: {
"image_dict": {"image": image, "position": {"question": 0}}, # image is a tensor of shape [s,c,w,h,d] like, [1,3,512,512,1], position is a dict, random choice of 0 or len(question)
"question": question, # random choice of caption prompts
"answer":answer, # caption
}
"""
def __init__(self,csv_path):
data_info = pd.read_csv(csv_path)
# npy_path,image_caption,question,answer
self.img_path_list = np.asarray(data_info['image_path'])
self.question_list = np.asarray(data_info['question'])
self.answer_list = np.asarray(data_info['answer'])
def __len__(self):
return len(self.img_path_list)
def __getitem__(self, index):
img_path = self.img_path_list[index]
image = np.load(img_path)
image = (image-image.min())/(image.max()-image.min())
contain_nan = (True in np.isnan(image))
if contain_nan:
image = np.random.randn(3,512,512,4)
image = torch.from_numpy(image).float()
answer = self.answer_list[index]
question = self.question_list[index]
image_dict = []
for idx in range(image.shape[0]):
if random.random() < 0.5:
dict_idx = {
"image": image[idx],
"position": {
"question": 0
}
}
else:
dict_idx = {
"image": image[idx],
"position": {
"question": len(question)
}
}
image_dict.append(dict_idx)
        if len(image_dict) > 10:
            # keep at most 10 images per sample
            image_dict = random.sample(image_dict,10)
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
class RadioCaption_Dataset(Dataset):
def __init__(self,json_path,prompt_json_file):
with open(json_path, 'r') as file:
self.json_data = json.load(file)
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
def __len__(self):
return len(self.json_data)
def __getitem__(self, index):
data_index = self.json_data[index]
patient_pre = data_index['pre']
patient_pat = data_index['pat']
img_path = data_index['npy_path']
finding = data_index['finding']
impression = data_index['impression']
prompt_question = random.choice(self.caption_prompts)
question = patient_pat + ' ' + patient_pre + ' ' + prompt_question
image = np.load(img_path)
image = (image-image.min())/(image.max()-image.min())
contain_nan = (True in np.isnan(image))
if contain_nan:
image = np.random.randn(3,512,512,4)
image = torch.from_numpy(image).float()
answer = 'Finding: ' + str(finding) + 'Impression: ' + str(impression)
image_dict = []
for idx in range(image.shape[0]):
if random.random() < 0.5:
dict_idx = {
"image": image[idx],
"position": {
"question": 0
}
}
else:
dict_idx = {
"image": image[idx],
"position": {
"question": len(question)
}
}
image_dict.append(dict_idx)
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
class Radiofeatures_Dataset(Dataset):
def __init__(self,json_path,prompt_json_file,disease_prompt_json_file,article_json_file):
with open(json_path, 'r') as file:
self.json_data = json.load(file)
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
with open(disease_prompt_json_file, 'r') as f:
self.disease_prompts = json.load(f)['caption_prompt']
with open(article_json_file, 'r') as f:
self.article_sets = json.load(f).keys()
def __len__(self):
return len(self.json_data)
def __getitem__(self, index):
data_index = self.json_data[index]
patient_pre = data_index['pre']
patient_pat = data_index['pat']
img_path = data_index['npy_path']
radiographic_features = ' '.join(data_index['radiographic_features'])
image = np.load(img_path)
image = (image-image.min())/(image.max()-image.min())
contain_nan = (True in np.isnan(image))
if contain_nan:
image = np.random.randn(3,512,512,4)
image = torch.from_numpy(image).float()
if random.random() < 0.5:
articles = ' '.join(data_index['articles'])
prompt_question = random.choice(self.caption_prompts)
question = patient_pat + ' ' + patient_pre + ' ' + prompt_question
answer = articles + 'The Radiographic features can be summarized as follows.' + radiographic_features
else:
articles = data_index['title']
if random.random() < 0.5:
                # ask about the true disease, so the answer is 'yes'
question = random.choice(self.disease_prompts).replace('disease',articles)
answer = 'yes'
else:
select_articles = articles
while select_articles == articles:
select_articles = random.choice(list(self.article_sets))
question = random.choice(self.disease_prompts).replace('disease',select_articles)
answer = 'no'
image_dict = []
for idx in range(image.shape[0]):
if random.random() < 0.5:
dict_idx = {
"image": image[idx],
"position": {
"question": 0
}
}
else:
dict_idx = {
"image": image[idx],
"position": {
"question": len(question)
}
}
image_dict.append(dict_idx)
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
"""
class Radiodisease_Dataset(Dataset):
def __init__(self,json_path,prompt_json_file,article_json_file):
with open(json_path, 'r') as file:
self.json_data = json.load(file)
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
with open(article_json_file, 'r') as f:
self.article_sets = json.load(f).keys()
def __len__(self):
return len(self.json_data)
def __getitem__(self, index):
data_index = self.json_data[index]
patient_pre = data_index['pre']
patient_pat = data_index['pat']
img_path = data_index['npy_path']
articles = data_index['title']
if random.random() < 0.5:
            # ask about the true disease, so the answer is 'yes'
question = random.choice(self.caption_prompts).replace('disease',articles)
answer = 'yes'
else:
select_articles = articles
while select_articles == articles:
select_articles = random.choice(list(self.article_sets))
question = random.choice(self.caption_prompts).replace('disease',select_articles)
answer = 'no'
image = np.load(img_path)
image = (image-image.min())/(image.max()-image.min())
image = torch.from_numpy(image).float()
image_dict = []
for idx in range(image.shape[0]):
if random.random() < 0.5:
dict_idx = {
"image": image[idx],
"position": {
"question": 0
}
}
else:
dict_idx = {
"image": image[idx],
"position": {
"question": len(question)
}
}
image_dict.append(dict_idx)
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
class Radio_modality_binary_Dataset(Dataset):
def __init__(self,csv_path,prompt_json_file,modality_json_file):
data_info = pd.read_csv(csv_path)
self.img_path_list = np.asarray(data_info['image_path'])
self.caption_list = np.asarray(data_info['answer'])
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['modality_prompt']
with open(modality_json_file, 'r') as f:
self.modality_sets = json.load(f)['modality']
def resize_image(self, image):
if len(image.shape) == 3:
if image.shape[0] > image.shape[2]:
image = image.transpose(2,0,1)
# print('before resize',image.shape)
image = cv2.resize(image,(512,512),interpolation = cv2.INTER_LINEAR)
# print('after resize',image.shape)
image = image[np.newaxis,:,:,:]
image = np.concatenate([image,image,image],axis=0)
if image.shape[-1] > 64:
image = ndimage.zoom(image, (3/image.shape[0],512/image.shape[1],512/image.shape[2],64/image.shape[3]), order=0)
else:
image = ndimage.zoom(image, (3/image.shape[0],512/image.shape[1],512/image.shape[2],1), order=0)
return image
def __len__(self):
return len(self.img_path_list)
def __getitem__(self, index):
img_path = self.img_path_list[index]
try:
itk_image = sitk.ReadImage(img_path)
image = sitk.GetArrayFromImage(itk_image)
image = self.resize_image(image)
except:
image = np.random.randn(3,512,512,4)
image = (image-image.min())/(image.max()-image.min())
image = torch.from_numpy(image).float()
modality = self.caption_list[index]
if random.random() < 0.5:
            # ask about the true modality, so the answer is 'yes'
question = random.choice(self.caption_prompts).replace('modality',modality)
answer = 'yes'
else:
select_modality = modality
while select_modality == modality:
select_modality = random.choice(list(self.modality_sets))
question = random.choice(self.caption_prompts).replace('modality',modality)
answer = 'no'
if random.random() < 0.5:
image_dict = {
"image": image,
"position": {
"question": 0
}
}
else:
image_dict = {
"image": image,
"position": {
"question": len(question)
}
}
return {
"image_dict": [image_dict],
"question": question,
"answer":answer,
}
""" | RadFM-main | src/Dataset/dataset/radiopaedia.py |
import csv
import json
import logging
import os
import re
import difflib
import sys
import torch
import random
from abc import abstractmethod
from itertools import islice
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from collections.abc import Mapping
from torch.utils.data import DataLoader
import PIL
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
from collections import defaultdict
from PIL import Image
class ChestXray_Dataset(Dataset):
"""_summary_
Args:
Dataset (_type_): caption task formulated as vqa task for Chestxray classification dataset
csv_path (_type_): path to csv file
img_root_dir (_type_): path to image root directory
prompt_json_file (_type_): path to json file containing caption prompts
Output:
Dict: {
"image_dict": {"image": image, "position": {"question": 0}}, # image is a tensor of shape [c,w,h,d] [3,512,512,1], position is a dict, random choice of 0 or len(question)
"question": question, # random choice of caption prompts
"answer":answer, # caption
}
"""
def __init__(self,csv_path,prompt_json_file):
data_info = pd.read_csv(csv_path)
self.img_path_list = np.asarray(data_info['image_path'])
self.answer_list = np.asarray(data_info['label'])
self.transform = transforms.Compose([
transforms.RandomResizedCrop([512,512],scale=(0.8, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
])
with open(prompt_json_file, 'r') as f:
self.caption_prompts = json.load(f)['caption_prompt']
def __len__(self):
return len(self.img_path_list)
def __getitem__(self, index):
img_path = self.img_path_list[index]
try:
image = Image.open(img_path).convert('RGB')
image = self.transform(image)
image = image.unsqueeze(-1) # c,w,h,d
        except:
            # fall back to a random tensor so one unreadable file does not break the batch
            image = torch.from_numpy(np.random.randn(3,512,512,4)).float()
answer = self.answer_list[index]
question = random.choice(self.caption_prompts)
image_dict = [{
"image": image,
"position": {
"question": len(question)
}
}]
return {
"image_dict": image_dict,
"question": question,
"answer":answer,
}
if __name__ == "__main__":
test_dataset = ChestXray_Dataset(csv_path = '../data_csv/chestxray.csv',
prompt_json_file = './cls_prompt.json')
for i in range(10):
test_data = test_dataset[i]
print(test_data['image_dict'][0]['image'].shape) # [3,512,512,1]
    # need to make sure that every chestxray img_path actually points to an existing image
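# Added sketch of the assumed inputs (values are illustrative; only the column and key
# names come from the code above):
# chestxray.csv  -> columns image_path,label e.g. /path/to/xray_001.png,"pneumonia"
# cls_prompt.json -> {"caption_prompt": ["What abnormalities are visible in this image?", ...]}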
| RadFM-main | src/Dataset/dataset/chestxray.py |
import torch.nn as nn
import torch.nn.functional as F
import torch
from .helpers import PerceiverResampler
from .utils import get_visual_encoder
from einops import rearrange, repeat
from einops_exts import rearrange_many
import torchvision
from .vit_3d import ViT
from einops.layers.torch import Rearrange
from .transformer_decoder import TransformerDecoder,TransformerDecoderLayer
from torch.utils.checkpoint import checkpoint
from torch.autograd import Variable
import random
from transformers import AutoTokenizer, AutoModel
class MyEmbedding(nn.Module):
def __init__(self, num_embeddings=32000, embedding_dim=5120, perceiver_num=32,vis_dim = 768, patch_size=32, frame_patch_size = 4 ,seg_channel = 256):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
        self.weight = nn.Parameter(torch.randn((num_embeddings, embedding_dim)))
self.figure_token_weight = nn.Parameter(torch.randn((2, embedding_dim)))
self.flag = 'Text'
self.patch_size = patch_size
self.frame_patch_size = frame_patch_size
self.seg_channel = seg_channel
self.bert_tokenizer = AutoTokenizer.from_pretrained("/gpfs/home/cs/leijiayu/wuchaoyi/multi_modal/src/MedKEBERT")
self.bert_model = AutoModel.from_pretrained("/gpfs/home/cs/leijiayu/wuchaoyi/multi_modal/src/MedKEBERT")
self.bert_projection_fc = nn.Linear(768,vis_dim)
self.vision_encoder = ViT(
image_size = 512, # image size
frames = 512, # max number of frames
image_patch_size = patch_size, # image patch size
frame_patch_size = frame_patch_size, # frame patch size
dim = vis_dim,
depth = 12,
heads = 8,
mlp_dim = 2048,
dropout = 0.1,
emb_dropout = 0.1
)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose3d(vis_dim, vis_dim // 4, kernel_size=2, stride=2),
nn.BatchNorm3d(vis_dim // 4),
nn.GELU(),
nn.ConvTranspose3d(vis_dim // 4, vis_dim // 8, kernel_size=2, stride=2),
nn.GELU(),
)
decoder_layer = TransformerDecoderLayer(d_model = vis_dim, nhead = 8, normalize_before=True)
decoder_norm = nn.LayerNorm(vis_dim)
self.transformer_decoder = TransformerDecoder(decoder_layer = decoder_layer, num_layers = 4, norm=decoder_norm)
self.transformer_decoder_mlp = nn.Sequential(
nn.Linear(vis_dim,vis_dim // 4),
nn.GELU(),
nn.Linear(vis_dim // 4,vis_dim // 8),
nn.GELU(),
)
self.vis_dim = vis_dim
self.perceiver = PerceiverResampler(dim=self.vis_dim, num_latents = perceiver_num)
self.fc = nn.Linear(self.vis_dim,self.embedding_dim)
self.cls_head = nn.Linear(self.vis_dim // 8, 1)
def forward(self, text_input, vision_x, key_words_query = None):
if self.flag == 'Text':
B,S,C,H,W,D = vision_x.shape
vision_x = rearrange(vision_x, "b S c h w d-> (b S) c h w d")
vision_x, pos_embedding = self.vision_encoder(vision_x)
# vision_x = Variable(vision_x,requires_grad=True)
# vision_x, _ = checkpoint(self.vision_encoder,vision_x)
vision_x = rearrange(vision_x, "(b s F) v d -> b s F v d", b=B, s=S,F=1)
loss_matching = None
            if key_words_query is not None:
                # key_words_query: list[list[str]] of length B; each inner list holds the key
                # words that should be matched against the corresponding vision_x embedding
query_words = [item for sublist in key_words_query for item in sublist]
query_words = list(set(query_words))
if len(query_words)>16:
random.shuffle(query_words)
query_words = query_words[0:16]
if query_words != []:
contrastive_labels = torch.zeros(B,len(query_words)) #B Q
for i,sublist in enumerate(key_words_query):
for j,item in enumerate(query_words):
if item in sublist:
contrastive_labels[i,j] = 1
contrastive_labels = contrastive_labels.to(vision_x.dtype).to(vision_x.device)
with torch.no_grad():
query_words_embedding = self.bert_tokenizer(query_words, padding='max_length', truncation=True, max_length=256,return_tensors="pt")
query_words_embedding = self.bert_model(input_ids = query_words_embedding['input_ids'].to(vision_x.device),attention_mask = query_words_embedding['attention_mask'].to(vision_x.device))['last_hidden_state'][:,0,:].to(vision_x.dtype).to(vision_x.device) # Q,D
query_words_embedding = self.bert_projection_fc(query_words_embedding)
query_words_embedding = query_words_embedding.unsqueeze(0).repeat(B,1,1) # B,Q,D
_,N,_ = query_words_embedding.shape
                    image_embedding = vision_x.mean(dim=1) # average over the scan dimension (dim=1) to remove the multi-image axis, giving B F v d
image_embedding = rearrange(image_embedding, "b F v d -> b (F v) d")
pos_embedding = rearrange(pos_embedding, "(b s) v d -> b s v d", b=B, s=S)[:,0,:,:]
image_embedding = image_embedding.transpose(0,1) # (H/P W/P D/P) B D
pos_embedding = pos_embedding.transpose(0,1) # (H/P W/P D/P) B D
query_words_embedding = query_words_embedding.transpose(0,1) # N B D
oo_embedding,_ = self.transformer_decoder(query_words_embedding, image_embedding, pos = pos_embedding)
oo_embedding = oo_embedding.transpose(0,1) # B Q D
oo_embedding = rearrange(oo_embedding, 'b n d -> (b n) d')
oo_embedding = self.transformer_decoder_mlp(oo_embedding)
oo_embedding = self.cls_head(oo_embedding).mean(dim = -1)
oo_embedding = rearrange(oo_embedding, '(b n) -> b n', b=B, n=N) # B Q
# oo_embedding = rearrange(oo_embedding, 'b n d -> b (n d)') # B Q
loss_matching = F.binary_cross_entropy_with_logits(oo_embedding, contrastive_labels)
vision_x = self.perceiver(vision_x) # reshapes to (b, S, n, d)
#vision_x = checkpoint(self.perceiver,vision_x)
n = vision_x.shape[2]
vision_x = rearrange(vision_x, "b s n d -> (b s n) d")
vision_x = self.fc(vision_x)
vision_x = rearrange(vision_x, "(b T) d -> b T d", b=B, T=n*S)
embedding_weight = torch.cat([self.weight, self.figure_token_weight],dim = 0)
embedding_weight = embedding_weight.unsqueeze(0).repeat(B, 1, 1)
embedding_weight = torch.cat([embedding_weight,vision_x],dim = 1)
text_input = F.one_hot(text_input,embedding_weight.shape[1]).to(vision_x.dtype).to(vision_x.device)
out_put = torch.matmul(text_input, embedding_weight)
        ## useless for now. ignore the following code ##
# if self.flag == 'Seg':
# B,C,H,W,D = vision_x.shape
# _,N,_ = text_input.shape
# latent_embedding, pos_embedding = self.vision_encoder(vision_x) # B (H/P W/P D/P) D
# image_embedding = latent_embedding.transpose(0,1) # (H/P W/P D/P) B D
# pos_embedding = pos_embedding.transpose(0,1) # (H/P W/P D/P) B D
# text_input = text_input.transpose(0,1) # N B D
# mask_embedding,_ = self.transformer_decoder(text_input, image_embedding, pos = pos_embedding)
# mask_embedding = mask_embedding.transpose(0,1) # B N D
# mask_embedding = rearrange(mask_embedding, 'b n d -> (b n) d')
# mask_embedding = self.transformer_decoder_mlp(mask_embedding)
# mask_embedding = rearrange(mask_embedding, '(b n) d -> b n d', b=B, n=N,d = self.vis_dim // 8)
# vision_x = rearrange(latent_embedding,'b (h w d) c -> b c h w d', h = (H // self.patch_size), w = (W // self.patch_size), d = (D // self.frame_patch_size), c=self.vis_dim)
# vision_x = self.output_upscaling(vision_x) #B C H/4 W/4 D/4
# out_put = torch.einsum('bchwd,bnc->bnhwd', vision_x, mask_embedding)
return out_put,loss_matching
# model = MyEmbedding(vision_encoder_path = '')
# text_input = torch.randint(low=0, high=3210, size=(4,2048))
# image_input = torch.randn((4,3,3,512,512,4))
# key_words_query = [[],[],[],['consoliation']]
# print(model(text_input, image_input, key_words_query))
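# Rough shape bookkeeping for the commented demo above (added note, not asserted anywhere
# in the code): with B=4 texts of length 2048 and S=3 images per sample, out_put has shape
# (B, 2048, embedding_dim) because text_input is one-hot encoded against the extended
# vocabulary of 32000 text tokens + 2 figure tokens + S*32 perceiver visual tokens,
# and loss_matching is a scalar BCE loss (or None when no key words are provided).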
| RadFM-main | src/Model/RadFM/my_embedding_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from einops.layers.torch import Rearrange
from einops import rearrange, repeat
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list):
x = tensor_list.tensors
mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor_list):
x = tensor_list.tensors
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = torch.cat([
x_emb.unsqueeze(0).repeat(h, 1, 1),
y_emb.unsqueeze(1).repeat(1, w, 1),
], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
return pos
class PositionEmbeddingLearned3d(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256,h_patch_num = 16, w_patch_num = 16,d_patch_num = 64):
super().__init__()
self.h_patch_num = h_patch_num
self.w_patch_num = w_patch_num
self.d_patch_num = d_patch_num
self.row_embed = nn.Embedding(h_patch_num, num_pos_feats)
self.col_embed = nn.Embedding(w_patch_num, num_pos_feats)
self.dep_embed = nn.Embedding(d_patch_num, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
nn.init.uniform_(self.dep_embed.weight)
def forward(self, B, h, w, d,x):
i = (torch.arange(h, device=x.device) + 1)* (self.h_patch_num // h) -1
j = (torch.arange(w, device=x.device) + 1)* (self.w_patch_num // w) -1
k = (torch.arange(d, device=x.device) + 1)* (self.d_patch_num // d) -1
x_emb = self.row_embed(i).unsqueeze(1).unsqueeze(2).repeat(1,w,d,1)
y_emb = self.col_embed(j).unsqueeze(0).unsqueeze(2).repeat(h,1,d,1)
z_emb = self.dep_embed(k).unsqueeze(0).unsqueeze(1).repeat(h,w,1,1)
pos = torch.cat([x_emb,y_emb,z_emb,], dim=-1).unsqueeze(0).repeat(B, 1, 1, 1, 1)
pos = rearrange(pos,'b h w d c -> b (h w d) c')
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim // 2
if args.position_embedding in ('v2', 'sine'):
# TODO find a better way of exposing other arguments
position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
elif args.position_embedding in ('v3', 'learned'):
position_embedding = PositionEmbeddingLearned(N_steps)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
# Pos = PositionEmbeddingLearned3d()
# x = torch.randn((8,3,32,32,1))
# print(Pos(8,16,16,1,x)) | RadFM-main | src/Model/RadFM/position_encoding.py |
RadFM-main | src/Model/RadFM/__init__.py |
|
"""
Code modified from DETR transformer:
https://github.com/facebookresearch/detr
Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
import copy
from typing import Optional, List
import pickle as cp
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
T,B,C = memory.shape
intermediate = []
atten_layers = []
for n,layer in enumerate(self.layers):
residual=True
output,ws = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos,residual=residual)
atten_layers.append(ws)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output,atten_layers
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
residual=True):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2,ws = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)
tgt = self.norm1(tgt)
tgt2,ws = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
# attn_weights [B,NUM_Q,T]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt,ws
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2,ws = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2,attn_weights = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt,attn_weights
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
residual=True):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos,residual)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
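# A commented-out usage sketch (added; the dimensions mirror how the decoder is built in
# my_embedding_layer.py and are otherwise arbitrary): query tokens attend to a memory
# sequence and the per-layer attention maps are returned alongside the output.
# layer = TransformerDecoderLayer(d_model=768, nhead=8, normalize_before=True)
# decoder = TransformerDecoder(layer, num_layers=4, norm=nn.LayerNorm(768))
# tgt = torch.randn(16, 2, 768)      # (num_queries, batch, d_model)
# memory = torch.randn(256, 2, 768)  # (sequence_length, batch, d_model)
# out, attn_per_layer = decoder(tgt, memory)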
| RadFM-main | src/Model/RadFM/transformer_decoder.py |
from torch import nn
from transformers.models.llama import LlamaForCausalLM
from .my_embedding_layer import MyEmbedding
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import tqdm.auto as tqdm
import torch.nn as nn
import torch
from torch.utils.checkpoint import checkpoint
from torch.autograd import Variable
import numpy as np
class MultiLLaMAForCausalLM(nn.Module):
def __init__(self, lang_model_path):
super(MultiLLaMAForCausalLM, self).__init__()
self.lang_model = LlamaForCausalLM.from_pretrained(
lang_model_path,
)
self.lang_model.gradient_checkpointing_enable()
self.lang_model.enable_input_require_grads()
# self.lang_model.requires_grad_(False)
self.embedding_layer = MyEmbedding()
self.embedding_layer.weight = self.lang_model.get_input_embeddings().weight
self.hidden_dim = 5120
self.voc_size = 32000
def forward(self,lang_x, vision_x, attention_mask, labels, loss_reweight,key_words_query):
if labels.shape == lang_x.shape:
self.embedding_layer.flag = 'Text'
# lang_x = lang_x.to(vision_x.dtype)
# lang_x = lang_x + torch.zeros(1, dtype=lang_x.dtype, device=lang_x.device, requires_grad=True)
# vision_x = vision_x + torch.zeros(1, dtype=vision_x.dtype, device=vision_x.device, requires_grad=True)
# input_embedding = checkpoint(self.embedding_layer, lang_x, vision_x)
input_embedding,loss_match= self.embedding_layer(lang_x, vision_x,key_words_query) # ,loss_matching
output = self.lang_model(inputs_embeds = input_embedding,attention_mask = attention_mask, labels = labels)
logits = output['logits']
loss_reg = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_loss_reweight = loss_reweight[...,1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction = 'none')
shift_logits = shift_logits.view(-1, self.voc_size)
shift_labels = shift_labels.view(-1)
shift_loss_reweight = shift_loss_reweight.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
shift_loss_reweight = shift_loss_reweight.to(shift_logits.device)
loss_reg = loss_fct(shift_logits, shift_labels)
loss_reg = torch.sum(shift_loss_reweight*loss_reg)/torch.sum(shift_loss_reweight)
loss = loss_reg
if loss_match!= None:
loss = 0.8*loss + 0.2*loss_match
logits = output['logits'][..., :-1, :].contiguous().detach()
total = len(labels)
predictions = torch.argmax(logits, dim=-1)
labels = labels[..., 1:].contiguous()
Acc = torch.sum(torch.all(torch.logical_or(predictions == labels, labels == -100),dim = -1))
Accuracy = Acc /total
return dict(
# loss_reg = loss_reg,
# loss_matching = loss_matching,
logits = Accuracy,
loss = output['loss'],
)
        ### useless for now, ignore the following code ###
# if labels.shape == vision_x.shape:
# self.embedding_layer.flag = 'Seg'
# input_embedding = self.embedding_layer(lang_x, vision_x)
def generate(self, lang_x,vision_x):
self.embedding_layer.flag = 'Text'
with torch.no_grad():
input_embedding,_ = self.embedding_layer(lang_x, vision_x)
generation = self.lang_model.generate(inputs_embeds = input_embedding, max_new_tokens =200,top_k=50)
return generation
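# A commented-out usage sketch (added; the checkpoint path is a placeholder and in real
# use lang_x would contain the special image-placeholder token ids produced by the
# project's tokenizer):
# model = MultiLLaMAForCausalLM(lang_model_path='/path/to/llama-13b')
# lang_x = torch.randint(0, 32000, (1, 128))   # (B, seq_len) token ids
# vision_x = torch.randn(1, 3, 3, 512, 512, 4) # (B, S, C, H, W, D)
# generated_ids = model.generate(lang_x, vision_x)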
| RadFM-main | src/Model/RadFM/multimodality_model.py |
from .blocks import ModifiedResNet,PMC_CLIP_cfg
import torch
from torchvision import transforms
from PIL import Image
import torch.nn as nn
def extend_instance(obj, mixin):
"""Apply mixins to a class instance after creation"""
base_cls = obj.__class__
base_cls_name = obj.__class__.__name__
obj.__class__ = type(
base_cls_name, (mixin, base_cls), {}
) # mixin needs to go first for our forward() logic to work
def getattr_recursive(obj, att):
"""
Return nested attribute of obj
Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
"""
if att == "":
return obj
i = att.find(".")
if i < 0:
return getattr(obj, att)
else:
return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])
def setattr_recursive(obj, att, val):
"""
Set nested attribute of obj
Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
"""
if "." in att:
obj = getattr_recursive(obj, ".".join(att.split(".")[:-1]))
setattr(obj, att.split(".")[-1], val)
def get_visual_encoder(model_str):
"""
Args:
str (_type_): str_to_model_path
Return:
vision_model, visual_dim, img_preprocessor
"""
normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
img_preprocessor = transforms.Compose([
transforms.Resize((512,512), interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
if 'PMC-CLIP' in model_str:
#vision_cfg = json.load(open(model_args.visual_model_config,'r'))['vision_cfg']
vision_cfg = PMC_CLIP_cfg()
vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
vision_model = ModifiedResNet(
layers=vision_cfg.layers,
heads=vision_heads,
output_dim = 768,
image_size=vision_cfg.image_size,
width=vision_cfg.width
)
vision_model = vision_load_pretrain(vision_model,model_str)
vision_model = nn.Sequential(*list(vision_model.children())[:-2])
visual_dim = 1024
return vision_model,visual_dim,img_preprocessor
def vision_load_pretrain(resnet,model_path):
checkpoint = torch.load(model_path, map_location='cpu')
state_dict = checkpoint['state_dict']
state_dict = {k.replace('module.visual.',''): v for k, v in state_dict.items() if '.visual' in k}
resnet.load_state_dict(state_dict)
return resnet
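# Commented-out usage sketch (added; the checkpoint path and image file are placeholders):
# selecting the PMC-CLIP ResNet backbone and preprocessing a PIL image into the expected
# 512x512 tensor before extracting features.
# vision_model, visual_dim, img_preprocessor = get_visual_encoder('/path/to/PMC-CLIP.pt')
# img = img_preprocessor(Image.open('example.jpg').convert('RGB')).unsqueeze(0)
# features = vision_model(img)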
| RadFM-main | src/Model/RadFM/utils.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from .position_encoding import PositionEmbeddingLearned3d
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ViT(nn.Module):
def __init__(self, *, image_size, image_patch_size, frames, frame_patch_size, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(image_patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
assert frames % frame_patch_size == 0, 'Frames must be divisible by frame patch size'
self.patch_height = patch_height
self.patch_width = patch_width
self.frame_patch_size = frame_patch_size
num_patches = (image_height // patch_height) * (image_width // patch_width) * (frames // frame_patch_size)
patch_dim = channels * patch_height * patch_width * frame_patch_size
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) (f pf) -> b (h w f) (p1 p2 pf c)', p1 = patch_height, p2 = patch_width, pf = frame_patch_size),
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim),
)
self.pos_embedding = PositionEmbeddingLearned3d(dim // 3,(image_height // patch_height), (image_width // patch_width), (frames // frame_patch_size))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
def forward(self, video):
B, C, H, W, D = video.shape
x = self.to_patch_embedding(video)
b, n, _ = x.shape
pos = self.pos_embedding(B, H // self.patch_height, W // self.patch_width, D // self.frame_patch_size,x)
x += pos
x = self.dropout(x)
x = self.transformer(x)
return x,pos
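# Commented-out sketch (added) using the configuration that my_embedding_layer.py passes to
# this class: the 3D ViT patchifies a (B, 3, H, W, D) volume into 32x32x4 patches and
# returns per-patch tokens together with their learned 3D position embeddings.
# v = ViT(image_size=512, frames=512, image_patch_size=32, frame_patch_size=4,
#         dim=768, depth=12, heads=8, mlp_dim=2048, dropout=0.1, emb_dropout=0.1)
# volume = torch.randn(2, 3, 512, 512, 4)
# tokens, pos = v(volume) # tokens: (2, 16*16*1, 768), pos: (2, 256, 768)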
| RadFM-main | src/Model/RadFM/vit_3d.py |
from collections import OrderedDict
from typing import Tuple, Union, Callable, Optional
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint
class PMC_CLIP_cfg:
backbone: str = 'ModifiedRN50' # ['RN50', 'ModifiedRN50', 'MAE']
layers: Union[Tuple[int, int, int, int], int] = [3,4,6,3]
width: int = 64
head_width: int = 64
mlp_ratio: float = 4.0
patch_size: int = 16
image_size: Union[Tuple[int, int], int] = 224
timm_model_name: str = None # a valid model name overrides layers, width, patch_size
timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
patch_dropout: float = 0.0 # patch dropout rate, no dropout by default
drop_attention_rate: float = 0. # Transformer Dropout
patch_size: None
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ResNet(nn.Module):
"""
RN50
"""
def __init__(
self, layers, output_dim, heads, image_size=224, width=64,
block=Bottleneck,
):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 1-layer stem
self.conv1 = nn.Conv2d(3, width, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.relu1 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.head = nn.Linear(512 * 6, output_dim)
self.head = nn.Linear(512 * block.expansion, output_dim)
# embed_dim = width * 32 # the ResNet feature dimension
# self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(
self,
planes, blocks, stride=1,
block=Bottleneck,
):
layers = [block(self._inplanes, planes, stride)]
self._inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.maxpool(x)
return x
def forward(self, x):
# x[0]: [batch_size, 3, 224, 224]
# x[1]: [batch_size, 1]
x = self.stem(x) # [batch_size, 64, 56, 56]
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x) # [batch_size, 2048, 7, 7]
x = self.avgpool(x) # [batch_size, 2048, 1, 1]
x = torch.flatten(x, 1) # [batch_size, 2048*1*1]
x = self.head(x) # [batch_size, 1024]
visual_output = dict.fromkeys(["image_features", "mim_loss"], None)
visual_output.update({
'image_features': x,
})
return visual_output
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.relu3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
visual_output = dict.fromkeys(["image_features", "mim_loss"], None)
visual_output.update({
'image_features': x,
})
return visual_output
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class QuickGELU(nn.Module):
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model: int, n_head: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU,
drop_attention_rate: float = 0.,
):
super().__init__()
self.attn = nn.MultiheadAttention(
embed_dim=d_model,
num_heads=n_head,
dropout=drop_attention_rate,
)
self.ln_1 = LayerNorm(d_model)
mlp_width = int(d_model * mlp_ratio)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, mlp_width)),
("gelu", act_layer()),
("c_proj", nn.Linear(mlp_width, d_model))
]))
self.ln_2 = LayerNorm(d_model)
def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
x = x + self.mlp(self.ln_2(x))
return x
class PatchDropout(nn.Module):
"""
https://arxiv.org/abs/2212.00794
"""
def __init__(self, prob, exclude_first_token=True):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.exclude_first_token = exclude_first_token # exclude CLS token
def forward(self, x):
if not self.training or self.prob == 0.:
return x
if self.exclude_first_token:
cls_tokens, x = x[:, :1], x[:, 1:]
else:
cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
batch = x.size()[0]
num_tokens = x.size()[1]
batch_indices = torch.arange(batch)
batch_indices = batch_indices[..., None]
keep_prob = 1 - self.prob
num_patches_keep = max(1, int(num_tokens * keep_prob))
rand = torch.randn(batch, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
x = x[batch_indices, patch_indices_keep]
if self.exclude_first_token:
x = torch.cat((cls_tokens, x), dim=1)
return x
class Transformer(nn.Module):
def __init__(
self, width: int, layers: int, heads: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU,
drop_attention_rate: float = 0.,
):
super().__init__()
self.width = width
self.layers = layers
self.grad_checkpointing = False
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, mlp_ratio, act_layer=act_layer, drop_attention_rate=drop_attention_rate)
for _ in range(layers)
])
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
for r in self.resblocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(r, x, attn_mask)
else:
x = r(x, attn_mask=attn_mask)
return x | RadFM-main | src/Model/RadFM/blocks.py |
"""
Taken from https://github.com/lucidrains/flamingo-pytorch
"""
import torch
from einops import rearrange, repeat
from einops_exts import rearrange_many
from torch import einsum, nn
def exists(val):
return val is not None
def FeedForward(dim, mult=4):
inner_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim, bias=False),
nn.GELU(),
nn.Linear(inner_dim, dim, bias=False),
)
class PerceiverAttention(nn.Module):
def __init__(self, *, dim, dim_head=64, heads=8):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm_media = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
def forward(self, x, latents):
"""
Args:
x (torch.Tensor): image features
shape (b, T, n1, D)
            latents (torch.Tensor): latent features
shape (b, T, n2, D)
"""
x = self.norm_media(x)
latents = self.norm_latents(latents)
h = self.heads
q = self.to_q(latents)
kv_input = torch.cat((x, latents), dim=-2)
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
q, k, v = rearrange_many((q, k, v), "b t n (h d) -> b h t n d", h=h)
q = q * self.scale
# attention
sim = einsum("... i d, ... j d -> ... i j", q, k)
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
out = einsum("... i j, ... j d -> ... i d", attn, v)
out = rearrange(out, "b h t n d -> b t n (h d)", h=h)
return self.to_out(out)
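# Minimal usage sketch (illustrative only): latents attend to the concatenation of image
# features and themselves, so only the latent axis survives in the output.
def _demo_perceiver_attention():
    attn = PerceiverAttention(dim=512, dim_head=64, heads=8)
    x = torch.randn(1, 2, 196, 512)       # (b, T, n1, D) image features
    latents = torch.randn(1, 2, 64, 512)  # (b, T, n2, D) learned queries
    out = attn(x, latents)
    assert out.shape == (1, 2, 64, 512)
    return out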
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth=6,
dim_head=64,
heads=8,
num_latents=64,
max_num_media=None,
max_num_frames=None,
ff_mult=4,
):
super().__init__()
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.frame_embs = (
nn.Parameter(torch.randn(max_num_frames, dim))
if exists(max_num_frames)
else None
)
self.media_time_embs = (
nn.Parameter(torch.randn(max_num_media, 1, dim))
if exists(max_num_media)
else None
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
FeedForward(dim=dim, mult=ff_mult),
]
)
)
self.norm = nn.LayerNorm(dim)
def forward(self, x):
"""
Args:
x (torch.Tensor): image features
shape (b, T, F, v, D)
Returns:
shape (b, T, n, D) where n is self.num_latents
"""
b, T, F, v = x.shape[:4]
# frame and media time embeddings
if exists(self.frame_embs):
frame_embs = repeat(self.frame_embs[:F], "F d -> b T F v d", b=b, T=T, v=v)
x = x + frame_embs
x = rearrange(
x, "b T F v d -> b T (F v) d"
) # flatten the frame and spatial dimensions
if exists(self.media_time_embs):
x = x + self.media_time_embs[:T]
# blocks
latents = repeat(self.latents, "n d -> b T n d", b=b, T=T)
for attn, ff in self.layers:
latents = attn(x, latents) + latents
latents = ff(latents) + latents
return self.norm(latents)
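# Minimal usage sketch (illustrative only): the resampler compresses a variable number of
# visual tokens per media item into a fixed set of num_latents tokens; frame and media-time
# embeddings stay disabled because max_num_frames and max_num_media are left as None.
def _demo_perceiver_resampler():
    resampler = PerceiverResampler(dim=512, depth=2, num_latents=64)
    x = torch.randn(1, 3, 4, 196, 512)    # (b, T=3 media, F=4 frames, v=196 tokens, D)
    out = resampler(x)
    assert out.shape == (1, 3, 64, 512)   # (b, T, num_latents, D)
    return out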
# gated cross attention
class MaskedCrossAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
only_attend_immediate_media=True,
):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim_visual, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether for text to only attend to immediate preceding image, or all previous images
self.only_attend_immediate_media = only_attend_immediate_media
def forward(self, x, media, media_locations=None, attend_previous=True):
"""
Args:
x (torch.Tensor): text features
shape (B, T_txt, D_txt)
media (torch.Tensor): image features
                shape (B, T_img, n, D_img) where n is the number of latent tokens per image
media_locations: boolean mask identifying the media tokens in x
shape (B, T_txt)
attend_previous: bool
                If False, text tokens ignore the immediately preceding image and start attending from the following image.
"""
_, T_img, n = media.shape[:3]
h = self.heads
x = self.norm(x)
q = self.to_q(x)
media = rearrange(media, "b t n d -> b (t n) d")
k, v = self.to_kv(media).chunk(2, dim=-1)
q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=h)
q = q * self.scale
sim = einsum("... i d, ... j d -> ... i j", q, k)
if exists(media_locations):
# at each boolean of True, increment the time counter (relative to media time)
text_time = media_locations.cumsum(dim=-1)
media_time = torch.arange(T_img, device=x.device) + 1
if not attend_previous:
text_time[~media_locations] += 1
# make sure max is still the number of images in the sequence
text_time[
text_time
> repeat(
torch.count_nonzero(media_locations, dim=1),
"b -> b i",
i=text_time.shape[1],
)
] = 0
# text time must equal media time if only attending to most immediate image
# otherwise, as long as text time is greater than media time (if attending to all previous images / media)
mask_op = torch.eq if self.only_attend_immediate_media else torch.ge
text_to_media_mask = mask_op(
rearrange(text_time, "b i -> b 1 i 1"),
repeat(media_time, "j -> 1 1 1 (j n)", n=n),
)
sim = sim.masked_fill(~text_to_media_mask, -torch.finfo(sim.dtype).max)
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
if exists(media_locations) and self.only_attend_immediate_media:
# any text without a preceding media needs to have attention zeroed out
text_without_media_mask = text_time == 0
text_without_media_mask = rearrange(
text_without_media_mask, "b i -> b 1 i 1"
)
attn = attn.masked_fill(text_without_media_mask, 0.0)
out = einsum("... i j, ... j d -> ... i d", attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
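# Minimal usage sketch (illustrative only): text tokens cross-attend to resampled image
# latents; media_locations marks the image-token positions in the text so each token only
# sees its immediately preceding image when only_attend_immediate_media=True.
def _demo_masked_cross_attention():
    xattn = MaskedCrossAttention(dim=512, dim_visual=512, dim_head=64, heads=8)
    text = torch.randn(1, 10, 512)                     # (B, T_txt, D_txt)
    media = torch.randn(1, 2, 64, 512)                 # (B, T_img, n latents, D_img)
    media_locations = torch.zeros(1, 10, dtype=torch.bool)
    media_locations[0, 0] = True                       # first image before token 0
    media_locations[0, 5] = True                       # second image before token 5
    out = xattn(text, media, media_locations=media_locations)
    assert out.shape == (1, 10, 512)
    return out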
class GatedCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
ff_mult=4,
only_attend_immediate_media=True,
):
super().__init__()
self.attn = MaskedCrossAttention(
dim=dim,
dim_visual=dim_visual,
dim_head=dim_head,
heads=heads,
only_attend_immediate_media=only_attend_immediate_media,
)
self.attn_gate = nn.Parameter(torch.tensor([0.0]))
self.ff = FeedForward(dim, mult=ff_mult)
self.ff_gate = nn.Parameter(torch.tensor([0.0]))
def forward(
self,
x,
media,
media_locations=None,
attend_previous=True,
):
x = (
self.attn(
x,
media,
media_locations=media_locations,
attend_previous=attend_previous,
)
* self.attn_gate.tanh()
+ x
)
x = self.ff(x) * self.ff_gate.tanh() + x
return x
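# Minimal usage sketch (illustrative only): the tanh gates are initialised at zero, so at the
# start of training the block is an exact identity on the text stream and the language model's
# behaviour is unchanged until the gates open.
def _demo_gated_cross_attention_block():
    block = GatedCrossAttentionBlock(dim=512, dim_visual=512)
    text = torch.randn(1, 10, 512)
    media = torch.randn(1, 2, 64, 512)
    media_locations = torch.zeros(1, 10, dtype=torch.bool)
    media_locations[0, 0] = True
    out = block(text, media, media_locations=media_locations)
    assert torch.allclose(out, text)  # gates start closed: tanh(0) == 0
    return out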
| RadFM-main | src/Model/RadFM/helpers.py |
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import contextlib
import functools
import glob
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from collections.abc import Mapping
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
# isort: off
from transformers.integrations import (
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
# isort: on
import numpy as np
import torch
import torch.distributed as dist
from huggingface_hub import Repository, create_repo
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import __version__
from transformers.configuration_utils import PretrainedConfig
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from transformers.dependency_versions_check import dep_version_check
from transformers.modelcard import TrainingSummary
from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.optimization import Adafactor, get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_model_param_count,
get_module_class_from_name,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
FSDPOption,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
RemoveColumnsCollator,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
enable_full_determinism,
find_executable_batch_size,
get_last_checkpoint,
has_length,
number_of_arguments,
seed_worker,
set_seed,
speed_metrics,
)
from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments
from transformers.utils import (
CONFIG_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
can_return_loss,
find_labels,
get_full_repo_name,
is_accelerate_available,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_ipex_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_compile_available,
is_torch_neuroncore_available,
is_torch_tpu_available,
logging,
strtobool,
)
from transformers.utils.generic import ContextManagers
_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from transformers.utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if is_datasets_available():
import datasets
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from smdistributed.modelparallel import __version__ as SMP_VERSION
IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
else:
IS_SAGEMAKER_MP_POST_1_10 = False
if is_safetensors_available():
import safetensors.torch
skip_first_batches = None
if is_accelerate_available():
from accelerate import __version__ as accelerate_version
if version.parse(accelerate_version) >= version.parse("0.16"):
from accelerate import skip_first_batches
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
# Name of the files used for checkpointing
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
<Tip>
[`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
models.
</Tip>
args ([`TrainingArguments`], *optional*):
The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
`output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
data_collator (`DataCollator`, *optional*):
The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
default to [`default_data_collator`] if no `tokenizer` is provided, an instance of
[`DataCollatorWithPadding`] otherwise.
train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):
The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed.
Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
            distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
`torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
sets the seed of the RNGs used.
        eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]], *optional*):
The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
dataset prepending the dictionary key to the metric name.
tokenizer ([`PreTrainedTokenizerBase`], *optional*):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (`Callable[[], PreTrainedModel]`, *optional*):
A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
from a new instance of the model as given by this function.
            The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
be able to choose different architectures according to hyper parameters (such as layer count, sizes of
inner layers, dropout probabilities etc).
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return
a dictionary string to metric values.
callbacks (List of [`TrainerCallback`], *optional*):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in [here](callback).
If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model
and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
            A function that preprocesses the logits right before caching them at each evaluation step. Must take two
tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
by this function will be reflected in the predictions received by `compute_metrics`.
Note that the labels (second parameter) will be `None` if the dataset does not have them.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to `False` if model parallel or deepspeed is used, or if the default
`TrainingArguments.place_model_on_device` is overridden to return `False` .
- **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
in `train`)
"""
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
        # Seed must be set before instantiating the model when using model_init.
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# set the correct log level depending on the node
log_level = args.get_process_log_level()
logging.set_verbosity(log_level)
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
" overwrite your model when calling the `train` method. This will become a fatal error in the next"
" release.",
FutureWarning,
)
self.model_init = model_init
if model.__class__.__name__ in MODEL_MAPPING_NAMES:
raise ValueError(
f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
"computes hidden states and does not accept any labels. You should choose a model with a head "
"suitable for your task like any of the `AutoModelForXxx` listed at "
"https://huggingface.co/docs/transformers/model_doc/auto."
)
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
if (
getattr(model, "hf_device_map", None) is not None
and len([device for device in set(model.hf_device_map.values()) if device not in ["cpu", "disk"]]) > 1
and not self.is_model_parallel
):
self.is_model_parallel = True
# warn users
logger.info(
"You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set"
" to `True` to avoid any unexpected behavior such as device placement mismatching."
)
# At this stage the model is already loaded
if getattr(model, "is_loaded_in_8bit", False):
if getattr(model, "_is_int8_training_enabled", False):
logger.info(
"The model is loaded in 8-bit precision. To train this model you need to add additional modules"
" inside the model such as adapters using `peft` library and freeze the model weights. Please"
" check "
" the examples in https://github.com/huggingface/peft for more details."
)
else:
raise ValueError(
"The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit"
" model, please make sure that you have installed `bitsandbytes>=0.37.0`. "
)
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if len(args.fsdp) > 0:
raise ValueError(
"Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
self.fsdp = None
if len(args.fsdp) > 0:
if args.deepspeed:
raise ValueError(
"Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if not args.fsdp_config["xla"] and args.local_rank == -1:
raise ValueError("Using fsdp only works in distributed training.")
# dep_version_check("torch>=1.12.0")
# Would have to update setup.py with torch>=1.12.0
            # which isn't ideal given that it would force people not using FSDP to also use torch>=1.12.0
# below is the current alternative.
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
raise ValueError("FSDP requires PyTorch >= 1.12.0")
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy
if FSDPOption.FULL_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.FULL_SHARD
elif FSDPOption.SHARD_GRAD_OP in args.fsdp:
self.fsdp = ShardingStrategy.SHARD_GRAD_OP
elif FSDPOption.NO_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.NO_SHARD
self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE
if "backward_prefetch" in self.args.fsdp_config and "backward_pos" not in self.backward_prefetch:
self.backward_prefetch = BackwardPrefetch.BACKWARD_POST
self.forward_prefetch = False
if self.args.fsdp_config.get("forward_prefect", False):
self.forward_prefetch = True
self.limit_all_gathers = False
if self.args.fsdp_config.get("limit_all_gathers", False):
self.limit_all_gathers = True
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
# 4. Sharded DDP - same as MP
# 5. FSDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
or (self.fsdp is not None)
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False):
self._move_model_to_device(model, args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument. "
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
if is_torch_tpu_available() and self.optimizer is not None:
for param in self.model.parameters():
model_device = param.device
break
for param_group in self.optimizer.param_groups:
if len(param_group["params"]) > 0:
optimizer_device = param_group["params"][0].device
break
if model_device != optimizer_device:
raise ValueError(
"The model and the optimizer parameters are not on the same device, which probably means you"
" created an optimizer around your model **before** putting on the device and passing it to the"
" `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and"
" `model.to(xm.xla_device())` is performed before the optimizer creation in your script."
)
if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and (
self.optimizer is not None or self.lr_scheduler is not None
):
raise RuntimeError(
"Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create clone of distant repo and output directory if needed
if self.args.push_to_hub:
self.init_git_repo(at_init=True)
# In case of pull, we need to make sure every process has the latest.
if is_torch_tpu_available():
xm.rendezvous("init git repo")
elif args.local_rank != -1:
dist.barrier()
if self.args.should_save:
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
raise ValueError(
"The train_dataset does not implement __len__, max_steps has to be specified. "
"The number of steps needs to be known in advance for the learning rate scheduler."
)
if (
train_dataset is not None
and isinstance(train_dataset, torch.utils.data.IterableDataset)
and args.group_by_length
):
raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_cuda_amp = False
self.use_cpu_amp = False
# Mixed precision setup for SageMaker Model Parallel
if is_sagemaker_mp_enabled():
# BF16 + model parallelism in SageMaker: currently not supported, raise an error
if args.bf16:
raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ")
if IS_SAGEMAKER_MP_POST_1_10:
# When there's mismatch between SMP config and trainer argument, use SMP config as truth
if args.fp16 != smp.state.cfg.fp16:
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16},"
f"but FP16 provided in trainer argument is {args.fp16},"
f"setting to {smp.state.cfg.fp16}"
)
args.fp16 = smp.state.cfg.fp16
else:
# smp < 1.10 does not support fp16 in trainer.
if hasattr(smp.state.cfg, "fp16"):
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
"but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
)
if args.fp16 or args.bf16:
if args.half_precision_backend == "auto":
if args.device == torch.device("cpu"):
if args.fp16:
raise ValueError("Tried to use `fp16` but it is not supported on cpu")
elif _is_native_cpu_amp_available:
args.half_precision_backend = "cpu_amp"
else:
raise ValueError("Tried to use cpu amp but native cpu amp is not available")
else:
args.half_precision_backend = "cuda_amp"
logger.info(f"Using {args.half_precision_backend} half precision backend")
self.do_grad_scaling = False
if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled()):
# deepspeed and SageMaker Model Parallel manage their own half precision
if args.half_precision_backend == "cuda_amp":
self.use_cuda_amp = True
self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
# bf16 does not need grad scaling
self.do_grad_scaling = self.amp_dtype == torch.float16
if self.do_grad_scaling:
if self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
elif self.fsdp is not None:
from torch.distributed.fsdp.sharded_grad_scaler import (
ShardedGradScaler as FSDPShardedGradScaler,
)
self.scaler = FSDPShardedGradScaler()
elif is_torch_tpu_available():
from torch_xla.amp import GradScaler
self.scaler = GradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
elif args.half_precision_backend == "cpu_amp":
self.use_cpu_amp = True
self.amp_dtype = torch.bfloat16
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to"
" https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if (
is_sagemaker_mp_enabled()
and self.use_cuda_amp
and args.max_grad_norm is not None
and args.max_grad_norm > 0
):
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState(
is_local_process_zero=self.is_local_process_zero(),
is_world_process_zero=self.is_world_process_zero(),
)
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# returned to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = find_labels(self.model.__class__)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.can_return_loss = can_return_loss(self.model.__class__)
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# Internal variables to keep track of the original batch size
self._train_batch_size = args.train_batch_size
# very last
self._memory_tracker.stop_and_update_metrics()
# torch.compile
if args.torch_compile and not is_torch_compile_available():
raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.")
def add_callback(self, callback):
"""
Add a callback to the current list of [`~transformer.TrainerCallback`].
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it.
If the callback is not found, returns `None` (and no error is raised).
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will pop the first member of that class found in the list of callbacks.
Returns:
[`~transformer.TrainerCallback`]: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of [`~transformer.TrainerCallback`].
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
model = model.to(device)
# Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
model.tie_weights()
def _set_signature_columns_if_needed(self):
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return dataset
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set"
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
" you can safely ignore this message."
)
columns = [k for k in signature_columns if k in dataset.column_names]
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_collator_with_removed_columns(
self, data_collator: Callable, description: Optional[str] = None
) -> Callable:
"""Wrap the data collator in a callable removing unused columns."""
if not self.args.remove_unused_columns:
return data_collator
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
remove_columns_collator = RemoveColumnsCollator(
data_collator=data_collator,
signature_columns=signature_columns,
logger=logger,
description=description,
model_name=self.model.__class__.__name__,
)
return remove_columns_collator
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if self.train_dataset is None or not has_length(self.train_dataset):
return None
generator = None
if self.args.world_size <= 1:
generator = torch.Generator()
# for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with
# `args.seed`) if data_seed isn't provided.
# Further on in this method, we default to `args.seed` instead.
if self.args.data_seed is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
else:
seed = self.args.data_seed
generator.manual_seed(seed)
seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.args.train_batch_size * self.args.gradient_accumulation_steps,
dataset=self.train_dataset,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.args.train_batch_size * self.args.gradient_accumulation_steps,
dataset=self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=seed,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset, generator=generator)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=seed,
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
data_collator = self.data_collator
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="training")
if isinstance(train_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self._train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self._train_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
        ### Added by Wu Chaoyi ###
        if self.args.data_sampler is not None:
#print(self.args.world_size,self.args.process_index,self.args.batch_size_2D,self.args.batch_size_3D)
train_sampler = self.args.data_sampler(
dataset=self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
batch_size_2D = self.args.batch_size_2D,
batch_size_3D = self.args.batch_size_3D,
)
return DataLoader(
train_dataset,
batch_sampler=train_sampler,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
worker_init_fn=seed_worker,
)
else:
            ### Added by Wu Chaoyi ###
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self._train_batch_size,
sampler=train_sampler,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
worker_init_fn=seed_worker,
)
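    ### Illustrative sketch only (not the actual RadFM sampler): the `data_sampler` hook used in
    ### `get_train_dataloader` above is treated as a distributed *batch* sampler constructed as
    ### data_sampler(dataset=..., num_replicas=..., rank=..., batch_size_2D=..., batch_size_3D=...).
    ### The skeleton below only demonstrates that contract; the `dataset.is_3D(idx)` helper it
    ### relies on is a hypothetical assumption for illustration.
    class _ExampleModalityBatchSampler(torch.utils.data.Sampler):
        def __init__(self, dataset, num_replicas, rank, batch_size_2D, batch_size_3D):
            indices = list(range(len(dataset)))
            # Split indices by modality (hypothetical dataset.is_3D) and shard them across ranks.
            self.indices_2d = [i for i in indices if not dataset.is_3D(i)][rank::num_replicas]
            self.indices_3d = [i for i in indices if dataset.is_3D(i)][rank::num_replicas]
            self.batch_size_2D = batch_size_2D
            self.batch_size_3D = batch_size_3D
        def __iter__(self):
            # Yield modality-homogeneous batches so 2D and 3D scans never share a batch.
            for start in range(0, len(self.indices_2d), self.batch_size_2D):
                yield self.indices_2d[start : start + self.batch_size_2D]
            for start in range(0, len(self.indices_3d), self.batch_size_3D):
                yield self.indices_3d[start : start + self.batch_size_3D]
        def __len__(self):
            n_2d = math.ceil(len(self.indices_2d) / self.batch_size_2D)
            n_3d = math.ceil(len(self.indices_3d) / self.batch_size_3D)
            return n_2d + n_3d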
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (`torch.utils.data.Dataset`, *optional*):
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
by the `model.forward()` method are automatically removed. It must implement `__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
data_collator = self.data_collator
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (`torch.utils.data.Dataset`, *optional*):
The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. It must implement `__len__`.
"""
data_collator = self.data_collator
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="test")
if isinstance(test_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
`create_scheduler`) in a subclass.
"""
self.create_optimizer()
if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
# If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer
optimizer = self.optimizer.optimizer
else:
optimizer = self.optimizer
self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method in a subclass.
"""
opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if optimizer_cls.__name__ == "Adam8bit":
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
print(f"skipped {module}: {skipped/2**20}M params")
manager.register_module_override(module, "weight", {"optim_bits": 32})
logger.debug(f"bitsandbytes: will optimize {module} in fp32")
print(f"skipped: {skipped/2**20}M params")
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
return self.optimizer
@staticmethod
def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
"""
Returns the optimizer class and optimizer parameters based on the training arguments.
Args:
args (`transformers.training_args.TrainingArguments`):
The training arguments for the training session.
"""
# parse args.optim_args
optim_args = {}
if args.optim_args:
for mapping in args.optim_args.replace(" ", "").split(","):
key, value = mapping.split("=")
optim_args[key] = value
optimizer_kwargs = {"lr": args.learning_rate}
adam_kwargs = {
"betas": (args.adam_beta1, args.adam_beta2),
"eps": args.adam_epsilon,
}
if args.optim == OptimizerNames.ADAFACTOR:
optimizer_cls = Adafactor
optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
elif args.optim == OptimizerNames.ADAMW_HF:
from .optimization import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
from torch.optim import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
optimizer_kwargs.update({"fused": True})
elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
try:
from torch_xla.amp.syncfree import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
try:
from apex.optimizers import FusedAdam
optimizer_cls = FusedAdam
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
elif args.optim == OptimizerNames.ADAMW_BNB:
try:
from bitsandbytes.optim import Adam8bit
optimizer_cls = Adam8bit
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!")
elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
try:
from torchdistx.optimizers import AnyPrecisionAdamW
optimizer_cls = AnyPrecisionAdamW
optimizer_kwargs.update(adam_kwargs)
# TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
optimizer_kwargs.update(
{
"use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
"momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
"variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
"compensation_buffer_dtype": getattr(
torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
),
}
)
except ImportError:
raise ValueError("Please install https://github.com/pytorch/torchdistx")
elif args.optim == OptimizerNames.SGD:
optimizer_cls = torch.optim.SGD
elif args.optim == OptimizerNames.ADAGRAD:
optimizer_cls = torch.optim.Adagrad
else:
raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
return optimizer_cls, optimizer_kwargs
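    # Illustrative note only: with the library defaults, e.g.
    # TrainingArguments(output_dir="out", optim="adamw_torch"), this helper resolves to
    # torch.optim.AdamW with kwargs {"lr": 5e-5, "betas": (0.9, 0.999), "eps": 1e-8}; any extra
    # "key=value" pairs supplied through `args.optim_args` are parsed into `optim_args` above and
    # are only consumed by optimizers that read them (e.g. AnyPrecisionAdamW).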
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
"""
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return self.lr_scheduler
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
dataloader.dataset does not exist or has no length, estimates as best it can
"""
try:
dataset = dataloader.dataset
# Special case for IterableDatasetShard, we need to dig deeper
if isinstance(dataset, IterableDatasetShard):
return len(dataloader.dataset.dataset)
return len(dataloader.dataset)
except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader
return len(dataloader) * self.args.per_device_train_batch_size
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
elif self.hp_search_backend == HPSearchBackend.WANDB:
params = trial
for key, value in params.items():
if not hasattr(self.args, key):
logger.warning(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
" `TrainingArguments`."
)
continue
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info(f"Trial: {trial.params}")
if self.hp_search_backend == HPSearchBackend.SIGOPT:
logger.info(f"SigOpt Assignments: {trial.assignments}")
if self.hp_search_backend == HPSearchBackend.WANDB:
logger.info(f"W&B Sweep parameters: {trial}")
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.deepspeed import HfTrainerDeepSpeedConfig
self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
self.args.hf_deepspeed_config.trainer_config_process(self.args)
def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, step)
if trial.should_prune():
self.callback_handler.on_train_end(self.args, self.state, self.control)
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir, _internal_call=True)
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
def call_model_init(self, trial=None):
model_init_argcount = number_of_arguments(self.model_init)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def torch_jit_model_eval(self, model, dataloader, training=False):
if not training:
if dataloader is None:
logger.warning("failed to use PyTorch jit mode due to current dataloader is none.")
return model
example_batch = next(iter(dataloader))
example_batch = self._prepare_inputs(example_batch)
try:
jit_model = model.eval()
with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]):
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"):
if isinstance(example_batch, dict):
jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
else:
jit_model = torch.jit.trace(
jit_model,
example_kwarg_inputs={key: example_batch[key] for key in example_batch},
strict=False,
)
else:
jit_inputs = []
for key in example_batch:
example_tensor = torch.ones_like(example_batch[key])
jit_inputs.append(example_tensor)
jit_inputs = tuple(jit_inputs)
jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
jit_model = torch.jit.freeze(jit_model)
with torch.no_grad():
jit_model(**example_batch)
jit_model(**example_batch)
model = jit_model
self.use_cpu_amp = False
self.use_cuda_amp = False
except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
logger.warning(f"failed to use PyTorch jit mode due to: {e}.")
return model
def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
if not is_ipex_available():
raise ImportError(
"Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
" to https://github.com/intel/intel-extension-for-pytorch."
)
import intel_extension_for_pytorch as ipex
if not training:
model.eval()
dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
# conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
else:
if not model.training:
model.train()
model, self.optimizer = ipex.optimize(
model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
)
return model
def _wrap_model(self, model, training=True, dataloader=None):
if self.args.use_ipex:
dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
model = self.ipex_optimize_model(model, training, dtype=dtype)
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization) / 8bit models does not support DDP
if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False):
model = nn.DataParallel(model)
if self.args.jit_mode_eval:
start_time = time.time()
model = self.torch_jit_model_eval(model, dataloader, training)
self.jit_compilation_time = round(time.time() - start_time, 4)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16 or self.args.bf16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
# Distributed training using PyTorch FSDP
elif self.fsdp is not None:
if not self.args.fsdp_config["xla"]:
# PyTorch FSDP!
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
if FSDPOption.OFFLOAD in self.args.fsdp:
cpu_offload = CPUOffload(offload_params=True)
else:
cpu_offload = CPUOffload(offload_params=False)
auto_wrap_policy = None
if FSDPOption.AUTO_WRAP in self.args.fsdp:
if self.args.fsdp_config["fsdp_min_num_params"] > 0:
auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
)
elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
transformer_cls_to_wrap = set()
for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
transformer_cls = get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls=transformer_cls_to_wrap,
)
mixed_precision_policy = None
dtype = None
if self.args.fp16:
dtype = torch.float16
elif self.args.bf16:
dtype = torch.bfloat16
if dtype is not None:
mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
if type(model) != FSDP:
# XXX: Breaking the self.model convention but I see no way around it for now.
signature = inspect.signature(FSDP.__init__).parameters.keys()
kwargs = {}
for arg in ["limit_all_gathers", "forward_prefetch", "backward_prefetch"]:
if arg in signature:
kwargs[arg] = getattr(self, arg)
self.model = model = FSDP(
model,
sharding_strategy=self.fsdp,
cpu_offload=cpu_offload,
auto_wrap_policy=auto_wrap_policy,
mixed_precision=mixed_precision_policy,
device_id=self.args.device,
**kwargs,
)
else:
try:
from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
from torch_xla.distributed.fsdp import checkpoint_module
from torch_xla.distributed.fsdp.wrap import (
size_based_auto_wrap_policy,
transformer_auto_wrap_policy,
)
except ImportError:
raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
auto_wrap_policy = None
auto_wrapper_callable = None
if self.args.fsdp_config["fsdp_min_num_params"] > 0:
auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
)
elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
transformer_cls_to_wrap = set()
for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
transformer_cls = get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls=transformer_cls_to_wrap,
)
fsdp_kwargs = self.args.xla_fsdp_config
if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
# Apply gradient checkpointing to auto-wrapped sub-modules if specified
def auto_wrapper_callable(m, *args, **kwargs):
return FSDP(checkpoint_module(m), *args, **kwargs)
# Wrap the base model with an outer FSDP wrapper
self.model = model = FSDP(
model,
auto_wrap_policy=auto_wrap_policy,
auto_wrapper_callable=auto_wrapper_callable,
**fsdp_kwargs,
)
# Patch `xm.optimizer_step` should not reduce gradients in this case,
# as FSDP does not need gradient reduction over sharded parameters.
def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
loss = optimizer.step(**optimizer_args)
if barrier:
xm.mark_step()
return loss
xm.optimizer_step = patched_optimizer_step
elif is_sagemaker_dp_enabled():
model = nn.parallel.DistributedDataParallel(
model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
)
elif self.args.local_rank != -1:
kwargs = {}
if self.args.ddp_find_unused_parameters is not None:
kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
else:
kwargs["find_unused_parameters"] = True
if self.args.ddp_bucket_cap_mb is not None:
kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
if is_torch_neuroncore_available():
return model
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
**kwargs,
)
# torch.compile() needs to be called after wrapping the model with FSDP or DDP
# to ensure that it accounts for the graph breaks required by those wrappers
if self.args.torch_compile:
model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode)
return model
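# --- Illustrative configuration sketch (not part of the Trainer implementation) ---
# The tail of _wrap_model() above calls torch.compile() when `args.torch_compile` is set. A minimal
# sketch of the related arguments follows; the backend and mode values are examples, not recommendations.
def _example_enable_torch_compile():
    from transformers import TrainingArguments

    return TrainingArguments(
        output_dir="out",                  # placeholder path
        torch_compile=True,                # compile after DDP/FSDP wrapping
        torch_compile_backend="inductor",  # example backend
        torch_compile_mode="default",      # example mode
    )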
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
ignore_keys_for_eval: Optional[List[str]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (`str` or `bool`, *optional*):
If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
`bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
The trial run or the hyperparameter dictionary for hyperparameter search.
ignore_keys_for_eval (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions for evaluation during training.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
if resume_from_checkpoint is False:
resume_from_checkpoint = None
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
self._move_model_to_device(self.model, args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
self._train_batch_size = self.args.train_batch_size
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None:
self._load_from_checkpoint(resume_from_checkpoint)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self._move_model_to_device(self.model, args.device)
self.model_wrapped = self.model
inner_training_loop = find_executable_batch_size(
self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
)
return inner_training_loop(
args=args,
resume_from_checkpoint=resume_from_checkpoint,
trial=trial,
ignore_keys_for_eval=ignore_keys_for_eval,
)
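# --- Illustrative usage sketch (not part of the Trainer implementation) ---
# A minimal sketch of calling train() with checkpoint resumption; the `trainer` object and the
# checkpoint directory name are assumed placeholders.
def _example_resume_training(trainer):
    # resume from the most recent checkpoint in args.output_dir;
    # an explicit directory such as "out/checkpoint-500" could be passed instead
    return trainer.train(resume_from_checkpoint=True)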
def _inner_training_loop(
self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
):
self._train_batch_size = batch_size
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
len_dataloader = None
if has_length(train_dataloader):
len_dataloader = len(train_dataloader)
num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
num_examples = self.num_examples(train_dataloader)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size
max_steps = args.max_steps
# Setting a very large number of epochs so we go as many times as necessary over the iterator.
num_train_epochs = sys.maxsize
num_update_steps_per_epoch = max_steps
num_examples = total_train_batch_size * args.max_steps
num_train_samples = args.max_steps * total_train_batch_size
else:
raise ValueError(
"args.max_steps must be set to a positive value if dataloader does not have a length, was"
f" {args.max_steps}"
)
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
if self.args.n_gpu > 1:
# nn.DataParallel(model) replicates the model, creating new variables, and the module
# references registered here no longer work on the other GPUs, breaking the module.
raise ValueError(
"Currently --debug underflow_overflow is not supported under DP. Please use DDP"
" (torch.distributed.launch)."
)
else:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = (
self.sharded_ddp is not None
and self.sharded_ddp != ShardedDDPOption.SIMPLE
or is_sagemaker_mp_enabled()
or self.fsdp is not None
)
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Activate gradient checkpointing if needed
if args.gradient_checkpointing:
self.model.gradient_checkpointing_enable()
model = self._wrap_model(self.model_wrapped)
if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
self._load_from_checkpoint(resume_from_checkpoint, model)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples:,}")
logger.info(f" Num Epochs = {num_train_epochs:,}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size:,}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps:,}")
logger.info(f" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
if skip_first_batches is None:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first"
f" {steps_trained_in_current_epoch} batches in the first epoch. If this takes a lot of time,"
" you can install the latest version of Accelerate with `pip install -U accelerate`.You can"
" also add the `--ignore_data_skip` flag to your launch command, but you will resume the"
" training on data already seen by your model."
)
else:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first"
f" {steps_trained_in_current_epoch} batches in the first epoch."
)
if self.is_local_process_zero() and not args.disable_tqdm and skip_first_batches is None:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
if self.hp_name is not None and self._trial is not None:
# use self._trial because the SigOpt/Optuna HPO only calls `_hp_search_setup(trial)` instead of passing the
# trial parameter to `train` when using DDP.
self.state.trial_name = self.hp_name(self._trial)
if trial is not None:
assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
self.state.trial_params = hp_params(assignments)
else:
self.state.trial_params = None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
train_dataloader.sampler, RandomSampler
)
if is_torch_less_than_1_11 or not is_random_sampler:
# We just need to begin an iteration to create the randomization of the sampler.
# That was before PyTorch 1.11 however...
for _ in train_dataloader:
break
else:
# Otherwise we need to call the whooooole sampler cause there is some random operation added
# AT THE VERY END!
_ = list(train_dataloader.sampler)
total_batched_samples = 0
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if len_dataloader is not None
else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
rng_to_sync = False
steps_skipped = 0
if skip_first_batches is not None and steps_trained_in_current_epoch > 0:
epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch)
steps_skipped = steps_trained_in_current_epoch
steps_trained_in_current_epoch = 0
rng_to_sync = True
step = -1
for step, inputs in enumerate(epoch_iterator):
total_batched_samples += 1
if rng_to_sync:
self._load_rng_state(resume_from_checkpoint)
rng_to_sync = False
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
(total_batched_samples % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss_step = self.training_step(model, inputs)
else:
tr_loss_step = self.training_step(model, inputs)
if (
args.logging_nan_inf_filter
and not is_torch_tpu_available()
and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
):
# if loss is nan or inf simply add the average of previous logged losses
tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
else:
tr_loss += tr_loss_step
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if total_batched_samples % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.do_grad_scaling:
# Reduce gradients first for XLA
if is_torch_tpu_available():
gradients = xm._fetch_gradients(self.optimizer)
xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if is_sagemaker_mp_enabled() and args.fp16:
self.optimizer.clip_master_grads(args.max_grad_norm)
elif hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
if self.do_grad_scaling:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
xm.optimizer_step(self.optimizer)
elif self.do_grad_scaling:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
else:
self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
if step < 0:
logger.warning(
"There seems to be not a single sample in your epoch_iterator, stopping training at step"
f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
f" num_steps ({max_steps}) higher than the number of available samples."
)
self.control.should_training_stop = True
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
elif is_sagemaker_mp_enabled():
smp.barrier()
self._load_best_model()
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
self.log(metrics)
run_dir = self._get_output_dir(trial)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)
# Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
for checkpoint in checkpoints_sorted:
if checkpoint != self.state.best_model_checkpoint:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
return TrainOutput(self.state.global_step, train_loss, metrics)
def _get_output_dir(self, trial):
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
run_id = tune.get_trial_id()
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
run_id = trial.id
elif self.hp_search_backend == HPSearchBackend.WANDB:
import wandb
run_id = wandb.run.id
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
return run_dir
def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
if model is None:
model = self.model
config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME)
weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME)
weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME)
safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME)
if not any(
[os.path.isfile(f) for f in [weights_file, safe_weights_file, weights_index_file, safe_weights_index_file]]
):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}.")
if os.path.isfile(config_file):
config = PretrainedConfig.from_json_file(config_file)
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
logger.warning(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
)
if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file):
# If the model is on the GPU, it still works!
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")):
# If the 'user_content.pt' file exists, load with the new smp api.
# Checkpoint must have been saved with the new smp api.
smp.resume_from_checkpoint(
path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
)
else:
# If the 'user_content.pt' file does NOT exist, load with the old smp api.
# Checkpoint must have been saved with the old smp api.
if hasattr(self.args, "fp16") and self.args.fp16 is True:
logger.warning(
"Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported."
)
state_dict = torch.load(weights_file, map_location="cpu")
# Required for smp to not auto-translate state_dict from hf to smp (is already smp).
state_dict["_smp_is_partial"] = False
load_result = model.load_state_dict(state_dict, strict=True)
# release memory
del state_dict
else:
# We load the model state dict on the CPU to avoid an OOM error.
if self.args.save_safetensors and os.path.isfile(safe_weights_file):
state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu")
else:
state_dict = torch.load(weights_file, map_location="cpu")
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
# which takes *args instead of **kwargs
load_result = model.load_state_dict(state_dict, False)
# release memory
del state_dict
self._issue_warnings_after_load(load_result)
else:
# We load the sharded checkpoint
load_result = load_sharded_checkpoint(
model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors
)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
def _load_best_model(self):
logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME)
model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if os.path.exists(best_model_path) or os.path.exists(best_safe_model_path):
if self.deepspeed:
if self.model_wrapped is not None:
# this removes the pre-hooks from the previous engine
self.model_wrapped.destroy()
self.model_wrapped = None
# temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self,
num_training_steps=self.args.max_steps,
resume_from_checkpoint=self.state.best_model_checkpoint,
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
# If the 'user_content.pt' file exists, load with the new smp api.
# Checkpoint must have been saved with the new smp api.
smp.resume_from_checkpoint(
path=self.state.best_model_checkpoint,
tag=WEIGHTS_NAME,
partial=False,
load_optimizer=False,
)
else:
# If the 'user_content.pt' file does NOT exist, load with the old smp api.
# Checkpoint must have been saved with the old smp api.
if self.args.save_safetensors and os.path.isfile(best_safe_model_path):
state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu")
else:
state_dict = torch.load(best_model_path, map_location="cpu")
state_dict["_smp_is_partial"] = False
load_result = model.load_state_dict(state_dict, strict=True)
else:
# We load the model state dict on the CPU to avoid an OOM error.
if self.args.save_safetensors and os.path.isfile(best_safe_model_path):
state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu")
else:
state_dict = torch.load(best_model_path, map_location="cpu")
# If the model is on the GPU, it still works!
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
# which takes *args instead of **kwargs
load_result = model.load_state_dict(state_dict, False)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):
load_result = load_sharded_checkpoint(
model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
else:
logger.warning(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
"on multiple nodes, you should activate `--save_on_each_node`."
)
def _issue_warnings_after_load(self, load_result):
if len(load_result.missing_keys) != 0:
if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
self.model._keys_to_ignore_on_save
):
self.model.tie_weights()
else:
logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warning(
f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
if self.control.should_log:
if is_torch_tpu_available():
xm.mark_step()
logs: Dict[str, float] = {}
# all_gather + mean() to get average loss over all processes
tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
self.log(logs)
metrics = None
if self.control.should_evaluate:
if isinstance(self.eval_dataset, dict):
metrics = {}
for eval_dataset_name, eval_dataset in self.eval_dataset.items():
dataset_metrics = self.evaluate(
eval_dataset=eval_dataset,
ignore_keys=ignore_keys_for_eval,
metric_key_prefix=f"eval_{eval_dataset_name}",
)
metrics.update(dataset_metrics)
else:
metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
self._report_to_hp_search(trial, self.state.global_step, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
if self.args.world_size > 1:
process_index = self.args.process_index
rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth")
if not os.path.isfile(rng_file):
logger.info(
f"Didn't find an RNG file for process {process_index}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
try:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
except Exception as e:
logger.info(
f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
"\nThis won't yield the same results as if the training had not been interrupted."
)
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is None and trial is None:
self.store_flos()
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir, _internal_call=True)
if self.deepspeed:
# under zero3 the model file itself doesn't get saved since it's bogus, unless the deepspeed
# config `stage3_gather_16bit_weights_on_model_save` is True
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
smp.barrier()
if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
smp.save(
opt_state_dict,
os.path.join(output_dir, OPTIMIZER_NAME),
partial=True,
v3=smp.state.cfg.shard_optimizer_state,
)
if self.args.should_save:
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
elif self.args.should_save and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
# Save RNG state in non-distributed training
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.local_rank == -1:
# In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
if self.args.world_size <= 1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))
if self.args.push_to_hub:
self._push_from_checkpoint(output_dir)
# Maybe delete some older checkpoints.
if self.args.should_save:
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
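# --- Illustrative configuration sketch (not part of the Trainer implementation) ---
# The best-model bookkeeping and checkpoint rotation in _save_checkpoint() above are controlled by a few
# TrainingArguments; a minimal sketch follows (all values are examples only).
def _example_best_model_tracking():
    from transformers import TrainingArguments

    return TrainingArguments(
        output_dir="out",               # placeholder path
        evaluation_strategy="steps",
        eval_steps=500,
        save_strategy="steps",
        save_steps=500,
        load_best_model_at_end=True,    # reload the best checkpoint after training
        metric_for_best_model="loss",   # compared with np.greater/np.less above
        greater_is_better=False,
        save_total_limit=2,             # let _rotate_checkpoints() prune older ones
    )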
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
checkpoint_file_exists = (
glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
if is_sagemaker_mp_enabled()
else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
)
if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(checkpoint, "user_content.pt")):
# Optimizer checkpoint was saved with smp >= 1.10
def opt_load_hook(mod, opt):
opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
else:
# Optimizer checkpoint was saved with smp < 1.10
def opt_load_hook(mod, opt):
if IS_SAGEMAKER_MP_POST_1_10:
opt.load_state_dict(
smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)
)
else:
opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
self.model_wrapped.register_post_step_hook(opt_load_hook)
else:
# We use the CPU when training on one GPU to avoid OOM for GPU RAM when training big models.
# In distributed training however, we load directly on each GPU and risk the GPU OOM as it's more
# likely to get OOM on CPU (since we load the optimizer state num_gpu times).
map_location = self.args.device if self.args.world_size > 1 else "cpu"
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using `optuna`, `Ray Tune`, or `SigOpt`. The optimized quantity is determined
by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
and the sum of all metrics otherwise.
<Tip warning={true}>
To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
optimizer/scheduler.
</Tip>
Args:
hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
A function that defines the hyperparameter search space. Will default to
[`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
[`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
method. Will default to [`~trainer_utils.default_compute_objective`].
n_trials (`int`, *optional*, defaults to 20):
The number of trial runs to test.
direction (`str`, *optional*, defaults to `"minimize"`):
Whether to optimize for greater or lower objective values. Can be `"minimize"` or `"maximize"`; you should pick
`"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
on which one is installed. If all are installed, will default to optuna.
hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
A function that defines the trial/run name. Will default to None.
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
information see:
- the documentation of
[optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
- the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
- the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
Returns:
[`trainer_utils.BestRun`]: All the information about the best run. The experiment summary can be found in
the `run_summary` attribute for the Ray backend.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`. "
"To install ray run `pip install ray[tune]`. "
"To install sigopt run `pip install sigopt`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():
raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
if backend == HPSearchBackend.WANDB and not is_wandb_available():
raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.")
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
backend_dict = {
HPSearchBackend.OPTUNA: run_hp_search_optuna,
HPSearchBackend.RAY: run_hp_search_ray,
HPSearchBackend.SIGOPT: run_hp_search_sigopt,
HPSearchBackend.WANDB: run_hp_search_wandb,
}
best_run = backend_dict[backend](self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
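# --- Illustrative usage sketch (not part of the Trainer implementation) ---
# A minimal hyperparameter_search() sketch using the optuna backend; the search space and the
# assumption that the Trainer was built with `model_init=` are for illustration only.
def _example_hyperparameter_search(trainer):
    def optuna_hp_space(trial):
        return {
            "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
            "per_device_train_batch_size": trial.suggest_categorical(
                "per_device_train_batch_size", [8, 16, 32]
            ),
        }

    best_run = trainer.hyperparameter_search(
        hp_space=optuna_hp_space,
        backend="optuna",
        direction="minimize",
        n_trials=10,
    )
    return best_run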
def log(self, logs: Dict[str, float]) -> None:
"""
Log `logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
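# --- Illustrative subclassing sketch (not part of the Trainer implementation) ---
# log() above is documented as an override point; a minimal subclass that also prints the logs is
# sketched below. The class name is hypothetical.
def _example_logging_override():
    from transformers import Trainer

    class PrintingTrainer(Trainer):
        def log(self, logs):
            super().log(logs)
            # forward the same dict to any custom sink, here simply stdout
            print({**logs, "step": self.state.global_step})

    return PrintingTrainer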
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
"""
Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
"""
if isinstance(data, Mapping):
return type(data)({k: self._prepare_input(v) for k, v in data.items()})
elif isinstance(data, (tuple, list)):
return type(data)(self._prepare_input(v) for v in data)
elif isinstance(data, torch.Tensor):
kwargs = {"device": self.args.device}
if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)):
# NLP models' inputs are int/uint and those get adjusted to the right dtype of the
# embedding. Other models such as wav2vec2's inputs are already float and thus
# may need special handling to match the dtypes of the model.
kwargs.update({"dtype": self.args.hf_deepspeed_config.dtype()})
return data.to(**kwargs)
return data
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
inputs = self._prepare_input(inputs)
if len(inputs) == 0:
raise ValueError(
"The batch received was empty, your model won't be able to train on it. Double-check that your "
f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}."
)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def compute_loss_context_manager(self):
"""
A helper wrapper to group together context managers.
"""
return self.autocast_smart_context_manager()
def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True):
"""
A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
arguments, depending on the situation.
"""
if self.use_cuda_amp or self.use_cpu_amp:
if is_torch_greater_or_equal_than_1_10:
ctx_manager = (
torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
if self.use_cpu_amp
else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
)
else:
ctx_manager = torch.cuda.amp.autocast()
else:
ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress()
return ctx_manager
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to train.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
Return:
`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.do_grad_scaling:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
loss = self.label_smoother(outputs, labels, shift_labels=True)
else:
loss = self.label_smoother(outputs, labels)
else:
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
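# --- Illustrative subclassing sketch (not part of the Trainer implementation) ---
# compute_loss() above is documented as an override point; a minimal sketch using a class-weighted
# cross-entropy follows. The two-class weights and the assumption that the model returns `logits`
# are illustrative only.
def _example_compute_loss_override():
    import torch
    from torch import nn
    from transformers import Trainer

    class WeightedLossTrainer(Trainer):
        def compute_loss(self, model, inputs, return_outputs=False):
            labels = inputs.pop("labels")
            outputs = model(**inputs)
            logits = outputs.logits
            # assuming a two-class problem with the minority class up-weighted
            loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0], device=logits.device))
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            return (loss, outputs) if return_outputs else loss

    return WeightedLossTrainer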
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
"""
Will save the model, so you can reload it using `from_pretrained()`.
Will only save from the main process.
"""
if output_dir is None:
output_dir = self.args.output_dir
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
os.makedirs(output_dir, exist_ok=True)
state_dict = self.model_wrapped.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
if IS_SAGEMAKER_MP_POST_1_10:
# 'user_content.pt' indicates model state_dict saved with smp >= 1.10
Path(os.path.join(output_dir, "user_content.pt")).touch()
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp
or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
or self.fsdp is not None
):
state_dict = self.model.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
elif self.deepspeed:
# this takes care of everything as long as we aren't under zero3
if self.args.should_save:
self._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
# either use the deepspeed checkpoint to resume, or recover the full weights with
# zero_to_fp32.py stored in the checkpoint.
if self.args.should_save:
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
# logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_16bit_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
logger.warning(
"deepspeed.save_16bit_model didn't save the model, since"
" stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
" zero_to_fp32.py to recover weights"
)
self.deepspeed.save_checkpoint(output_dir)
elif self.args.should_save:
self._save(output_dir)
# Push to the Hub when `save_model` is called by the user.
if self.args.push_to_hub and not _internal_call:
self.push_to_hub(commit_message="Model save")
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
is_main_process=self.args.should_save,
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save)
if self.tokenizer is not None and self.args.should_save:
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if self.args.save_safetensors:
safetensors.torch.save_file(state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME))
else:
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(
output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += (
distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
)
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
for i in range(best_model_index, len(checkpoints_sorted) - 2):
checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint, ignore_errors=True)
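    # Rough illustration (added comment, not original code): with
    # `save_total_limit=2` and checkpoint-500 / checkpoint-1000 / checkpoint-1500
    # on disk, checkpoint-500 would be deleted here. If a best checkpoint is
    # tracked, `_sorted_checkpoints` reorders it away from the front of the list
    # (and `save_total_limit=1` is bumped to 2) so it is never the one removed.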
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init `compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (`Dataset`, *optional*):
Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.log(output.metrics)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
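    # Illustrative call sketch (added comment); `trainer` and `val_ds` are
    # hypothetical names:
    #
    #   metrics = trainer.evaluate()                      # uses self.eval_dataset
    #   metrics = trainer.evaluate(eval_dataset=val_ds, metric_key_prefix="val")
    #
    # The returned dict then holds keys such as "val_loss" plus the speed
    # metrics added above (e.g. "val_runtime", "val_samples_per_second").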
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in `evaluate()`.
Args:
test_dataset (`Dataset`):
                Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
                `model.forward()` method are automatically removed. Has to implement the method `__len__`.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"test"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"test_bleu" if the prefix is "test" (default)
<Tip>
If your predictions or labels have different sequence length (for instance because you're doing dynamic padding
in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
one array. The padding index is -100.
</Tip>
Returns: *NamedTuple* A namedtuple with the following keys:
- predictions (`np.ndarray`): The predictions on `test_dataset`.
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
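    # Illustrative call sketch (added comment); `trainer` and `test_ds` are
    # hypothetical names:
    #
    #   out = trainer.predict(test_ds)
    #   out.predictions   # np.ndarray of (padded) model outputs
    #   out.label_ids     # labels if `test_ds` contained them, else None
    #   out.metrics       # e.g. {"test_loss": ..., "test_runtime": ...} when labels/metrics exist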
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
args = self.args
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(
self, num_training_steps=0, resume_from_checkpoint=None, inference=True
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
model = self._wrap_model(self.model, training=False, dataloader=dataloader)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = self.args.eval_batch_size
logger.info(f"***** Running {description} *****")
if has_length(dataloader):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = getattr(dataloader, "dataset", None)
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
inputs_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
all_inputs = None
# Will be useful when we have an iterable dataset so don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# For batch samplers, batch_size is not known by the dataloader in advance.
if batch_size is None:
batch_size = observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
if is_torch_tpu_available():
xm.mark_step()
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs_decode is not None:
inputs_decode = self._pad_across_processes(inputs_decode)
inputs_decode = self._nested_gather(inputs_decode)
inputs_host = (
inputs_decode
if inputs_host is None
else nested_concat(inputs_host, inputs_decode, padding_index=-100)
)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
if self.preprocess_logits_for_metrics is not None:
logits = self.preprocess_logits_for_metrics(logits, labels)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if inputs_host is not None:
inputs_decode = nested_numpify(inputs_host)
all_inputs = (
inputs_decode
if all_inputs is None
else nested_concat(all_inputs, inputs_decode, padding_index=-100)
)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, inputs_host, labels_host = None, None, None, None
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if inputs_host is not None:
inputs_decode = nested_numpify(inputs_host)
all_inputs = (
inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100)
)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if has_length(eval_dataset):
num_samples = len(eval_dataset)
# The instance check is weird and does not actually check for the type, but whether the dataset has the right
# methods. Therefore we need to make sure it also has the attribute.
elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0:
num_samples = eval_dataset.num_examples
else:
if has_length(dataloader):
num_samples = self.num_examples(dataloader)
else: # both len(dataloader.dataset) and len(dataloader) fail
num_samples = observed_num_examples
if num_samples == 0 and observed_num_examples > 0:
num_samples = observed_num_examples
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samples has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
if all_inputs is not None:
all_inputs = nested_truncate(all_inputs, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
if args.include_inputs_for_metrics:
metrics = self.compute_metrics(
EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
)
else:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
if hasattr(self, "jit_compilation_time"):
metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
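    # Added note with a hedged example: setting `eval_accumulation_steps` in the
    # training arguments makes the loop above offload the accumulated
    # logits/labels to CPU numpy every N steps, bounding device memory during
    # evaluation at the cost of host-side concatenation, e.g.
    #
    #   args = TrainingArguments(output_dir="out", eval_accumulation_steps=20)
    #
    # (argument values here are illustrative only).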
def _nested_gather(self, tensors, name=None):
"""
        Gather value of `tensors` (tensor or list/tuple of nested tensors) from all processes so they can later be
        concatenated. The result stays as tensors; see `_gather_and_numpify` for the numpy variant.
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
# When extracting XLA graphs for compilation, max_size is 0,
# so use inequality to avoid errors.
if tensor.shape[1] >= max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
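    # Worked example (added comment): if one process holds a tensor of shape
    # (4, 7) and another holds (4, 9), both end up padded along dim 1 to (4, 9),
    # with the new positions filled by `pad_index` (-100), so the subsequent
    # gather/concatenation sees identically shaped tensors.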
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on `model` using `inputs`.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to evaluate.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (`bool`):
Whether or not to return the loss only.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
logits and labels (each being optional).
"""
has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
# For CLIP-like models capable of returning loss values.
        # If `return_loss` is not specified or is `None` in `inputs`, we check whether the default value of `return_loss`
# is `True` in `model.forward`.
return_loss = inputs.get("return_loss", None)
if return_loss is None:
return_loss = self.can_return_loss
loss_without_labels = True if len(self.label_names) == 0 and return_loss else False
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels or loss_without_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels or loss_without_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels or loss_without_labels:
with self.compute_loss_context_manager():
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
with self.compute_loss_context_manager():
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
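    # Hedged subclassing sketch (added comment, not original code) showing the
    # intended override point; `MyTrainer` and the post-processing step are
    # hypothetical:
    #
    #   class MyTrainer(Trainer):
    #       def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
    #           loss, logits, labels = super().prediction_step(
    #               model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
    #           # e.g. keep only the first logits tensor to save memory
    #           return loss, logits, labels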
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
operations for every backward + forward pass. If using another model, either implement such a method in the
model or subclass and override this method.
Args:
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def init_git_repo(self, at_init: bool = False):
"""
Initializes a git repo in `self.args.hub_model_id`.
Args:
at_init (`bool`, *optional*, defaults to `False`):
Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is
`True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped
out.
"""
if not self.is_world_process_zero():
return
if self.args.hub_model_id is None:
repo_name = Path(self.args.output_dir).absolute().name
else:
repo_name = self.args.hub_model_id
if "/" not in repo_name:
repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)
# Make sure the repo exists.
create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True)
try:
self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
except EnvironmentError:
if self.args.overwrite_output_dir and at_init:
# Try again after wiping output_dir
shutil.rmtree(self.args.output_dir)
self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
else:
raise
self.repo.git_pull()
# By default, ignore the checkpoint folders
if (
not os.path.exists(os.path.join(self.args.output_dir, ".gitignore"))
and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS
):
with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
writer.writelines(["checkpoint-*/"])
# Add "*.sagemaker" to .gitignore if using SageMaker
if os.environ.get("SM_TRAINING_ENV"):
self._add_sm_patterns_to_gitignore()
self.push_in_progress = None
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Union[str, List[str], None] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Union[str, List[str], None] = None,
dataset_tags: Union[str, List[str], None] = None,
dataset: Union[str, List[str], None] = None,
dataset_args: Union[str, List[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
language (`str`, *optional*):
The language of the model (if applicable)
license (`str`, *optional*):
The license of the model. Will default to the license of the pretrained model used, if the original
model given to the `Trainer` comes from a repo on the Hub.
tags (`str` or `List[str]`, *optional*):
Some tags to be included in the metadata of the model card.
model_name (`str`, *optional*):
The name of the model.
finetuned_from (`str`, *optional*):
The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
of the original model given to the `Trainer` (if it comes from the Hub).
tasks (`str` or `List[str]`, *optional*):
One or several task identifiers, to be included in the metadata of the model card.
dataset_tags (`str` or `List[str]`, *optional*):
One or several dataset tags, to be included in the metadata of the model card.
dataset (`str` or `List[str]`, *optional*):
One or several dataset identifiers, to be included in the metadata of the model card.
dataset_args (`str` or `List[str]`, *optional*):
One or several dataset arguments, to be included in the metadata of the model card.
"""
if not self.is_world_process_zero():
return
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
def _push_from_checkpoint(self, checkpoint_folder):
# Only push from one node.
if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
return
# If we haven't finished the last push, we don't do this one.
if self.push_in_progress is not None and not self.push_in_progress.is_done:
return
output_dir = self.args.output_dir
        # To avoid a new synchronization of all model weights, we just copy the files from the checkpoint folder
modeling_files = [CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME]
for modeling_file in modeling_files:
if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
# Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Same for the training arguments
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
try:
if self.args.hub_strategy == HubStrategy.CHECKPOINT:
# Temporarily move the checkpoint just saved for the push
tmp_checkpoint = os.path.join(output_dir, "last-checkpoint")
# We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a
# subfolder.
if os.path.isdir(tmp_checkpoint):
shutil.rmtree(tmp_checkpoint)
shutil.move(checkpoint_folder, tmp_checkpoint)
if self.args.save_strategy == IntervalStrategy.STEPS:
commit_message = f"Training in progress, step {self.state.global_step}"
else:
commit_message = f"Training in progress, epoch {int(self.state.epoch)}"
_, self.push_in_progress = self.repo.push_to_hub(
commit_message=commit_message, blocking=False, auto_lfs_prune=True
)
finally:
if self.args.hub_strategy == HubStrategy.CHECKPOINT:
# Move back the checkpoint to its place
shutil.move(tmp_checkpoint, checkpoint_folder)
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
"""
Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*.
Parameters:
commit_message (`str`, *optional*, defaults to `"End of training"`):
Message to commit while pushing.
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the `git push` has finished.
kwargs:
Additional keyword arguments passed along to [`~Trainer.create_model_card`].
Returns:
            The url of the commit of your model in the given repository if `blocking=False`, or a tuple with the url
            of the commit and an object to track the progress of the commit if `blocking=True`.
"""
# If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but
# it might fail.
if not hasattr(self, "repo"):
self.init_git_repo()
model_name = kwargs.pop("model_name", None)
if model_name is None and self.args.should_save:
if self.args.hub_model_id is None:
model_name = Path(self.args.output_dir).name
else:
model_name = self.args.hub_model_id.split("/")[-1]
        # Needs to be executed on all processes for TPU training, but will only save on the process determined by
# self.args.should_save.
self.save_model(_internal_call=True)
# Only push from one node.
if not self.is_world_process_zero():
return
# Cancel any async push in progress if blocking=True. The commits will all be pushed together.
if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done:
self.push_in_progress._process.kill()
self.push_in_progress = None
git_head_commit_url = self.repo.push_to_hub(
commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
)
        # push the model card separately so it is independent from the rest of the model
if self.args.should_save:
self.create_model_card(model_name=model_name, **kwargs)
try:
self.repo.push_to_hub(
commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
)
except EnvironmentError as exc:
logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}")
return git_head_commit_url
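    # Illustrative usage sketch (added comment); assumes the Hub settings
    # (`hub_model_id`, `hub_token` or a cached login) are already configured in
    # the training arguments:
    #
    #   url = trainer.push_to_hub(commit_message="Add fine-tuned model")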
#
# Deprecated code
#
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
args = self.args
if not has_length(dataloader):
raise ValueError("dataloader must implement a working __len__")
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False, dataloader=dataloader)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs_decode is not None:
inputs_host = (
inputs_decode
if inputs_host is None
else nested_concat(inputs_host, inputs_decode, padding_index=-100)
)
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host, inputs_host = None, None, None, None
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
if args.include_inputs_for_metrics:
metrics = self.compute_metrics(
EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids)
)
else:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def _add_sm_patterns_to_gitignore(self) -> None:
"""Add SageMaker Checkpointing patterns to .gitignore file."""
# Make sure we only do this on the main process
if not self.is_world_process_zero():
return
patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"]
# Get current .gitignore content
if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")):
with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f:
current_content = f.read()
else:
current_content = ""
# Add the patterns to .gitignore
content = current_content
for pattern in patterns:
if pattern not in content:
if content.endswith("\n"):
content += pattern
else:
content += f"\n{pattern}"
# Write the .gitignore file if it has changed
if content != current_content:
with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f:
logger.debug(f"Writing .gitignore file. Content: {content}")
f.write(content)
self.repo.git_add(".gitignore")
# avoid race condition with git status
time.sleep(0.5)
if not self.repo.is_repo_clean():
self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
self.repo.git_push()
| RadFM-main | src/My_Trainer/trainer.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | __init__.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configurations for IODINE."""
# pylint: disable=missing-docstring, unused-variable
import math
def clevr6():
n_z = 64 # number of latent dimensions
num_components = 7 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/clevr6"
# For the paper we used 8 GPUs with a batch size of 4 each.
# This means a total batch size of 32, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 4
learn_rate = 0.001 * math.sqrt(batch_size / 32)
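  # Added arithmetic note: with the batch_size of 4 above this evaluates to
  # 0.001 * sqrt(4 / 32) ~= 3.5e-4; a batch_size of 8 would give ~5e-4.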
data = {
"constructor": "iodine.modules.data.CLEVR",
"batch_size": batch_size,
"path": "multi_object_datasets/clevr_with_masks_train.tfrecords",
"max_num_objects": 6,
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "linear",
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [64, 64, 64, 64, None],
"kernel_shapes": [3],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [64, 64, 64, 64],
"strides": [2],
"kernel_shapes": [3],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [256, 256],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [256],
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("color", 9, "categorical"),
("shape", 4, "categorical"),
("size", 3, "categorical"),
("position", 3, "scalar"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
def multi_dsprites():
n_z = 16 # number of latent dimensions
num_components = 6 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/multi_dsprites"
# For the paper we used 8 GPUs with a batch size of 16 each.
# This means a total batch size of 128, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 16
learn_rate = 0.0003 * math.sqrt(batch_size / 128)
data = {
"constructor":
"iodine.modules.data.MultiDSprites",
"batch_size":
batch_size,
"path":
"multi_object_datasets/multi_dsprites_colored_on_grayscale.tfrecords",
"dataset_variant":
"colored_on_grayscale",
"min_num_objs":
3,
"max_num_objs":
3,
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "cos",
"coord_freqs": 3,
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [32, 32, 32, 32, None],
"kernel_shapes": [5],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [32, 32, 32],
"strides": [2],
"kernel_shapes": [5],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [128],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [128],
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("color", 3, "scalar"),
("shape", 4, "categorical"),
("scale", 1, "scalar"),
("x", 1, "scalar"),
("y", 1, "scalar"),
("orientation", 2, "angle"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
def tetrominoes():
n_z = 32 # number of latent dimensions
num_components = 4 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/tetrominoes"
# For the paper we used 8 GPUs with a batch size of 32 each.
# This means a total batch size of 256, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 128
learn_rate = 0.0003 * math.sqrt(batch_size / 256)
data = {
"constructor": "iodine.modules.data.Tetrominoes",
"batch_size": batch_size,
"path": "iodine/multi_object_datasets/tetrominoes_train.tfrecords",
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "cos",
"coord_freqs": 3,
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [32, 32, 32, 32, None],
"kernel_shapes": [5],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
"coord_freqs": 3,
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [32, 32, 32],
"strides": [2],
"kernel_shapes": [5],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [128],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [], # No recurrent layer used for this dataset
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("position", 2, "scalar"),
("color", 3, "scalar"),
("shape", 20, "categorical"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
| deepmind-research-master | iodine/configurations.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-importing-member, g-multiple-import, g-import-not-at-top
# pylint: disable=protected-access, g-bad-import-order, missing-docstring
# pylint: disable=unused-variable, invalid-name, no-value-for-parameter
from copy import deepcopy
import os.path
import warnings
from absl import logging
import numpy as np
from sacred import Experiment, SETTINGS
# Ignore all tensorflow deprecation warnings
logging._warn_preinit_stderr = 0
warnings.filterwarnings("ignore", module=".*tensorflow.*")
import tensorflow.compat.v1 as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import sonnet as snt
from sacred.stflow import LogFileWriter
from iodine.modules import utils
from iodine import configurations
SETTINGS.CONFIG.READ_ONLY_CONFIG = False
ex = Experiment("iodine")
@ex.config
def default_config():
continue_run = False # set to continue experiment from an existing checkpoint
checkpoint_dir = ("checkpoints/iodine"
) # if continue_run is False, "_{run_id}" will be appended
save_summaries_steps = 10
save_checkpoint_steps = 1000
n_z = 64 # number of latent dimensions
num_components = 7 # number of components (K)
num_iters = 5
learn_rate = 0.001
batch_size = 4
stop_after_steps = int(1e6)
# Details for the dataset, model and optimizer are left empty here.
# They can be found in the configurations for individual datasets,
# which are provided in configurations.py and added as named configs.
data = {} # Dataset details will go here
model = {} # Model details will go here
optimizer = {} # Optimizer details will go here
ex.named_config(configurations.clevr6)
ex.named_config(configurations.multi_dsprites)
ex.named_config(configurations.tetrominoes)
@ex.capture
def build(identifier, _config):
config_copy = deepcopy(_config[identifier])
return utils.build(config_copy, identifier=identifier)
def get_train_step(model, dataset, optimizer):
loss, scalars, _ = model(dataset("train"))
global_step = tf.train.get_or_create_global_step()
grads = optimizer.compute_gradients(loss)
gradients, variables = zip(*grads)
global_norm = tf.global_norm(gradients)
gradients, global_norm = tf.clip_by_global_norm(
gradients, 5.0, use_norm=global_norm)
grads = zip(gradients, variables)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
with tf.control_dependencies([train_op]):
overview = model.get_overview_images(dataset("summary"))
scalars["debug/global_grad_norm"] = global_norm
summaries = {
k: tf.summary.scalar(k, v) for k, v in scalars.items()
}
summaries.update(
{k: tf.summary.image(k, v) for k, v in overview.items()})
return tf.identity(global_step), scalars, train_op
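  # Added note on the clipping above: `clip_by_global_norm` rescales all
  # gradients by 5.0 / global_norm whenever the global norm exceeds 5.0, so a
  # global norm of 20 would shrink every gradient by a factor of 4.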
@ex.capture
def get_checkpoint_dir(continue_run, checkpoint_dir, _run, _log):
if continue_run:
assert os.path.exists(checkpoint_dir)
_log.info("Continuing run from checkpoint at {}".format(checkpoint_dir))
return checkpoint_dir
run_id = _run._id
if run_id is None: # then no observer was added that provided an _id
if not _run.unobserved:
_log.warning(
"No run_id given or provided by an Observer. (Re-)using run_id=1.")
run_id = 1
checkpoint_dir = checkpoint_dir + "_{run_id}".format(run_id=run_id)
_log.info(
"Starting a new run using checkpoint dir: '{}'".format(checkpoint_dir))
return checkpoint_dir
@ex.capture
def get_session(chkp_dir, loss, stop_after_steps, save_summaries_steps,
save_checkpoint_steps):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
hooks = [
tf.train.StopAtStepHook(last_step=stop_after_steps),
tf.train.NanTensorHook(loss),
]
return tf.train.MonitoredTrainingSession(
hooks=hooks,
config=config,
checkpoint_dir=chkp_dir,
save_summaries_steps=save_summaries_steps,
save_checkpoint_steps=save_checkpoint_steps,
)
@ex.command(unobserved=True)
def load_checkpoint(use_placeholder=False, session=None):
dataset = build("data")
model = build("model")
if use_placeholder:
inputs = dataset.get_placeholders()
else:
inputs = dataset()
info = model.eval(inputs)
if session is None:
session = tf.Session()
saver = tf.train.Saver()
checkpoint_dir = get_checkpoint_dir()
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(session, checkpoint_file)
print('Successfully restored Checkpoint "{}"'.format(checkpoint_file))
# print variables
variables = tf.global_variables() + tf.local_variables()
for row in snt.format_variables(variables, join_lines=False):
print(row)
return {
"session": session,
"model": model,
"info": info,
"inputs": inputs,
"dataset": dataset,
}
@ex.automain
@LogFileWriter(ex)
def main(save_summaries_steps):
checkpoint_dir = get_checkpoint_dir()
dataset = build("data")
model = build("model")
optimizer = build("optimizer")
gstep, train_step_exports, train_op = get_train_step(model, dataset,
optimizer)
loss, ari = [], []
with get_session(checkpoint_dir, train_step_exports["loss/total"]) as sess:
while not sess.should_stop():
out = sess.run({
"step": gstep,
"loss": train_step_exports["loss/total"],
"ari": train_step_exports["loss/ari_nobg"],
"train": train_op,
})
loss.append(out["loss"])
ari.append(out["ari"])
step = out["step"]
if step % save_summaries_steps == 0:
mean_loss = np.mean(loss)
mean_ari = np.mean(ari)
ex.log_scalar("loss", mean_loss, step)
ex.log_scalar("ari", mean_ari, step)
print("{step:>6d} Loss: {loss: >12.2f}\t\tARI-nobg:{ari: >6.2f}".format(
step=step, loss=mean_loss, ari=mean_ari))
loss, ari = [], []
| deepmind-research-master | iodine/main.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders for rendering images."""
# pylint: disable=missing-docstring
from iodine.modules.distributions import MixtureParameters
import shapeguard
import sonnet as snt
class ComponentDecoder(snt.AbstractModule):
def __init__(self, pixel_decoder, name="component_decoder"):
super().__init__(name=name)
self._pixel_decoder = pixel_decoder
self._sg = shapeguard.ShapeGuard()
def set_output_shapes(self, pixel, mask):
self._sg.guard(pixel, "K, H, W, Cp")
self._sg.guard(mask, "K, H, W, 1")
self._pixel_decoder.set_output_shapes(self._sg["H, W, 1 + Cp"])
def _build(self, z):
self._sg.guard(z, "B, K, Z")
z_flat = self._sg.reshape(z, "B*K, Z")
pixel_params = self._pixel_decoder(z_flat).params
self._sg.guard(pixel_params, "B*K, H, W, 1 + Cp")
mask_params = pixel_params[..., 0:1]
pixel_params = pixel_params[..., 1:]
output = MixtureParameters(
pixel=self._sg.reshape(pixel_params, "B, K, H, W, Cp"),
mask=self._sg.reshape(mask_params, "B, K, H, W, 1"),
)
del self._sg.B
return output
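    # Added shape walkthrough (illustrative numbers, not original code): with
    # B=4, K=7, Z=64 and a pixel decoder emitting 1 + Cp = 4 channels, the
    # first channel becomes per-component mask logits of shape (4, 7, H, W, 1)
    # and the remaining 3 channels the per-component RGB pixel parameters of
    # shape (4, 7, H, W, 3).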
| deepmind-research-master | iodine/modules/decoder.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factor Evaluation Module."""
# pylint: disable=unused-variable
import collections
import functools
from iodine.modules import utils
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
Factor = collections.namedtuple("Factor", ["name", "size", "type"])
class FactorRegressor(snt.AbstractModule):
"""Assess representations by learning a linear mapping to latents."""
def __init__(self, mapping=None, name="repres_content"):
super().__init__(name=name)
if mapping is None:
self._mapping = [
Factor("color", 3, "scalar"),
Factor("shape", 4, "categorical"),
Factor("scale", 1, "scalar"),
Factor("x", 1, "scalar"),
Factor("y", 1, "scalar"),
Factor("orientation", 2, "angle"),
]
else:
self._mapping = [Factor(*m) for m in mapping]
def _build(self, z, latent, visibility, pred_mask, true_mask):
sg = shapeguard.ShapeGuard()
z = sg.guard(z, "B, K, Z")
pred_mask = sg.guard(pred_mask, "B, K, H, W, 1")
true_mask = sg.guard(true_mask, "B, L, H, W, 1")
visibility = sg.guard(visibility, "B, L")
num_visible_obj = tf.reduce_sum(visibility)
# Map z to predictions for all latents
sg.M = sum([m.size for m in self._mapping])
self.predictor = snt.Linear(sg.M, name="predict_latents")
z_flat = sg.reshape(z, "B*K, Z")
all_preds = sg.guard(self.predictor(z_flat), "B*K, M")
all_preds = sg.reshape(all_preds, "B, 1, K, M")
all_preds = tf.tile(all_preds, sg["1, L, 1, 1"])
# prepare latents
latents = {}
mean_var_tot = {}
for m in self._mapping:
with tf.name_scope(m.name):
# preprocess, reshape, and tile
lat_preprocess = self.get_preprocessing(m)
lat = sg.guard(
lat_preprocess(latent[m.name]), "B, L, {}".format(m.size))
# track the running mean/variance of this latent (used as the total variance in the R² metric)
if m.type in {"scalar", "angle"}:
mvt = utils.OnlineMeanVarEstimator(
axis=[0, 1], ddof=1, name="{}_mean_var".format(m.name))
mean_var_tot[m.name] = mvt(lat, visibility[:, :, tf.newaxis])
lat = tf.reshape(lat, sg["B, L, 1"] + [-1])
lat = tf.tile(lat, sg["1, 1, K, 1"])
latents[m.name] = lat
# prepare predictions
idx = 0
predictions = {}
for m in self._mapping:
with tf.name_scope(m.name):
assert m.name in latent, "{} not in {}".format(m.name, latent.keys())
pred = all_preds[..., idx:idx + m.size]
predictions[m.name] = sg.guard(pred, "B, L, K, {}".format(m.size))
idx += m.size
# compute error
total_pairwise_errors = None
for m in self._mapping:
with tf.name_scope(m.name):
error_fn = self.get_error_func(m)
sg.guard(latents[m.name], "B, L, K, {}".format(m.size))
sg.guard(predictions[m.name], "B, L, K, {}".format(m.size))
err = error_fn(latents[m.name], predictions[m.name])
sg.guard(err, "B, L, K")
if total_pairwise_errors is None:
total_pairwise_errors = err
else:
total_pairwise_errors += err
# determine best assignment by comparing masks
obj_mask = true_mask[:, :, tf.newaxis]
pred_mask = pred_mask[:, tf.newaxis]
pairwise_overlap = tf.reduce_sum(obj_mask * pred_mask, axis=[3, 4, 5])
best_match = sg.guard(tf.argmax(pairwise_overlap, axis=2), "B, L")
assignment = tf.one_hot(best_match, sg.K)
assignment *= visibility[:, :, tf.newaxis] # Mask non-visible objects
# total error
total_error = (
tf.reduce_sum(assignment * total_pairwise_errors) / num_visible_obj)
# compute scalars
monitored_scalars = {}
for m in self._mapping:
with tf.name_scope(m.name):
metric = self.get_metric(m)
scalar = metric(
latents[m.name],
predictions[m.name],
assignment[:, :, :, tf.newaxis],
mean_var_tot.get(m.name),
num_visible_obj,
)
monitored_scalars[m.name] = scalar
return total_error, monitored_scalars, mean_var_tot, predictions, assignment
@snt.reuse_variables
def predict(self, z):
sg = shapeguard.ShapeGuard()
z = sg.guard(z, "B, Z")
all_preds = sg.guard(self.predictor(z), "B, M")
idx = 0
predictions = {}
for m in self._mapping:
with tf.name_scope(m.name):
pred = all_preds[:, idx:idx + m.size]
predictions[m.name] = sg.guard(pred, "B, {}".format(m.size))
idx += m.size
return predictions
@staticmethod
def get_error_func(factor):
if factor.type in {"scalar", "angle"}:
return sse
elif factor.type == "categorical":
return functools.partial(
tf.losses.softmax_cross_entropy, reduction="none")
else:
raise KeyError(factor.type)
@staticmethod
def get_metric(factor):
if factor.type in {"scalar", "angle"}:
return r2
elif factor.type == "categorical":
return accuracy
else:
raise KeyError(factor.type)
@staticmethod
def one_hot(f, nr_categories):
return tf.one_hot(tf.cast(f[..., 0], tf.int32), depth=nr_categories)
@staticmethod
def angle_to_vector(theta):
return tf.concat([tf.math.cos(theta), tf.math.sin(theta)], axis=-1)
@staticmethod
def get_preprocessing(factor):
if factor.type == "scalar":
return tf.identity
elif factor.type == "categorical":
return functools.partial(
FactorRegressor.one_hot, nr_categories=factor.size)
elif factor.type == "angle":
return FactorRegressor.angle_to_vector
else:
raise KeyError(factor.type)
def sse(true, pred):
# run our own sum squared error because we want to reduce sum over last dim
return tf.reduce_sum(tf.square(true - pred), axis=-1)
def accuracy(labels, logits, assignment, mean_var_tot, num_vis):
del mean_var_tot # unused
pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
labels = tf.argmax(labels, axis=-1, output_type=tf.int32)
correct = tf.cast(tf.equal(labels, pred), tf.float32)
return tf.reduce_sum(correct * assignment[..., 0]) / num_vis
def r2(labels, pred, assignment, mean_var_tot, num_vis):
del num_vis # unused
mean, var, _ = mean_var_tot
# labels, pred: (B, L, K, n)
ss_res = tf.reduce_sum(tf.square(labels - pred) * assignment, axis=2)
ss_tot = var[tf.newaxis, tf.newaxis, :] # (1, 1, n)
return tf.reduce_mean(1.0 - ss_res / ss_tot)
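# Illustrative sketch (not part of the original module): the mask-overlap
# matching used by FactorRegressor._build, in plain NumPy with hypothetical
# shapes and values. Each ground-truth object is assigned to the predicted
# slot whose mask overlaps it most, and errors are averaged over visible
# objects only.
if __name__ == "__main__":
  import numpy as np
  B, L, K, H, W = 1, 2, 3, 4, 4
  true_mask = np.zeros((B, L, H, W, 1))
  true_mask[0, 0, :2] = 1.0   # object 0 occupies the top half
  true_mask[0, 1, 2:] = 1.0   # object 1 occupies the bottom half
  pred_mask = np.zeros((B, K, H, W, 1))
  pred_mask[0, 2, :2] = 1.0   # slot 2 explains the top half
  pred_mask[0, 0, 2:] = 1.0   # slot 0 explains the bottom half
  overlap = (true_mask[:, :, None] * pred_mask[:, None]).sum(axis=(3, 4, 5))  # (B, L, K)
  best_match = overlap.argmax(axis=2)                                         # (B, L)
  assert best_match.tolist() == [[2, 0]]
  assignment = np.eye(K)[best_match]                                          # one-hot (B, L, K)
  visibility = np.ones((B, L))
  assignment *= visibility[..., None]              # zero out non-visible objects
  pairwise_err = np.random.RandomState(0).rand(B, L, K)
  total_error = (assignment * pairwise_err).sum() / visibility.sum()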
| deepmind-research-master | iodine/modules/factor_eval.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting tools for IODINE."""
# pylint: disable=unused-import, missing-docstring, unused-variable
# pylint: disable=invalid-name, unexpected-keyword-arg
import functools
from iodine.modules.utils import get_mask_plot_colors
from matplotlib.colors import hsv_to_rgb
import matplotlib.pyplot as plt
import numpy as np
__all__ = ("get_mask_plot_colors", "example_plot", "iterations_plot",
"inputs_plot")
def clean_ax(ax, color=None, lw=4.0):
ax.set_xticks([])
ax.set_yticks([])
if color is not None:
for spine in ax.spines.values():
spine.set_linewidth(lw)
spine.set_color(color)
def optional_ax(fn):
def _wrapped(*args, **kwargs):
if kwargs.get("ax", None) is None:
figsize = kwargs.pop("figsize", (4, 4))
fig, ax = plt.subplots(figsize=figsize)
kwargs["ax"] = ax
return fn(*args, **kwargs)
return _wrapped
def optional_clean_ax(fn):
def _wrapped(*args, **kwargs):
if kwargs.get("ax", None) is None:
figsize = kwargs.pop("figsize", (4, 4))
fig, ax = plt.subplots(figsize=figsize)
kwargs["ax"] = ax
color = kwargs.pop("color", None)
lw = kwargs.pop("lw", 4.0)
res = fn(*args, **kwargs)
clean_ax(kwargs["ax"], color, lw)
return res
return _wrapped
@optional_clean_ax
def show_img(img, mask=None, ax=None, norm=False):
if norm:
vmin, vmax = np.min(img), np.max(img)
img = (img - vmin) / (vmax - vmin)
if mask is not None:
img = img * mask + np.ones_like(img) * (1.0 - mask)
return ax.imshow(img.clip(0.0, 1.0), interpolation="nearest")
@optional_clean_ax
def show_mask(m, ax):
color_conv = get_mask_plot_colors(m.shape[0])
color_mask = np.dot(np.transpose(m, [1, 2, 0]), color_conv)
return ax.imshow(color_mask.clip(0.0, 1.0), interpolation="nearest")
@optional_clean_ax
def show_mat(m, ax, vmin=None, vmax=None, cmap="viridis"):
return ax.matshow(
m[..., 0], cmap=cmap, vmin=vmin, vmax=vmax, interpolation="nearest")
@optional_clean_ax
def show_coords(m, ax):
vmin, vmax = np.min(m), np.max(m)
m = (m - vmin) / (vmax - vmin)
color_conv = get_mask_plot_colors(m.shape[-1])
color_mask = np.dot(m, color_conv)
return ax.imshow(color_mask, interpolation="nearest")
def example_plot(rinfo,
b=0,
t=-1,
mask_components=False,
size=2,
column_titles=True):
image = rinfo["data"]["image"][b, 0]
recons = rinfo["outputs"]["recons"][b, t, 0]
pred_mask = rinfo["outputs"]["pred_mask"][b, t]
components = rinfo["outputs"]["components"][b, t]
K, H, W, C = components.shape
colors = get_mask_plot_colors(K)
nrows = 1
ncols = 3 + K
fig, axes = plt.subplots(ncols=ncols, figsize=(ncols * size, nrows * size))
show_img(image, ax=axes[0], color="#000000")
show_img(recons, ax=axes[1], color="#000000")
show_mask(pred_mask[..., 0], ax=axes[2], color="#000000")
for k in range(K):
mask = pred_mask[k] if mask_components else None
show_img(components[k], ax=axes[k + 3], color=colors[k], mask=mask)
if column_titles:
labels = ["Image", "Recons.", "Mask"
] + ["Component {}".format(k + 1) for k in range(K)]
for ax, title in zip(axes, labels):
ax.set_title(title)
plt.subplots_adjust(hspace=0.03, wspace=0.035)
return fig
def iterations_plot(rinfo, b=0, mask_components=False, size=2):
image = rinfo["data"]["image"][b]
true_mask = rinfo["data"]["true_mask"][b]
recons = rinfo["outputs"]["recons"][b]
pred_mask = rinfo["outputs"]["pred_mask"][b]
pred_mask_logits = rinfo["outputs"]["pred_mask_logits"][b]
components = rinfo["outputs"]["components"][b]
T, K, H, W, C = components.shape
colors = get_mask_plot_colors(K)
nrows = T + 1
ncols = 2 + K
fig, axes = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(ncols * size, nrows * size))
for t in range(T):
show_img(recons[t, 0], ax=axes[t, 0])
show_mask(pred_mask[t, ..., 0], ax=axes[t, 1])
axes[t, 0].set_ylabel("iter {}".format(t))
for k in range(K):
mask = pred_mask[t, k] if mask_components else None
show_img(components[t, k], ax=axes[t, k + 2], color=colors[k], mask=mask)
axes[0, 0].set_title("Reconstruction")
axes[0, 1].set_title("Mask")
show_img(image[0], ax=axes[T, 0])
show_mask(true_mask[0, ..., 0], ax=axes[T, 1])
vmin = np.min(pred_mask_logits[T - 1])
vmax = np.max(pred_mask_logits[T - 1])
for k in range(K):
axes[0, k + 2].set_title("Component {}".format(k + 1)) # , color=colors[k])
show_mat(
pred_mask_logits[T - 1, k], ax=axes[T, k + 2], vmin=vmin, vmax=vmax)
axes[T, k + 2].set_xlabel(
"Mask Logits for\nComponent {}".format(k + 1)) # , color=colors[k])
axes[T, 0].set_xlabel("Input Image")
axes[T, 1].set_xlabel("Ground Truth Mask")
plt.subplots_adjust(wspace=0.05, hspace=0.05)
return fig
def inputs_plot(rinfo, b=0, t=0, size=2):
B, T, K, H, W, C = rinfo["outputs"]["components"].shape
colors = get_mask_plot_colors(K)
inputs = rinfo["inputs"]["spatial"]
rows = [
("image", show_img, False),
("components", show_img, False),
("dcomponents", functools.partial(show_img, norm=True), False),
("mask", show_mat, True),
("pred_mask", show_mat, True),
("dmask", functools.partial(show_mat, cmap="coolwarm"), True),
("posterior", show_mat, True),
("log_prob", show_mat, True),
("counterfactual", show_mat, True),
("coordinates", show_coords, False),
]
rows = [(n, f, mcb) for n, f, mcb in rows if n in inputs]
nrows = len(rows)
ncols = K + 1
fig, axes = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(ncols * size - size * 0.9, nrows * size),
gridspec_kw={"width_ratios": [1] * K + [0.1]},
)
for r, (name, plot_fn, make_cbar) in enumerate(rows):
axes[r, 0].set_ylabel(name)
if make_cbar:
vmin = np.min(inputs[name][b, t])
vmax = np.max(inputs[name][b, t])
if np.abs(vmin - vmax) < 1e-6:
vmin -= 0.1
vmax += 0.1
plot_fn = functools.partial(plot_fn, vmin=vmin, vmax=vmax)
# print("range of {:<16}: [{:0.2f}, {:0.2f}]".format(name, vmin, vmax))
for k in range(K):
if inputs[name].shape[2] == 1:
m = inputs[name][b, t, 0]
color = (0.0, 0.0, 0.0)
else:
m = inputs[name][b, t, k]
color = colors[k]
mappable = plot_fn(m, ax=axes[r, k], color=color)
if make_cbar:
fig.colorbar(mappable, cax=axes[r, K])
else:
axes[r, K].set_visible(False)
for k in range(K):
axes[0, k].set_title("Component {}".format(k + 1)) # , color=colors[k])
plt.subplots_adjust(hspace=0.05, wspace=0.05)
return fig
| deepmind-research-master | iodine/modules/plotting.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | iodine/modules/__init__.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterative refinement modules."""
# pylint: disable=g-doc-bad-indent, unused-variable
from iodine.modules import utils
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
class RefinementCore(snt.RNNCore):
"""Recurrent Refinement Module.
Refinement modules take as inputs:
* previous state (which could be an arbitrary nested structure)
* current inputs which include
* image-space inputs like pixel-based errors, or mask-posteriors
* latent-space inputs like the previous z_dist, or dz
They use these inputs to produce:
* output (usually a new z_dist)
* new_state
"""
def __init__(self,
encoder_net,
recurrent_net,
refinement_head,
name="refinement"):
super().__init__(name=name)
self._encoder_net = encoder_net
self._recurrent_net = recurrent_net
self._refinement_head = refinement_head
self._sg = shapeguard.ShapeGuard()
def initial_state(self, batch_size, **unused_kwargs):
return self._recurrent_net.initial_state(batch_size)
def _build(self, inputs, prev_state):
sg = self._sg
assert "spatial" in inputs, inputs.keys()
assert "flat" in inputs, inputs.keys()
assert "zp" in inputs["flat"], inputs["flat"].keys()
zp = sg.guard(inputs["flat"]["zp"], "B, K, Zp")
x = sg.guard(self.prepare_spatial_inputs(inputs["spatial"]), "B*K, H, W, C")
h1 = sg.guard(self._encoder_net(x).params, "B*K, H1")
h2 = sg.guard(self.prepare_flat_inputs(h1, inputs["flat"]), "B*K, H2")
h2_unflattened = sg.reshape(h2, "B, K, H2")
h3, next_state = self._recurrent_net(h2_unflattened, prev_state)
sg.guard(h3, "B, K, H3")
outputs = sg.guard(self._refinement_head(zp, h3), "B, K, Y")
del self._sg.B
return outputs, next_state
def prepare_spatial_inputs(self, inputs):
values = []
for name, val in sorted(inputs.items(), key=lambda it: it[0]):
if val.shape.as_list()[1] == 1:
self._sg.guard(val, "B, 1, H, W, _C")
val = tf.tile(val, self._sg["1, K, 1, 1, 1"])
else:
self._sg.guard(val, "B, K, H, W, _C")
values.append(val)
concat_inputs = self._sg.guard(tf.concat(values, axis=-1), "B, K, H, W, C")
return self._sg.reshape(concat_inputs, "B*K, H, W, C")
def prepare_flat_inputs(self, hidden, inputs):
values = [self._sg.guard(hidden, "B*K, H1")]
for name, val in sorted(inputs.items(), key=lambda it: it[0]):
self._sg.guard(val, "B, K, _")
val_flat = tf.reshape(val, self._sg["B*K"] + [-1])
values.append(val_flat)
return tf.concat(values, axis=-1)
class ResHead(snt.AbstractModule):
"""Updates Zp using a residual mechanism."""
def __init__(self, name="residual_head"):
super().__init__(name=name)
def _build(self, zp_old, inputs):
sg = shapeguard.ShapeGuard()
sg.guard(zp_old, "B, K, Zp")
sg.guard(inputs, "B, K, H")
update = snt.Linear(sg.Zp)
flat_zp = sg.reshape(zp_old, "B*K, Zp")
flat_inputs = sg.reshape(inputs, "B*K, H")
zp = flat_zp + update(flat_inputs)
return sg.reshape(zp, "B, K, Zp")
class PredictorCorrectorHead(snt.AbstractModule):
"""This refinement head is used for sequential data.
At every step it computes a prediction from the λ of the previous timestep
and an update from the refinement network of the current timestep.
The next step λ' is computed as a gated combination of both:
λ' = g * λ_corr + (1-g) * λ_pred
"""
def __init__(
self,
hidden_sizes=(64,),
pred_gate_bias=0.0,
corrector_gate_bias=0.0,
activation=tf.nn.elu,
name="predcorr_head",
):
super().__init__(name=name)
self._hidden_sizes = hidden_sizes
self._activation = utils.get_act_func(activation)
self._pred_gate_bias = pred_gate_bias
self._corrector_gate_bias = corrector_gate_bias
def _build(self, zp_old, inputs):
sg = shapeguard.ShapeGuard()
sg.guard(zp_old, "B, K, Zp")
sg.guard(inputs, "B, K, H")
update = snt.Linear(sg.Zp)
update_gate = snt.Linear(sg.Zp)
predict = snt.nets.MLP(
output_sizes=list(self._hidden_sizes) + [sg.Zp * 2],
activation=self._activation,
)
flat_zp = sg.reshape(zp_old, "B*K, Zp")
flat_inputs = sg.reshape(inputs, "B*K, H")
g = tf.nn.sigmoid(update_gate(flat_inputs) + self._corrector_gate_bias)
u = update(flat_inputs)
# a slightly more efficient way of computing the gated update
# (1-g) * flat_zp + g * u
zp_corrected = flat_zp + g * (u - flat_zp)
predicted = predict(flat_zp)
pred_up = predicted[:, :sg.Zp]
pred_gate = tf.nn.sigmoid(predicted[:, sg.Zp:] + self._pred_gate_bias)
zp = zp_corrected + pred_gate * (pred_up - zp_corrected)
return sg.reshape(zp, "B, K, Zp")
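# Illustrative sketch (not part of the original module): the gated updates in
# PredictorCorrectorHead, in plain NumPy with hypothetical values. It checks
# that the "more efficient" form zp + g * (u - zp) used above equals the plain
# gate (1 - g) * zp + g * u, and then applies the same blend between the
# corrected and the predicted latent parameters.
if __name__ == "__main__":
  import numpy as np
  rng = np.random.RandomState(0)
  zp, u, pred_up = rng.randn(3, 8), rng.randn(3, 8), rng.randn(3, 8)
  g = 1.0 / (1.0 + np.exp(-rng.randn(3, 8)))         # corrector gate in (0, 1)
  pred_gate = 1.0 / (1.0 + np.exp(-rng.randn(3, 8)))  # predictor gate in (0, 1)
  zp_corrected = zp + g * (u - zp)
  assert np.allclose(zp_corrected, (1.0 - g) * zp + g * u)
  zp_next = zp_corrected + pred_gate * (pred_up - zp_corrected)
  assert zp_next.shape == zp.shape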
| deepmind-research-master | iodine/modules/refinement.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of sonnet modules that wrap useful distributions."""
# pylint: disable=missing-docstring, g-doc-args, g-short-docstring-punctuation
# pylint: disable=g-space-before-docstring-summary
# pylint: disable=g-no-space-after-docstring-summary
import collections
from iodine.modules.utils import get_act_func
from iodine.modules.utils import get_distribution
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
FlatParameters = collections.namedtuple("ParameterOut", ["params"])
MixtureParameters = collections.namedtuple("MixtureOut", ["pixel", "mask"])
class DistributionModule(snt.AbstractModule):
"""Distribution Base class supporting shape inference & default priors."""
def __init__(self, name="distribution"):
super().__init__(name=name)
self._output_shape = None
def set_output_shape(self, shape):
self._output_shape = shape
@property
def output_shape(self):
return self._output_shape
@property
def input_shapes(self):
raise NotImplementedError()
def get_default_prior(self, batch_dim=(1,)):
return self(
tf.zeros(list(batch_dim) + self.input_shapes.params, dtype=tf.float32))
class BernoulliOutput(DistributionModule):
def __init__(self, name="bernoulli_output"):
super().__init__(name=name)
@property
def input_shapes(self):
return FlatParameters(self.output_shape)
def _build(self, params):
return tfd.Independent(
tfd.Bernoulli(logits=params, dtype=tf.float32),
reinterpreted_batch_ndims=1)
class LocScaleDistribution(DistributionModule):
"""Generic IID location / scale distribution.
Input parameters are concatenation of location and scale (2*Z,)
Args:
dist: Distribution or str Kind of distribution used. Supports Normal,
Logistic, Laplace, and StudentT distributions.
dist_kwargs: dict custom keyword arguments for the distribution
scale_act: function or str or None activation function to be applied to
the scale input
scale: str
different modes for computing the scale:
* stddev: scale is computed as scale_act(s)
* var: scale is computed as sqrt(scale_act(s))
* prec: scale is computed as 1./scale_act(s)
* fixed: scale is a global variable (same for all pixels) if
scale_val==-1. then it is a trainable variable initialized to 0.1
else it is fixed to scale_val (input shape is only (Z,) in this
case)
scale_val: float determines the scale value (only used if scale=='fixed').
loc_act: function or str or None activation function to be applied to the
location input.
"""
def __init__(
self,
dist=tfd.Normal,
dist_kwargs=None,
scale_act=tf.exp,
scale="stddev",
scale_val=1.0,
loc_act=None,
name="loc_scale_dist",
):
super().__init__(name=name)
self._scale_act = get_act_func(scale_act)
self._loc_act = get_act_func(loc_act)
# supports Normal, Logistic, Laplace, StudentT
self._dist = get_distribution(dist)
self._dist_kwargs = dist_kwargs or {}
assert scale in ["stddev", "var", "prec", "fixed"], scale
self._scale = scale
self._scale_val = scale_val
@property
def input_shapes(self):
if self._scale == "fixed":
param_shape = self.output_shape
else:
param_shape = self.output_shape[:-1] + [self.output_shape[-1] * 2]
return FlatParameters(param_shape)
def _build(self, params):
if self._scale == "fixed":
loc = params
scale = None # set later
else:
n_channels = params.get_shape().as_list()[-1]
assert n_channels % 2 == 0
assert n_channels // 2 == self.output_shape[-1]
loc = params[..., :n_channels // 2]
scale = params[..., n_channels // 2:]
# apply activation functions
if self._scale != "fixed":
scale = self._scale_act(scale)
loc = self._loc_act(loc)
# apply the correct parametrization
if self._scale == "var":
scale = tf.sqrt(scale)
elif self._scale == "prec":
scale = tf.reciprocal(scale)
elif self._scale == "fixed":
if self._scale_val == -1.0:
scale_val = tf.get_variable(
"scale", initializer=tf.constant(0.1, dtype=tf.float32))
else:
scale_val = self._scale_val
scale = tf.ones_like(loc) * scale_val
# else 'stddev'
dist = self._dist(loc=loc, scale=scale, **self._dist_kwargs)
return tfd.Independent(dist, reinterpreted_batch_ndims=1)
class MaskedMixture(DistributionModule):
def __init__(
self,
num_components,
component_dist,
mask_activation=None,
name="masked_mixture",
):
"""
Spatial Mixture Model composed of a categorical masking distribution and
a custom pixel-wise component distribution (usually logistic or
gaussian).
Args:
num_components: int Number of mixture components >= 2
component_dist: the distribution to use for the individual components
mask_activation: str or function or None activation function that
should be applied to the mask before the softmax.
name: str
"""
super().__init__(name=name)
self._num_components = num_components
self._dist = component_dist
self._mask_activation = get_act_func(mask_activation)
def set_output_shape(self, shape):
super().set_output_shape(shape)
self._dist.set_output_shape(shape)
def _build(self, pixel, mask):
sg = shapeguard.ShapeGuard()
# MASKING
sg.guard(mask, "B, K, H, W, 1")
mask = tf.transpose(mask, perm=[0, 2, 3, 4, 1])
mask = sg.reshape(mask, "B, H, W, K")
mask = self._mask_activation(mask)
mask = mask[:, tf.newaxis] # add K=1 axis since K is removed by mixture
mix_dist = tfd.Categorical(logits=mask)
# COMPONENTS
sg.guard(pixel, "B, K, H, W, Cp")
params = tf.transpose(pixel, perm=[0, 2, 3, 1, 4])
params = params[:, tf.newaxis] # add K=1 axis since K is removed by mixture
dist = self._dist(params)
return tfd.MixtureSameFamily(
mixture_distribution=mix_dist, components_distribution=dist)
@property
def input_shapes(self):
pixel = [self._num_components] + self._dist.input_shapes.params
mask = pixel[:-1] + [1]
return MixtureParameters(pixel, mask)
def get_default_prior(self, batch_dim=(1,)):
pixel = tf.zeros(
list(batch_dim) + self.input_shapes.pixel, dtype=tf.float32)
mask = tf.zeros(list(batch_dim) + self.input_shapes.mask, dtype=tf.float32)
return self(pixel, mask)
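# Illustrative sketch (not part of the original module): the per-pixel mixture
# that MaskedMixture builds, in plain NumPy with hypothetical shapes. Each
# pixel carries a categorical over the K slots (softmax of the mask logits
# after the optional mask activation) and the mixture mean is the mask-weighted
# sum of the per-slot component means.
if __name__ == "__main__":
  import numpy as np
  B, K, H, W, Cp = 2, 3, 8, 8, 3
  rng = np.random.RandomState(0)
  mask_logits = rng.randn(B, K, H, W, 1)
  mu = rng.randn(B, K, H, W, Cp)                      # per-slot component means
  mask = np.exp(mask_logits) / np.exp(mask_logits).sum(axis=1, keepdims=True)
  mixture_mean = (mask * mu).sum(axis=1)              # (B, H, W, Cp)
  assert np.allclose(mask.sum(axis=1), 1.0)
  assert mixture_mean.shape == (B, H, W, Cp)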
| deepmind-research-master | iodine/modules/distributions.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network modules."""
# pylint: disable=g-multiple-import, g-doc-args, g-short-docstring-punctuation
# pylint: disable=g-no-space-after-docstring-summary
from iodine.modules.distributions import FlatParameters
from iodine.modules.utils import flatten_all_but_last, get_act_func
import numpy as np
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
class CNN(snt.AbstractModule):
"""ConvNet2D followed by an MLP.
This is a typical encoder architecture for VAEs, and has been found to work
well. One small improvement is to append coordinate channels on the input,
though for most datasets the improvement obtained is negligible.
"""
def __init__(self, cnn_opt, mlp_opt, mode="flatten", name="cnn"):
"""Constructor.
Args:
cnn_opt: Dictionary. Kwargs for the cnn. See vae_lib.ConvNet2D for
details.
mlp_opt: Dictionary. Kwargs for the mlp. See vae_lib.MLP for details.
mode: String. How to aggregate the CNN features before the MLP: "flatten"
reshapes the final feature map into a vector, "avg_pool" averages over the
spatial dimensions. (default="flatten")
name: String. Optional name.
"""
super().__init__(name=name)
if "activation" in cnn_opt:
cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
self._cnn_opt = cnn_opt
if "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
self._mode = mode
def set_output_shapes(self, shape):
# assert self._mlp_opt['output_sizes'][-1] is None, self._mlp_opt
sg = shapeguard.ShapeGuard()
sg.guard(shape, "1, Y")
self._mlp_opt["output_sizes"][-1] = sg.Y
def _build(self, image):
"""Connect model to TensorFlow graph."""
assert self._mlp_opt["output_sizes"][-1] is not None, "set output_shapes"
sg = shapeguard.ShapeGuard()
flat_image, unflatten = flatten_all_but_last(image, n_dims=3)
sg.guard(flat_image, "B, H, W, C")
cnn = snt.nets.ConvNet2D(
activate_final=True,
paddings=("SAME",),
normalize_final=False,
**self._cnn_opt)
mlp = snt.nets.MLP(**self._mlp_opt)
# run CNN
net = cnn(flat_image)
if self._mode == "flatten":
# flatten
net_shape = net.get_shape().as_list()
flat_shape = net_shape[:-3] + [np.prod(net_shape[-3:])]
net = tf.reshape(net, flat_shape)
elif self._mode == "avg_pool":
net = tf.reduce_mean(net, axis=[1, 2])
else:
raise KeyError('Unknown mode "{}"'.format(self._mode))
# run MLP
output = sg.guard(mlp(net), "B, Y")
return FlatParameters(unflatten(output))
class MLP(snt.AbstractModule):
"""MLP."""
def __init__(self, name="mlp", **mlp_opt):
super().__init__(name=name)
if "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
assert mlp_opt["output_sizes"][-1] is None, mlp_opt
def set_output_shapes(self, shape):
sg = shapeguard.ShapeGuard()
sg.guard(shape, "1, Y")
self._mlp_opt["output_sizes"][-1] = sg.Y
def _build(self, data):
"""Connect model to TensorFlow graph."""
assert self._mlp_opt["output_sizes"][-1] is not None, "set output_shapes"
sg = shapeguard.ShapeGuard()
flat_data, unflatten = flatten_all_but_last(data)
sg.guard(flat_data, "B, N")
mlp = snt.nets.MLP(**self._mlp_opt)
# run MLP
output = sg.guard(mlp(flat_data), "B, Y")
return FlatParameters(unflatten(output))
class DeConv(snt.AbstractModule):
"""MLP followed by Deconv net.
This decoder is commonly used by vanilla VAE models. However, in practice
BroadcastConv (see below) seems to disentangle slightly better.
"""
def __init__(self, mlp_opt, cnn_opt, name="deconv"):
"""Constructor.
Args:
mlp_opt: Dictionary. Kwargs for vae_lib.MLP.
cnn_opt: Dictionary. Kwargs for vae_lib.ConvNet2D for the CNN.
name: Optional name.
"""
super().__init__(name=name)
assert cnn_opt["output_channels"][-1] is None, cnn_opt
if "activation" in cnn_opt:
cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
self._cnn_opt = cnn_opt
if mlp_opt and "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
self._target_out_shape = None
def set_output_shapes(self, shape):
self._target_out_shape = shape
self._cnn_opt["output_channels"][-1] = self._target_out_shape[-1]
def _build(self, z):
"""Connect model to TensorFlow graph."""
sg = shapeguard.ShapeGuard()
flat_z, unflatten = flatten_all_but_last(z)
sg.guard(flat_z, "B, Z")
sg.guard(self._target_out_shape, "H, W, C")
mlp = snt.nets.MLP(**self._mlp_opt)
cnn = snt.nets.ConvNet2DTranspose(
paddings=("SAME",), normalize_final=False, **self._cnn_opt)
net = mlp(flat_z)
output = sg.guard(cnn(net), "B, H, W, C")
return FlatParameters(unflatten(output))
class BroadcastConv(snt.AbstractModule):
"""MLP followed by a broadcast convolution.
This decoder takes a latent vector z, (optionally) applies an MLP to it,
then tiles the resulting vector across space to have dimension [B, H, W, C]
i.e. tiles across H and W. Then coordinate channels are appended and a
convolutional layer is applied.
"""
def __init__(
self,
cnn_opt,
mlp_opt=None,
coord_type="linear",
coord_freqs=3,
name="broadcast_conv",
):
"""Args:
cnn_opt: dict Kwargs for vae_lib.ConvNet2D for the CNN.
mlp_opt: None or dict If dictionary, then kwargs for snt.nets.MLP. If
None, then the model will not process the latent vector by an mlp.
coord_type: ["linear", "cos", None] type of coordinate channels to
add.
None: add no coordinate channels.
linear: two channels with values linearly spaced from -1. to 1. in
the H and W dimension respectively.
cos: coord_freqs^2 - 1 channels containing products of cosine basis
functions (the constant component is dropped).
coord_freqs: int number of frequencies used to construct the cosine
basis functions (only for coord_type=="cos")
name: Optional name.
"""
super().__init__(name=name)
assert cnn_opt["output_channels"][-1] is None, cnn_opt
if "activation" in cnn_opt:
cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
self._cnn_opt = cnn_opt
if mlp_opt and "activation" in mlp_opt:
mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
self._mlp_opt = mlp_opt
self._target_out_shape = None
self._coord_type = coord_type
self._coord_freqs = coord_freqs
def set_output_shapes(self, shape):
self._target_out_shape = shape
self._cnn_opt["output_channels"][-1] = self._target_out_shape[-1]
def _build(self, z):
"""Connect model to TensorFlow graph."""
assert self._target_out_shape is not None, "Call set_output_shape"
# reshape components into batch dimension before processing them
sg = shapeguard.ShapeGuard()
flat_z, unflatten = flatten_all_but_last(z)
sg.guard(flat_z, "B, Z")
sg.guard(self._target_out_shape, "H, W, C")
if self._mlp_opt is None:
mlp = tf.identity
else:
mlp = snt.nets.MLP(activate_final=True, **self._mlp_opt)
mlp_output = sg.guard(mlp(flat_z), "B, hidden")
# tile MLP output spatially and append coordinate channels
broadcast_mlp_output = tf.tile(
mlp_output[:, tf.newaxis, tf.newaxis],
multiples=tf.constant(sg["1, H, W, 1"]),
) # B, H, W, Z
dec_cnn_inputs = self.append_coordinate_channels(broadcast_mlp_output)
cnn = snt.nets.ConvNet2D(
paddings=("SAME",), normalize_final=False, **self._cnn_opt)
cnn_outputs = cnn(dec_cnn_inputs)
sg.guard(cnn_outputs, "B, H, W, C")
return FlatParameters(unflatten(cnn_outputs))
def append_coordinate_channels(self, output):
sg = shapeguard.ShapeGuard()
sg.guard(output, "B, H, W, C")
if self._coord_type is None:
return output
if self._coord_type == "linear":
w_coords = tf.linspace(-1.0, 1.0, sg.W)[None, None, :, None]
h_coords = tf.linspace(-1.0, 1.0, sg.H)[None, :, None, None]
w_coords = tf.tile(w_coords, sg["B, H, 1, 1"])
h_coords = tf.tile(h_coords, sg["B, 1, W, 1"])
return tf.concat([output, h_coords, w_coords], axis=-1)
elif self._coord_type == "cos":
freqs = sg.guard(tf.range(0.0, self._coord_freqs), "F")
valx = tf.linspace(0.0, np.pi, sg.W)[None, None, :, None, None]
valy = tf.linspace(0.0, np.pi, sg.H)[None, :, None, None, None]
x_basis = tf.cos(valx * freqs[None, None, None, :, None])
y_basis = tf.cos(valy * freqs[None, None, None, None, :])
xy_basis = tf.reshape(x_basis * y_basis, sg["1, H, W, F*F"])
coords = tf.tile(xy_basis, sg["B, 1, 1, 1"])[..., 1:]
return tf.concat([output, coords], axis=-1)
else:
raise KeyError('Unknown coord_type: "{}"'.format(self._coord_type))
class LSTM(snt.RNNCore):
"""Wrapper around snt.LSTM that supports multi-layers and runs K components in
parallel.
Expects input data of shape (B, K, H) and outputs data of shape (B, K, Y)
"""
def __init__(self, hidden_sizes, name="lstm"):
super().__init__(name=name)
self._hidden_sizes = hidden_sizes
with self._enter_variable_scope():
self._lstm_layers = [snt.LSTM(hidden_size=h) for h in self._hidden_sizes]
def initial_state(self, batch_size, **kwargs):
return [
lstm.initial_state(batch_size, **kwargs) for lstm in self._lstm_layers
]
def _build(self, data, prev_states):
assert not self._hidden_sizes or self._hidden_sizes[-1] is not None
assert len(prev_states) == len(self._hidden_sizes)
sg = shapeguard.ShapeGuard()
sg.guard(data, "B, K, H")
data = sg.reshape(data, "B*K, H")
out = data
new_states = []
for lstm, pstate in zip(self._lstm_layers, prev_states):
out, nstate = lstm(out, pstate)
new_states.append(nstate)
sg.guard(out, "B*K, Y")
out = sg.reshape(out, "B, K, Y")
return out, new_states
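# Illustrative sketch (not part of the original module): the coordinate
# channels appended by BroadcastConv, built in plain NumPy for a hypothetical
# 4x6 grid. "linear" adds two channels sweeping -1..1 over height and width;
# "cos" adds coord_freqs^2 - 1 channels of cosine basis products (the constant
# component is dropped, as in append_coordinate_channels above).
if __name__ == "__main__":
  import numpy as np
  H, W, F = 4, 6, 3
  h = np.linspace(-1.0, 1.0, H)[:, None, None] * np.ones((1, W, 1))
  w = np.linspace(-1.0, 1.0, W)[None, :, None] * np.ones((H, 1, 1))
  linear_coords = np.concatenate([h, w], axis=-1)             # (H, W, 2)
  freqs = np.arange(float(F))
  valx = np.linspace(0.0, np.pi, W)[None, :, None, None]
  valy = np.linspace(0.0, np.pi, H)[:, None, None, None]
  basis = np.cos(valx * freqs[None, None, :, None]) * np.cos(valy * freqs[None, None, None, :])
  cos_coords = basis.reshape(H, W, F * F)[..., 1:]            # (H, W, F*F - 1)
  assert linear_coords.shape == (H, W, 2)
  assert cos_coords.shape == (H, W, F * F - 1)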
| deepmind-research-master | iodine/modules/networks.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for IODINE."""
# pylint: disable=g-doc-bad-indent, g-doc-return-or-yield, g-doc-args
# pylint: disable=missing-docstring
import importlib
import math
from absl import logging
from matplotlib.colors import hsv_to_rgb
import numpy as np
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
ACT_FUNCS = {
"identity": tf.identity,
"sigmoid": tf.nn.sigmoid,
"tanh": tf.nn.tanh,
"relu": tf.nn.relu,
"elu": tf.nn.elu,
"selu": tf.nn.selu,
"softplus": tf.nn.softplus,
"exp": tf.exp,
"softmax": tf.nn.softmax,
}
def get_act_func(name_or_func):
if name_or_func is None:
return tf.identity
if callable(name_or_func):
return name_or_func
elif isinstance(name_or_func, str):
return ACT_FUNCS[name_or_func.lower()]
else:
raise KeyError(
'Unknown activation function "{}" of type {}'.format(
name_or_func, type(name_or_func)
)
)
DISTS = {
"normal": tfd.Normal,
"log_normal": tfd.LogNormal,
"laplace": tfd.Laplace,
"logistic": tfd.Logistic,
}
def get_distribution(name_or_dist):
if isinstance(name_or_dist, type(tfd.Normal)):
return name_or_dist
elif isinstance(name_or_dist, str):
return DISTS[name_or_dist.lower()]
raise KeyError(
'Unknown distribution "{}" of type {}'.format(name_or_dist,
type(name_or_dist)))
def get_mask_plot_colors(nr_colors):
"""Get nr_colors uniformly spaced hues to plot mask values."""
hsv_colors = np.ones((nr_colors, 3), dtype=np.float32)
hsv_colors[:, 0] = np.linspace(0, 1, nr_colors, endpoint=False)
color_conv = hsv_to_rgb(hsv_colors)
return color_conv
def color_transform(masks):
with tf.name_scope("color_transform"):
n_components = masks.shape.as_list()[-1]
colors = tf.constant(get_mask_plot_colors(n_components), name="mask_colors")
return tf.tensordot(masks, colors, axes=1)
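def _color_transform_sketch():
  """Illustrative sketch, not part of the original module.
  Shows in plain NumPy what color_transform computes: every pixel's RGB value
  is the mask-weighted average of K uniformly spaced hue colors, turning a
  soft (H, W, K) mask into an (H, W, 3) visualization. Sizes are hypothetical.
  """
  k, h, w = 4, 8, 8
  colors = get_mask_plot_colors(k)                 # (K, 3) rows of RGB colors
  mask = np.random.RandomState(0).rand(h, w, k)
  mask /= mask.sum(axis=-1, keepdims=True)         # normalize over the K slots
  return np.tensordot(mask, colors, axes=1)        # (H, W, 3)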
def construct_diagnostic_image(
images,
recons,
masks,
components,
border_width=2,
nr_images=6,
clip=True,
mask_components=False,
):
"""Construct a single image containing image, recons., mask, and components.
Args:
images: (B, H, W, C)
recons: (B, H, W, C)
masks: (B, H, W, K)
components: (B, H, W, K, C)
border_width: int. width of the border in pixels. (default=2)
nr_images: int. Number of images to include. (default=6)
clip: bool. Whether to clip the final image to range [0, 1].
Returns:
diag_images: (nr, H+border_width*2, (W+border_width*2) * (K+3), 3)
"""
with tf.name_scope("diagnostic_image"):
# transform the masks into RGB images
recolored_masks = color_transform(masks[:nr_images])
if images.get_shape().as_list()[-1] == 1:
# deal with grayscale images
images = tf.tile(images[:nr_images], [1, 1, 1, 3])
recons = tf.tile(recons[:nr_images], [1, 1, 1, 3])
components = tf.tile(components[:nr_images], [1, 1, 1, 1, 3])
if mask_components:
components *= masks[:nr_images, ..., tf.newaxis]
# Pad everything
no_pad, pad = (0, 0), (border_width, border_width)
paddings = tf.constant([no_pad, pad, pad, no_pad])
paddings_components = tf.constant([no_pad, pad, pad, no_pad, no_pad])
pad_images = tf.pad(images[:nr_images], paddings, constant_values=0.5)
pad_recons = tf.pad(recons[:nr_images], paddings, constant_values=0.5)
pad_masks = tf.pad(recolored_masks, paddings, constant_values=1.0)
pad_components = tf.pad(
components[:nr_images], paddings_components, constant_values=0.5
)
# reshape components into single wide image
pad_components = tf.transpose(pad_components, [0, 1, 3, 2, 4])
pc_shape = pad_components.shape.as_list()
pc_shape[2] = pc_shape[2] * pc_shape.pop(3)
pad_components = tf.reshape(pad_components, pc_shape)
# concatenate all parts along width
diag_imgs = tf.concat(
[pad_images, pad_recons, pad_masks, pad_components], axis=2
)
# concatenate all images along height
diag_shape = diag_imgs.shape.as_list()
final_img = tf.reshape(diag_imgs, [1, -1, diag_shape[2], diag_shape[3]])
if clip:
final_img = tf.clip_by_value(final_img, 0.0, 1.0)
return final_img
def construct_reconstr_image(images, recons, border_width=2,
nr_images=6, clip=True):
"""Construct a single image containing image, and recons.
Args:
images: (B, H, W, C)
recons: (B, H, W, C)
border_width: int. width of the border in pixels. (default=2)
nr_images: int. Number of images to include. (default=6)
clip: bool. Whether to clip the final image to range [0, 1].
Returns:
rec_images: (nr, H+border_width*2, (W+border_width*2) * 2, 3)
"""
with tf.name_scope("diagnostic_image"):
# Pad everything
no_pad, pad = (0, 0), (border_width, border_width)
paddings = tf.constant([no_pad, pad, pad, no_pad])
pad_images = tf.pad(images[:nr_images], paddings, constant_values=0.5)
pad_recons = tf.pad(recons[:nr_images], paddings, constant_values=0.5)
# concatenate all parts along width
diag_imgs = tf.concat([pad_images, pad_recons], axis=2)
# concatenate all images along height
diag_shape = diag_imgs.shape.as_list()
final_img = tf.reshape(diag_imgs, [1, -1, diag_shape[2], diag_shape[3]])
if clip:
final_img = tf.clip_by_value(final_img, 0.0, 1.0)
return final_img
def construct_iterations_image(
images, recons, masks, border_width=2, nr_seqs=2, clip=True
):
"""Construct a single image containing image, and recons.
Args:
images: (B, T, 1, H, W, C)
recons: (B, T, 1, H, W, C)
masks: (B, T, K, H, W, 1)
border_width: int. width of the border in pixels. (default=2)
nr_seqs: int. Number of sequences to include. (default=2)
clip: bool. Whether to clip the final image to range [0, 1].
Returns:
rec_images: (1, nr_seqs * 3 * (H+border_width*2), T * (W+border_width*2), 3)
"""
sg = shapeguard.ShapeGuard()
sg.guard(recons, "B, T, 1, H, W, C")
if images.get_shape().as_list()[1] == 1:
images = tf.tile(images, sg["1, T, 1, 1, 1, 1"])
sg.guard(images, "B, T, 1, H, W, C")
sg.guard(masks, " B, T, K, H, W, 1")
if sg.C == 1: # deal with grayscale
images = tf.tile(images, [1, 1, 1, 1, 1, 3])
recons = tf.tile(recons, [1, 1, 1, 1, 1, 3])
sg.S = min(nr_seqs, sg.B)
with tf.name_scope("diagnostic_image"):
# convert masks to rgb
masks_trans = tf.transpose(masks[:nr_seqs], [0, 1, 5, 3, 4, 2])
recolored_masks = color_transform(masks_trans)
# Pad everything
no_pad, pad = (0, 0), (border_width, border_width)
paddings = tf.constant([no_pad, no_pad, no_pad, pad, pad, no_pad])
pad_images = tf.pad(images[:nr_seqs], paddings, constant_values=0.5)
pad_recons = tf.pad(recons[:nr_seqs], paddings, constant_values=0.5)
pad_masks = tf.pad(recolored_masks, paddings, constant_values=0.5)
# concatenate all parts along width
triples = tf.concat([pad_images, pad_recons, pad_masks], axis=3)
triples = sg.guard(triples[:, :, 0], "S, T, 3*Hp, Wp, 3")
# concatenate iterations along width and sequences along height
final = tf.reshape(
tf.transpose(triples, [0, 2, 1, 3, 4]), sg["1, S*3*Hp, Wp*T, 3"]
)
if clip:
final = tf.clip_by_value(final, 0.0, 1.0)
return final
def get_overview_image(image, output_dist, mask_components=False):
recons = output_dist.mean()[:, 0]
image = image[:, 0]
if hasattr(output_dist, "mixture_distribution") and hasattr(
output_dist, "components_distribution"
):
mask = output_dist.mixture_distribution.probs[:, 0]
components = output_dist.components_distribution.mean()[:, 0]
return construct_diagnostic_image(
image, recons, mask, components, mask_components=mask_components
)
else:
return construct_reconstr_image(image, recons)
class OnlineMeanVarEstimator(snt.AbstractModule):
"""Online estimator for mean and variance using Welford's algorithm."""
def __init__(self, axis=None, ddof=0.0, name="online_mean_var"):
super().__init__(name=name)
self._axis = axis
self._ddof = ddof
def _build(self, x, weights=None):
if weights is None:
weights = tf.ones_like(x)
if weights.get_shape().as_list() != x.get_shape().as_list():
weights = tf.broadcast_to(weights, x.get_shape().as_list())
sum_weights = tf.reduce_sum(weights, axis=self._axis)
shape = sum_weights.get_shape().as_list()
total = tf.get_variable(
"total",
shape=shape,
dtype=weights.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
)
mean = tf.get_variable(
"mean",
shape=shape,
dtype=x.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
)
m2 = tf.get_variable(
"M2",
shape=shape,
dtype=x.dtype,
initializer=tf.zeros_initializer(),
trainable=False,
)
total_update = tf.assign_add(total, sum_weights)
with tf.control_dependencies([total_update]):
delta = (x - mean) * weights
mean_update = tf.assign_add(
mean, tf.reduce_sum(delta, axis=self._axis) / total
)
with tf.control_dependencies([mean_update]):
delta2 = x - mean
m2_update = tf.assign_add(
m2, tf.reduce_sum(delta * delta2, axis=self._axis)
)
with tf.control_dependencies([m2_update]):
return tf.identity(mean), m2 / (total - self._ddof), tf.identity(total)
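def _welford_sketch(xs, ddof=0.0):
  """Illustrative sketch, not part of the original module.
  A plain-Python, unit-weight version of the Welford update implemented by
  OnlineMeanVarEstimator above: total += 1, mean += delta / total,
  M2 += delta * (x - new_mean), and the variance estimate is
  M2 / (total - ddof). For example _welford_sketch([1., 2., 3.], ddof=1.)
  returns (2.0, 1.0, 3.0).
  """
  total, mean, m2 = 0.0, 0.0, 0.0
  for x in xs:
    total += 1.0
    delta = x - mean
    mean += delta / total
    m2 += delta * (x - mean)
  return mean, m2 / (total - ddof), total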
def print_shapes(name, value, indent=""):
if isinstance(value, dict):
print("{}{}:".format(indent, name))
for k, v in sorted(value.items(),
key=lambda x: (isinstance(x[1], dict), x[0])):
print_shapes(k, v, indent + " ")
elif isinstance(value, list):
print(
"{}{}[{}]: {} @ {}".format(
indent, name, len(value), value[0].shape, value[0].dtype
)
)
elif isinstance(value, np.ndarray):
print("{}{}: {} @ {}".format(indent, name, value.shape, value.dtype))
elif isinstance(value, tf.Tensor):
print(
"{}{}: {} @ {}".format(
indent, name, value.get_shape().as_list(), value.dtype
)
)
elif np.isscalar(value):
print("{}{}: {}".format(indent, name, value))
else:
print("{}{}.type: {}".format(indent, name, type(value)))
def _pad_images(images, image_border_value=0.5, border_width=2):
"""Pad images to create gray borders.
Args:
images: Tensor of shape [B, H], [B, H, W], or [B, H, W, C].
image_border_value: Scalar value of the greyscale border for images.
border_width: Int. Border width in pixels.
Raises:
ValueError: if the image provided is not {2,3,4} dimensional.
Returns:
Tensor of same shape as images, except H and W become H + 2 * border_width
and W + 2 * border_width.
"""
image_rank = len(images.get_shape())
border_paddings = (border_width, border_width)
if image_rank == 2: # [B, H]
paddings = [(0, 0), border_paddings]
elif image_rank == 3: # [B, H, W]
paddings = [(0, 0), border_paddings, border_paddings]
elif image_rank == 4: # [B, H, W, C]
paddings = [(0, 0), border_paddings, border_paddings, (0, 0)]
else:
raise ValueError("expected image to be 2D, 3D or 4D, got %d" % image_rank)
paddings = tf.constant(paddings)
return tf.pad(images, paddings, "CONSTANT",
constant_values=image_border_value)
def images_to_grid(
images,
grid_height=None,
grid_width=4,
max_grid_height=4,
max_grid_width=4,
image_border_value=0.5,
):
"""Combine images and arrange them in a grid.
Args:
images: Tensor of shape [B, H], [B, H, W], or [B, H, W, C].
grid_height: Height of the grid of images to output, or None. Either
`grid_width` or `grid_height` must be set to an integer value.
If None, `grid_height` is set to ceil(B/`grid_width`), and capped at
`max_grid_height` when provided.
grid_width: Width of the grid of images to output, or None. Either
`grid_width` or `grid_height` must be set to an integer value.
If None, `grid_width` is set to ceil(B/`grid_height`), and capped at
`max_grid_width` when provided.
max_grid_height: Maximum allowable height of the grid of images to
output, or None. Only used when `grid_height` is None.
max_grid_width: Maximum allowable width of the grid of images to output,
or None. Only used when `grid_width` is None.
image_border_value: None or scalar value of the greyscale border for images.
If None, then no border is rendered.
Raises:
ValueError: if neither of grid_width or grid_height are set to a positive
integer.
Returns:
images: Tensor of shape [height*H, width*W, C].
C will be set to 1 if the input was provided with no channels.
Contains all input images in a grid.
"""
# If only one dimension is set, infer how big the other one should be.
if grid_height is None:
if not isinstance(grid_width, int) or grid_width <= 0:
raise ValueError(
"if `grid_height` is None, `grid_width` must be " "a positive integer"
)
grid_height = int(math.ceil(images.get_shape()[0].value / grid_width))
if max_grid_height is not None:
grid_height = min(max_grid_height, grid_height)
if grid_width is None:
if not isinstance(grid_height, int) or grid_height <= 0:
raise ValueError(
"if `grid_width` is None, `grid_height` must be " "a positive integer"
)
grid_width = int(math.ceil(images.get_shape()[0].value / grid_height))
if max_grid_width is not None:
grid_width = min(max_grid_width, grid_width)
images = images[: grid_height * grid_width, ...]
# Pad with extra blank frames if grid_height x grid_width is less than the
# number of frames provided.
pre_images_shape = images.get_shape().as_list()
if pre_images_shape[0] < grid_height * grid_width:
pre_images_shape[0] = grid_height * grid_width - pre_images_shape[0]
if image_border_value is not None:
dummy_frames = image_border_value * tf.ones(
shape=pre_images_shape, dtype=images.dtype
)
else:
dummy_frames = tf.zeros(shape=pre_images_shape, dtype=images.dtype)
images = tf.concat([images, dummy_frames], axis=0)
if image_border_value:
images = _pad_images(images, image_border_value=image_border_value)
images_shape = images.get_shape().as_list()
images = tf.reshape(images, [grid_height, grid_width] + images_shape[1:])
if len(images_shape) == 2:
images = tf.expand_dims(images, -1)
if len(images_shape) <= 3:
images = tf.expand_dims(images, -1)
image_height, image_width, channels = images.get_shape().as_list()[2:]
images = tf.transpose(images, perm=[0, 2, 1, 3, 4])
images = tf.reshape(
images, [grid_height * image_height, grid_width * image_width, channels]
)
return images
def flatten_all_but_last(tensor, n_dims=1):
shape = tensor.shape.as_list()
batch_dims = shape[:-n_dims]
flat_tensor = tf.reshape(tensor, [np.prod(batch_dims)] + shape[-n_dims:])
def unflatten(other_tensor):
other_shape = other_tensor.shape.as_list()
return tf.reshape(other_tensor, batch_dims + other_shape[1:])
return flat_tensor, unflatten
def ensure_3d(tensor):
if tensor.shape.ndims == 2:
return tensor[..., None]
assert tensor.shape.ndims == 3
return tensor
built_element_cache = {
"none": None,
"global_step": tf.train.get_or_create_global_step(),
}
def build(plan, identifier):
logging.debug("building %s", identifier)
if identifier in built_element_cache:
logging.debug("%s is already built, returning", identifier)
return built_element_cache[identifier]
elif not isinstance(plan, dict):
return plan
elif "constructor" in plan:
ctor = _resolve_constructor(plan)
kwargs = {
k: build(v, identifier=k) for k, v in plan.items() if k != "constructor"
}
with tf.variable_scope(identifier):
built_element_cache[identifier] = ctor(**kwargs)
return built_element_cache[identifier]
else:
return {k: build(v, identifier=k) for k, v in plan.items()}
def _resolve_constructor(plan_subsection):
assert "constructor" in plan_subsection, plan_subsection
if isinstance(plan_subsection["constructor"], str):
module, _, ctor = plan_subsection["constructor"].rpartition(".")
mod = importlib.import_module(module)
return getattr(mod, ctor)
else:
return plan_subsection["constructor"]
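# Illustrative sketch (not part of the original module): a minimal, framework-
# free mimic of the plan resolution performed by `build` above, assuming plain
# Python callables are used as constructors instead of "module.Class" strings,
# and ignoring the element cache and variable scopes.
if __name__ == "__main__":
  def _toy_build(plan):
    if not isinstance(plan, dict):
      return plan
    if "constructor" in plan:
      kwargs = {k: _toy_build(v) for k, v in plan.items() if k != "constructor"}
      return plan["constructor"](**kwargs)
    return {k: _toy_build(v) for k, v in plan.items()}
  # Nested plans are resolved bottom-up before the outer constructor is called.
  assert _toy_build({"constructor": dict, "inner": {"constructor": list}}) == {"inner": []}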
| deepmind-research-master | iodine/modules/utils.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stochastic Variational inference Auto-Encoder."""
# pylint: disable=unused-variable, g-bad-todo
import collections
from iodine.modules import utils
from multi_object_datasets.segmentation_metrics import adjusted_rand_index
import numpy as np
import shapeguard
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
logging = tf.logging
DEFAULT_INPUTS = (
"image",
"zp",
"mask",
"components",
"dmask",
"dzp",
"dcomponents",
"posterior",
"log_prob",
"pred_mask",
"capacity",
"flat_capacity",
"coordinates",
"counterfactual",
)
DEFAULT_PREPROCESSING = [
"dcomponents", "dmask", "dzp", "log_prob", "counterfactual"
]
DEFAULT_STOP_GRADIENT = ("dzp", "dmask", "dcomponents", "log_prob",
"counterfactual")
class IODINE(snt.AbstractModule):
"""Iterative Amortized Variational Autoencoder.
Args:
decoder (decoders.ComponentDecoder): The decoder.
refinement_core (refinement.RefinementCore): The recurrent (refinement)
encoder.
latent_dist (distributions.Distribution): The distribution of the latent
z variables.
output_dist (distributions.MaskedMixture): The pixel-wise output
distribution (a spatial mixture).
n_z (int): Dimensionality of the per-object latents z_k.
num_components (int): Number of available object slots (K).
num_iters (int): Number of refinement iterations.
sequential (bool): Whether the input data is sequential.
factor_evaluator (factor_eval.FactorRegressor): The factor evaluation
model that is trained to predict the true factors from the inferred
latents.
stop_gradients (List[str]): For which refinement inputs to stop
gradients from backpropagating through the iterations. (see inputs for
valid values)
Default is: ["dcomponents", "dmask", "dzp", "log_prob",
"counterfactual"]
iter_loss_weight ("linspace" | float | List[float]): How to weigh the
loss terms for each timestep.
Can be:
"linspace": Linearly increasing weights from 0 to 1.0.
float: A fixed value for all steps.
List[float]: Manually specify all weights.
inputs (List[str]): list of inputs to use for the refinement network.
Can include the following (default is to use all): ["image", "zp",
"mask", "components", "dmask", "dzp", "dcomponents", "posterior",
"log_prob", "pred_mask", "capacity", "flat_capacity",
"coordinates", "counterfactual"]
preprocess (List[str]): Specifies the subset of inputs that should be
preprocessed with layernorm.
Default is: ["dcomponents", "dmask", "dzp", "log_prob",
"counterfactual"]
coord_type (str): Type of coordinate channels to append to the
refinement inputs. Can be "linear" (default) or "cos".
coord_freqs (int): If using cos based coordinate channels, then this
specifies the number of frequencies used.
name (str): Name of the module (within the tensorflow graph).
"""
def __init__(
self,
decoder,
refinement_core,
latent_dist,
output_dist,
n_z,
num_components,
num_iters,
sequential=False,
factor_evaluator=None,
stop_gradients=DEFAULT_STOP_GRADIENT,
iter_loss_weight="linspace",
inputs=DEFAULT_INPUTS,
preprocess=None,
coord_type="linear",
coord_freqs=3,
name="iodine",
):
super().__init__(name=name)
self._sg = shapeguard.ShapeGuard(dims={"K": num_components})
self.decoder = decoder
self.refinement_core = refinement_core
self.latent_dist = latent_dist
self.output_dist = output_dist
self.n_z = n_z
self.num_components = num_components
self.num_iters = num_iters
self.sequential = sequential
self.iter_loss_weights = self._parse_iter_loss_weights(iter_loss_weight)
self.factor_evaluator = factor_evaluator
self.stop_gradients = stop_gradients
self.inputs = inputs
self.preprocess = DEFAULT_PREPROCESSING if preprocess is None else preprocess
self.coord_type = coord_type
self.coord_freqs = coord_freqs
with self._enter_variable_scope():
self.latent_dist.set_output_shape([self.n_z])
logging.info("VAE: z shape: %s", [self.n_z])
with tf.name_scope("prior"):
self.prior = self.latent_dist.get_default_prior((self.num_components,))
self._sg.guard(self.prior, "K, Z")
with tf.variable_scope("preprocess"):
self._layernorms = {
name: snt.LayerNorm(name="layer_norm_" + name)
for name in self.preprocess
}
def _build(self, data):
data["image"] = data["image"][:, :self.num_iters + 1]
if "mask" in data:
data["mask"] = data["mask"][:, :self.num_iters + 1]
x = self._sg.guard(data["image"], "B, T, H, W, C")
self._propagate_shape_info(x.get_shape().as_list())
# run iterative encoder
iterations = self.encode(x)
z_dist = self._sg.guard(iterations["z_dist"][-1], "B, K, Z")
z = self._sg.guard(iterations["z"][-1], "B, K, Z")
# decode
x_params, x_dist = self.decode(z)
iterations["x_dist"].append(self._sg.guard(x_dist, "B, 1, H, W, C"))
# compute loss
kl = self._sg.guard(self._raw_kl(z_dist), "B, K")
img = self._get_image_for_iter(x, self.num_iters)
re = self._sg.guard(self._reconstruction_error(x_dist, img), "B")
iterations["kl"].append(kl)
iterations["re"].append(re)
iterations["recons_loss"] = [tf.reduce_mean(re) for re in iterations["re"]]
total_rec_loss = sum([
w * re
for w, re in zip(self.iter_loss_weights, iterations["recons_loss"])
])
total_kl_loss = sum([
w * tf.reduce_mean(tf.reduce_sum(kl, axis=1))
for w, kl in zip(self.iter_loss_weights, iterations["kl"])
])
total_loss = total_rec_loss + total_kl_loss
scalars = {
"loss/kl":
sum([
tf.reduce_mean(tf.reduce_sum(kl, axis=1))
for kl in iterations["kl"]
]),
"loss/recons":
total_rec_loss,
}
if self.factor_evaluator:
pred_mask = self._sg.guard(x_dist.mixture_distribution.probs,
"B, 1, H, W, K")
pred_mask = tf.transpose(pred_mask, [0, 4, 2, 3, 1])
mask_true = self._sg.guard(data["mask"], "B, T, L, H, W, 1")
mask_true = self._get_image_for_iter(mask_true, self.num_iters)
mask_true = mask_true[:, 0]
factor_loss, factor_scalars, _, _, _ = self.factor_evaluator(
tf.stop_gradient(z),
data["factors"],
data["visibility"],
tf.stop_gradient(pred_mask),
mask_true,
)
total_loss += factor_loss
scalars["factor/loss"] = factor_loss
scalars.update({"factor/" + k: v for k, v in factor_scalars.items()})
scalars["loss/total"] = total_loss
scalars.update(self._get_monitored_scalars(x_dist, data))
logging.info(self._sg.dims)
return total_loss, scalars, iterations
@snt.reuse_variables
def encode(self, images):
sg = self._sg
sg.guard(images, "B, T, H, W, C")
zp, z_dist, z = self._get_initial_z()
iterations = {
"z": [z],
"zp": [zp],
"z_dist": [z_dist],
"x_dist": [],
"inputs": [],
"kl": [],
"re": [],
}
state = self.refinement_core.initial_state(sg["B*K"][0])
for t in range(self.num_iters):
img = sg.guard(self._get_image_for_iter(images, t), "B, 1, H, W, C")
x_params, x_dist = self.decode(z)
# compute loss
kl = self._sg.guard(self._raw_kl(z_dist), "B, K")
re = self._sg.guard(self._reconstruction_error(x_dist, img), "B")
loss = tf.reduce_mean(re) + tf.reduce_mean(tf.reduce_sum(kl, axis=1))
inputs = self._get_inputs_for(x_params, x_dist, img, z_dist, zp, loss)
zp, state = self.refinement_core(inputs, state)
sg.guard(zp, "B, K, Zp")
z_dist = sg.guard(self.latent_dist(zp), "B, K, Z")
z = z_dist.sample()
# append local variables to iteration collections
for v, name in zip(
[z, zp, z_dist, x_dist, inputs, kl, re],
["z", "zp", "z_dist", "x_dist", "inputs", "kl", "re"],
):
iterations[name].append(v)
return iterations
@snt.reuse_variables
def decode(self, z):
sg = shapeguard.ShapeGuard()
sg.guard(z, "B, K, Z")
# legacy
z = tf.concat([z, 5.0 * tf.ones(sg["B, K, 1"], dtype=tf.float32)], axis=2)
params = self.decoder(z)
out_dist = self.output_dist(*params)
return params, out_dist
@snt.reuse_variables
def eval(self, data):
total_loss, scalars, iterations = self._build(data)
sg = shapeguard.ShapeGuard()
def get_components(dist):
return tf.transpose(dist.components_distribution.mean()[:, 0, :, :, :, :],
[0, 3, 1, 2, 4])
def get_mask(dist):
return tf.transpose(dist.mixture_distribution.probs[:, :, :, :, :],
[0, 4, 2, 3, 1])
def get_mask_logits(dist):
return tf.transpose(dist.mixture_distribution.logits[:, :, :, :, :],
[0, 4, 2, 3, 1])
def stack_iters(list_of_variables, pad_zero=False):
if pad_zero:
list_of_variables.insert(0, tf.zeros_like(list_of_variables[0]))
return tf.stack(list_of_variables, axis=1)
# data
image = sg.guard(data["image"], "B, 1, H, W, C")
true_mask = sg.guard(data["mask"], "B, 1, L, H, W, 1")
visibility = sg.guard(data["visibility"], "B, L")
factors = data["factors"]
# inputs
inputs_flat = {
k: stack_iters([inp["flat"][k] for inp in iterations["inputs"]],
pad_zero=True)
for k in iterations["inputs"][0]["flat"].keys()
}
inputs_spatial = {
k: stack_iters([inp["spatial"][k] for inp in iterations["inputs"]],
pad_zero=True)
for k in iterations["inputs"][0]["spatial"].keys()
}
# latent
z = sg.guard(stack_iters(iterations["z"]), "B, T, K, Z")
z_mean = stack_iters([zd.mean() for zd in iterations["z_dist"]])
z_std = stack_iters([zd.stddev() for zd in iterations["z_dist"]])
# outputs
recons = stack_iters([xd.mean() for xd in iterations["x_dist"]])
pred_mask = stack_iters([get_mask(xd) for xd in iterations["x_dist"]])
pred_mask_logits = stack_iters(
[get_mask_logits(xd) for xd in iterations["x_dist"]])
components = stack_iters(
[get_components(xd) for xd in iterations["x_dist"]])
# metrics
tm = tf.transpose(true_mask[..., 0], [0, 1, 3, 4, 2])
tm = tf.reshape(tf.tile(tm, sg["1, T, 1, 1, 1"]), sg["B * T, H * W, L"])
pm = tf.transpose(pred_mask[..., 0], [0, 1, 3, 4, 2])
pm = tf.reshape(pm, sg["B * T, H * W, K"])
ari = tf.reshape(adjusted_rand_index(tm, pm), sg["B, T"])
ari_nobg = tf.reshape(adjusted_rand_index(tm[..., 1:], pm), sg["B, T"])
mse = tf.reduce_mean(tf.square(recons - image[:, None]), axis=[2, 3, 4, 5])
# losses
loss_recons = stack_iters(iterations["re"])
kl = stack_iters(iterations["kl"])
info = {
"data": {
"image": sg.guard(image, "B, 1, H, W, C"),
"true_mask": sg.guard(true_mask, "B, 1, L, H, W, 1"),
"visibility": sg.guard(visibility, "B, L"),
"factors": factors,
},
"inputs": {
"flat": inputs_flat,
"spatial": inputs_spatial
},
"latent": {
"z": sg.guard(z, "B, T, K, Z"),
"z_mean": sg.guard(z_mean, "B, T, K, Z"),
"z_std": sg.guard(z_std, "B, T, K, Z"),
},
"outputs": {
"recons": sg.guard(recons, "B, T, 1, H, W, C"),
"pred_mask": sg.guard(pred_mask, "B, T, K, H, W, 1"),
"pred_mask_logits": sg.guard(pred_mask_logits, "B, T, K, H, W, 1"),
"components": sg.guard(components, "B, T, K, H, W, C"),
},
"losses": {
"total": total_loss,
"recons": sg.guard(loss_recons, "B, T"),
"kl": sg.guard(kl, "B, T, K"),
},
"metrics": {
"ari": ari,
"ari_nobg": ari_nobg,
"mse": mse
},
}
if self.factor_evaluator:
# factor evaluation information
factor_info = {
"loss": [],
"metrics": collections.defaultdict(list),
"predictions": collections.defaultdict(list),
"assignment": [],
}
for t in range(z.get_shape().as_list()[1]):
floss, fscalars, _, fpred, fass = self.factor_evaluator(
z[:, t], factors, visibility, pred_mask[:, t], true_mask[:, 0])
factor_info["loss"].append(floss)
factor_info["assignment"].append(fass)
for k in fpred:
factor_info["predictions"][k].append(
tf.reduce_sum(fpred[k] * fass[..., None], axis=2))
factor_info["metrics"][k].append(fscalars[k])
info["losses"]["factor"] = sg.guard(tf.stack(factor_info["loss"]), "T")
info["factor_regressor"] = {
"assignment":
sg.guard(stack_iters(factor_info["assignment"]), "B, T, L, K"),
"metrics": {
k: tf.stack(factor_info["metrics"][k], axis=0)
for k in factor_info["metrics"]
},
"predictions": {
k: stack_iters(factor_info["predictions"][k])
for k in factor_info["predictions"]
},
}
return info
@snt.reuse_variables
def get_sample_images(self, nr_samples=16):
with tf.name_scope("prior_samples"):
prior_z = self.prior.sample(nr_samples)
_, prior_out = self.decode(prior_z)
prior_out = tf.clip_by_value(prior_out.mean(), 0.0, 1.0)
return utils.images_to_grid(prior_out[:, 0])[tf.newaxis]
@snt.reuse_variables
def get_overview_images(self, data, nr_images=4, mask_components=False):
x = data["image"][:nr_images, :self.num_iters + 1]
old_b, self._sg.B = self._sg.B, x.get_shape().as_list()[0]
iterations = self.encode(x)
z = iterations["z"][-1]
_, x_dist = self.decode(z)
self._sg.B = old_b
t = min(self.num_iters, x.get_shape().as_list()[1]) - 1
# iterations view
recons = tf.stack([x_dist.mean() for x_dist in iterations["x_dist"]],
axis=1)
masks = tf.stack(
[
tf.transpose(x_dist.mixture_distribution.probs, [0, 4, 2, 3, 1])
for x_dist in iterations["x_dist"]
],
axis=1,
)
return {
"overview":
utils.get_overview_image(
x[:, t:t + 1], x_dist, mask_components=mask_components),
"sequence":
utils.construct_iterations_image(x[:, :t + 1, tf.newaxis], recons,
masks),
"samples":
self.get_sample_images(),
}
def _get_initial_z(self):
# Initial z distribution
zp_init = tf.get_variable(
"initial_sample_distribution",
shape=self.latent_dist.input_shapes.params,
dtype=tf.float32,
)
zp = tf.tile(zp_init[tf.newaxis, tf.newaxis], self._sg["B, K, 1"])
z_dist = self.latent_dist(zp)
z = z_dist.sample()
self._sg.guard(zp, "B, K, Zp")
self._sg.guard(z_dist, "B, K, Z")
self._sg.guard(z, "B, K, Z")
return zp, z_dist, z
def _parse_iter_loss_weights(self, iter_loss_weight):
if iter_loss_weight == "linspace":
iter_weights = np.linspace(0.0, 1.0, self.num_iters + 1).tolist()
elif isinstance(iter_loss_weight, (float, int)):
iter_weights = [float(iter_loss_weight)] * (self.num_iters + 1)
elif isinstance(iter_loss_weight, (tuple, list)):
iter_weights = [float(w) for w in iter_loss_weight]
else:
raise ValueError("Unknown iter_loss_weight type {}.".format(
repr(iter_loss_weight)))
assert len(iter_weights) == (self.num_iters + 1), iter_loss_weight
return iter_weights
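# Illustrative example (assuming num_iters = 4, i.e. five weights are needed):
#   "linspace"        -> [0.0, 0.25, 0.5, 0.75, 1.0]
#   2.0               -> [2.0, 2.0, 2.0, 2.0, 2.0]
#   (1, 1, 1, 1, 2)   -> [1.0, 1.0, 1.0, 1.0, 2.0]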
def _propagate_shape_info(self, image_shape):
image_shape = image_shape[-3:] # ignore batch dims
logging.info("VAE: image shape: %s", image_shape)
z_param_shape = self._sg.guard(self.latent_dist.input_shapes.params, "Zp")
logging.info("VAE: z parameter shape: %s", z_param_shape)
self.output_dist.set_output_shape(image_shape)
out_param_shapes = self.output_dist.input_shapes
logging.info("VAE: output parameter shapes: %s", out_param_shapes)
self.decoder.set_output_shapes(*out_param_shapes)
def _get_image_for_iter(self, images, t):
"""Return current frame or first image."""
if self.sequential:
return images[:, t:t + 1]
else:
return images[:, :1]
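# Illustrative note (behaviour read off the code above): in sequential mode
# iteration t is paired with frame t of the input video; otherwise every
# refinement iteration reuses the first frame.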
@staticmethod
def _get_mask_posterior(out_dist, img):
p_comp = out_dist.components_distribution.prob(img[..., tf.newaxis, :])
posterior = p_comp / (tf.reduce_sum(p_comp, axis=-1, keepdims=True) + 1e-6)
return tf.transpose(posterior, [0, 4, 2, 3, 1])
def _get_inputs_for(self, out_params, out_dist, img, z_dist, zp, loss):
sg = self._sg
# gradients of loss wrt z, components and mask
dzp, dxp, dmp = tf.gradients(loss, [zp, out_params.pixel, out_params.mask])
log_prob = sg.guard(
out_dist.log_prob(img)[..., tf.newaxis], "B, 1, H, W, 1")
counterfactual_log_probs = []
for k in range(0, self.num_components):
mask = tf.concat([out_params.mask[:, :k], out_params.mask[:, k + 1:]],
axis=1)
pixel = tf.concat([out_params.pixel[:, :k], out_params.pixel[:, k + 1:]],
axis=1)
out_dist_k = self.output_dist(pixel, mask)
log_prob_k = out_dist_k.log_prob(img)[..., tf.newaxis]
counterfactual_log_probs.append(log_prob_k)
counterfactual = log_prob - tf.concat(counterfactual_log_probs, axis=1)
pred_mask = tf.transpose(out_dist.mixture_distribution.probs,
[0, 4, 2, 3, 1])
potential_inputs = {
# spatial
"image":
sg.guard(img, "B, 1, H, W, C"),
"log_prob":
sg.guard(log_prob, "B, 1, H, W, 1"),
"mask":
sg.guard(out_params.mask, "B, K, H, W, 1"),
"pred_mask":
sg.guard(pred_mask, "B, K, H, W, 1"),
"components":
sg.guard(out_params.pixel, "B, K, H, W, Cp"),
"dmask":
sg.guard(dmp, "B, K, H, W, Mp"),
"dcomponents":
sg.guard(dxp, "B, K, H, W, Cp"),
"posterior":
sg.guard(self._get_mask_posterior(out_dist, img), "B, K, H, W, 1"),
"capacity":
0.5 *
tf.ones(sg["B, K, H, W, 1"], dtype=tf.float32), # TODO: legacy
"coordinates":
self._get_coord_channels(),
"counterfactual":
self._sg.guard(counterfactual, "B, K, H, W, 1"),
# flat
"zp":
sg.guard(zp, "B, K, Zp"),
"dzp":
sg.guard(dzp, "B, K, Zp"),
"flat_capacity":
0.5 * tf.ones(sg["B, K, 1"], dtype=tf.float32), # TODO: legacy
}
# collect used inputs, stop gradients and preprocess where needed
final_inputs = {"spatial": {}, "flat": {}}
for k, v in potential_inputs.items():
# skip unused inputs
if k not in self.inputs:
continue
# stop gradients
if k in self.stop_gradients:
v = tf.stop_gradient(v)
# preprocess
v = self._apply_preprocessing(k, v)
# sort into flat / spatial according to their shape
structure = "flat" if len(v.get_shape().as_list()) == 3 else "spatial"
final_inputs[structure][k] = v
return final_inputs
def _apply_preprocessing(self, name, val):
if name in self.preprocess:
if self._sg.matches(val, "B, K, _z"):
flat_val = tf.reshape(val, self._sg["B*K"] + [-1])
elif self._sg.matches(val, "B, 1, _z"):
flat_val = val[:, 0, :]
elif self._sg.matches(val, "B, K, H, W, _c"):
flat_val = tf.reshape(val, self._sg["B*K, H*W"] + [-1])
elif self._sg.matches(val, "B, 1, H, W, _c"):
flat_val = tf.reshape(val, self._sg["B, H*W"] + [-1])
else:
raise ValueError("Cannot handle shape {}".format(
val.get_shape().as_list()))
ln = self._layernorms[name]
norm_val = ln(flat_val)
return tf.reshape(norm_val, val.shape.as_list())
else:
return val
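# Illustrative note (behaviour read off the code above): inputs listed in
# self.preprocess are flattened to a per-component layout, passed through the
# LayerNorm stored for that input name, and reshaped back to their original
# shape; all other inputs are returned unchanged.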
def _get_coord_channels(self):
if self.coord_type == "linear":
x_coords = tf.linspace(-1.0, 1.0, self._sg.W)[None, None, None, :, None]
y_coords = tf.linspace(-1.0, 1.0, self._sg.H)[None, None, :, None, None]
x_coords = tf.tile(x_coords, self._sg["B, 1, H, 1, 1"])
y_coords = tf.tile(y_coords, self._sg["B, 1, 1, W, 1"])
return tf.concat([x_coords, y_coords], axis=-1)
elif self.coord_type == "cos":
freqs = self._sg.guard(tf.range(0.0, self.coord_freqs), "F")
valx = tf.linspace(0.0, np.pi, self._sg.W)[None, None, None, :, None,
None]
valy = tf.linspace(0.0, np.pi, self._sg.H)[None, None, :, None, None,
None]
x_basis = tf.cos(valx * freqs[None, None, None, None, :, None])
y_basis = tf.cos(valy * freqs[None, None, None, None, None, :])
xy_basis = tf.reshape(x_basis * y_basis, self._sg["1, 1, H, W, F*F"])
coords = tf.tile(xy_basis, self._sg["B, 1, 1, 1, 1"])[..., 1:]
return coords
else:
raise KeyError('Unknown coord_type: "{}"'.format(self.coord_type))
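# Illustrative note (shapes read off the code above): coord_type "linear"
# yields a [B, 1, H, W, 2] tensor of x/y coordinates in [-1, 1], while "cos"
# yields a [B, 1, H, W, F*F - 1] cosine basis (the constant first component
# is dropped by the trailing [..., 1:]).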
def _raw_kl(self, z_dist):
return tfd.kl_divergence(z_dist, self.prior)
def _reconstruction_error(self, x_dist, img):
log_prob = self._sg.guard(x_dist.log_prob(img), "B, 1, H, W")
return -tf.reduce_sum(log_prob, axis=[1, 2, 3])
def _get_monitored_scalars(self, out_dist, data):
self._sg.guard(out_dist, "B, 1, H, W, C")
img = self._get_image_for_iter(data["image"], self.num_iters)
scalars = {}
with tf.name_scope("monitored_scalars"):
# ######### Loss Monitoring #########
scalars["loss/mse"] = tf.losses.mean_squared_error(
img, out_dist.mean())
# ########## Mask Monitoring #######
if "mask" in data:
true_mask = self._sg.guard(data["mask"], "B, T, L, H, W, 1")
true_mask = tf.transpose(true_mask[:, -1, ..., 0], [0, 2, 3, 1])
true_mask = self._sg.reshape(true_mask, "B, H*W, L")
else:
true_mask = None
pred_mask = self._sg.guard(out_dist.mixture_distribution.probs,
"B, 1, H, W, K")
pred_mask = self._sg.reshape(pred_mask, "B, H*W, K")
if pred_mask is not None and true_mask is not None:
self._sg.guard(pred_mask, "B, H*W, K")
self._sg.guard(true_mask, "B, H*W, L")
scalars["loss/ari"] = tf.reduce_mean(
adjusted_rand_index(true_mask, pred_mask))
scalars["loss/ari_nobg"] = tf.reduce_mean(
adjusted_rand_index(true_mask[..., 1:], pred_mask))
return scalars
| deepmind-research-master | iodine/modules/iodine.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading functionality for IODINE."""
# pylint: disable=g-multiple-import, missing-docstring, unused-import
import os.path
from iodine.modules.utils import flatten_all_but_last, ensure_3d
from multi_object_datasets import (
clevr_with_masks,
multi_dsprites,
tetrominoes,
objects_room,
)
from shapeguard import ShapeGuard
import sonnet as snt
import tensorflow.compat.v1 as tf
class IODINEDataset(snt.AbstractModule):
num_true_objects = 1
num_channels = 3
factors = {}
def __init__(
self,
path,
batch_size,
image_dim,
crop_region=None,
shuffle_buffer=1000,
max_num_objects=None,
min_num_objects=None,
grayscale=False,
name="dataset",
**kwargs,
):
super().__init__(name=name)
self.path = os.path.abspath(os.path.expanduser(path))
self.batch_size = batch_size
self.crop_region = crop_region
self.image_dim = image_dim
self.shuffle_buffer = shuffle_buffer
self.max_num_objects = max_num_objects
self.min_num_objects = min_num_objects
self.grayscale = grayscale
self.dataset = None
def _build(self, subset="train"):
dataset = self.dataset
# filter by number of objects
if self.max_num_objects is not None or self.min_num_objects is not None:
dataset = self.dataset.filter(self.filter_by_num_objects)
if subset == "train":
# normal mode returns a shuffled dataset iterator
if self.shuffle_buffer is not None:
dataset = dataset.shuffle(self.shuffle_buffer)
elif subset == "summary":
# for generating summaries and overview images
# returns a single fixed batch
dataset = dataset.take(self.batch_size)
# repeat and batch
dataset = dataset.repeat().batch(self.batch_size, drop_remainder=True)
iterator = dataset.make_one_shot_iterator()
data = iterator.get_next()
# preprocess the data to ensure correct format, scale images etc.
data = self.preprocess(data)
return data
def filter_by_num_objects(self, d):
if "visibility" not in d:
return tf.constant(True)
min_num_objects = self.min_num_objects or 0
max_num_objects = self.max_num_objects or 6
min_predicate = tf.greater_equal(
tf.reduce_sum(d["visibility"]),
tf.constant(min_num_objects - 1e-5, dtype=tf.float32),
)
max_predicate = tf.less_equal(
tf.reduce_sum(d["visibility"]),
tf.constant(max_num_objects + 1e-5, dtype=tf.float32),
)
return tf.logical_and(min_predicate, max_predicate)
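# Illustrative example (assumed thresholds): with min_num_objects=2 and
# max_num_objects=4, a sample whose visibility sums to 3.0 is kept, while
# samples with 1 or 5 visible objects are filtered out; samples without a
# "visibility" field always pass.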
def preprocess(self, data):
sg = ShapeGuard(dims={
"B": self.batch_size,
"H": self.image_dim[0],
"W": self.image_dim[1]
})
image = sg.guard(data["image"], "B, h, w, C")
mask = sg.guard(data["mask"], "B, L, h, w, 1")
# to float
image = tf.cast(image, tf.float32) / 255.0
mask = tf.cast(mask, tf.float32) / 255.0
# crop
if self.crop_region is not None:
height_slice = slice(self.crop_region[0][0], self.crop_region[0][1])
width_slice = slice(self.crop_region[1][0], self.crop_region[1][1])
image = image[:, height_slice, width_slice, :]
mask = mask[:, :, height_slice, width_slice, :]
flat_mask, unflatten = flatten_all_but_last(mask, n_dims=3)
# rescale
size = tf.constant(
self.image_dim, dtype=tf.int32, shape=[2], verify_shape=True)
image = tf.image.resize_images(
image, size, method=tf.image.ResizeMethod.BILINEAR)
mask = tf.image.resize_images(
flat_mask, size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
if self.grayscale:
image = tf.reduce_mean(image, axis=-1, keepdims=True)
output = {
"image": sg.guard(image[:, None], "B, T, H, W, C"),
"mask": sg.guard(unflatten(mask)[:, None], "B, T, L, H, W, 1"),
"factors": self.preprocess_factors(data, sg),
}
if "visibility" in data:
output["visibility"] = sg.guard(data["visibility"], "B, L")
else:
output["visibility"] = tf.ones(sg["B, L"], dtype=tf.float32)
return output
def preprocess_factors(self, data, sg):
return {
name: sg.guard(ensure_3d(data[name]), "B, L, *")
for name in self.factors
}
def get_placeholders(self, batch_size=None):
batch_size = batch_size or self.batch_size
sg = ShapeGuard(
dims={
"B": batch_size,
"H": self.image_dim[0],
"W": self.image_dim[1],
"L": self.num_true_objects,
"C": 3,
"T": 1,
})
return {
"image": tf.placeholder(dtype=tf.float32, shape=sg["B, T, H, W, C"]),
"mask": tf.placeholder(dtype=tf.float32, shape=sg["B, T, L, H, W, 1"]),
"visibility": tf.placeholder(dtype=tf.float32, shape=sg["B, L"]),
"factors": {
name:
tf.placeholder(dtype=dtype, shape=sg["B, L, {}".format(size)])
for name, (dtype, size) in self.factors.items()
},
}
class CLEVR(IODINEDataset):
num_true_objects = 11
num_channels = 3
factors = {
"color": (tf.uint8, 1),
"shape": (tf.uint8, 1),
"size": (tf.uint8, 1),
"position": (tf.float32, 3),
"rotation": (tf.float32, 1),
}
def __init__(
self,
path,
crop_region=((29, 221), (64, 256)),
image_dim=(128, 128),
name="clevr",
**kwargs,
):
super().__init__(
path=path,
crop_region=crop_region,
image_dim=image_dim,
name=name,
**kwargs)
self.dataset = clevr_with_masks.dataset(self.path)
def preprocess_factors(self, data, sg):
return {
"color": sg.guard(ensure_3d(data["color"]), "B, L, 1"),
"shape": sg.guard(ensure_3d(data["shape"]), "B, L, 1"),
"size": sg.guard(ensure_3d(data["color"]), "B, L, 1"),
"position": sg.guard(ensure_3d(data["pixel_coords"]), "B, L, 3"),
"rotation": sg.guard(ensure_3d(data["rotation"]), "B, L, 1"),
}
class MultiDSprites(IODINEDataset):
num_true_objects = 6
num_channels = 3
factors = {
"color": (tf.float32, 3),
"shape": (tf.uint8, 1),
"scale": (tf.float32, 1),
"x": (tf.float32, 1),
"y": (tf.float32, 1),
"orientation": (tf.float32, 1),
}
def __init__(
self,
path,
# variant from ['binarized', 'colored_on_grayscale', 'colored_on_colored']
dataset_variant="colored_on_grayscale",
image_dim=(64, 64),
name="multi_dsprites",
**kwargs,
):
super().__init__(path=path, name=name, image_dim=image_dim, **kwargs)
self.dataset_variant = dataset_variant
self.dataset = multi_dsprites.dataset(self.path, self.dataset_variant)
class Tetrominoes(IODINEDataset):
num_true_objects = 6
num_channels = 3
factors = {
"color": (tf.uint8, 3),
"shape": (tf.uint8, 1),
"position": (tf.float32, 2),
}
def __init__(self, path, image_dim=(35, 35), name="tetrominoes", **kwargs):
super().__init__(path=path, name=name, image_dim=image_dim, **kwargs)
self.dataset = tetrominoes.dataset(self.path)
def preprocess_factors(self, data, sg):
pos_x = ensure_3d(data["x"])
pos_y = ensure_3d(data["y"])
position = tf.concat([pos_x, pos_y], axis=2)
return {
"color": sg.guard(ensure_3d(data["color"]), "B, L, 3"),
"shape": sg.guard(ensure_3d(data["shape"]), "B, L, 1"),
"position": sg.guard(ensure_3d(position), "B, L, 2"),
}
| deepmind-research-master | iodine/modules/data.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration parameters for MMV."""
def get_model_config(ckpt_path):
"""Returns the model configuration to be used with each checkpoint."""
config = {
'audio_backbone': 'resnet50',
'audio_model_kwargs': {
'bn_config': {
'create_offset': True,
'create_scale': True,
'decay_rate': 0.9,
'eps': 1.0e-5
}
},
'bn_config_proj': {
'create_offset': True,
'create_scale': True,
'decay_rate': 0.9,
'eps': 1.0e-5
},
'config_audio_text': {
'embedding_dim': 512,
'toaud_bn_after_proj': False,
'toaud_head_mode': 'linear',
'totxt_bn_after_proj': False,
'totxt_head_mode': 'linear'
},
'config_video_audio': {
'embedding_dim': 512,
'toaud_bn_after_proj': True,
'toaud_head_mode': 'mlp@512',
'tovid_bn_after_proj': False,
'tovid_head_mode': 'linear'
},
'config_video_text': {
'embedding_dim': 256,
'totxt_bn_after_proj': True,
'totxt_head_mode': 'linear',
'tovid_bn_after_proj': False,
'tovid_head_mode': 'linear'
},
'mm_embedding_graph': 'fac_relu',
'name': 'text_audio_video',
'sentence_dim': 2048,
'use_xreplica_bn': True,
'vision_model_kwargs': {
'bn_config': {
'create_offset': True,
'create_scale': True,
'decay_rate': 0.9,
'eps': 1.0e-5
},
'n_frames': 32,
'width_mult': 1,
},
}
if 's3d' in ckpt_path:
config['visual_backbone'] = 's3d'
if 'tsm_resnet_x1' in ckpt_path:
config['visual_backbone'] = 'resnet50tsm'
if 'tsm_resnet_x2' in ckpt_path:
config['visual_backbone'] = 'resnet50tsm'
config['vision_model_kwargs']['width_mult'] = 2
return config
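# Illustrative example (hypothetical checkpoint paths): the visual backbone is
# inferred from the checkpoint filename, e.g.
#   get_model_config('/tmp/mmv_s3d.pkl')['visual_backbone'] -> 's3d'
#   get_model_config('/tmp/mmv_tsm_resnet_x2.pkl')['visual_backbone'] -> 'resnet50tsm'
# and the 'tsm_resnet_x2' case additionally sets
# config['vision_model_kwargs']['width_mult'] = 2.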
| deepmind-research-master | mmv/config.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UCF101 linear evaluation."""
import functools
from typing import Any, Dict, Optional
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import sklearn
from sklearn import preprocessing
import sklearn.linear_model
import sklearn.svm
import tensorflow as tf
import tensorflow_datasets as tfds
from mmv import config
from mmv.models import mm_embeddings
from mmv.utils import checkpoint
from mmv.utils import ucf101_dataset
flags.DEFINE_string('checkpoint_path', '~/tmp/mmv_s3d.pkl',
'The directory to load pre-trained weights from.')
flags.DEFINE_string('dataset_folder', '/tmp/ucf101',
'The directory with the ucf101 dataset.')
flags.DEFINE_integer('eval_batch_size', 1,
'The batch size for evaluation.')
flags.DEFINE_integer('train_batch_size', 16,
'The batch size for training.')
flags.DEFINE_integer('num_train_epochs', 10,
'How many epochs to collect features during training.')
flags.DEFINE_integer('num_test_windows', 10,
'How many windows to average on during test.')
flags.DEFINE_integer('min_resize', 224,
'Min value to resize images to during preprocessing.')
flags.DEFINE_integer('crop_size', 224,
'Value to resize images to during preprocessing.')
flags.DEFINE_integer('num_frames', 32,
'Number of video frames.')
flags.DEFINE_integer('stride', 2,
'Stride for video frames.')
flags.DEFINE_integer('ucf101_split', 1,
'Which split of ucf101 to use.')
FLAGS = flags.FLAGS
def get_sampling_offset(sequence: tf.Tensor,
num_steps: Optional[int],
is_training: bool,
stride: int = 1,
seed: Optional[int] = None) -> tf.Tensor:
"""Calculates the initial offset for a sequence where all steps will fit.
Args:
sequence: any tensor where the first dimension is timesteps.
num_steps: The number of timesteps we will output. If None,
deterministically start at the first frame.
is_training: A boolean indicating whether the graph is for training or not.
If False, the starting frame is always the first frame.
stride: distance to sample between timesteps.
seed: a deterministic seed to use when sampling.
Returns:
The first index to begin sampling from. A best effort is made to provide a
starting index such that all requested steps fit within the sequence (i.e.
offset + 1 + (num_steps - 1) * stride < len(sequence)). If this is not
satisfied, the starting index is chosen randomly from the full sequence.
"""
if num_steps is None or not is_training:
return tf.constant(0)
sequence_length = tf.shape(sequence)[0]
max_offset = tf.cond(
tf.greater(sequence_length, (num_steps - 1) * stride),
lambda: sequence_length - (num_steps - 1) * stride,
lambda: sequence_length)
offset = tf.random.uniform(
(),
maxval=tf.cast(max_offset, tf.int32),
dtype=tf.int32,
seed=seed)
return offset
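# Illustrative example (assumed sizes): for a 100-frame training sequence with
# num_steps=32 and stride=2 the offset is drawn uniformly from
# [0, 100 - (32 - 1) * 2) = [0, 38), so even the largest offset (37) keeps the
# last sampled index 37 + 62 = 99 inside the sequence. With is_training=False
# the offset is always 0.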
def sample_or_pad_sequence_indices(sequence: tf.Tensor,
num_steps: Optional[int],
is_training: bool,
repeat_sequence: bool = True,
stride: int = 1,
offset: Optional[int] = None) -> tf.Tensor:
"""Returns indices to take for sampling or padding a sequence to fixed size.
Samples num_steps from the sequence. If the sequence is shorter than
num_steps, the sequence loops. If the sequence is longer than num_steps and
is_training is True, then we seek to a random offset before sampling. If
offset is provided, we use that deterministic offset.
This method is appropriate for sampling from a tensor where you want every
timestep between a start and end time. See sample_stacked_sequence_indices for
more flexibility.
Args:
sequence: any tensor where the first dimension is timesteps.
num_steps: how many steps (e.g. frames) to take. If None, all steps from
start to end are considered and `is_training` has no effect.
is_training: A boolean indicating whether the graph is for training or not.
If False, the starting frame is deterministic.
repeat_sequence: A boolean indicating whether the sequence will repeat to
have enough steps for sampling. If False, a runtime error is thrown if
num_steps * stride is longer than the sequence length.
stride: distance to sample between timesteps.
offset: a deterministic offset to use regardless of the is_training value.
Returns:
Indices to gather from the sequence Tensor to get a fixed size sequence.
"""
sequence_length = tf.shape(sequence)[0]
sel_idx = tf.range(sequence_length)
if num_steps:
if offset is None:
offset = get_sampling_offset(sequence, num_steps, is_training, stride)
if repeat_sequence:
# Repeats sequence until num_steps are available in total.
num_repeats = tf.cast(
tf.math.ceil(
tf.math.divide(
tf.cast(num_steps * stride + offset, tf.float32),
tf.cast(sequence_length, tf.float32)
)), tf.int32)
sel_idx = tf.tile(sel_idx, [num_repeats])
steps = tf.range(offset, offset + num_steps * stride, stride)
else:
steps = tf.range(0, sequence_length, stride)
return tf.gather(sel_idx, steps)
def random_sample_sequence(sequence: tf.Tensor,
num_steps: int,
stride: int = 1) -> tf.Tensor:
"""Randomly sample a segment of size num_steps from a given sequence."""
indices = sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
is_training=True, # Random sample.
repeat_sequence=True, # Will repeat the sequence if request more.
stride=stride,
offset=None)
indices.set_shape((num_steps,))
output = tf.gather(sequence, indices)
return output
def sample_linspace_sequence(sequence: tf.Tensor,
num_windows: int,
num_steps: int,
stride: int = 1) -> tf.Tensor:
"""Samples num_windows segments from sequence with linearly spaced offsets.
The samples are concatenated in a single Tensor in order to have the same
format structure per timestep (e.g. a single frame). If num_steps * stride is
bigger than the number of timesteps, the sequence is repeated. This function
can be used in evaluation to extract enough segments to span the entire
sequence.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_windows: Number of windows retrieved from the sequence.
num_steps: Number of steps (e.g. frames) to take.
stride: Distance to sample between timesteps.
Returns:
A single Tensor with first dimension num_windows * num_steps. The Tensor
contains the concatenated list of num_windows tensors which offsets have
been linearly spaced from input.
"""
sequence_length = tf.shape(sequence)[0]
max_offset = tf.maximum(0, sequence_length - num_steps * stride)
offsets = tf.linspace(0.0, tf.cast(max_offset, tf.float32), num_windows)
offsets = tf.cast(offsets, tf.int32)
all_indices = []
for i in range(num_windows):
all_indices.append(
sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
is_training=False,
repeat_sequence=True, # Will repeat the sequence if request more.
stride=stride,
offset=offsets[i]))
indices = tf.concat(all_indices, axis=0)
indices.set_shape((num_windows * num_steps,))
output = tf.gather(sequence, indices)
return output
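# Illustrative example (assumed sizes): for a 300-frame clip with
# num_windows=10, num_steps=32 and stride=2, the window offsets are
# tf.linspace(0, 300 - 32 * 2, 10) = [0, 26, 52, ..., 236] cast to int32, and
# the result stacks 10 windows of 32 frames into a single tensor with leading
# dimension 320.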
def resize_smallest(frames: tf.Tensor, min_resize: int) -> tf.Tensor:
"""Resizes frames so that min(height, width) is equal to min_resize.
This function is a no-op if min(height, width) already equals min_resize,
which saves compute time.
Args:
frames: A Tensor of dimension [timesteps, input_h, input_w, channels].
min_resize: Minimum size of the final image dimensions.
Returns:
A Tensor of shape [timesteps, output_h, output_w, channels] of type
frames.dtype where min(output_h, output_w) = min_resize.
"""
shape = tf.shape(frames)
input_h = shape[1]
input_w = shape[2]
output_h = tf.maximum(min_resize, (input_h * min_resize) // input_w)
output_w = tf.maximum(min_resize, (input_w * min_resize) // input_h)
def resize_fn():
frames_resized = tf.image.resize(frames, (output_h, output_w))
return tf.cast(frames_resized, frames.dtype)
should_resize = tf.math.logical_or(tf.not_equal(input_w, output_w),
tf.not_equal(input_h, output_h))
frames = tf.cond(should_resize, resize_fn, lambda: frames)
return frames
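# Illustrative example (assumed sizes): 240x320 frames resized with
# min_resize=224 become 224x298 (224 on the smaller side,
# 298 = (320 * 224) // 240), while frames whose smaller side already equals
# min_resize are passed through unchanged.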
def process_samples(features_dict, num_frames=32, stride=1, is_training=True,
min_resize=224, crop_size=224, num_windows=1):
"""Process video frames."""
video = features_dict['video']
if is_training:
assert num_windows == 1
video = random_sample_sequence(video, num_frames, stride)
is_flipped = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32)
video = tf.cond(tf.equal(is_flipped, 1),
true_fn=lambda: tf.image.flip_left_right(video),
false_fn=lambda: video)
else:
video = sample_linspace_sequence(video, num_windows, num_frames, stride)
# Resize smallest side.
video = resize_smallest(video, min_resize)
if is_training:
# Random crop.
video = tf.image.random_crop(video, [num_frames, crop_size, crop_size, 3])
else:
# Central crop.
video = tf.image.resize_with_crop_or_pad(video, crop_size, crop_size)
video = tf.cast(video, tf.float32)
video /= 255.0 # Set between [0, 1].
features_dict['video'] = video
return features_dict
def space_to_depth_batch(features_dict):
images = features_dict['video']
_, l, h, w, c = images.shape
images = tf.reshape(images, [-1, l // 2, 2, h // 2, 2, w // 2, 2, c])
images = tf.transpose(images, [0, 1, 3, 5, 2, 4, 6, 7])
images = tf.reshape(images, [-1, l // 2, h // 2, w // 2, 8 * c])
features_dict['video'] = images
return features_dict
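# Illustrative example (assumed sizes): this 2x2x2 space-to-depth fold turns a
# [B, 32, 224, 224, 3] video batch into [B, 16, 112, 112, 24]; in main() below
# it is applied only when the visual backbone is 's3d'.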
def reshape_windows(features_dict, num_frames):
x = features_dict['video']
x = tf.reshape(x, (-1, num_frames, x.shape[2], x.shape[3], x.shape[4]))
features_dict['video'] = x
return features_dict
def compute_accuracy_metrics(pred, gt, prefix=''):
order_pred = np.argsort(pred, axis=1)
assert len(gt.shape) == len(order_pred.shape) == 2
top1_pred = order_pred[:, -1:]
top5_pred = order_pred[:, -5:]
top1_acc = np.mean(top1_pred == gt)
top5_acc = np.mean(np.max(top5_pred == gt, 1))
return {prefix + 'top1': top1_acc,
prefix + 'top5': top5_acc}
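# Illustrative example (toy values): for pred = np.array([[0.1, 0.9, 0.3]])
# and gt = np.array([[1]]), class 1 has the highest score, so the function
# returns {'top1': 1.0, 'top5': 1.0}. Note that gt must be a 2-D column of
# label indices.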
def forward_fn(images: jnp.ndarray,
audio_spectrogram: jnp.ndarray,
word_ids: jnp.ndarray,
is_training: bool,
model_config: Dict[str, Any]):
"""Forward pass of the model."""
# The word embedding matrix normally holds the pre-trained weights. We
# initialise it with zeros here because the real values are loaded from the
# checkpoint.
language_model_vocab_size = 65536
word_embedding_dim = 300
dummy_embedding_matrix = jnp.zeros(shape=(language_model_vocab_size,
word_embedding_dim))
module = mm_embeddings.AudioTextVideoEmbedding(
**model_config,
word_embedding_matrix=dummy_embedding_matrix)
return module(images=images,
audio_spectrogram=audio_spectrogram,
word_ids=word_ids,
is_training=is_training)['vid_repr']
def main(argv):
del argv
sklearn_reg = 0.001
model_config = config.get_model_config(FLAGS.checkpoint_path)
forward = hk.without_apply_rng(hk.transform_with_state(forward_fn))
forward_apply = jax.jit(functools.partial(forward.apply,
is_training=False,
model_config=model_config))
# Get the UCF101 config.
dset_config = tfds.video.ucf101.Ucf101.BUILDER_CONFIGS[FLAGS.ucf101_split]
builder = ucf101_dataset.ModUcf101(
data_dir=FLAGS.dataset_folder,
config=dset_config)
# Create the tfrecord files (no-op if already exists)
dl_config = tfds.download.DownloadConfig(verify_ssl=False)
builder.download_and_prepare(download_config=dl_config)
# Generate the training dataset.
train_ds = builder.as_dataset(split='train', shuffle_files=False)
train_ds = train_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda
x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=True,
min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size))
train_ds = train_ds.batch(batch_size=FLAGS.train_batch_size)
if model_config['visual_backbone'] == 's3d':
train_ds = train_ds.map(space_to_depth_batch)
train_ds = train_ds.repeat(FLAGS.num_train_epochs)
# Generate the test dataset.
test_ds = builder.as_dataset(split='test', shuffle_files=False)
test_ds = test_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda
x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=False,
min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size,
num_windows=FLAGS.num_test_windows))
test_ds = test_ds.batch(batch_size=FLAGS.eval_batch_size)
test_ds = test_ds.map(lambda x: reshape_windows( # pylint: disable=g-long-lambda
x, num_frames=FLAGS.num_frames))
if model_config['visual_backbone'] == 's3d':
test_ds = test_ds.map(space_to_depth_batch)
test_ds = test_ds.repeat(1)
pretrained_weights = checkpoint.load_checkpoint(FLAGS.checkpoint_path)
params = pretrained_weights['params']
state = pretrained_weights['state']
# Collect training samples.
audio_frames = 96
mel_filters = 40
num_tokens = 16
dummy_audio = jnp.zeros(
shape=(FLAGS.train_batch_size, audio_frames, mel_filters, 1))
dummy_word_ids = jnp.zeros(
shape=(FLAGS.train_batch_size, num_tokens), dtype=jnp.int32)
train_features = []
train_labels = []
print('Computing features on train')
training_examples = iter(tfds.as_numpy(train_ds))
for train_ex in training_examples:
vid_representation, _ = forward_apply(params=params,
state=state,
images=train_ex['video'],
audio_spectrogram=dummy_audio,
word_ids=dummy_word_ids)
train_labels.append(train_ex['label'])
train_features.append(vid_representation)
if len(train_labels) % 50 == 0:
print(f'Processed {len(train_labels)} examples.')
train_labels = np.concatenate(train_labels, axis=0)
train_features = np.concatenate(train_features, axis=0)
print(f'Finish collecting train features of shape {train_features.shape}')
# Collect test samples.
dummy_audio = jnp.zeros(
shape=(FLAGS.eval_batch_size, audio_frames, mel_filters, 1))
dummy_word_ids = jnp.zeros(
shape=(FLAGS.eval_batch_size, num_tokens), dtype=jnp.int32)
test_features = []
test_labels = []
print('Computing features on test')
test_examples = iter(tfds.as_numpy(test_ds))
for test_ex in test_examples:
vid_representation_test, _ = forward_apply(params=params,
state=state,
images=test_ex['video'],
audio_spectrogram=dummy_audio,
word_ids=dummy_word_ids)
test_labels.append(test_ex['label'])
test_features.append(vid_representation_test)
if len(test_labels) % 50 == 0:
print(f'Processed {len(test_labels)} examples.')
test_features = np.concatenate(test_features, axis=0)
test_labels = np.concatenate(test_labels, axis=0)
print(f'Finish collecting test features of shape {test_features.shape}')
# Train classifier
print('Training linear classifier!')
classifier = sklearn.svm.LinearSVC(C=sklearn_reg)
scaler = preprocessing.StandardScaler().fit(train_features)
train_features = scaler.transform(train_features)
classifier.fit(train_features, train_labels.ravel())
print('Training done !')
# Evaluation.
test_features = scaler.transform(test_features)
print('Running inference on train')
pred_train = classifier.decision_function(train_features)
print('Running inference on test')
pred_test = classifier.decision_function(test_features)
if FLAGS.num_test_windows > 1:
pred_test = np.reshape(
pred_test, (test_labels.shape[0], -1, pred_test.shape[1]))
pred_test = pred_test.mean(axis=1)
# Compute accuracies.
metrics = compute_accuracy_metrics(pred_train, train_labels[:, None],
prefix='train_')
metrics.update(
compute_accuracy_metrics(pred_test, test_labels[:, None], prefix='test_'))
print(metrics)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | mmv/eval_ucf101.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpoint restoring utilities."""
from absl import logging
import dill
def load_checkpoint(checkpoint_path):
try:
with open(checkpoint_path, 'rb') as checkpoint_file:
checkpoint_data = dill.load(checkpoint_file)
logging.info('Loading checkpoint from %s', checkpoint_path)
return checkpoint_data
except FileNotFoundError:
return None
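# Illustrative usage (hypothetical path): the checkpoint is a dill-pickled
# dict, so callers typically do
#   ckpt = load_checkpoint('/tmp/mmv_s3d.pkl')
#   params, state = ckpt['params'], ckpt['state']
# and must handle the None returned when the file does not exist.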
| deepmind-research-master | mmv/utils/checkpoint.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ucf101 with custom decoding params."""
import tensorflow as tf
import tensorflow_datasets as tfds
# Utilities functions.
tf.compat.v1.enable_eager_execution()
_CITATION = """\
@article{DBLP:journals/corr/abs-1212-0402,
author = {Khurram Soomro and
Amir Roshan Zamir and
Mubarak Shah},
title = {{UCF101:} {A} Dataset of 101 Human Actions Classes From Videos in
The Wild},
journal = {CoRR},
volume = {abs/1212.0402},
year = {2012},
url = {http://arxiv.org/abs/1212.0402},
archivePrefix = {arXiv},
eprint = {1212.0402},
timestamp = {Mon, 13 Aug 2018 16:47:45 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1212-0402},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_LABELS_FNAME = 'video/ucf101_labels.txt'
class ModUcf101(tfds.video.Ucf101):
"""Ucf101 action recognition dataset with better quality.
"""
def _info(self):
ffmpeg_extra_args = ('-qscale:v', '2', '-r', '25', '-t', '00:00:20')
video_shape = (
None, self.builder_config.height, self.builder_config.width, 3)
labels_names_file = tfds.core.tfds_path(_LABELS_FNAME)
features = tfds.features.FeaturesDict({
'video': tfds.features.Video(video_shape,
ffmpeg_extra_args=ffmpeg_extra_args,
encoding_format='jpeg'), # pytype: disable=wrong-arg-types # gen-stub-imports
'label': tfds.features.ClassLabel(names_file=labels_names_file),
})
return tfds.core.DatasetInfo(
builder=self,
description='A 101-label video classification dataset.',
features=features,
homepage='https://www.crcv.ucf.edu/data-sets/ucf101/',
citation=_CITATION,
)
| deepmind-research-master | mmv/utils/ucf101_dataset.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for text-video-audio embeddings."""
from typing import Any, Dict, Optional
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import normalization
from mmv.models import resnet
from mmv.models import s3d
from mmv.models import tsm_resnet
_DEFAULT_CFG_AUDTXT = {
"totxt_head_mode": "linear",
"toaud_head_mode": "linear",
"toaud_bn_after_proj": False,
"totxt_bn_after_proj": False,
"embedding_dim": 512}
_DEFAULT_CFG_VIDAUD = {
"tovid_head_mode": "linear",
"toaud_head_mode": "mlp@512",
"tovid_bn_after_proj": False,
"toaud_bn_after_proj": True,
"embedding_dim": 512}
_DEFAULT_CFG_VIDTXT = {
"tovid_head_mode": "linear",
"totxt_head_mode": "mlp@512",
"tovid_bn_after_proj": False,
"totxt_bn_after_proj": True,
"embedding_dim": 512}
_DEFAULT_CFG_BN = {"decay_rate": 0.9, "eps": 1e-5,
"create_scale": True, "create_offset": True}
def _setkey_if_not_exists(d, key, value):
if key not in d:
d[key] = value
class AudioTextVideoEmbedding(hk.Module):
"""Module to fuse audio, text and video for joint embedding learning."""
def __init__(
self,
# Language parameters.
word_embedding_matrix,
sentence_dim=2048,
# Audio parameters.
audio_backbone="resnet18",
audio_model_kwargs=None,
# Vision parameters.
visual_backbone="s3d",
vision_model_kwargs=None,
# Common parameters.
mm_embedding_graph="fac_relu",
use_xreplica_bn=True,
bn_config_proj=None,
config_video_text=None,
config_video_audio=None,
config_audio_text=None,
use_audio_text=False,
name="audio_text_video_model"):
"""Initialize the AudioTextVideoEmbedding class.
Args:
word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words.
sentence_dim: The dimension of the sentence representation.
audio_backbone: Backbone for audio.
audio_model_kwargs: Other specific parameters to pass to the audio
module.
visual_backbone: The video backbone.
vision_model_kwargs: Other specific parameters to pass to the vision
module.
mm_embedding_graph: Embedding graph merging strategy.
Can be `shared`, `disjoint` or `fac` (fac can be followed by an
activation function name e.g. `fac_relu`).
use_xreplica_bn: Whether or not to use the cross replica batch norm.
bn_config_proj: BN config of the projection heads.
config_video_text: Config for the video and the text branches.
config_video_audio: Config for the video and the audio branches.
config_audio_text: Config for the audio and the text branches.
use_audio_text: Whether or not the audio text branch is used during
training.
name: graph name.
"""
super(AudioTextVideoEmbedding, self).__init__(name=name)
# Audio parameters.
self._audio_backbone = audio_backbone
self._audio_model_kwargs = audio_model_kwargs
# Language parameters.
self._sentence_dim = sentence_dim
self._word_embedding_matrix = word_embedding_matrix
# Vision parameters.
self._visual_backbone = visual_backbone
self._vision_model_kwargs = vision_model_kwargs
# Joint parameters.
self._use_xreplica_bn = use_xreplica_bn
if self._use_xreplica_bn:
self._normalizer_name = "cross_replica_batch_norm"
else:
self._normalizer_name = "batch_norm"
# Projection head parameters.
if config_video_text is None:
config_video_text = _DEFAULT_CFG_VIDTXT
for k, v in _DEFAULT_CFG_VIDTXT.items():
_setkey_if_not_exists(config_video_text, k, v)
self._cfg_vid_txt = config_video_text
if config_video_audio is None:
config_video_audio = _DEFAULT_CFG_VIDAUD
for k, v in _DEFAULT_CFG_VIDAUD.items():
_setkey_if_not_exists(config_video_audio, k, v)
self._cfg_vid_aud = config_video_audio
if config_audio_text is None:
config_audio_text = _DEFAULT_CFG_AUDTXT
for k, v in _DEFAULT_CFG_AUDTXT.items():
_setkey_if_not_exists(config_audio_text, k, v)
self._cfg_aud_txt = config_audio_text
self._use_audio_text = use_audio_text
self._mm_embedding_graph = mm_embedding_graph
self._use_separate_heads = (
mm_embedding_graph == "disjoint" or
mm_embedding_graph.startswith("fac"))
self._bn_config_proj = bn_config_proj or _DEFAULT_CFG_BN
def _get_pair_embedding_heads(self,
embedding_dim_1, embedding_dim_2,
mode1, mode2,
use_bn_out1, use_bn_out2,
name1, name2):
embd1_module = EmbeddingModule(
embedding_dim_1,
mode=mode1,
use_bn_out=use_bn_out1,
bn_config=self._bn_config_proj,
use_xreplica_bn=self._use_xreplica_bn,
name=name1)
if self._use_separate_heads:
embd2_module = EmbeddingModule(
embedding_dim_2,
mode=mode2,
use_bn_out=use_bn_out2,
use_xreplica_bn=self._use_xreplica_bn,
bn_config=self._bn_config_proj,
name=name2)
else:
assert embedding_dim_1 == embedding_dim_2, (
"Using shared heads but inconsistent embedding dims where provided.")
assert mode1 == mode2, (
"Using shared heads but inconsistent modes where provided.")
assert use_bn_out1 == use_bn_out2, (
"Using shared heads but inconsistent bn conf where provided.")
embd2_module = embd1_module
return embd1_module, embd2_module
def _activate_interaction(self, inputs, activation_fn, is_training,
activation_module=None):
"""Activation function for the interaction modules."""
if activation_fn == "relu":
inputs = jax.nn.relu(inputs)
elif activation_fn == "bnrelu":
if activation_module is None:
activation_module = normalization.get_normalize_fn(
normalizer_name=self._normalizer_name,
normalizer_kwargs=self._bn_config_proj)
inputs = activation_module(inputs, is_training=is_training)
inputs = jax.nn.relu(inputs)
else:
raise ValueError(f"{activation_fn} not supported.")
return inputs, activation_module
def __call__(self,
images,
audio_spectrogram,
word_ids,
is_training,
return_intermediate_audio=False):
"""Computes video, text and audio embeddings.
Args:
images: The videos tensor of shape [B1, T, H, W, 3] where B1 is the batch
size, T is the number of frames per clip, H the height, W the width
and 3 the rgb channels.
audio_spectrogram: The audio tensor of shape [B2, T', F] where B2 is the
batch size, T' is the number of temporal frames, F is the number of
frequency frames.
word_ids: The word indices used to compute the word embeddings within the
model graph. The expected shape is [B3, N] where B3 is the batch size
and N the maximum number of words per sentence.
is_training: Whether or not to activate the graph in training mode.
return_intermediate_audio: Return audio intermediate representation.
Returns:
if return_intermediate_audio = True
audio_representation: the 4-dim audio representation taken before
averaging over spatial dims in the Resnet.
else
vid_embd: a dict containing the video embeddings in the audio and text
spaces, each of shape [B1, d_embd].
aud_embd: a dict containing the audio embeddings in the video and text
spaces, each of shape [B2, d_embd].
txt_embd: a dict containing the text embeddings in the video and audio
spaces, each of shape [B3, d_embd].
vid_repr: the video representation of shape [B1, d_visual].
aud_repr: the audio representation of shape [B2, d_audio].
"""
# Computes the visual representation.
video_cnn = VisualModule(backbone=self._visual_backbone,
use_xreplica_bn=self._use_xreplica_bn,
model_kwargs=self._vision_model_kwargs)
visual_representation = video_cnn(images, is_training=is_training)
# Projection heads: Video -> Text and Video -> Audio.
vid2txt_embd_module, vid2aud_embd_module = self._get_pair_embedding_heads(
embedding_dim_1=self._cfg_vid_txt["embedding_dim"],
embedding_dim_2=self._cfg_vid_aud["embedding_dim"],
mode1=self._cfg_vid_txt["totxt_head_mode"],
mode2=self._cfg_vid_aud["toaud_head_mode"],
use_bn_out1=self._cfg_vid_txt["totxt_bn_after_proj"],
use_bn_out2=self._cfg_vid_aud["toaud_bn_after_proj"],
name1="vis_embd",
name2="vid2audio_embd")
video_embd = {}
if self._mm_embedding_graph in ["shared", "disjoint"]:
video_embd["toaud"] = vid2aud_embd_module(visual_representation,
is_training=is_training)
video_embd["totxt"] = vid2txt_embd_module(visual_representation,
is_training=is_training)
elif self._mm_embedding_graph.startswith("fac"):
# Activation function if specified in the name, e.g. fac_relu.
activation_fn = None
if len(self._mm_embedding_graph.split("_")) == 2:
activation_fn = self._mm_embedding_graph.split("_")[1]
video_embd["toaud"] = vid2aud_embd_module(visual_representation,
is_training=is_training)
fine_rep = video_embd["toaud"]
# Eventually activate the fine grained representation.
if activation_fn:
fine_rep, activation_module = self._activate_interaction(
inputs=fine_rep, activation_fn=activation_fn,
is_training=is_training)
video_embd["totxt"] = vid2txt_embd_module(fine_rep,
is_training=is_training)
else:
raise ValueError(
f"{self._mm_embedding_graph} is not a valid MM embedding graph.")
# Computes the audio representation.
audio_cnn = AudioModule(backbone=self._audio_backbone,
use_xreplica_bn=self._use_xreplica_bn,
model_kwargs=self._audio_model_kwargs)
if return_intermediate_audio:
return audio_cnn(audio_spectrogram,
is_training=is_training,
return_intermediate=True)
audio_representation = audio_cnn(audio_spectrogram, is_training=is_training)
# Projection heads: Audio -> Video and Audio -> Text.
aud2vid_embd_module, aud2txt_embd_module = self._get_pair_embedding_heads(
embedding_dim_1=self._cfg_vid_aud["embedding_dim"],
embedding_dim_2=self._cfg_aud_txt["embedding_dim"],
mode1=self._cfg_vid_aud["tovid_head_mode"],
mode2=self._cfg_aud_txt["totxt_head_mode"],
use_bn_out1=self._cfg_vid_aud["tovid_bn_after_proj"],
use_bn_out2=self._cfg_aud_txt["totxt_bn_after_proj"],
name1="audio_embd",
name2="audio2txt_embd")
audio_embd = {}
audio_embd["tovid"] = aud2vid_embd_module(audio_representation,
is_training=is_training)
# Computes the projection to the text domain depending on the MM graph mode.
if (self._mm_embedding_graph.startswith("fac") and
(self._use_audio_text or (not is_training))):
# In case the audio text branch is not used during training, we do that
# only at eval time (is_training=False) in order to not pollute the BN
# stats in vid2txt_embd_module with audio features during training.
fine_rep_audio = audio_embd["tovid"]
if activation_fn:
fine_rep_audio, _ = self._activate_interaction(
inputs=fine_rep_audio, activation_fn=activation_fn,
is_training=is_training, activation_module=activation_module)
audio_embd["totxt"] = vid2txt_embd_module(fine_rep_audio,
is_training=is_training)
else:
audio_embd["totxt"] = aud2txt_embd_module(audio_representation,
is_training=is_training)
# Computes the text representation.
txt_representation = TextModule(
sentence_dim=self._sentence_dim,
word_embedding_matrix=self._word_embedding_matrix)(
word_ids, is_training=is_training)
# Projection heads: Text -> Video and Text -> Audio.
txt2vid_embd_module, txt2aud_embd_module = self._get_pair_embedding_heads(
embedding_dim_1=self._cfg_vid_txt["embedding_dim"],
embedding_dim_2=self._cfg_aud_txt["embedding_dim"],
mode1=self._cfg_vid_txt["tovid_head_mode"],
mode2=self._cfg_aud_txt["toaud_head_mode"],
use_bn_out1=self._cfg_vid_txt["tovid_bn_after_proj"],
use_bn_out2=self._cfg_aud_txt["toaud_bn_after_proj"],
name1="txt_embd",
name2="txt2audio_embd")
txt_embd = {}
txt_embd["tovid"] = txt2vid_embd_module(txt_representation,
is_training=is_training)
txt_embd["toaud"] = txt2aud_embd_module(txt_representation,
is_training=is_training)
return {
"vid_embd": video_embd,
"aud_embd": audio_embd,
"txt_embd": txt_embd,
"vid_repr": visual_representation,
"aud_repr": audio_representation,
}
class EmbeddingModule(hk.Module):
"""Final Embedding module."""
def __init__(self,
embedding_dim: int,
mode: str = "linear",
use_bn_out: bool = False,
bn_config: Optional[Dict[str, Any]] = None,
use_xreplica_bn: bool = True,
name="embedding_module"):
self._embedding_dim = embedding_dim
self._use_bn_out = use_bn_out
self._mode = mode
# Set default BN config.
bn_config = bn_config or _DEFAULT_CFG_BN
if use_xreplica_bn:
normalizer_name = "cross_replica_batch_norm"
else:
normalizer_name = "batch_norm"
self._batch_norm = normalization.get_normalize_fn(
normalizer_name=normalizer_name,
normalizer_kwargs=bn_config)
super(EmbeddingModule, self).__init__(name=name)
def __call__(self, input_feature, is_training):
if self._mode == "linear":
proj = hk.Linear(self._embedding_dim, name="final_projection")
embedding = proj(input_feature)
elif self._mode.startswith("mlp"):
if "@" not in self._mode:
raise ValueError(
("Please specify the inner dimensions of the MLP with `@` symbol"
"e.g. mlp@512 or mlp@512@256 for a 2 layer MLP."))
inner_dims = [int(dim) for dim in self._mode.split("@")[1:]]
embedding = input_feature
for inner_dim in inner_dims:
embedding = hk.Linear(inner_dim, with_bias=True,
name="final_projection_inner")(embedding)
if not self._mode.startswith("mlp_nobn"):
embedding = self._batch_norm(embedding, is_training=is_training)
embedding = jax.nn.relu(embedding)
# Final projection.
embedding = hk.Linear(self._embedding_dim, name="final_projection",
with_bias=not self._use_bn_out)(embedding)
else:
raise NotImplementedError
if self._use_bn_out:
embedding = self._batch_norm(embedding, is_training=is_training)
return embedding
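# Illustrative note (behaviour read off the code above): mode "linear" is a
# single hk.Linear projection to embedding_dim; "mlp@512" inserts one 512-unit
# hidden layer with batch norm and ReLU before the final projection;
# "mlp@512@256" stacks two hidden layers; a mode starting with "mlp_nobn"
# skips the batch norm on the hidden layers.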
class VisualModule(hk.Module):
"""The visual module selects which CNN backbone to connect to the graph."""
def __init__(self,
use_xreplica_bn=True,
backbone="s3d",
model_kwargs=None,
name="visual_module"):
self._backbone = backbone
super(VisualModule, self).__init__(name=name)
if model_kwargs is None:
model_kwargs = {}
bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN)
if use_xreplica_bn:
normalizer_name = "cross_replica_batch_norm"
else:
normalizer_name = "batch_norm"
normalize_fn = normalization.get_normalize_fn(
normalizer_name=normalizer_name,
normalizer_kwargs=bn_config)
if backbone == "s3d":
self._cnn = s3d.S3D(normalize_fn=normalize_fn)
elif backbone == "resnet50tsm":
width_mult = model_kwargs.get("width_mult", 1)
self._cnn = tsm_resnet.TSMResNetV2(
normalize_fn=normalize_fn,
depth=50,
num_frames=model_kwargs["n_frames"],
width_mult=width_mult)
else:
raise NotImplementedError
def __call__(self, images, is_training):
"""Connects graph to images."""
features = self._cnn(images, is_training=is_training)
return features
class AudioModule(hk.Module):
"""The audio module selects which CNN backbone to connect to the graph."""
def __init__(self,
backbone="resnet18",
use_xreplica_bn=True,
model_kwargs=None,
name="audio_module"):
super(AudioModule, self).__init__(name=name)
model_kwargs = model_kwargs or {}
bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN)
backbone_to_depth = {
"resnet18": 18,
"resnet34": 34,
"resnet50": 50,
"resnet101": 101
}
assert backbone in backbone_to_depth, (
f"backbone should be in {backbone_to_depth.keys()}")
if use_xreplica_bn:
normalizer_name = "cross_replica_batch_norm"
else:
normalizer_name = "batch_norm"
self._cnn = resnet.ResNetV2(
depth=backbone_to_depth[backbone],
normalize_fn=normalization.get_normalize_fn(
normalizer_name=normalizer_name,
normalizer_kwargs=bn_config),
num_classes=None)
def __call__(self,
audio_spectrogram,
is_training,
return_intermediate=False):
"""Connects graph to audio spectrogram."""
final_endpoint = "output"
if return_intermediate:
final_endpoint = "last_conv"
return self._cnn(audio_spectrogram,
is_training=is_training,
final_endpoint=final_endpoint)
class TextModule(hk.Module):
"""Text module computes the sentences representation."""
def __init__(self,
word_embedding_matrix,
sentence_dim=1024,
name="text_module"):
"""Initialize text module.
Args:
word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words.
sentence_dim: dimension of sentence representation.
name: module name.
"""
super(TextModule, self).__init__(name=name)
self._word_embedding_module = hk.Embed(
embedding_matrix=word_embedding_matrix)
self._conv1d_module = hk.Conv1D(sentence_dim, 1, name="text_conv1")
def __call__(self, word_ids, is_training):
"""Connects graph to sentence representation."""
word_embeddings = self._word_embedding_module(word_ids)
word_embeddings = jax.lax.stop_gradient(word_embeddings)
output = self._conv1d_module(word_embeddings)
output = jax.nn.relu(output)
output = jnp.amax(output, axis=1)
return output
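# Illustrative example (assumed dimensions): with word_ids of shape [B, 16]
# and a 300-dimensional word embedding matrix, the (gradient-stopped) word
# embeddings are [B, 16, 300], the 1x1 convolution maps them to
# [B, 16, sentence_dim], and the max over the word axis gives a
# [B, sentence_dim] sentence representation.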
| deepmind-research-master | mmv/models/mm_embeddings.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TSM ResNet model."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import tsm_resnet
class TSMResNetTest(parameterized.TestCase):
@parameterized.parameters(
('tsm_resnet_stem', (2 * 32, 56, 56, 64)),
('tsm_resnet_unit_0', (2 * 32, 56, 56, 256)),
('tsm_resnet_unit_1', (2 * 32, 28, 28, 512)),
('tsm_resnet_unit_2', (2 * 32, 14, 14, 1024)),
('tsm_resnet_unit_3', (2 * 32, 7, 7, 2048)),
('last_conv', (2 * 32, 7, 7, 2048)),
('Embeddings', (2, 2048)),
)
def test_output_dimension(self, final_endpoint, expected_shape):
input_shape = (2, 32, 224, 224, 3)
def f():
data = jnp.zeros(input_shape)
net = tsm_resnet.TSMResNetV2()
return net(data, final_endpoint=final_endpoint)
init_fn, apply_fn = hk.transform(f)
out = apply_fn(init_fn(jax.random.PRNGKey(42)), None)
self.assertEqual(out.shape, expected_shape)
def test_tpu_mode(self):
input_shape = (32 * 2, 224, 224, 3)
def f():
data = jnp.zeros(input_shape)
net = tsm_resnet.TSMResNetV2(num_frames=32)
return net(data, final_endpoint='Embeddings')
init_fn, apply_fn = hk.transform(f)
out = apply_fn(init_fn(jax.random.PRNGKey(42)), None)
self.assertEqual(out.shape, (2, 2048))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | mmv/models/tsm_resnet_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tsm_utils."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from mmv.models import tsm_utils
class TsmUtilsTest(parameterized.TestCase):
@parameterized.parameters(
((2, 32, 224, 224, 3), 'gpu', (2 * 32, 224, 224, 3), 32),
((32, 224, 224, 3), 'tpu', (32, 224, 224, 3), None),
)
def test_prepare_inputs(self, input_shape, expected_mode, expected_shape,
expected_num_frames):
data = jnp.zeros(input_shape)
out, mode, num_frames = tsm_utils.prepare_inputs(data)
self.assertEqual(out.shape, expected_shape)
self.assertEqual(mode, expected_mode)
self.assertEqual(num_frames, expected_num_frames)
def test_prepare_outputs(self):
data = jnp.concatenate([jnp.zeros(4), jnp.ones(4)]).reshape(4, 2)
out_gpu = tsm_utils.prepare_outputs(data, 'gpu', 2)
out_tpu = tsm_utils.prepare_outputs(data, 'tpu', 2)
expected_gpu = np.concatenate([np.zeros(2), np.ones(2)]).reshape(2, 2)
expected_tpu = 0.5 * jnp.ones((2, 2))
np.testing.assert_allclose(out_gpu, expected_gpu)
np.testing.assert_allclose(out_tpu, expected_tpu)
def test_apply_tsm(self):
shape = (32, 224, 224, 16)
data = jnp.zeros(shape)
out_gpu = tsm_utils.apply_temporal_shift(data, 'gpu', 16)
out_tpu = tsm_utils.apply_temporal_shift(data, 'tpu', 16)
self.assertEqual(out_gpu.shape, shape)
self.assertEqual(out_tpu.shape, shape)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | mmv/models/tsm_utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils functions for TSM."""
from typing import Tuple
import jax
import jax.numpy as jnp
from mmv.models import types
def prepare_inputs(
inputs: types.TensorLike) -> Tuple[jnp.ndarray, str, int]:
"""Deduces input mode for TSM."""
# Deduce if we run on TPU based on input shape.
if len(inputs.shape) == 5:
# Input is given in the standard [B, T, H, W, 3] format.
tsm_mode = 'gpu'
num_frames = inputs.shape[1]
inputs = jnp.reshape(inputs, [-1] + list(inputs.shape[2:]))
else:
# Input is given in the [T * B, H, W, 3] format.
tsm_mode = 'tpu'
num_frames = None
return inputs, tsm_mode, num_frames
def prepare_outputs(outputs: types.TensorLike,
tsm_mode: str,
num_frames: int) -> jnp.ndarray:
"""Processes output of TSM by averaging representations over time axis."""
n_channels = outputs.shape[-1]
if tsm_mode == 'tpu':
outputs = jnp.reshape(outputs, [num_frames, -1, n_channels])
outputs = jnp.mean(outputs, axis=0)
elif tsm_mode == 'gpu':
outputs = jnp.reshape(outputs, [-1, num_frames, n_channels])
outputs = jnp.mean(outputs, axis=1)
else:
raise ValueError(
f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)')
return outputs
def apply_temporal_shift(
x: types.TensorLike,
tsm_mode: str,
num_frames: int,
channel_shift_fraction: float = 0.125) -> jnp.ndarray:
"""Performs a temporal shift: https://arxiv.org/abs/1811.08383 with mode."""
if tsm_mode == 'tpu':
outputs = temporal_shift_tpu(x, num_frames, channel_shift_fraction)
elif tsm_mode == 'gpu':
outputs = temporal_shift_gpu(x, num_frames, channel_shift_fraction)
else:
raise ValueError(
f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)')
return outputs
def temporal_shift_gpu(
x: types.TensorLike,
num_frames: int,
channel_shift_fraction: float = 0.125) -> jnp.ndarray:
"""Performs a temporal shift: https://arxiv.org/abs/1811.08383."""
# B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels
# Input is (B * T, H, W, C)
orig_shp = tuple(x.shape)
reshaped_x = jnp.reshape(x, (-1, num_frames) + orig_shp[1:])
n_channels = orig_shp[-1]
n_shift = int(n_channels * channel_shift_fraction)
new_shp = tuple(reshaped_x.shape)
# shifted_backward = reshaped_x[:, 1:, :, :, -n_shift:]
shifted_backward = jax.lax.slice(
reshaped_x, (0, 1, 0, 0, new_shp[4] - n_shift),
(new_shp[0], new_shp[1], new_shp[2], new_shp[3], new_shp[4]))
shifted_backward_padding = ((0, 0), (0, 1), (0, 0), (0, 0), (0, 0))
shifted_backward = jnp.pad(shifted_backward, shifted_backward_padding)
# shifted_forward = reshaped_x[:, :-1, :, :, :n_shift]
shifted_forward = jax.lax.slice(
reshaped_x, (0, 0, 0, 0, 0),
(new_shp[0], new_shp[1] - 1, new_shp[2], new_shp[3], n_shift))
shifted_forward_padding = ((0, 0), (1, 0), (0, 0), (0, 0), (0, 0))
shifted_forward = jnp.pad(shifted_forward, shifted_forward_padding)
no_shift = reshaped_x[:, :, :, :, n_shift:-n_shift]
shifted_x = jnp.concatenate([shifted_backward, no_shift, shifted_forward],
axis=4)
return jnp.reshape(shifted_x, (-1,) + orig_shp[1:])
def temporal_shift_tpu(
x: types.TensorLike,
num_frames: int,
channel_shift_fraction: float = 0.125) -> jnp.ndarray:
"""Performs a temporal shift: https://arxiv.org/abs/1811.08383.
  TPU-optimized version of TSM. A reshape is avoided by keeping the images in
  the [T * B, :] layout so that frames belonging to the same time step across
  videos are contiguous in memory. Thanks to cr/288510308, which allows fusing
  pad->slice into convolution, we reformulate the slice-then-pad as a
  pad-then-slice. Finally, to avoid a concatenate that would prevent some
  fusion from happening, we simply sum masked versions of the features.
Args:
x: Input expected to be [T * B, H, W, C] (where the batch has been reshaped
from a time major version of the input).
num_frames: number of frames T per video.
channel_shift_fraction: fraction of the channel to shift forward and
backward.
Returns:
The temporal shifted version of x.
"""
# B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels
# Input is (T * B, H, W, C)
original_shape = list(x.shape)
batch_size = int(original_shape[0] / num_frames)
n_channels = int(original_shape[-1])
n_shift = int(n_channels * channel_shift_fraction)
# Cast to bfloat16.
x = x.astype(jnp.bfloat16)
# For the following, assume that x has 3 channels [x1, x2, x3] and n_shift=1.
# Shift backward, we first pad by zeros [x1, x2, x3, 0, 0].
orig_shp = list(x.shape)
shifted_backward_padding = ((0, batch_size, 0), (0, 0, 0), (0, 0, 0),
(0, n_channels - n_shift, 0))
x_backward_padding = jax.lax.pad(
x,
padding_value=jnp.bfloat16(0.),
padding_config=shifted_backward_padding)
# The following shift gets to [x3^+1, 0, 0] (where +1 means from the future).
shifted_backward = jax.lax.slice(x_backward_padding,
(batch_size, 0, 0, n_channels - n_shift),
(orig_shp[0] + batch_size, orig_shp[1],
orig_shp[2], 2 * n_channels - n_shift))
# Shift forward, we first pad by zeros [0, 0, x1, x2, x3].
shifted_forward_padding = ((batch_size, 0, 0), (0, 0, 0), (0, 0, 0),
(n_channels - n_shift, 0, 0))
x_forward_padding = jax.lax.pad(
x,
padding_value=jnp.bfloat16(0.),
padding_config=shifted_forward_padding)
# The following shift gets to [0, 0, x1^-1] (where -1 means from the past).
shifted_forward = jax.lax.slice(
x_forward_padding, (0, 0, 0, 0),
(orig_shp[0], orig_shp[1], orig_shp[2], n_channels))
# No shift is in the middle, this gets [0, x2, 0].
mask_noshift = (jnp.reshape((jnp.arange(n_channels) >= n_shift) &
(jnp.arange(n_channels) < n_channels - n_shift),
(1, 1, 1, -1))).astype(jnp.bfloat16)
no_shift = mask_noshift * x
# By summing everything together, we end up with [x3^+1, x2, x1^-1].
# Note: channels have been reordered but that doesn't matter for the model.
shifted_x = shifted_backward + shifted_forward + no_shift
return shifted_x.astype(jnp.float32)
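# A minimal round-trip sketch (added for illustration, not part of the
# original module), assuming the standard GPU-mode layout [B, T, H, W, C]:
# inputs are flattened to [B * T, H, W, C], temporally shifted, and finally
# averaged over time back to [B, C].
if __name__ == '__main__':
  videos = jnp.zeros((2, 8, 16, 16, 32))  # [B, T, H, W, C]
  flat, tsm_mode, num_frames = prepare_inputs(videos)  # [B * T, H, W, C]
  shifted = apply_temporal_shift(flat, tsm_mode, num_frames)
  pooled = prepare_outputs(shifted, tsm_mode, num_frames)  # [B, C]
  print(flat.shape, shifted.shape, pooled.shape)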
| deepmind-research-master | mmv/models/tsm_utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Temporal Shift Module w/ ResNet-50 and ResNet-101.
Based on:
TSM: Temporal Shift Module for Efficient Video Understanding
Ji Lin, Chuang Gan, Song Han
https://arxiv.org/pdf/1811.08383.pdf.
"""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import tsm_utils as tsmu
from mmv.models import types
class TSMResNetBlock(hk.Module):
"""A ResNet subblock with Temporal Channel Shifting.
Combines a typical ResNetV2 block implementation
(see https://arxiv.org/abs/1512.03385) with a pre-convolution Temporal
Shift Module (see https://arxiv.org/pdf/1811.08383.pdf) in the residual.
"""
def __init__(self,
output_channels: int,
stride: int,
use_projection: bool,
tsm_mode: str,
normalize_fn: Optional[types.NormalizeFn] = None,
channel_shift_fraction: float = 0.125,
num_frames: int = 8,
name: str = 'TSMResNetBlock'):
"""Initializes the TSMResNetBlock module.
Args:
output_channels: Number of output channels.
stride: Stride used in convolutions.
use_projection: Whether to use a projection for the shortcut.
tsm_mode: Mode for TSM ('gpu' or 'tpu').
normalize_fn: Function used for normalization.
channel_shift_fraction: The fraction of temporally shifted channels. If
`channel_shift_fraction` is 0, the block is the same as a normal ResNet
block.
      num_frames: Size of frame dimension in a single batch example.
name: The name of the module.
"""
super().__init__(name=name)
self._output_channels = output_channels
self._bottleneck_channels = output_channels // 4
self._stride = stride
self._use_projection = use_projection
self._normalize_fn = normalize_fn
self._tsm_mode = tsm_mode
self._channel_shift_fraction = channel_shift_fraction
self._num_frames = num_frames
def __call__(self,
inputs: types.TensorLike,
is_training: bool = True) -> jnp.ndarray:
"""Connects the ResNetBlock module into the graph.
Args:
inputs: A 4-D float array of shape `[B, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 4-D float array of shape
`[B * num_frames, new_h, new_w, output_channels]`.
"""
# ResNet V2 uses pre-activation, where the batch norm and relu are before
# convolutions, rather than after as in ResNet V1.
preact = inputs
if self._normalize_fn is not None:
preact = self._normalize_fn(preact, is_training=is_training)
preact = jax.nn.relu(preact)
if self._use_projection:
shortcut = hk.Conv2D(
output_channels=self._output_channels,
kernel_shape=1,
stride=self._stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')(
preact)
else:
shortcut = inputs
# Eventually applies Temporal Shift Module.
if self._channel_shift_fraction != 0:
preact = tsmu.apply_temporal_shift(
preact, tsm_mode=self._tsm_mode, num_frames=self._num_frames,
channel_shift_fraction=self._channel_shift_fraction)
# First convolution.
residual = hk.Conv2D(
self._bottleneck_channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')(
preact)
# Second convolution.
if self._normalize_fn is not None:
residual = self._normalize_fn(residual, is_training=is_training)
residual = jax.nn.relu(residual)
residual = hk.Conv2D(
output_channels=self._bottleneck_channels,
kernel_shape=3,
stride=self._stride,
with_bias=False,
padding='SAME',
name='conv_1')(
residual)
# Third convolution.
if self._normalize_fn is not None:
residual = self._normalize_fn(residual, is_training=is_training)
residual = jax.nn.relu(residual)
residual = hk.Conv2D(
output_channels=self._output_channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')(
residual)
# NOTE: we do not use block multiplier.
output = shortcut + residual
return output
class TSMResNetUnit(hk.Module):
"""Block group for TSM ResNet."""
def __init__(self,
output_channels: int,
num_blocks: int,
stride: int,
tsm_mode: str,
num_frames: int,
normalize_fn: Optional[types.NormalizeFn] = None,
channel_shift_fraction: float = 0.125,
name: str = 'tsm_resnet_unit'):
"""Creates a TSMResNet Unit.
Args:
output_channels: Number of output channels.
num_blocks: Number of ResNet blocks in the unit.
stride: Stride of the unit.
tsm_mode: Which temporal shift module to use.
num_frames: Size of frame dimension in a single batch example.
normalize_fn: Function used for normalization.
channel_shift_fraction: The fraction of temporally shifted channels. If
`channel_shift_fraction` is 0, the block is the same as a normal ResNet
block.
name: The name of the module.
"""
super().__init__(name=name)
self._output_channels = output_channels
self._num_blocks = num_blocks
self._normalize_fn = normalize_fn
self._stride = stride
self._tsm_mode = tsm_mode
self._channel_shift_fraction = channel_shift_fraction
self._num_frames = num_frames
def __call__(self,
inputs: types.TensorLike,
is_training: bool) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 4-D float array of shape `[B * num_frames, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 4-D float array of shape
`[B * num_frames, H // stride, W // stride, output_channels]`.
"""
net = inputs
for idx_block in range(self._num_blocks):
net = TSMResNetBlock(
self._output_channels,
stride=self._stride if idx_block == 0 else 1,
use_projection=idx_block == 0,
normalize_fn=self._normalize_fn,
tsm_mode=self._tsm_mode,
channel_shift_fraction=self._channel_shift_fraction,
num_frames=self._num_frames,
name=f'block_{idx_block}')(
net, is_training=is_training)
return net
class TSMResNetV2(hk.Module):
"""TSM based on ResNet V2 as described in https://arxiv.org/abs/1603.05027."""
# Endpoints of the model in order.
VALID_ENDPOINTS = (
'tsm_resnet_stem',
'tsm_resnet_unit_0',
'tsm_resnet_unit_1',
'tsm_resnet_unit_2',
'tsm_resnet_unit_3',
'last_conv',
'Embeddings',
)
def __init__(self,
normalize_fn: Optional[types.NormalizeFn] = None,
depth: int = 50,
num_frames: int = 16,
channel_shift_fraction: float = 0.125,
width_mult: int = 1,
name: str = 'TSMResNetV2'):
"""Constructs a ResNet model.
Args:
normalize_fn: Function used for normalization.
depth: Depth of the desired ResNet.
num_frames: Number of frames (used in TPU mode).
channel_shift_fraction: Fraction of channels that are temporally shifted,
if `channel_shift_fraction` is 0, a regular ResNet is returned.
      width_mult: Multiplier for the channel width.
name: The name of the module.
Raises:
ValueError: If `channel_shift_fraction` or `depth` has invalid value.
"""
super().__init__(name=name)
if not 0. <= channel_shift_fraction <= 1.0:
raise ValueError(
f'channel_shift_fraction ({channel_shift_fraction})'
' has to be in [0, 1].')
self._num_frames = num_frames
self._channels = (256, 512, 1024, 2048)
self._strides = (1, 2, 2, 2)
num_blocks = {
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
200: (3, 24, 36, 3),
}
if depth not in num_blocks:
raise ValueError(
f'`depth` should be in {list(num_blocks.keys())} ({depth} given).')
self._num_blocks = num_blocks[depth]
self._width_mult = width_mult
self._channel_shift_fraction = channel_shift_fraction
self._normalize_fn = normalize_fn
def __call__(
self,
inputs: types.TensorLike,
is_training: bool = True,
final_endpoint: str = 'Embeddings') -> jnp.ndarray:
"""Connects the TSM ResNetV2 module into the graph.
Args:
inputs: A 4-D float array of shape `[B, H, W, C]`.
is_training: Whether to use training mode.
final_endpoint: Up to which endpoint to run / return.
Returns:
Network output at location `final_endpoint`. A float array which shape
depends on `final_endpoint`.
Raises:
ValueError: If `final_endpoint` is not recognized.
"""
# Prepare inputs for TSM.
inputs, tsm_mode, num_frames = tsmu.prepare_inputs(inputs)
num_frames = num_frames or self._num_frames
self._final_endpoint = final_endpoint
if self._final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError(f'Unknown final endpoint {self._final_endpoint}')
# Stem convolution.
end_point = 'tsm_resnet_stem'
net = hk.Conv2D(
output_channels=64 * self._width_mult,
kernel_shape=7,
stride=2,
with_bias=False,
name=end_point,
padding='SAME')(
inputs)
net = hk.MaxPool(
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')(
net)
if self._final_endpoint == end_point:
return net
# Residual block.
for unit_id, (channels, num_blocks, stride) in enumerate(
zip(self._channels, self._num_blocks, self._strides)):
end_point = f'tsm_resnet_unit_{unit_id}'
net = TSMResNetUnit(
output_channels=channels * self._width_mult,
num_blocks=num_blocks,
stride=stride,
normalize_fn=self._normalize_fn,
channel_shift_fraction=self._channel_shift_fraction,
num_frames=num_frames,
tsm_mode=tsm_mode,
name=end_point)(
net, is_training=is_training)
if self._final_endpoint == end_point:
return net
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
end_point = 'last_conv'
if self._final_endpoint == end_point:
return net
net = jnp.mean(net, axis=(1, 2))
# Prepare embedding outputs for TSM (temporal average of features).
net = tsmu.prepare_outputs(net, tsm_mode, num_frames)
assert self._final_endpoint == 'Embeddings'
return net
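# A minimal usage sketch (added for illustration, not part of the original
# module). The same network accepts either the [B, T, H, W, C] layout (GPU
# mode) or the [T * B, H, W, C] layout (TPU mode, which relies on the
# `num_frames` given at construction time); both produce one embedding per
# video. The shapes below are assumptions for the sketch only.
if __name__ == '__main__':
  def _embed(frames):
    return TSMResNetV2(depth=50, num_frames=8)(
        frames, is_training=False, final_endpoint='Embeddings')
  forward = hk.transform(_embed)
  gpu_inputs = jnp.zeros((2, 8, 224, 224, 3))   # [B, T, H, W, C]
  tpu_inputs = jnp.zeros((8 * 2, 224, 224, 3))  # [T * B, H, W, C]
  params = forward.init(jax.random.PRNGKey(0), gpu_inputs)
  print(forward.apply(params, None, gpu_inputs).shape)  # (2, 2048)
  print(forward.apply(params, None, tpu_inputs).shape)  # (2, 2048)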
| deepmind-research-master | mmv/models/tsm_resnet.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Haiku S3D model."""
import collections
from typing import Optional, Sequence
import haiku as hk
import jax
from jax import numpy as jnp
from mmv.models import types
class _MaxPool(hk.MaxPool):
"""A `hk.MaxPool` accepting (and discarding) an `is_training` argument."""
def __call__(self,
x: types.TensorLike,
is_training: bool = True) -> jnp.ndarray:
del is_training # Unused.
return super().__call__(x)
def self_gating(inputs: types.TensorLike) -> jnp.ndarray:
"""Feature gating as used in S3D-G.
Transforms the input features by aggregating features from all spatial and
temporal locations, and applying gating conditioned on the aggregated
features. More details can be found at: https://arxiv.org/abs/1712.04851.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
Returns:
    A tensor with the same shape as `inputs`.
Raises:
ValueError: If `inputs` has the wrong shape.
"""
if inputs.ndim != 5:
raise ValueError(
f'Expected an input of shape `[B, T, H, W, C]` but got {inputs.shape}.')
input_shape = inputs.shape
num_channels = input_shape[4]
spatiotemporal_average = jnp.mean(inputs, axis=(1, 2, 3))
weights = hk.Linear(num_channels, name='self_gating')(spatiotemporal_average)
weights = jax.nn.sigmoid(weights)
return jnp.multiply(weights[:, None, None, None, :], inputs)
class SUnit3D(hk.Module):
"""Base 3d Unit combining Conv3d + Batch Norm + non-linearity."""
def __init__(
self,
output_channels: int,
kernel_shape: Sequence[int] = (1, 1, 1),
stride: Sequence[int] = (1, 1, 1),
with_bias: bool = False,
separable: bool = False,
normalize_fn: Optional[types.NormalizeFn] = None,
activation_fn: Optional[types.ActivationFn] = jax.nn.relu,
self_gating_fn: Optional[types.GatingFn] = None,
name='SUnit3D'):
"""Initializes the SUnit3D module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. A sequence of length 3.
stride: Stride for the kernel. A sequence of length 3.
with_bias: Whether to add a bias to the convolution.
separable: Whether to use separable.
normalize_fn: Function used for normalization.
activation_fn: Function used as non-linearity.
self_gating_fn: Function used for self-gating.
name: The name of the module.
Raises:
ValueError: If `kernel_shape` or `stride` has the wrong shape.
"""
super().__init__(name=name)
# Check args.
if len(kernel_shape) != 3:
raise ValueError(
'Given `kernel_shape` must have length 3 but has length '
f'{len(kernel_shape)}.')
if len(stride) != 3:
raise ValueError(
f'Given `stride` must have length 3 but has length {len(stride)}.')
self._normalize_fn = normalize_fn
self._activation_fn = activation_fn
self._self_gating_fn = self_gating_fn
k0, k1, k2 = kernel_shape
if separable and k1 != 1:
spatial_kernel_shape = [1, k1, k2]
temporal_kernel_shape = [k0, 1, 1]
s0, s1, s2 = stride
spatial_stride = [1, s1, s2]
temporal_stride = [s0, 1, 1]
self._convolutions = [
hk.Conv3D(
output_channels=output_channels,
kernel_shape=spatial_kernel_shape,
stride=spatial_stride,
padding='SAME',
with_bias=with_bias),
hk.Conv3D(
output_channels=output_channels,
kernel_shape=temporal_kernel_shape,
stride=temporal_stride,
padding='SAME',
with_bias=with_bias)
]
else:
self._convolutions = [
hk.Conv3D(
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
padding='SAME',
with_bias=with_bias)]
def __call__(
self,
inputs: types.TensorLike,
is_training: bool) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 5-D float array of shape `[B, new_t, new_h, new_w, output_channels]`.
"""
x = inputs
for conv in self._convolutions:
x = conv(x)
if self._normalize_fn is not None:
x = self._normalize_fn(x, is_training=is_training)
if self._activation_fn is not None:
x = self._activation_fn(x)
if self._self_gating_fn:
x = self._self_gating_fn(x)
return x
class InceptionBlockV13D(hk.Module):
"""A 3D Inception v1 block.
This allows use of separable 3D convolutions and self-gating, as described in:
Rethinking Spatiotemporal Feature Learning For Video Understanding.
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy.
https://arxiv.org/abs/1712.04851.
"""
def __init__(self,
output_channels: Sequence[int],
normalize_fn: Optional[types.NormalizeFn],
temporal_kernel_size: int = 3,
self_gating_fn: Optional[types.GatingFn] = None,
name: str = 'InceptionBlockV13D'):
"""Initializes the InceptionBlockV13D module.
Args:
output_channels: The size of the output channels of each block, ordered as
[Conv2d_0a_1x1, Conv2d_0a_1x1, Conv2d_0b_3x3, Conv2d_0a_1x1,
Conv2d_0b_3x3, Conv2d_0b_1x1]
normalize_fn: Function used for normalization.
temporal_kernel_size: The size of the temporal convolutional filters in
the conv3d_spatiotemporal blocks.
self_gating_fn: Function which optionally performs self-gating. If `None`,
no self-gating is applied.
name: The name of the module.
Raises:
ValueError: If `output_channels` has the wrong shape.
"""
super().__init__(name=name)
# Check args.
if len(output_channels) != 6:
raise ValueError(
'Given `output_channels` must have length 6 but has length '
f'{len(output_channels)}.')
self._output_channels = output_channels
self._normalize_fn = normalize_fn
self._temporal_kernel_size = temporal_kernel_size
if self_gating_fn is None:
self._self_gating_fn = lambda x: x
else:
self._self_gating_fn = self_gating_fn
def __call__(
self,
inputs: types.TensorLike,
is_training: bool) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 5-D float array of shape
`[B, new_t, new_h, new_w, sum(output_channels)]`.
"""
# Branch 0
branch_0 = SUnit3D(
output_channels=self._output_channels[0],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_0_Conv2d_0a_1x1')(
inputs, is_training=is_training)
# Branch 1
branch_1 = SUnit3D(
output_channels=self._output_channels[1],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=None,
name='Branch_1_Conv2d_0a_1x1')(
inputs, is_training=is_training)
branch_1 = SUnit3D(
output_channels=self._output_channels[2],
kernel_shape=(self._temporal_kernel_size, 3, 3),
separable=True,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_1_Conv2d_0b_3x3')(
branch_1, is_training=is_training)
# Branch 2
branch_2 = SUnit3D(
output_channels=self._output_channels[3],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=None,
name='Branch_2_Conv2d_0a_1x1')(
inputs, is_training=is_training)
branch_2 = SUnit3D(
output_channels=self._output_channels[4],
kernel_shape=(self._temporal_kernel_size, 3, 3),
separable=True,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_2_Conv2d_0b_3x3')(
branch_2, is_training=is_training)
# Branch 3
branch_3 = hk.MaxPool(
window_shape=(1, 3, 3, 3, 1),
strides=(1, 1, 1, 1, 1),
padding='SAME',
name='Branch_3_MaxPool_0a_3x3')(
inputs)
branch_3 = SUnit3D(
output_channels=self._output_channels[5],
kernel_shape=(1, 1, 1),
separable=False,
normalize_fn=self._normalize_fn,
self_gating_fn=self._self_gating_fn,
name='Branch_3_Conv2d_0b_1x1')(
branch_3, is_training=is_training)
return jnp.concatenate((branch_0, branch_1, branch_2, branch_3), axis=4)
_Layer = collections.namedtuple('_Layer', ('name', 'module', 'kwargs'))
class S3D(hk.Module):
"""S3D architecture.
Any intermediary representation can be obtained by choosing one of the valid
`final_endpoint`s. The final value returned by this model (when 'Embeddings'
is used as `final_endpoint`) is a single 1-D representation for each video in
the batch. Another layer can be externally added on top of that to obtain
logits.
"""
# Endpoints of the model in order.
VALID_ENDPOINTS = (
'Conv2d_1a_7x7',
'MaxPool_2a_3x3',
'Conv2d_2b_1x1',
'Conv2d_2c_3x3',
'MaxPool_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'Mixed_4d',
'Mixed_4e',
'Mixed_4f',
'MaxPool_5a_2x2',
'Mixed_5b',
'Mixed_5c',
'Embeddings',
)
def __init__(self,
normalize_fn: Optional[types.NormalizeFn] = None,
first_temporal_kernel_size: int = 7,
temporal_conv_startat: Optional[str] = 'Conv2d_2c_3x3',
gating_startat: Optional[str] = 'Conv2d_2c_3x3',
name='S3D'):
"""Initializes the S3D module.
Args:
normalize_fn: Function used for normalization.
first_temporal_kernel_size: Specifies the temporal kernel size for the
first conv3d filter. A larger value slows down the model but provides
little accuracy improvement. Must be set to one of 1, 3, 5 or 7.
temporal_conv_startat: Specifies the first conv block to use separable 3D
convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
first valid block to use separable 3D convs. If provided block name is
not present, all valid blocks will use separable 3D convs.
gating_startat: Specifies the first conv block to use self gating.
'Conv2d_2c_3x3' is the first valid block to use self gating. If provided
        block name is not present, all valid blocks will use self gating.
name: The name of the module.
Raises:
ValueError: If `temporal_conv_startat`, `gating_startat` or
`first_temporal_kernel_size` is not recognized.
"""
super().__init__(name=name)
self._first_temporal_kernel_size = first_temporal_kernel_size
self._temporal_conv_startat = temporal_conv_startat
self._gating_startat = gating_startat
self._normalize_fn = normalize_fn
if (temporal_conv_startat not in self.VALID_ENDPOINTS
and temporal_conv_startat is not None):
raise ValueError(
f'Provided `temporal_conv_startat`: {temporal_conv_startat} not '
f'valid. It must be one of: {self.VALID_ENDPOINTS}, or `None`.')
if (gating_startat not in self.VALID_ENDPOINTS
and gating_startat is not None):
raise ValueError(
f'Provided `gating_startat`: {gating_startat} not valid. '
f'It must be one of: {self.VALID_ENDPOINTS}, or `None`.')
if first_temporal_kernel_size not in [1, 3, 5, 7]:
raise ValueError('`first_temporal_kernel_size` can only be 1, 3, 5 or 7.')
def __call__(self,
inputs: types.TensorLike,
is_training: bool,
final_endpoint: str = 'Embeddings') -> jnp.ndarray:
"""Connects the model to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
final_endpoint: Up to which endpoint to run / return.
    Returns:
      Network output at location `final_endpoint`. A float array whose shape
      depends on `final_endpoint`.
Raises:
ValueError: If `final_endpoint` is not recognized.
"""
if final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError(f'Provided final_endpoint: {final_endpoint} not valid.'
f' It must be one of: {self.VALID_ENDPOINTS}')
x = inputs
# We define layers with tuples (name, module, kwargs)
# Not all kwargs are present, as we will need to fill in certain properties
# as we move down the network.
layers = []
# The first layer is conditional on the input data shape: the channel size
# is used to identify whether the `space_to_depth` transformation has been
# applied to the input. This is used to speed up computation on TPUs.
if x.shape[-1] == 3:
layers.append(
_Layer('Conv2d_1a_7x7', SUnit3D,
dict(output_channels=64, stride=(2, 2, 2), separable=False,
kernel_shape=(self._first_temporal_kernel_size, 7, 7),
normalize_fn=self._normalize_fn)))
else:
layers.append(
_Layer('Conv2d_1a_7x7', SUnit3D,
dict(output_channels=64, kernel_shape=(2, 4, 4),
stride=(1, 1, 1), separable=False,
normalize_fn=self._normalize_fn)))
layers.extend([
_Layer('MaxPool_2a_3x3', _MaxPool,
dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1),
padding='SAME')),
_Layer('Conv2d_2b_1x1', SUnit3D,
dict(output_channels=64, kernel_shape=(1, 1, 1),
normalize_fn=self._normalize_fn)),
_Layer('Conv2d_2c_3x3', SUnit3D,
dict(output_channels=192, separable=True,
normalize_fn=self._normalize_fn)),
_Layer('MaxPool_3a_3x3', _MaxPool,
dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1),
padding='SAME')),
_Layer('Mixed_3b', InceptionBlockV13D,
dict(output_channels=(64, 96, 128, 16, 32, 32),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_3c', InceptionBlockV13D,
dict(output_channels=(128, 128, 192, 32, 96, 64),
normalize_fn=self._normalize_fn)),
_Layer('MaxPool_4a_3x3', _MaxPool,
dict(window_shape=(1, 3, 3, 3, 1), strides=(1, 2, 2, 2, 1),
padding='SAME')),
_Layer('Mixed_4b', InceptionBlockV13D,
dict(output_channels=(192, 96, 208, 16, 48, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4c', InceptionBlockV13D,
dict(output_channels=(160, 112, 224, 24, 64, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4d', InceptionBlockV13D,
dict(output_channels=(128, 128, 256, 24, 64, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4e', InceptionBlockV13D,
dict(output_channels=(112, 144, 288, 32, 64, 64),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_4f', InceptionBlockV13D,
dict(output_channels=(256, 160, 320, 32, 128, 128),
normalize_fn=self._normalize_fn)),
_Layer('MaxPool_5a_2x2', _MaxPool,
dict(window_shape=(1, 2, 2, 2, 1), strides=(1, 2, 2, 2, 1),
padding='SAME')),
_Layer('Mixed_5b', InceptionBlockV13D,
dict(output_channels=(256, 160, 320, 32, 128, 128),
normalize_fn=self._normalize_fn)),
_Layer('Mixed_5c', InceptionBlockV13D,
dict(output_channels=(384, 192, 384, 48, 128, 128),
normalize_fn=self._normalize_fn)),
])
    # These parameters may change throughout the computation.
self_gating_fn = None
temporal_kernel_size = 1
# Iterate over layers.
for layer in layers:
      # Turn on self-gating and temporal (3D) kernels once their respective
      # start blocks are reached; they stay on for all later layers.
if layer.name == self._gating_startat:
self_gating_fn = self_gating
if layer.name == self._temporal_conv_startat:
temporal_kernel_size = 3
kwargs = layer.kwargs
if layer.module is SUnit3D:
kwargs['self_gating_fn'] = self_gating_fn
if 'kernel_shape' not in kwargs:
kwargs['kernel_shape'] = (temporal_kernel_size, 3, 3)
elif layer.module is InceptionBlockV13D:
kwargs['self_gating_fn'] = self_gating_fn
kwargs['temporal_kernel_size'] = temporal_kernel_size
module = layer.module(name=layer.name, **kwargs)
x = module(x, is_training=is_training)
if final_endpoint == layer.name:
return x
assert final_endpoint == 'Embeddings'
return jnp.mean(x, axis=(1, 2, 3))
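# A minimal sketch (added for illustration, not part of the original module):
# `self_gating` aggregates features over all spatio-temporal positions and
# re-weights each channel with a learned sigmoid gate, so it must run inside
# an `hk.transform` (it creates an `hk.Linear` layer). Shapes are arbitrary.
if __name__ == '__main__':
  gate = hk.transform(self_gating)
  video_features = jnp.zeros((2, 4, 8, 8, 16))  # [B, T, H, W, C]
  params = gate.init(jax.random.PRNGKey(0), video_features)
  print(gate.apply(params, None, video_features).shape)  # (2, 4, 8, 8, 16)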
| deepmind-research-master | mmv/models/s3d.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type Aliases."""
from typing import Callable, Tuple, Union
import jax.numpy as jnp
import numpy as np
import optax
TensorLike = Union[np.ndarray, jnp.DeviceArray]
ActivationFn = Callable[[TensorLike], TensorLike]
GatingFn = Callable[[TensorLike], TensorLike]
NetworkFn = Callable[[TensorLike], TensorLike]
# Callable doesn't allow kwargs to be used, and we often want to
# pass in is_training=..., so ignore the arguments for the sake of pytype.
NormalizeFn = Callable[..., TensorLike]
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
| deepmind-research-master | mmv/models/types.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet V2 modules.
Equivalent to hk.Resnet except accepting a final_endpoint to return
intermediate activations.
"""
from typing import Optional, Sequence, Text, Type, Union
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import types
class BottleneckBlock(hk.Module):
"""Implements a bottleneck residual block (ResNet50 and ResNet101)."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None):
super(BottleneckBlock, self).__init__(name=name)
self._channels = channels
self._stride = stride
self._use_projection = use_projection
self._normalize_fn = normalize_fn
if self._use_projection:
self._proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self._conv_0 = hk.Conv2D(
output_channels=channels // 4,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
self._conv_1 = hk.Conv2D(
output_channels=channels // 4,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
self._conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')
def __call__(self,
inputs,
is_training):
net = inputs
shortcut = inputs
for i, conv_i in enumerate([self._conv_0, self._conv_1, self._conv_2]):
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
if i == 0 and self._use_projection:
shortcut = self._proj_conv(net)
# Now do the convs.
net = conv_i(net)
return net + shortcut
class BasicBlock(hk.Module):
"""Implements a basic residual block (ResNet18 and ResNet34)."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None):
super(BasicBlock, self).__init__(name=name)
self._channels = channels
self._stride = stride
self._use_projection = use_projection
self._normalize_fn = normalize_fn
if self._use_projection:
self._proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self._conv_0 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
self._conv_1 = hk.Conv2D(
output_channels=channels,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
def __call__(self,
inputs,
is_training):
net = inputs
shortcut = inputs
for i, conv_i in enumerate([self._conv_0, self._conv_1]):
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
if i == 0 and self._use_projection:
shortcut = self._proj_conv(net)
# Now do the convs.
net = conv_i(net)
return net + shortcut
class ResNetUnit(hk.Module):
"""Unit (group of blocks) for ResNet."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
block_module: Type[BottleneckBlock],
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None,
remat: bool = False):
super(ResNetUnit, self).__init__(name=name)
self._channels = channels
self._num_blocks = num_blocks
self._stride = stride
self._normalize_fn = normalize_fn
self._block_module = block_module
self._remat = remat
def __call__(self,
inputs,
is_training):
input_channels = inputs.shape[-1]
self._blocks = []
for id_block in range(self._num_blocks):
use_projection = id_block == 0 and self._channels != input_channels
self._blocks.append(
self._block_module(
channels=self._channels,
stride=self._stride if id_block == 0 else 1,
use_projection=use_projection,
normalize_fn=self._normalize_fn,
name='block_%d' % id_block))
net = inputs
for block in self._blocks:
if self._remat:
# Note: we can ignore cell-var-from-loop because the lambda is evaluated
# inside every iteration of the loop. This is needed to go around the
# way variables are passed to jax.remat.
net = hk.remat(lambda x: block(x, is_training=is_training))(net) # pylint: disable=cell-var-from-loop
else:
net = block(net, is_training=is_training)
return net
class ResNetV2(hk.Module):
"""ResNetV2 model."""
# Endpoints of the model in order.
VALID_ENDPOINTS = (
'resnet_stem',
'resnet_unit_0',
'resnet_unit_1',
'resnet_unit_2',
'resnet_unit_3',
'last_conv',
'output',
)
# pylint:disable=g-bare-generic
def __init__(self,
depth=50,
num_classes: Optional[int] = 1000,
width_mult: int = 1,
normalize_fn: Optional[types.NormalizeFn] = None,
name: Optional[Text] = None,
remat: bool = False):
"""Creates ResNetV2 Haiku module.
Args:
      depth: depth of the desired ResNet (18, 34, 50, 101, 152 or 200).
num_classes: (int) Number of outputs in final layer. If None will not add
a classification head and will return the output embedding.
width_mult: multiplier for channel width.
normalize_fn: normalization function, see helpers/utils.py
name: Name of the module.
remat: Whether to rematerialize intermediate activations (saves memory).
"""
super(ResNetV2, self).__init__(name=name)
self._normalize_fn = normalize_fn
self._num_classes = num_classes
self._width_mult = width_mult
self._strides = [1, 2, 2, 2]
num_blocks = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3],
}
if depth not in num_blocks:
raise ValueError(
f'`depth` should be in {list(num_blocks.keys())} ({depth} given).')
self._num_blocks = num_blocks[depth]
if depth >= 50:
self._block_module = BottleneckBlock
self._channels = [256, 512, 1024, 2048]
else:
self._block_module = BasicBlock
self._channels = [64, 128, 256, 512]
self._initial_conv = hk.Conv2D(
output_channels=64 * self._width_mult,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='initial_conv')
if remat:
self._initial_conv = hk.remat(self._initial_conv)
self._block_groups = []
for i in range(4):
self._block_groups.append(
ResNetUnit(
channels=self._channels[i] * self._width_mult,
num_blocks=self._num_blocks[i],
block_module=self._block_module,
stride=self._strides[i],
normalize_fn=self._normalize_fn,
name='block_group_%d' % i,
remat=remat))
if num_classes is not None:
self._logits_layer = hk.Linear(
output_size=num_classes, w_init=jnp.zeros, name='logits')
def __call__(self, inputs, is_training, final_endpoint='output'):
self._final_endpoint = final_endpoint
net = self._initial_conv(inputs)
net = hk.max_pool(
net, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')
end_point = 'resnet_stem'
if self._final_endpoint == end_point:
return net
for i_group, block_group in enumerate(self._block_groups):
net = block_group(net, is_training=is_training)
end_point = f'resnet_unit_{i_group}'
if self._final_endpoint == end_point:
return net
end_point = 'last_conv'
if self._final_endpoint == end_point:
return net
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
# The actual representation
net = jnp.mean(net, axis=[1, 2])
assert self._final_endpoint == 'output'
if self._num_classes is None:
# If num_classes was None, we just return the output
# of the last block, without fully connected layer.
return net
return self._logits_layer(net)
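# A minimal usage sketch (added for illustration, not part of the original
# module): with `num_classes=None` the model returns the pooled embedding,
# while an earlier `final_endpoint` would return the corresponding feature
# map. `normalize_fn` is left as None only to keep the sketch stateless; in
# practice it would come from `normalization.get_normalize_fn`.
if __name__ == '__main__':
  def _features(images):
    return ResNetV2(depth=50, num_classes=None)(
        images, is_training=False, final_endpoint='output')
  forward = hk.transform(_features)
  images = jnp.zeros((2, 224, 224, 3))
  params = forward.init(jax.random.PRNGKey(0), images)
  print(forward.apply(params, None, images).shape)  # (2, 2048)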
| deepmind-research-master | mmv/models/resnet.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalize functions constructors."""
from typing import Any, Dict, Optional, Sequence, Union
import haiku as hk
from jax import numpy as jnp
from mmv.models import types
class _BatchNorm(hk.BatchNorm):
"""A `hk.BatchNorm` with adapted default arguments."""
def __init__(self,
create_scale: bool = True,
create_offset: bool = True,
decay_rate: float = 0.9,
eps: float = 1e-5,
test_local_stats: bool = False,
**kwargs):
# Check args.
if kwargs.get('cross_replica_axis', None) is not None:
raise ValueError(
'Attempting to use \'batch_norm\' normalizer, but specifying '
'`cross_replica_axis`. If you want this behavior use '
'`normalizer=\'cross_replica_batch_norm\'` directly.')
self._test_local_stats = test_local_stats
super().__init__(create_scale=create_scale,
create_offset=create_offset,
decay_rate=decay_rate,
eps=eps,
**kwargs)
def __call__(self,
x: types.TensorLike,
is_training: bool) -> jnp.ndarray:
return super().__call__(x, is_training,
test_local_stats=self._test_local_stats)
class _CrossReplicaBatchNorm(hk.BatchNorm):
"""A `hk.BatchNorm` with adapted default arguments for cross replica."""
def __init__(self,
create_scale: bool = True,
create_offset: bool = True,
decay_rate: float = 0.9,
eps: float = 1e-5,
test_local_stats: bool = False,
**kwargs):
# Check args.
if 'cross_replica_axis' in kwargs and kwargs['cross_replica_axis'] is None:
raise ValueError(
'Attempting to use \'cross_replica_batch_norm\' normalizer, but '
'specifying `cross_replica_axis` to be None. If you want this '
'behavior use `normalizer=\'batch_norm\'` directly.')
self._test_local_stats = test_local_stats
kwargs['cross_replica_axis'] = kwargs.get('cross_replica_axis', 'i')
super().__init__(create_scale=create_scale,
create_offset=create_offset,
decay_rate=decay_rate,
eps=eps,
**kwargs)
def __call__(self,
x: types.TensorLike,
is_training: bool) -> jnp.ndarray:
return super().__call__(x, is_training,
test_local_stats=self._test_local_stats)
class _LayerNorm(hk.LayerNorm):
"""A `hk.LayerNorm` accepting (and discarding) an `is_training` argument."""
def __init__(self,
axis: Union[int, Sequence[int]] = (1, 2),
create_scale: bool = True,
create_offset: bool = True,
**kwargs):
super().__init__(axis=axis,
create_scale=create_scale,
create_offset=create_offset,
**kwargs)
def __call__(self,
x: types.TensorLike,
is_training: bool) -> jnp.ndarray:
del is_training # Unused.
return super().__call__(x)
_NORMALIZER_NAME_TO_CLASS = {
'batch_norm': _BatchNorm,
'cross_replica_batch_norm': _CrossReplicaBatchNorm,
'layer_norm': _LayerNorm,
}
def get_normalize_fn(
normalizer_name: str = 'batch_norm',
normalizer_kwargs: Optional[Dict[str, Any]] = None,
) -> types.NormalizeFn:
"""Handles NormalizeFn creation.
These functions are expected to be used as part of Haiku model. On each
application of the returned normalization_fn, a new Haiku layer will be added
to the model.
Args:
normalizer_name: The name of the normalizer to be constructed.
normalizer_kwargs: The kwargs passed to the normalizer constructor.
Returns:
A `types.NormalizeFn` that when applied will create a new layer.
Raises:
ValueError: If `normalizer_name` is unknown.
"""
# Check args.
if normalizer_name not in _NORMALIZER_NAME_TO_CLASS:
raise ValueError(f'Unrecognized `normalizer_name` {normalizer_name}.')
normalizer_class = _NORMALIZER_NAME_TO_CLASS[normalizer_name]
normalizer_kwargs = normalizer_kwargs or dict()
return lambda *a, **k: normalizer_class(**normalizer_kwargs)(*a, **k) # pylint: disable=unnecessary-lambda
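# A minimal usage sketch (added for illustration, not part of the original
# module): each call of the returned function creates a fresh normalization
# layer, and the batch-norm variants carry moving-average state, so the
# enclosing model is wrapped with `hk.transform_with_state`.
if __name__ == '__main__':
  import jax
  def _normalized(x, is_training):
    normalize_fn = get_normalize_fn('batch_norm')
    return normalize_fn(x, is_training=is_training)
  forward = hk.transform_with_state(_normalized)
  x = jnp.ones((4, 8, 8, 16))
  params, state = forward.init(jax.random.PRNGKey(0), x, is_training=True)
  y, _ = forward.apply(params, state, None, x, is_training=True)
  print(y.shape)  # (4, 8, 8, 16)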
| deepmind-research-master | mmv/models/normalization.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for s3d."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import numpy as np
from mmv.models import normalization
from mmv.models import s3d
class _CallableS3D:
"""Wrapper around S3D that take care of parameter book keeping."""
def __init__(self, *args, **kwargs):
self._model = hk.transform_with_state(
lambda *a, **k: # pylint: disable=g-long-lambda,unnecessary-lambda
s3d.S3D(
normalize_fn=normalization.get_normalize_fn(),
*args, **kwargs)(*a, **k))
self._rng = jax.random.PRNGKey(42)
self._params, self._state = None, None
def init(self, inputs, **kwargs):
self._params, self._state = self._model.init(
self._rng, inputs, is_training=True, **kwargs)
def __call__(self, inputs, **kwargs):
if self._params is None:
self.init(inputs)
output, _ = self._model.apply(
self._params, self._state, self._rng, inputs, **kwargs)
return output
class S3DTest(parameterized.TestCase):
# Testing all layers is quite slow, added in comments for completeness.
@parameterized.parameters(
# dict(endpoint='Conv2d_1a_7x7', expected_size=(2, 8, 112, 112, 64)),
# dict(endpoint='MaxPool_2a_3x3', expected_size=(2, 8, 56, 56, 64)),
# dict(endpoint='Conv2d_2b_1x1', expected_size=(2, 8, 56, 56, 64)),
# dict(endpoint='Conv2d_2c_3x3', expected_size=(2, 8, 56, 56, 192)),
# dict(endpoint='MaxPool_3a_3x3', expected_size=(2, 8, 28, 28, 192)),
# dict(endpoint='Mixed_3b', expected_size=(2, 8, 28, 28, 256)),
# dict(endpoint='Mixed_3c', expected_size=(2, 8, 28, 28, 480)),
# dict(endpoint='MaxPool_4a_3x3', expected_size=(2, 4, 14, 14, 480)),
# dict(endpoint='Mixed_4b', expected_size=(2, 4, 14, 14, 512)),
# dict(endpoint='Mixed_4c', expected_size=(2, 4, 14, 14, 512)),
# dict(endpoint='Mixed_4d', expected_size=(2, 4, 14, 14, 512)),
# dict(endpoint='Mixed_4e', expected_size=(2, 4, 14, 14, 528)),
# dict(endpoint='Mixed_4f', expected_size=(2, 4, 14, 14, 832)),
# dict(endpoint='MaxPool_5a_2x2', expected_size=(2, 2, 7, 7, 832)),
# dict(endpoint='Mixed_5b', expected_size=(2, 2, 7, 7, 832)),
# dict(endpoint='Mixed_5c', expected_size=(2, 2, 7, 7, 1024)),
dict(endpoint='Embeddings', expected_size=(2, 1024)),
)
def test_endpoint_expected_output_dimensions(self, endpoint, expected_size):
inputs = np.random.normal(size=(2, 16, 224, 224, 3))
model = _CallableS3D()
output = model(inputs, is_training=False, final_endpoint=endpoint)
self.assertSameElements(output.shape, expected_size)
def test_space_to_depth(self):
inputs = np.random.normal(size=(2, 16//2, 224//2, 224//2, 3*2*2*2))
model = _CallableS3D()
output = model(inputs, is_training=False, final_endpoint='Conv2d_1a_7x7')
self.assertSameElements(output.shape, (2, 8, 112, 112, 64))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | mmv/models/s3d_test.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
import tensorflow_hub as hub
def fid(generated_sentences, real_sentences):
"""Compute FID rn sentences using pretrained universal sentence encoder.
Args:
generated_sentences: list of N strings.
real_sentences: list of N strings.
Returns:
Frechet distance between activations.
"""
embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")
real_embed = embed(real_sentences)
generated_embed = embed(generated_sentences)
distance = tfgan.eval.frechet_classifier_distance_from_activations(
real_embed, generated_embed)
# Restrict the thread pool size to prevent excessive CPU usage.
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 16
config.inter_op_parallelism_threads = 16
with tf.Session(config=config) as session:
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
distance_np = session.run(distance)
return distance_np
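# Illustrative call (not part of the original module): both lists must have
# the same length, and the first use downloads the universal sentence encoder
# from TF Hub, so this needs network access and a TF1-style graph context.
#
#   score = fid(generated_sentences=["a cat sat on the mat ."],
#               real_sentences=["the dog ran in the park ."])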
| deepmind-research-master | scratchgan/eval_metrics.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | scratchgan/__init__.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generators for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from scratchgan import utils
class LSTMGen(snt.AbstractModule):
"""A multi-layer LSTM language model.
Uses tied input/output embedding weights.
"""
def __init__(self,
vocab_size,
feature_sizes,
max_sequence_length,
batch_size,
use_layer_norm,
trainable_embedding_size,
input_dropout,
output_dropout,
pad_token,
embedding_source=None,
vocab_file=None,
name='lstm_gen'):
super(LSTMGen, self).__init__(name=name)
self._feature_sizes = feature_sizes
self._max_sequence_length = max_sequence_length
self._vocab_size = vocab_size
self._batch_size = batch_size
self._use_layer_norm = use_layer_norm
self._trainable_embedding_size = trainable_embedding_size
self._embedding_source = embedding_source
self._vocab_file = vocab_file
self._input_dropout = input_dropout
self._output_dropout = output_dropout
self._pad_token = pad_token
if self._embedding_source:
assert vocab_file
def _build(self, is_training=True, temperature=1.0):
input_keep_prob = (1. - self._input_dropout) if is_training else 1.0
output_keep_prob = (1. - self._output_dropout) if is_training else 1.0
batch_size = self._batch_size
max_sequence_length = self._max_sequence_length
if self._embedding_source:
all_embeddings = utils.make_partially_trainable_embeddings(
self._vocab_file, self._embedding_source, self._vocab_size,
self._trainable_embedding_size)
else:
all_embeddings = tf.get_variable(
'trainable_embeddings',
shape=[self._vocab_size, self._trainable_embedding_size],
trainable=True)
_, self._embedding_size = all_embeddings.shape.as_list()
input_embeddings = tf.nn.dropout(all_embeddings, keep_prob=input_keep_prob)
output_embeddings = tf.nn.dropout(
all_embeddings, keep_prob=output_keep_prob)
out_bias = tf.get_variable(
'out_bias', shape=[1, self._vocab_size], dtype=tf.float32)
in_proj = tf.get_variable(
'in_proj', shape=[self._embedding_size, self._feature_sizes[0]])
# If more than 1 layer, then output has dim sum(self._feature_sizes),
# which is different from input dim == self._feature_sizes[0]
# So we need a different projection matrix for input and output.
if len(self._feature_sizes) > 1:
out_proj = tf.get_variable(
'out_proj', shape=[self._embedding_size,
sum(self._feature_sizes)])
else:
out_proj = in_proj
encoder_cells = []
for feature_size in self._feature_sizes:
encoder_cells += [
snt.LSTM(feature_size, use_layer_norm=self._use_layer_norm)
]
encoder_cell = snt.DeepRNN(encoder_cells)
state = encoder_cell.initial_state(batch_size)
# Manual unrolling.
samples_list, logits_list, logprobs_list, embeddings_list = [], [], [], []
sample = tf.tile(
tf.constant(self._pad_token, dtype=tf.int32)[None], [batch_size])
logging.info('Unrolling over %d steps.', max_sequence_length)
for _ in range(max_sequence_length):
# Input is sampled word at t-1.
embedding = tf.nn.embedding_lookup(input_embeddings, sample)
embedding.shape.assert_is_compatible_with(
[batch_size, self._embedding_size])
embedding_proj = tf.matmul(embedding, in_proj)
embedding_proj.shape.assert_is_compatible_with(
[batch_size, self._feature_sizes[0]])
outputs, state = encoder_cell(embedding_proj, state)
outputs_proj = tf.matmul(outputs, out_proj, transpose_b=True)
logits = tf.matmul(
outputs_proj, output_embeddings, transpose_b=True) + out_bias
categorical = tfp.distributions.Categorical(logits=logits/temperature)
sample = categorical.sample()
logprobs = categorical.log_prob(sample)
samples_list.append(sample)
logits_list.append(logits)
logprobs_list.append(logprobs)
embeddings_list.append(embedding)
# Create an op to retrieve embeddings for full sequence, useful for testing.
embeddings = tf.stack( # pylint: disable=unused-variable
embeddings_list,
axis=1,
name='embeddings')
sequence = tf.stack(samples_list, axis=1)
logprobs = tf.stack(logprobs_list, axis=1)
# The sequence stops after the first occurrence of a PAD token.
sequence_length = utils.get_first_occurrence_indices(
sequence, self._pad_token)
mask = utils.get_mask_past_symbol(sequence, self._pad_token)
masked_sequence = sequence * tf.cast(mask, tf.int32)
masked_logprobs = logprobs * tf.cast(mask, tf.float32)
return {
'sequence': masked_sequence,
'sequence_length': sequence_length,
'logprobs': masked_logprobs
}
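# Illustrative sketch (not part of the original file): the per-step sampling
# used in the unrolled loop above, written in plain NumPy for a single
# hypothetical logits vector. `temperature` plays the same role as in `_build`.
def _demo_temperature_sampling(logits, temperature=1.0, seed=0):
  import numpy as np
  rng = np.random.default_rng(seed)
  scaled = np.asarray(logits, dtype=np.float64) / temperature
  probs = np.exp(scaled - scaled.max())
  probs /= probs.sum()
  sample = rng.choice(len(probs), p=probs)  # Categorical sample.
  logprob = np.log(probs[sample])           # Matches categorical.log_prob(sample).
  return sample, logprob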
| deepmind-research-master | scratchgan/generators.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing text files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from absl import logging
import numpy as np
from tensorflow.compat.v1.io import gfile
# sequences: [N, MAX_TOKENS_SEQUENCE] array of int32
# lengths: [N] array of int32, such that
#   lengths[i] is the number of non-pad tokens in sequences[i, :]
FILENAMES = {
"emnlp2017": ("train.json", "valid.json", "test.json"),
}
# EMNLP2017 sentences have max length 50, add one for a PAD token so that all
# sentences end with PAD.
MAX_TOKENS_SEQUENCE = {"emnlp2017": 52}
UNK = "<unk>"
PAD = " "
PAD_INT = 0
def tokenize(sentence):
"""Split a string into words."""
return sentence.split(" ") + [PAD]
def _build_vocab(json_data):
"""Builds full vocab from json data."""
vocab = collections.Counter()
for sentence in json_data:
tokens = tokenize(sentence["s"])
vocab.update(tokens)
for title in sentence["t"]:
title_tokens = tokenize(title)
vocab.update(title_tokens)
# Most common words first.
count_pairs = sorted(list(vocab.items()), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
words = list(words)
if UNK not in words:
words = [UNK] + words
word_to_id = dict(list(zip(words, list(range(len(words))))))
# Tokens are now sorted by frequency. There's no guarantee that `PAD` will
# end up at `PAD_INT` index. Enforce it by swapping whatever token is
# currently at the `PAD_INT` index with the `PAD` token.
word = list(word_to_id.keys())[list(word_to_id.values()).index(PAD_INT)]
word_to_id[PAD], word_to_id[word] = word_to_id[word], word_to_id[PAD]
assert word_to_id[PAD] == PAD_INT
return word_to_id
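# Illustrative sketch (not part of the original file): a minimal check of the
# PAD-swap logic above on a hypothetical toy vocabulary.
def _demo_pad_swap():
  toy = {"the": 0, "a": 1, "b": 2, PAD: 3}  # PAD not yet at PAD_INT.
  word = list(toy.keys())[list(toy.values()).index(PAD_INT)]
  toy[PAD], toy[word] = toy[word], toy[PAD]
  assert toy[PAD] == PAD_INT and toy["the"] == 3
  return toy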
def string_sequence_to_sequence(string_sequence, word_to_id):
result = []
for word in string_sequence:
if word in word_to_id:
result.append(word_to_id[word])
else:
result.append(word_to_id[UNK])
return result
def _integerize(json_data, word_to_id, dataset):
"""Transform words into integers."""
sequences = np.full((len(json_data), MAX_TOKENS_SEQUENCE[dataset]),
word_to_id[PAD], np.int32)
sequence_lengths = np.zeros(shape=(len(json_data)), dtype=np.int32)
for i, sentence in enumerate(json_data):
sequence_i = string_sequence_to_sequence(
tokenize(sentence["s"]), word_to_id)
sequence_lengths[i] = len(sequence_i)
sequences[i, :sequence_lengths[i]] = np.array(sequence_i)
return {
"sequences": sequences,
"sequence_lengths": sequence_lengths,
}
def get_raw_data(data_path, dataset, truncate_vocab=20000):
"""Load raw data from data directory "data_path".
  Reads the json data files and converts strings to integer ids.
  Args:
    data_path: string path to the directory containing the dataset json files
      (e.g. train.json and valid.json).
dataset: one of ["emnlp2017"]
truncate_vocab: int, number of words to keep in the vocabulary.
Returns:
tuple (train_data, valid_data, vocabulary) where each of the data
objects can be passed to iterator.
Raises:
ValueError: dataset not in ["emnlp2017"].
"""
if dataset not in FILENAMES:
raise ValueError("Invalid dataset {}. Valid datasets: {}".format(
dataset, list(FILENAMES.keys())))
train_file, valid_file, _ = FILENAMES[dataset]
train_path = os.path.join(data_path, train_file)
valid_path = os.path.join(data_path, valid_file)
with gfile.GFile(train_path, "r") as json_file:
json_data_train = json.load(json_file)
with gfile.GFile(valid_path, "r") as json_file:
json_data_valid = json.load(json_file)
word_to_id = _build_vocab(json_data_train)
logging.info("Full vocab length: %d", len(word_to_id))
# Assume the vocab is sorted by frequency.
word_to_id_truncated = {
k: v for k, v in word_to_id.items() if v < truncate_vocab
}
logging.info("Truncated vocab length: %d", len(word_to_id_truncated))
train_data = _integerize(json_data_train, word_to_id_truncated, dataset)
valid_data = _integerize(json_data_valid, word_to_id_truncated, dataset)
return train_data, valid_data, word_to_id_truncated
def iterator(raw_data, batch_size, random=False):
"""Looping iterators on the raw data."""
sequences = raw_data["sequences"]
sequence_lengths = raw_data["sequence_lengths"]
num_examples = sequences.shape[0]
indice_range = np.arange(num_examples)
if random:
while True:
indices = np.random.choice(indice_range, size=batch_size, replace=True)
yield {
"sequence": sequences[indices, :],
"sequence_length": sequence_lengths[indices],
}
else:
start = 0
while True:
sequence = sequences[start:(start + batch_size), :]
sequence_length = sequence_lengths[start:(start + batch_size)]
start += batch_size
if start + batch_size > num_examples:
start = (start + batch_size) % num_examples
yield {
"sequence": sequence,
"sequence_length": sequence_length,
}
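# Illustrative sketch (not part of the original file): typical use of this
# module, assuming `data_path` points at a directory with train.json/valid.json.
def _demo_reader_usage(data_path="/tmp/emnlp2017"):
  train_data, valid_data, word_to_id = get_raw_data(data_path, "emnlp2017")
  batches = iterator(train_data, batch_size=4, random=True)
  batch = next(batches)
  # batch["sequence"]: [4, MAX_TOKENS_SEQUENCE["emnlp2017"]] int32 array.
  # batch["sequence_length"]: [4] int32 array.
  return batch, valid_data, word_to_id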
| deepmind-research-master | scratchgan/reader.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script for ScratchGAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
from scratchgan import discriminator_nets
from scratchgan import eval_metrics
from scratchgan import generators
from scratchgan import losses
from scratchgan import reader
from scratchgan import utils
flags.DEFINE_string("dataset", "emnlp2017", "Dataset.")
flags.DEFINE_integer("batch_size", 512, "Batch size")
flags.DEFINE_string("gen_type", "lstm", "Generator type.")
flags.DEFINE_string("disc_type", "lstm", "Discriminator type.")
flags.DEFINE_string("disc_loss_type", "ce", "Loss type.")
flags.DEFINE_integer("gen_feature_size", 512, "Generator feature size.")
flags.DEFINE_integer("disc_feature_size", 512, "Discriminator feature size.")
flags.DEFINE_integer("num_layers_gen", 2, "Number of generator layers.")
flags.DEFINE_integer("num_layers_disc", 1, "Number of discriminator layers.")
flags.DEFINE_bool("layer_norm_gen", False, "Layer norm generator.")
flags.DEFINE_bool("layer_norm_disc", True, "Layer norm discriminator.")
flags.DEFINE_float("gen_input_dropout", 0.0, "Input dropout generator.")
flags.DEFINE_float("gen_output_dropout", 0.0, "Input dropout discriminator.")
flags.DEFINE_float("l2_gen", 0.0, "L2 regularization generator.")
flags.DEFINE_float("l2_disc", 1e-6, "L2 regularization discriminator.")
flags.DEFINE_float("disc_dropout", 0.1, "Dropout discriminator")
flags.DEFINE_integer("trainable_embedding_size", 64,
"Size of trainable embedding.")
flags.DEFINE_bool("use_pretrained_embedding", True, "Use pretrained embedding.")
flags.DEFINE_integer("num_steps", int(200 * 1000), "Number of training steps.")
flags.DEFINE_integer("num_disc_updates", 1, "Number of discriminator updates.")
flags.DEFINE_integer("num_gen_updates", 1, "Number of generator updates.")
flags.DEFINE_string("data_dir", "/tmp/emnlp2017", "Directory where data is.")
flags.DEFINE_float("gen_lr", 9.59e-5, "Learning rate generator.")
flags.DEFINE_float("disc_lr", 9.38e-3, "Learning rate discriminator.")
flags.DEFINE_float("gen_beta1", 0.5, "Beta1 for generator.")
flags.DEFINE_float("disc_beta1", 0.5, "Beta1 for discriminator.")
flags.DEFINE_float("gamma", 0.23, "Discount factor.")
flags.DEFINE_float("baseline_decay", 0.08, "Baseline decay rate.")
flags.DEFINE_string("mode", "train", "train or evaluate_pair.")
flags.DEFINE_string("checkpoint_dir", "/tmp/emnlp2017/checkpoints/",
"Directory for checkpoints.")
flags.DEFINE_integer("export_every", 1000, "Frequency of checkpoint exports.")
flags.DEFINE_integer("num_examples_for_eval", int(1e4),
"Number of examples for evaluation")
EVALUATOR_SLEEP_PERIOD = 60 # Seconds evaluator sleeps if nothing to do.
def main(_):
config = flags.FLAGS
gfile.makedirs(config.checkpoint_dir)
if config.mode == "train":
train(config)
elif config.mode == "evaluate_pair":
while True:
checkpoint_path = utils.maybe_pick_models_to_evaluate(
checkpoint_dir=config.checkpoint_dir)
if checkpoint_path:
evaluate_pair(
config=config,
batch_size=config.batch_size,
checkpoint_path=checkpoint_path,
data_dir=config.data_dir,
dataset=config.dataset,
num_examples_for_eval=config.num_examples_for_eval)
else:
logging.info("No models to evaluate found, sleeping for %d seconds",
EVALUATOR_SLEEP_PERIOD)
time.sleep(EVALUATOR_SLEEP_PERIOD)
else:
raise Exception(
"Unexpected mode %s, supported modes are \"train\" or \"evaluate_pair\""
% (config.mode))
def train(config):
"""Train."""
logging.info("Training.")
tf.reset_default_graph()
np.set_printoptions(precision=4)
# Get data.
raw_data = reader.get_raw_data(
data_path=config.data_dir, dataset=config.dataset)
train_data, valid_data, word_to_id = raw_data
id_to_word = {v: k for k, v in word_to_id.items()}
vocab_size = len(word_to_id)
max_length = reader.MAX_TOKENS_SEQUENCE[config.dataset]
logging.info("Vocabulary size: %d", vocab_size)
iterator = reader.iterator(raw_data=train_data, batch_size=config.batch_size)
iterator_valid = reader.iterator(
raw_data=valid_data, batch_size=config.batch_size)
real_sequence = tf.placeholder(
dtype=tf.int32,
shape=[config.batch_size, max_length],
name="real_sequence")
real_sequence_length = tf.placeholder(
dtype=tf.int32, shape=[config.batch_size], name="real_sequence_length")
first_batch_np = next(iterator)
valid_batch_np = next(iterator_valid)
test_real_batch = {k: tf.constant(v) for k, v in first_batch_np.items()}
test_fake_batch = {
"sequence":
tf.constant(
np.random.choice(
vocab_size, size=[config.batch_size,
max_length]).astype(np.int32)),
"sequence_length":
tf.constant(
np.random.choice(max_length,
size=[config.batch_size]).astype(np.int32)),
}
valid_batch = {k: tf.constant(v) for k, v in valid_batch_np.items()}
# Create generator.
if config.use_pretrained_embedding:
embedding_source = utils.get_embedding_path(config.data_dir, config.dataset)
vocab_file = "/tmp/vocab.txt"
with gfile.GFile(vocab_file, "w") as f:
for i in range(len(id_to_word)):
f.write(id_to_word[i] + "\n")
logging.info("Temporary vocab file: %s", vocab_file)
else:
embedding_source = None
vocab_file = None
gen = generators.LSTMGen(
vocab_size=vocab_size,
feature_sizes=[config.gen_feature_size] * config.num_layers_gen,
max_sequence_length=reader.MAX_TOKENS_SEQUENCE[config.dataset],
batch_size=config.batch_size,
use_layer_norm=config.layer_norm_gen,
trainable_embedding_size=config.trainable_embedding_size,
input_dropout=config.gen_input_dropout,
output_dropout=config.gen_output_dropout,
pad_token=reader.PAD_INT,
embedding_source=embedding_source,
vocab_file=vocab_file,
)
gen_outputs = gen()
# Create discriminator.
disc = discriminator_nets.LSTMEmbedDiscNet(
vocab_size=vocab_size,
feature_sizes=[config.disc_feature_size] * config.num_layers_disc,
trainable_embedding_size=config.trainable_embedding_size,
embedding_source=embedding_source,
use_layer_norm=config.layer_norm_disc,
pad_token=reader.PAD_INT,
vocab_file=vocab_file,
dropout=config.disc_dropout,
)
disc_logits_real = disc(
sequence=real_sequence, sequence_length=real_sequence_length)
disc_logits_fake = disc(
sequence=gen_outputs["sequence"],
sequence_length=gen_outputs["sequence_length"])
# Loss of the discriminator.
if config.disc_loss_type == "ce":
targets_real = tf.ones(
[config.batch_size, reader.MAX_TOKENS_SEQUENCE[config.dataset]])
targets_fake = tf.zeros(
[config.batch_size, reader.MAX_TOKENS_SEQUENCE[config.dataset]])
loss_real = losses.sequential_cross_entropy_loss(disc_logits_real,
targets_real)
loss_fake = losses.sequential_cross_entropy_loss(disc_logits_fake,
targets_fake)
disc_loss = 0.5 * loss_real + 0.5 * loss_fake
# Loss of the generator.
gen_loss, cumulative_rewards, baseline = losses.reinforce_loss(
disc_logits=disc_logits_fake,
gen_logprobs=gen_outputs["logprobs"],
gamma=config.gamma,
decay=config.baseline_decay)
# Optimizers
disc_optimizer = tf.train.AdamOptimizer(
learning_rate=config.disc_lr, beta1=config.disc_beta1)
gen_optimizer = tf.train.AdamOptimizer(
learning_rate=config.gen_lr, beta1=config.gen_beta1)
# Get losses and variables.
disc_vars = disc.get_all_variables()
gen_vars = gen.get_all_variables()
l2_disc = tf.reduce_sum(tf.add_n([tf.nn.l2_loss(v) for v in disc_vars]))
l2_gen = tf.reduce_sum(tf.add_n([tf.nn.l2_loss(v) for v in gen_vars]))
scalar_disc_loss = tf.reduce_mean(disc_loss) + config.l2_disc * l2_disc
scalar_gen_loss = tf.reduce_mean(gen_loss) + config.l2_gen * l2_gen
# Update ops.
global_step = tf.train.get_or_create_global_step()
disc_update = disc_optimizer.minimize(
scalar_disc_loss, var_list=disc_vars, global_step=global_step)
gen_update = gen_optimizer.minimize(
scalar_gen_loss, var_list=gen_vars, global_step=global_step)
# Saver.
saver = tf.train.Saver()
# Metrics
test_disc_logits_real = disc(**test_real_batch)
test_disc_logits_fake = disc(**test_fake_batch)
valid_disc_logits = disc(**valid_batch)
disc_predictions_real = tf.nn.sigmoid(disc_logits_real)
disc_predictions_fake = tf.nn.sigmoid(disc_logits_fake)
valid_disc_predictions = tf.reduce_mean(
tf.nn.sigmoid(valid_disc_logits), axis=0)
test_disc_predictions_real = tf.reduce_mean(
tf.nn.sigmoid(test_disc_logits_real), axis=0)
test_disc_predictions_fake = tf.reduce_mean(
tf.nn.sigmoid(test_disc_logits_fake), axis=0)
# Only log results for the first element of the batch.
metrics = {
"scalar_gen_loss": scalar_gen_loss,
"scalar_disc_loss": scalar_disc_loss,
"disc_predictions_real": tf.reduce_mean(disc_predictions_real),
"disc_predictions_fake": tf.reduce_mean(disc_predictions_fake),
"test_disc_predictions_real": tf.reduce_mean(test_disc_predictions_real),
"test_disc_predictions_fake": tf.reduce_mean(test_disc_predictions_fake),
"valid_disc_predictions": tf.reduce_mean(valid_disc_predictions),
"cumulative_rewards": tf.reduce_mean(cumulative_rewards),
"baseline": tf.reduce_mean(baseline),
}
# Training.
logging.info("Starting training")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
latest_ckpt = tf.train.latest_checkpoint(config.checkpoint_dir)
if latest_ckpt:
saver.restore(sess, latest_ckpt)
for step in range(config.num_steps):
real_data_np = next(iterator)
train_feed = {
real_sequence: real_data_np["sequence"],
real_sequence_length: real_data_np["sequence_length"],
}
# Update generator and discriminator.
for _ in range(config.num_disc_updates):
sess.run(disc_update, feed_dict=train_feed)
for _ in range(config.num_gen_updates):
sess.run(gen_update, feed_dict=train_feed)
# Reporting
if step % config.export_every == 0:
gen_sequence_np, metrics_np = sess.run(
[gen_outputs["sequence"], metrics], feed_dict=train_feed)
metrics_np["gen_sentence"] = utils.sequence_to_sentence(
gen_sequence_np[0, :], id_to_word)
saver.save(
sess,
save_path=config.checkpoint_dir + "scratchgan",
global_step=global_step)
metrics_np["model_path"] = tf.train.latest_checkpoint(
config.checkpoint_dir)
logging.info(metrics_np)
# After training, export models.
saver.save(
sess,
save_path=config.checkpoint_dir + "scratchgan",
global_step=global_step)
logging.info("Saved final model at %s.",
tf.train.latest_checkpoint(config.checkpoint_dir))
def evaluate_pair(config, batch_size, checkpoint_path, data_dir, dataset,
num_examples_for_eval):
"""Evaluates a pair generator discriminator.
This function loads a discriminator from disk, a generator, and evaluates the
discriminator against the generator.
It returns the mean probability of the discriminator against several batches,
and the FID of the generator against the validation data.
It also writes evaluation samples to disk.
Args:
config: dict, the config file.
batch_size: int, size of the batch.
checkpoint_path: string, full path to the TF checkpoint on disk.
data_dir: string, path to a directory containing the dataset.
dataset: string, "emnlp2017", to select the right dataset.
num_examples_for_eval: int, number of examples for evaluation.
"""
tf.reset_default_graph()
logging.info("Evaluating checkpoint %s.", checkpoint_path)
# Build graph.
train_data, valid_data, word_to_id = reader.get_raw_data(
data_dir, dataset=dataset)
id_to_word = {v: k for k, v in word_to_id.items()}
vocab_size = len(word_to_id)
train_iterator = reader.iterator(raw_data=train_data, batch_size=batch_size)
valid_iterator = reader.iterator(raw_data=valid_data, batch_size=batch_size)
train_sequence = tf.placeholder(
dtype=tf.int32,
shape=[batch_size, reader.MAX_TOKENS_SEQUENCE[dataset]],
name="train_sequence")
train_sequence_length = tf.placeholder(
dtype=tf.int32, shape=[batch_size], name="train_sequence_length")
valid_sequence = tf.placeholder(
dtype=tf.int32,
shape=[batch_size, reader.MAX_TOKENS_SEQUENCE[dataset]],
name="valid_sequence")
valid_sequence_length = tf.placeholder(
dtype=tf.int32, shape=[batch_size], name="valid_sequence_length")
disc_inputs_train = {
"sequence": train_sequence,
"sequence_length": train_sequence_length,
}
disc_inputs_valid = {
"sequence": valid_sequence,
"sequence_length": valid_sequence_length,
}
if config.use_pretrained_embedding:
embedding_source = utils.get_embedding_path(config.data_dir, config.dataset)
vocab_file = "/tmp/vocab.txt"
with gfile.GFile(vocab_file, "w") as f:
for i in range(len(id_to_word)):
f.write(id_to_word[i] + "\n")
logging.info("Temporary vocab file: %s", vocab_file)
else:
embedding_source = None
vocab_file = None
gen = generators.LSTMGen(
vocab_size=vocab_size,
feature_sizes=[config.gen_feature_size] * config.num_layers_gen,
max_sequence_length=reader.MAX_TOKENS_SEQUENCE[config.dataset],
batch_size=config.batch_size,
use_layer_norm=config.layer_norm_gen,
trainable_embedding_size=config.trainable_embedding_size,
input_dropout=config.gen_input_dropout,
output_dropout=config.gen_output_dropout,
pad_token=reader.PAD_INT,
embedding_source=embedding_source,
vocab_file=vocab_file,
)
gen_outputs = gen()
disc = discriminator_nets.LSTMEmbedDiscNet(
vocab_size=vocab_size,
feature_sizes=[config.disc_feature_size] * config.num_layers_disc,
trainable_embedding_size=config.trainable_embedding_size,
embedding_source=embedding_source,
use_layer_norm=config.layer_norm_disc,
pad_token=reader.PAD_INT,
vocab_file=vocab_file,
dropout=config.disc_dropout,
)
disc_inputs = {
"sequence": gen_outputs["sequence"],
"sequence_length": gen_outputs["sequence_length"],
}
gen_logits = disc(**disc_inputs)
train_logits = disc(**disc_inputs_train)
valid_logits = disc(**disc_inputs_valid)
# Saver.
saver = tf.train.Saver()
# Reduce over time and batch.
train_probs = tf.reduce_mean(tf.nn.sigmoid(train_logits))
valid_probs = tf.reduce_mean(tf.nn.sigmoid(valid_logits))
gen_probs = tf.reduce_mean(tf.nn.sigmoid(gen_logits))
outputs = {
"train_probs": train_probs,
"valid_probs": valid_probs,
"gen_probs": gen_probs,
"gen_sequences": gen_outputs["sequence"],
"valid_sequences": valid_sequence
}
# Get average discriminator score and store generated sequences.
all_valid_sentences = []
all_gen_sentences = []
all_gen_sequences = []
mean_train_prob = 0.0
mean_valid_prob = 0.0
mean_gen_prob = 0.0
logging.info("Graph constructed, generating batches.")
num_batches = num_examples_for_eval // batch_size + 1
  # Restrict the thread pool size to prevent excessive CPU usage.
tf_config = tf.ConfigProto()
tf_config.intra_op_parallelism_threads = 16
tf_config.inter_op_parallelism_threads = 16
with tf.Session(config=tf_config) as sess:
# Restore variables from checkpoints.
logging.info("Restoring variables.")
saver.restore(sess, checkpoint_path)
for i in range(num_batches):
logging.info("Batch %d / %d", i, num_batches)
train_data_np = next(train_iterator)
valid_data_np = next(valid_iterator)
feed_dict = {
train_sequence: train_data_np["sequence"],
train_sequence_length: train_data_np["sequence_length"],
valid_sequence: valid_data_np["sequence"],
valid_sequence_length: valid_data_np["sequence_length"],
}
outputs_np = sess.run(outputs, feed_dict=feed_dict)
all_gen_sequences.extend(outputs_np["gen_sequences"])
gen_sentences = utils.batch_sequences_to_sentences(
outputs_np["gen_sequences"], id_to_word)
valid_sentences = utils.batch_sequences_to_sentences(
outputs_np["valid_sequences"], id_to_word)
all_valid_sentences.extend(valid_sentences)
all_gen_sentences.extend(gen_sentences)
mean_train_prob += outputs_np["train_probs"] / batch_size
mean_valid_prob += outputs_np["valid_probs"] / batch_size
mean_gen_prob += outputs_np["gen_probs"] / batch_size
logging.info("Evaluating FID.")
# Compute FID
fid = eval_metrics.fid(
generated_sentences=all_gen_sentences[:num_examples_for_eval],
real_sentences=all_valid_sentences[:num_examples_for_eval])
utils.write_eval_results(config.checkpoint_dir, all_gen_sentences,
os.path.basename(checkpoint_path), mean_train_prob,
mean_valid_prob, mean_gen_prob, fid)
if __name__ == "__main__":
app.run(main)
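# Illustrative note (not part of the original file): typical invocations,
# assuming the EMNLP2017 json files and GloVe embeddings live under
# /tmp/emnlp2017 and the package is run as a module:
#   python -m scratchgan.experiment --mode=train --data_dir=/tmp/emnlp2017
#   python -m scratchgan.experiment --mode=evaluate_pair --data_dir=/tmp/emnlp2017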
| deepmind-research-master | scratchgan/experiment.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
from scratchgan import reader
EVAL_FILENAME = "evaluated_checkpoints.csv"
GLOVE_DIM = 300
GLOVE_STD = 0.3836 # Standard dev. of GloVe embeddings.
def _get_embedding_initializer(vocab_file, embedding_source, vocab_size):
"""Loads pretrained embeddings from a file in GloVe format."""
with gfile.GFile(embedding_source, "r") as f:
embedding_lines = f.readlines()
# First line contains embedding dim.
_, embedding_dim = list(map(int, embedding_lines[0].split()))
# Get the tokens as strings.
tokens = [line.split()[0] for line in embedding_lines[1:]]
# Get the actual embedding matrix.
unsorted_emb = np.array(
[[float(x) for x in line.split()[1:]] for line in embedding_lines[1:]])
# Get the expected vocab order.
with gfile.GFile(vocab_file, "r") as f:
tokens_order = [l.strip() for l in f.readlines()]
assert vocab_size == len(tokens_order)
# Put the embeddings in the order.
sorted_emb = np.zeros((vocab_size, embedding_dim))
for i, token in enumerate(tokens_order):
if token in tokens:
sorted_emb[i, :] = unsorted_emb[tokens.index(token), :]
else: # If we don't have a pretrained embedding, initialize randomly.
sorted_emb[i, :] = np.random.normal(
loc=0.0, scale=GLOVE_STD, size=(GLOVE_DIM,))
return sorted_emb.astype(np.float32)
def append_position_signal(embeddings, position_dim=8):
"""Append position signal. See get_position_signal."""
batch_size, sequence_length, embedding_dim = embeddings.get_shape().as_list()
positions = get_position_signal(sequence_length, position_dim)
# Append to embeddings.
position_inputs = tf.tile(positions[None, :, :], [batch_size, 1, 1])
embeddings_pos = tf.concat([embeddings, position_inputs], axis=2)
embeddings_pos.shape.assert_is_compatible_with(
[batch_size, sequence_length, embedding_dim + position_dim])
return embeddings_pos
def get_position_signal(sequence_length, position_dim=8):
"""Return fixed position signal as sine waves.
Sine waves frequencies are linearly spaced so that shortest is 2 and
longest is half the maximum length. That way the longest frequency
is long enough to be monotonous over the whole sequence length.
Sine waves are also shifted so that they don't all start with the same
value.
We don't use learned positional embeddings because these embeddings are
projected linearly along with the original embeddings, and the projection is
learned.
Args:
    sequence_length: int, T, length of the sequence.
position_dim: int, P, number of sine waves.
Returns:
A [T, P] tensor, position embeddings.
"""
# Compute the frequencies.
periods = tf.exp(
tf.lin_space(
tf.log(2.0), tf.log(tf.to_float(sequence_length)), position_dim))
frequencies = 1.0 / periods # Shape [T, P].
# Compute the sine waves.
xs = frequencies[None, :] * tf.to_float(tf.range(sequence_length)[:, None])
shifts = tf.lin_space(0.0, 2.0, position_dim)[None, :] # [1, P]
positions = tf.math.cos(math.pi * (xs + shifts)) # [T, P]
positions.shape.assert_is_compatible_with([sequence_length, position_dim])
return positions
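# Illustrative sketch (not part of the original file): a NumPy rendering of the
# same position signal, handy for inspecting the waves outside a TF session.
def _demo_position_signal_np(sequence_length, position_dim=8):
  periods = np.exp(
      np.linspace(np.log(2.0), np.log(float(sequence_length)), position_dim))
  xs = (1.0 / periods)[None, :] * np.arange(sequence_length)[:, None]
  shifts = np.linspace(0.0, 2.0, position_dim)[None, :]
  return np.cos(math.pi * (xs + shifts))  # Shape [sequence_length, position_dim].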
def get_mask_by_length(lengths, max_length):
"""Returns a mask where x[i , j] = (j < lengths[i]).
Args:
lengths: [B] tensor of int32 such that 0 <= lengths[i] <= max_length.
max_length: scalar tensor of int32.
Returns:
[B, max_length] tensor of booleans such that x[i, j] is True
if and only if j < lengths[i].
"""
batch_size = lengths.get_shape().as_list()[0]
indices = tf.range(start=0, limit=max_length)
all_indices = tf.tile(indices[None, :], [batch_size, 1])
all_lengths = tf.tile(lengths[:, None], [1, max_length])
mask = (all_indices < all_lengths)
mask_boolean = tf.cast(mask, tf.bool)
return mask_boolean
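# Illustrative sketch (not part of the original file): the same mask in NumPy.
# For example lengths=[1, 3], max_length=4 gives
# [[True, False, False, False], [True, True, True, False]].
def _demo_mask_by_length_np(lengths, max_length):
  return np.arange(max_length)[None, :] < np.asarray(lengths)[:, None]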
def get_mask_past_symbol(reference, symbol, optimize_for_tpu=False):
"""For each row, mask is True before and at the first occurrence of symbol."""
batch_size, max_length = reference.get_shape().as_list()
symbol = tf.convert_to_tensor(symbol)
symbol.shape.assert_is_compatible_with([])
first_indices = get_first_occurrence_indices(reference, symbol,
optimize_for_tpu)
first_indices.shape.assert_is_compatible_with([batch_size])
keep_lengths = tf.minimum(first_indices, max_length)
mask = get_mask_by_length(keep_lengths, max_length)
mask.shape.assert_is_compatible_with([batch_size, max_length])
mask.set_shape([batch_size, max_length])
return mask
def get_first_occurrence_indices(reference, symbol, optimize_for_tpu=False):
"""For each row in reference, get index after the first occurrence of symbol.
If symbol is not present on a row, return reference.shape[1] instead.
Args:
reference: [B, T] tensor of elements of the same type as symbol.
    symbol: int or [] scalar tensor of the same dtype as reference.
optimize_for_tpu: bool, whether to use a TPU-capable variant.
Returns:
    A [B] tensor x of tf.int32 where x[i] is such that
    reference[i, x[i]-1] == symbol, and reference[i, j] != symbol
    for j < x[i]-1. If symbol is not present on row i then x[i] = T.
"""
if optimize_for_tpu:
# Run code which can be compiled on TPU.
    # Transpose reference to [T, B].
reference = tf.transpose(reference, [1, 0])
range_tensor = tf.range(reference.shape.as_list()[0])
indexes = tf.stack([range_tensor] * reference.shape.as_list()[1], 1)
symbol = tf.stack([symbol] * reference.shape.as_list()[1], 0)
initial_indices = tf.constant(
reference.shape.as_list()[0],
shape=[reference.shape.as_list()[1]],
dtype=tf.int32)
# We want a function which moves backwards.
def fn(current_index, elems):
ref, ind = elems
return tf.where(tf.equal(ref, symbol), ind + 1, current_index)
min_indexes = tf.scan(
fn, (reference, indexes),
initializer=initial_indices,
parallel_iterations=1,
reverse=True)
return min_indexes[0]
batch_size, max_length = reference.get_shape().as_list()
symbol = tf.convert_to_tensor(symbol)
symbol.shape.assert_is_compatible_with([])
# Add symbol at the end of each row, to make sure tf.where works.
tensor = tf.concat(
[reference, tf.tile(symbol[None, None], [batch_size, 1])], axis=1)
index_all_occurrences = tf.where(tf.equal(tensor, symbol))
index_all_occurrences = tf.cast(index_all_occurrences, tf.int32)
# `index_all_occurrences` is a [N, 2] tensor with coordinates of all positions
# of `symbol` in `tensor`. So N will be >= batch size since there can be
# several `symbol` in one row of tensor. We need to take only the position
# of the first occurrence for each row. `segment_min` does that, taking the
# lowest column index for each row index.
index_first_occurrences = tf.segment_min(index_all_occurrences[:, 1],
index_all_occurrences[:, 0])
index_first_occurrences.set_shape([batch_size])
index_first_occurrences = tf.minimum(index_first_occurrences + 1, max_length)
return index_first_occurrences
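# Illustrative sketch (not part of the original file): reference behaviour of
# the function above in plain NumPy. With reference=[[5, 0, 2], [1, 3, 4]] and
# symbol=0 this returns [2, 3].
def _demo_first_occurrence_np(reference, symbol):
  reference = np.asarray(reference)
  batch_size, max_length = reference.shape
  out = np.full([batch_size], max_length, dtype=np.int32)
  for i in range(batch_size):
    hits = np.where(reference[i] == symbol)[0]
    if hits.size:
      out[i] = hits[0] + 1
  return out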
def sequence_to_sentence(sequence, id_to_word):
"""Turn a sequence into a sentence , inverse of sentence_to_sequence."""
words = []
for token_index in sequence:
if token_index in id_to_word:
words.append(id_to_word[token_index])
else:
words.append(reader.UNK)
return " ".join(words)
def batch_sequences_to_sentences(sequences, id_to_word):
return [sequence_to_sentence(sequence, id_to_word) for sequence in sequences]
def write_eval_results(checkpoint_dir, all_gen_sentences, checkpoint_name,
mean_train_prob, mean_valid_prob, mean_gen_prob, fid):
"""Write evaluation results to disk."""
to_write = ",".join(
map(str, [
checkpoint_name, mean_train_prob, mean_valid_prob, mean_gen_prob, fid
]))
eval_filepath = os.path.join(checkpoint_dir, EVAL_FILENAME)
previous_eval_content = ""
if gfile.exists(eval_filepath):
with gfile.GFile(eval_filepath, "r") as f:
previous_eval_content = f.read()
with gfile.GFile(eval_filepath, "w") as f:
f.write(previous_eval_content + to_write + "\n")
with gfile.GFile(
os.path.join(checkpoint_dir, checkpoint_name + "_sentences.txt"),
"w") as f:
f.write("\n".join(all_gen_sentences))
def maybe_pick_models_to_evaluate(checkpoint_dir):
"""Pick a checkpoint to evaluate that has not been evaluated already."""
logging.info("Picking checkpoint to evaluate from %s.", checkpoint_dir)
filenames = gfile.listdir(checkpoint_dir)
filenames = [f[:-5] for f in filenames if f[-5:] == ".meta"]
logging.info("Found existing checkpoints: %s", filenames)
evaluated_filenames = []
if gfile.exists(os.path.join(checkpoint_dir, EVAL_FILENAME)):
with gfile.GFile(os.path.join(checkpoint_dir, EVAL_FILENAME), "r") as f:
evaluated_filenames = [l.strip().split(",")[0] for l in f.readlines()]
logging.info("Found already evaluated checkpoints: %s", evaluated_filenames)
checkpoints_to_evaluate = [
f for f in filenames if f not in evaluated_filenames
]
logging.info("Remaining potential checkpoints: %s", checkpoints_to_evaluate)
if checkpoints_to_evaluate:
return os.path.join(checkpoint_dir, checkpoints_to_evaluate[0])
else:
return None
def get_embedding_path(data_dir, dataset):
"""By convention, this is where we store the embedding."""
return os.path.join(data_dir, "glove_%s.txt" % dataset)
def make_partially_trainable_embeddings(vocab_file, embedding_source,
vocab_size, trainable_embedding_size):
"""Makes embedding matrix with pretrained GloVe [1] part and trainable part.
[1] Pennington, J., Socher, R., & Manning, C. (2014, October). Glove: Global
vectors for word representation. In Proceedings of the 2014 conference on
empirical methods in natural language processing (EMNLP) (pp. 1532-1543).
Args:
vocab_file: vocabulary file.
embedding_source: path to the actual embeddings.
vocab_size: number of words in vocabulary.
trainable_embedding_size: size of the trainable part of the embeddings.
Returns:
A matrix of partially pretrained embeddings.
"""
# Our embeddings have 2 parts: a pre-trained, frozen, GloVe part,
# and a trainable, randomly initialized part.
# The standard deviation of the GloVe part is used to initialize
  # the trainable part, so that both parts have roughly the same distribution.
#
  # Let g_ij be the j-th coordinate of the GloVe embedding of the i-th word,
  # with 0 <= i < |vocab| and 0 <= j < 300. Then the empirical standard
  # deviation sqrt(mean_ij (g_ij - mean_kl g_kl)^2) is approximately 0.3836.
#
# In reality g_ij follows a truncated normal distribution
# min(max(N(0, s), -4.2), 4.2) but we approximate it by N(0, 0.3836).
embedding_initializer = _get_embedding_initializer(
vocab_file=vocab_file,
embedding_source=embedding_source,
vocab_size=vocab_size)
pretrained_embedding = tf.get_variable(
"pretrained_embedding",
initializer=embedding_initializer,
dtype=tf.float32)
trainable_embedding = tf.get_variable(
"trainable_embedding",
shape=[vocab_size, trainable_embedding_size],
initializer=tf.initializers.random_normal(mean=0.0, stddev=GLOVE_STD))
# We just concatenate embeddings, they will pass through a projection
# matrix afterwards.
embedding = tf.concat([pretrained_embedding, trainable_embedding], axis=1)
return embedding
| deepmind-research-master | scratchgan/utils.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for sequential GANs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def sequential_cross_entropy_loss(logits, expected):
"""The cross entropy loss for binary classification.
Used to train the discriminator when not using WGAN loss.
  Assumes logits is the logit (log-odds) of classifying each timestep as 1 (real).
Args:
logits: a `tf.Tensor`, the model produced logits, shape [batch_size,
sequence_length].
expected: a `tf.Tensor`, the expected output, shape [batch_size,
sequence_length].
Returns:
    A `tf.Tensor` of shape [batch_size, sequence_length] with the per-timestep
    loss obtained on the given inputs.
"""
batch_size, sequence_length = logits.shape.as_list()
expected = tf.cast(expected, tf.float32)
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=expected, logits=logits)
return tf.reshape(ce, [batch_size, sequence_length])
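# Illustrative sketch (not part of the original file): the same per-timestep
# sigmoid cross entropy in NumPy, using the numerically stable form
# max(x, 0) - x * z + log(1 + exp(-|x|)) for logits x and targets z.
def _demo_sequential_ce_np(logits, expected):
  logits = np.asarray(logits, dtype=np.float64)
  expected = np.asarray(expected, dtype=np.float64)
  return (np.maximum(logits, 0.) - logits * expected
          + np.log1p(np.exp(-np.abs(logits))))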
def reinforce_loss(disc_logits, gen_logprobs, gamma, decay):
"""The REINFORCE loss.
Args:
disc_logits: float tensor, shape [batch_size, sequence_length].
gen_logprobs: float32 tensor, shape [batch_size, sequence_length]
gamma: a float, discount factor for cumulative reward.
decay: a float, decay rate for the EWMA baseline of REINFORCE.
Returns:
Float tensor, shape [batch_size, sequence_length], the REINFORCE loss for
each timestep.
"""
# Assume 1 logit for each timestep.
batch_size, sequence_length = disc_logits.shape.as_list()
gen_logprobs.shape.assert_is_compatible_with([batch_size, sequence_length])
disc_predictions = tf.nn.sigmoid(disc_logits)
# MaskGAN uses log(D), but this is more stable empirically.
rewards = 2.0 * disc_predictions - 1
# Compute cumulative rewards.
rewards_list = tf.unstack(rewards, axis=1)
cumulative_rewards = []
for t in range(sequence_length):
cum_value = tf.zeros(shape=[batch_size])
for s in range(t, sequence_length):
cum_value += np.power(gamma, (s - t)) * rewards_list[s]
cumulative_rewards.append(cum_value)
cumulative_rewards = tf.stack(cumulative_rewards, axis=1)
cumulative_rewards.shape.assert_is_compatible_with(
[batch_size, sequence_length])
with tf.variable_scope("reinforce", reuse=tf.AUTO_REUSE):
ewma_reward = tf.get_variable("ewma_reward", initializer=0.0)
mean_reward = tf.reduce_mean(cumulative_rewards)
new_ewma_reward = decay * ewma_reward + (1.0 - decay) * mean_reward
update_op = tf.assign(ewma_reward, new_ewma_reward)
# REINFORCE
with tf.control_dependencies([update_op]):
advantage = cumulative_rewards - ewma_reward
loss = -tf.stop_gradient(advantage) * gen_logprobs
loss.shape.assert_is_compatible_with([batch_size, sequence_length])
return loss, cumulative_rewards, ewma_reward
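# Illustrative sketch (not part of the original file): the discounted
# cumulative reward used above, computed in NumPy with a backwards recursion
# instead of the O(T^2) double loop.
def _demo_cumulative_rewards_np(rewards, gamma):
  rewards = np.asarray(rewards, dtype=np.float64)  # [batch, time]
  out = np.zeros_like(rewards)
  acc = np.zeros(rewards.shape[0])
  for t in reversed(range(rewards.shape[1])):
    acc = rewards[:, t] + gamma * acc
    out[:, t] = acc  # out[:, t] = sum_{s >= t} gamma**(s - t) * rewards[:, s]
  return out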
| deepmind-research-master | scratchgan/losses.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discriminator networks for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
from scratchgan import utils
class LSTMEmbedDiscNet(snt.AbstractModule):
"""An LSTM discriminator that operates on word indexes."""
def __init__(self,
feature_sizes,
vocab_size,
use_layer_norm,
trainable_embedding_size,
dropout,
pad_token,
embedding_source=None,
vocab_file=None,
name='LSTMEmbedDiscNet'):
super(LSTMEmbedDiscNet, self).__init__(name=name)
self._feature_sizes = feature_sizes
self._vocab_size = vocab_size
self._use_layer_norm = use_layer_norm
self._trainable_embedding_size = trainable_embedding_size
self._embedding_source = embedding_source
self._vocab_file = vocab_file
self._dropout = dropout
self._pad_token = pad_token
if self._embedding_source:
assert vocab_file
def _build(self, sequence, sequence_length, is_training=True):
"""Connect to the graph.
Args:
sequence: A [batch_size, max_sequence_length] tensor of int. For example
the indices of words as sampled by the generator.
sequence_length: A [batch_size] tensor of int. Length of the sequence.
is_training: Boolean, False to disable dropout.
Returns:
      A [batch_size, max_sequence_length] tensor of floats, the per-timestep
      logits. For each sequence in the batch, these should (hopefully) allow to
      distinguish if the value at each timestep is real or generated.
"""
batch_size, max_sequence_length = sequence.shape.as_list()
keep_prob = (1.0 - self._dropout) if is_training else 1.0
if self._embedding_source:
all_embeddings = utils.make_partially_trainable_embeddings(
self._vocab_file, self._embedding_source, self._vocab_size,
self._trainable_embedding_size)
else:
all_embeddings = tf.get_variable(
'trainable_embedding',
shape=[self._vocab_size, self._trainable_embedding_size],
trainable=True)
_, self._embedding_size = all_embeddings.shape.as_list()
input_embeddings = tf.nn.dropout(all_embeddings, keep_prob=keep_prob)
embeddings = tf.nn.embedding_lookup(input_embeddings, sequence)
embeddings.shape.assert_is_compatible_with(
[batch_size, max_sequence_length, self._embedding_size])
position_dim = 8
embeddings_pos = utils.append_position_signal(embeddings, position_dim)
embeddings_pos = tf.reshape(
embeddings_pos,
[batch_size * max_sequence_length, self._embedding_size + position_dim])
lstm_inputs = snt.Linear(self._feature_sizes[0])(embeddings_pos)
lstm_inputs = tf.reshape(
lstm_inputs, [batch_size, max_sequence_length, self._feature_sizes[0]])
lstm_inputs.shape.assert_is_compatible_with(
[batch_size, max_sequence_length, self._feature_sizes[0]])
encoder_cells = []
for feature_size in self._feature_sizes:
encoder_cells += [
snt.LSTM(feature_size, use_layer_norm=self._use_layer_norm)
]
encoder_cell = snt.DeepRNN(encoder_cells)
initial_state = encoder_cell.initial_state(batch_size)
hidden_states, _ = tf.nn.dynamic_rnn(
cell=encoder_cell,
inputs=lstm_inputs,
sequence_length=sequence_length,
initial_state=initial_state,
swap_memory=True)
hidden_states.shape.assert_is_compatible_with(
[batch_size, max_sequence_length,
sum(self._feature_sizes)])
logits = snt.BatchApply(snt.Linear(1))(hidden_states)
logits.shape.assert_is_compatible_with([batch_size, max_sequence_length, 1])
logits_flat = tf.reshape(logits, [batch_size, max_sequence_length])
# Mask past first PAD symbol
#
    # Note that we still rely on tf.nn.dynamic_rnn taking
# into account the sequence_length properly, because otherwise
# the logits at a given timestep will depend on the inputs for all other
# timesteps, including the ones that should be masked.
mask = utils.get_mask_past_symbol(sequence, self._pad_token)
masked_logits_flat = logits_flat * tf.cast(mask, tf.float32)
return masked_logits_flat
| deepmind-research-master | scratchgan/discriminator_nets.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for reading open sourced Learning Complex Physics data."""
import functools
import numpy as np
import tensorflow.compat.v1 as tf
# Create a description of the features.
_FEATURE_DESCRIPTION = {
'position': tf.io.VarLenFeature(tf.string),
}
_FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT = _FEATURE_DESCRIPTION.copy()
_FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT['step_context'] = tf.io.VarLenFeature(
tf.string)
_FEATURE_DTYPES = {
'position': {
'in': np.float32,
'out': tf.float32
},
'step_context': {
'in': np.float32,
'out': tf.float32
}
}
_CONTEXT_FEATURES = {
'key': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'particle_type': tf.io.VarLenFeature(tf.string)
}
def convert_to_tensor(x, encoded_dtype):
if len(x) == 1:
out = np.frombuffer(x[0].numpy(), dtype=encoded_dtype)
else:
out = []
for el in x:
out.append(np.frombuffer(el.numpy(), dtype=encoded_dtype))
out = tf.convert_to_tensor(np.array(out))
return out
def parse_serialized_simulation_example(example_proto, metadata):
"""Parses a serialized simulation tf.SequenceExample.
Args:
example_proto: A string encoding of the tf.SequenceExample proto.
metadata: A dict of metadata for the dataset.
Returns:
context: A dict, with features that do not vary over the trajectory.
parsed_features: A dict of tf.Tensors representing the parsed examples
across time, where axis zero is the time axis.
"""
if 'context_mean' in metadata:
feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT
else:
feature_description = _FEATURE_DESCRIPTION
context, parsed_features = tf.io.parse_single_sequence_example(
example_proto,
context_features=_CONTEXT_FEATURES,
sequence_features=feature_description)
for feature_key, item in parsed_features.items():
convert_fn = functools.partial(
convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])
parsed_features[feature_key] = tf.py_function(
convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])
# There is an extra frame at the beginning so we can calculate pos change
# for all frames used in the paper.
position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]
# Reshape positions to correct dim:
parsed_features['position'] = tf.reshape(parsed_features['position'],
position_shape)
# Set correct shapes of the remaining tensors.
sequence_length = metadata['sequence_length'] + 1
if 'context_mean' in metadata:
context_feat_len = len(metadata['context_mean'])
parsed_features['step_context'] = tf.reshape(
parsed_features['step_context'],
[sequence_length, context_feat_len])
# Decode particle type explicitly
context['particle_type'] = tf.py_function(
functools.partial(convert_fn, encoded_dtype=np.int64),
inp=[context['particle_type'].values],
Tout=[tf.int64])
context['particle_type'] = tf.reshape(context['particle_type'], [-1])
return context, parsed_features
def split_trajectory(context, features, window_length=7):
"""Splits trajectory into sliding windows."""
# Our strategy is to make sure all the leading dimensions are the same size,
# then we can use from_tensor_slices.
trajectory_length = features['position'].get_shape().as_list()[0]
  # We then stack window_length positions to form each window, so the final
  # number of windows will be trajectory_length - window_length + 1 (the +1
  # makes sure we get the last split).
input_trajectory_length = trajectory_length - window_length + 1
model_input_features = {}
# Prepare the context features per step.
model_input_features['particle_type'] = tf.tile(
tf.expand_dims(context['particle_type'], axis=0),
[input_trajectory_length, 1])
if 'step_context' in features:
global_stack = []
for idx in range(input_trajectory_length):
global_stack.append(features['step_context'][idx:idx + window_length])
model_input_features['step_context'] = tf.stack(global_stack)
pos_stack = []
for idx in range(input_trajectory_length):
pos_stack.append(features['position'][idx:idx + window_length])
# Get the corresponding positions
model_input_features['position'] = tf.stack(pos_stack)
return tf.data.Dataset.from_tensor_slices(model_input_features)
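# Illustrative sketch (not part of the original file): a minimal input pipeline
# using the two functions above, assuming `metadata` was loaded from the
# dataset's metadata.json and `path` points at one of the released TFRecords.
def _demo_input_pipeline(path, metadata, window_length=7):
  ds = tf.data.TFRecordDataset([path])
  ds = ds.map(functools.partial(
      parse_serialized_simulation_example, metadata=metadata))
  ds = ds.flat_map(functools.partial(
      split_trajectory, window_length=window_length))
  # Each element is a dict with 'particle_type', 'position' and, if the dataset
  # has global context, 'step_context'.
  return ds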
| deepmind-research-master | learning_to_simulate/reading_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple matplotlib rendering of a rollout prediction against ground truth.
Usage (from parent directory):
`python -m learning_to_simulate.render_rollout --rollout_path={OUTPUT_PATH}/rollout_test_1.pkl`
Where {OUTPUT_PATH} is the output path passed to `train.py` in "eval_rollout"
mode.
It may require installing Tkinter with `sudo apt-get install python3.7-tk`.
""" # pylint: disable=line-too-long
import pickle
from absl import app
from absl import flags
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
flags.DEFINE_string("rollout_path", None, help="Path to rollout pickle file")
flags.DEFINE_integer("step_stride", 3, help="Stride of steps to skip.")
flags.DEFINE_boolean("block_on_show", True, help="For test purposes.")
FLAGS = flags.FLAGS
TYPE_TO_COLOR = {
3: "black", # Boundary particles.
0: "green", # Rigid solids.
7: "magenta", # Goop.
6: "gold", # Sand.
5: "blue", # Water.
}
def main(unused_argv):
if not FLAGS.rollout_path:
raise ValueError("A `rollout_path` must be passed.")
with open(FLAGS.rollout_path, "rb") as file:
rollout_data = pickle.load(file)
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
plot_info = []
for ax_i, (label, rollout_field) in enumerate(
[("Ground truth", "ground_truth_rollout"),
("Prediction", "predicted_rollout")]):
# Append the initial positions to get the full trajectory.
trajectory = np.concatenate([
rollout_data["initial_positions"],
rollout_data[rollout_field]], axis=0)
ax = axes[ax_i]
ax.set_title(label)
bounds = rollout_data["metadata"]["bounds"]
ax.set_xlim(bounds[0][0], bounds[0][1])
ax.set_ylim(bounds[1][0], bounds[1][1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect(1.)
points = {
particle_type: ax.plot([], [], "o", ms=2, color=color)[0]
for particle_type, color in TYPE_TO_COLOR.items()}
plot_info.append((ax, trajectory, points))
num_steps = trajectory.shape[0]
def update(step_i):
outputs = []
for _, trajectory, points in plot_info:
for particle_type, line in points.items():
mask = rollout_data["particle_types"] == particle_type
line.set_data(trajectory[step_i, mask, 0],
trajectory[step_i, mask, 1])
outputs.append(line)
return outputs
unused_animation = animation.FuncAnimation(
fig, update,
frames=np.arange(0, num_steps, FLAGS.step_stride), interval=10)
plt.show(block=FLAGS.block_on_show)
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | learning_to_simulate/render_rollout.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Full model implementation accompanying ICML 2020 submission.
"Learning to Simulate Complex Physics with Graph Networks"
Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying,
Jure Leskovec, Peter W. Battaglia
https://arxiv.org/abs/2002.09405
"""
import graph_nets as gn
import sonnet as snt
import tensorflow.compat.v1 as tf
from learning_to_simulate import connectivity_utils
from learning_to_simulate import graph_network
STD_EPSILON = 1e-8
class LearnedSimulator(snt.AbstractModule):
"""Learned simulator from https://arxiv.org/pdf/2002.09405.pdf."""
def __init__(
self,
num_dimensions,
connectivity_radius,
graph_network_kwargs,
boundaries,
normalization_stats,
num_particle_types,
particle_type_embedding_size,
name="LearnedSimulator"):
"""Inits the model.
Args:
num_dimensions: Dimensionality of the problem.
connectivity_radius: Scalar with the radius of connectivity.
graph_network_kwargs: Keyword arguments to pass to the learned part
of the graph network `model.EncodeProcessDecode`.
boundaries: List of 2-tuples, containing the lower and upper boundaries of
        the cuboid containing the particles along each dimension, matching
the dimensionality of the problem.
normalization_stats: Dictionary with statistics with keys "acceleration"
and "velocity", containing a named tuple for each with mean and std
fields, matching the dimensionality of the problem.
num_particle_types: Number of different particle types.
particle_type_embedding_size: Embedding size for the particle type.
name: Name of the Sonnet module.
"""
super().__init__(name=name)
self._connectivity_radius = connectivity_radius
self._num_particle_types = num_particle_types
self._boundaries = boundaries
self._normalization_stats = normalization_stats
with self._enter_variable_scope():
self._graph_network = graph_network.EncodeProcessDecode(
output_size=num_dimensions, **graph_network_kwargs)
if self._num_particle_types > 1:
self._particle_type_embedding = tf.get_variable(
"particle_embedding",
[self._num_particle_types, particle_type_embedding_size],
trainable=True, use_resource=True)
def _build(self, position_sequence, n_particles_per_example,
global_context=None, particle_types=None):
"""Produces a model step, outputting the next position for each particle.
Args:
position_sequence: Sequence of positions for each node in the batch,
with shape [num_particles_in_batch, sequence_length, num_dimensions]
n_particles_per_example: Number of particles for each graph in the batch
with shape [batch_size]
global_context: Tensor of shape [batch_size, context_size], with global
context.
particle_types: Integer tensor of shape [num_particles_in_batch] with
the integer types of the particles, from 0 to `num_particle_types - 1`.
If None, we assume all particles are the same type.
Returns:
Next position with shape [num_particles_in_batch, num_dimensions] for one
step into the future from the input sequence.
"""
input_graphs_tuple = self._encoder_preprocessor(
position_sequence, n_particles_per_example, global_context,
particle_types)
normalized_acceleration = self._graph_network(input_graphs_tuple)
next_position = self._decoder_postprocessor(
normalized_acceleration, position_sequence)
return next_position
def _encoder_preprocessor(
self, position_sequence, n_node, global_context, particle_types):
# Extract important features from the position_sequence.
most_recent_position = position_sequence[:, -1]
velocity_sequence = time_diff(position_sequence) # Finite-difference.
# Get connectivity of the graph.
(senders, receivers, n_edge
) = connectivity_utils.compute_connectivity_for_batch_pyfunc(
most_recent_position, n_node, self._connectivity_radius)
# Collect node features.
node_features = []
    # Normalized velocity sequence, merging the spatial and time axes.
velocity_stats = self._normalization_stats["velocity"]
normalized_velocity_sequence = (
velocity_sequence - velocity_stats.mean) / velocity_stats.std
flat_velocity_sequence = snt.MergeDims(start=1, size=2)(
normalized_velocity_sequence)
node_features.append(flat_velocity_sequence)
# Normalized clipped distances to lower and upper boundaries.
    # `boundaries` is an array of shape [num_dimensions, 2], where the second
    # axis provides the lower/upper boundaries.
boundaries = tf.constant(self._boundaries, dtype=tf.float32)
distance_to_lower_boundary = (
most_recent_position - tf.expand_dims(boundaries[:, 0], 0))
distance_to_upper_boundary = (
tf.expand_dims(boundaries[:, 1], 0) - most_recent_position)
distance_to_boundaries = tf.concat(
[distance_to_lower_boundary, distance_to_upper_boundary], axis=1)
normalized_clipped_distance_to_boundaries = tf.clip_by_value(
distance_to_boundaries / self._connectivity_radius, -1., 1.)
node_features.append(normalized_clipped_distance_to_boundaries)
# Particle type.
if self._num_particle_types > 1:
particle_type_embeddings = tf.nn.embedding_lookup(
self._particle_type_embedding, particle_types)
node_features.append(particle_type_embeddings)
# Collect edge features.
edge_features = []
# Relative displacement and distances normalized to radius
normalized_relative_displacements = (
tf.gather(most_recent_position, senders) -
tf.gather(most_recent_position, receivers)) / self._connectivity_radius
edge_features.append(normalized_relative_displacements)
normalized_relative_distances = tf.norm(
normalized_relative_displacements, axis=-1, keepdims=True)
edge_features.append(normalized_relative_distances)
# Normalize the global context.
if global_context is not None:
context_stats = self._normalization_stats["context"]
# Context in some datasets are all zero, so add an epsilon for numerical
# stability.
global_context = (global_context - context_stats.mean) / tf.math.maximum(
context_stats.std, STD_EPSILON)
return gn.graphs.GraphsTuple(
nodes=tf.concat(node_features, axis=-1),
edges=tf.concat(edge_features, axis=-1),
        globals=global_context,  # self._graph_net will append this to the nodes.
n_node=n_node,
n_edge=n_edge,
senders=senders,
receivers=receivers,
)
def _decoder_postprocessor(self, normalized_acceleration, position_sequence):
# The model produces the output in normalized space so we apply inverse
# normalization.
acceleration_stats = self._normalization_stats["acceleration"]
acceleration = (
normalized_acceleration * acceleration_stats.std
) + acceleration_stats.mean
# Use an Euler integrator to go from acceleration to position, assuming
# a dt=1 corresponding to the size of the finite difference.
most_recent_position = position_sequence[:, -1]
most_recent_velocity = most_recent_position - position_sequence[:, -2]
new_velocity = most_recent_velocity + acceleration # * dt = 1
new_position = most_recent_position + new_velocity # * dt = 1
return new_position
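  # Note: with dt = 1 the two Euler steps in `_decoder_postprocessor` collapse
  # to a single update,
  #   new_position = 2 * position_sequence[:, -1]
  #                  - position_sequence[:, -2] + acceleration,
  # which is the relation `_inverse_decoder_postprocessor` below inverts to
  # recover a normalized acceleration target from a known next position.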
def get_predicted_and_target_normalized_accelerations(
self, next_position, position_sequence_noise, position_sequence,
n_particles_per_example, global_context=None, particle_types=None): # pylint: disable=g-doc-args
"""Produces normalized and predicted acceleration targets.
Args:
next_position: Tensor of shape [num_particles_in_batch, num_dimensions]
with the positions the model should output given the inputs.
position_sequence_noise: Tensor of the same shape as `position_sequence`
with the noise to apply to each particle.
position_sequence, n_node, global_context, particle_types: Inputs to the
model as defined by `_build`.
Returns:
Tensors of shape [num_particles_in_batch, num_dimensions] with the
predicted and target normalized accelerations.
"""
# Add noise to the input position sequence.
noisy_position_sequence = position_sequence + position_sequence_noise
# Perform the forward pass with the noisy position sequence.
input_graphs_tuple = self._encoder_preprocessor(
noisy_position_sequence, n_particles_per_example, global_context,
particle_types)
predicted_normalized_acceleration = self._graph_network(input_graphs_tuple)
    # Calculate the target acceleration, using `next_position_adjusted`, which
    # is shifted by the noise in the last input position.
next_position_adjusted = next_position + position_sequence_noise[:, -1]
target_normalized_acceleration = self._inverse_decoder_postprocessor(
next_position_adjusted, noisy_position_sequence)
    # As a result, the inverted Euler update in `_inverse_decoder_postprocessor`
    # produces:
# * A target acceleration that does not explicitly correct for the noise in
# the input positions, as the `next_position_adjusted` is different
# from the true `next_position`.
# * A target acceleration that exactly corrects noise in the input velocity
# since the target next velocity calculated by the inverse Euler update
# as `next_position_adjusted - noisy_position_sequence[:,-1]`
# matches the ground truth next velocity (noise cancels out).
return predicted_normalized_acceleration, target_normalized_acceleration
def _inverse_decoder_postprocessor(self, next_position, position_sequence):
"""Inverse of `_decoder_postprocessor`."""
previous_position = position_sequence[:, -1]
previous_velocity = previous_position - position_sequence[:, -2]
next_velocity = next_position - previous_position
acceleration = next_velocity - previous_velocity
acceleration_stats = self._normalization_stats["acceleration"]
normalized_acceleration = (
acceleration - acceleration_stats.mean) / acceleration_stats.std
return normalized_acceleration
def time_diff(input_sequence):
return input_sequence[:, 1:] - input_sequence[:, :-1]
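# For example, `time_diff` maps a position sequence (p0, p1, p2) along the time
# axis to the finite-difference velocities (p1 - p0, p2 - p1).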
| deepmind-research-master | learning_to_simulate/learned_simulator.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# pylint: disable=line-too-long
"""Training script for https://arxiv.org/pdf/2002.09405.pdf.
Example usage (from parent directory):
`python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH}`
Evaluate model from checkpoint (from parent directory):
`python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH} --mode=eval`
Produce rollouts (from parent directory):
`python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH} --output_path={OUTPUT_PATH} --mode=eval_rollout`
"""
# pylint: enable=line-too-long
import collections
import functools
import json
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import tree
from learning_to_simulate import learned_simulator
from learning_to_simulate import noise_utils
from learning_to_simulate import reading_utils
flags.DEFINE_enum(
'mode', 'train', ['train', 'eval', 'eval_rollout'],
help='Train model, one step evaluation or rollout evaluation.')
flags.DEFINE_enum('eval_split', 'test', ['train', 'valid', 'test'],
help='Split to use when running evaluation.')
flags.DEFINE_string('data_path', None, help='The dataset directory.')
flags.DEFINE_integer('batch_size', 2, help='The batch size.')
flags.DEFINE_integer('num_steps', int(2e7), help='Number of steps of training.')
flags.DEFINE_float('noise_std', 6.7e-4, help='The std deviation of the noise.')
flags.DEFINE_string('model_path', None,
help=('The path for saving checkpoints of the model. '
'Defaults to a temporary directory.'))
flags.DEFINE_string('output_path', None,
help='The path for saving outputs (e.g. rollouts).')
FLAGS = flags.FLAGS
Stats = collections.namedtuple('Stats', ['mean', 'std'])
INPUT_SEQUENCE_LENGTH = 6 # So we can calculate the last 5 velocities.
NUM_PARTICLE_TYPES = 9
KINEMATIC_PARTICLE_ID = 3
def get_kinematic_mask(particle_types):
"""Returns a boolean mask, set to true for kinematic (obstacle) particles."""
return tf.equal(particle_types, KINEMATIC_PARTICLE_ID)
def prepare_inputs(tensor_dict):
"""Prepares a single stack of inputs by calculating inputs and targets.
Computes n_particles_per_example, which is a tensor that contains information
about how to partition the axis - i.e. which nodes belong to which graph.
Adds a batch axis to `n_particles_per_example` and `step_context` so they can
later be batched using `batch_concat`. This batch will be the same as if the
elements had been batched via stacking.
Note that all other tensors have a variable size particle axis,
and in this case they will simply be concatenated along that
axis.
Args:
tensor_dict: A dict of tensors containing positions, and step context (
if available).
Returns:
A tuple of input features and target positions.
"""
# Position is encoded as [sequence_length, num_particles, dim] but the model
# expects [num_particles, sequence_length, dim].
pos = tensor_dict['position']
pos = tf.transpose(pos, perm=[1, 0, 2])
# The target position is the final step of the stack of positions.
target_position = pos[:, -1]
# Remove the target from the input.
tensor_dict['position'] = pos[:, :-1]
# Compute the number of particles per example.
num_particles = tf.shape(pos)[0]
# Add an extra dimension for stacking via concat.
tensor_dict['n_particles_per_example'] = num_particles[tf.newaxis]
if 'step_context' in tensor_dict:
# Take the input global context. We have a stack of global contexts,
# and we take the penultimate since the final is the target.
tensor_dict['step_context'] = tensor_dict['step_context'][-2]
# Add an extra dimension for stacking via concat.
tensor_dict['step_context'] = tensor_dict['step_context'][tf.newaxis]
return tensor_dict, target_position
def prepare_rollout_inputs(context, features):
"""Prepares an inputs trajectory for rollout."""
out_dict = {**context}
# Position is encoded as [sequence_length, num_particles, dim] but the model
# expects [num_particles, sequence_length, dim].
pos = tf.transpose(features['position'], [1, 0, 2])
# The target position is the final step of the stack of positions.
target_position = pos[:, -1]
# Remove the target from the input.
out_dict['position'] = pos[:, :-1]
# Compute the number of nodes
out_dict['n_particles_per_example'] = [tf.shape(pos)[0]]
if 'step_context' in features:
out_dict['step_context'] = features['step_context']
out_dict['is_trajectory'] = tf.constant([True], tf.bool)
return out_dict, target_position
def batch_concat(dataset, batch_size):
"""We implement batching as concatenating on the leading axis."""
# We create a dataset of datasets of length batch_size.
windowed_ds = dataset.window(batch_size)
# The plan is then to reduce every nested dataset by concatenating. We can
# do this using tf.data.Dataset.reduce. This requires an initial state, and
# then incrementally reduces by running through the dataset
# Get initial state. In this case this will be empty tensors of the
# correct shape.
initial_state = tree.map_structure(
lambda spec: tf.zeros( # pylint: disable=g-long-lambda
shape=[0] + spec.shape.as_list()[1:], dtype=spec.dtype),
dataset.element_spec)
# We run through the nest and concatenate each entry with the previous state.
def reduce_window(initial_state, ds):
return ds.reduce(initial_state, lambda x, y: tf.concat([x, y], axis=0))
return windowed_ds.map(
lambda *x: tree.map_structure(reduce_window, initial_state, x))
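# The sketch below is an illustrative usage example of `batch_concat` and is
# not used by the training pipeline; the toy shapes and particle counts are
# made up for demonstration.
def _batch_concat_example():
  """Shows that "batching" here concatenates along the leading particle axis.

  Graphs with different numbers of particles can therefore share a batch
  without padding or a dedicated batch dimension.
  """
  def generator():
    yield np.zeros([2, 3], np.float32), np.array([2], np.int32)  # 2 particles.
    yield np.ones([4, 3], np.float32), np.array([4], np.int32)   # 4 particles.
  ds = tf.data.Dataset.from_generator(
      generator,
      output_types=(tf.float32, tf.int32),
      output_shapes=(tf.TensorShape([None, 3]), tf.TensorShape([1])))
  batched = batch_concat(ds, batch_size=2)
  # Each element of `batched` is a tuple (positions, n_particles) with shapes
  # [6, 3] and [2]: both graphs concatenated on the leading axis, mirroring
  # how the outputs of `prepare_inputs` are combined for the model.
  return batched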
def get_input_fn(data_path, batch_size, mode, split):
"""Gets the learning simulation input function for tf.estimator.Estimator.
Args:
data_path: the path to the dataset directory.
batch_size: the number of graphs in a batch.
    mode: either 'one_step_train', 'one_step' or 'rollout'.
    split: either 'train', 'valid' or 'test'.
Returns:
The input function for the learning simulation model.
"""
def input_fn():
"""Input function for learning simulation."""
# Loads the metadata of the dataset.
metadata = _read_metadata(data_path)
# Create a tf.data.Dataset from the TFRecord.
ds = tf.data.TFRecordDataset([os.path.join(data_path, f'{split}.tfrecord')])
ds = ds.map(functools.partial(
reading_utils.parse_serialized_simulation_example, metadata=metadata))
if mode.startswith('one_step'):
# Splits an entire trajectory into chunks of 7 steps.
# Previous 5 velocities, current velocity and target.
split_with_window = functools.partial(
reading_utils.split_trajectory,
window_length=INPUT_SEQUENCE_LENGTH + 1)
ds = ds.flat_map(split_with_window)
# Splits a chunk into input steps and target steps
ds = ds.map(prepare_inputs)
# If in train mode, repeat dataset forever and shuffle.
if mode == 'one_step_train':
ds = ds.repeat()
ds = ds.shuffle(512)
# Custom batching on the leading axis.
ds = batch_concat(ds, batch_size)
elif mode == 'rollout':
# Rollout evaluation only available for batch size 1
assert batch_size == 1
ds = ds.map(prepare_rollout_inputs)
else:
raise ValueError(f'mode: {mode} not recognized')
return ds
return input_fn
def rollout(simulator, features, num_steps):
"""Rolls out a trajectory by applying the model in sequence."""
initial_positions = features['position'][:, 0:INPUT_SEQUENCE_LENGTH]
ground_truth_positions = features['position'][:, INPUT_SEQUENCE_LENGTH:]
global_context = features.get('step_context')
def step_fn(step, current_positions, predictions):
if global_context is None:
global_context_step = None
else:
global_context_step = global_context[
step + INPUT_SEQUENCE_LENGTH - 1][tf.newaxis]
next_position = simulator(
current_positions,
n_particles_per_example=features['n_particles_per_example'],
particle_types=features['particle_type'],
global_context=global_context_step)
# Update kinematic particles from prescribed trajectory.
kinematic_mask = get_kinematic_mask(features['particle_type'])
next_position_ground_truth = ground_truth_positions[:, step]
next_position = tf.where(kinematic_mask, next_position_ground_truth,
next_position)
updated_predictions = predictions.write(step, next_position)
# Shift `current_positions`, removing the oldest position in the sequence
# and appending the next position at the end.
next_positions = tf.concat([current_positions[:, 1:],
next_position[:, tf.newaxis]], axis=1)
return (step + 1, next_positions, updated_predictions)
predictions = tf.TensorArray(size=num_steps, dtype=tf.float32)
_, _, predictions = tf.while_loop(
cond=lambda step, state, prediction: tf.less(step, num_steps),
body=step_fn,
loop_vars=(0, initial_positions, predictions),
back_prop=False,
parallel_iterations=1)
output_dict = {
'initial_positions': tf.transpose(initial_positions, [1, 0, 2]),
'predicted_rollout': predictions.stack(),
'ground_truth_rollout': tf.transpose(ground_truth_positions, [1, 0, 2]),
'particle_types': features['particle_type'],
}
if global_context is not None:
output_dict['global_context'] = global_context
return output_dict
def _combine_std(std_x, std_y):
  """Combines two independent standard deviations in quadrature."""
  return np.sqrt(std_x**2 + std_y**2)
def _get_simulator(model_kwargs, metadata, acc_noise_std, vel_noise_std):
"""Instantiates the simulator."""
# Cast statistics to numpy so they are arrays when entering the model.
cast = lambda v: np.array(v, dtype=np.float32)
acceleration_stats = Stats(
cast(metadata['acc_mean']),
_combine_std(cast(metadata['acc_std']), acc_noise_std))
velocity_stats = Stats(
cast(metadata['vel_mean']),
_combine_std(cast(metadata['vel_std']), vel_noise_std))
normalization_stats = {'acceleration': acceleration_stats,
'velocity': velocity_stats}
if 'context_mean' in metadata:
context_stats = Stats(
cast(metadata['context_mean']), cast(metadata['context_std']))
normalization_stats['context'] = context_stats
simulator = learned_simulator.LearnedSimulator(
num_dimensions=metadata['dim'],
connectivity_radius=metadata['default_connectivity_radius'],
graph_network_kwargs=model_kwargs,
boundaries=metadata['bounds'],
num_particle_types=NUM_PARTICLE_TYPES,
normalization_stats=normalization_stats,
particle_type_embedding_size=16)
return simulator
def get_one_step_estimator_fn(data_path,
noise_std,
latent_size=128,
hidden_size=128,
hidden_layers=2,
message_passing_steps=10):
"""Gets one step model for training simulation."""
metadata = _read_metadata(data_path)
model_kwargs = dict(
latent_size=latent_size,
mlp_hidden_size=hidden_size,
mlp_num_hidden_layers=hidden_layers,
num_message_passing_steps=message_passing_steps)
def estimator_fn(features, labels, mode):
target_next_position = labels
simulator = _get_simulator(model_kwargs, metadata,
vel_noise_std=noise_std,
acc_noise_std=noise_std)
# Sample the noise to add to the inputs to the model during training.
sampled_noise = noise_utils.get_random_walk_noise_for_position_sequence(
features['position'], noise_std_last_step=noise_std)
non_kinematic_mask = tf.logical_not(
get_kinematic_mask(features['particle_type']))
noise_mask = tf.cast(
non_kinematic_mask, sampled_noise.dtype)[:, tf.newaxis, tf.newaxis]
sampled_noise *= noise_mask
# Get the predictions and target accelerations.
pred_target = simulator.get_predicted_and_target_normalized_accelerations(
next_position=target_next_position,
position_sequence=features['position'],
position_sequence_noise=sampled_noise,
n_particles_per_example=features['n_particles_per_example'],
particle_types=features['particle_type'],
global_context=features.get('step_context'))
pred_acceleration, target_acceleration = pred_target
    # Calculate the loss and mask out the loss on kinematic particles.
loss = (pred_acceleration - target_acceleration)**2
num_non_kinematic = tf.reduce_sum(
tf.cast(non_kinematic_mask, tf.float32))
loss = tf.where(non_kinematic_mask, loss, tf.zeros_like(loss))
loss = tf.reduce_sum(loss) / tf.reduce_sum(num_non_kinematic)
global_step = tf.train.get_global_step()
# Set learning rate to decay from 1e-4 to 1e-6 exponentially.
min_lr = 1e-6
lr = tf.train.exponential_decay(learning_rate=1e-4 - min_lr,
global_step=global_step,
decay_steps=int(5e6),
decay_rate=0.1) + min_lr
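    # For reference, this schedule works out to
    #   lr(step) = min_lr + (1e-4 - min_lr) * 0.1 ** (step / 5e6),
    # i.e. ~1e-4 at step 0, ~1e-5 after 5M steps, and approaching 1e-6.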
opt = tf.train.AdamOptimizer(learning_rate=lr)
train_op = opt.minimize(loss, global_step)
# Calculate next position and add some additional eval metrics (only eval).
predicted_next_position = simulator(
position_sequence=features['position'],
n_particles_per_example=features['n_particles_per_example'],
particle_types=features['particle_type'],
global_context=features.get('step_context'))
predictions = {'predicted_next_position': predicted_next_position}
eval_metrics_ops = {
'loss_mse': tf.metrics.mean_squared_error(
pred_acceleration, target_acceleration),
'one_step_position_mse': tf.metrics.mean_squared_error(
predicted_next_position, target_next_position)
}
return tf_estimator.EstimatorSpec(
mode=mode,
train_op=train_op,
loss=loss,
predictions=predictions,
eval_metric_ops=eval_metrics_ops)
return estimator_fn
def get_rollout_estimator_fn(data_path,
noise_std,
latent_size=128,
hidden_size=128,
hidden_layers=2,
message_passing_steps=10):
"""Gets the model function for tf.estimator.Estimator."""
metadata = _read_metadata(data_path)
model_kwargs = dict(
latent_size=latent_size,
mlp_hidden_size=hidden_size,
mlp_num_hidden_layers=hidden_layers,
num_message_passing_steps=message_passing_steps)
def estimator_fn(features, labels, mode):
del labels # Labels to conform to estimator spec.
simulator = _get_simulator(model_kwargs, metadata,
acc_noise_std=noise_std,
vel_noise_std=noise_std)
num_steps = metadata['sequence_length'] - INPUT_SEQUENCE_LENGTH
rollout_op = rollout(simulator, features, num_steps=num_steps)
squared_error = (rollout_op['predicted_rollout'] -
rollout_op['ground_truth_rollout']) ** 2
loss = tf.reduce_mean(squared_error)
eval_ops = {'rollout_error_mse': tf.metrics.mean_squared_error(
rollout_op['predicted_rollout'], rollout_op['ground_truth_rollout'])}
    # Add a leading axis, since Estimator's predict method insists that all
    # tensors have a shared leading batch axis of the same dims.
rollout_op = tree.map_structure(lambda x: x[tf.newaxis], rollout_op)
return tf_estimator.EstimatorSpec(
mode=mode,
train_op=None,
loss=loss,
predictions=rollout_op,
eval_metric_ops=eval_ops)
return estimator_fn
def _read_metadata(data_path):
with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:
return json.loads(fp.read())
def main(_):
"""Train or evaluates the model."""
if FLAGS.mode in ['train', 'eval']:
estimator = tf_estimator.Estimator(
get_one_step_estimator_fn(FLAGS.data_path, FLAGS.noise_std),
model_dir=FLAGS.model_path)
if FLAGS.mode == 'train':
# Train all the way through.
estimator.train(
input_fn=get_input_fn(FLAGS.data_path, FLAGS.batch_size,
mode='one_step_train', split='train'),
max_steps=FLAGS.num_steps)
else:
# One-step evaluation from checkpoint.
eval_metrics = estimator.evaluate(input_fn=get_input_fn(
FLAGS.data_path, FLAGS.batch_size,
mode='one_step', split=FLAGS.eval_split))
logging.info('Evaluation metrics:')
logging.info(eval_metrics)
elif FLAGS.mode == 'eval_rollout':
if not FLAGS.output_path:
raise ValueError('A rollout path must be provided.')
rollout_estimator = tf_estimator.Estimator(
get_rollout_estimator_fn(FLAGS.data_path, FLAGS.noise_std),
model_dir=FLAGS.model_path)
# Iterate through rollouts saving them one by one.
metadata = _read_metadata(FLAGS.data_path)
rollout_iterator = rollout_estimator.predict(
input_fn=get_input_fn(FLAGS.data_path, batch_size=1,
mode='rollout', split=FLAGS.eval_split))
for example_index, example_rollout in enumerate(rollout_iterator):
example_rollout['metadata'] = metadata
filename = f'rollout_{FLAGS.eval_split}_{example_index}.pkl'
filename = os.path.join(FLAGS.output_path, filename)
logging.info('Saving: %s.', filename)
if not os.path.exists(FLAGS.output_path):
os.mkdir(FLAGS.output_path)
with open(filename, 'wb') as file:
pickle.dump(example_rollout, file)
if __name__ == '__main__':
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | learning_to_simulate/train.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools to compute the connectivity of the graph."""
import functools
import numpy as np
from sklearn import neighbors
import tensorflow.compat.v1 as tf
def _compute_connectivity(positions, radius, add_self_edges):
"""Get the indices of connected edges with radius connectivity.
Args:
positions: Positions of nodes in the graph. Shape:
[num_nodes_in_graph, num_dims].
radius: Radius of connectivity.
add_self_edges: Whether to include self edges or not.
Returns:
    sender indices [num_edges_in_graph]
    receiver indices [num_edges_in_graph]
"""
tree = neighbors.KDTree(positions)
receivers_list = tree.query_radius(positions, r=radius)
num_nodes = len(positions)
senders = np.repeat(range(num_nodes), [len(a) for a in receivers_list])
receivers = np.concatenate(receivers_list, axis=0)
if not add_self_edges:
# Remove self edges.
mask = senders != receivers
senders = senders[mask]
receivers = receivers[mask]
return senders, receivers
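# Illustrative usage sketch (not called anywhere in this module); the toy
# positions and radius below are made up for demonstration.
def _connectivity_example():
  """Radius connectivity on three collinear points spaced 1.0 apart."""
  positions = np.array([[0.], [1.], [2.]])
  senders, receivers = _compute_connectivity(
      positions, radius=1.5, add_self_edges=False)
  # With radius 1.5 only neighbouring points connect, giving the 4 directed
  # edges 0->1, 1->0, 1->2 and 2->1 (edge ordering is not guaranteed).
  return senders, receivers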
def _compute_connectivity_for_batch(
positions, n_node, radius, add_self_edges):
"""`compute_connectivity` for a batch of graphs.
Args:
positions: Positions of nodes in the batch of graphs. Shape:
[num_nodes_in_batch, num_dims].
n_node: Number of nodes for each graph in the batch. Shape:
      [num_graphs_in_batch].
radius: Radius of connectivity.
add_self_edges: Whether to include self edges or not.
Returns:
    sender indices [num_edges_in_batch]
receiver indices [num_edges_in_batch]
number of edges per graph [num_graphs_in_batch]
"""
# TODO(alvarosg): Consider if we want to support batches here or not.
# Separate the positions corresponding to particles in different graphs.
positions_per_graph_list = np.split(positions, np.cumsum(n_node[:-1]), axis=0)
receivers_list = []
senders_list = []
n_edge_list = []
num_nodes_in_previous_graphs = 0
# Compute connectivity for each graph in the batch.
for positions_graph_i in positions_per_graph_list:
senders_graph_i, receivers_graph_i = _compute_connectivity(
positions_graph_i, radius, add_self_edges)
num_edges_graph_i = len(senders_graph_i)
n_edge_list.append(num_edges_graph_i)
# Because the inputs will be concatenated, we need to add offsets to the
# sender and receiver indices according to the number of nodes in previous
# graphs in the same batch.
receivers_list.append(receivers_graph_i + num_nodes_in_previous_graphs)
senders_list.append(senders_graph_i + num_nodes_in_previous_graphs)
num_nodes_graph_i = len(positions_graph_i)
num_nodes_in_previous_graphs += num_nodes_graph_i
# Concatenate all of the results.
senders = np.concatenate(senders_list, axis=0).astype(np.int32)
receivers = np.concatenate(receivers_list, axis=0).astype(np.int32)
n_edge = np.stack(n_edge_list).astype(np.int32)
return senders, receivers, n_edge
def compute_connectivity_for_batch_pyfunc(
positions, n_node, radius, add_self_edges=True):
"""`_compute_connectivity_for_batch` wrapped in a pyfunc."""
partial_fn = functools.partial(
_compute_connectivity_for_batch, add_self_edges=add_self_edges)
senders, receivers, n_edge = tf.py_function(
partial_fn,
[positions, n_node, radius],
[tf.int32, tf.int32, tf.int32])
senders.set_shape([None])
receivers.set_shape([None])
n_edge.set_shape(n_node.get_shape())
return senders, receivers, n_edge
| deepmind-research-master | learning_to_simulate/connectivity_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Methods to calculate input noise."""
import tensorflow.compat.v1 as tf
from learning_to_simulate import learned_simulator
def get_random_walk_noise_for_position_sequence(
position_sequence, noise_std_last_step):
"""Returns random-walk noise in the velocity applied to the position."""
velocity_sequence = learned_simulator.time_diff(position_sequence)
# We want the noise scale in the velocity at the last step to be fixed.
# Because we are going to compose noise at each step using a random_walk:
# std_last_step**2 = num_velocities * std_each_step**2
# so to keep `std_last_step` fixed, we apply at each step:
  # std_each_step = `std_last_step / np.sqrt(num_input_velocities)`
# TODO(alvarosg): Make sure this is consistent with the value and
# description provided in the paper.
num_velocities = velocity_sequence.shape.as_list()[1]
velocity_sequence_noise = tf.random.normal(
tf.shape(velocity_sequence),
stddev=noise_std_last_step / num_velocities ** 0.5,
dtype=position_sequence.dtype)
# Apply the random walk.
velocity_sequence_noise = tf.cumsum(velocity_sequence_noise, axis=1)
# Integrate the noise in the velocity to the positions, assuming
  # an Euler integrator and a dt = 1, and adding no noise to the very first
# position (since that will only be used to calculate the first position
# change).
position_sequence_noise = tf.concat([
tf.zeros_like(velocity_sequence_noise[:, 0:1]),
tf.cumsum(velocity_sequence_noise, axis=1)], axis=1)
return position_sequence_noise
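# Illustrative sketch (not used elsewhere) of the scaling argument in the
# comment above; the numbers below are arbitrary examples.
def _noise_scale_example():
  """Checks that per-step noise of std / sqrt(n) gives std at the last step.

  A random walk of `n` i.i.d. steps with per-step std `s` has std
  `s * sqrt(n)` at the last step, so using
  `s = noise_std_last_step / sqrt(n)` keeps the last-step velocity noise at
  `noise_std_last_step`.
  """
  num_velocities = 5
  noise_std_last_step = 6.7e-4
  std_each_step = noise_std_last_step / num_velocities ** 0.5
  std_last_step = std_each_step * num_velocities ** 0.5
  assert abs(std_last_step - noise_std_last_step) < 1e-12
  return std_each_step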
| deepmind-research-master | learning_to_simulate/noise_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script accompanying ICML 2020 submission.
"Learning to Simulate Complex Physics with Graph Networks"
Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying,
Jure Leskovec, Peter W. Battaglia
https://arxiv.org/abs/2002.09405
Here we provide the utility function `sample_random_position_sequence()` which
returns a sequence of positions for a variable number of particles, similar to
what a real dataset would provide, and connect the model to it, in both,
single step inference and training mode.
Dependencies include Tensorflow 1.x, Sonnet 1.x and the Graph Nets 1.1 library.
"""
import collections
from learning_to_simulate import learned_simulator
from learning_to_simulate import noise_utils
import numpy as np
import tensorflow.compat.v1 as tf
INPUT_SEQUENCE_LENGTH = 6
SEQUENCE_LENGTH = INPUT_SEQUENCE_LENGTH + 1 # add one target position.
NUM_DIMENSIONS = 3
NUM_PARTICLE_TYPES = 6
BATCH_SIZE = 5
GLOBAL_CONTEXT_SIZE = 6
Stats = collections.namedtuple("Stats", ["mean", "std"])
DUMMY_STATS = Stats(
mean=np.zeros([NUM_DIMENSIONS], dtype=np.float32),
std=np.ones([NUM_DIMENSIONS], dtype=np.float32))
DUMMY_CONTEXT_STATS = Stats(
mean=np.zeros([GLOBAL_CONTEXT_SIZE], dtype=np.float32),
std=np.ones([GLOBAL_CONTEXT_SIZE], dtype=np.float32))
DUMMY_BOUNDARIES = [(-1., 1.)] * NUM_DIMENSIONS
def sample_random_position_sequence():
"""Returns mock data mimicking the input features collected by the encoder."""
num_particles = tf.random_uniform(
shape=(), minval=50, maxval=1000, dtype=tf.int32)
position_sequence = tf.random.normal(
shape=[num_particles, SEQUENCE_LENGTH, NUM_DIMENSIONS])
return position_sequence
def main():
# Build the model.
learnable_model = learned_simulator.LearnedSimulator(
num_dimensions=NUM_DIMENSIONS,
connectivity_radius=0.05,
graph_network_kwargs=dict(
latent_size=128,
mlp_hidden_size=128,
mlp_num_hidden_layers=2,
num_message_passing_steps=10,
),
boundaries=DUMMY_BOUNDARIES,
normalization_stats={"acceleration": DUMMY_STATS,
"velocity": DUMMY_STATS,
"context": DUMMY_CONTEXT_STATS,},
num_particle_types=NUM_PARTICLE_TYPES,
particle_type_embedding_size=16,
)
# Sample a batch of particle sequences with shape:
# [TOTAL_NUM_PARTICLES, SEQUENCE_LENGTH, NUM_DIMENSIONS]
sampled_position_sequences = [
sample_random_position_sequence() for _ in range(BATCH_SIZE)]
position_sequence_batch = tf.concat(sampled_position_sequences, axis=0)
# Count how many particles are present in each element in the batch.
# [BATCH_SIZE]
n_particles_per_example = tf.stack(
[tf.shape(seq)[0] for seq in sampled_position_sequences], axis=0)
# Sample particle types.
# [TOTAL_NUM_PARTICLES]
particle_types = tf.random_uniform(
[tf.shape(position_sequence_batch)[0]],
0, NUM_PARTICLE_TYPES, dtype=tf.int32)
# Sample global context.
global_context = tf.random_uniform(
[BATCH_SIZE, GLOBAL_CONTEXT_SIZE], -1., 1., dtype=tf.float32)
# Separate input sequence from target sequence.
# [TOTAL_NUM_PARTICLES, INPUT_SEQUENCE_LENGTH, NUM_DIMENSIONS]
input_position_sequence = position_sequence_batch[:, :-1]
# [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS]
target_next_position = position_sequence_batch[:, -1]
# Single step of inference with the model to predict next position for each
# particle [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS].
predicted_next_position = learnable_model(
input_position_sequence, n_particles_per_example, global_context,
particle_types)
print(f"Per-particle output tensor: {predicted_next_position}")
# Obtaining predicted and target normalized accelerations for training.
position_sequence_noise = (
noise_utils.get_random_walk_noise_for_position_sequence(
input_position_sequence, noise_std_last_step=6.7e-4))
# Both with shape [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS]
predicted_normalized_acceleration, target_normalized_acceleration = (
learnable_model.get_predicted_and_target_normalized_accelerations(
target_next_position, position_sequence_noise,
input_position_sequence, n_particles_per_example, global_context,
particle_types))
print(f"Predicted norm. acceleration: {predicted_normalized_acceleration}")
print(f"Target norm. acceleration: {target_normalized_acceleration}")
with tf.train.SingularMonitoredSession() as sess:
sess.run([predicted_next_position,
predicted_normalized_acceleration,
target_normalized_acceleration])
if __name__ == "__main__":
tf.disable_v2_behavior()
main()
| deepmind-research-master | learning_to_simulate/model_demo.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph network implementation accompanying ICML 2020 submission.
"Learning to Simulate Complex Physics with Graph Networks"
Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying,
Jure Leskovec, Peter W. Battaglia
https://arxiv.org/abs/2002.09405
The Sonnet `EncodeProcessDecode` module provided here implements the learnable
parts of the model.
It assumes an encoder preprocessor has already built a graph with
connectivity and features as described in the paper, with features normalized
to zero-mean unit-variance.
Dependencies include Tensorflow 1.x, Sonnet 1.x and the Graph Nets 1.1 library.
"""
from typing import Callable
import graph_nets as gn
import sonnet as snt
import tensorflow as tf
Reducer = Callable[[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor]
def build_mlp(
hidden_size: int, num_hidden_layers: int, output_size: int) -> snt.Module:
"""Builds an MLP."""
return snt.nets.MLP(
output_sizes=[hidden_size] * num_hidden_layers + [output_size]) # pytype: disable=bad-return-type # gen-stub-imports
class EncodeProcessDecode(snt.AbstractModule):
"""Encode-Process-Decode function approximator for learnable simulator."""
def __init__(
self,
latent_size: int,
mlp_hidden_size: int,
mlp_num_hidden_layers: int,
num_message_passing_steps: int,
output_size: int,
reducer: Reducer = tf.math.unsorted_segment_sum,
name: str = "EncodeProcessDecode"):
"""Inits the model.
Args:
latent_size: Size of the node and edge latent representations.
mlp_hidden_size: Hidden layer size for all MLPs.
mlp_num_hidden_layers: Number of hidden layers in all MLPs.
num_message_passing_steps: Number of message passing steps.
output_size: Output size of the decode node representations as required
by the downstream update function.
      reducer: Reduction to be used when aggregating the edge features into
        the nodes in the interaction network. This should be a callable whose
        signature matches tf.math.unsorted_segment_sum.
name: Name of the model.
"""
super().__init__(name=name)
self._latent_size = latent_size
self._mlp_hidden_size = mlp_hidden_size
self._mlp_num_hidden_layers = mlp_num_hidden_layers
self._num_message_passing_steps = num_message_passing_steps
self._output_size = output_size
self._reducer = reducer
with self._enter_variable_scope():
self._networks_builder()
def _build(self, input_graph: gn.graphs.GraphsTuple) -> tf.Tensor:
"""Forward pass of the learnable dynamics model."""
# Encode the input_graph.
latent_graph_0 = self._encode(input_graph)
# Do `m` message passing steps in the latent graphs.
latent_graph_m = self._process(latent_graph_0)
# Decode from the last latent graph.
return self._decode(latent_graph_m)
def _networks_builder(self):
"""Builds the networks."""
def build_mlp_with_layer_norm():
mlp = build_mlp(
hidden_size=self._mlp_hidden_size,
num_hidden_layers=self._mlp_num_hidden_layers,
output_size=self._latent_size)
return snt.Sequential([mlp, snt.LayerNorm()])
# The encoder graph network independently encodes edge and node features.
encoder_kwargs = dict(
edge_model_fn=build_mlp_with_layer_norm,
node_model_fn=build_mlp_with_layer_norm)
self._encoder_network = gn.modules.GraphIndependent(**encoder_kwargs)
# Create `num_message_passing_steps` graph networks with unshared parameters
# that update the node and edge latent features.
# Note that we can use `modules.InteractionNetwork` because
# it also outputs the messages as updated edge latent features.
self._processor_networks = []
for _ in range(self._num_message_passing_steps):
self._processor_networks.append(
gn.modules.InteractionNetwork(
edge_model_fn=build_mlp_with_layer_norm,
node_model_fn=build_mlp_with_layer_norm,
reducer=self._reducer))
# The decoder MLP decodes node latent features into the output size.
self._decoder_network = build_mlp(
hidden_size=self._mlp_hidden_size,
num_hidden_layers=self._mlp_num_hidden_layers,
output_size=self._output_size)
def _encode(
self, input_graph: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple:
"""Encodes the input graph features into a latent graph."""
# Copy the globals to all of the nodes, if applicable.
if input_graph.globals is not None:
broadcasted_globals = gn.blocks.broadcast_globals_to_nodes(input_graph)
input_graph = input_graph.replace(
nodes=tf.concat([input_graph.nodes, broadcasted_globals], axis=-1),
globals=None)
# Encode the node and edge features.
latent_graph_0 = self._encoder_network(input_graph)
return latent_graph_0
def _process(
self, latent_graph_0: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple:
"""Processes the latent graph with several steps of message passing."""
# Do `m` message passing steps in the latent graphs.
# (In the shared parameters case, just reuse the same `processor_network`)
latent_graph_prev_k = latent_graph_0
latent_graph_k = latent_graph_0
for processor_network_k in self._processor_networks:
latent_graph_k = self._process_step(
processor_network_k, latent_graph_prev_k)
latent_graph_prev_k = latent_graph_k
latent_graph_m = latent_graph_k
return latent_graph_m
def _process_step(
self, processor_network_k: snt.Module,
latent_graph_prev_k: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple:
"""Single step of message passing with node/edge residual connections."""
# One step of message passing.
latent_graph_k = processor_network_k(latent_graph_prev_k)
# Add residuals.
latent_graph_k = latent_graph_k.replace(
nodes=latent_graph_k.nodes+latent_graph_prev_k.nodes,
edges=latent_graph_k.edges+latent_graph_prev_k.edges)
return latent_graph_k
def _decode(self, latent_graph: gn.graphs.GraphsTuple) -> tf.Tensor:
"""Decodes from the latent graph."""
return self._decoder_network(latent_graph.nodes)
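# Construction sketch (illustrative only): these hyperparameters mirror the
# ones used by `model_demo.py` and the training script in this directory,
# with `output_size` chosen here as 3, e.g. for a 3-D normalized acceleration.
def _encode_process_decode_example():
  return EncodeProcessDecode(
      latent_size=128,
      mlp_hidden_size=128,
      mlp_num_hidden_layers=2,
      num_message_passing_steps=10,
      output_size=3)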
| deepmind-research-master | learning_to_simulate/graph_network.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sqlalchemy schema for the metadata db."""
import sqlalchemy
from sqlalchemy.ext import declarative
Column = sqlalchemy.Column
Integer = sqlalchemy.Integer
String = sqlalchemy.String
LargeBinary = sqlalchemy.LargeBinary
ForeignKey = sqlalchemy.ForeignKey
# pylint: disable=invalid-name
# https://docs.sqlalchemy.org/en/13/orm/tutorial.html
Base = declarative.declarative_base()
EpisodeTag = sqlalchemy.Table(
'EpisodeTags', Base.metadata,
Column(
'EpisodeId', String, ForeignKey('Episodes.EpisodeId'),
primary_key=True),
Column('Tag', String, ForeignKey('Tags.Name'), primary_key=True))
"""Table relating episodes and tags.
Attributes:
EpisodeId: A string of digits that uniquely identifies the episode.
Tag: Human readable tag name.
"""
class Episode(Base):
"""Table describing individual episodes.
Attributes:
EpisodeId: A string of digits that uniquely identifies the episode.
TaskId: A human readable name for the task corresponding to the behavior
that generated the episode.
DataPath: The name of the episode file holding the data for this episode.
Timestamp: A unix timestamp recording when the episode was generated.
EpisodeType: A string describing the type of policy that generated the
episode. Possible values are:
- `EPISODE_ROBOT_AGENT`: The behavior policy is a learned or scripted
controller.
- `EPISODE_ROBOT_TELEOPERATION`: The behavior policy is a human
teleoperating the robot.
- `EPISODE_ROBOT_DAGGER`: The behavior policy is a mix of controller
and human generated actions.
Tags: A list of tags attached to this episode.
Rewards: A list of `RewardSequence`s containing sketched rewards for this
episode.
"""
__tablename__ = 'Episodes'
EpisodeId = Column(String, primary_key=True)
TaskId = Column(String)
DataPath = Column(String)
Timestamp = Column(Integer)
EpisodeType = Column(String)
Tags = sqlalchemy.orm.relationship(
'Tag', secondary=EpisodeTag, back_populates='Episodes')
Rewards = sqlalchemy.orm.relationship(
'RewardSequence', backref='Episode')
class Tag(Base):
"""Table of tags that can be attached to episodes.
Attributes:
Name: Human readable tag name.
    Episodes: The episodes that have been annotated with this tag.
"""
__tablename__ = 'Tags'
Name = Column(String, primary_key=True)
Episodes = sqlalchemy.orm.relationship(
'Episode', secondary=EpisodeTag, back_populates='Tags')
class RewardSequence(Base):
"""Table describing reward sequences for episodes.
Attributes:
EpisodeId: Foreign key into the `Episodes` table.
RewardSequenceId: Distinguishes multiple rewards for the same episode.
RewardTaskId: A human readable name of the task for this reward signal.
Typically the same as the corresponding `TaskId` in the `Episodes`
table.
Type: A string describing the type of reward signal. Currently the only
value is `REWARD_SKETCH`.
User: The name of the user who produced this reward sequence.
Values: A sequence of float32 values, packed as a binary blob. There is one
float value for each frame of the episode, corresponding to the
annotated reward.
"""
__tablename__ = 'RewardSequences'
EpisodeId = Column(
'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True)
RewardSequenceId = Column(String, primary_key=True)
RewardTaskId = Column('RewardTaskId', String)
Type = Column(String)
User = Column(String)
Values = Column(LargeBinary)
class ArchiveFile(Base):
"""Table describing where episodes are stored in archives.
This information is relevant if you want to download or extract a specific
episode from the archives they are distributed in.
Attributes:
EpisodeId: Foreign key into the `Episodes` table.
ArchiveFile: Name of the archive file containing the corresponding episode.
"""
__tablename__ = 'ArchiveFiles'
EpisodeId = Column(
'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True)
ArchiveFile = Column(String)
# pylint: enable=invalid-name
| deepmind-research-master | sketchy/metadata_schema.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of loading rewards from the metadata file."""
from absl import app
from absl import flags
import numpy as np
import sqlalchemy
from sketchy import metadata_schema
flags.DEFINE_string(
'metadata', '/tmp/metadata.sqlite', 'Path to metadata file.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
engine = sqlalchemy.create_engine('sqlite:///' + FLAGS.metadata)
session = sqlalchemy.orm.sessionmaker(bind=engine)()
episodes = session.query(metadata_schema.Episode).join(
metadata_schema.RewardSequence).limit(5)
for episode in episodes:
rewards = np.frombuffer(episode.Rewards[0].Values, dtype=np.float32)
print('---')
print(f'Episode: {episode.EpisodeId}')
print(f'Episode file: {episode.DataPath}')
print(f'Reward type: {episode.Rewards[0].Type}')
print(f'Reward values: {rewards}')
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | sketchy/reward_example.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | sketchy/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of loading sketchy data in tensorflow."""
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import tensorflow.compat.v2 as tf
from sketchy import sketchy
flags.DEFINE_boolean('show_images', False, 'Enable to show example images.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
# The example file contains only a few timesteps from a single episode.
dataset = sketchy.load_frames('sketchy/example_data.tfrecords')
dataset = dataset.prefetch(5)
for example in dataset:
print('---')
for name, value in sorted(example.items()):
print(name, value.dtype, value.shape)
if FLAGS.show_images:
plt.imshow(example['pixels/basket_front_left'])
plt.show()
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | sketchy/dataset_example.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for loading sketchy data into tensorflow."""
import tensorflow.compat.v2 as tf
def load_frames(filenames, num_parallel_reads=1, num_map_threads=None):
  """Returns a tf.data.Dataset of parsed and decoded sketchy frames."""
if not num_map_threads:
num_map_threads = num_parallel_reads
dataset = tf.data.TFRecordDataset(
filenames, num_parallel_reads=num_parallel_reads)
return dataset.map(_parse_example, num_parallel_calls=num_map_threads)
_FEATURES = {
# Actions
'actions':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
# Observations
'gripper/joints/velocity':
tf.io.FixedLenFeature(shape=1, dtype=tf.float32),
'gripper/joints/torque':
tf.io.FixedLenFeature(shape=1, dtype=tf.float32),
'gripper/grasp':
tf.io.FixedLenFeature(shape=1, dtype=tf.int64),
'gripper/joints/angle':
tf.io.FixedLenFeature(shape=1, dtype=tf.float32),
'sawyer/joints/velocity':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/pinch/pose':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/tcp/pose':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/tcp/effort':
tf.io.FixedLenFeature(shape=6, dtype=tf.float32),
'sawyer/joints/torque':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'sawyer/tcp/velocity':
tf.io.FixedLenFeature(shape=6, dtype=tf.float32),
'sawyer/joints/angle':
tf.io.FixedLenFeature(shape=7, dtype=tf.float32),
'wrist/torque':
tf.io.FixedLenFeature(shape=3, dtype=tf.float32),
'wrist/force':
tf.io.FixedLenFeature(shape=3, dtype=tf.float32),
'pixels/basket_front_left':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/basket_back_left':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/basket_front_right':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/royale_camera_driver_depth':
tf.io.FixedLenFeature(shape=(171, 224, 1), dtype=tf.float32),
'pixels/royale_camera_driver_gray':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/usbcam0':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
'pixels/usbcam1':
tf.io.FixedLenFeature(shape=1, dtype=tf.string),
}
def _parse_example(example):
return _decode_images(tf.io.parse_single_example(example, _FEATURES))
def _decode_images(record):
for name, value in list(record.items()):
if value.dtype == tf.string:
record[name] = tf.io.decode_jpeg(value[0])
return record
| deepmind-research-master | sketchy/sketchy.py |
# Copyright 2018 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines some `predicates` for the predicate_task."""
import abc
import colorsys
import numpy as np
HSV_SATURATION = 0.5
HSV_ACTIVATED_SATURATION = 0.75
HSV_VALUE = 1.0
WALKER_GOAL_RGBA = [0, 0, 0]
WALKER_GOAL_PRESSED_RGBA = [128, 128, 128]
INACTIVE_OBSERVATION_VALUE = [-1] * 5
# Define globals for the special encoding.
MOVABLE_TYPES = {'walker': 0, 'box': 1}
TARGET_TYPES = {'box': 0, 'target': 1}
PREDICATE_TYPES = {'on': 0, 'close_to': 1, 'far_from': 2}
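# For example, under this encoding the observation vector [0, 0, 1, 2, 1] reads
# as "walker 0 close_to target 2", while `INACTIVE_OBSERVATION_VALUE` above
# marks a predicate slot that is unused in the current episode.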
class BasePredicate(object, metaclass=abc.ABCMeta):
"""Base class for all predicates."""
def __init__(self, walker):
self._walker = walker
@abc.abstractmethod
def reinitialize(self, random_state):
"""Reinitializes a new, potentially random, predicate state.
    The reinitialize method should reset to a new predicate state, which may
    change the `objects_in_use` reported by the `Predicate`. This method can be
    called multiple times before a final, binding set of predicates has been
    found.
Therefore no changes to the model that are not reversible should be made
here (setting colors etc). Any changes affecting the Mujoco model should
instead be made in the `activate_predicate` method call.
Args:
random_state: An instance of `np.RandomState` which may be seeded to
ensure a deterministic environment.
"""
pass
@abc.abstractmethod
def activate_predicate(self):
"""Activates the current predicate configuration.
Any changes that are non-reversible like setting object properties or
affinities *must* only be done in this method. At this point, the
`predicate_task` logic has confirmed that a valid predicate configuration
has been found.
"""
pass
@property
def objects_in_use(self):
"""Returns the `set` of objects used for this episode."""
return set()
@abc.abstractproperty
def observation_value(self):
"""Returns a `dict` to be used as the predicate observable."""
pass
@abc.abstractmethod
def is_active(self, physics):
"""Boolean method indicating whether the predicate has been activated.
If `True`, it implies the condition for the predicate has been satisfied
and the walker can be rewarded.
Args:
physics: An instance of `control.Physics`.
"""
pass
@property
def inactive_observation_value(self):
"""observation_value indicating a `Predicate` is inactive.
The `PredicateTask` randomly samples the number of active predicates to be
used on each episode. For a consistent `observation_spec`, the predicates
that are not active need a special observation_value that cannot be used
anywhere else.
Returns:
A special value indicating that the predicate is inactive and is not used
by any other predicate in the task.
"""
return INACTIVE_OBSERVATION_VALUE
class MoveWalkerToTarget(BasePredicate):
"""Predicate to move a walker to a specific target."""
def __init__(self, walker, target, target_index=0):
"""Predicate to move a walker or box to a target.
Args:
      walker: A locomotion `Walker` instance to use for this predicate.
target: `locomotion.prop` instance containing an `activated` property.
target_index: An 'int' argument to add to the observable to indicate the
index of the target.
"""
super(MoveWalkerToTarget, self).__init__(walker)
self._target = target
self._target_id = target_index
def reinitialize(self, random_state):
self._target.deregister_entities()
def activate_predicate(self):
self._target.register_entities(self._walker)
self._target.set_colors(WALKER_GOAL_RGBA, WALKER_GOAL_PRESSED_RGBA)
@property
def objects_in_use(self):
return set([self._walker, self._target])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['walker'], 0, TARGET_TYPES['target'], self._target_id,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target.activated
class MoveWalkerToRandomTarget(BasePredicate):
"""Predicate to move a walker to a random target."""
def __init__(self, walker, targets=None):
"""Predicate to move a walker or box to a target.
Args:
      walker: A locomotion `Walker` instance to use for this predicate.
targets: An optional list of `locomotion.prop` instances each of which
contains an `activated` property.
"""
super(MoveWalkerToRandomTarget, self).__init__(walker)
self._targets = targets
self._target_to_move_to = None
def reinitialize(self, random_state):
if self._target_to_move_to is not None:
self._target_to_move_to.deregister_entities()
self._target_to_move_to = random_state.choice(self._targets)
self._target_idx = self._targets.index(self._target_to_move_to)
def activate_predicate(self):
self._target_to_move_to.register_entities(self._walker)
self._target_to_move_to.set_colors(WALKER_GOAL_RGBA,
WALKER_GOAL_PRESSED_RGBA)
@property
def objects_in_use(self):
return set([self._walker, self._target_to_move_to])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['walker'], 0, TARGET_TYPES['target'], self._target_idx,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target_to_move_to.activated
class MoveWalkerToBox(BasePredicate):
"""Predicate to move a walker to a specific box."""
def __init__(self, walker, box, box_index=0, detection_region=None):
"""Predicate to move a walker to a specific box.
Args:
      walker: A locomotion `Walker` instance to use for this predicate.
box: A `manipulation.prop` instance to move.
box_index: An integer index to use for the observable to identify the
`box`.
detection_region: A 2-tuple indicating the tolerances in x and y for the
walker to be deemed `close_to` the box. If `None`, contact based
detection is used.
"""
super(MoveWalkerToBox, self).__init__(walker)
self._box = box
self._detection_region = detection_region
self._box_index = box_index
self._walker_geoms = None
def reinitialize(self, random_state):
if self._walker_geoms is None:
# pylint: disable=protected-access
self._walker_geoms = set(self._walker._mjcf_root.find_all('geom'))
def activate_predicate(self):
self._box.geom.rgba[:3] = WALKER_GOAL_RGBA
@property
def objects_in_use(self):
return set([self._walker, self._box])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['walker'], 0, TARGET_TYPES['box'], self._box_index,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
if self._detection_region is None:
return self._is_walker_contacting_box(physics)
else:
return np.all(
np.abs(
physics.bind(self._walker.root_body).xpos -
physics.bind(self._box.geom).xpos)[:2] < self._detection_region)
def _is_walker_contacting_box(self, physics):
walker_geom_ids = [
physics.bind(geom).element_id for geom in self._walker_geoms
]
for contact in physics.data.contact:
contact_geoms = set([contact.geom1, contact.geom2])
if (physics.bind(self._box.geom).element_id in contact_geoms and
contact_geoms.intersection(walker_geom_ids)):
return True
return False
class MoveBoxToBox(BasePredicate):
"""Predicate to move a walker to a specific box."""
def __init__(self,
walker,
first_box,
second_box,
first_box_index=0,
second_box_index=1,
detection_region=None):
"""Predicate to move a walker to a specific box.
Args:
      walker: A locomotion `Walker` instance to use for this predicate.
first_box: A `manipulation.prop` instance to move.
second_box: A `manipulation.prop` instance to move.
first_box_index: An integer index to use for the observable to identify
the `box`.
second_box_index: An integer index to use for the observable to identify
the `box`.
detection_region: A 2-tuple indicating the tolerances in x and y for the
walker to be deemed `close_to` the box. If `None`, contact based
detection is used.
"""
super(MoveBoxToBox, self).__init__(walker)
self._first_box = first_box
self._second_box = second_box
self._detection_region = detection_region
self._first_box_index = first_box_index
self._second_box_index = second_box_index
self._walker_geoms = None
def reinitialize(self, random_state):
if self._walker_geoms is None:
# pylint: disable=protected-access
self._walker_geoms = set(self._walker._mjcf_root.find_all('geom'))
def activate_predicate(self):
self._first_box.geom.rgba[:3] = WALKER_GOAL_RGBA
@property
def objects_in_use(self):
return set([self._first_box, self._second_box])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['box'], self._first_box_index, TARGET_TYPES['box'],
self._second_box_index, PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
if self._detection_region is None:
return self._are_boxes_in_contact(physics)
else:
return np.all(
np.abs(
physics.bind(self._first_box.geom).xpos -
physics.bind(self._second_box.geom).xpos)[:2] <
self._detection_region)
def _are_boxes_in_contact(self, physics):
for contact in physics.data.contact:
contact_geoms = set([contact.geom1, contact.geom2])
if (physics.bind(self._first_box.geom).element_id in contact_geoms and
physics.bind(self._second_box.geom).element_id in contact_geoms):
return True
return False
class MoveBoxToTarget(BasePredicate):
"""Predicate to move a walker to a specific target."""
def __init__(self, walker, box, target, box_index=0, target_index=0):
"""Predicate to move a walker or box to a target.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
box: A `manipulation.prop` to move to the target.
target: `locomotion.prop` instance containing an `activated` property.
box_index: An 'int' argument to add to the observable to indicate the
index of the box.
target_index: An 'int' argument to add to the observable to indicate the
index of the target.
"""
super(MoveBoxToTarget, self).__init__(walker)
self._box = box
self._target = target
self._box_id = box_index
self._target_id = target_index
self._original_box_size = np.copy(box.geom.size)
self._rgb = None
self._activated_rgb = None
def reinitialize(self, random_state):
self._target.deregister_entities()
self._get_box_properties(random_state)
def _get_box_properties(self, random_state):
hue0 = random_state.uniform()
hue = (hue0 + self._target_id) % 1.0
self._rgb = colorsys.hsv_to_rgb(hue, HSV_SATURATION, HSV_VALUE)
self._activated_rgb = colorsys.hsv_to_rgb(hue, HSV_ACTIVATED_SATURATION,
HSV_VALUE)
def activate_predicate(self):
self._target.set_colors(self._rgb, self._activated_rgb)
self._box.geom.rgba[:3] = self._rgb
self._target.register_entities(self._box)
@property
def objects_in_use(self):
return set([self._box, self._target])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['box'], self._box_id, TARGET_TYPES['target'],
self._target_id, PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target.activated
class MoveBoxToRandomTarget(BasePredicate):
"""Predicate to move a walker to a random target."""
def __init__(self, walker, box, box_index=0, targets=None):
"""Predicate to move a walker or box to a target.
Args:
walker: An locomotion `Walker` instance to use for this predicate.
box: A `manipulation.prop` to move to the target.
box_index: An optional 'int' argument to add to the observable to indicate
the index of the box.
targets: An optional list of `locomotion.prop` instances each of which
contains an `activated` property.
"""
super(MoveBoxToRandomTarget, self).__init__(walker)
self._targets = targets
self._box_to_move = box
self._box_index = box_index
self._target_to_move_to = None
self._original_box_size = np.copy(box.geom.size)
self._rgb = None
self._activated_rgb = None
def reinitialize(self, random_state):
if self._target_to_move_to is not None:
self._target_to_move_to.deregister_entities()
self._target_to_move_to = random_state.choice(self._targets)
self._target_idx = self._targets.index(self._target_to_move_to)
self._get_box_properties(random_state)
def _get_box_properties(self, random_state):
hue0 = random_state.uniform()
hue = (hue0 + (self._target_idx / len(self._targets))) % 1.0
self._rgb = colorsys.hsv_to_rgb(hue, HSV_SATURATION, HSV_VALUE)
self._activated_rgb = colorsys.hsv_to_rgb(hue, HSV_ACTIVATED_SATURATION,
HSV_VALUE)
def activate_predicate(self):
self._target_to_move_to.set_colors(self._rgb, self._activated_rgb)
self._box_to_move.geom.rgba[:3] = self._rgb
self._target_to_move_to.register_entities(self._box_to_move)
@property
def objects_in_use(self):
return set([self._box_to_move, self._target_to_move_to])
@property
def observation_value(self):
return np.array([
MOVABLE_TYPES['box'], self._box_index,
TARGET_TYPES['target'], self._target_idx,
PREDICATE_TYPES['close_to']
])
def is_active(self, physics):
return self._target_to_move_to.activated
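# Worked sketch of the hue spacing used by `_get_box_properties` above:
# target hues are offset by `idx / num_targets` from a random base hue so each
# box/target pairing gets a visually distinct color. The default arguments are
# assumptions for illustration only.
def _example_hue_spacing(hue0=0.1, num_targets=4):
  """Returns the (non-activated) RGB color implied for each target index."""
  return [
      colorsys.hsv_to_rgb((hue0 + i / num_targets) % 1.0, HSV_SATURATION,
                          HSV_VALUE) for i in range(num_targets)
  ]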
| deepmind-research-master | box_arrangement/predicates.py |
# Copyright 2018 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A task where different `Predicate`s need to be solved.
In each episode a spiking reward is given for each `Predicate` solved, with an
extra reward bonus added when all of the predicates are solved. On each episode
the number of predicates is sampled randomly. This provides a common interface
for specifying distributions over tasks that range in difficulty but share
common components. Each `Predicate` involves some manipulation of the walker,
props and targets, which allows rich configurations of tasks to be defined.
"""
import colorsys
import functools
from dm_control import composer
from dm_control.composer.observation import observable
from dm_env import specs
import numpy as np
_FLOOR_GAP_CHAR = '#'
_AMBIENT_HEADLIGHT = 0.8
_HSV_SATURATION = 0.5
_HSV_ACTIVATED_SATURATION = 0.75
_HSV_VALUE = 1.0
_PROP_SIZE = 0.5
_MAX_ITERATIONS = 1000
def _generate_target_permutation(num_targets, random_state):
targets = list(range(num_targets))
random_state.shuffle(targets)
return targets
class PredicateTask(composer.Task):
"""Requires objects to be moved onto targets."""
def __init__(self,
walker,
maze_arena,
predicates,
props=None,
targets=None,
max_num_predicates=1,
randomize_num_predicates=False,
predicate_prob=None,
reward_scale=1.0,
terminating_reward_bonus=5.0,
regenerate_predicates=False,
physics_timestep=0.001,
control_timestep=0.025,
alive_threshold=-0.5):
"""Initializes a task with multiple sub-components(predicates) to be solved.
This task essentially contains different flavors of go to target. The
task contains a walker, props and target positions. To solve the entire
task, the walker would need to solve a certain number of 'predicates' or
sub-tasks. For instance, the task could contain 2 predicates for the
walker going to a target position and the walker moving a box to a target
    position. In such a case, there is an implicit ordering in which the
    walker needs to solve the sub-tasks to complete the overall task.
Args:
walker: A `Walker` instance.
maze_arena: An `Entity` that defines a maze-like arena.
      predicates: A list of `Predicate` instances for this task.
props: An optional list of `manipulation.prop` instances for the task.
These are used to generate observables for the task.
targets: An optional list of `locomotion.prop` instances for the task.
These are used to generate observables for the task.
max_num_predicates: The maximum number of predicates to use in each
episode of the task.
      randomize_num_predicates: A `bool` flag indicating whether the number of
        `valid` predicates should be randomized for each episode. If set to
        `True`, then on each episode, between 1 and `max_num_predicates`
        predicates are chosen as valid and
        `predicate.inactive_observation_value` is output for the remaining
        slots in the observation.
predicate_prob: An optional `list` containing the probabilities for each
of the `predicates`. If not `None`, must have the same length as
        `predicates`.
reward_scale: `float` to scale the reward.
terminating_reward_bonus: A bonus added to the reward when all predicates
have been solved.
      regenerate_predicates: A `bool` flag which, when set, spawns a new set of
        predicates once the previous set is solved, instead of terminating the
        episode.
physics_timestep: The time step of the physics simulation.
control_timestep: Should be an integer multiple of the physics time step.
alive_threshold: Aliveness in [-1., 0.].
Raises:
      ValueError: If `max_num_predicates` is greater than the number of
        `predicates` provided.
"""
if max_num_predicates > len(predicates):
raise ValueError('Not enough predicates for task. The maximum number of '
'predicates can be '
'{} but only {} predicates provided.'.format(
max_num_predicates, len(predicates)))
self._arena = maze_arena
self._walker = walker
self._reward_scale = reward_scale
self._alive_threshold = alive_threshold
self._terminating_reward_bonus = terminating_reward_bonus
self._arena.mjcf_model.visual.headlight.ambient = [_AMBIENT_HEADLIGHT] * 3
maze_arena.text_maze_regenerated_hook = self._regenerate_positions
self._max_num_predicates = max_num_predicates
self._predicates = predicates
self._predicate_prob = predicate_prob
self._randomize_num_predicates = randomize_num_predicates
self._active_predicates = []
self._regen_predicates = regenerate_predicates
self._reward = 0
# Targets.
self._targets = targets
for target in targets:
self._arena.attach(target)
if props is None:
props = []
# Props.
self._props = props
# M Props + 1 Walker and we choose 'N' predicates as the task.
for prop in props:
prop.geom.rgba = [0, 0, 0, 1] # Will be randomized for each episode.
self._arena.add_free_entity(prop)
# Create walkers and corresponding observables.
walker.create_root_joints(self._arena.attach(walker))
self._create_per_walker_observables(walker)
self._generate_target_permutation = None
maze_arena.text_maze_regenerated_hook = self._regenerate_positions
# Set time steps.
self.set_timesteps(
physics_timestep=physics_timestep, control_timestep=control_timestep)
def _create_per_walker_observables(self, walker):
# Enable proprioceptive observables.
for obs in (walker.observables.proprioception +
walker.observables.kinematic_sensors +
[walker.observables.position, walker.observables.orientation]):
obs.enabled = True
xpos_origin_callable = lambda phys: phys.bind(walker.root_body).xpos
# Egocentric prop positions.
# For each prop, we add the positions for the 8 corners using the sites.
for prop_id, prop in enumerate(self._props):
def _prop_callable(physics, prop=prop):
return [physics.bind(s).xpos for s in prop.corner_sites]
if len(self._props) > 1:
observable_name = 'prop_{}_position'.format(prop_id)
else:
observable_name = 'prop_position'
walker.observables.add_egocentric_vector(
observable_name,
observable.Generic(_prop_callable),
origin_callable=xpos_origin_callable)
# Egocentric target positions.
def _target_callable(physics):
target_list = []
for target in self._targets:
target_list.append(target.site_pos(physics))
return np.array(target_list)
walker.observables.add_egocentric_vector(
'target_positions',
observable.Generic(_target_callable),
origin_callable=xpos_origin_callable)
# Whether targets are activated.
def _predicate_activated_callable(physics):
predicate_activated_list = np.full(self._max_num_predicates, True)
for i, predicate in enumerate(self._active_predicates):
predicate_activated_list[i] = predicate.is_active(physics)
return predicate_activated_list
walker.observables.add_observable(
'predicates_activated',
observable.Generic(_predicate_activated_callable))
self._observables = self._walker.observables.as_dict()
# Predicate observables.
for pred_idx in range(self._max_num_predicates):
def _predicate_callable(_, pred_idx=pred_idx):
"""Callable for the predicate observation."""
        if pred_idx < len(self._active_predicates):
          predicate = self._active_predicates[pred_idx]
          return predicate.observation_value
        else:
          # Use any predicate's inactive observation to fill the rest.
predicate = self._predicates[0]
return predicate.inactive_observation_value
predicate_name = 'predicate_{}'.format(pred_idx)
self._observables[predicate_name] = observable.Generic(
_predicate_callable)
self._observables[predicate_name].enabled = True
@property
def observables(self):
return self._observables
@property
def name(self):
return 'predicate_task'
@property
def root_entity(self):
return self._arena
def _regenerate_positions(self):
target_permutation = self._generate_target_permutation(
len(self._arena.target_positions))
num_permutations = len(self._props) + len(self._targets)
target_permutation = target_permutation[:num_permutations]
if len(self._props) + len(self._targets) > len(
self._arena.target_positions):
raise RuntimeError(
'The generated maze does not contain enough target positions '
'for the requested number of props ({}) and targets ({}): got {}.'
.format(
len(self._props), len(self._targets),
len(self._arena.target_positions)))
self._prop_positions = []
for i in range(len(self._props)):
self._prop_positions.append(
self._arena.target_positions[target_permutation[i]])
self._target_positions = []
for i in range(len(self._targets)):
idx = i + len(self._props)
self._target_positions.append(
self._arena.target_positions[target_permutation[idx]])
def initialize_episode_mjcf(self, random_state):
self._generate_target_permutation = functools.partial(
_generate_target_permutation, random_state=random_state)
self._arena.regenerate()
# Set random colors for the props and targets.
self._set_random_colors(random_state)
self._set_active_predicates(random_state)
def _set_active_predicates(self, random_state):
# Reinitialize predicates to set any properties they want.
iteration = 0
valid_set_found = False
while not valid_set_found and iteration < _MAX_ITERATIONS:
for predicate in self._predicates:
predicate.reinitialize(random_state)
if self._randomize_num_predicates and self._max_num_predicates > 1:
num_predicates = random_state.choice(
list(range(1, self._max_num_predicates + 1)), size=1)[0]
else:
num_predicates = self._max_num_predicates
valid_set_found = self._choose_random_predicates(random_state,
num_predicates)
iteration += 1
if not valid_set_found:
raise ValueError(
          'Could not find a set of active predicates with '
          'unique objects after {} iterations.'.format(_MAX_ITERATIONS))
for predicate in self._active_predicates:
predicate.activate_predicate()
def _choose_random_predicates(self, random_state, num_predicates):
self._active_predicates = random_state.choice(
self._predicates,
replace=False,
size=num_predicates,
p=self._predicate_prob)
objects_in_common = self._active_predicates[0].objects_in_use
for predicate in self._active_predicates[1:]:
new_objects = predicate.objects_in_use
if objects_in_common.intersection(new_objects):
return False
      # `set.union` returns a new set, so re-assign to actually accumulate.
      objects_in_common = objects_in_common.union(new_objects)
return True
def _set_random_colors(self, random_state):
hue0 = random_state.uniform()
hues = [(hue0 + i / len(self._targets)) % 1.0
for i in range(len(self._targets))]
rgbs = [
colorsys.hsv_to_rgb(hue, _HSV_SATURATION, _HSV_VALUE) for hue in hues
]
activated_rgbs = [
colorsys.hsv_to_rgb(hue, _HSV_ACTIVATED_SATURATION, _HSV_VALUE)
for hue in hues
]
    # There are fewer props than targets.
    # Pick colors for each prop that are as far apart as possible.
if self._props:
targets_per_prop = len(self._targets) // len(self._props)
else:
targets_per_prop = len(self._targets)
for prop_id in range(len(self._props)):
# The first few targets have to match the props' color.
rgb_id = prop_id * targets_per_prop
self._props[prop_id].geom.rgba[:3] = rgbs[rgb_id]
self._targets[prop_id].set_colors(rgbs[rgb_id], activated_rgbs[rgb_id])
# Assign colors not used by any prop to decoy targets.
for decoy_target_offset in range(targets_per_prop - 1):
target_id = len(
self._props) + prop_id * targets_per_prop + decoy_target_offset
rgb_id = prop_id * targets_per_prop + decoy_target_offset
self._targets[target_id].set_colors(rgbs[rgb_id], rgbs[rgb_id])
# Remainder loop for targets.
for target_id in range(targets_per_prop * len(self._props),
len(self._targets)):
self._targets[target_id].set_colors(rgbs[target_id], rgbs[target_id])
def initialize_episode(self, physics, random_state):
self._first_step = True
self._was_active = [False] * len(self._active_predicates)
walker = self._walker
spawn_indices = random_state.permutation(len(self._arena.spawn_positions))
spawn_index = spawn_indices[0]
walker.reinitialize_pose(physics, random_state)
spawn_position = self._arena.spawn_positions[spawn_index]
spawn_rotation = random_state.uniform(-np.pi, np.pi)
spawn_quat = np.array(
[np.cos(spawn_rotation / 2), 0, 0,
np.sin(spawn_rotation / 2)])
walker.shift_pose(
physics, [spawn_position[0], spawn_position[1], 0.0],
spawn_quat,
rotate_velocity=True)
for prop, prop_xy_position in zip(self._props, self._prop_positions):
# Position at the middle of a maze cell.
prop_position = np.array(
[prop_xy_position[0], prop_xy_position[1], prop.geom.size[2]])
# Randomly rotate the prop around the z-axis.
prop_rotation = random_state.uniform(-np.pi, np.pi)
prop_quat = np.array(
[np.cos(prop_rotation / 2), 0, 0,
np.sin(prop_rotation / 2)])
# Taking into account the prop's orientation, first calculate how much we
# can displace the prop from the center of a maze cell without any part of
# it sticking out of the cell.
x, y, _ = prop.geom.size
cos = np.cos(prop_rotation)
sin = np.sin(prop_rotation)
x_max = max([np.abs(x * cos - y * sin), np.abs(x * cos + y * sin)])
y_max = max([np.abs(y * cos + x * sin), np.abs(y * cos - x * sin)])
prop_max_displacement = self._arena.xy_scale / 2 - np.array(
[x_max, y_max])
assert np.all(prop_max_displacement >= 0)
prop_max_displacement *= 0.99 # Safety factor.
# Then randomly displace the prop from the center of the maze cell.
prop_position[:2] += prop_max_displacement * random_state.uniform(
-1, 1, 2)
# Commit the prop's final pose.
prop.set_pose(physics, position=prop_position, quaternion=prop_quat)
for target, target_position in zip(self._targets, self._target_positions):
target_position[2] = _PROP_SIZE
target.set_position(physics, target_position)
def before_step(self, physics, actions, random_state):
if isinstance(actions, list):
actions = np.concatenate(actions)
super(PredicateTask, self).before_step(physics, actions, random_state)
if self._first_step:
self._first_step = False
else:
self._was_active = [
predicate.is_active(physics) for predicate in self._active_predicates
]
def after_step(self, physics, random_state):
if self._all_predicates_satisfied() and self._regen_predicates:
self._set_random_colors(random_state)
self._set_active_predicates(random_state)
super(PredicateTask, self).after_step(physics, random_state)
def get_reward(self, physics):
reward = 0.0
for predicate, was_active in zip(self._active_predicates, self._was_active):
if predicate.is_active(physics) and not was_active:
reward += 1.0
elif was_active and not predicate.is_active(physics):
reward -= 1.0
if self._all_predicates_satisfied():
reward += self._terminating_reward_bonus
self._reward = reward
return reward * self._reward_scale
def _all_predicates_satisfied(self):
return sum(self._was_active) == len(self._active_predicates)
def should_terminate_episode(self, physics):
return ((self._all_predicates_satisfied() and not self._regen_predicates) or
self._walker.aliveness(physics) < self._alive_threshold)
def get_discount(self, physics):
if self.should_terminate_episode(physics):
return 0.0
return 1.0
def get_reward_spec(self):
return specs.Array(shape=[], dtype=np.float32)
def get_discount_spec(self):
return specs.Array(shape=[], dtype=np.float32)
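# Hedged construction sketch: `walker`, `arena`, `my_predicates` and
# `my_targets` are placeholders assumed to be built elsewhere (see
# task_examples.py for concrete configurations). With these settings, each
# newly satisfied predicate yields a +1 spike (-1 if it becomes unsatisfied
# again) and solving all predicates adds the terminating bonus of 5.0, all
# scaled by `reward_scale`.
def _example_build_environment(walker, arena, my_predicates, my_targets):
  """Builds a PredicateTask and wraps it in a composer Environment."""
  task = PredicateTask(
      walker=walker,
      maze_arena=arena,
      predicates=my_predicates,
      targets=my_targets,
      max_num_predicates=1,
      reward_scale=1.0,
      terminating_reward_bonus=5.0)
  return composer.Environment(task=task)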
| deepmind-research-master | box_arrangement/predicate_task.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | box_arrangement/__init__.py |
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['absl-py', 'dm_control', 'numpy', 'dm_env']
setup(
name='box_arrangement',
version='0.1',
description=('Sparse reward tasks involving moving and pushing boxes to'
' targets.'),
url='https://github.com/deepmind/deepmind-research/box_arrangement',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| deepmind-research-master | box_arrangement/setup.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for box_arrangement.predicate_task."""
from absl.testing import absltest
from dm_control import composer
from dm_control.entities import props
from dm_control.locomotion import arenas
from dm_control.locomotion import walkers
import numpy as np
from box_arrangement import predicate_task
from box_arrangement import predicates
_EGOCENTRIC_OBSERVABLES = [
"walker/body_height",
"walker/end_effectors_pos",
"walker/joints_pos",
"walker/joints_vel",
"walker/sensors_accelerometer",
"walker/sensors_gyro",
"walker/sensors_velocimeter",
"walker/world_zaxis",
]
class PredicateTaskTest(absltest.TestCase):
def _setup_basic_gtt_task(self, num_targets=1, reward_scale=1.0):
walker = walkers.Ant()
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = [predicates.MoveWalkerToRandomTarget(walker, targets)]
self._task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets,
randomize_num_predicates=False,
reward_scale=reward_scale,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
self._env = composer.Environment(self._task, random_state=random_state)
self._walker = walker
self._targets = targets
def test_observables(self):
self._setup_basic_gtt_task()
timestep = self._env.reset()
self.assertIn("predicate_0", timestep.observation)
self.assertIn("walker/target_positions", timestep.observation)
for observable in _EGOCENTRIC_OBSERVABLES:
self.assertIn(observable, timestep.observation)
def test_termination_and_discount(self):
self._setup_basic_gtt_task()
self._env.reset()
target_pos = (0, 0, 0.5)
# Initialize the walker away from the target.
self._walker.set_pose(
self._env.physics, position=(-2, 0, 0.0), quaternion=(1, 0, 0, 0))
self._targets[0].set_position(
self._env.physics,
target_pos)
self._env.physics.forward()
zero_action = np.zeros_like(self._env.physics.data.ctrl)
for _ in range(10):
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 1.0)
self.assertEqual(timestep.reward, 0.0)
walker_pos = (0, 0, 0.0)
self._walker.set_pose(
self._env.physics,
position=walker_pos)
self._env.physics.forward()
# For a single predicate, first the reward is +1.0 for activating the
# predicate
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 1.0)
self.assertEqual(timestep.reward, 1.0)
# If the predicate is active and *remains* active, the discount gets to 0.0
# and the terminating reward bonus is given.
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 0.0)
self.assertEqual(timestep.reward, 2.0)
# Make sure this is a termination step.
self.assertTrue(timestep.last())
def test_reward_scaling(self):
self._setup_basic_gtt_task(reward_scale=10.0)
self._env.reset()
zero_action = np.zeros_like(self._env.physics.data.ctrl)
target_pos = (0, 0, 0.5)
walker_pos = (0, 0, 0.0)
self._targets[0].set_position(self._env.physics, target_pos)
self._walker.set_pose(self._env.physics, position=walker_pos)
self._env.physics.forward()
# For a single predicate, first the reward is +1.0 for activating the
# predicate
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 1.0)
self.assertEqual(timestep.reward, 10.0)
# If the predicate is active and *remains* active, the discount gets to 0.0
# and the terminating reward bonus is given.
timestep = self._env.step(zero_action)
self.assertEqual(timestep.discount, 0.0)
self.assertEqual(timestep.reward, 20.0)
# Make sure this is a termination step.
self.assertTrue(timestep.last())
def test_too_few_predicates_raises_exception(self):
walker = walkers.Ant()
num_targets = 1
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = []
with self.assertRaisesWithLiteralMatch(
ValueError, "Not enough predicates for task."
" The maximum number of "
"predicates can be "
"1 but only 0 predicates provided."):
predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets,
randomize_num_predicates=False,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
def test_error_too_few_targets(self):
walker = walkers.Ant()
num_targets = 5
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = [predicates.MoveWalkerToRandomTarget(walker, targets)]
task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets,
randomize_num_predicates=False,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
env = composer.Environment(task, random_state=random_state)
with self.assertRaisesWithLiteralMatch(
RuntimeError, "The generated maze does not contain enough target "
"positions for the requested number of props (0) and targets (5): "
"got 2."
):
env.reset()
def test_error_if_no_predicates_found(self):
walker = walkers.Ant()
num_targets = 2
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=6, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
# Moving the walker to two targets is not possible since the walker is a
# shared object in use.
test_predicates = [predicates.MoveWalkerToTarget(walker, targets[0]),
predicates.MoveWalkerToTarget(walker, targets[1])]
task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets[1:],
randomize_num_predicates=False,
max_num_predicates=2,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
env = composer.Environment(task, random_state=random_state)
with self.assertRaisesWithLiteralMatch(
ValueError, "Could not find set of active predicates"
" with unique objects are after 1000 iterations."):
env.reset()
# However moving to one of the two targets is fine.
walker = walkers.Ant()
num_targets = 2
text_maze = arenas.padded_room.PaddedRoom(
room_size=8, num_objects=6, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=text_maze)
targets = []
for _ in range(num_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5],
size=[0.5, 0.5, 0.5],
inverted=False,
visible=True))
test_predicates = [predicates.MoveWalkerToTarget(walker, targets[0]),
predicates.MoveWalkerToTarget(walker, targets[1])]
task = predicate_task.PredicateTask(
walker=walker,
maze_arena=maze_arena,
predicates=test_predicates,
targets=targets[1:],
randomize_num_predicates=False,
max_num_predicates=1,
reward_scale=1.0,
terminating_reward_bonus=2.0,
)
random_state = np.random.RandomState(12345)
env = composer.Environment(task, random_state=random_state)
env.reset()
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | box_arrangement/predicate_task_test.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to launch viewer with an example environment."""
from absl import app
from absl import flags
from dm_control import viewer
from box_arrangement import task_examples
FLAGS = flags.FLAGS
flags.DEFINE_enum('task', 'go_to_target', [
'go_to_target', 'move_box', 'move_box_or_go_to_target',
'move_box_and_go_to_target'
], 'The task to visualize.')
TASKS = {
'go_to_target': task_examples.go_to_k_targets,
'move_box': task_examples.move_box,
'move_box_or_go_to_target': task_examples.move_box_or_gtt,
'move_box_and_go_to_target': task_examples.move_box_and_gtt,
}
def main(unused_argv):
viewer.launch(environment_loader=TASKS[FLAGS.task])
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | box_arrangement/explore.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example tasks used in publications."""
from dm_control import composer
from dm_control.entities import props
from dm_control.locomotion import arenas as locomotion_arenas
from dm_control.locomotion import walkers
from dm_control.manipulation import props as manipulation_props
from box_arrangement import dmlab_assets
from box_arrangement import predicates as predicates_module
from box_arrangement.predicate_task import PredicateTask
DEFAULT_TIME_LIMIT = 20.0
DEFAULT_CONTROL_TIMESTEP = 0.05
MIN_ROOM_SIZE = 3
def _make_predicate_task(n_boxes, n_targets,
include_gtt_predicates, include_move_box_predicates,
max_num_predicates, control_timestep, time_limit):
"""Auxiliary function to construct different predicates tasks."""
walker = walkers.Ant()
skybox = dmlab_assets.SkyBox(style='sky_03')
wall = dmlab_assets.WallTextures(style='style_03')
floor = dmlab_assets.FloorTextures(style='style_03')
  # Scale the room size up as the number of objects grows.
num_objects = n_boxes + n_targets
room_size = max(MIN_ROOM_SIZE, num_objects)
text_maze = locomotion_arenas.padded_room.PaddedRoom(
room_size=room_size, num_objects=num_objects, pad_with_walls=True)
arena = locomotion_arenas.MazeWithTargets(
maze=text_maze,
skybox_texture=skybox,
wall_textures=wall,
floor_textures=floor)
boxes = []
for _ in range(n_boxes):
boxes.append(
manipulation_props.BoxWithSites(mass=1.5, half_lengths=[0.5, 0.5, 0.5]))
targets = []
for _ in range(n_targets):
targets.append(
props.PositionDetector(
pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False,
visible=True))
predicates = []
if include_gtt_predicates:
predicates.append(
predicates_module.MoveWalkerToRandomTarget(
walker=walker, targets=targets))
if include_move_box_predicates:
for box_idx in range(len(boxes)):
predicates.append(
predicates_module.MoveBoxToRandomTarget(
walker=walker,
box=boxes[box_idx],
box_index=box_idx,
targets=targets))
task = PredicateTask(
walker=walker,
maze_arena=arena,
predicates=predicates,
props=boxes,
targets=targets,
max_num_predicates=max_num_predicates,
randomize_num_predicates=False,
reward_scale=10.,
regenerate_predicates=False,
physics_timestep=0.005,
control_timestep=control_timestep)
env = composer.Environment(task=task, time_limit=time_limit)
return env
def go_to_k_targets(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `go_to_k_targets` task."""
return _make_predicate_task(
n_boxes=0,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=False,
max_num_predicates=1,
control_timestep=control_timestep,
time_limit=time_limit)
def move_box(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=False,
include_move_box_predicates=True,
max_num_predicates=1,
control_timestep=control_timestep,
time_limit=time_limit)
def move_box_or_gtt(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box_or_gtt` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=True,
max_num_predicates=1,
control_timestep=control_timestep,
time_limit=time_limit)
def move_box_and_gtt(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box_or_gtt` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=True,
max_num_predicates=2,
control_timestep=control_timestep,
time_limit=time_limit)
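# Hedged usage sketch: roll out one of the example tasks above for a few steps
# with a zero action. Assumes the dm_control dependencies are installed; the
# local numpy import keeps this file's module-level imports unchanged.
def _example_zero_action_rollout(num_steps=10):
  """Resets `go_to_k_targets` and steps it with zeros; illustrative only."""
  import numpy as np
  env = go_to_k_targets(n_targets=3)
  timestep = env.reset()
  spec = env.action_spec()
  zero_action = np.zeros(spec.shape, dtype=spec.dtype)
  for _ in range(num_steps):
    timestep = env.step(zero_action)
  return timestep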
| deepmind-research-master | box_arrangement/task_examples.py |
# Copyright 2018 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepMind Lab textures."""
from dm_control import composer
from dm_control import mjcf
from labmaze import assets as labmaze_assets
class SkyBox(composer.Entity):
"""Represents a texture asset for the sky box."""
def _build(self, style):
labmaze_textures = labmaze_assets.get_sky_texture_paths(style)
self._mjcf_root = mjcf.RootElement(model='dmlab_' + style)
self._texture = self._mjcf_root.asset.add(
'texture', type='skybox', name='texture',
fileleft=labmaze_textures.left, fileright=labmaze_textures.right,
fileup=labmaze_textures.up, filedown=labmaze_textures.down,
filefront=labmaze_textures.front, fileback=labmaze_textures.back)
@property
def mjcf_model(self):
return self._mjcf_root
@property
def texture(self):
return self._texture
class WallTextures(composer.Entity):
"""Represents wall texture assets."""
def _build(self, style):
labmaze_textures = labmaze_assets.get_wall_texture_paths(style)
self._mjcf_root = mjcf.RootElement(model='dmlab_' + style)
self._textures = []
for texture_name, texture_path in labmaze_textures.items():
self._textures.append(self._mjcf_root.asset.add(
'texture', type='2d', name=texture_name,
file=texture_path.format(texture_name)))
@property
def mjcf_model(self):
return self._mjcf_root
@property
def textures(self):
return self._textures
class FloorTextures(composer.Entity):
"""Represents floor texture assets."""
def _build(self, style):
labmaze_textures = labmaze_assets.get_floor_texture_paths(style)
self._mjcf_root = mjcf.RootElement(model='dmlab_' + style)
self._textures = []
for texture_name, texture_path in labmaze_textures.items():
self._textures.append(self._mjcf_root.asset.add(
'texture', type='2d', name=texture_name,
file=texture_path.format(texture_name)))
@property
def mjcf_model(self):
return self._mjcf_root
@property
def textures(self):
return self._textures
| deepmind-research-master | box_arrangement/dmlab_assets.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet (post-activation) with FixUp."""
# pylint: disable=invalid-name
import functools
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
nonlinearities = {
'swish': jax.nn.silu,
'relu': jax.nn.relu,
'identity': lambda x: x}
class FixUp_ResNet(hk.Module):
"""Fixup based ResNet."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, num_classes, variant='ResNet50', width=4,
stochdepth_rate=0.1, drop_rate=None,
activation='relu', fc_init=jnp.zeros,
name='FixUp_ResNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
# Get variant info
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = nonlinearities[activation]
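    # Note: `variant_dict` above only provides 'depth', so callers are expected
    # to pass an explicit `drop_rate`; leaving it as None would raise KeyError.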
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = functools.partial(hk.Conv2D,
with_bias=False)
# Stem
ch = int(16 * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2,
padding='SAME',
name='initial_conv')
# Body
self.blocks = []
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args):
for block_index in range(stage_depth):
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [ResBlock(ch, block_width, num_blocks,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
)]
ch = block_width
index += 1
# Head
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
bias1 = hk.get_parameter('bias1', (), x.dtype, init=jnp.zeros)
out = self.activation(out + bias1)
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(out, [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
bias2 = hk.get_parameter('bias2', (), pool.dtype, init=jnp.zeros)
outputs['logits'] = self.fc(pool + bias2)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Count flops for classifier
flops += [self.blocks[-1].out_ch * self.fc.output_size]
return flops, sum(flops)
class ResBlock(hk.Module):
"""Post-activation Fixup Block."""
def __init__(self, in_ch, out_ch, num_blocks, bottleneck_ratio=0.25,
kernel_size=3, stride=1,
which_conv=hk.Conv2D, activation=jax.nn.relu,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
# Bottleneck width
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv)
conv0_init = hk.initializers.RandomNormal(
stddev=((2 / self.width)**0.5) * (num_blocks**(-0.25)))
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0', w_init=conv0_init)
# Grouped NxN conv
conv1_init = hk.initializers.RandomNormal(
stddev=((2 / (self.width * (kernel_size**2)))**0.5)
* (num_blocks**(-0.25)))
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', name='conv1', w_init=conv1_init)
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2', w_init=hk.initializers.Constant(0))
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
shortcut_init = hk.initializers.RandomNormal(
stddev=(2 / self.out_ch) ** 0.5)
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
stride=stride, padding='SAME',
name='conv_shortcut',
w_init=shortcut_init)
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
bias1a = hk.get_parameter('bias1a', (), x.dtype, init=jnp.zeros)
bias1b = hk.get_parameter('bias1b', (), x.dtype, init=jnp.zeros)
bias2a = hk.get_parameter('bias2a', (), x.dtype, init=jnp.zeros)
bias2b = hk.get_parameter('bias2b', (), x.dtype, init=jnp.zeros)
bias3a = hk.get_parameter('bias3a', (), x.dtype, init=jnp.zeros)
bias3b = hk.get_parameter('bias3b', (), x.dtype, init=jnp.zeros)
scale = hk.get_parameter('scale', (), x.dtype, init=jnp.ones)
out = x + bias1a
shortcut = out
if self.use_projection: # Downsample with conv1x1
shortcut = self.conv_shortcut(shortcut)
out = self.conv0(out)
out = self.activation(out + bias1b)
out = self.conv1(out + bias2a)
out = self.activation(out + bias2b)
out = self.conv2(out + bias3a)
out = out * scale + bias3b
# Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
    # Residual connection.
out = out + shortcut
return self.activation(out), res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, contract_flops, sc_flops])
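# Minimal forward-pass sketch (illustrative; the hyperparameters are
# assumptions): wrap the model in Haiku's functional API. `drop_rate` is passed
# explicitly because the ResNet `variant_dict` above does not define one.
def _example_forward_pass(images, rng):
  """Initializes FixUp_ResNet and returns logits for a [N, H, W, 3] batch."""
  def forward_fn(x, is_training):
    model = FixUp_ResNet(num_classes=1000, variant='ResNet50', width=4,
                         drop_rate=0.0, activation='relu')
    return model(x, is_training=is_training)['logits']
  forward = hk.transform(forward_fn)
  params = forward.init(rng, images, is_training=True)
  return forward.apply(params, rng, images, is_training=True)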
| deepmind-research-master | nfnets/fixup_resnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""ImageNet experiment with NF-RegNets."""
from ml_collections import config_dict
from nfnets import experiment
def get_config():
"""Return config object for training."""
config = experiment.get_config()
# Experiment config.
train_batch_size = 1024 # Global batch size.
images_per_epoch = 1281167
num_epochs = 360
steps_per_epoch = images_per_epoch / train_batch_size
config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
config.random_seed = 0
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
lr=0.4,
num_epochs=num_epochs,
label_smoothing=0.1,
model='NF_RegNet',
image_size=224,
use_ema=True,
              ema_decay=0.99999,  # Five nines of EMA decay.
ema_start=0,
augment_name='mixup_cutmix',
train_batch_size=train_batch_size,
eval_batch_size=50,
eval_subset='test',
num_classes=1000,
which_dataset='imagenet',
which_loss='softmax_cross_entropy', # One of softmax or sigmoid
bfloat16=False,
lr_schedule=dict(
name='WarmupCosineDecay',
kwargs=dict(num_steps=config.training_steps,
start_val=0,
min_val=0.001,
warmup_steps=5*steps_per_epoch),
),
lr_scale_by_bs=False,
optimizer=dict(
name='SGD',
kwargs={'momentum': 0.9, 'nesterov': True,
'weight_decay': 5e-5,},
),
model_kwargs=dict(
variant='B0',
width=0.75,
expansion=2.25,
se_ratio=0.5,
alpha=0.2,
stochdepth_rate=0.1,
drop_rate=None,
activation='silu',
),
)))
# Set weight decay based on variant (scaled as 5e-5 + 1e-5 * level)
variant = config.experiment_kwargs.config.model_kwargs.variant
weight_decay = {'B0': 5e-5, 'B1': 6e-5, 'B2': 7e-5,
'B3': 8e-5, 'B4': 9e-5, 'B5': 1e-4}[variant]
config.experiment_kwargs.config.optimizer.kwargs.weight_decay = weight_decay
return config
Experiment = experiment.Experiment
| deepmind-research-master | nfnets/experiment_nf_regnets.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNetV2 (Pre-activation) with SkipInit."""
# pylint: disable=invalid-name
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
# Nonlinearities
nonlinearities = {
'swish': jax.nn.silu,
'relu': jax.nn.relu,
'identity': lambda x: x}
class SkipInit_ResNet(hk.Module):
"""Skip-Init based ResNet."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, num_classes, variant='ResNet50', width=4,
stochdepth_rate=0.1, drop_rate=None,
activation='relu', fc_init=jnp.zeros,
name='SkipInit_ResNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
# Get variant info
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = nonlinearities[activation]
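    # Note: `variant_dict` above only provides 'depth', so callers are expected
    # to pass an explicit `drop_rate`; leaving it as None would raise KeyError.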
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = hk.Conv2D
# Stem
ch = int(16 * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2,
padding='SAME', with_bias=False,
name='initial_conv')
# Body
self.blocks = []
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args):
for block_index in range(stage_depth):
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [NFResBlock(ch, block_width,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
)]
ch = block_width
index += 1
# Head
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(self.activation(out), [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Count flops for classifier
flops += [self.blocks[-1].out_ch * self.fc.output_size]
return flops, sum(flops)
class NFResBlock(hk.Module):
"""Normalizer-Free pre-activation ResNet Block."""
def __init__(self, in_ch, out_ch, bottleneck_ratio=0.25,
kernel_size=3, stride=1,
which_conv=hk.Conv2D, activation=jax.nn.relu,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
# Bottleneck width
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', name='conv1')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
stride=stride, padding='SAME',
name='conv_shortcut')
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
out = self.activation(x)
shortcut = x
if self.use_projection: # Downsample with conv1x1
shortcut = self.conv_shortcut(out)
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = self.conv2(self.activation(out))
    # Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros)
return out + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
    # Projection (contraction) conv FLOPs.
    contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, contract_flops, sc_flops])
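# Sketch (illustrative) of the SkipInit property: because `skip_gain` is
# initialized to zero, a residual block with matching channels and stride 1 is
# exactly the identity map at initialization. `x` is assumed to be a
# [N, H, W, C] float array.
def _example_identity_at_init(rng, x):
  """Returns True if an NFResBlock acts as the identity at initialization."""
  def block_fn(h):
    block = NFResBlock(in_ch=h.shape[-1], out_ch=h.shape[-1])
    out, _ = block(h, is_training=False)
    return out
  init_fn, apply_fn = hk.transform(block_fn)
  params = init_fn(rng, x)
  return bool(jnp.allclose(apply_fn(params, rng, x), x))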
| deepmind-research-master | nfnets/skipinit_resnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalizer-Free RegNets."""
# pylint: disable=invalid-name
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class NF_RegNet(hk.Module):
"""Normalizer-Free RegNets."""
variant_dict = base.nf_regnet_params
def __init__(self, num_classes, variant='B0',
width=0.75, expansion=2.25, group_size=8, se_ratio=0.5,
alpha=0.2, stochdepth_rate=0.1, drop_rate=None,
activation='swish', fc_init=jnp.zeros,
name='NF_RegNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
self.expansion = expansion
self.group_size = group_size
self.se_ratio = se_ratio
# Get variant info
block_params = self.variant_dict[self.variant]
self.train_imsize = block_params['train_imsize']
self.test_imsize = block_params['test_imsize']
self.width_pattern = block_params['width']
self.depth_pattern = block_params['depth']
self.activation = base.nonlinearities[activation]
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = base.WSConv2D
# Stem
ch = int(self.width_pattern[0] * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=3, stride=2,
padding='SAME', name='initial_conv')
# Body
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
for block_width, stage_depth in zip(self.width_pattern, self.depth_pattern):
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init
beta = 1./ expected_std
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
# Use a bottleneck expansion ratio of 1 for first block following EffNet
expand_ratio = 1 if index == 0 else expansion
out_ch = (int(block_width * self.width))
self.blocks += [NFBlock(ch, out_ch,
expansion=expand_ratio, se_ratio=se_ratio,
group_size=self.group_size,
stride=2 if block_index == 0 else 1,
beta=beta, alpha=alpha,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
)]
ch = out_ch
index += 1
# Reset expected std but still give it 1 block of growth
if block_index == 0:
expected_std = 1.0
expected_std = (expected_std **2 + alpha**2)**0.5
# Head with final conv mimicking EffNets
self.final_conv = self.which_conv(int(1280 * ch // 440), kernel_shape=1,
padding='SAME', name='final_conv')
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
out = self.activation(self.final_conv(out))
pool = jnp.mean(out, [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Head module FLOPs
out_ch = self.blocks[-1].out_ch
flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)]
# Count flops for classifier
flops += [self.final_conv.output_channels * self.fc.output_size]
return flops, sum(flops)
class NFBlock(hk.Module):
"""Normalizer-Free RegNet Block."""
def __init__(self, in_ch, out_ch, expansion=2.25, se_ratio=0.5,
kernel_size=3, group_size=8, stride=1,
beta=1.0, alpha=0.2,
which_conv=base.WSConv2D, activation=jax.nn.relu,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.expansion = expansion
self.se_ratio = se_ratio
self.kernel_size = kernel_size
self.activation = activation
self.beta, self.alpha = beta, alpha
    # Round expanded width based on group count
width = int(self.in_ch * expansion)
self.groups = width // group_size
self.width = group_size * self.groups
self.stride = stride
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', feature_group_count=self.groups,
name='conv1')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
padding='SAME', name='conv_shortcut')
# Squeeze + Excite Module
self.se = base.SqueezeExcite(self.width, self.width, self.se_ratio)
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
out = self.activation(x) * self.beta
if self.stride > 1: # Average-pool downsample.
shortcut = hk.avg_pool(out, window_shape=(1, 2, 2, 1),
strides=(1, 2, 2, 1), padding='SAME')
if self.use_projection:
shortcut = self.conv_shortcut(shortcut)
elif self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = 2 * self.se(out) * out # Multiply by 2 for rescaling
out = self.conv2(self.activation(out))
    # Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros)
return out * self.alpha + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
# SE flops happen on avg-pooled activations
se_flops = self.se.fc0.output_size * self.width
se_flops += self.se.fc0.output_size * self.se.fc1.output_size
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
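# Illustrative sketch, not part of the original file: the beta pre-multipliers
# computed in NF_RegNet.__init__ above follow a simple recursion, with the
# expected residual std reset at each transition block and grown by alpha
# otherwise. The depth pattern and alpha below are example values only.
def _example_beta_schedule(depth_pattern=(1, 3, 6, 6), alpha=0.2):
  betas = []
  expected_std = 1.0
  for stage_depth in depth_pattern:
    for block_index in range(stage_depth):
      betas.append(1. / expected_std)  # each block sees ~unit-variance input
      if block_index == 0:             # reset at the transition block
        expected_std = 1.0
      expected_std = (expected_std ** 2 + alpha ** 2) ** 0.5
  return betas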
| deepmind-research-master | nfnets/nf_regnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quick script to test that experiment can import and run."""
import jax
import jax.numpy as jnp
from nfnets import experiment
from nfnets import experiment_nfnets
def test_experiment():
"""Tests the main experiment."""
config = experiment.get_config()
exp_config = config.experiment_kwargs.config
exp_config.train_batch_size = 2
exp_config.eval_batch_size = 2
exp_config.lr = 0.1
exp_config.fake_data = True
exp_config.model_kwargs.width = 2
print(exp_config.model_kwargs)
xp = experiment.Experiment('train', exp_config, jax.random.PRNGKey(0))
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single experiment step for test purposes!')
result = xp.step(global_step, rng)
print(f'Step successfully taken, resulting metrics are {result}')
def test_nfnet_experiment():
"""Tests the NFNet experiment."""
config = experiment_nfnets.get_config()
exp_config = config.experiment_kwargs.config
exp_config.train_batch_size = 2
exp_config.eval_batch_size = 2
exp_config.lr = 0.1
exp_config.fake_data = True
exp_config.model_kwargs.width = 2
print(exp_config.model_kwargs)
xp = experiment_nfnets.Experiment('train', exp_config, jax.random.PRNGKey(0))
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single NFNet experiment step for test purposes!')
result = xp.step(global_step, rng)
print(f'NFNet Step successfully taken, resulting metrics are {result}')
test_experiment()
test_nfnet_experiment()
| deepmind-research-master | nfnets/test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adaptive gradient clipping transform for Optax."""
import jax
import jax.numpy as jnp
import optax
def compute_norm(x, axis, keepdims):
"""Axis-wise euclidean norm."""
return jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5
def unitwise_norm(x):
"""Compute norms of each output unit separately, also for linear layers."""
if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors
axis = None
keepdims = False
elif len(x.shape) in [2, 3]: # Linear layers of shape IO or multihead linear
axis = 0
keepdims = True
elif len(x.shape) == 4: # Conv kernels of shape HWIO
axis = [0, 1, 2,]
keepdims = True
else:
    raise ValueError(f'Got a parameter with shape not in [1, 2, 3, 4]! {x}')
return compute_norm(x, axis, keepdims)
def my_clip(g_norm, max_norm, grad):
"""Applies my gradient clipping unit-wise."""
trigger = g_norm < max_norm
# This little max(., 1e-6) is distinct from the normal eps and just prevents
# division by zero. It technically should be impossible to engage.
clipped_grad = grad * (max_norm / jnp.maximum(g_norm, 1e-6))
return jnp.where(trigger, grad, clipped_grad)
def adaptive_grad_clip(clip, eps=1e-3) -> optax.GradientTransformation:
"""Clip updates to be at most clipping * parameter_norm.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization.
Args:
clip: Maximum allowed ratio of update norm to parameter norm.
eps: epsilon term to prevent clipping of zero-initialized params.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return optax.ClipByGlobalNormState()
def update_fn(updates, state, params):
g_norm = jax.tree_map(unitwise_norm, updates)
p_norm = jax.tree_map(unitwise_norm, params)
# Maximum allowable norm
max_norm = jax.tree_map(lambda x: clip * jnp.maximum(x, eps), p_norm)
# If grad norm > clipping * param_norm, rescale
updates = jax.tree_map(my_clip, g_norm, max_norm, updates)
return updates, state
return optax.GradientTransformation(init_fn, update_fn)
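# Hedged usage sketch, not part of the original file: adaptive_grad_clip is a
# standard optax GradientTransformation, so it composes with other transforms
# via optax.chain. The clipping threshold and learning rate are example values.
def _example_agc_optimizer(clipping=0.01, learning_rate=0.1):
  # Note: AGC needs the params, so call opt.update(grads, state, params).
  return optax.chain(adaptive_grad_clip(clipping), optax.sgd(learning_rate))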
| deepmind-research-master | nfnets/agc_optax.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet dataset with typical pre-processing and advanced augs."""
# pylint: disable=logging-format-interpolation
import enum
import itertools as it
import logging
import re
from typing import Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from nfnets import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
batch_dims: Sequence[int],
name: str = 'imagenet',
dtype: jnp.dtype = jnp.float32,
transpose: bool = False,
fake_data: bool = False,
image_size: Tuple[int, int] = (224, 224),
augment_name: Optional[str] = None,
eval_preproc: str = 'crop_resize',
augment_before_mix: bool = True,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset.
Args:
split: Dataset split to use.
is_training: If true, use training preproc and augmentation.
batch_dims: List indicating how to batch the dataset (typically expected to
be of shape (num_devices, bs_per_device)
name: Which dataset to use, (must be 'imagenet')
dtype: One of float32 or bfloat16 (bf16 may not be supported fully)
transpose: If true, employs double transpose trick.
fake_data: Return batches of fake data for debugging purposes.
image_size: Final image size returned by dataset pipeline. Note that the
exact procedure to arrive at this size will depend on the chosen preproc.
augment_name: Optional additional aug strategy (applied atop the default
of distorted bboxes and random L/R flips). Specified with a string
      such as 'cutmix_mixup_0.4_randaugment_415'. See the README for details.
eval_preproc: Eval preproc method, either 'crop_resize' (crop on the long
edge then resize) or `resize_crop_{pct}`, which will resize the image to
`image_size / pct` on each side then take a center crop.
augment_before_mix: Apply augs like RA/AA before or after cutmix/mixup.
Yields:
A TFDS numpy iterator.
"""
start, end = _shard(split, jax.host_id(), jax.host_count())
if fake_data:
print('Using fake data!')
images = np.zeros(tuple(batch_dims) + image_size + (3,), dtype=dtype)
labels = np.zeros(tuple(batch_dims), dtype=np.int32)
if transpose:
axes = tuple(range(images.ndim))
axes = axes[:-4] + axes[-3:] + (axes[-4],) # NHWC -> HWCN
images = np.transpose(images, axes)
yield from it.repeat({'images': images, 'labels': labels}, end - start)
return
total_batch_size = np.prod(batch_dims)
if name.lower() == 'imagenet':
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
else:
raise ValueError('Only imagenet is presently supported for this dataset.')
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.parallel_batch = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=None)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def augment_normalize(batch):
"""Optionally augment, then normalize an image."""
batch = dict(**batch)
image = _augment_image(batch['images'], is_training, augment_name)
batch['images'] = _normalize_image(image)
return batch
def preprocess(example):
image = _preprocess_image(example['image'], is_training, image_size,
eval_preproc)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if augment_name is not None and 'cutmix' in augment_name:
out['mask'] = cutmix_padding(*image_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augment_name is not None and 'mixup' in augment_name:
mixup_alpha = 0.2 # default to alpha=0.2
# If float provided, get it
if 'mixup_' in augment_name:
alpha = augment_name.split('mixup_')[1].split('_')
if any(alpha) and re.match(r'^-?\d+(?:\.\d+)?$', alpha[0]) is not None:
mixup_alpha = float(alpha[0])
beta = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
out['mixup_ratio'] = beta.sample()
# Apply augs before mixing?
if augment_before_mix or augment_name is None:
out = augment_normalize(out)
return out
ds = ds.map(preprocess, num_parallel_calls=AUTOTUNE)
ds = ds.prefetch(AUTOTUNE)
def transpose_fn(batch):
# Applies the double-transpose trick for TPU.
batch = dict(**batch)
batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0))
return batch
def cast_fn(batch):
batch = dict(**batch)
batch['images'] = tf.cast(batch['images'], tf.dtypes.as_dtype(dtype))
return batch
for i, batch_size in enumerate(reversed(batch_dims)):
if i == 0:
# Deal with vectorized MixUp + CutMix ops
if augment_name is not None:
if 'mixup' in augment_name or 'cutmix' in augment_name:
ds = ds.batch(batch_size * 2)
else:
ds = ds.map(augment_normalize, num_parallel_calls=AUTOTUNE)
ds = ds.batch(batch_size)
# Apply mixup, cutmix, or mixup + cutmix
if 'mixup' in augment_name and 'cutmix' not in augment_name:
logging.info('Applying MixUp!')
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif 'cutmix' in augment_name and 'mixup' not in augment_name:
logging.info('Applying CutMix!')
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif 'mixup' in augment_name and 'cutmix' in augment_name:
logging.info('Applying MixUp and CutMix!')
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# If applying augs after mixing, unbatch, map, and rebatch
if (not augment_before_mix and
('mixup' in augment_name or 'cutmix' in augment_name)):
ds = ds.unbatch().map(augment_normalize, num_parallel_calls=AUTOTUNE)
ds = ds.batch(batch_size)
else:
ds = ds.batch(batch_size)
      # Transpose and cast as needed
if transpose:
ds = ds.map(transpose_fn) # NHWC -> HWCN
# NOTE: You may be tempted to move the casting earlier on in the pipeline,
# but for bf16 some operations will end up silently placed on the TPU and
# this causes stalls while TF and JAX battle for the accelerator.
ds = ds.map(cast_fn)
else:
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
ds = tfds.as_numpy(ds)
yield from ds
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
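# Illustrative sketch, not part of the original file: the CutMix mask is binary,
# so its mean equals the fraction of pixels taken from the first image in
# my_cutmix below, which is what the preprocess fn inside load() stores as
# 'cutmix_ratio'. Toy sizes only.
def _example_cutmix_ratio(h=8, w=8):
  mask = cutmix_padding(h, w)
  return tf.reduce_mean(mask)  # scalar in [0, 1]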
def my_cutmix(batch):
"""Cutmix."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Mixup."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
  ratio = ratio[..., 0, 0, 0]  # Squeeze the broadcast dims back out
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def mixup_or_cutmix(batch):
"""Randomly applies one of cutmix or mixup to a batch."""
logging.info('Randomly applying cutmix or mixup with 50% chance!')
return tf.cond(
tf.cast(tf.random.uniform([], maxval=2, dtype=tf.int32), tf.bool),
lambda: my_mixup(batch),
lambda: my_cutmix(batch))
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
eval_preproc: str = 'crop_resize'
) -> tf.Tensor:
"""Returns processed and resized images."""
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
if is_training:
image = _decode_and_random_crop(image_bytes, image_size)
image = tf.image.random_flip_left_right(image)
assert image.dtype == tf.uint8
image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
else:
if eval_preproc == 'crop_resize':
image = _decode_and_center_crop(image_bytes, image_size=image_size)
assert image.dtype == tf.uint8
image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
elif 'resize_crop' in eval_preproc:
# Pass in crop percent
crop_pct = float(eval_preproc.split('_')[-1])
image = _decode_and_resize_then_crop(image_bytes, image_size=image_size,
crop_pct=crop_pct)
else:
raise ValueError(f'Unknown Eval Preproc {eval_preproc} provided!')
return image
def _augment_image(
image: tf.Tensor,
is_training: bool,
augment_name: Optional[str] = None,
) -> tf.Tensor:
"""Applies AA/RA to an image."""
if is_training and augment_name:
if 'autoaugment' in augment_name or 'randaugment' in augment_name:
input_image_type = image.dtype
image = tf.clip_by_value(image, 0.0, 255.0)
# Autoaugment requires a uint8 image; we cast here and then cast back
image = tf.cast(image, dtype=tf.uint8)
if 'autoaugment' in augment_name:
logging.info(f'Applying AutoAugment policy {augment_name}')
image = autoaugment.distort_image_with_autoaugment(image, 'v0')
elif 'randaugment' in augment_name:
magnitude = int(augment_name.split('_')[-1]) # pytype: disable=attribute-error
# Allow passing in num_layers as a magnitude > 100
if magnitude > 100:
num_layers = magnitude // 100
magnitude = magnitude - int(num_layers * 100)
else:
num_layers = 2
logging.info(f'Applying RA {num_layers} x {magnitude}')
image = autoaugment.distort_image_with_randaugment(
image, num_layers=num_layers, magnitude=magnitude)
image = tf.cast(image, dtype=input_image_type)
return image
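# Illustrative sketch, not part of the original file: how the trailing number in
# an augment_name such as 'cutmix_mixup_randaugment_405' is decoded above; codes
# above 100 pack num_layers into the hundreds digit (405 -> 4 layers, mag 5).
def _example_decode_randaugment_code(code=405):
  if code > 100:
    num_layers = code // 100
    magnitude = code - num_layers * 100
  else:
    num_layers, magnitude = 2, code
  return num_layers, magnitude  # (4, 5) for the default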
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> tf.Tensor:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
image = crop(image_bytes, crop_window)
return image
def _decode_and_random_crop(image_bytes: tf.Tensor,
image_size: Sequence[int] = (224, 224),
jpeg_shape: Optional[tf.Tensor] = None
) -> tf.Tensor:
"""Make a random crop of chosen size."""
if jpeg_shape is None:
jpeg_shape = get_shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image = _decode_and_center_crop(image_bytes, jpeg_shape, image_size)
return image
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = get_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
# Pad the image with at least 32px on the short edge and take a
# crop that maintains aspect ratio.
scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32),
tf.cast(image_width, tf.float32) / (image_size[1] + 32))
padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32)
padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32)
offset_height = ((image_height - padded_center_crop_height) + 1) // 2
offset_width = ((image_width - padded_center_crop_width) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_height, padded_center_crop_width]
image = crop(image_bytes, crop_window)
return image
def get_shape(image_bytes):
"""Gets the image shape for jpeg bytes or a uint8 decoded image."""
if image_bytes.dtype == tf.dtypes.string:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
image_shape = tf.shape(image_bytes)
return image_shape
def crop(image_bytes, crop_window):
"""Helper function to crop a jpeg or a decoded image."""
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
return image
def _decode_and_resize_then_crop(
image_bytes: tf.Tensor,
image_size: Sequence[int] = (224, 224),
crop_pct: float = 1.0,
) -> tf.Tensor:
"""Rescales an image to image_size / crop_pct, then center crops."""
image = tf.image.decode_jpeg(image_bytes, channels=3)
# Scale image to "scaled size" before taking a center crop
if crop_pct > 1.0: # If crop_pct is >1, treat it as num pad pixels (like VGG)
scale_size = tuple([int(x + crop_pct) for x in image_size])
else:
scale_size = tuple([int(float(x) / crop_pct) for x in image_size])
image = tf.image.resize(image, scale_size, tf.image.ResizeMethod.BICUBIC)
crop_height = tf.cast(image_size[0], tf.int32)
crop_width = tf.cast(image_size[1], tf.int32)
offset_height = ((scale_size[0] - crop_height) + 1) // 2
offset_width = ((scale_size[1] - crop_width) + 1) // 2
crop_window = [offset_height, offset_width, crop_height, crop_width]
image = crop(image, crop_window)
return image
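# Hedged usage sketch, not part of the original file: with fake_data=True, load()
# yields zero-filled batches in the requested layout without touching TFDS, which
# is convenient for smoke tests. Batch dims and image size are example values.
def _example_fake_batch():
  ds_iter = load(Split.TRAIN, is_training=True, batch_dims=[1, 2],
                 image_size=(32, 32), fake_data=True)
  batch = next(ds_iter)
  return batch['images'].shape, batch['labels'].shape  # ((1, 2, 32, 32, 3), (1, 2))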
| deepmind-research-master | nfnets/dataset.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Basic Jaxline ImageNet experiment."""
import importlib
import sys
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
from ml_collections import config_dict
import numpy as np
from nfnets import dataset
from nfnets import optim
from nfnets import utils
# pylint: disable=logging-format-interpolation
FLAGS = flags.FLAGS
# We define the experiment launch config in the same file as the experiment to
# keep things self-contained in a single file, but one might consider moving the
# config and/or sweep functions to a separate file, if necessary.
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
# Experiment config.
train_batch_size = 1024 # Global batch size.
images_per_epoch = 1281167
num_epochs = 90
steps_per_epoch = images_per_epoch / train_batch_size
config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
config.random_seed = 0
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
lr=0.1,
num_epochs=num_epochs,
label_smoothing=0.1,
model='ResNet',
image_size=224,
use_ema=False,
              ema_decay=0.9999,  # Four new friends (the four nines)
ema_start=0,
which_ema='tf1_ema',
augment_name=None, # 'mixup_cutmix',
augment_before_mix=True,
eval_preproc='crop_resize',
train_batch_size=train_batch_size,
eval_batch_size=50,
eval_subset='test',
num_classes=1000,
which_dataset='imagenet',
fake_data=False,
which_loss='softmax_cross_entropy', # For now, must be softmax
transpose=True, # Use the double-transpose trick?
bfloat16=False,
lr_schedule=dict(
name='WarmupCosineDecay',
kwargs=dict(
num_steps=config.training_steps,
start_val=0,
min_val=0,
warmup_steps=5 * steps_per_epoch),
),
lr_scale_by_bs=True,
optimizer=dict(
name='SGD',
kwargs={
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
),
model_kwargs=dict(
width=4,
which_norm='BatchNorm',
norm_kwargs=dict(
create_scale=True,
create_offset=True,
decay_rate=0.9,
), # cross_replica_axis='i'),
variant='ResNet50',
activation='relu',
drop_rate=0.0,
),
),))
# Training loop config: log and checkpoint every minute
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 60
config.eval_specific_checkpoint_dir = ''
return config
class Experiment(experiment.AbstractExperiment):
"""Imagenet experiment."""
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_ema_params': 'ema_params',
'_ema_state': 'ema_state',
'_opt_state': 'opt_state',
}
def __init__(self, mode, config, init_rng):
super().__init__(mode=mode)
self.mode = mode
self.config = config
self.init_rng = init_rng
# Checkpointed experiment state.
self._params = None
self._state = None
self._ema_params = None
self._ema_state = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
# Get model, loaded in from the zoo
module_prefix = 'nfnets.'
self.model_module = importlib.import_module(
(module_prefix + self.config.model.lower()))
self.net = hk.transform_with_state(self._forward_fn)
# Assign image sizes
if self.config.get('override_imsize', False):
self.train_imsize = self.config.image_size
self.test_imsize = self.config.get('eval_image_size', self.train_imsize)
else:
variant_dict = getattr(self.model_module, self.config.model).variant_dict
variant_dict = variant_dict[self.config.model_kwargs.variant]
self.train_imsize = variant_dict.get('train_imsize',
self.config.image_size)
# Test imsize defaults to model-specific value, then to config imsize
test_imsize = self.config.get('eval_image_size', self.config.image_size)
self.test_imsize = variant_dict.get('test_imsize', test_imsize)
donate_argnums = (0, 1, 2, 6, 7) if self.config.use_ema else (0, 1, 2)
self.train_fn = jax.pmap(
self._train_fn, axis_name='i', donate_argnums=donate_argnums)
self.eval_fn = jax.pmap(self._eval_fn, axis_name='i')
def _initialize_train(self):
self._train_input = self._build_train_input()
# Initialize net and EMA copy of net if no params available.
if self._params is None:
inputs = next(self._train_input)
init_net = jax.pmap(
lambda *a: self.net.init(*a, is_training=True), axis_name='i')
init_rng = jl_utils.bcast_local_devices(self.init_rng)
self._params, self._state = init_net(init_rng, inputs)
if self.config.use_ema:
self._ema_params, self._ema_state = init_net(init_rng, inputs)
num_params = hk.data_structures.tree_size(self._params)
logging.info(f'Net parameters: {num_params / jax.local_device_count()}')
self._make_opt()
def _make_opt(self):
# Separate conv params and gains/biases
def pred(mod, name, val): # pylint:disable=unused-argument
return (name in ['scale', 'offset', 'b'] or 'gain' in name or
'bias' in name)
gains_biases, weights = hk.data_structures.partition(pred, self._params)
# Lr schedule with batch-based LR scaling
if self.config.lr_scale_by_bs:
max_lr = (self.config.lr * self.config.train_batch_size) / 256
else:
max_lr = self.config.lr
lr_sched_fn = getattr(optim, self.config.lr_schedule.name)
lr_schedule = lr_sched_fn(max_val=max_lr, **self.config.lr_schedule.kwargs)
# Optimizer; no need to broadcast!
opt_kwargs = {key: val for key, val in self.config.optimizer.kwargs.items()}
opt_kwargs['lr'] = lr_schedule
opt_module = getattr(optim, self.config.optimizer.name)
self.opt = opt_module([{
'params': gains_biases,
'weight_decay': None
}, {
'params': weights
}], **opt_kwargs)
if self._opt_state is None:
self._opt_state = self.opt.states()
else:
self.opt.plugin(self._opt_state)
def _forward_fn(self, inputs, is_training):
net_kwargs = {
'num_classes': self.config.num_classes,
**self.config.model_kwargs
}
net = getattr(self.model_module, self.config.model)(**net_kwargs)
if self.config.get('transpose', False):
images = jnp.transpose(inputs['images'], (3, 0, 1, 2)) # HWCN -> NHWC
else:
images = inputs['images']
if self.config.bfloat16 and self.mode == 'train':
images = utils.to_bf16(images)
return net(images, is_training=is_training)['logits']
def _one_hot(self, value):
"""One-hot encoding potentially over a sequence of labels."""
y = jax.nn.one_hot(value, self.config.num_classes)
return y
def _loss_fn(self, params, state, inputs, rng):
logits, state = self.net.apply(params, state, rng, inputs, is_training=True)
y = self._one_hot(inputs['labels'])
if 'mix_labels' in inputs: # Handle cutmix/mixup label mixing
logging.info('Using mixup or cutmix!')
y1 = self._one_hot(inputs['mix_labels'])
y = inputs['ratio'][:, None] * y + (1. - inputs['ratio'][:, None]) * y1
if self.config.label_smoothing > 0: # get smoothy
spositives = 1. - self.config.label_smoothing
snegatives = self.config.label_smoothing / self.config.num_classes
y = spositives * y + snegatives
if self.config.bfloat16: # Cast logits to float32
logits = logits.astype(jnp.float32)
which_loss = getattr(utils, self.config.which_loss)
loss = which_loss(logits, y, reduction='mean')
metrics = utils.topk_correct(logits, inputs['labels'], prefix='train_')
# Average top-1 and top-5 correct labels
metrics = jax.tree_map(jnp.mean, metrics)
metrics['train_loss'] = loss # Metrics will be pmeaned so don't divide here
scaled_loss = loss / jax.device_count() # Grads get psummed so do divide
return scaled_loss, (metrics, state)
def _train_fn(self, params, states, opt_states, inputs, rng, global_step,
ema_params, ema_states):
"""Runs one batch forward + backward and run a single opt step."""
grad_fn = jax.grad(self._loss_fn, argnums=0, has_aux=True)
if self.config.bfloat16:
in_params, states = jax.tree_map(utils.to_bf16, (params, states))
else:
in_params = params
grads, (metrics, states) = grad_fn(in_params, states, inputs, rng)
if self.config.bfloat16:
states, metrics, grads = jax.tree_map(utils.from_bf16,
(states, metrics, grads))
# Sum gradients and average losses for pmap
grads = jax.lax.psum(grads, 'i')
metrics = jax.lax.pmean(metrics, 'i')
# Compute updates and update parameters
metrics['learning_rate'] = self.opt._hyperparameters['lr'](global_step) # pylint: disable=protected-access
params, opt_states = self.opt.step(params, grads, opt_states, global_step)
if ema_params is not None:
ema_fn = getattr(utils, self.config.get('which_ema', 'tf1_ema'))
ema = lambda x, y: ema_fn(x, y, self.config.ema_decay, global_step)
ema_params = jax.tree_map(ema, ema_params, params)
ema_states = jax.tree_map(ema, ema_states, states)
return {
'params': params,
'states': states,
'opt_states': opt_states,
'ema_params': ema_params,
'ema_states': ema_states,
'metrics': metrics
}
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, *unused_args, **unused_kwargs):
if self._train_input is None:
self._initialize_train()
inputs = next(self._train_input)
out = self.train_fn(
params=self._params,
states=self._state,
opt_states=self._opt_state,
inputs=inputs,
rng=rng,
global_step=global_step,
ema_params=self._ema_params,
ema_states=self._ema_state)
self._params, self._state = out['params'], out['states']
self._opt_state = out['opt_states']
self._ema_params, self._ema_state = out['ema_params'], out['ema_states']
self.opt.plugin(self._opt_state)
return jl_utils.get_first(out['metrics'])
def _build_train_input(self):
num_devices = jax.device_count()
global_batch_size = self.config.train_batch_size
bs_per_device, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
return dataset.load(
dataset.Split.TRAIN_AND_VALID,
is_training=True,
batch_dims=[jax.local_device_count(), bs_per_device],
transpose=self.config.get('transpose', False),
image_size=(self.train_imsize,) * 2,
augment_name=self.config.augment_name,
augment_before_mix=self.config.get('augment_before_mix', True),
name=self.config.which_dataset,
fake_data=self.config.get('fake_data', False))
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, **unused_args):
metrics = self._eval_epoch(self._params, self._state)
if self.config.use_ema:
ema_metrics = self._eval_epoch(self._ema_params, self._ema_state)
metrics.update({f'ema_{key}': val for key, val in ema_metrics.items()})
logging.info(f'[Step {global_step}] Eval scalars: {metrics}')
return metrics
def _eval_epoch(self, params, state):
"""Evaluates an epoch."""
num_samples = 0.
summed_metrics = None
for inputs in self._build_eval_input():
num_samples += np.prod(inputs['labels'].shape[:2]) # Account for pmaps
metrics = self.eval_fn(params, state, inputs)
# Accumulate the sum of metrics for each step.
metrics = jax.tree_map(lambda x: jnp.sum(x[0], axis=0), metrics)
if summed_metrics is None:
summed_metrics = metrics
else:
summed_metrics = jax.tree_map(jnp.add, summed_metrics, metrics)
mean_metrics = jax.tree_map(lambda x: x / num_samples, summed_metrics)
return jax.device_get(mean_metrics)
def _eval_fn(self, params, state, inputs):
"""Evaluate a single batch and return loss and top-k acc."""
logits, _ = self.net.apply(params, state, None, inputs, is_training=False)
y = self._one_hot(inputs['labels'])
which_loss = getattr(utils, self.config.which_loss)
loss = which_loss(logits, y, reduction=None)
metrics = utils.topk_correct(logits, inputs['labels'], prefix='eval_')
metrics['eval_loss'] = loss
return jax.lax.psum(metrics, 'i')
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
bs_per_device = (self.config.eval_batch_size // jax.local_device_count())
split = dataset.Split.from_string(self.config.eval_subset)
eval_preproc = self.config.get('eval_preproc', 'crop_resize')
return dataset.load(
split,
is_training=False,
batch_dims=[jax.local_device_count(), bs_per_device],
transpose=self.config.get('transpose', False),
image_size=(self.test_imsize,) * 2,
name=self.config.which_dataset,
eval_preproc=eval_preproc,
fake_data=self.config.get('fake_data', False))
if __name__ == '__main__':
flags.mark_flag_as_required('config')
platform.main(Experiment, sys.argv[1:])
| deepmind-research-master | nfnets/experiment.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""ImageNet experiment with NFNets."""
import sys
from absl import flags
import haiku as hk
from jaxline import platform
from ml_collections import config_dict
from nfnets import experiment
from nfnets import optim
FLAGS = flags.FLAGS
def get_config():
"""Return config object for training."""
config = experiment.get_config()
# Experiment config.
train_batch_size = 4096 # Global batch size.
images_per_epoch = 1281167
num_epochs = 360
steps_per_epoch = images_per_epoch / train_batch_size
config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
config.random_seed = 0
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
lr=0.1,
num_epochs=num_epochs,
label_smoothing=0.1,
model='NFNet',
image_size=224,
use_ema=True,
ema_decay=0.99999,
ema_start=0,
augment_name=None,
augment_before_mix=False,
eval_preproc='resize_crop_32',
train_batch_size=train_batch_size,
eval_batch_size=50,
eval_subset='test',
num_classes=1000,
which_dataset='imagenet',
which_loss='softmax_cross_entropy', # One of softmax or sigmoid
bfloat16=True,
lr_schedule=dict(
name='WarmupCosineDecay',
kwargs=dict(num_steps=config.training_steps,
start_val=0,
min_val=0.0,
warmup_steps=5*steps_per_epoch),
),
lr_scale_by_bs=True,
optimizer=dict(
name='SGD_AGC',
kwargs={'momentum': 0.9, 'nesterov': True,
'weight_decay': 2e-5,
'clipping': 0.01, 'eps': 1e-3},
),
model_kwargs=dict(
variant='F0',
width=1.0,
se_ratio=0.5,
alpha=0.2,
stochdepth_rate=0.25,
drop_rate=None, # Use native drop-rate
activation='gelu',
final_conv_mult=2,
final_conv_ch=None,
use_two_convs=True,
),
)))
# Unlike NF-RegNets, use the same weight decay for all, but vary RA levels
variant = config.experiment_kwargs.config.model_kwargs.variant
# RandAugment levels (e.g. 405 = 4 layers, magnitude 5, 205 = 2 layers, mag 5)
augment = {'F0': '405', 'F1': '410', 'F2': '410', 'F3': '415',
'F4': '415', 'F5': '415', 'F6': '415', 'F7': '415'}[variant]
aug_base_name = 'cutmix_mixup_randaugment'
config.experiment_kwargs.config.augment_name = f'{aug_base_name}_{augment}'
return config
class Experiment(experiment.Experiment):
"""Experiment with correct parameter filtering for applying AGC."""
def _make_opt(self):
# Separate conv params and gains/biases
def pred_gb(mod, name, val):
del mod, val
return (name in ['scale', 'offset', 'b']
or 'gain' in name or 'bias' in name)
gains_biases, weights = hk.data_structures.partition(pred_gb, self._params)
def pred_fc(mod, name, val):
del name, val
return 'linear' in mod and 'squeeze_excite' not in mod
fc_weights, weights = hk.data_structures.partition(pred_fc, weights)
# Lr schedule with batch-based LR scaling
if self.config.lr_scale_by_bs:
max_lr = (self.config.lr * self.config.train_batch_size) / 256
else:
max_lr = self.config.lr
lr_sched_fn = getattr(optim, self.config.lr_schedule.name)
lr_schedule = lr_sched_fn(max_val=max_lr, **self.config.lr_schedule.kwargs)
# Optimizer; no need to broadcast!
opt_kwargs = {key: val for key, val in self.config.optimizer.kwargs.items()}
opt_kwargs['lr'] = lr_schedule
opt_module = getattr(optim, self.config.optimizer.name)
self.opt = opt_module([{'params': gains_biases, 'weight_decay': None,},
{'params': fc_weights, 'clipping': None},
{'params': weights}], **opt_kwargs)
if self._opt_state is None:
self._opt_state = self.opt.states()
else:
self.opt.plugin(self._opt_state)
if __name__ == '__main__':
flags.mark_flag_as_required('config')
platform.main(Experiment, sys.argv[1:])
| deepmind-research-master | nfnets/experiment_nfnets.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet model family."""
import functools
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class ResNet(hk.Module):
"""ResNetv2 Models."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, width, num_classes,
variant='ResNet50',
which_norm='BatchNorm', norm_kwargs=None,
activation='relu', drop_rate=0.0,
fc_init=jnp.zeros, conv_kwargs=None,
preactivation=True, use_se=False, se_ratio=0.25,
name='ResNet'):
super().__init__(name=name)
self.width = width
self.num_classes = num_classes
self.variant = variant
self.depth_pattern = self.variant_dict[variant]['depth']
self.activation = getattr(jax.nn, activation)
self.drop_rate = drop_rate
self.which_norm = getattr(hk, which_norm)
if norm_kwargs is not None:
self.which_norm = functools.partial(self.which_norm, **norm_kwargs)
if conv_kwargs is not None:
self.which_conv = functools.partial(hk.Conv2D, **conv_kwargs)
else:
self.which_conv = hk.Conv2D
self.preactivation = preactivation
# Stem
self.initial_conv = self.which_conv(16 * self.width, kernel_shape=7,
stride=2, padding='SAME',
with_bias=False, name='initial_conv')
if not self.preactivation:
self.initial_bn = self.which_norm(name='initial_bn')
which_block = ResBlockV2 if self.preactivation else ResBlockV1
# Body
self.blocks = []
for multiplier, blocks_per_stage, stride in zip([64, 128, 256, 512],
self.depth_pattern,
[1, 2, 2, 2]):
for block_index in range(blocks_per_stage):
self.blocks += [which_block(multiplier * self.width,
use_projection=block_index == 0,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_norm=self.which_norm,
which_conv=self.which_conv,
use_se=use_se,
se_ratio=se_ratio)]
# Head
self.final_bn = self.which_norm(name='final_bn')
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training, test_local_stats=False,
return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
outputs = {}
# Stem
out = self.initial_conv(x)
if not self.preactivation:
out = self.activation(self.initial_bn(out, is_training, test_local_stats))
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_var = block(out, is_training, test_local_stats)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_var
if self.preactivation:
out = self.activation(self.final_bn(out, is_training, test_local_stats))
# Pool, dropout, classify
pool = jnp.mean(out, axis=[1, 2])
# Return pool before dropout in case we want to regularize it separately.
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
class ResBlockV2(hk.Module):
"""ResNet preac block, 1x1->3x3->1x1 with strides and shortcut downsample."""
def __init__(self, out_ch, stride=1, use_projection=False,
activation=jax.nn.relu, which_norm=hk.BatchNorm,
which_conv=hk.Conv2D, use_se=False, se_ratio=0.25,
name=None):
super().__init__(name=name)
self.out_ch = out_ch
self.stride = stride
self.use_projection = use_projection
self.activation = activation
self.which_norm = which_norm
self.which_conv = which_conv
self.use_se = use_se
self.se_ratio = se_ratio
self.width = self.out_ch // 4
self.bn0 = which_norm(name='bn0')
self.conv0 = which_conv(self.width, kernel_shape=1, with_bias=False,
padding='SAME', name='conv0')
self.bn1 = which_norm(name='bn1')
self.conv1 = which_conv(self.width, stride=self.stride,
kernel_shape=3, with_bias=False,
padding='SAME', name='conv1')
self.bn2 = which_norm(name='bn2')
self.conv2 = which_conv(self.out_ch, kernel_shape=1, with_bias=False,
padding='SAME', name='conv2')
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, stride=stride,
kernel_shape=1, with_bias=False,
padding='SAME', name='conv_shortcut')
if self.use_se:
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
def __call__(self, x, is_training, test_local_stats):
bn_args = (is_training, test_local_stats)
out = self.activation(self.bn0(x, *bn_args))
if self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(self.bn1(out, *bn_args)))
out = self.conv2(self.activation(self.bn2(out, *bn_args)))
if self.use_se:
out = self.se(out) * out
    # Get average residual variance for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
return out + shortcut, res_avg_var
class ResBlockV1(ResBlockV2):
"""Post-Ac Residual Block."""
def __call__(self, x, is_training, test_local_stats):
bn_args = (is_training, test_local_stats)
if self.use_projection:
shortcut = self.conv_shortcut(x)
shortcut = self.which_norm(name='shortcut_bn')(shortcut, *bn_args)
else:
shortcut = x
out = self.activation(self.bn0(self.conv0(x), *bn_args))
out = self.activation(self.bn1(self.conv1(out), *bn_args))
out = self.bn2(self.conv2(out), *bn_args)
if self.use_se:
out = self.se(out) * out
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
return self.activation(out + shortcut), res_avg_var
| deepmind-research-master | nfnets/resnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils."""
import dill
import jax
import jax.numpy as jnp
import tree
def reduce_fn(x, mode):
"""Reduce fn for various losses."""
if mode == 'none' or mode is None:
return jnp.asarray(x)
elif mode == 'sum':
return jnp.sum(x)
elif mode == 'mean':
return jnp.mean(x)
else:
raise ValueError('Unsupported reduction option.')
def softmax_cross_entropy(logits, labels, reduction='sum'):
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
reduction: Type of reduction to apply to loss.
Returns:
Loss value. If `reduction` is `none`, this has the same shape as `labels`;
otherwise, it is scalar.
Raises:
ValueError: If the type of `reduction` is unsupported.
"""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
return reduce_fn(loss, reduction)
def topk_correct(logits, labels, mask=None, prefix='', topk=(1, 5)):
"""Calculate top-k error for multiple k values."""
metrics = {}
argsorted_logits = jnp.argsort(logits)
for k in topk:
pred_labels = argsorted_logits[..., -k:]
# Get the number of examples where the label is in the top-k predictions
correct = any_in(pred_labels, labels).any(axis=-1).astype(jnp.float32)
if mask is not None:
correct *= mask
metrics[f'{prefix}top_{k}_acc'] = correct
return metrics
@jax.vmap
def any_in(prediction, target):
"""For each row in a and b, checks if any element of a is in b."""
return jnp.isin(prediction, target)
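# Illustrative sketch, not part of the original file: toy usage of topk_correct.
# With only three classes we restrict topk to (1,), since the default also
# requests top-5 accuracy.
def _example_topk_correct():
  logits = jnp.array([[0.1, 2.0, 0.3], [1.5, 0.2, 0.1]])
  labels = jnp.array([1, 2])
  return topk_correct(logits, labels, topk=(1,))  # {'top_1_acc': [1., 0.]}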
def tf1_ema(ema_value, current_value, decay, step):
"""Implements EMA with TF1-style decay warmup."""
decay = jnp.minimum(decay, (1.0 + step) / (10.0 + step))
return ema_value * decay + current_value * (1 - decay)
def ema(ema_value, current_value, decay, step):
"""Implements EMA without any warmup."""
del step
return ema_value * decay + current_value * (1 - decay)
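# Illustrative sketch, not part of the original file: the TF1-style warmup above
# caps the decay at (1 + step) / (10 + step), so early EMA updates track the raw
# parameters closely (decay 0.1 at step 0, ~0.91 at step 90) and only reach a
# nominal 0.9999 after roughly 90k steps.
def _example_ema_warmup(decay=0.9999, steps=(0, 90, 10_000)):
  return {step: float(jnp.minimum(decay, (1.0 + step) / (10.0 + step)))
          for step in steps}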
to_bf16 = lambda x: x.astype(jnp.bfloat16) if x.dtype == jnp.float32 else x
from_bf16 = lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x
def _replicate(x, devices=None):
"""Replicate an object on each device."""
x = jax.numpy.array(x)
if devices is None:
devices = jax.local_devices()
return jax.device_put_sharded(len(devices) * [x], devices)
def broadcast(obj):
"""Broadcasts an object to all devices."""
if obj is not None and not isinstance(obj, bool):
return _replicate(obj)
else:
return obj
def split_tree(tuple_tree, base_tree, n):
"""Splits tuple_tree with n-tuple leaves into n trees."""
return [tree.map_structure_up_to(base_tree, lambda x: x[i], tuple_tree) # pylint: disable=cell-var-from-loop
for i in range(n)]
def load_haiku_file(filename):
"""Loads a haiku parameter tree, using dill."""
with open(filename, 'rb') as in_file:
output = dill.load(in_file)
return output
def flatten_haiku_tree(haiku_dict):
"""Flattens a haiku parameter tree into a flat dictionary."""
out = {}
for module in haiku_dict.keys():
out_module = module.replace('/~/', '.').replace('/', '.')
for key in haiku_dict[module]:
out_key = f'{out_module}.{key}'
out[out_key] = haiku_dict[module][key]
return out
| deepmind-research-master | nfnets/utils.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
from ml_collections import config_dict
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as contrib_image
# pylint: disable=deprecated-method
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
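# A short sketch of blend's interpolation vs. extrapolation behaviour on
# constant images; the pixel values are illustrative assumptions.
def _blend_example():
  image1 = tf.cast(tf.fill([2, 2, 3], 100), tf.uint8)
  image2 = tf.cast(tf.fill([2, 2, 3], 200), tf.uint8)
  interpolated = blend(image1, image2, 0.5)   # every pixel becomes 150
  extrapolated = blend(image1, image2, 1.5)   # 250, clipped to [0, 255]
  return interpolated, extrapolated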
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
  chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies the size of the zero mask applied to the image. The
      mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract the pixel from 255.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
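# A quick sketch of solarize and solarize_add on a handful of pixel values,
# using the default threshold of 128; the input values are illustrative.
def _solarize_example():
  pixels = tf.constant([10, 120, 130, 250], tf.uint8)
  inverted_above = solarize(pixels)          # -> [10, 120, 125, 5]
  shifted_below = solarize_add(pixels, 100)  # -> [110, 220, 130, 250]
  return inverted_above, shifted_below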
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
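# Posterize keeps only the top `bits` bits of each pixel value. A worked
# sketch with illustrative inputs:
def _posterize_example():
  pixels = tf.constant([255, 200, 37], tf.uint8)
  # With bits=2 the shift is 6, so 255 -> 192, 200 -> 192 and 37 -> 0.
  return posterize(pixels, bits=2)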
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it
      will be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
    The image after autocontrast has been applied; it will be of type uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image,
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
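# A small sketch of autocontrast on a low-contrast synthetic image; the pixel
# range below is an illustrative assumption.
def _autocontrast_example():
  channel = tf.cast(tf.reshape(tf.range(50, 150), [10, 10]), tf.uint8)
  image = tf.stack([channel, channel, channel], axis=2)
  # Each channel's range [50, 149] is stretched to the full [0, 255].
  return autocontrast(image)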
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
    # Some augmentations that use depth-wise conv will crash when training on
    # GPU. See (b/156242594) for details.
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, filter out the zeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalizing by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
  Where there is a 0 in the last channel for a spatial position, the
  remaining three channels at that position are filled with the `replace`
  value (128 in this pipeline). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
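# A sketch of the wrap/unwrap round trip used by the geometric ops: wrap adds
# an all-ones alpha channel, the transform leaves zeros where pixels were
# vacated, and unwrap fills those pixels with the replace value. The image
# size, shift and replace value here are illustrative assumptions.
def _wrap_unwrap_example():
  image = tf.cast(tf.fill([32, 32, 3], 200), tf.uint8)
  # translate_x wraps the image, shifts it by 10 pixels and unwraps it,
  # filling the vacated pixels with gray (128).
  return translate_x(image, pixels=10, replace=[128, 128, 128])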
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'TranslateX': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
}
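# A worked sketch of the level-to-argument mapping, using the same hparams as
# the AutoAugment entry point below (cutout_const=100, translate_const=250).
def _level_to_arg_example():
  hparams = config_dict.ConfigDict(dict(cutout_const=100, translate_const=250))
  args = level_to_arg(hparams)
  # Deterministic mappings at the maximum level of 10:
  # 'Posterize' -> (4,), 'Color' -> (1.9,), 'Cutout' -> (100,).
  return args['Posterize'](10.), args['Color'](10.), args['Cutout'](10.)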
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.getargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
prob = 1.0
# pytype:enable=wrong-arg-types
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(
func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(
tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. The available
      options are `v0` and `test`. `v0` is the policy used for all of the
      results in the AutoAugment paper, while `test` is a single sub-policy
      intended only for debugging.
Returns:
    The augmented version of `image`.
"""
available_policies = {'v0': policy_v0,
'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=100, translate_const=250))
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
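# A minimal sketch of calling the AutoAugment entry point from a TF1 input
# pipeline; the decode step and image source are illustrative assumptions.
def _autoaugment_example(image_bytes):
  image = tf.image.decode_jpeg(image_bytes, channels=3)  # uint8, [H, W, 3]
  return distort_image_with_autoaugment(image, 'v0')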
def distort_image_with_randaugment(image, num_layers, magnitude):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
tf.logging.info('Using RandAug.')
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=40, translate_const=100))
available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize',
'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd']
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image = tf.cond(
tf.equal(i, op_to_select),
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args),
# pylint:enable=g-long-lambda
lambda: image)
return image
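# A matching sketch for the RandAugment entry point; the two layers and the
# magnitude of 9 are illustrative choices, not tuned values.
def _randaugment_example(image):
  # `image` is assumed to be a uint8 tensor of shape [H, W, 3].
  return distort_image_with_randaugment(image, num_layers=2, magnitude=9)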
| deepmind-research-master | nfnets/autoaugment.py |