# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.

from ...util.model import BenchmarkModel
from .build_sam import sam_model_registry
from .predictor import SamPredictor

from PIL import Image
import numpy as np
import cv2
from torchbenchmark.tasks import COMPUTER_VISION
import torch
import os


class Model(BenchmarkModel):
    task = COMPUTER_VISION.SEGMENTATION

    DEFAULT_EVAL_BSIZE = 32

    def __init__(self, test, device, batch_size=1, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)

        # Checkpoint options are here https://github.com/facebookresearch/segment-anything#model-checkpoints
        data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
        sam_checkpoint = os.path.join(data_folder, 'sam_vit_h_4b8939.pth')
        model_type = "vit_h"

        self.model = sam_model_registry[model_type](checkpoint=sam_checkpoint)
        self.model.to(device=device)

        data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
        image_path = os.path.join(data_folder, 'truck.jpg')
        self.image = cv2.imread(image_path)
        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        self.sample_image = torch.randn((3, 256, 256)).to(device)

    def get_module(self):
        example_input = [
            {
                'image': self.sample_image,
                'original_size': (256, 256),
            }
        ]
        multimask_output = False
        return self.model, (example_input, multimask_output)

    def train(self):
        error_msg = """
            As of May 17, 2023
            Some base VIT checkpoints are available for SAM but getting the dataset
            requires a research license. It's easy to make up a training loop on random
            data and if that's interesting please let @msaroufim know
            https://github.com/facebookresearch/segment-anything#dataset
        """
        return NotImplementedError(error_msg)

    def eval(self):
        # To test for bfloat16 uncomment the below line
        # predictor = SamPredictor(self.model.to(dtype=torch.bfloat16))
        predictor = SamPredictor(self.model)

        predictor.set_image(self.image)

        input_point = np.array([[500, 375]])
        input_label = np.array([1])

        masks, scores, logits = predictor.predict(
            point_coords=input_point,
            point_labels=input_label,
            multimask_output=True)

        return (masks,)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn

from typing import Type


class MLPBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.lin2(self.act(self.lin1(x)))


# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x
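# A minimal usage sketch (illustrative, not part of the original file): LayerNorm2d
# normalizes each spatial position over the channel dimension of an NCHW tensor,
# unlike nn.LayerNorm, which expects the normalized dimension to come last.
if __name__ == "__main__":
    norm = LayerNorm2d(num_channels=256)
    feats = torch.randn(2, 256, 8, 8)  # B x C x H x W, e.g. a small feature map
    out = norm(feats)
    # each (b, h, w) position is normalized across its 256 channels
    assert out.shape == feats.shape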
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import Tensor, nn

import math
from typing import Tuple, Type

from .common import MLPBlock


class TwoWayTransformer(nn.Module):
    def __init__(
        self,
        depth: int,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
    ) -> None:
        """
        A transformer decoder that attends to an input image using
        queries whose positional embedding is supplied.

        Args:
          depth (int): number of layers in the transformer
          embedding_dim (int): the channel dimension for the input embeddings
          num_heads (int): the number of heads for multihead attention. Must
            divide embedding_dim
          mlp_dim (int): the channel dimension internal to the MLP block
          activation (nn.Module): the activation to use in the MLP block
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()

        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    skip_first_layer_pe=(i == 0),
                )
            )

        self.final_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm_final_attn = nn.LayerNorm(embedding_dim)

    def forward(
        self,
        image_embedding: Tensor,
        image_pe: Tensor,
        point_embedding: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
          image_embedding (torch.Tensor): image to attend to. Should be shape
            B x embedding_dim x h x w for any h and w.
          image_pe (torch.Tensor): the positional encoding to add to the image. Must
            have the same shape as image_embedding.
          point_embedding (torch.Tensor): the embedding to add to the query points.
            Must have shape B x N_points x embedding_dim for any N_points.

        Returns:
          torch.Tensor: the processed point_embedding
          torch.Tensor: the processed image_embedding
        """
        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
        bs, c, h, w = image_embedding.shape
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)

        # Prepare queries
        queries = point_embedding
        keys = image_embedding

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=image_pe,
            )

        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)

        return queries, keys


class TwoWayAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.

        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)

        self.cross_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm2 = nn.LayerNorm(embedding_dim)

        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)

        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(
        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
    ) -> Tuple[Tensor, Tensor]:
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)

        # Cross attention block, tokens attending to image embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block, image embedding attending to tokens
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)

        return queries, keys


class Attention(nn.Module):
    """
    An attention layer that allows for downscaling the size of the embedding
    after projection to queries, keys, and values.
    """

    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
    ) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."

        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)

    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head

    def _recombine_heads(self, x: Tensor) -> Tensor:
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        # Attention
        _, _, _, c_per_head = q.shape
        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens
        attn = attn / math.sqrt(c_per_head)
        attn = torch.softmax(attn, dim=-1)

        # Get output
        out = attn @ v
        out = self._recombine_heads(out)
        out = self.out_proj(out)

        return out
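# Illustrative shape check (a sketch, not part of the original file): with a
# downsample_rate of 2 the Attention module projects 256-d tokens to a 128-d
# internal space and back, so inputs and outputs share the B x N x C layout.
if __name__ == "__main__":
    attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
    tokens = torch.randn(1, 7, 256)  # B x N_tokens x embedding_dim
    assert attn(q=tokens, k=tokens, v=tokens).shape == tokens.shape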
import os
import subprocess
import sys


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])


def download_checkpoint():
    subprocess.check_call(['wget', '-P', '.data', 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth'])


def download_data():
    subprocess.check_call(['wget', '-P', '.data', 'https://github.com/facebookresearch/segment-anything/raw/main/notebooks/images/truck.jpg'])


if __name__ == '__main__':
    pip_install_requirements()

    # Create .data folder in the script's directory
    data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
    os.makedirs(data_folder, exist_ok=True)

    # Download checkpoint and data files to the .data folder
    download_checkpoint()
    download_data()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F

from typing import Optional, Tuple, Type

from .common import LayerNorm2d, MLPBlock


# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
    def __init__(
        self,
        img_size: int = 1024,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        out_chans: int = 256,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_abs_pos: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        global_attn_indexes: Tuple[int, ...] = (),
    ) -> None:
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            global_attn_indexes (list): Indexes for blocks using global attention.
        """
        super().__init__()
        self.img_size = img_size

        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )

        self.pos_embed: Optional[nn.Parameter] = None
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
            )

        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                window_size=window_size if i not in global_attn_indexes else 0,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)

        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dim,
                out_chans,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
            nn.Conv2d(
                out_chans,
                out_chans,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + self.pos_embed

        for blk in self.blocks:
            x = blk(x)

        x = self.neck(x.permute(0, 3, 1, 2))

        return x


class Block(nn.Module):
    """Transformer blocks with support of window attention and residual propagation blocks"""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks. If it equals 0, then
                use global attention.
            input_size (tuple(int, int) or None): Input resolution for calculating the relative
                positional parameter size.
        """
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            use_rel_pos=use_rel_pos,
            rel_pos_zero_init=rel_pos_zero_init,
            input_size=input_size if window_size == 0 else (window_size, window_size),
        )

        self.norm2 = norm_layer(dim)
        self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)

        self.window_size = window_size

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = x
        x = self.norm1(x)
        # Window partition
        if self.window_size > 0:
            H, W = x.shape[1], x.shape[2]
            x, pad_hw = window_partition(x, self.window_size)

        x = self.attn(x)
        # Reverse window partition
        if self.window_size > 0:
            x = window_unpartition(x, self.window_size, pad_hw, (H, W))

        x = shortcut + x
        x = x + self.mlp(self.norm2(x))

        return x


class Attention(nn.Module):
    """Multi-head Attention block with relative position embeddings."""

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            input_size (tuple(int, int) or None): Input resolution for calculating the relative
                positional parameter size.
        """
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.use_rel_pos = use_rel_pos
        if self.use_rel_pos:
            assert (
                input_size is not None
            ), "Input size must be provided if using relative positional encoding."
            # initialize relative positional embeddings
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, H, W, _ = x.shape
        # qkv with shape (3, B, nHead, H * W, C)
        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        # q, k, v with shape (B * nHead, H * W, C)
        q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)

        attn = (q * self.scale) @ k.transpose(-2, -1)

        if self.use_rel_pos:
            attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))

        attn = attn.softmax(dim=-1)
        x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
        x = self.proj(x)

        return x


def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
    """
    Partition into non-overlapping windows with padding if needed.
    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.

    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition
    """
    B, H, W, C = x.shape

    pad_h = (window_size - H % window_size) % window_size
    pad_w = (window_size - W % window_size) % window_size
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows, (Hp, Wp)


def window_unpartition(
    windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
) -> torch.Tensor:
    """
    Window unpartition into original sequences and removing padding.
    Args:
        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.

    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
    x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)

    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x


def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    """
    Get relative positional embeddings according to the relative positions of
    query and key sizes.
    Args:
        q_size (int): size of query q.
        k_size (int): size of key k.
        rel_pos (Tensor): relative position embeddings (L, C).

    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    # Interpolate rel pos if needed.
    if rel_pos.shape[0] != max_rel_dist:
        # Interpolate rel pos.
        rel_pos_resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
    else:
        rel_pos_resized = rel_pos

    # Scale the coords with short length if shapes for q and k are different.
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

    return rel_pos_resized[relative_coords.long()]


def add_decomposed_rel_pos(
    attn: torch.Tensor,
    q: torch.Tensor,
    rel_pos_h: torch.Tensor,
    rel_pos_w: torch.Tensor,
    q_size: Tuple[int, int],
    k_size: Tuple[int, int],
) -> torch.Tensor:
    """
    Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
    Args:
        attn (Tensor): attention map.
        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).

    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    """
    q_h, q_w = q_size
    k_h, k_w = k_size
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)

    B, _, dim = q.shape
    r_q = q.reshape(B, q_h, q_w, dim)
    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)

    attn = (
        attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
    ).view(B, q_h * q_w, k_h * k_w)

    return attn


class PatchEmbed(nn.Module):
    """
    Image to Patch Embedding.
    """

    def __init__(
        self,
        kernel_size: Tuple[int, int] = (16, 16),
        stride: Tuple[int, int] = (16, 16),
        padding: Tuple[int, int] = (0, 0),
        in_chans: int = 3,
        embed_dim: int = 768,
    ) -> None:
        """
        Args:
            kernel_size (Tuple): kernel size of the projection layer.
            stride (Tuple): stride of the projection layer.
            padding (Tuple): padding size of the projection layer.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
        """
        super().__init__()

        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        # B C H W -> B H W C
        x = x.permute(0, 2, 3, 1)
        return x
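# A small round-trip sketch (illustrative, not part of the original file):
# window_partition pads H and W up to multiples of window_size, and
# window_unpartition crops that padding back off, so the two are inverses.
if __name__ == "__main__":
    x = torch.randn(1, 10, 10, 32)  # B x H x W x C, with H and W not divisible by 7
    windows, pad_hw = window_partition(x, window_size=7)  # -> (4, 7, 7, 32), pad_hw == (14, 14)
    restored = window_unpartition(windows, 7, pad_hw, (10, 10))
    assert torch.equal(restored, x)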
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from torch import nn

from typing import Any, Optional, Tuple, Type

from .common import LayerNorm2d


class PromptEncoder(nn.Module):
    def __init__(
        self,
        embed_dim: int,
        image_embedding_size: Tuple[int, int],
        input_image_size: Tuple[int, int],
        mask_in_chans: int,
        activation: Type[nn.Module] = nn.GELU,
    ) -> None:
        """
        Encodes prompts for input to SAM's mask decoder.

        Arguments:
          embed_dim (int): The prompts' embedding dimension
          image_embedding_size (tuple(int, int)): The spatial size of the
            image embedding, as (H, W).
          input_image_size (int): The padded size of the image as input
            to the image encoder, as (H, W).
          mask_in_chans (int): The number of hidden channels used for
            encoding input masks.
          activation (nn.Module): The activation to use when encoding
            input masks.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.input_image_size = input_image_size
        self.image_embedding_size = image_embedding_size
        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)

        self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners
        point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
        self.point_embeddings = nn.ModuleList(point_embeddings)
        self.not_a_point_embed = nn.Embedding(1, embed_dim)

        self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
        self.no_mask_embed = nn.Embedding(1, embed_dim)

    def get_dense_pe(self) -> torch.Tensor:
        """
        Returns the positional encoding used to encode point prompts,
        applied to a dense set of points the shape of the image encoding.

        Returns:
          torch.Tensor: Positional encoding with shape
            1x(embed_dim)x(embedding_h)x(embedding_w)
        """
        return self.pe_layer(self.image_embedding_size).unsqueeze(0)

    def _embed_points(
        self,
        points: torch.Tensor,
        labels: torch.Tensor,
        pad: bool,
    ) -> torch.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
            padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
            points = torch.cat([points, padding_point], dim=1)
            labels = torch.cat([labels, padding_label], dim=1)
        point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
        point_embedding[labels == -1] = 0.0
        point_embedding[labels == -1] += self.not_a_point_embed.weight
        point_embedding[labels == 0] += self.point_embeddings[0].weight
        point_embedding[labels == 1] += self.point_embeddings[1].weight
        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        coords = boxes.reshape(-1, 2, 2)
        corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
        corner_embedding[:, 0, :] += self.point_embeddings[2].weight
        corner_embedding[:, 1, :] += self.point_embeddings[3].weight
        return corner_embedding

    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
        """Embeds mask inputs."""
        mask_embedding = self.mask_downscaling(masks)
        return mask_embedding

    def _get_batch_size(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> int:
        """
        Gets the batch size of the output given the batch size of the input prompts.
        """
        if points is not None:
            return points[0].shape[0]
        elif boxes is not None:
            return boxes.shape[0]
        elif masks is not None:
            return masks.shape[0]
        else:
            return 1

    def _get_device(self) -> torch.device:
        return self.point_embeddings[0].weight.device

    def forward(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense
        embeddings.

        Arguments:
          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
            and labels to embed.
          boxes (torch.Tensor or none): boxes to embed
          masks (torch.Tensor or none): masks to embed

        Returns:
          torch.Tensor: sparse embeddings for the points and boxes, with shape
            BxNx(embed_dim), where N is determined by the number of input points
            and boxes.
          torch.Tensor: dense embeddings for the masks, in the shape
            Bx(embed_dim)x(embed_H)x(embed_W)
        """
        bs = self._get_batch_size(points, boxes, masks)
        sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
        if points is not None:
            coords, labels = points
            point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
            sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
        if boxes is not None:
            box_embeddings = self._embed_boxes(boxes)
            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)

        if masks is not None:
            dense_embeddings = self._embed_masks(masks)
        else:
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )

        return sparse_embeddings, dense_embeddings


class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding using random spatial frequencies.
    """

    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        if scale is None or scale <= 0.0:
            scale = 1.0
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )

    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points that are normalized to [0,1]."""
        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coords = 2 * coords - 1
        coords = coords.to(self.positional_encoding_gaussian_matrix.dtype)
        coords = coords @ self.positional_encoding_gaussian_matrix
        coords = 2 * np.pi * coords
        # outputs d_1 x ... x d_n x C shape
        return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)

    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate positional encoding for a grid of the specified size."""
        h, w = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        grid = torch.ones((h, w), device=device, dtype=torch.float32)
        y_embed = grid.cumsum(dim=0) - 0.5
        x_embed = grid.cumsum(dim=1) - 0.5
        y_embed = y_embed / h
        x_embed = x_embed / w

        pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
        return pe.permute(2, 0, 1)  # C x H x W

    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points that are not normalized to [0,1]."""
        coords = coords_input.clone()
        coords[:, :, 0] = coords[:, :, 0] / image_size[1]
        coords[:, :, 1] = coords[:, :, 1] / image_size[0]
        return self._pe_encoding(coords.to(torch.float))  # B x N x C
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn
from torch.nn import functional as F

from typing import Any, Dict, List, Tuple

from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder


class Sam(nn.Module):
    mask_threshold: float = 0.0
    image_format: str = "RGB"

    def __init__(
        self,
        image_encoder: ImageEncoderViT,
        prompt_encoder: PromptEncoder,
        mask_decoder: MaskDecoder,
        pixel_mean: List[float] = [123.675, 116.28, 103.53],
        pixel_std: List[float] = [58.395, 57.12, 57.375],
    ) -> None:
        """
        SAM predicts object masks from an image and input prompts.

        Arguments:
          image_encoder (ImageEncoderViT): The backbone used to encode the
            image into image embeddings that allow for efficient mask prediction.
          prompt_encoder (PromptEncoder): Encodes various types of input prompts.
          mask_decoder (MaskDecoder): Predicts masks from the image embeddings
            and encoded prompts.
          pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
          pixel_std (list(float)): Std values for normalizing pixels in the input image.
        """
        super().__init__()
        self.image_encoder = image_encoder
        self.prompt_encoder = prompt_encoder
        self.mask_decoder = mask_decoder
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)

    @property
    def device(self) -> Any:
        return self.pixel_mean.device

    @torch.no_grad()
    def forward(
        self,
        batched_input: List[Dict[str, Any]],
        multimask_output: bool,
    ) -> List[Dict[str, torch.Tensor]]:
        """
        Predicts masks end-to-end from provided images and prompts.
        If prompts are not known in advance, using SamPredictor is
        recommended over calling the model directly.

        Arguments:
          batched_input (list(dict)): A list over input images, each a
            dictionary with the following keys. A prompt key can be
            excluded if it is not present.
              'image': The image as a torch tensor in 3xHxW format,
                already transformed for input to the model.
              'original_size': (tuple(int, int)) The original size of
                the image before transformation, as (H, W).
              'point_coords': (torch.Tensor) Batched point prompts for
                this image, with shape BxNx2. Already transformed to the
                input frame of the model.
              'point_labels': (torch.Tensor) Batched labels for point prompts,
                with shape BxN.
              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
                Already transformed to the input frame of the model.
              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
                in the form Bx1xHxW.
          multimask_output (bool): Whether the model should predict multiple
            disambiguating masks, or return a single mask.

        Returns:
          (list(dict)): A list over input images, where each element is
            a dictionary with the following keys.
              'masks': (torch.Tensor) Batched binary mask predictions,
                with shape BxCxHxW, where B is the number of input prompts,
                C is determined by multimask_output, and (H, W) is the
                original size of the image.
              'iou_predictions': (torch.Tensor) The model's predictions
                of mask quality, in shape BxC.
              'low_res_logits': (torch.Tensor) Low resolution logits with
                shape BxCxHxW, where H=W=256. Can be passed as mask input
                to subsequent iterations of prediction.
        """
        input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
        image_embeddings = self.image_encoder(input_images)

        outputs = []
        for image_record, curr_embedding in zip(batched_input, image_embeddings):
            if "point_coords" in image_record:
                points = (image_record["point_coords"], image_record["point_labels"])
            else:
                points = None
            sparse_embeddings, dense_embeddings = self.prompt_encoder(
                points=points,
                boxes=image_record.get("boxes", None),
                masks=image_record.get("mask_inputs", None),
            )
            low_res_masks, iou_predictions = self.mask_decoder(
                image_embeddings=curr_embedding.unsqueeze(0),
                image_pe=self.prompt_encoder.get_dense_pe(),
                sparse_prompt_embeddings=sparse_embeddings,
                dense_prompt_embeddings=dense_embeddings,
                multimask_output=multimask_output,
            )
            masks = self.postprocess_masks(
                low_res_masks,
                input_size=image_record["image"].shape[-2:],
                original_size=image_record["original_size"],
            )
            masks = masks > self.mask_threshold
            outputs.append(
                {
                    "masks": masks,
                    "iou_predictions": iou_predictions,
                    "low_res_logits": low_res_masks,
                }
            )
        return outputs

    def postprocess_masks(
        self,
        masks: torch.Tensor,
        input_size: Tuple[int, ...],
        original_size: Tuple[int, ...],
    ) -> torch.Tensor:
        """
        Remove padding and upscale masks to the original image size.

        Arguments:
          masks (torch.Tensor): Batched masks from the mask_decoder,
            in BxCxHxW format.
          input_size (tuple(int, int)): The size of the image input to the
            model, in (H, W) format. Used to remove padding.
          original_size (tuple(int, int)): The original size of the image
            before resizing for input to the model, in (H, W) format.

        Returns:
          (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
            is given by original_size.
        """
        masks = F.interpolate(
            masks,
            (self.image_encoder.img_size, self.image_encoder.img_size),
            mode="bilinear",
            align_corners=False,
        )
        masks = masks[..., : input_size[0], : input_size[1]]
        masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
        return masks

    def preprocess(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize pixel values and pad to a square input."""
        # Normalize colors
        x = (x - self.pixel_mean) / self.pixel_std

        # Pad
        h, w = x.shape[-2:]
        padh = self.image_encoder.img_size - h
        padw = self.image_encoder.img_size - w
        x = F.pad(x, (0, padw, 0, padh))
        return x
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn
from torch.nn import functional as F

from typing import List, Tuple, Type

from .common import LayerNorm2d


class MaskDecoder(nn.Module):
    def __init__(
        self,
        *,
        transformer_dim: int,
        transformer: nn.Module,
        num_multimask_outputs: int = 3,
        activation: Type[nn.Module] = nn.GELU,
        iou_head_depth: int = 3,
        iou_head_hidden_dim: int = 256,
    ) -> None:
        """
        Predicts masks given an image and prompt embeddings, using a
        transformer architecture.

        Arguments:
          transformer_dim (int): the channel dimension of the transformer
          transformer (nn.Module): the transformer used to predict masks
          num_multimask_outputs (int): the number of masks to predict
            when disambiguating masks
          activation (nn.Module): the type of activation to use when
            upscaling masks
          iou_head_depth (int): the depth of the MLP used to predict
            mask quality
          iou_head_hidden_dim (int): the hidden dimension of the MLP
            used to predict mask quality
        """
        super().__init__()
        self.transformer_dim = transformer_dim
        self.transformer = transformer

        self.num_multimask_outputs = num_multimask_outputs

        self.iou_token = nn.Embedding(1, transformer_dim)
        self.num_mask_tokens = num_multimask_outputs + 1
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)

        self.output_upscaling = nn.Sequential(
            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
            LayerNorm2d(transformer_dim // 4),
            activation(),
            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
            activation(),
        )
        self.output_hypernetworks_mlps = nn.ModuleList(
            [
                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
                for i in range(self.num_mask_tokens)
            ]
        )

        self.iou_prediction_head = MLP(
            transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
        )

    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Arguments:
          image_embeddings (torch.Tensor): the embeddings from the image encoder
          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
          multimask_output (bool): Whether to return multiple masks or a single
            mask.

        Returns:
          torch.Tensor: batched predicted masks
          torch.Tensor: batched predictions of mask quality
        """
        masks, iou_pred = self.predict_masks(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
        )

        # Select the correct mask or masks for output
        if multimask_output:
            mask_slice = slice(1, None)
        else:
            mask_slice = slice(0, 1)
        masks = masks[:, mask_slice, :, :]
        iou_pred = iou_pred[:, mask_slice]

        # Prepare output
        return masks, iou_pred

    def predict_masks(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for more details."""
        # Concatenate output tokens
        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)

        # Expand per-image data in batch direction to be per-mask
        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        src = src + dense_prompt_embeddings
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape

        # Run the transformer
        tokens = tokens.to(src.dtype)
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]

        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)
        upscaled_embedding = self.output_upscaling(src)
        hyper_in_list: List[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
        hyper_in = torch.stack(hyper_in_list, dim=1)
        b, c, h, w = upscaled_embedding.shape
        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)

        # Generate mask quality predictions
        iou_pred = self.iou_prediction_head(iou_token_out)

        return masks, iou_pred


# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        sigmoid_output: bool = False,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.sigmoid_output = sigmoid_output

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        if self.sigmoid_output:
            x = F.sigmoid(x)
        return x
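# Small illustrative sketch (not from the original file): with the defaults used by the
# decoder above, the MLP head maps a 256-d mask token to a 32-d hypernetwork weight
# vector (transformer_dim // 8) through two ReLU hidden layers.
if __name__ == "__main__":
    head = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
    assert head(torch.randn(4, 256)).shape == (4, 32)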
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel

class Model(HuggingFaceModel):
    task = NLP.LANGUAGE_MODELING
    DEFAULT_TRAIN_BSIZE = 4
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="hf_Bert", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model

def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])

if __name__ == '__main__':
    pip_install_requirements()
    patch_transformers()
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
import torch
from . import tke_pytorch
from typing import Tuple

from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel


def _generate_inputs(size):
    import numpy as np
    import math

    np.random.seed(17)

    shape = (
        math.ceil(2 * size ** (1 / 3)),
        math.ceil(2 * size ** (1 / 3)),
        math.ceil(0.25 * size ** (1 / 3)),
    )

    # masks
    maskU, maskV, maskW = (
        (np.random.rand(*shape) < 0.8).astype("float64") for _ in range(3)
    )

    # 1d arrays
    dxt, dxu = (np.random.randn(shape[0]) for _ in range(2))
    dyt, dyu = (np.random.randn(shape[1]) for _ in range(2))
    dzt, dzw = (np.random.randn(shape[2]) for _ in range(2))
    cost, cosu = (np.random.randn(shape[1]) for _ in range(2))

    # 2d arrays
    kbot = np.random.randint(0, shape[2], size=shape[:2])
    forc_tke_surface = np.random.randn(*shape[:2])

    # 3d arrays
    kappaM, mxl, forc = (np.random.randn(*shape) for _ in range(3))

    # 4d arrays
    u, v, w, tke, dtke = (np.random.randn(*shape, 3) for _ in range(5))

    return (
        u, v, w, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw,
        cost, cosu, kbot, kappaM, mxl, forc, forc_tke_surface, tke, dtke,
    )


class TurbulentKineticEnergy(torch.nn.Module):
    def __init__(self, device):
        super(TurbulentKineticEnergy, self).__init__()
        self.device = device

    def forward(
        self,
        u, v, w, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw,
        cost, cosu, kbot, kappaM, mxl, forc, forc_tke_surface, tke, dtke,
    ):
        # tke and dtke will be modified in integrate_tke and generate inconsistent results
        # so clone them before passing them in
        return tke_pytorch.integrate_tke(
            u, v, w, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw,
            cost, cosu, kbot, kappaM, mxl, forc, forc_tke_surface,
            torch.clone(tke), torch.clone(dtke),
        )


class Model(BenchmarkModel):
    task = OTHER.OTHER_TASKS

    # Original input size: [2 ** i for i in range(12, 23, 2)]
    # Source: https://github.com/dionhaefner/pyhpc-benchmarks/blob/650ecc650e394df829944ffcf09e9d646ec69691/run.py#L25
    # Pick data-point when i = 20, size = 1048576
    DEFAULT_EVAL_BSIZE = 1048576
    ALLOW_CUSTOMIZE_BSIZE = False
    CANNOT_SET_CUSTOM_OPTIMIZER = True

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.model = TurbulentKineticEnergy(self.device).to(device=self.device)
        input_size = self.batch_size
        self.example_inputs = tuple(
            torch.from_numpy(x).to(self.device) for x in _generate_inputs(input_size)
        )

    def get_module(self):
        return self.model, self.example_inputs

    def train(self):
        raise NotImplementedError("Training not supported")

    def eval(self) -> Tuple[torch.Tensor]:
        model, example_inputs = self.get_module()
        with torch.no_grad():
            out = model(*example_inputs)
        return out
import torch


def solve_tridiag(a, b, c, d):
    """
    Solves a tridiagonal matrix system with diagonals a, b, c and RHS vector d.
    """
    assert a.shape == b.shape and a.shape == c.shape and a.shape == d.shape

    n = a.shape[-1]

    for i in range(1, n):
        w = a[..., i] / b[..., i - 1]
        b[..., i] += -w * c[..., i - 1]
        d[..., i] += -w * d[..., i - 1]

    out = torch.empty_like(a)
    out[..., -1] = d[..., -1] / b[..., -1]

    for i in range(n - 2, -1, -1):
        out[..., i] = (d[..., i] - c[..., i] * out[..., i + 1]) / b[..., i]

    return out


def solve_implicit(ks, a, b, c, d, b_edge):
    land_mask = (ks >= 0)[:, :, None]
    edge_mask = land_mask & (
        torch.arange(a.shape[2], device=ks.device)[None, None, :] == ks[:, :, None]
    )
    water_mask = land_mask & (
        torch.arange(a.shape[2], device=ks.device)[None, None, :] >= ks[:, :, None]
    )

    a_tri = water_mask * a * torch.logical_not(edge_mask)
    b_tri = torch.where(water_mask, b, 1.0)
    b_tri = torch.where(edge_mask, b_edge, b_tri)
    c_tri = water_mask * c
    d_tri = water_mask * d

    return solve_tridiag(a_tri, b_tri, c_tri, d_tri), water_mask


def _calc_cr(rjp, rj, rjm, vel):
    """
    Calculates cr value used in superbee advection scheme
    """
    eps = 1e-20  # prevent division by 0
    return torch.where(vel > 0.0, rjm, rjp) / torch.where(torch.abs(rj) < eps, eps, rj)


def pad_z_edges(arr):
    arr_shape = list(arr.shape)
    arr_shape[2] += 2
    out = torch.zeros(arr_shape, dtype=arr.dtype, device=arr.device)
    out[:, :, 1:-1] = arr
    return out


def limiter(cr):
    return torch.maximum(
        torch.tensor([0.0], device=cr.device),
        torch.maximum(
            torch.minimum(torch.tensor([1.0], device=cr.device), 2 * cr),
            torch.minimum(torch.tensor([2.0], device=cr.device), cr),
        ),
    )


def _adv_superbee(vel, var, mask, dx, axis: int, cost, cosu, dt_tracer: float):
    if axis == 0:
        dx = cost[None, 2:-2, None] * dx[1:-2, None, None]
        uCFL = torch.abs(vel[1:-2, 2:-2, :] * dt_tracer / dx)
        rjp = (var[3:, 2:-2, :] - var[2:-1, 2:-2, :]) * mask[2:-1, 2:-2, :]
        rj = (var[2:-1, 2:-2, :] - var[1:-2, 2:-2, :]) * mask[1:-2, 2:-2, :]
        rjm = (var[1:-2, 2:-2, :] - var[:-3, 2:-2, :]) * mask[:-3, 2:-2, :]
        cr = limiter(_calc_cr(rjp, rj, rjm, vel[1:-2, 2:-2, :]))
        return (
            vel[1:-2, 2:-2, :] * (var[2:-1, 2:-2, :] + var[1:-2, 2:-2, :]) * 0.5
            - torch.abs(vel[1:-2, 2:-2, :]) * ((1.0 - cr) + uCFL * cr) * rj * 0.5
        )
    elif axis == 1:
        dx = (cost * dx)[None, 1:-2, None]
        velfac = cosu[None, 1:-2, None]
        uCFL = torch.abs(velfac * vel[2:-2, 1:-2, :] * dt_tracer / dx)
        rjp = (var[2:-2, 3:, :] - var[2:-2, 2:-1, :]) * mask[2:-2, 2:-1, :]
        rj = (var[2:-2, 2:-1, :] - var[2:-2, 1:-2, :]) * mask[2:-2, 1:-2, :]
        rjm = (var[2:-2, 1:-2, :] - var[2:-2, :-3, :]) * mask[2:-2, :-3, :]
        cr = limiter(_calc_cr(rjp, rj, rjm, vel[2:-2, 1:-2, :]))
        return (
            velfac * vel[2:-2, 1:-2, :] * (var[2:-2, 2:-1, :] + var[2:-2, 1:-2, :]) * 0.5
            - torch.abs(velfac * vel[2:-2, 1:-2, :]) * ((1.0 - cr) + uCFL * cr) * rj * 0.5
        )
    elif axis == 2:
        vel, var, mask = [pad_z_edges(a) for a in (vel, var, mask)]
        dx = dx[None, None, :-1]
        uCFL = torch.abs(vel[2:-2, 2:-2, 1:-2] * dt_tracer / dx)
        rjp = (var[2:-2, 2:-2, 3:] - var[2:-2, 2:-2, 2:-1]) * mask[2:-2, 2:-2, 2:-1]
        rj = (var[2:-2, 2:-2, 2:-1] - var[2:-2, 2:-2, 1:-2]) * mask[2:-2, 2:-2, 1:-2]
        rjm = (var[2:-2, 2:-2, 1:-2] - var[2:-2, 2:-2, :-3]) * mask[2:-2, 2:-2, :-3]
        cr = limiter(_calc_cr(rjp, rj, rjm, vel[2:-2, 2:-2, 1:-2]))
        return (
            vel[2:-2, 2:-2, 1:-2] * (var[2:-2, 2:-2, 2:-1] + var[2:-2, 2:-2, 1:-2]) * 0.5
            - torch.abs(vel[2:-2, 2:-2, 1:-2]) * ((1.0 - cr) + uCFL * cr) * rj * 0.5
        )
    else:
        raise ValueError("axis must be 0, 1, or 2")


def adv_flux_superbee_wgrid(
    adv_fe, adv_fn, adv_ft, var, u_wgrid, v_wgrid, w_wgrid, maskW,
    dxt, dyt, dzw, cost, cosu, dt_tracer: float,
):
    """
    Calculates advection of a tracer defined on Wgrid
    """
    maskUtr = torch.zeros_like(maskW)
    maskUtr[:-1, :, :] = maskW[1:, :, :] * maskW[:-1, :, :]
    adv_fe[...] = 0.0
    adv_fe[1:-2, 2:-2, :] = _adv_superbee(
        u_wgrid, var, maskUtr, dxt, 0, cost, cosu, dt_tracer
    )

    maskVtr = torch.zeros_like(maskW)
    maskVtr[:, :-1, :] = maskW[:, 1:, :] * maskW[:, :-1, :]
    adv_fn[...] = 0.0
    adv_fn[2:-2, 1:-2, :] = _adv_superbee(
        v_wgrid, var, maskVtr, dyt, 1, cost, cosu, dt_tracer
    )

    maskWtr = torch.zeros_like(maskW)
    maskWtr[:, :, :-1] = maskW[:, :, 1:] * maskW[:, :, :-1]
    adv_ft[...] = 0.0
    adv_ft[2:-2, 2:-2, :-1] = _adv_superbee(
        w_wgrid, var, maskWtr, dzw, 2, cost, cosu, dt_tracer
    )


def integrate_tke(
    u, v, w, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw,
    cost, cosu, kbot, kappaM, mxl, forc, forc_tke_surface, tke, dtke,
):
    tau = 0
    taup1 = 1
    taum1 = 2

    dt_tracer = 1.0
    dt_mom = 1
    AB_eps = 0.1
    alpha_tke = 1.0
    c_eps = 0.7
    K_h_tke = 2000.0

    flux_east = torch.zeros_like(maskU)
    flux_north = torch.zeros_like(maskU)
    flux_top = torch.zeros_like(maskU)

    sqrttke = torch.sqrt(
        torch.maximum(torch.tensor([0.0], device=tke.device), tke[:, :, :, tau])
    )

    """
    integrate Tke equation on W grid with surface flux boundary condition
    """
    dt_tke = dt_mom  # use momentum time step to prevent spurious oscillations

    """
    vertical mixing and dissipation of TKE
    """
    ks = kbot[2:-2, 2:-2] - 1

    a_tri = torch.zeros_like(maskU[2:-2, 2:-2])
    b_tri = torch.zeros_like(maskU[2:-2, 2:-2])
    c_tri = torch.zeros_like(maskU[2:-2, 2:-2])
    d_tri = torch.zeros_like(maskU[2:-2, 2:-2])
    delta = torch.zeros_like(maskU[2:-2, 2:-2])

    delta[:, :, :-1] = (
        dt_tke
        / dzt[None, None, 1:]
        * alpha_tke
        * 0.5
        * (kappaM[2:-2, 2:-2, :-1] + kappaM[2:-2, 2:-2, 1:])
    )

    a_tri[:, :, 1:-1] = -delta[:, :, :-2] / dzw[None, None, 1:-1]
    a_tri[:, :, -1] = -delta[:, :, -2] / (0.5 * dzw[-1])

    b_tri[:, :, 1:-1] = (
        1
        + (delta[:, :, 1:-1] + delta[:, :, :-2]) / dzw[None, None, 1:-1]
        + dt_tke * c_eps * sqrttke[2:-2, 2:-2, 1:-1] / mxl[2:-2, 2:-2, 1:-1]
    )
    b_tri[:, :, -1] = (
        1
        + delta[:, :, -2] / (0.5 * dzw[-1])
        + dt_tke * c_eps / mxl[2:-2, 2:-2, -1] * sqrttke[2:-2, 2:-2, -1]
    )
    b_tri_edge = (
        1
        + delta / dzw[None, None, :]
        + dt_tke * c_eps / mxl[2:-2, 2:-2, :] * sqrttke[2:-2, 2:-2, :]
    )

    c_tri[:, :, :-1] = -delta[:, :, :-1] / dzw[None, None, :-1]

    d_tri[...] = tke[2:-2, 2:-2, :, tau] + dt_tke * forc[2:-2, 2:-2, :]
    d_tri[:, :, -1] += dt_tke * forc_tke_surface[2:-2, 2:-2] / (0.5 * dzw[-1])

    sol, water_mask = solve_implicit(ks, a_tri, b_tri, c_tri, d_tri, b_edge=b_tri_edge)
    tke[2:-2, 2:-2, :, taup1] = torch.where(water_mask, sol, tke[2:-2, 2:-2, :, taup1])

    """
    Add TKE if surface density flux drains TKE in uppermost box
    """
    tke_surf_corr = torch.zeros(maskU.shape[:2], device=maskU.device)
    mask = tke[2:-2, 2:-2, -1, taup1] < 0.0
    tke_surf_corr[2:-2, 2:-2] = torch.where(
        mask, -tke[2:-2, 2:-2, -1, taup1] * 0.5 * dzw[-1] / dt_tke, 0.0
    )
    tke[2:-2, 2:-2, -1, taup1] = torch.maximum(
        torch.tensor([0.0], device=tke.device), tke[2:-2, 2:-2, -1, taup1]
    )

    """
    add tendency due to lateral diffusion
    """
    flux_east[:-1, :, :] = (
        K_h_tke
        * (tke[1:, :, :, tau] - tke[:-1, :, :, tau])
        / (cost[None, :, None] * dxu[:-1, None, None])
        * maskU[:-1, :, :]
    )
    flux_east[-1, :, :] = 0.0

    flux_north[:, :-1, :] = (
        K_h_tke
        * (tke[:, 1:, :, tau] - tke[:, :-1, :, tau])
        / dyu[None, :-1, None]
        * maskV[:, :-1, :]
        * cosu[None, :-1, None]
    )
    flux_north[:, -1, :] = 0.0

    tke[2:-2, 2:-2, :, taup1] += (
        dt_tke
        * maskW[2:-2, 2:-2, :]
        * (
            (flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])
            / (cost[None, 2:-2, None] * dxt[2:-2, None, None])
            + (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])
            / (cost[None, 2:-2, None] * dyt[None, 2:-2, None])
        )
    )

    """
    add tendency due to advection
    """
    adv_flux_superbee_wgrid(
        flux_east, flux_north, flux_top, tke[:, :, :, tau],
        u[..., tau], v[..., tau], w[..., tau], maskW,
        dxt, dyt, dzw, cost, cosu, dt_tracer,
    )

    dtke[2:-2, 2:-2, :, tau] = maskW[2:-2, 2:-2, :] * (
        -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])
        / (cost[None, 2:-2, None] * dxt[2:-2, None, None])
        - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])
        / (cost[None, 2:-2, None] * dyt[None, 2:-2, None])
    )
    dtke[:, :, 0, tau] += -flux_top[:, :, 0] / dzw[0]
    dtke[:, :, 1:-1, tau] += -(flux_top[:, :, 1:-1] - flux_top[:, :, :-2]) / dzw[1:-1]
    dtke[:, :, -1, tau] += -(flux_top[:, :, -1] - flux_top[:, :, -2]) / (0.5 * dzw[-1])

    """
    Adam Bashforth time stepping
    """
    tke[:, :, :, taup1] += dt_tracer * (
        (1.5 + AB_eps) * dtke[:, :, :, tau] - (0.5 + AB_eps) * dtke[:, :, :, taum1]
    )

    return tke, dtke, tke_surf_corr


def prepare_inputs(*inputs, device):
    out = [torch.as_tensor(a, device=device) for a in inputs]
    if device == "gpu":
        torch.cuda.synchronize()
    return out


def run(*inputs, device="cpu"):
    with torch.no_grad():
        outputs = integrate_tke(*inputs)
    if device == "gpu":
        torch.cuda.synchronize()
    return outputs
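# Hedged sanity-check sketch (not part of the original file): solve_tridiag runs the
# Thomas algorithm over the last dimension, with a[..., i] the sub-diagonal, b the
# diagonal, and c the super-diagonal coefficient of row i. It mutates b and d in
# place, hence the clones below.
if __name__ == "__main__":
    n = 5
    a, c, d = torch.randn(4, n), torch.randn(4, n), torch.randn(4, n)
    b = 5.0 + torch.rand(4, n)  # diagonally dominant, so the system is well conditioned
    x = solve_tridiag(a, b.clone(), c, d.clone())
    dense = (
        torch.diag_embed(b)
        + torch.diag_embed(a[:, 1:], offset=-1)
        + torch.diag_embed(c[:, :-1], offset=1)
    )
    assert torch.allclose(dense @ x.unsqueeze(-1), d.unsqueeze(-1), atol=1e-4)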
if __name__ == "__main__":
    pass
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel

class Model(HuggingFaceModel):
    task = NLP.LANGUAGE_MODELING
    # Original train batch size per device: 8
    # Source: https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/run_t5_mlm_flax.py#L83
    DEFAULT_TRAIN_BSIZE = 8
    # Original eval batch size per device: 8
    # Downscale to 1 to fit in Nvidia T4 of the infra
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="hf_T5", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model

def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])

if __name__ == '__main__':
    pip_install_requirements()
    patch_transformers()
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
# Copyright (c) 2017 NVIDIA Corporation
import argparse
from math import sqrt

parser = argparse.ArgumentParser(description='RMSE_calculator')

parser.add_argument('--path_to_predictions', type=str, default="", metavar='N',
                    help='Path file with actual ratings and predictions')
parser.add_argument('--round', action='store_true',
                    help='round predictions to nearest')

args = parser.parse_args()
print(args)


def main():
    with open(args.path_to_predictions, 'r') as inpt:
        lines = inpt.readlines()
        n = 0
        denom = 0.0
        for line in lines:
            parts = line.split('\t')
            prediction = float(parts[2]) if not args.round else round(float(parts[2]))
            rating = float(parts[3])
            denom += (prediction - rating) * (prediction - rating)
            n += 1
        print("####################")
        print("RMSE: {}".format(sqrt(denom / n)))
        print("####################")


if __name__ == '__main__':
    main()
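# Illustrative note (not part of the original script): each input line is expected to be
# tab-separated with the prediction at column index 2 and the true rating at column
# index 3, and RMSE = sqrt(mean((prediction - rating)^2)). A tiny in-memory example:
#
#   rows = [("u1", "m1", 3.8, 4.0), ("u2", "m2", 2.1, 2.0)]
#   rmse = sqrt(sum((p - r) ** 2 for _, _, p, r in rows) / len(rows))  # ~0.158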
# Benchmark created from NVidia DeepRecommender github project:
#   https://github.com/NVIDIA/DeepRecommender
#   a32a8a5c23092c551616acf6fac5b32e1155d18b
# Test supports eval and train modes for cpu and cuda targets.
#
# Both nvtrain.py and nvinfer.py support all original command
# line parameters but tensorflow dependency for logging has
# been removed.

import torch
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import RECOMMENDATION
from typing import Tuple

import gc

from .nvtrain import DeepRecommenderTrainBenchmark
from .nvinfer import DeepRecommenderInferenceBenchmark


class Model(BenchmarkModel):
    task = RECOMMENDATION.RECOMMENDATION
    DEFAULT_TRAIN_BSIZE = 256
    DEFAULT_EVAL_BSIZE = 256

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.eval_mode = True if self.test == "eval" else False

        if test == "train":
            self.model = DeepRecommenderTrainBenchmark(device=self.device, jit=False, batch_size=self.batch_size)
        elif test == "eval":
            self.model = DeepRecommenderInferenceBenchmark(device=self.device, jit=False, batch_size=self.batch_size)

    def jit_callback(self):
        self.model.rencoder = torch.jit.trace(self.model.rencoder, (self.model.toyinputs, ))

    def get_module(self):
        if self.eval_mode:
            return self.model.rencoder, (self.model.toyinputs,)
        return self.model.rencoder, (self.model.toyinputs,)

    def set_module(self, new_model):
        self.model.rencoder = new_model

    def set_eval(self):
        self.eval_mode = True

    def set_train(self):
        self.eval_mode = False

    def get_optimizer(self):
        return self.model.get_optimizer()

    def set_optimizer(self, optimizer) -> None:
        self.model.set_optimizer(optimizer)

    def train(self):
        self.model.train()

    def eval(self) -> Tuple[torch.Tensor]:
        out = self.model.eval()
        return (out, )

    def timed_infer(self):
        self.model.TimedInferenceRun()

    def timed_train(self):
        self.model.TimedTrainingRun()
# Copyright (c) 2017 NVIDIA Corporation

# parameters to run benchmark on cpu
# --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_TEST --hidden_layers 512,512,1024 --non_linearity_type selu --save_path model_save/model.epoch_0 --drop_prob 0.8 --predictions_path preds.txt --nooutput --forcecpu
#
# parameters to run benchmark on cuda
# --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_TEST --hidden_layers 512,512,1024 --non_linearity_type selu --save_path model_save/model.epoch_0 --drop_prob 0.8 --predictions_path preds.txt --nooutput --forcecuda

import torch
import argparse
import copy
import time
import os

from .reco_encoder.data import input_layer
from .reco_encoder.model import model
from torch.autograd import Variable
from pathlib import Path
import torch.autograd.profiler as profiler


def getCommandLineArgs():
    parser = argparse.ArgumentParser(description='RecoEncoder')

    parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',
                        help='dropout drop probability')
    parser.add_argument('--constrained', action='store_true',
                        help='constrained autoencoder')
    parser.add_argument('--skip_last_layer_nl', action='store_true',
                        help='if present, decoder\'s last layer will not apply non-linearity function')
    parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N',
                        help='hidden layer sizes, comma-separated')
    parser.add_argument('--path_to_train_data', type=str, default="", metavar='N',
                        help='Path to training data')
    parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N',
                        help='Path to evaluation data')
    parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N',
                        help='type of the non-linearity used in activations')
    parser.add_argument('--save_path', type=str, default="autorec.pt", metavar='N',
                        help='where to save model')
    parser.add_argument('--predictions_path', type=str, default="out.txt", metavar='N',
                        help='where to save predictions')
    parser.add_argument('--batch_size', type=int, default=1, metavar='N',
                        help='inference batch size')
    parser.add_argument('--jit', action='store_true',
                        help='jit-ify model before running')
    parser.add_argument('--forcecuda', action='store_true',
                        help='force cuda use')
    parser.add_argument('--forcecpu', action='store_true',
                        help='force cpu use')
    parser.add_argument('--nooutput', action='store_true',
                        help='disable writing output to file')
    parser.add_argument('--silent', action='store_true',
                        help='disable output messages')
    parser.add_argument('--profile', action='store_true',
                        help='enable profiler and stat print')

    args = parser.parse_args()
    return args


def getBenchmarkArgs(forceCuda):
    class Args:
        pass

    args = Args()
    args.drop_prob = 0.8
    args.constrained = False
    args.skip_last_layer_nl = False
    args.hidden_layers = '512,512,1024'
    args.path_to_train_data = os.path.dirname(__file__) + '/Netflix/N1W_TRAIN'
    args.path_to_eval_data = os.path.dirname(__file__) + '/Netflix/N1W_TEST'
    args.non_linearity_type = 'selu'
    args.save_path = 'model_save/model.epoch_0'
    args.predictions_path = 'preds.txt'
    args.batch_size = 1
    args.jit = False
    args.forcecuda = forceCuda
    args.forcecpu = not forceCuda
    args.nooutput = True
    args.silent = True
    args.profile = False

    return args


def processArgState(args):
    if not args.silent:
        print(args)

    if args.forcecpu and args.forcecuda:
        print("Error, force cpu and cuda cannot both be set")
        quit()

    args.use_cuda = torch.cuda.is_available()  # global flag
    if not args.silent:
        if args.use_cuda:
            print('GPU is available.')
        else:
            print('GPU is not available.')

    if args.use_cuda and args.forcecpu:
        args.use_cuda = False

    if not args.silent:
        if args.use_cuda:
            print('Running On GPU')
        else:
            print('Running On CPU')

        if args.profile:
            print('Profiler Enabled')

    return args


class DeepRecommenderInferenceBenchmark:
    def __init__(self, device='cpu', jit=False, batch_size=256, usecommandlineargs=False):
        self.toytest = True
        self.batch_size = batch_size

        # number of movies in netflix training set.
        self.node_count = 197951

        if self.toytest:
            self.toyinputs = torch.randn(self.batch_size, self.node_count).to(device)

        if usecommandlineargs:
            self.args = getCommandLineArgs()
        else:
            if device == "cpu":
                forcecuda = False
            elif device == "cuda":
                forcecuda = True
            else:
                # unknown device string, quit init
                return

            self.args = getBenchmarkArgs(forcecuda)

        args = processArgState(self.args)

        self.params = dict()
        self.params['batch_size'] = self.args.batch_size
        self.params['data_dir'] = self.args.path_to_train_data
        self.params['major'] = 'users'
        self.params['itemIdInd'] = 1
        self.params['userIdInd'] = 0

        if not self.args.silent:
            print("Loading training data")

        if self.toytest == False:
            self.data_layer = input_layer.UserItemRecDataProvider(params=self.params)
            if not self.args.silent:
                print("Data loaded")
                print("Total items found: {}".format(len(self.data_layer.data.keys())))
                print("Vector dim: {}".format(self.data_layer.vector_dim))
                print("Loading eval data")

        self.eval_params = copy.deepcopy(self.params)
        # must set eval batch size to 1 to make sure no examples are missed
        self.eval_params['batch_size'] = 1
        self.eval_params['data_dir'] = self.args.path_to_eval_data

        if self.toytest:
            self.rencoder = model.AutoEncoder(layer_sizes=[self.node_count] + [int(l) for l in self.args.hidden_layers.split(',')],
                                              nl_type=self.args.non_linearity_type,
                                              is_constrained=self.args.constrained,
                                              dp_drop_prob=self.args.drop_prob,
                                              last_layer_activations=not self.args.skip_last_layer_nl)
        else:
            self.eval_data_layer = input_layer.UserItemRecDataProvider(params=self.eval_params,
                                                                       user_id_map=self.data_layer.userIdMap,
                                                                       item_id_map=self.data_layer.itemIdMap)

            self.rencoder = model.AutoEncoder(layer_sizes=[self.data_layer.vector_dim] + [int(l) for l in self.args.hidden_layers.split(',')],
                                              nl_type=self.args.non_linearity_type,
                                              is_constrained=self.args.constrained,
                                              dp_drop_prob=self.args.drop_prob,
                                              last_layer_activations=not self.args.skip_last_layer_nl)

            self.path_to_model = Path(self.args.save_path)
            if self.path_to_model.is_file():
                print("Loading model from: {}".format(self.path_to_model))
                self.rencoder.load_state_dict(torch.load(self.args.save_path))

        if not self.args.silent:
            print('######################################################')
            print('######################################################')
            print('############# AutoEncoder Model: #####################')
            print(self.rencoder)
            print('######################################################')
            print('######################################################')

        self.rencoder.eval()

        if self.args.use_cuda:
            self.rencoder = self.rencoder.cuda()

        if self.toytest == False:
            self.inv_userIdMap = {v: k for k, v in self.data_layer.userIdMap.items()}
            self.inv_itemIdMap = {v: k for k, v in self.data_layer.itemIdMap.items()}

            self.eval_data_layer.src_data = self.data_layer.data

    def eval(self, niter=1):
        for iteration in range(niter):
            if self.toytest:
                out = self.rencoder(self.toyinputs)
                continue

            for i, ((out, src), majorInd) in enumerate(self.eval_data_layer.iterate_one_epoch_eval(for_inf=True)):
                inputs = Variable(src.cuda().to_dense() if self.args.use_cuda else src.to_dense())
targets_np = out.to_dense().numpy()[0, :] out = self.rencoder(inputs) if not self.args.nooutput: self.outputs = out.cpu().data.numpy()[0, :] non_zeros = targets_np.nonzero()[0].tolist() major_key = self.inv_userIdMap [majorInd] with open(self.args.predictions_path, 'w') as outf: for ind in non_zeros: outf.write("{}\t{}\t{}\t{}\n".format(major_key, self.inv_itemIdMap[ind], self.outputs[ind], targets_np[ind])) if i % 10000 == 0: print("Done: {}".format(i)) return out def TimedInferenceRun(self) : print('Timed Inference Start') e_start_time = time.time() if self.args.profile: with profiler.profile(record_shapes=True, use_cuda=True) as prof: with profiler.record_function("Inference"): self.eval() else: self.eval() e_end_time = time.time() print('Timed Inference Complete') print('Inference finished in {} seconds' .format(e_end_time - e_start_time)) if self.args.profile: print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) prof.export_chrome_trace("trace.json") def main(): benchmarkCuda = DeepRecommenderInferenceBenchmark(device='cuda') benchmarkCuda.TimedInferenceRun() benchmarkCPU = DeepRecommenderInferenceBenchmark(device='cpu') benchmarkCPU.TimedInferenceRun() if __name__ == '__main__': main()
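# --- Illustrative sketch: the core inference step used above ---
# A hedged, self-contained toy example of what eval() does for one user: a
# sparse rating vector is densified and pushed through an autoencoder to score
# every item. The item count, the rated items, and the tiny stand-in network
# are illustrative assumptions, not the benchmark defaults.
def _example_score_one_user():
    import torch
    import torch.nn as nn

    num_items = 1000
    rated = [(3, 4.0), (17, 2.5), (42, 5.0)]                  # (item id, rating) pairs
    idx = torch.tensor([[0] * len(rated), [i for i, _ in rated]])
    val = torch.tensor([r for _, r in rated])
    src = torch.sparse_coo_tensor(idx, val, (1, num_items))    # sparse user vector

    toy_encoder = nn.Sequential(nn.Linear(num_items, 64), nn.SELU(), nn.Linear(64, num_items))
    with torch.no_grad():
        scores = toy_encoder(src.to_dense())                   # same densify-then-forward pattern as eval()
    return scores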
# Copyright (c) 2017 NVIDIA Corporation # to run against cuda: # --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_step 1 --noise_prob 0 --num_epochs 1 --summary_frequency 1000 --forcecuda # to run on cpu: # --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_step 1 --noise_prob 0 --num_epochs 1 --summary_frequency 1000 --forcecpu import torch import argparse from .reco_encoder.data import input_layer from .reco_encoder.model import model import torch.optim as optim from torch.optim.lr_scheduler import MultiStepLR import torch.nn as nn from torch.autograd import Variable import copy import time from pathlib import Path #from .logger import Logger from math import sqrt import numpy as np import os import torch.autograd.profiler as profiler def getTrainBenchmarkArgs() : class Args: pass args = Args() args.lr = 0.005 args.weight_decay = 0 args.drop_prob = 0.8 args.noise_prob = 0 args.batch_size = 128 args.summary_frequency = 1000 args.aug_step = 1 args.constrained = False args.skip_last_layer_nl = False args.num_epochs = 1 args.save_every = 3 args.optimizer = 'momentum' args.hidden_layers = '512,512,1024' args.gpu_ids = '0' args.path_to_train_data = os.path.dirname(__file__) + '/Netflix/N1W_TRAIN' args.path_to_eval_data = os.path.dirname(__file__) + '/Netflix/N1W_VALID' args.non_linearity_type = 'selu' args.logdir = 'model_save' args.nooutput = True args.silent = True args.forcecuda = False args.forcecpu = False args.profile = False return args def getTrainCommandLineArgs() : parser = argparse.ArgumentParser(description='RecoEncoder') parser.add_argument('--lr', type=float, default=0.00001, metavar='N', help='learning rate') parser.add_argument('--weight_decay', type=float, default=0.0, metavar='N', help='L2 weight decay') parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N', help='dropout drop probability') parser.add_argument('--noise_prob', type=float, default=0.0, metavar='N', help='noise probability') parser.add_argument('--batch_size', type=int, default=64, metavar='N', help='global batch size') parser.add_argument('--summary_frequency', type=int, default=100, metavar='N', help='how often to save summaries') parser.add_argument('--aug_step', type=int, default=-1, metavar='N', help='do data augmentation every X step') parser.add_argument('--constrained', action='store_true', help='constrained autoencoder') parser.add_argument('--skip_last_layer_nl', action='store_true', help='if present, decoder\'s last layer will not apply non-linearity function') parser.add_argument('--num_epochs', type=int, default=50, metavar='N', help='maximum number of epochs') parser.add_argument('--save_every', type=int, default=3, metavar='N', help='save every N number of epochs') parser.add_argument('--optimizer', type=str, default="momentum", metavar='N', help='optimizer kind: adam, momentum, adagrad or rmsprop') parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N', help='hidden layer sizes, comma-separated') parser.add_argument('--gpu_ids', type=str, default="0", metavar='N', help='comma-separated gpu ids to use for data parallel training') 
parser.add_argument('--path_to_train_data', type=str, default="", metavar='N', help='Path to training data') parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N', help='Path to evaluation data') parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N', help='type of the non-linearity used in activations') parser.add_argument('--logdir', type=str, default="logs", metavar='N', help='where to save model and write logs') parser.add_argument('--nooutput', action='store_true', help='disable writing output to file') parser.add_argument('--silent', action='store_true', help='disable all messages') parser.add_argument('--forcecuda', action='store_true', help='force cuda use') parser.add_argument('--forcecpu', action='store_true', help='force cpu use') parser.add_argument('--profile', action='store_true', help='enable profiler and stat print') args = parser.parse_args() return args def processTrainArgState(args) : if not args.silent: print(args) if args.forcecpu and args.forcecuda: print("Error, force cpu and cuda cannot both be set") quit() args.use_cuda = torch.cuda.is_available() # global flag if not args.silent: if args.use_cuda: print('GPU is available.') else: print('GPU is not available.') if args.use_cuda and args.forcecpu: args.use_cuda = False if not args.silent: if args.use_cuda: print('Running On CUDA') else: print('Running On CPU') return args def log_var_and_grad_summaries(logger, layers, global_step, prefix, log_histograms=False): """ Logs variable and grad stats for layer. Transfers data from GPU to CPU automatically :param logger: TB logger :param layers: param list :param global_step: global step for TB :param prefix: name prefix :param log_histograms: (default: False) whether or not log histograms :return: """ for ind, w in enumerate(layers): # Variables w_var = w.data.cpu().numpy() logger.scalar_summary("Variables/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_var), global_step) if log_histograms: logger.histo_summary(tag="Variables/{}_{}".format(prefix, ind), values=w.data.cpu().numpy(), step=global_step) # Gradients w_grad = w.grad.data.cpu().numpy() logger.scalar_summary("Gradients/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_grad), global_step) if log_histograms: logger.histo_summary(tag="Gradients/{}_{}".format(prefix, ind), values=w.grad.data.cpu().numpy(), step=global_step) def DoTrainEval(encoder, evaluation_data_layer, use_cuda): encoder.eval() denom = 0.0 total_epoch_loss = 0.0 for i, (eval, src) in enumerate(evaluation_data_layer.iterate_one_epoch_eval()): inputs = Variable(src.cuda().to_dense() if use_cuda else src.to_dense()) targets = Variable(eval.cuda().to_dense() if use_cuda else eval.to_dense()) outputs = encoder(inputs) loss, num_ratings = model.MSEloss(outputs, targets) total_epoch_loss += loss.item() denom += num_ratings.item() return sqrt(total_epoch_loss / denom) class DeepRecommenderTrainBenchmark: def __init__(self, device="cpu", jit=False, batch_size=256, processCommandLine = False): self.TrainInit(device, jit, batch_size, processCommandLine) def TrainInit(self, device="cpu", jit=False, batch_size=256, processCommandLine = False): # Force test to run in toy mode. Single call of fake data to model. self.toytest = True self.toybatch = batch_size # number of movies in netflix training set. 
self.toyvocab = 197951 self.toyinputs = torch.randn(self.toybatch, self.toyvocab) if (processCommandLine) : self.args = getTrainCommandLineArgs() else: self.args = getTrainBenchmarkArgs() if device == "cpu": forcecuda = False elif device == "cuda": forcecuda = True else: # unknown device string, quit init return self.args.forcecuda = forcecuda self.args.forcecpu = not forcecuda self.args = processTrainArgState(self.args) if self.toytest == False: self.logger = Logger(self.args.logdir) self.params = dict() self.params['batch_size'] = self.args.batch_size self.params['data_dir'] = self.args.path_to_train_data self.params['major'] = 'users' self.params['itemIdInd'] = 1 self.params['userIdInd'] = 0 if self.toytest == False: if not self.args.silent: print("Loading training data") self.data_layer = input_layer.UserItemRecDataProvider(params=self.params) if not self.args.silent: print("Data loaded") print("Total items found: {}".format(len(self.data_layer.data.keys()))) print("Vector dim: {}".format(self.data_layer.vector_dim)) print("Loading eval data") self.eval_params = copy.deepcopy(self.params) # must set eval batch size to 1 to make sure no examples are missed if self.toytest: self.rencoder = model.AutoEncoder(layer_sizes=[self.toyvocab] + [int(l) for l in self.args.hidden_layers.split(',')], nl_type=self.args.non_linearity_type, is_constrained=self.args.constrained, dp_drop_prob=self.args.drop_prob, last_layer_activations=not self.args.skip_last_layer_nl) else: self.eval_params['data_dir'] = self.args.path_to_eval_data self.eval_data_layer = input_layer.UserItemRecDataProvider(params=self.eval_params, user_id_map=self.data_layer.userIdMap, # the mappings are provided item_id_map=self.data_layer.itemIdMap) self.eval_data_layer.src_data = self.data_layer.data self.rencoder = model.AutoEncoder(layer_sizes=[self.data_layer.vector_dim] + [int(l) for l in self.args.hidden_layers.split(',')], nl_type=self.args.non_linearity_type, is_constrained=self.args.constrained, dp_drop_prob=self.args.drop_prob, last_layer_activations=not self.args.skip_last_layer_nl) os.makedirs(self.args.logdir, exist_ok=True) self.model_checkpoint = self.args.logdir + "/model" self.path_to_model = Path(self.model_checkpoint) if self.path_to_model.is_file(): print("Loading model from: {}".format(self.model_checkpoint)) self.rencoder.load_state_dict(torch.load(self.model_checkpoint)) if not self.args.silent: print('######################################################') print('######################################################') print('############# AutoEncoder Model: #####################') print(self.rencoder) print('######################################################') print('######################################################') if self.args.use_cuda: gpu_ids = [int(g) for g in self.args.gpu_ids.split(',')] if not self.args.silent: print('Using GPUs: {}'.format(gpu_ids)) if len(gpu_ids)>1: self.rencoder = nn.DataParallel(self.rencoder, device_ids=gpu_ids) self.rencoder = self.rencoder.cuda() self.toyinputs = self.toyinputs.to(device) if self.args.optimizer == "adam": self.optimizer = optim.Adam(self.rencoder.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay) elif self.args.optimizer == "adagrad": self.optimizer = optim.Adagrad(self.rencoder.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay) elif self.args.optimizer == "momentum": self.optimizer = optim.SGD(self.rencoder.parameters(), lr=self.args.lr, momentum=0.9, weight_decay=self.args.weight_decay) self.scheduler = 
MultiStepLR(self.optimizer, milestones=[24, 36, 48, 66, 72], gamma=0.5) elif args.optimizer == "rmsprop": self.optimizer = optim.RMSprop(self.rencoder.parameters(), lr=self.args.lr, momentum=0.9, weight_decay=self.args.weight_decay) else: raise ValueError('Unknown optimizer kind') self.t_loss = 0.0 self.t_loss_denom = 0.0 self.denom = 0.0 self.total_epoch_loss = 0.0 self.global_step = 0 if self.args.noise_prob > 0.0: self.dp = nn.Dropout(p=self.args.noise_prob) def get_optimizer(self): return self.optimizer def set_optimizer(self, optimizer): self.optimizer = optimizer def DoTrain(self): self.rencoder.train() #if self.args.optimizer == "momentum": # self.scheduler.step() for i, mb in enumerate(self.data_layer.iterate_one_epoch()): inputs = Variable(mb.cuda().to_dense() if self.args.use_cuda else mb.to_dense()) self.optimizer.zero_grad() outputs = self.rencoder(inputs) loss, num_ratings = model.MSEloss(outputs, inputs) loss = loss / num_ratings loss.backward() self.optimizer.step() self.global_step += 1 self.t_loss += loss.item() self.t_loss_denom += 1 if not self.args.nooutput: if i % self.args.summary_frequency == 0: print('[%d, %5d] RMSE: %.7f' % (self.epoch, i, sqrt(self.t_loss / self.t_loss_denom))) self.logger.scalar_summary("Training_RMSE", sqrt(self.t_loss/self.t_loss_denom), self.global_step) self.t_loss = 0 self.t_loss_denom = 0.0 log_var_and_grad_summaries(self.logger, self.rencoder.encode_w, self.global_step, "Encode_W") log_var_and_grad_summaries(self.logger, self.rencoder.encode_b, self.global_step, "Encode_b") if not self.rencoder.is_constrained: log_var_and_grad_summaries(self.logger, self.rencoder.decode_w, self.global_step, "Decode_W") log_var_and_grad_summaries(self.logger, self.rencoder.decode_b, self.global_step, "Decode_b") self.total_epoch_loss += loss.item() self.denom += 1 #if args.aug_step > 0 and i % args.aug_step == 0 and i > 0: if self.args.aug_step > 0: # Magic data augmentation trick happen here for t in range(self.args.aug_step): inputs = Variable(outputs.data) if self.args.noise_prob > 0.0: inputs = dp(inputs) self.optimizer.zero_grad() outputs = self.rencoder(inputs) loss, num_ratings = model.MSEloss(outputs, inputs) loss = loss / num_ratings loss.backward() self.optimizer.step() def train(self, niter=1) : for self.epoch in range(niter): if self.toytest: self.rencoder.train() self.optimizer.zero_grad() outputs = self.rencoder(self.toyinputs) loss, num_ratings = model.MSEloss(outputs, self.toyinputs) loss = loss / num_ratings loss.backward() self.optimizer.step() continue if not self.args.silent: print('Doing epoch {} of {}'.format(self.epoch, niter)) print('Timing Start') e_start_time = time.time() self.DoTrain() if not self.args.silent: e_end_time = time.time() print('Timing End') if self.args.profile: print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) prof.export_chrome_trace("trace.json") print('Total epoch {} finished in {} seconds with TRAINING RMSE loss: {}' .format(self.epoch, e_end_time - e_start_time, sqrt(self.total_epoch_loss/self.denom))) if not self.args.silent: self.logger.scalar_summary("Training_RMSE_per_epoch", sqrt(self.total_epoch_loss/self.denom), self.epoch) self.logger.scalar_summary("Epoch_time", e_end_time - e_start_time, self.epoch) if self.epoch % self.args.save_every == 0 or self.epoch == self.args.num_epochs - 1: eval_loss = DoTrainEval(self.rencoder, self.eval_data_layer, self.args.use_cuda) print('Epoch {} EVALUATION LOSS: {}'.format(self.epoch, eval_loss)) self.logger.scalar_summary("EVALUATION_RMSE", 
                                   eval_loss, self.epoch)
        print("Saving model to {}".format(self.model_checkpoint + ".epoch_" + str(self.epoch)))
        torch.save(self.rencoder.state_dict(), self.model_checkpoint + ".epoch_" + str(self.epoch))

    if not self.args.nooutput:
      print("Saving model to {}".format(self.model_checkpoint + ".last"))
      torch.save(self.rencoder.state_dict(), self.model_checkpoint + ".last")

      # save to onnx
      dummy_input = Variable(torch.randn(self.params['batch_size'], self.data_layer.vector_dim).type(torch.float))
      torch.onnx.export(self.rencoder.float(), dummy_input.cuda() if self.args.use_cuda else dummy_input,
                        self.model_checkpoint + ".onnx", verbose=True)
      print("ONNX model saved to {}!".format(self.model_checkpoint + ".onnx"))

  def TimedTrainingRun(self):
    if self.args.profile:
      with profiler.profile(record_shapes=True, use_cuda=self.args.use_cuda) as prof:
        with profiler.record_function("training_epoch"):
          self.train(self.args.num_epochs)
    else:
      self.train(self.args.num_epochs)


def main():
  gpuTrain = DeepRecommenderTrainBenchmark(device = 'cuda')
  gpuTrain.TimedTrainingRun()

  cpuTrain = DeepRecommenderTrainBenchmark(device = 'cpu')
  cpuTrain.TimedTrainingRun()


if __name__ == '__main__':
  main()
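# --- Illustrative sketch: one toy-mode training step ---
# A hedged, self-contained example of the step train() performs in toy mode:
# a forward pass on random dense ratings, a masked MSE normalized by the number
# of observed ratings, and an optimizer update. The sizes and the tiny network
# are assumptions; the real benchmark uses the AutoEncoder from reco_encoder.
def _example_toy_training_step():
    import torch
    import torch.nn as nn

    def masked_mse(outputs, targets):
        mask = (targets != 0).float()                     # only observed ratings contribute
        return ((outputs * mask - targets) ** 2).sum(), mask.sum()

    net = nn.Sequential(nn.Linear(512, 128), nn.SELU(), nn.Linear(128, 512))
    opt = torch.optim.SGD(net.parameters(), lr=0.005, momentum=0.9)

    ratings = torch.randn(4, 512)
    opt.zero_grad()
    loss, num_ratings = masked_mse(net(ratings), ratings)
    (loss / num_ratings).backward()
    opt.step()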
import subprocess
import sys


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])


if __name__ == '__main__':
    pip_install_requirements()
# Copyright (c) 2017 NVIDIA Corporation from os import listdir, path, makedirs import random import sys import time import datetime def print_stats(data): total_ratings = 0 print("STATS") for user in data: total_ratings += len(data[user]) print("Total Ratings: {}".format(total_ratings)) print("Total User count: {}".format(len(data.keys()))) def save_data_to_file(data, filename): with open(filename, 'w') as out: for userId in data: for record in data[userId]: out.write("{}\t{}\t{}\n".format(userId, record[0], record[1])) def create_NETFLIX_data_timesplit(all_data, train_min, train_max, test_min, test_max): """ Creates time-based split of NETFLIX data into train, and (validation, test) :param all_data: :param train_min: :param train_max: :param test_min: :param test_max: :return: """ train_min_ts = time.mktime(datetime.datetime.strptime(train_min,"%Y-%m-%d").timetuple()) train_max_ts = time.mktime(datetime.datetime.strptime(train_max, "%Y-%m-%d").timetuple()) test_min_ts = time.mktime(datetime.datetime.strptime(test_min, "%Y-%m-%d").timetuple()) test_max_ts = time.mktime(datetime.datetime.strptime(test_max, "%Y-%m-%d").timetuple()) training_data = dict() validation_data = dict() test_data = dict() train_set_items = set() for userId, userRatings in all_data.items(): time_sorted_ratings = sorted(userRatings, key=lambda x: x[2]) # sort by timestamp for rating_item in time_sorted_ratings: if rating_item[2] >= train_min_ts and rating_item[2] <= train_max_ts: if not userId in training_data: training_data[userId] = [] training_data[userId].append(rating_item) train_set_items.add(rating_item[0]) # keep track of items from training set elif rating_item[2] >= test_min_ts and rating_item[2] <= test_max_ts: if not userId in training_data: # only include users seen in the training set continue p = random.random() if p <=0.5: if not userId in validation_data: validation_data[userId] = [] validation_data[userId].append(rating_item) else: if not userId in test_data: test_data[userId] = [] test_data[userId].append(rating_item) # remove items not not seen in training set for userId, userRatings in test_data.items(): test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items] for userId, userRatings in validation_data.items(): validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items] return training_data, validation_data, test_data def main(args): user2id_map = dict() item2id_map = dict() userId = 0 itemId = 0 all_data = dict() folder = args[1] out_folder = args[2] # create necessary folders: for output_dir in [(out_folder + f) for f in [ "/N1W_TRAIN", "/N1W_VALID", "/N1W_TEST", "/N3M_TRAIN", "/N3M_VALID", "/N3M_TEST", "/N6M_TRAIN", "/N6M_VALID", "/N6M_TEST", "/N1Y_TRAIN", "/N1Y_VALID", "/N1Y_TEST", "/NF_TRAIN", "/NF_VALID", "/NF_TEST"]]: makedirs(output_dir, exist_ok=True) text_files = [path.join(folder, f) for f in listdir(folder) if path.isfile(path.join(folder, f)) and ('.txt' in f)] for text_file in text_files: with open(text_file, 'r') as f: print("Processing: {}".format(text_file)) lines = f.readlines() item = int(lines[0][:-2]) # remove newline and : if not item in item2id_map: item2id_map[item] = itemId itemId += 1 for rating in lines[1:]: parts = rating.strip().split(",") user = int(parts[0]) if not user in user2id_map: user2id_map[user] = userId userId += 1 rating = float(parts[1]) ts = int(time.mktime(datetime.datetime.strptime(parts[2],"%Y-%m-%d").timetuple())) if user2id_map[user] not in all_data: all_data[user2id_map[user]] = [] 
all_data[user2id_map[user]].append((item2id_map[item], rating, ts)) print("STATS FOR ALL INPUT DATA") print_stats(all_data) # Netflix 1 week, for benchmark (n1w_train, n1w_valid, n1w_test) = create_NETFLIX_data_timesplit(all_data, "2005-09-01", "2005-09-07", "2005-09-10", "2005-09-11") print("Netflix 1w train") print_stats(n1w_train) save_data_to_file(n1w_train, out_folder+"/N1W_TRAIN/n1w.train.txt") print("Netflix 1w valid") print_stats(n1w_valid) save_data_to_file(n1w_valid, out_folder + "/N1W_VALID/n1w.valid.txt") print("Netflix 1w test") print_stats(n1w_test) save_data_to_file(n1w_test, out_folder + "/N1W_TEST/n1w.test.txt") print("finished 1 week!") quit() # Netflix full (nf_train, nf_valid, nf_test) = create_NETFLIX_data_timesplit(all_data, "1999-12-01", "2005-11-30", "2005-12-01", "2005-12-31") print("Netflix full train") print_stats(nf_train) save_data_to_file(nf_train, out_folder + "/NF_TRAIN/nf.train.txt") print("Netflix full valid") print_stats(nf_valid) save_data_to_file(nf_valid, out_folder + "/NF_VALID/nf.valid.txt") print("Netflix full test") print_stats(nf_test) save_data_to_file(nf_test, out_folder + "/NF_TEST/nf.test.txt") (n3m_train, n3m_valid, n3m_test) = create_NETFLIX_data_timesplit(all_data, "2005-09-01", "2005-11-30", "2005-12-01", "2005-12-31") print("Netflix 3m train") print_stats(n3m_train) save_data_to_file(n3m_train, out_folder+"/N3M_TRAIN/n3m.train.txt") print("Netflix 3m valid") print_stats(n3m_valid) save_data_to_file(n3m_valid, out_folder + "/N3M_VALID/n3m.valid.txt") print("Netflix 3m test") print_stats(n3m_test) save_data_to_file(n3m_test, out_folder + "/N3M_TEST/n3m.test.txt") (n6m_train, n6m_valid, n6m_test) = create_NETFLIX_data_timesplit(all_data, "2005-06-01", "2005-11-30", "2005-12-01", "2005-12-31") print("Netflix 6m train") print_stats(n6m_train) save_data_to_file(n6m_train, out_folder+"/N6M_TRAIN/n6m.train.txt") print("Netflix 6m valid") print_stats(n6m_valid) save_data_to_file(n6m_valid, out_folder + "/N6M_VALID/n6m.valid.txt") print("Netflix 6m test") print_stats(n6m_test) save_data_to_file(n6m_test, out_folder + "/N6M_TEST/n6m.test.txt") # Netflix 1 year (n1y_train, n1y_valid, n1y_test) = create_NETFLIX_data_timesplit(all_data, "2004-06-01", "2005-05-31", "2005-06-01", "2005-06-30") print("Netflix 1y train") print_stats(n1y_train) save_data_to_file(n1y_train, out_folder + "/N1Y_TRAIN/n1y.train.txt") print("Netflix 1y valid") print_stats(n1y_valid) save_data_to_file(n1y_valid, out_folder + "/N1Y_VALID/n1y.valid.txt") print("Netflix 1y test") print_stats(n1y_test) save_data_to_file(n1y_test, out_folder + "/N1Y_TEST/n1y.test.txt") if __name__ == "__main__": main(sys.argv)
# Copyright (c) 2017 NVIDIA Corporation import sys import datetime import random from math import floor def print_stats(data): total_ratings = 0 print("STATS") for user in data: total_ratings += len(data[user]) print("Total Ratings: {}".format(total_ratings)) print("Total User count: {}".format(len(data.keys()))) def save_data_to_file(data, filename): with open(filename, 'w') as out: for userId in data: for record in data[userId]: out.write("{}\t{}\t{}\n".format(userId, record[0], record[1])) def main(args): inpt = args[1] out_prefix = args[2] percent = 0.7 user2id_map = dict() item2id_map = dict() userId = 0 itemId = 0 data = dict() min_ts = 100000000000 max_ts = 0 total_rating_count = 0 with open(inpt, 'r') as inpt_f: #ratings.csv headers: userId,movieId,rating,timestamp for line in inpt_f: if 'userId' in line: continue parts = line.split(',') user = int(parts[0]) item = int(parts[1]) rating = float(parts[2]) ts = int(parts[3]) if min_ts > ts: min_ts = ts if max_ts < ts: max_ts = ts if not user in user2id_map: user2id_map[user] = userId userId += 1 if not item in item2id_map: item2id_map[item] = itemId itemId += 1 total_rating_count += 1 if user2id_map[user] not in data: data[user2id_map[user]] = [] data[user2id_map[user]].append((item2id_map[item], rating, ts)) print("STATS") print("Total Ratings: {}".format(total_rating_count)) print("Total User count: {}".format(len(user2id_map))) print("Total Item count: {}".format(len(item2id_map))) print("Minimum ts: {}, which is {}".format(min_ts, datetime.datetime.fromtimestamp(min_ts).strftime('%Y-%m-%d'))) print("Maximum ts: {}, which is {}".format(max_ts, datetime.datetime.fromtimestamp(max_ts).strftime('%Y-%m-%d'))) training_data = dict() validation_data = dict() test_data = dict() train_set_items = set() for userId in data.keys(): if len(data[userId]) < 2: #print("WARNING, userId {} has less than 2 ratings, skipping user...".format(userId)) continue time_sorted_ratings = sorted(data[userId], key=lambda x: x[2]) # sort by timestamp last_train_ind = floor(percent * len(time_sorted_ratings)) training_data[userId] = time_sorted_ratings[:last_train_ind] for rating_item in time_sorted_ratings[:last_train_ind]: train_set_items.add(rating_item[0]) # keep track of items from training set p = random.random() if p <= 0.5: validation_data[userId] = time_sorted_ratings[last_train_ind:] else: test_data[userId] = time_sorted_ratings[last_train_ind:] # remove items not not seen in training set for userId, userRatings in test_data.items(): test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items] for userId, userRatings in validation_data.items(): validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items] print("Training Data") print_stats(training_data) save_data_to_file(training_data, out_prefix+".train") print("Validation Data") print_stats(validation_data) save_data_to_file(validation_data, out_prefix + ".valid") print("Test Data") print_stats(test_data) save_data_to_file(test_data, out_prefix + ".test") if __name__ == "__main__": main(sys.argv)
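# --- Illustrative sketch: the per-user chronological split performed above ---
# Hedged toy example (made-up ratings): the earliest 70% of each user's ratings
# go to training, the remainder goes to either validation or test at random,
# and items never seen in training are dropped from the held-out sets.
def _example_ratio_split():
    import random
    from math import floor

    data = {0: [(10, 4.0, 3), (11, 3.0, 1), (12, 5.0, 2)],    # user -> [(item, rating, ts)]
            1: [(10, 2.0, 5), (13, 4.5, 4), (14, 1.0, 6)]}

    train, valid, test, seen = {}, {}, {}, set()
    for user, ratings in data.items():
        ordered = sorted(ratings, key=lambda r: r[2])          # sort by timestamp
        cut = floor(0.7 * len(ordered))
        train[user] = ordered[:cut]
        seen.update(item for item, _, _ in ordered[:cut])
        bucket = valid if random.random() <= 0.5 else test
        bucket[user] = [r for r in ordered[cut:] if r[0] in seen]
    return train, valid, test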
# Copyright (c) 2017 NVIDIA Corporation
# Copyright (c) 2017 NVIDIA Corporation
# Copyright (c) 2017 NVIDIA Corporation import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as weight_init from torch.autograd import Variable def activation(input, kind): #print("Activation: {}".format(kind)) if kind == 'selu': return F.selu(input) elif kind == 'relu': return F.relu(input) elif kind == 'relu6': return F.relu6(input) elif kind == 'sigmoid': return F.sigmoid(input) elif kind == 'tanh': return F.tanh(input) elif kind == 'elu': return F.elu(input) elif kind == 'lrelu': return F.leaky_relu(input) elif kind == 'swish': return input*F.sigmoid(input) elif kind == 'none': return input else: raise ValueError('Unknown non-linearity type') def MSEloss(inputs, targets, size_average=False): mask = targets != 0 num_ratings = torch.sum(mask.float()) criterion = nn.MSELoss(reduction='sum' if not size_average else 'mean') return criterion(inputs * mask.float(), targets), Variable(torch.Tensor([1.0])) if size_average else num_ratings class AutoEncoder(nn.Module): def __init__(self, layer_sizes, nl_type='selu', is_constrained=True, dp_drop_prob=0.0, last_layer_activations=True): """ Describes an AutoEncoder model :param layer_sizes: Encoder network description. Should start with feature size (e.g. dimensionality of x). For example: [10000, 1024, 512] will result in: - encoder 2 layers: 10000x1024 and 1024x512. Representation layer (z) will be 512 - decoder 2 layers: 512x1024 and 1024x10000. :param nl_type: (default 'selu') Type of no-linearity :param is_constrained: (default: True) Should constrain decoder weights :param dp_drop_prob: (default: 0.0) Dropout drop probability :param last_layer_activations: (default: True) Whether to apply activations on last decoder layer """ super(AutoEncoder, self).__init__() self._dp_drop_prob = dp_drop_prob self._last_layer_activations = last_layer_activations if dp_drop_prob > 0: self.drop = nn.Dropout(dp_drop_prob) self._last = len(layer_sizes) - 2 self._nl_type = nl_type self.encode_w = nn.ParameterList( [nn.Parameter(torch.rand(layer_sizes[i + 1], layer_sizes[i])) for i in range(len(layer_sizes) - 1)]) for ind, w in enumerate(self.encode_w): weight_init.xavier_uniform_(w) self.encode_b = nn.ParameterList( [nn.Parameter(torch.zeros(layer_sizes[i + 1])) for i in range(len(layer_sizes) - 1)]) reversed_enc_layers = list(reversed(layer_sizes)) self.is_constrained = is_constrained if not is_constrained: self.decode_w = nn.ParameterList( [nn.Parameter(torch.rand(reversed_enc_layers[i + 1], reversed_enc_layers[i])) for i in range(len(reversed_enc_layers) - 1)]) for ind, w in enumerate(self.decode_w): nn.init.xavier_uniform_(w) self.decode_b = nn.ParameterList( [nn.Parameter(torch.zeros(reversed_enc_layers[i + 1])) for i in range(len(reversed_enc_layers) - 1)]) if False: print("******************************") print("******************************") print(layer_sizes) print("Dropout drop probability: {}".format(self._dp_drop_prob)) print("Encoder pass:") for ind, w in enumerate(self.encode_w): print(w.data.size()) print(self.encode_b[ind].size()) print("Decoder pass:") if self.is_constrained: print('Decoder is constrained') for ind, w in enumerate(list(reversed(self.encode_w))): print(w.transpose(0, 1).size()) print(self.decode_b[ind].size()) else: for ind, w in enumerate(self.decode_w): print(w.data.size()) print(self.decode_b[ind].size()) print("******************************") print("******************************") def encode(self, x): for ind, w in enumerate(self.encode_w): x = activation(input=F.linear(input=x, 
weight=w, bias=self.encode_b[ind]), kind=self._nl_type) if self._dp_drop_prob > 0: # apply dropout only on code layer x = self.drop(x) return x def decode(self, z): if False: #self.is_constrained: for ind, w in enumerate(list(reversed(self.encode_w))): # constrained autoencode re-uses weights from encoder z = activation(input=F.linear(input=z, weight=w.transpose(0, 1), bias=self.decode_b[ind]), # last layer or decoder should not apply non linearities kind=self._nl_type if ind!=self._last or self._last_layer_activations else 'none') #if self._dp_drop_prob > 0 and ind!=self._last: # and no dp on last layer # z = self.drop(z) else: for ind, w in enumerate(self.decode_w): z = activation(input=F.linear(input=z, weight=w, bias=self.decode_b[ind]), # last layer or decoder should not apply non linearities kind=self._nl_type if ind!=self._last or self._last_layer_activations else 'none') #if self._dp_drop_prob > 0 and ind!=self._last: # and no dp on last layer # z = self.drop(z) return z def forward(self, x): return self.decode(self.encode(x))
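# --- Illustrative usage sketch for the AutoEncoder and MSEloss defined above ---
# Hedged example: the layer sizes and batch contents are assumptions. MSEloss
# returns a sum over observed ratings plus their count, so the loss is divided
# by num_ratings before backprop, exactly as the training loop does.
def _example_autoencoder_forward():
    import torch

    net = AutoEncoder(layer_sizes=[1000, 128, 32],    # 1000-dim rating vector -> 32-dim code
                      nl_type='selu',
                      is_constrained=False,
                      dp_drop_prob=0.0)

    ratings = torch.zeros(2, 1000)
    ratings[0, 5] = 4.0
    ratings[1, 7] = 3.5

    recon = net(ratings)                              # decode(encode(x))
    loss, num_ratings = MSEloss(recon, ratings)       # masked MSE over observed ratings only
    return (loss / num_ratings).item()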
# Copyright (c) 2017 NVIDIA Corporation
# Copyright (c) 2017 NVIDIA Corporation """Data Layer Classes""" from os import listdir, path from random import shuffle import torch class UserItemRecDataProvider: def __init__(self, params, user_id_map=None, item_id_map=None): self._params = params self._data_dir = self.params['data_dir'] self._extension = ".txt" if 'extension' not in self.params else self.params['extension'] self._i_id = 0 if 'itemIdInd' not in self.params else self.params['itemIdInd'] self._u_id = 1 if 'userIdInd' not in self.params else self.params['userIdInd'] self._r_id = 2 if 'ratingInd' not in self.params else self.params['ratingInd'] self._major = 'items' if 'major' not in self.params else self.params['major'] if not (self._major == 'items' or self._major == 'users'): raise ValueError("Major must be 'users' or 'items', but got {}".format(self._major)) self._major_ind = self._i_id if self._major == 'items' else self._u_id self._minor_ind = self._u_id if self._major == 'items' else self._i_id self._delimiter = '\t' if 'delimiter' not in self.params else self.params['delimiter'] if user_id_map is None or item_id_map is None: self._build_maps() else: self._user_id_map = user_id_map self._item_id_map = item_id_map major_map = self._item_id_map if self._major == 'items' else self._user_id_map minor_map = self._user_id_map if self._major == 'items' else self._item_id_map self._vector_dim = len(minor_map) src_files = [path.join(self._data_dir, f) for f in listdir(self._data_dir) if path.isfile(path.join(self._data_dir, f)) and f.endswith(self._extension)] self._batch_size = self.params['batch_size'] self.data = dict() for source_file in src_files: with open(source_file, 'r') as src: for line in src.readlines(): parts = line.strip().split(self._delimiter) if len(parts)<3: raise ValueError('Encountered badly formatted line in {}'.format(source_file)) key = major_map[int(parts[self._major_ind])] value = minor_map[int(parts[self._minor_ind])] rating = float(parts[self._r_id]) #print("Key: {}, Value: {}, Rating: {}".format(key, value, rating)) if key not in self.data: self.data[key] = [] self.data[key].append((value, rating)) def _build_maps(self): self._user_id_map = dict() self._item_id_map = dict() src_files = [path.join(self._data_dir, f) for f in listdir(self._data_dir) if path.isfile(path.join(self._data_dir, f)) and f.endswith(self._extension)] u_id = 0 i_id = 0 for source_file in src_files: with open(source_file, 'r') as src: for line in src.readlines(): parts = line.strip().split(self._delimiter) if len(parts)<3: raise ValueError('Encountered badly formatted line in {}'.format(source_file)) u_id_orig = int(parts[self._u_id]) if u_id_orig not in self._user_id_map: self._user_id_map[u_id_orig] = u_id u_id += 1 i_id_orig = int(parts[self._i_id]) if i_id_orig not in self._item_id_map: self._item_id_map[i_id_orig] = i_id i_id += 1 def iterate_one_epoch(self): data = self.data keys = list(data.keys()) shuffle(keys) s_ind = 0 e_ind = self._batch_size while e_ind < len(keys): local_ind = 0 inds1 = [] inds2 = [] vals = [] for ind in range(s_ind, e_ind): inds2 += [v[0] for v in data[keys[ind]]] inds1 += [local_ind]*len([v[0] for v in data[keys[ind]]]) vals += [v[1] for v in data[keys[ind]]] local_ind += 1 i_torch = torch.LongTensor([inds1, inds2]) v_torch = torch.FloatTensor(vals) mini_batch = torch.sparse.FloatTensor(i_torch, v_torch, torch.Size([self._batch_size, self._vector_dim])) s_ind += self._batch_size e_ind += self._batch_size yield mini_batch def iterate_one_epoch_eval(self, for_inf=False): keys = 
list(self.data.keys()) s_ind = 0 while s_ind < len(keys): inds1 = [0] * len([v[0] for v in self.data[keys[s_ind]]]) inds2 = [v[0] for v in self.data[keys[s_ind]]] vals = [v[1] for v in self.data[keys[s_ind]]] src_inds1 = [0] * len([v[0] for v in self.src_data[keys[s_ind]]]) src_inds2 = [v[0] for v in self.src_data[keys[s_ind]]] src_vals = [v[1] for v in self.src_data[keys[s_ind]]] i_torch = torch.LongTensor([inds1, inds2]) v_torch = torch.FloatTensor(vals) src_i_torch = torch.LongTensor([src_inds1, src_inds2]) src_v_torch = torch.FloatTensor(src_vals) mini_batch = (torch.sparse.FloatTensor(i_torch, v_torch, torch.Size([1, self._vector_dim])), torch.sparse.FloatTensor(src_i_torch, src_v_torch, torch.Size([1, self._vector_dim]))) s_ind += 1 if not for_inf: yield mini_batch else: yield mini_batch, keys[s_ind - 1] @property def vector_dim(self): return self._vector_dim @property def userIdMap(self): return self._user_id_map @property def itemIdMap(self): return self._item_id_map @property def params(self): return self._params
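# --- Illustrative sketch: how iterate_one_epoch() packs a sparse mini-batch ---
# Hedged toy example (made-up numbers): row indices are positions within the
# batch, column indices are item ids, values are ratings, and the result is a
# (batch_size, vector_dim) sparse tensor that the training loop later densifies.
def _example_sparse_minibatch():
    import torch

    batch = {7: [(2, 4.0), (5, 3.0)],       # user -> [(item id, rating), ...]
             9: [(1, 5.0)]}
    vector_dim = 8                           # number of distinct items

    rows, cols, vals = [], [], []
    for local_ind, ratings in enumerate(batch.values()):
        for item, rating in ratings:
            rows.append(local_ind)
            cols.append(item)
            vals.append(rating)

    mini_batch = torch.sparse_coo_tensor(torch.tensor([rows, cols]),
                                         torch.tensor(vals),
                                         (len(batch), vector_dim))
    return mini_batch.to_dense()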
import os

from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model

MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))


class Model(Detectron2Model):
    task = COMPUTER_VISION.SEGMENTATION
    model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(variant="COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml", test=test, device=device,
                         batch_size=batch_size, extra_args=extra_args)
import os

from torchbenchmark.util.framework.detectron2 import install_detectron2

MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))

if __name__ == '__main__':
    install_detectron2(MODEL_NAME, MODEL_DIR)
import torch import torch.optim as optim import torch.nn as nn import torch.utils.data as data import torchvision.models as models from opacus import PrivacyEngine from opacus.validators.module_validator import ModuleValidator from typing import Tuple from ...util.model import BenchmarkModel from torchbenchmark.tasks import OTHER class Model(BenchmarkModel): task = OTHER.OTHER_TASKS DEFAULT_TRAIN_BSIZE = 64 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, batch_size=None, extra_args=[]): # disable torchdynamo-fx2trt because it never terminates if "--torchdynamo" in extra_args and "fx2trt" in extra_args: raise NotImplementedError("TorchDynamo Fx2trt is not supported because of hanging issue. " "See: https://github.com/facebookresearch/torchdynamo/issues/109") super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.model = models.resnet18(num_classes=10) self.model = ModuleValidator.fix(self.model) self.model = self.model.to(device) # Cifar10 images are 32x32 and have 10 classes self.example_inputs = ( torch.randn((self.batch_size, 3, 32, 32), device=self.device), ) self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device) dataset = data.TensorDataset(self.example_inputs[0], self.example_target) self.dummy_loader = data.DataLoader(dataset, batch_size=self.batch_size) self.noise_multiplier: float=1.0 self.max_grad_norm: float=1.0 self.poisson_sampling: bool=False self.optimizer = optim.Adam(self.model.parameters(), lr=0.001) self.criterion = nn.CrossEntropyLoss() self.privacy_engine = PrivacyEngine() self.model, self.optimizer, _ = self.privacy_engine.make_private( module=self.model, optimizer=self.optimizer, data_loader=self.dummy_loader, noise_multiplier=self.noise_multiplier, max_grad_norm=self.max_grad_norm, poisson_sampling=self.poisson_sampling, ) def get_module(self): return self.model, self.example_inputs def get_optimizer(self): return self.optimizer def set_optimizer(self, optimizer) -> None: self.optimizer = optimizer self.model, self.optimizer, _ = self.privacy_engine.make_private( module=self.model, optimizer=self.optimizer, data_loader=self.dummy_loader, noise_multiplier=1.0, max_grad_norm=1.0, poisson_sampling=False, ) def train(self): model = self.model (images, ) = self.example_inputs model.train() targets = self.example_target output = model(images) loss = self.criterion(output, targets) loss.backward() self.optimizer.step() self.optimizer.zero_grad() def eval(self) -> Tuple[torch.Tensor]: model = self.model (images, ) = self.example_inputs model.eval() targets = self.example_target with torch.no_grad(): out = model(images) return (out, )
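# --- Illustrative sketch: querying the privacy budget after DP-SGD steps ---
# Because make_private() above wraps the optimizer, every optimizer.step()
# clips per-sample gradients and adds Gaussian noise. A hedged sketch of
# reading the accumulated (epsilon, delta) afterwards; it assumes the Opacus
# accountant exposes get_epsilon(delta), as recent Opacus releases do, and the
# delta value here is an arbitrary choice for illustration.
def _example_privacy_budget(device="cpu", steps=3, delta=1e-5):
    m = Model(test="train", device=device)
    for _ in range(steps):
        m.train()                                    # one noisy, clipped update per call
    return m.privacy_engine.get_epsilon(delta)       # spent epsilon at the given delta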
import subprocess
import sys


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])


if __name__ == '__main__':
    pip_install_requirements()
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from .model import GPT, SequenceGeneratorNanoGPT, GPTConfig, GPTGenerationConfig


class Model(BenchmarkModel):
    task = NLP.GENERATION
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)

        # Use the default configs
        self.gpt_config = GPTConfig()
        self.generator_config = GPTGenerationConfig(32, 0.8, 200)
        self.model = SequenceGeneratorNanoGPT(GPT(self.gpt_config), self.generator_config).eval().to(self.device)
        self.prompt_size = 64
        self.example_inputs = (
            torch.randint(1, self.gpt_config.vocab_size, (self.batch_size, self.prompt_size)).to(self.device),
        )

    def get_module(self):
        return self.model, self.example_inputs

    def train(self):
        return NotImplementedError("Training not supported for this model")

    def eval(self):
        with torch.no_grad():
            out = self.model(*self.example_inputs)
        return (out,)
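# --- Illustrative sketch: driving the generator wrapper outside the harness ---
# Hedged example with a deliberately tiny config so it runs quickly on CPU; the
# config values and prompt length are assumptions, not the benchmark defaults
# (the benchmark uses the stock GPTConfig and a 64-token prompt).
def _example_tiny_generation():
    import torch

    cfg = GPTConfig(block_size=128, vocab_size=512, n_layer=2, n_head=2, n_embd=64)
    gen_cfg = GPTGenerationConfig(max_new_tokens=8, temperature=0.8, top_k=50)
    generator = SequenceGeneratorNanoGPT(GPT(cfg), gen_cfg).eval()

    prompt = torch.randint(1, cfg.vocab_size, (1, 16))
    with torch.no_grad():
        out = generator(prompt)       # shape (1, 16 + 8): prompt plus new tokens
    return out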
""" Full definition of a GPT Language Model, all of it in this single file. References: 1) the official GPT-2 TensorFlow implementation released by OpenAI: https://github.com/openai/gpt-2/blob/master/src/model.py 2) huggingface/transformers PyTorch implementation: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py """ import math import inspect from typing import Optional from dataclasses import dataclass import torch import torch.nn as nn from torch.nn import functional as F # @torch.jit.script # good to enable when not using torch.compile, disable when using (our default) def new_gelu(x): """ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) class LayerNorm(nn.Module): """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """ def __init__(self, ndim, bias): super().__init__() self.weight = nn.Parameter(torch.ones(ndim)) self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None def forward(self, input): return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() assert config.n_embd % config.n_head == 0 # key, query, value projections for all heads, but in a batch self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) # output projection self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) # regularization self.attn_dropout = nn.Dropout(config.dropout) self.resid_dropout = nn.Dropout(config.dropout) self.n_head = config.n_head self.n_embd = config.n_embd self.dropout = config.dropout # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0 self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') if not self.flash: print("WARNING: using slow attention. 
Flash Attention requires PyTorch >= 2.0") # causal mask to ensure that attention is only applied to the left in the input sequence self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)) .view(1, 1, config.block_size, config.block_size)) def forward(self, x): B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) # calculate query, key, values for all heads in batch and move head forward to be the batch dim q, k, v = self.c_attn(x).split(self.n_embd, dim=2) k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) if self.flash: # efficient attention using Flash Attention CUDA kernels y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True) else: # manual implementation of attention att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf')) att = F.softmax(att, dim=-1) att = self.attn_dropout(att) y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side # output projection y = self.resid_dropout(self.c_proj(y)) return y class MLP(nn.Module): def __init__(self, config): super().__init__() self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias) self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias) self.dropout = nn.Dropout(config.dropout) def forward(self, x): x = self.c_fc(x) x = new_gelu(x) x = self.c_proj(x) x = self.dropout(x) return x class Block(nn.Module): def __init__(self, config): super().__init__() self.ln_1 = LayerNorm(config.n_embd, bias=config.bias) self.attn = CausalSelfAttention(config) self.ln_2 = LayerNorm(config.n_embd, bias=config.bias) self.mlp = MLP(config) def forward(self, x): x = x + self.attn(self.ln_1(x)) x = x + self.mlp(self.ln_2(x)) return x @dataclass class GPTConfig: block_size: int = 1024 vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency n_layer: int = 12 n_head: int = 12 n_embd: int = 768 dropout: float = 0.0 bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster @dataclass class GPTGenerationConfig: max_new_tokens: int = 512 # max number of new tokens to generate temperature: float = 1.0 # temperature for sampling. > 1.0: more exploring, < 1.0: more conservative. top_k: Optional[int] = None # top_k > 0: keep only top k tokens with highest probability (top-k filtering). 
class GPT(nn.Module): def __init__(self, config): super().__init__() assert config.vocab_size is not None assert config.block_size is not None self.config = config self.transformer = nn.ModuleDict(dict( wte = nn.Embedding(config.vocab_size, config.n_embd), wpe = nn.Embedding(config.block_size, config.n_embd), drop = nn.Dropout(config.dropout), h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]), ln_f = LayerNorm(config.n_embd, bias=config.bias), )) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # with weight tying when using torch.compile() some warnings get generated: # "UserWarning: functional_call was passed multiple values for tied weights. # This behavior is deprecated and will be an error in future versions" # not 100% sure what this is, so far seems to be harmless. TODO investigate self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying # init all weights self.apply(self._init_weights) # apply special scaled init to the residual projections, per GPT-2 paper for pn, p in self.named_parameters(): if pn.endswith('c_proj.weight'): torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer)) # report number of parameters print("number of parameters: %.2fM" % (self.get_num_params()/1e6,)) def get_num_params(self, non_embedding=True): """ Return the number of parameters in the model. For non-embedding count (default), the position embeddings get subtracted. The token embeddings would too, except due to the parameter sharing these params are actually used as weights in the final layer, so we include them. """ n_params = sum(p.numel() for p in self.parameters()) if non_embedding: n_params -= self.transformer.wpe.weight.numel() return n_params def _init_weights(self, module): if isinstance(module, nn.Linear): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) if module.bias is not None: torch.nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) def forward(self, idx, targets=None): device = idx.device b, t = idx.size() assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t) # forward the GPT model itself tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd) x = self.transformer.drop(tok_emb + pos_emb) for block in self.transformer.h: x = block(x) x = self.transformer.ln_f(x) if targets is not None: # if we are given some desired targets also calculate the loss logits = self.lm_head(x) loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1) else: # inference-time mini-optimization: only forward the lm_head on the very last position logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim loss = None return logits, loss def crop_block_size(self, block_size): # model surgery to decrease the block size if necessary # e.g. 
we may load the GPT2 pretrained model checkpoint (block size 1024) # but want to use a smaller block size for some smaller, simpler model assert block_size <= self.config.block_size self.config.block_size = block_size self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size]) for block in self.transformer.h: if hasattr(block.attn, 'bias'): block.attn.bias = block.attn.bias[:,:,:block_size,:block_size] @classmethod def from_pretrained(cls, model_type, override_args=None): assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'} override_args = override_args or {} # default to empty dict # only dropout can be overridden see more notes below assert all(k == 'dropout' for k in override_args) from transformers import GPT2LMHeadModel print("loading weights from pretrained gpt: %s" % model_type) # n_layer, n_head and n_embd are determined from model_type config_args = { 'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params 'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params 'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params 'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params }[model_type] print("forcing vocab_size=50257, block_size=1024, bias=True") config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints config_args['bias'] = True # always True for GPT model checkpoints # we can override the dropout rate, if desired if 'dropout' in override_args: print(f"overriding dropout rate to {override_args['dropout']}") config_args['dropout'] = override_args['dropout'] # create a from-scratch initialized minGPT model config = GPTConfig(**config_args) model = GPT(config) sd = model.state_dict() sd_keys = sd.keys() sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param # init a huggingface/transformers model model_hf = GPT2LMHeadModel.from_pretrained(model_type) sd_hf = model_hf.state_dict() # copy while ensuring all of the parameters are aligned and match in names and shapes sd_keys_hf = sd_hf.keys() sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer) transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight'] # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear # this means that we have to transpose these weights when we import them assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}" for k in sd_keys_hf: if any(k.endswith(w) for w in transposed): # special treatment for the Conv1D weights we need to transpose assert sd_hf[k].shape[::-1] == sd[k].shape with torch.no_grad(): sd[k].copy_(sd_hf[k].t()) else: # vanilla copy over the other parameters assert sd_hf[k].shape == sd[k].shape with torch.no_grad(): sd[k].copy_(sd_hf[k]) return model def configure_optimizers(self, weight_decay, learning_rate, betas, device_type): # start with all of the candidate parameters param_dict = {pn: p for pn, p in self.named_parameters()} # filter out those that do not require grad param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad} # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no. # i.e. 
all weight tensors in matmuls + embeddings decay, all biases and layernorms don't. decay_params = [p for n, p in param_dict.items() if p.dim() >= 2] nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2] optim_groups = [ {'params': decay_params, 'weight_decay': weight_decay}, {'params': nodecay_params, 'weight_decay': 0.0} ] num_decay_params = sum(p.numel() for p in decay_params) num_nodecay_params = sum(p.numel() for p in nodecay_params) print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters") print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters") # Create AdamW optimizer and use the fused version if it is available fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters use_fused = fused_available and device_type == 'cuda' extra_args = dict(fused=True) if use_fused else dict() optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args) print(f"using fused AdamW: {use_fused}") return optimizer def estimate_mfu(self, fwdbwd_per_iter, dt): """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """ # first estimate the number of flops we do per iteration. # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311 N = self.get_num_params() cfg = self.config L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size flops_per_token = 6*N + 12*L*H*Q*T flops_per_fwdbwd = flops_per_token * T flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter # express our flops throughput as ratio of A100 bfloat16 peak flops flops_achieved = flops_per_iter * (1.0/dt) # per second flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS mfu = flops_achieved / flops_promised return mfu @torch.no_grad() def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None): """ Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete the sequence max_new_tokens times, feeding the predictions back into the model each time. Most likely you'll want to make sure to be in model.eval() mode of operation for this. """ for _ in range(max_new_tokens): # if the sequence context is growing too long we must crop it at block_size idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:] # forward the model to get the logits for the index in the sequence logits, _ = self(idx_cond) # pluck the logits at the final step and scale by desired temperature logits = logits[:, -1, :] / temperature # optionally crop the logits to only the top k options if top_k is not None: v, _ = torch.topk(logits, min(top_k, logits.size(-1))) logits[logits < v[:, [-1]]] = -float('Inf') # apply softmax to convert logits to (normalized) probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution idx_next = torch.multinomial(probs, num_samples=1) # append sampled index to the running sequence and continue idx = torch.cat((idx, idx_next), dim=1) return idx class SequenceGeneratorNanoGPT(nn.Module): def __init__(self, model, generate_config) -> None: super().__init__() self.base_model: GPT = model self.generate_config: GPTGenerationConfig = generate_config def forward(self, idx): return self.base_model.generate(idx, self.generate_config.max_new_tokens, self.generate_config.temperature, self.generate_config.top_k)
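# --- Illustrative sketch: the sampling step inside generate() ---
# A hedged, stand-alone repeat of the per-token logic above: temperature
# scaling, top-k filtering, softmax, and a multinomial draw. The vocabulary
# size and the chosen temperature/top_k are toy values.
def _example_sample_next_token():
    import torch
    import torch.nn.functional as F

    logits = torch.randn(1, 50)                    # logits over a 50-token vocabulary
    temperature, top_k = 0.8, 5

    logits = logits / temperature
    v, _ = torch.topk(logits, top_k)
    logits[logits < v[:, [-1]]] = -float('Inf')    # keep only the top-k candidates

    probs = F.softmax(logits, dim=-1)
    return torch.multinomial(probs, num_samples=1)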
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torch.optim as optim
import torch
import torchvision.models as models


class Model(TorchVisionModel):
    task = COMPUTER_VISION.CLASSIFICATION
    # Original train batch size: 512, out of memory on V100 GPU
    # Use hierarchical batching to scale down: 512 = batch_size (32) * epoch_size (16)
    # Source: https://github.com/forresti/SqueezeNet
    DEFAULT_TRAIN_BSIZE = 32
    DEFAULT_EVAL_BSIZE = 16

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(model_name="squeezenet1_1", test=test, device=device,
                         batch_size=batch_size, weights=models.SqueezeNet1_1_Weights.IMAGENET1K_V1,
                         extra_args=extra_args)
        self.epoch_size = 16

    def train(self):
        optimizer = optim.Adam(self.model.parameters())
        loss = torch.nn.CrossEntropyLoss()
        optimizer.zero_grad()
        for _ in range(self.epoch_size):
            pred = self.model(*self.example_inputs)
            y = torch.empty(pred.shape[0], dtype=torch.long, device=self.device).random_(pred.shape[1])
            loss(pred, y).backward()
            optimizer.step()
import argparse import random from collections import deque import math import gym import numpy as np class ActionRepeatWrapper(gym.Wrapper): def __init__(self, env, repeat_multiplier=8): super().__init__(env) self.action_space = gym.spaces.Box( -1.0, 1.0, shape=(1 + self.env.action_space.shape[0],) ) self.repeat_multiplier = repeat_multiplier / 2.0 def step(self, action): repeat_action = max(math.floor((action[0] + 1.0) * self.repeat_multiplier), 1) main_action = action[1:] total_reward = 0 for _ in range(repeat_action): next_state, reward, done, _ = self.env.step(main_action) total_reward += reward return next_state, total_reward, done, {} class ChannelsFirstWrapper(gym.ObservationWrapper): """ Some pixel-based gym environments use a (Height, Width, Channel) image format. This wrapper rolls those axes to (Channel, Height, Width) to work with pytorch Conv2D layers. """ def __init__(self, env): super().__init__(env) self.observation_space.shape = ( env.observation_space.shape[-1], ) + env.observation_space.shape[:-1] def observation(self, frame): frame = np.transpose(frame, (2, 0, 1)) return np.ascontiguousarray(frame) class NormalizeObservationSpace(gym.ObservationWrapper): def __init__(self, env, obs_mean, obs_std): super().__init__(env) self.mean = obs_mean self.std = obs_std + 1e-5 def observation(self, x): return (x - self.mean) / self.std class NormalizeContinuousActionSpace(gym.ActionWrapper): def __init__(self, env): super().__init__(env) self._true_action_space = env.action_space self.action_space = gym.spaces.Box( low=-1.0, high=1.0, shape=self._true_action_space.shape, dtype=np.float32, ) def action(self, action): true_delta = self._true_action_space.high - self._true_action_space.low norm_delta = self.action_space.high - self.action_space.low action = (action - self.action_space.low) / norm_delta action = action * true_delta + self._true_action_space.low return action def robosuite_action_adjustment(robosuite_env, verbose=False): if verbose: action_space = robosuite_env.action_space high = action_space.high same_high = np.all(high == high[0]) low = action_space.low same_low = np.all(low == low[0]) shape = action_space.shape[0] print("RoboSuite Action Space Report:") if same_high and same_low: print(f"Uniformly Bounded Action Space in [{low[0]}, {high[0]}]^{shape}") else: print(f"Non-uniform Bounded Action Space with elements = {zip(low, high)}") print("\nAttempting to normalize action space using dc.envs.Normalize...\n") env = NormalizeContinuousActionSpace(robosuite_env) if verbose: action_space = env.action_space high = action_space.high same_high = np.all(high == high[0]) low = action_space.low same_low = np.all(low == low[0]) shape = action_space.shape[0] print("Normalized RoboSuite Action Space Report:") if same_high and same_low: print(f"Uniformly Bounded Action Space in [{low[0]}, {high[0]}]^{shape}") else: print(f"Non-uniform Bounded Action Space with elements = {zip(low, high)}") return env class FlattenObsWrapper(gym.ObservationWrapper): """ Simple wrapper that flattens an image observation into a state vector when CNNs are overkill. 
""" def __init__(self, env): super().__init__(env) self.observation_space.shape = (np.prod(env.observation_space.shape),) def observation(self, obs): return obs.flatten() class ConcatObsWrapper(gym.ObservationWrapper): def __init__(self, env): super().__init__(env) obs_space_shape = sum(x.shape[0] for x in self.observation_space) self.observation_space.shape = (obs_space_shape,) def observation(self, obs): return np.concatenate(obs, axis=0) def highway_env(env_id): """ Convenience function to turn all the highway_env environments into continuous control tasks. highway_env: https://highway-env.readthedocs.io/en/latest/index.html """ import gym import highway_env env = gym.make(env_id) env.configure({"action": {"type": "ContinuousAction"}}) env.reset() env = NormalizeContinuousActionSpace(env) env = FlattenObsWrapper(env) return env class DiscreteActionWrapper(gym.ActionWrapper): """ This is intended to let the action be any scalar (float or int) or np array (float or int) of size 1. floats are cast to ints using python's standard rounding. """ def __init__(self, env): super().__init__(env) self.action_space.shape = (env.action_space.n,) def action(self, action): if isinstance(action, np.ndarray): if len(action.shape) > 0: action = action[0] return int(action) class FrameStack(gym.Wrapper): def __init__(self, env, num_stack): gym.Wrapper.__init__(self, env) self._k = num_stack self._frames = deque([], maxlen=num_stack) shp = env.observation_space.shape self.observation_space = gym.spaces.Box( low=0, high=1, shape=((shp[0] * num_stack,) + shp[1:]), dtype=env.observation_space.dtype, ) def reset(self): obs = self.env.reset() for _ in range(self._k): self._frames.append(obs) return self._get_obs() def step(self, action): obs, reward, done, info = self.env.step(action) self._frames.append(obs) return self._get_obs(), reward, done, info def _get_obs(self): assert len(self._frames) == self._k return np.concatenate(list(self._frames), axis=0) class GoalBasedWrapper(gym.ObservationWrapper): """ Some goal-based envs (like the Gym Robotics suite) use dictionary observations with one entry for the current state and another to describe the goal. This wrapper concatenates those into a single vector so it can be used just like any other env. 
""" def __init__(self, env): super().__init__(env) self.observation_space.shape = ( env.observation_space["observation"].shape[0] + env.observation_space["desired_goal"].shape[0], ) def observation(self, obs_dict): return self._flatten_obs(obs_dict) def _flatten_obs(self, obs_dict): return np.concatenate((obs_dict["observation"], obs_dict["desired_goal"])) def add_gym_args(parser): """ Add a --env_id cl flag to an argparser """ parser.add_argument("--env_id", type=str, default="Pendulum-v1") parser.add_argument("--seed", type=int, default=123) def load_gym(env_id="CartPole-v1", seed=None, normalize_action_space=True, **_): """ Load an environment from OpenAI gym (or pybullet_gym, if installed) """ # optional pybullet import try: import pybullet import pybulletgym except ImportError: pass env = gym.make(env_id) if normalize_action_space and isinstance(env.action_space, gym.spaces.Box): env = NormalizeContinuousActionSpace(env) if seed is None: seed = random.randint(1, 100000) env.reset(seed=seed) return env def add_dmc_args(parser): """ Add cl flags associated with the deepmind control suite to a parser """ parser.add_argument("--domain_name", type=str, default="fish") parser.add_argument("--task_name", type=str, default="swim") parser.add_argument( "--from_pixels", action="store_true", help="Use image observations" ) parser.add_argument("--height", type=int, default=84) parser.add_argument("--width", type=int, default=84) parser.add_argument("--camera_id", type=int, default=0) parser.add_argument("--frame_skip", type=int, default=1) parser.add_argument("--frame_stack", type=int, default=3) parser.add_argument("--channels_last", action="store_true") parser.add_argument("--rgb", action="store_true") parser.add_argument("--seed", type=int, default=231) def add_atari_args(parser): parser.add_argument("--game_id", type=str, default="Boxing-v0") parser.add_argument("--noop_max", type=int, default=30) parser.add_argument("--frame_skip", type=int, default=1) parser.add_argument("--screen_size", type=int, default=84) parser.add_argument("--terminal_on_life_loss", action="store_true") parser.add_argument("--rgb", action="store_true") parser.add_argument("--normalize", action="store_true") parser.add_argument("--frame_stack", type=int, default=4) parser.add_argument("--seed", type=int, default=231) def load_atari( game_id, seed=None, noop_max=30, frame_skip=1, screen_size=84, terminal_on_life_loss=False, rgb=False, normalize=False, frame_stack=4, clip_reward=True, **_, ): """ Load a game from the Atari benchmark, with the usual settings Note that the simplest game ids (e.g. Boxing-v0) come with frame skipping by default, and you'll get an error if the frame_skp arg > 1. Use `BoxingNoFrameskip-v0` with frame_skip > 1. """ env = gym.make(game_id) if seed is None: seed = random.randint(1, 100000) env.reset(seed=seed) env = gym.wrappers.AtariPreprocessing( env, noop_max=noop_max, frame_skip=frame_skip, screen_size=screen_size, terminal_on_life_loss=terminal_on_life_loss, grayscale_obs=False, # use GrayScale wrapper instead... 
scale_obs=normalize, ) if not rgb: env = gym.wrappers.GrayScaleObservation(env, keep_dim=True) if clip_reward: env = ClipReward(env) env = ChannelsFirstWrapper(env) env = FrameStack(env, num_stack=frame_stack) env = DiscreteActionWrapper(env) return env class ClipReward(gym.RewardWrapper): def __init__(self, env, low=-1.0, high=1.0): super().__init__(env) self._clip_low = low self._clip_high = high def reward(self, rew): return max(min(rew, self._clip_high), self._clip_low) def load_dmc( domain_name, task_name, seed=None, from_pixels=False, frame_stack=1, height=84, width=84, camera_id=0, frame_skip=1, channels_last=False, rgb=False, **_, ): """ Load a task from the deepmind control suite. Uses dmc2gym (https://github.com/denisyarats/dmc2gym) Note that setting seed=None (the default) picks a random seed """ import dmc2gym if seed is None: seed = random.randint(1, 100000) env = dmc2gym.make( domain_name=domain_name, task_name=task_name, from_pixels=from_pixels, height=height, width=width, camera_id=camera_id, visualize_reward=False, frame_skip=frame_skip, channels_first=not channels_last if rgb else False, # if we're using RGB, set the channel order here ) if not rgb and from_pixels: env = gym.wrappers.GrayScaleObservation(env, keep_dim=True) env = ChannelsFirstWrapper(env) if from_pixels: env = FrameStack(env, num_stack=frame_stack) return env
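# --- Illustrative sketch (not part of the benchmark source) ---
# Minimal example of how the helpers above compose, assuming the older Gym API
# used throughout this module (reset() returns an observation, step() returns a
# 4-tuple). load_gym() wraps Box action spaces in NormalizeContinuousActionSpace,
# so the agent always acts in [-1, 1].
import numpy as np

env = load_gym("Pendulum-v1", seed=0)
state = env.reset()
action = np.zeros(env.action_space.shape, dtype=np.float32)   # a mid-range action
next_state, reward, done, info = env.step(action)
print(state.shape, reward, done)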
import dataclasses


@dataclasses.dataclass
class SACConfig:
    # Attributes are unannotated, so the @dataclass decorator treats them as
    # plain class-level defaults rather than generated fields.
    env_id = "Pendulum-v1"
    seed = 123
    num_steps = 1
    transitions_per_step = 1
    max_episode_steps = 10
    batch_size = 512
    tau = 0.005
    actor_lr = 1e-4
    critic_lr = 1e-4
    gamma = 0.99
    init_alpha = 0.1
    alpha_lr = 1e-4
    buffer_size = 1_000_000
    eval_interval = 5000
    eval_episodes = 10
    warmup_steps = 1
    render = False
    actor_clip = 0.0
    critic_clip = 0.0
    name = "sac_run"
    actor_l2 = 0.0
    critic_l2 = 0.0
    target_delay = 2
    actor_delay = 1
    save_interval = 100_000
    verbosity = 0
    gradient_updates_per_step = 1
    prioritized_replay = False
    skip_save_to_disk = True
    skip_log_to_disk = True
    discrete_actions = False
    log_std_low = -10.0
    log_std_high = 2.0
    self_regularized = False
    sr_max_critic_updates_per_step = 10
    sr_critic_target_improvement_init = 0.7
    sr_critic_target_improvement_final = 0.9
    train_env_path = "input_data/train_env.pkl"
    test_env_path = "input_data/test_env.pkl"
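# --- Illustrative sketch (not part of the benchmark source) ---
# The config above is consumed by mutating attributes on an instance; the
# overrides below are arbitrary example values.
cfg = SACConfig()
cfg.batch_size = 256
cfg.gradient_updates_per_step = 2
print(cfg.env_id, cfg.batch_size)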
import torch import os import copy import pickle import math from itertools import chain from ...util.model import BenchmarkModel from torchbenchmark.tasks import REINFORCEMENT_LEARNING from typing import Tuple from .config import SACConfig from .envs import load_gym from .sac import SACAgent from .replay import PrioritizedReplayBuffer, ReplayBuffer from .utils import hard_update, soft_update def learn_standard( buffer, target_agent, agent, actor_optimizer, critic_optimizer, log_alpha_optimizer, target_entropy, batch_size, log_alpha, gamma, critic_clip, actor_clip, update_policy=True, device=None, ): per = isinstance(buffer, PrioritizedReplayBuffer) if per: batch, imp_weights, priority_idxs = buffer.sample(batch_size) imp_weights = imp_weights.to(device) else: batch = buffer.sample(batch_size) # prepare transitions for models state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch state_batch = state_batch.to(device) next_state_batch = next_state_batch.to(device) action_batch = action_batch.to(device) reward_batch = reward_batch.to(device) done_batch = done_batch.to(device) agent.train() ################### ## CRITIC UPDATE ## ################### alpha = torch.exp(log_alpha) with torch.no_grad(): action_dist_s1 = agent.actor(next_state_batch) action_s1 = action_dist_s1.rsample() logp_a1 = action_dist_s1.log_prob(action_s1).sum(-1, keepdim=True) target_action_value_s1 = torch.min( target_agent.critic1(next_state_batch, action_s1), target_agent.critic2(next_state_batch, action_s1), ) td_target = reward_batch + gamma * (1.0 - done_batch) * ( target_action_value_s1 - (alpha * logp_a1) ) # update critics agent_critic1_pred = agent.critic1(state_batch, action_batch) agent_critic2_pred = agent.critic2(state_batch, action_batch) td_error1 = td_target - agent_critic1_pred td_error2 = td_target - agent_critic2_pred critic_loss = 0.5 * (td_error1 ** 2 + td_error2 ** 2) if per: critic_loss *= imp_weights critic_loss = critic_loss.mean() critic_optimizer.zero_grad() critic_loss.backward() if critic_clip: torch.nn.utils.clip_grad_norm_( chain(agent.critic1.parameters(), agent.critic2.parameters()), critic_clip ) critic_optimizer.step() if update_policy: ################## ## ACTOR UPDATE ## ################## dist = agent.actor(state_batch) agent_actions = dist.rsample() logp_a = dist.log_prob(agent_actions).sum(-1, keepdim=True) actor_loss = -( torch.min( agent.critic1(state_batch, agent_actions), agent.critic2(state_batch, agent_actions), ) - (alpha.detach() * logp_a) ).mean() actor_optimizer.zero_grad() actor_loss.backward() if actor_clip: torch.nn.utils.clip_grad_norm_(agent.actor.parameters(), actor_clip) actor_optimizer.step() ################## ## ALPHA UPDATE ## ################## alpha_loss = (-alpha * (logp_a + target_entropy).detach()).mean() log_alpha_optimizer.zero_grad() alpha_loss.backward() log_alpha_optimizer.step() if per: new_priorities = (abs(td_error1) + 1e-5).cpu().detach().squeeze(1).numpy() buffer.update_priorities(priority_idxs, new_priorities) class Model(BenchmarkModel): task = REINFORCEMENT_LEARNING.OTHER_RL # Original train batch size: 256 # Source: https://github.com/pranz24/pytorch-soft-actor-critic/blob/398595e0d9dca98b7db78c7f2f939c969431871a/main.py#L31 # This model doesn't support customizing batch size, or data prefetching DEFAULT_TRAIN_BSIZE = 256 DEFAULT_EVAL_BSIZE = 256 ALLOW_CUSTOMIZE_BSIZE = False def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, 
extra_args=extra_args) self.args = SACConfig() self.args.batch_size = self.batch_size # Construct agent current_dir = os.path.dirname(os.path.abspath(__file__)) self.train_env = load_gym(self.args.env_id, self.args.seed) self.test_env = load_gym(self.args.env_id, self.args.seed) self.obs_shape = self.train_env.observation_space.shape self.actions_shape = self.train_env.action_space.shape self.agent = SACAgent(self.obs_shape[0], self.actions_shape[0], self.args.log_std_low, self.args.log_std_high, self.device) if self.args.prioritized_replay: buffer_t = PrioritizedReplayBuffer else: buffer_t = ReplayBuffer self.buffer = buffer_t( self.args.buffer_size, device=self.device, state_shape=self.train_env.observation_space.shape, state_dtype=float, action_shape=(1,), ) self.learning_method = "Standard" self.agent.to(device) if not self.args.self_regularized: # initialize target networks self.target_agent = copy.deepcopy(self.agent) self.target_agent.to(device) hard_update(self.target_agent.critic1, self.agent.critic1) hard_update(self.target_agent.critic2, self.agent.critic2) self.target_agent.train() self.critic_optimizer = torch.optim.Adam( chain(self.agent.critic1.parameters(), self.agent.critic2.parameters(),), lr=self.args.critic_lr, weight_decay=self.args.critic_l2, betas=(0.9, 0.999), ) self.actor_optimizer = torch.optim.Adam( self.agent.actor.parameters(), lr=self.args.actor_lr, weight_decay=self.args.actor_l2, betas=(0.9, 0.999), ) self.log_alpha = torch.Tensor([math.log(self.args.init_alpha)]).to(device) self.log_alpha.requires_grad = True self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.args.alpha_lr, betas=(0.5, 0.999)) if not self.args.discrete_actions: self.target_entropy = -self.train_env.action_space.shape[0] else: self.target_entropy = -math.log(1.0 / self.train_env.action_space.n) * 0.98 if self.args.self_regularized: # the critic target improvement ratio is annealed during training self.critic_target_imp_slope = ( self.args.sr_critic_target_improvement_final - self.args.sr_critic_target_improvement_init ) / self.args.num_steps self.current_target_imp = lambda step: min( self.args.sr_critic_target_improvement_init + self.critic_target_imp_slope * step, self.args.sr_critic_target_improvement_final, ) def get_module(self): model = self.agent.actor state = self.train_env.reset() action = self.agent.sample_action(state) next_state, reward, done, info = self.train_env.step(action) self.buffer.push(state, action, reward, next_state, done) batch = self.buffer.sample(self.args.batch_size) state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch state_batch = state_batch.to(self.device) return model, (state_batch, ) def set_module(self, new_model): self.agent.actor = new_model def train(self): # Setup self.target_agent.train() done = True niter = 1 for step in range(niter): if done: state = self.train_env.reset() steps_this_ep = 0 done = False action = self.agent.sample_action(state) next_state, reward, done, info = self.train_env.step(action) self.buffer.push(state, action, reward, next_state, done) state = next_state steps_this_ep += 1 if steps_this_ep >= self.args.max_episode_steps: done = True for _ in range(self.args.gradient_updates_per_step): learn_standard( buffer=self.buffer, target_agent=self.target_agent, agent=self.agent, actor_optimizer=self.actor_optimizer, critic_optimizer=self.critic_optimizer, log_alpha=self.log_alpha, log_alpha_optimizer=self.log_alpha_optimizer, target_entropy=self.target_entropy, 
batch_size=self.args.batch_size, gamma=self.args.gamma, critic_clip=self.args.critic_clip, actor_clip=self.args.actor_clip, update_policy=step % self.args.actor_delay == 0, device=self.device ) # move target model towards training model if not self.args.self_regularized and (step % self.args.target_delay == 0): soft_update(self.target_agent.critic1, self.agent.critic1, self.args.tau) soft_update(self.target_agent.critic2, self.agent.critic2, self.args.tau) def eval(self) -> Tuple[torch.Tensor]: niter = 1 with torch.no_grad(): discount= 1.0 episode_return_history = [] for episode in range(niter): episode_return = 0.0 state = self.test_env.reset() done, info = False, {} for step_num in range(self.args.max_episode_steps): if done: break action = self.agent.forward(state) state, reward, done, info = self.test_env.step(action) episode_return += reward * (discount ** step_num) episode_return_history.append(episode_return) retval = torch.tensor(episode_return_history) return (torch.tensor(action), ) def get_optimizer(self): return (self.actor_optimizer, self.critic_optimizer, self.log_alpha_optimizer) def set_optimizer(self, optimizer) -> None: self.actor_optimizer, self.critic_optimizer, self.log_alpha_optimizer = optimizer
import argparse
import copy
import math
import os
from itertools import chain

import numpy as np
import tensorboardX
import torch
import torch.nn.functional as F
import tqdm

from . import envs, nets, replay, utils


class SACAgent:
    def __init__(
        self,
        obs_space_size,
        act_space_size,
        log_std_low,
        log_std_high,
        device,
        actor_net_cls=nets.StochasticActor,
        critic_net_cls=nets.BigCritic,
        hidden_size=1024,
    ):
        self.actor = actor_net_cls(
            obs_space_size,
            act_space_size,
            log_std_low,
            log_std_high,
            dist_impl="pyd",
            hidden_size=hidden_size,
        )
        self.critic1 = critic_net_cls(obs_space_size, act_space_size, hidden_size)
        self.critic2 = critic_net_cls(obs_space_size, act_space_size, hidden_size)
        self.device = device

    def to(self, device):
        self.actor = self.actor.to(device)
        self.critic1 = self.critic1.to(device)
        self.critic2 = self.critic2.to(device)

    def eval(self):
        self.actor.eval()
        self.critic1.eval()
        self.critic2.eval()

    def train(self):
        self.actor.train()
        self.critic1.train()
        self.critic2.train()

    def save(self, path):
        actor_path = os.path.join(path, "actor.pt")
        critic1_path = os.path.join(path, "critic1.pt")
        critic2_path = os.path.join(path, "critic2.pt")
        torch.save(self.actor.state_dict(), actor_path)
        torch.save(self.critic1.state_dict(), critic1_path)
        torch.save(self.critic2.state_dict(), critic2_path)

    def load(self, path):
        actor_path = os.path.join(path, "actor.pt")
        critic1_path = os.path.join(path, "critic1.pt")
        critic2_path = os.path.join(path, "critic2.pt")
        self.actor.load_state_dict(torch.load(actor_path))
        self.critic1.load_state_dict(torch.load(critic1_path))
        self.critic2.load_state_dict(torch.load(critic2_path))

    def forward(self, state, from_cpu=True):
        if from_cpu:
            state = self.process_state(state)
        self.actor.eval()
        with torch.no_grad():
            act_dist = self.actor.forward(state)
            act = act_dist.mean
        self.actor.train()
        if from_cpu:
            act = self.process_act(act)
        return act

    def sample_action(self, state, from_cpu=True):
        if from_cpu:
            state = self.process_state(state)
        self.actor.eval()
        with torch.no_grad():
            act_dist = self.actor.forward(state)
            act = act_dist.sample()
        self.actor.train()
        if from_cpu:
            act = self.process_act(act)
        return act

    def process_state(self, state):
        return torch.from_numpy(np.expand_dims(state, 0).astype(np.float32)).to(
            self.device
        )

    def process_act(self, act):
        return np.squeeze(act.clamp(-1.0, 1.0).cpu().numpy(), 0)


class SACDAgent(SACAgent):
    def __init__(self, obs_space_size, act_space_size):
        self.actor = nets.BaselineDiscreteActor(obs_space_size, act_space_size)
        self.critic1 = nets.BaselineDiscreteCritic(obs_space_size, act_space_size)
        self.critic2 = nets.BaselineDiscreteCritic(obs_space_size, act_space_size)

    def forward(self, state):
        state = self.process_state(state)
        self.actor.eval()
        with torch.no_grad():
            act_dist = self.actor.forward(state)
            act = torch.argmax(act_dist.probs, dim=1)
        self.actor.train()
        return self.process_act(act)
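# --- Illustrative sketch (not part of the benchmark source) ---
# Constructing the agent above with made-up observation/action sizes and
# querying it with a dummy state. sample_action() draws from the
# squashed-Gaussian policy, while forward() returns the distribution mean
# (the greedy action).
import numpy as np

agent = SACAgent(obs_space_size=17, act_space_size=6,
                 log_std_low=-10.0, log_std_high=2.0, device="cpu")
agent.to("cpu")
state = np.zeros(17, dtype=np.float32)
exploratory_action = agent.sample_action(state)   # shape (6,), values in [-1, 1]
greedy_action = agent.forward(state)
print(exploratory_action.shape, greedy_action.shape)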
import numpy as np import torch def unique(sorted_array): """ More efficient implementation of np.unique for sorted arrays :param sorted_array: (np.ndarray) :return:(np.ndarray) sorted_array without duplicate elements """ if len(sorted_array) == 1: return sorted_array left = sorted_array[:-1] right = sorted_array[1:] uniques = np.append(right != left, True) return sorted_array[uniques] class SegmentTree: def __init__(self, capacity, operation, neutral_element): """ Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as regular array that supports Index arrays, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient ( O(log segment size) ) `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. :param capacity: (int) Total size of the array - must be a power of two. :param operation: (lambda (Any, Any): Any) operation for combining elements (eg. sum, max) must form a mathematical group together with the set of possible values for array elements (i.e. be associative) :param neutral_element: (Any) neutral element for the operation above. eg. float('-inf') for max and 0 for sum. """ assert ( capacity > 0 and capacity & (capacity - 1) == 0 ), "capacity must be positive and a power of 2." self._capacity = capacity self._value = [neutral_element for _ in range(2 * capacity)] self._operation = operation self.neutral_element = neutral_element def _reduce_helper(self, start, end, node, node_start, node_end): if start == node_start and end == node_end: return self._value[node] mid = (node_start + node_end) // 2 if end <= mid: return self._reduce_helper(start, end, 2 * node, node_start, mid) else: if mid + 1 <= start: return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end) else: return self._operation( self._reduce_helper(start, mid, 2 * node, node_start, mid), self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end), ) def reduce(self, start=0, end=None): """ Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation(arr[start], operation(arr[start+1], operation(... arr[end]))) :param start: (int) beginning of the subsequence :param end: (int) end of the subsequences :return: (Any) result of reducing self.operation over the specified range of array elements. """ if end is None: end = self._capacity if end < 0: end += self._capacity end -= 1 return self._reduce_helper(start, end, 1, 0, self._capacity - 1) def __setitem__(self, idx, val): # indexes of the leaf idxs = idx + self._capacity self._value[idxs] = val if isinstance(idxs, int): idxs = np.array([idxs]) # go up one level in the tree and remove duplicate indexes idxs = unique(idxs // 2) while len(idxs) > 1 or idxs[0] > 0: # as long as there are non-zero indexes, update the corresponding values self._value[idxs] = self._operation( self._value[2 * idxs], self._value[2 * idxs + 1] ) # go up one level in the tree and remove duplicate indexes idxs = unique(idxs // 2) def __getitem__(self, idx): assert np.max(idx) < self._capacity assert 0 <= np.min(idx) return self._value[self._capacity + idx] class SumSegmentTree(SegmentTree): def __init__(self, capacity): super(SumSegmentTree, self).__init__( capacity=capacity, operation=np.add, neutral_element=0.0 ) self._value = np.array(self._value) def sum(self, start=0, end=None): """ Returns arr[start] + ... 
+ arr[end] :param start: (int) start position of the reduction (must be >= 0) :param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1) :return: (Any) reduction of SumSegmentTree """ return super(SumSegmentTree, self).reduce(start, end) def find_prefixsum_idx(self, prefixsum): """ Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum for each entry in prefixsum if array values are probabilities, this function allows to sample indexes according to the discrete probability efficiently. :param prefixsum: (np.ndarray) float upper bounds on the sum of array prefix :return: (np.ndarray) highest indexes satisfying the prefixsum constraint """ if isinstance(prefixsum, float): prefixsum = np.array([prefixsum]) assert 0 <= np.min(prefixsum) assert np.max(prefixsum) <= self.sum() + 1e-5 assert isinstance(prefixsum[0], float) idx = np.ones(len(prefixsum), dtype=int) cont = np.ones(len(prefixsum), dtype=bool) while np.any(cont): # while not all nodes are leafs idx[cont] = 2 * idx[cont] prefixsum_new = np.where( self._value[idx] <= prefixsum, prefixsum - self._value[idx], prefixsum ) # prepare update of prefixsum for all right children idx = np.where( np.logical_or(self._value[idx] > prefixsum, np.logical_not(cont)), idx, idx + 1, ) # Select child node for non-leaf nodes prefixsum = prefixsum_new # update prefixsum cont = idx < self._capacity # collect leafs return idx - self._capacity class MinSegmentTree(SegmentTree): def __init__(self, capacity): super(MinSegmentTree, self).__init__( capacity=capacity, operation=np.minimum, neutral_element=float("inf") ) self._value = np.array(self._value) def min(self, start=0, end=None): """ Returns min(arr[start], ..., arr[end]) :param start: (int) start position of the reduction (must be >= 0) :param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1) :return: (Any) reduction of MinSegmentTree """ return super(MinSegmentTree, self).reduce(start, end) class ReplayBufferStorage: def __init__(self, size, obs_shape, act_shape, device, obs_dtype=torch.float32): self.s_dtype = obs_dtype self.device = device # buffer arrays self.s_stack = torch.zeros((size,) + obs_shape, dtype=self.s_dtype, device=device) self.action_stack = torch.zeros((size,) + act_shape, dtype=torch.float32, device=device) self.reward_stack = torch.zeros((size, 1), dtype=torch.float32, device=device) self.s1_stack = torch.zeros((size,) + obs_shape, dtype=self.s_dtype, device=device) self.done_stack = torch.zeros((size, 1), dtype=torch.int, device=device) self.obs_shape = obs_shape self.size = size self._next_idx = 0 self._max_filled = 0 def __len__(self): return self._max_filled def add(self, s, a, r, s_1, d): # this buffer supports batched experience if len(s.shape) > len(self.obs_shape): # there must be a batch dimension num_samples = len(s) else: num_samples = 1 r, d = [r], [d] if not isinstance(s, torch.Tensor): # convert states to numpy (checking for LazyFrames) if not isinstance(s, np.ndarray): s = np.asarray(s) if not isinstance(s_1, np.ndarray): s_1 = np.asarray(s_1) # convert to torch tensors s = torch.from_numpy(s) a = torch.from_numpy(a).float() r = torch.Tensor(r).float() s_1 = torch.from_numpy(s_1) d = torch.Tensor(d).int() # make sure tensors are floats not doubles if self.s_dtype is torch.float32: s = s.float() s_1 = s_1.float() s = s.to(self.device) a = a.to(self.device) r = r.to(self.device) s_1 = s_1.to(self.device) d = d.int().to(self.device) # Store at 
end of buffer. Wrap around if past end. R = np.arange(self._next_idx, self._next_idx + num_samples) % self.size self.s_stack[R] = s self.action_stack[R] = a self.reward_stack[R] = r self.s1_stack[R] = s_1 self.done_stack[R] = d # Advance index. self._max_filled = min( max(self._next_idx + num_samples, self._max_filled), self.size ) self._next_idx = (self._next_idx + num_samples) % self.size return R def __getitem__(self, indices): try: iter(indices) except ValueError: raise IndexError( "ReplayBufferStorage getitem called with indices object that is not iterable" ) # converting states and actions to float here instead of inside the learning loop # of each agent seems fine for now. state = self.s_stack[indices].float() action = self.action_stack[indices].float() reward = self.reward_stack[indices] next_state = self.s1_stack[indices].float() done = self.done_stack[indices] return (state, action, reward, next_state, done) def __setitem__(self, indices, experience): s, a, r, s1, d = experience self.s_stack[indices] = s.float() self.action_stack[indices] = a.float() self.reward_stack[indices] = r self.s1_stack[indices] = s1.float() self.done_stack[indices] = d def get_all_transitions(self): return ( self.s_stack[: self._max_filled], self.action_stack[: self._max_filled], self.reward_stack[: self._max_filled], self.s1_stack[: self._max_filled], self.done_stack[: self._max_filled], ) class ReplayBuffer: def __init__(self, size, device, state_shape=None, action_shape=None, state_dtype=float): self.device=device self._maxsize = size self.state_shape = state_shape self.state_dtype = self._convert_dtype(state_dtype) self.action_shape = action_shape self._storage = None assert self.state_shape, "Must provide shape of state space to ReplayBuffer" assert self.action_shape, "Must provide shape of action space to ReplayBuffer" def _convert_dtype(self, dtype): if dtype in [int, np.uint8, torch.uint8]: return torch.uint8 elif dtype in [float, np.float32, np.float64, torch.float32, torch.float64]: return torch.float32 elif dtype in ["int32", np.int32]: return torch.int32 else: raise ValueError(f"Uncreocgnized replay buffer dtype: {dtype}") def __len__(self): return len(self._storage) if self._storage is not None else 0 def push(self, state, action, reward, next_state, done): if self._storage is None: self._storage = ReplayBufferStorage( self._maxsize, device=self.device, obs_shape=self.state_shape, act_shape=self.action_shape, obs_dtype=self.state_dtype, ) return self._storage.add(state, action, reward, next_state, done) def sample(self, batch_size, get_idxs=False): random_idxs = torch.randint(len(self._storage), (batch_size,)).to(self.device) if get_idxs: return self._storage[random_idxs], random_idxs.cpu().numpy() else: return self._storage[random_idxs] def get_all_transitions(self): return self._storage.get_all_transitions() def load_experience(self, s, a, r, s1, d): assert ( s.shape[0] <= self._maxsize ), "Experience dataset is larger than the buffer." 
if len(r.shape) < 2: r = np.expand_dims(r, 1) if len(d.shape) < 2: d = np.expand_dims(d, 1) self.push(s, a, r, s1, d) class PrioritizedReplayBuffer(ReplayBuffer): def __init__( self, size, state_shape, action_shape, state_dtype=float, alpha=0.6, beta=1.0 ): super(PrioritizedReplayBuffer, self).__init__( size, state_shape, action_shape, state_dtype ) assert alpha >= 0 self.alpha = alpha self.beta = beta it_capacity = 1 while it_capacity < size: it_capacity *= 2 self._it_sum = SumSegmentTree(it_capacity) self._it_min = MinSegmentTree(it_capacity) self._max_priority = 1.0 def push(self, s, a, r, s_1, d, priorities=None): R = super().push(s, a, r, s_1, d) if priorities is None: priorities = self._max_priority self._it_sum[R] = priorities ** self.alpha self._it_min[R] = priorities ** self.alpha def _sample_proportional(self, batch_size): mass = [] total = self._it_sum.sum(0, len(self._storage) - 1) mass = np.random.random(size=batch_size) * total idx = self._it_sum.find_prefixsum_idx(mass) return idx def sample(self, batch_size): idxes = self._sample_proportional(batch_size) p_min = self._it_min.min() / self._it_sum.sum() max_weight = (p_min * len(self._storage)) ** (-self.beta) p_sample = self._it_sum[idxes] / self._it_sum.sum() weights = (p_sample * len(self._storage)) ** (-self.beta) / max_weight return self._storage[idxes], torch.from_numpy(weights), idxes def sample_uniform(self, batch_size): return super().sample(batch_size, get_idxs=True) def update_priorities(self, idxes, priorities): assert len(idxes) == len(priorities) assert np.min(priorities) > 0 assert np.min(idxes) >= 0 assert np.max(idxes) < len(self._storage) self._it_sum[idxes] = priorities ** self.alpha self._it_min[idxes] = priorities ** self.alpha self._max_priority = max(self._max_priority, np.max(priorities)) class MultiPriorityBuffer(ReplayBuffer): def __init__( self, size, trees, state_shape, action_shape, state_dtype=float, alpha=0.6, beta=1.0, ): super(MultiPriorityBuffer, self).__init__( size, state_shape, action_shape, state_dtype ) assert alpha >= 0 self.alpha = alpha self.beta = beta it_capacity = 1 while it_capacity < size: it_capacity *= 2 self.sum_trees = [SumSegmentTree(it_capacity) for _ in range(trees)] self.min_trees = [MinSegmentTree(it_capacity) for _ in range(trees)] self._max_priority = 1.0 def push(self, s, a, r, s_1, d, priorities=None): R = super().push(s, a, r, s_1, d) if priorities is None: priorities = self._max_priority for sum_tree in self.sum_trees: sum_tree[R] = priorities ** self.alpha for min_tree in self.min_trees: min_tree[R] = priorities ** self.alpha def _sample_proportional(self, batch_size, tree_num): mass = [] total = self.sum_trees[tree_num].sum(0, len(self._storage) - 1) mass = np.random.random(size=batch_size) * total idx = self.sum_trees[tree_num].find_prefixsum_idx(mass) return idx def sample(self, batch_size, tree_num): idxes = self._sample_proportional(batch_size, tree_num) p_min = self.min_trees[tree_num].min() / self.sum_trees[tree_num].sum() max_weight = (p_min * len(self._storage)) ** (-self.beta) p_sample = self.sum_trees[tree_num][idxes] / self.sum_trees[tree_num].sum() weights = (p_sample * len(self._storage)) ** (-self.beta) / max_weight return self._storage[idxes], torch.from_numpy(weights), idxes def sample_uniform(self, batch_size): return super().sample(batch_size, get_idxs=True) def update_priorities(self, idxes, priorities, tree_num): assert len(idxes) == len(priorities) assert np.min(priorities) > 0 assert np.min(idxes) >= 0 assert np.max(idxes) < 
len(self._storage) self.sum_trees[tree_num][idxes] = priorities ** self.alpha self.min_trees[tree_num][idxes] = priorities ** self.alpha self._max_priority = max(self._max_priority, np.max(priorities))
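# --- Illustrative sketch (not part of the benchmark source) ---
# Pushing a single toy transition into the uniform ReplayBuffer above and
# sampling it back; shapes and values are arbitrary.
import numpy as np

buffer = ReplayBuffer(size=1000, device="cpu",
                      state_shape=(4,), action_shape=(2,), state_dtype=float)
state = np.zeros(4, dtype=np.float32)
action = np.zeros(2, dtype=np.float32)
next_state = np.ones(4, dtype=np.float32)
buffer.push(state, action, 1.0, next_state, False)
s, a, r, s1, d = buffer.sample(batch_size=1)
print(s.shape, a.shape, r.shape, d.shape)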
import math import os import random from collections import namedtuple import gym import numpy as np import torch def clean_hparams_dict(hparams_dict): return {key: val for key, val in hparams_dict.items() if val} def get_grad_norm(model): total_norm = 0.0 for p in model.parameters(): try: param = p.grad.data except AttributeError: continue else: param_norm = param.norm(2) total_norm += param_norm.item() ** 2 total_norm = total_norm ** (1.0 / 2) return total_norm def torch_and_pad(x): if not isinstance(x, np.ndarray): x = np.array(x) return torch.from_numpy(x.astype(np.float32)).unsqueeze(0) def mean(lst): return float(sum(lst)) / len(lst) def make_process_dirs(run_name, base_path="dc_saves"): base_dir = os.path.join(base_path, run_name) i = 0 while os.path.exists(base_dir + f"_{i}"): i += 1 base_dir += f"_{i}" os.makedirs(base_dir) return base_dir def compute_conv_output( inp_shape, kernel_size, padding=(0, 0), dilation=(1, 1), stride=(1, 1) ): """ Compute the shape of the output of a torch Conv2d layer using the formula from the docs. every argument is a tuple corresponding to (height, width), e.g. kernel_size=(3, 4) """ height_out = math.floor( ( (inp_shape[0] + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] ) + 1 ) width_out = math.floor( ( (inp_shape[1] + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] ) + 1 ) return height_out, width_out def soft_update(target, source, tau): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau) def hard_update(target, source): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_(param.data) """ This is all from: https://github.com/matthiasplappert/keras-rl/blob/master/rl/random.py """ class AnnealedGaussianProcess: def __init__(self, mu, sigma, sigma_min, n_steps_annealing): self.mu = mu self.sigma = sigma self.n_steps = 0 if sigma_min is not None: self.m = -float(sigma - sigma_min) / float(n_steps_annealing) self.c = sigma self.sigma_min = sigma_min else: self.m = 0.0 self.c = sigma self.sigma_min = sigma @property def current_sigma(self): sigma = max(self.sigma_min, self.m * float(self.n_steps) + self.c) return sigma class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess): def __init__( self, theta, mu=0.0, sigma=1.0, dt=1e-2, x0=None, size=1, sigma_min=None, n_steps_annealing=1000, ): super(OrnsteinUhlenbeckProcess, self).__init__( mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing ) self.theta = theta self.mu = mu self.dt = dt self.x0 = x0 self.size = size self.reset_states() def sample(self): x = ( self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size) ) self.x_prev = x self.n_steps += 1 return x def reset_states(self): self.x_prev = self.x0 if self.x0 is not None else np.zeros(self.size) class GaussianExplorationNoise: def __init__(self, size, start_scale=1.0, final_scale=0.1, steps_annealed=1000): assert start_scale >= final_scale self.size = size self.start_scale = start_scale self.final_scale = final_scale self.steps_annealed = steps_annealed self._current_scale = start_scale self._scale_slope = (start_scale - final_scale) / steps_annealed def sample(self): noise = self._current_scale * torch.randn(*self.size) self._current_scale = max( self._current_scale - self._scale_slope, self.final_scale ) return noise.numpy() def reset_states(self): pass
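# --- Illustrative sketch (not part of the benchmark source) ---
# compute_conv_output() follows the Conv2d shape formula from the PyTorch docs;
# for an 84x84 input with kernel 8 and stride 4 (the first layer of the small
# pixel encoder defined later in this module), the spatial output is 20x20.
h, w = compute_conv_output((84, 84), kernel_size=(8, 8), stride=(4, 4))
print(h, w)  # 20 20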
import os
import subprocess
import sys


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])


if __name__ == '__main__':
    pip_install_requirements()
import math import numpy as np import torch import torch.nn.functional as F from torch import distributions as pyd from torch import nn from . import utils def weight_init(m): if isinstance(m, nn.Linear): nn.init.orthogonal_(m.weight.data) m.bias.data.fill_(0.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): # delta-orthogonal init from https://arxiv.org/pdf/1806.05393.pdf assert m.weight.size(2) == m.weight.size(3) m.weight.data.fill_(0.0) m.bias.data.fill_(0.0) mid = m.weight.size(2) // 2 gain = nn.init.calculate_gain("relu") nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain) class BigPixelEncoder(nn.Module): def __init__(self, obs_shape, out_dim=50): super().__init__() channels = obs_shape[0] self.conv1 = nn.Conv2d(channels, 32, kernel_size=3, stride=2) self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1) self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1) self.conv4 = nn.Conv2d(32, 32, kernel_size=3, stride=1) output_height, output_width = utils.compute_conv_output( obs_shape[1:], kernel_size=(3, 3), stride=(2, 2) ) for _ in range(3): output_height, output_width = utils.compute_conv_output( (output_height, output_width), kernel_size=(3, 3), stride=(1, 1) ) self.fc = nn.Linear(output_height * output_width * 32, out_dim) self.ln = nn.LayerNorm(out_dim) self.apply(weight_init) def forward(self, obs): obs /= 255.0 x = F.relu(self.conv1(obs)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = x.view(x.size(0), -1) x = self.fc(x) x = self.ln(x) state = torch.tanh(x) return state class SmallPixelEncoder(nn.Module): def __init__(self, obs_shape, out_dim=50): super().__init__() channels = obs_shape[0] self.conv1 = nn.Conv2d(channels, 32, kernel_size=8, stride=4) self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) output_height, output_width = utils.compute_conv_output( obs_shape[1:], kernel_size=(8, 8), stride=(4, 4) ) output_height, output_width = utils.compute_conv_output( (output_height, output_width), kernel_size=(4, 4), stride=(2, 2) ) output_height, output_width = utils.compute_conv_output( (output_height, output_width), kernel_size=(3, 3), stride=(1, 1) ) self.fc = nn.Linear(output_height * output_width * 64, out_dim) self.apply(weight_init) def forward(self, obs): obs /= 255.0 x = F.relu(self.conv1(obs)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = x.view(x.size(0), -1) state = self.fc(x) return state class StochasticActor(nn.Module): def __init__( self, state_space_size, act_space_size, log_std_low=-10, log_std_high=2, hidden_size=1024, dist_impl="pyd", ): super().__init__() assert dist_impl in ["pyd", "beta"] self.fc1 = nn.Linear(state_space_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc3 = nn.Linear(hidden_size, 2 * act_space_size) self.log_std_low = log_std_low self.log_std_high = log_std_high self.apply(weight_init) self.dist_impl = dist_impl def forward(self, state): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) out = self.fc3(x) mu, log_std = out.chunk(2, dim=1) if self.dist_impl == "pyd": log_std = torch.tanh(log_std) log_std = self.log_std_low + 0.5 * ( self.log_std_high - self.log_std_low ) * (log_std + 1) std = log_std.exp() dist = SquashedNormal(mu, std) elif self.dist_impl == "beta": out = 1.0 + F.softplus(out) alpha, beta = out.chunk(2, dim=1) dist = BetaDist(alpha, beta) return dist class BigCritic(nn.Module): def __init__(self, state_space_size, act_space_size, hidden_size=1024): super().__init__() 
self.fc1 = nn.Linear(state_space_size + act_space_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc3 = nn.Linear(hidden_size, 1) self.apply(weight_init) def forward(self, state, action): x = F.relu(self.fc1(torch.cat((state, action), dim=1))) x = F.relu(self.fc2(x)) out = self.fc3(x) return out class BaselineActor(nn.Module): def __init__(self, state_size, action_size, hidden_size=400): super().__init__() self.fc1 = nn.Linear(state_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, action_size) def forward(self, state): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) act = torch.tanh(self.out(x)) return act class BaselineCritic(nn.Module): def __init__(self, state_size, action_size): super().__init__() self.fc1 = nn.Linear(state_size + action_size, 400) self.fc2 = nn.Linear(400, 300) self.out = nn.Linear(300, 1) def forward(self, state, action): x = torch.cat((state, action), dim=1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) val = self.out(x) return val class BetaDist(pyd.transformed_distribution.TransformedDistribution): class _BetaDistTransform(pyd.transforms.Transform): domain = pyd.constraints.real codomain = pyd.constraints.interval(-1.0, 1.0) def __init__(self, cache_size=1): super().__init__(cache_size=cache_size) def __eq__(self, other): return isinstance(other, _BetaDistTransform) def _inverse(self, y): return (y.clamp(-0.99, 0.99) + 1.0) / 2.0 def _call(self, x): return (2.0 * x) - 1.0 def log_abs_det_jacobian(self, x, y): # return log det jacobian |dy/dx| given input and output return torch.Tensor([math.log(2.0)]).to(x.device) def __init__(self, alpha, beta): self.base_dist = pyd.beta.Beta(alpha, beta) transforms = [self._BetaDistTransform()] super().__init__(self.base_dist, transforms) @property def mean(self): mu = self.base_dist.mean for tr in self.transforms: mu = tr(mu) return mu """ Credit for actor distribution code: https://github.com/denisyarats/pytorch_sac/blob/master/agent/actor.py """ class TanhTransform(pyd.transforms.Transform): domain = pyd.constraints.real codomain = pyd.constraints.interval(-1.0, 1.0) bijective = True sign = +1 def __init__(self, cache_size=1): super().__init__(cache_size=cache_size) @staticmethod def atanh(x): return 0.5 * (x.log1p() - (-x).log1p()) def __eq__(self, other): return isinstance(other, TanhTransform) def _call(self, x): return x.tanh() def _inverse(self, y): return self.atanh(y.clamp(-0.99, 0.99)) def log_abs_det_jacobian(self, x, y): return 2.0 * (math.log(2.0) - x - F.softplus(-2.0 * x)) class SquashedNormal(pyd.transformed_distribution.TransformedDistribution): def __init__(self, loc, scale): self.loc = loc self.scale = scale self.base_dist = pyd.Normal(loc, scale) transforms = [TanhTransform()] super().__init__(self.base_dist, transforms) @property def mean(self): mu = self.loc for tr in self.transforms: mu = tr(mu) return mu class GracBaselineActor(nn.Module): def __init__(self, obs_size, action_size): super().__init__() self.fc1 = nn.Linear(obs_size, 400) self.fc2 = nn.Linear(400, 300) self.fc_mean = nn.Linear(300, action_size) self.fc_std = nn.Linear(300, action_size) def forward(self, state, stochastic=False): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) mean = torch.tanh(self.fc_mean(x)) std = F.softplus(self.fc_std(x)) + 1e-3 dist = pyd.Normal(mean, std) return dist class BaselineDiscreteActor(nn.Module): def __init__(self, obs_shape, action_size, hidden_size=300): super().__init__() self.fc1 = nn.Linear(obs_shape, hidden_size) 
self.fc2 = nn.Linear(hidden_size, hidden_size) self.act_p = nn.Linear(hidden_size, action_size) def forward(self, state): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) act_p = F.softmax(self.act_p(x), dim=1) dist = pyd.categorical.Categorical(act_p) return dist class BaselineDiscreteCritic(nn.Module): def __init__(self, obs_shape, action_shape, hidden_size=300): super().__init__() self.fc1 = nn.Linear(obs_shape, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, action_shape) def forward(self, state): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) vals = self.out(x) return vals
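# --- Illustrative sketch (not part of the benchmark source) ---
# Running the stochastic actor above on a dummy state batch. The sizes are
# arbitrary; the point is that forward() returns a SquashedNormal whose
# rsample() keeps actions in (-1, 1) while remaining differentiable.
import torch

actor = StochasticActor(state_space_size=8, act_space_size=2, hidden_size=64)
state = torch.zeros(5, 8)
dist = actor(state)
action = dist.rsample()                  # shape (5, 2)
logp = dist.log_prob(action).sum(-1)     # per-sample log-probability, shape (5,)
print(action.shape, logp.shape)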
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models


class Model(TorchVisionModel):
    task = COMPUTER_VISION.CLASSIFICATION
    # Train batch size: use the training batch size from the paper.
    # Source: https://arxiv.org/pdf/1608.06993.pdf
    DEFAULT_TRAIN_BSIZE = 256
    DEFAULT_EVAL_BSIZE = 64

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(model_name="densenet121", test=test, device=device,
                         batch_size=batch_size, weights=models.DenseNet121_Weights.IMAGENET1K_V1,
                         extra_args=extra_args)
# Ported from pytorch example: # https://github.com/pytorch/examples/blob/master/dcgan/main.py from __future__ import print_function import argparse import os import random from typing import Any, Tuple import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import torchvision.utils as vutils import numpy as np from pathlib import Path from ...util.model import BenchmarkModel from torchbenchmark.tasks import COMPUTER_VISION class DCGAN: def __init__(self, bench): # Spatial size of training images. All images will be resized to this # size using a transformer. self.image_size = 64 # Number of channels in the training images. For color images this is 3 self.nc = 3 # Size of z latent vector (i.e. size of generator input) self.nz = 100 # Size of feature maps in generator self.ngf = 64 # Size of feature maps in discriminator self.ndf = 64 # Number of training epochs self.num_epochs = 5 # Learning rate for optimizers self.lr = 0.0002 # Beta1 hyperparam for Adam optimizers self.beta1 = 0.5 # Number of GPUs available. Use 0 for CPU mode. self.ngpu = 1 self.device = bench.device # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) class Generator(nn.Module): def __init__(self, dcgan): super(Generator, self).__init__() self.ngpu = dcgan.ngpu self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d( dcgan.nz, dcgan.ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(dcgan.ngf * 8), nn.ReLU(True), # state size. (dcgan.ngf*8) x 4 x 4 nn.ConvTranspose2d(dcgan.ngf * 8, dcgan.ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(dcgan.ngf * 4), nn.ReLU(True), # state size. (dcgan.ngf*4) x 8 x 8 nn.ConvTranspose2d( dcgan.ngf * 4, dcgan.ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(dcgan.ngf * 2), nn.ReLU(True), # state size. (dcgan.ngf*2) x 16 x 16 nn.ConvTranspose2d( dcgan.ngf * 2, dcgan.ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(dcgan.ngf), nn.ReLU(True), # state size. (dcgan.ngf) x 32 x 32 nn.ConvTranspose2d( dcgan.ngf, dcgan.nc, 4, 2, 1, bias=False), nn.Tanh() # state size. (dcgan.nc) x 64 x 64 ) self.debug_print = False def forward(self, input): if self.debug_print: print(input.shape) return self.main(input) class Discriminator(nn.Module): def __init__(self, ncgan): ngpu = ncgan.ngpu nc = ncgan.nc ndf = ncgan.ndf super(Discriminator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is (nc) x 64 x 64 nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*2) x 16 x 16 nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*4) x 8 x 8 nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, inplace=True), # state size. 
(ndf*8) x 4 x 4 nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False), nn.Sigmoid() ) def forward(self, input): return self.main(input) class Model(BenchmarkModel): task = COMPUTER_VISION.GENERATION DEFAULT_TRAIN_BSIZE = 32 DEFAULT_EVAL_BSIZE = 256 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.debug_print = False self.root = str(Path(__file__).parent) self.dcgan = DCGAN(self) dcgan = self.dcgan device = dcgan.device ngpu = dcgan.ngpu nz = dcgan.nz lr = dcgan.lr beta1 = dcgan.beta1 num_epochs = dcgan.num_epochs # Create the generator self.netG = Generator(dcgan).to(device) # Handle multi-gpu if desired if (dcgan.device == 'cuda') and (ngpu > 1): self.netG = nn.DataParallel(self.netG, list(range(ngpu))) # Apply the weights_init function to randomly initialize all weights # to mean=0, stdev=0.2. self.netG.apply(weights_init) if self.debug_print: # Print the model print(self.netG) # Create the Discriminator netD = Discriminator(dcgan).to(device) # Handle multi-gpu if desired if (dcgan.device == 'cuda') and (ngpu > 1): netD = nn.DataParallel(self.netD, list(range(ngpu))) # Apply the weights_init function to randomly initialize all weights # to mean=0, stdev=0.2. netD.apply(weights_init) if self.debug_print: # Print the model print(netD) # Initialize BCELoss function self.criterion = nn.BCELoss() # Create batch of latent vectors that we will use to visualize # the progression of the generator self.fixed_noise = torch.randn(64, nz, 1, 1, device=device) # Establish convention for real and fake labels during training self.real_label = 1. self.fake_label = 0. # Random values as surrogate for batch of photos self.exmaple_inputs = torch.randn(self.batch_size, 3, 64, 64, device=self.device) self.model = netD if test == "train": # Setup Adam optimizers for both G and D self.optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999)) self.optimizerG = optim.Adam(self.netG.parameters(), lr=lr, betas=(beta1, 0.999)) elif test == "eval": # inference would just run descriminator so thats what we'll do too. 
self.inference_just_descriminator = True if False == self.inference_just_descriminator: self.eval_noise = torch.randn(self.batch_size, nz, 1, 1, device=self.device) def jit_callback(self): self.model = torch.jit.trace(self.model,(self.exmaple_inputs,)) if self.test == "eval" and False == self.inference_just_descriminator: self.netG = torch.jit.trace(self.netG,(self.eval_noise,)) def get_module(self): return self.model, (self.exmaple_inputs,) def eval(self): if False == self.inference_just_descriminator: # Generate fake image batch with G self.eval_fake = self.netG(self.eval_noise) # Since we just updated D, perform another forward pass of all-fake batch through D output = self.model(self.exmaple_inputs).view(-1) return (output, ) def train(self): # Training Loop # Lists to keep track of progress img_list = [] iters = 0 dcgan = self.dcgan device = dcgan.device num_epochs = dcgan.num_epochs num_train_batch = 1 lr = dcgan.lr nz = dcgan.nz beta1 = dcgan.beta1 netD = self.model netG = self.netG criterion = self.criterion optimizerD = self.optimizerD optimizerG = self.optimizerG real_label = self.real_label fake_label = self.fake_label benchmark_pic = self.exmaple_inputs # For each epoch for epoch in range(num_epochs): for i in range(num_train_batch): ############################ # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) ########################### ## Train with all-real batch netD.zero_grad() # Format batch real_cpu = benchmark_pic b_size = real_cpu.size(0) label = torch.full((b_size,), real_label, dtype=torch.float, device=device) # Forward pass real batch through D output = netD(real_cpu).view(-1) # Calculate loss on all-real batch errD_real = criterion(output, label) # Calculate gradients for D in backward pass errD_real.backward() D_x = output.mean().item() ## Train with all-fake batch # Generate batch of latent vectors noise = torch.randn(b_size, nz, 1, 1, device=device) # Generate fake image batch with G fake = netG(noise) label.fill_(fake_label) # Classify all fake batch with D output = netD(fake.detach()).view(-1) # Calculate D's loss on the all-fake batch errD_fake = criterion(output, label) # Calculate the gradients for this batch, accumulated (summed) with previous gradients errD_fake.backward() D_G_z1 = output.mean().item() # Compute error of D as sum over the fake and the real batches errD = errD_real + errD_fake # Update D optimizerD.step() ############################ # (2) Update G network: maximize log(D(G(z))) ########################### netG.zero_grad() label.fill_(real_label) # fake labels are real for generator cost # Since we just updated D, perform another forward pass of all-fake batch through D output = netD(fake).view(-1) # Calculate G's loss based on this output errG = criterion(output, label) # Calculate gradients for G errG.backward() D_G_z2 = output.mean().item() # Update G optimizerG.step() # This model has TWO optimizers! Try returning both. def get_optimizer(self): return (self.optimizerD, self.optimizerG) # `optimizer` has type Tuple but we want this function to override the parent's # so keep the name and schema the same. def set_optimizer(self, optimizer) -> None: self.optimizerD, self.optimizerG = optimizer
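# --- Illustrative sketch (not part of the benchmark source) ---
# Feeding random latent vectors through the Generator above and scoring the
# result with the Discriminator on CPU. _Cfg is a hypothetical stand-in that
# exposes only the fields the two networks read.
import torch

class _Cfg:
    nz, ngf, ndf, nc, ngpu = 100, 64, 64, 3, 0

netG, netD = Generator(_Cfg()), Discriminator(_Cfg())
noise = torch.randn(2, _Cfg.nz, 1, 1)
fake = netG(noise)                 # (2, 3, 64, 64) images in [-1, 1] via Tanh
score = netD(fake).view(-1)        # (2,) real/fake probabilities via Sigmoid
print(fake.shape, score.shape)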
import subprocess import sys def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements()
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel from torchbenchmark.tasks import SPEECH import torch class Model(HuggingFaceModel): task = SPEECH.RECOGNITION DEFAULT_EVAL_BSIZE = 8 DEFAULT_EVAL_CUDA_PRECISION = "fp16" def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(name="hf_Whisper", test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.feature_size = 80 self.sequence_length = 3000 self.input_features = torch.randn(size=(self.batch_size, self.feature_size, self.sequence_length),device=self.device) self.example_inputs = {"input_features": self.input_features.to(self.device), "input_ids" : self.input_features.to(self.device)} self.model.to(self.device) def train(self): raise NotImplementedError("Training is not implemented.") def eval(self): self.model.eval() with torch.no_grad(): self.model(self.example_inputs["input_ids"]) def enable_fp16_half(self): self.model.half() self.example_inputs = {"input_features": self.input_features.half().to(self.device), "input_ids" : self.input_features.half().to(self.device)}
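# Illustrative only: a rough sketch of exercising the hf_Whisper benchmark above,
# assuming the HuggingFace model factory can construct the Whisper weights locally.
# eval() pushes one random (batch, 80, 3000) log-mel feature tensor through the model
# under torch.no_grad().
def _example_whisper_eval():
    m = Model(test="eval", device="cpu")
    m.eval()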
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements() patch_transformers() model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) cache_model(model_name)
import os from torchbenchmark.tasks import COMPUTER_VISION from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model MODEL_NAME = os.path.basename(os.path.dirname(__file__)) MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) class Model(Detectron2Model): task = COMPUTER_VISION.DETECTION model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl") def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(variant="COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
import os from torchbenchmark.util.framework.detectron2 import install_detectron2 MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__))) MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) if __name__ == '__main__': install_detectron2(MODEL_NAME, MODEL_DIR)
import os from torchbenchmark.tasks import COMPUTER_VISION from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__))) MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) class Model(Detectron2Model): task = COMPUTER_VISION.SEGMENTATION model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl") def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(variant="COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
import os from torchbenchmark.util.framework.detectron2 import install_detectron2 MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__))) MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) if __name__ == '__main__': install_detectron2(MODEL_NAME, MODEL_DIR)
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel from torchbenchmark.tasks import COMPUTER_VISION import torchvision.models as models class Model(TorchVisionModel): task = COMPUTER_VISION.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 128 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="shufflenet_v2_x1_0", test=test, device=device, batch_size=batch_size, weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1, extra_args=extra_args)
from torchbenchmark.util.framework.gnn.model_factory import BasicGNNModel from torchbenchmark.tasks import GNN class Model(BasicGNNModel): def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="gcn", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
from torchbenchmark.util.framework.gnn import install_pytorch_geometric if __name__ == '__main__': install_pytorch_geometric()
from torchbenchmark.util.framework.gnn.model_factory import BasicGNNModel from torchbenchmark.tasks import GNN class Model(BasicGNNModel): def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="gin", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
from torchbenchmark.util.framework.gnn import install_pytorch_geometric if __name__ == '__main__': install_pytorch_geometric()
import torch def get_drhodT(salt, temp, p): rho0 = 1024.0 z0 = 0.0 theta0 = 283.0 - 273.15 grav = 9.81 betaT = 1.67e-4 betaTs = 1e-5 gammas = 1.1e-8 zz = -p - z0 thetas = temp - theta0 return -(betaTs * thetas + betaT * (1 - gammas * grav * zz * rho0)) * rho0 def get_drhodS(salt, temp, p): betaS = 0.78e-3 rho0 = 1024.0 return betaS * rho0 * torch.ones_like(temp) def dm_taper(sx): """ tapering function for isopycnal slopes """ iso_slopec = 1e-3 iso_dslope = 1e-3 return 0.5 * (1.0 + torch.tanh((-torch.abs(sx) + iso_slopec) / iso_dslope)) def isoneutral_diffusion_pre( maskT, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw, cost, cosu, salt, temp, zt, K_iso, K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by, ): """ Isopycnal diffusion for tracer following functional formulation by Griffies et al Code adopted from MOM2.1 """ epsln = 1e-20 K_iso_steep = 50.0 tau = 0 device = K_11.device dTdx = torch.zeros_like(K_11) dSdx = torch.zeros_like(K_11) dTdy = torch.zeros_like(K_11) dSdy = torch.zeros_like(K_11) dTdz = torch.zeros_like(K_11) dSdz = torch.zeros_like(K_11) """ drho_dt and drho_ds at centers of T cells """ drdT = maskT * get_drhodT(salt[:, :, :, tau], temp[:, :, :, tau], torch.abs(zt)) drdS = maskT * get_drhodS(salt[:, :, :, tau], temp[:, :, :, tau], torch.abs(zt)) """ gradients at top face of T cells """ dTdz[:, :, :-1] = ( maskW[:, :, :-1] * (temp[:, :, 1:, tau] - temp[:, :, :-1, tau]) / dzw[None, None, :-1] ) dSdz[:, :, :-1] = ( maskW[:, :, :-1] * (salt[:, :, 1:, tau] - salt[:, :, :-1, tau]) / dzw[None, None, :-1] ) """ gradients at eastern face of T cells """ dTdx[:-1, :, :] = ( maskU[:-1, :, :] * (temp[1:, :, :, tau] - temp[:-1, :, :, tau]) / (dxu[:-1, None, None] * cost[None, :, None]) ) dSdx[:-1, :, :] = ( maskU[:-1, :, :] * (salt[1:, :, :, tau] - salt[:-1, :, :, tau]) / (dxu[:-1, None, None] * cost[None, :, None]) ) """ gradients at northern face of T cells """ dTdy[:, :-1, :] = ( maskV[:, :-1, :] * (temp[:, 1:, :, tau] - temp[:, :-1, :, tau]) / dyu[None, :-1, None] ) dSdy[:, :-1, :] = ( maskV[:, :-1, :] * (salt[:, 1:, :, tau] - salt[:, :-1, :, tau]) / dyu[None, :-1, None] ) """ Compute Ai_ez and K11 on center of east face of T cell. """ diffloc = torch.zeros_like(K_11) diffloc[1:-2, 2:-2, 1:] = 0.25 * ( K_iso[1:-2, 2:-2, 1:] + K_iso[1:-2, 2:-2, :-1] + K_iso[2:-1, 2:-2, 1:] + K_iso[2:-1, 2:-2, :-1] ) diffloc[1:-2, 2:-2, 0] = 0.5 * (K_iso[1:-2, 2:-2, 0] + K_iso[2:-1, 2:-2, 0]) sumz = torch.zeros_like(K_11)[1:-2, 2:-2] for kr in range(2): ki = 0 if kr == 1 else 1 if kr == 1: su = K_11.shape[2] else: su = K_11.shape[2] - 1 for ip in range(2): drodxe = ( drdT[1 + ip : -2 + ip, 2:-2, ki:] * dTdx[1:-2, 2:-2, ki:] + drdS[1 + ip : -2 + ip, 2:-2, ki:] * dSdx[1:-2, 2:-2, ki:] ) drodze = ( drdT[1 + ip : -2 + ip, 2:-2, ki:] * dTdz[1 + ip : -2 + ip, 2:-2, :su] + drdS[1 + ip : -2 + ip, 2:-2, ki:] * dSdz[1 + ip : -2 + ip, 2:-2, :su] ) sxe = -drodxe / ( torch.min(drodze, torch.tensor([0.0], device=device)) - epsln ) taper = dm_taper(sxe) sumz[:, :, ki:] += ( dzw[None, None, :su] * maskU[1:-2, 2:-2, ki:] * torch.max( torch.tensor([K_iso_steep], device=device), diffloc[1:-2, 2:-2, ki:] * taper, ) ) Ai_ez[1:-2, 2:-2, ki:, ip, kr] = taper * sxe * maskU[1:-2, 2:-2, ki:] K_11[1:-2, 2:-2, :] = sumz / (4.0 * dzt[None, None, :]) """ Compute Ai_nz and K_22 on center of north face of T cell. """ diffloc[...] 
= 0 diffloc[2:-2, 1:-2, 1:] = 0.25 * ( K_iso[2:-2, 1:-2, 1:] + K_iso[2:-2, 1:-2, :-1] + K_iso[2:-2, 2:-1, 1:] + K_iso[2:-2, 2:-1, :-1] ) diffloc[2:-2, 1:-2, 0] = 0.5 * (K_iso[2:-2, 1:-2, 0] + K_iso[2:-2, 2:-1, 0]) sumz = torch.zeros_like(K_11)[2:-2, 1:-2] for kr in range(2): ki = 0 if kr == 1 else 1 if kr == 1: su = K_11.shape[2] else: su = K_11.shape[2] - 1 for jp in range(2): drodyn = ( drdT[2:-2, 1 + jp : -2 + jp, ki:] * dTdy[2:-2, 1:-2, ki:] + drdS[2:-2, 1 + jp : -2 + jp, ki:] * dSdy[2:-2, 1:-2, ki:] ) drodzn = ( drdT[2:-2, 1 + jp : -2 + jp, ki:] * dTdz[2:-2, 1 + jp : -2 + jp, :su] + drdS[2:-2, 1 + jp : -2 + jp, ki:] * dSdz[2:-2, 1 + jp : -2 + jp, :su] ) syn = -drodyn / ( torch.min(torch.tensor([0.0], device=device), drodzn) - epsln ) taper = dm_taper(syn) sumz[:, :, ki:] += ( dzw[None, None, :su] * maskV[2:-2, 1:-2, ki:] * torch.max( torch.tensor([K_iso_steep], device=device), diffloc[2:-2, 1:-2, ki:] * taper, ) ) Ai_nz[2:-2, 1:-2, ki:, jp, kr] = taper * syn * maskV[2:-2, 1:-2, ki:] K_22[2:-2, 1:-2, :] = sumz / (4.0 * dzt[None, None, :]) """ compute Ai_bx, Ai_by and K33 on top face of T cell. """ sumx = torch.zeros_like(K_11)[2:-2, 2:-2, :-1] sumy = torch.zeros_like(K_11)[2:-2, 2:-2, :-1] for kr in range(2): if kr == 1: sl = 1 su = K_11.shape[2] else: sl = 0 su = K_11.shape[2] - 1 drodzb = ( drdT[2:-2, 2:-2, sl:su] * dTdz[2:-2, 2:-2, :-1] + drdS[2:-2, 2:-2, sl:su] * dSdz[2:-2, 2:-2, :-1] ) # eastward slopes at the top of T cells for ip in range(2): drodxb = ( drdT[2:-2, 2:-2, sl:su] * dTdx[1 + ip : -3 + ip, 2:-2, sl:su] + drdS[2:-2, 2:-2, sl:su] * dSdx[1 + ip : -3 + ip, 2:-2, sl:su] ) sxb = -drodxb / ( torch.min(torch.tensor([0.0], device=device), drodzb) - epsln ) taper = dm_taper(sxb) sumx += ( dxu[1 + ip : -3 + ip, None, None] * K_iso[2:-2, 2:-2, :-1] * taper * sxb ** 2 * maskW[2:-2, 2:-2, :-1] ) Ai_bx[2:-2, 2:-2, :-1, ip, kr] = taper * sxb * maskW[2:-2, 2:-2, :-1] # northward slopes at the top of T cells for jp in range(2): facty = cosu[1 + jp : -3 + jp] * dyu[1 + jp : -3 + jp] drodyb = ( drdT[2:-2, 2:-2, sl:su] * dTdy[2:-2, 1 + jp : -3 + jp, sl:su] + drdS[2:-2, 2:-2, sl:su] * dSdy[2:-2, 1 + jp : -3 + jp, sl:su] ) syb = -drodyb / ( torch.min(torch.tensor([0.0], device=device), drodzb) - epsln ) taper = dm_taper(syb) sumy += ( facty[None, :, None] * K_iso[2:-2, 2:-2, :-1] * taper * syb ** 2 * maskW[2:-2, 2:-2, :-1] ) Ai_by[2:-2, 2:-2, :-1, jp, kr] = taper * syb * maskW[2:-2, 2:-2, :-1] K_33[2:-2, 2:-2, :-1] = sumx / (4 * dxt[2:-2, None, None]) + sumy / ( 4 * dyt[None, 2:-2, None] * cost[None, 2:-2, None] ) K_33[2:-2, 2:-2, -1] = 0.0 return K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by def prepare_inputs(*inputs, device): out = [ torch.as_tensor(a, device=device) for a in inputs ] if device == "gpu": torch.cuda.synchronize() return out def run(*inputs, device="cpu"): with torch.no_grad(): outputs = isoneutral_diffusion_pre(*inputs) if device == "gpu": torch.cuda.synchronize() return outputs
import torch from . import isoneutral_pytorch from torchbenchmark.tasks import OTHER from ...util.model import BenchmarkModel from typing import Tuple def _generate_inputs(size): import math import numpy as np np.random.seed(17) shape = ( math.ceil(2 * size ** (1 / 3)), math.ceil(2 * size ** (1 / 3)), math.ceil(0.25 * size ** (1 / 3)), ) # masks maskT, maskU, maskV, maskW = ( (np.random.rand(*shape) < 0.8).astype("float64") for _ in range(4) ) # 1d arrays dxt, dxu = (np.random.randn(shape[0]) for _ in range(2)) dyt, dyu = (np.random.randn(shape[1]) for _ in range(2)) dzt, dzw, zt = (np.random.randn(shape[2]) for _ in range(3)) cost, cosu = (np.random.randn(shape[1]) for _ in range(2)) # 3d arrays K_iso, K_iso_steep, K_11, K_22, K_33 = (np.random.randn(*shape) for _ in range(5)) # 4d arrays salt, temp = (np.random.randn(*shape, 3) for _ in range(2)) # 5d arrays Ai_ez, Ai_nz, Ai_bx, Ai_by = (np.zeros((*shape, 2, 2)) for _ in range(4)) return ( maskT, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw, cost, cosu, salt, temp, zt, K_iso, K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by, ) class IsoneutralMixing(torch.nn.Module): def __init__(self): super(IsoneutralMixing, self).__init__() def forward( self, maskT, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw, cost, cosu, salt, temp, zt, K_iso, K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by, ): return isoneutral_pytorch.isoneutral_diffusion_pre( maskT, maskU, maskV, maskW, dxt, dxu, dyt, dyu, dzt, dzw, cost, cosu, salt, temp, zt, K_iso, K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by, ) class Model(BenchmarkModel): task = OTHER.OTHER_TASKS # Original input size: [2 ** i for i in range(12, 23, 2)] # Source: https://github.com/dionhaefner/pyhpc-benchmarks/blob/650ecc650e394df829944ffcf09e9d646ec69691/run.py#L25 # Pick data-point when i = 20, size = 1048576 DEFAULT_EVAL_BSIZE = 1048576 CANNOT_SET_CUSTOM_OPTIMIZER = True def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.model = IsoneutralMixing().to(device=device) input_size = self.batch_size raw_inputs = _generate_inputs(input_size) if hasattr(isoneutral_pytorch, "prepare_inputs"): inputs = isoneutral_pytorch.prepare_inputs(*raw_inputs, device=device) self.example_inputs = inputs def get_module(self): return self.model, self.example_inputs def train(self): raise NotImplementedError("Training not supported") def eval(self) -> Tuple[torch.Tensor]: model, example_inputs = self.get_module() with torch.no_grad(): out = model(*example_inputs) return out
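# Illustrative only: a minimal sketch of running the isoneutral-mixing benchmark at a
# small problem size, assuming the torchbenchmark harness is importable. Note that
# batch_size here is the total number of grid points, not a minibatch size.
def _example_isoneutral_eval():
    m = Model(test="eval", device="cpu", batch_size=4096)
    outputs = m.eval()  # (K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by)
    return [o.shape for o in outputs]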
if __name__ == "__main__": pass
# This example was adapted from https://github.com/muhrin/milad
# It is licensed under the GPLv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from functorch import vmap, jacrev
from typing import Tuple

from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER

sigma = 0.5
epsilon = 4.


def lennard_jones(r):
    return epsilon * ((sigma / r)**12 - (sigma / r)**6)


def lennard_jones_force(r):
    """Get magnitude of LJ force"""
    return -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))


def make_prediction(model, drs):
    norms = torch.linalg.norm(drs, dim=1).reshape(-1, 1)
    energies = model(norms)

    network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
    forces = -network_derivs * drs / norms
    return energies, forces


def loss_fn(energies, forces, predicted_energies, predicted_forces):
    return F.mse_loss(energies, predicted_energies) + 0.01 * F.mse_loss(forces, predicted_forces) / 3


class Model(BenchmarkModel):
    task = OTHER.OTHER_TASKS
    DEFAULT_TRAIN_BSIZE = 1000
    DEFAULT_EVAL_BSIZE = 1000

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.model = nn.Sequential(
            nn.Linear(1, 16),
            nn.Tanh(),
            nn.Linear(16, 16),
            nn.Tanh(),
            nn.Linear(16, 16),
            nn.Tanh(),
            nn.Linear(16, 16),
            nn.Tanh(),
            nn.Linear(16, 1)
        )
        self.model = self.model.to(device)

        r = torch.linspace(0.5, 2 * sigma, steps=self.batch_size)

        # Create a bunch of vectors that point along positive-x.
        # These are the dummy inputs to the model.
        self.drs = torch.outer(r, torch.tensor([1.0, 0, 0])).to(device=device)

        # Generate some dummy targets based on the Lennard-Jones force.
        norms = torch.linalg.norm(self.drs, dim=1).reshape(-1, 1)
        self.norms = norms
        # Create training energies
        self.training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
        # Create forces along the direction vectors
        self.training_forces = torch.stack([
            force * dr
            for force, dr in zip(map(lennard_jones_force, norms), self.drs)
        ])

        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)

    def get_module(self):
        return self.model, (self.norms, )

    def train(self):
        model = self.model
        optimizer = self.optimizer
        model.train()
        optimizer.zero_grad()

        energies, forces = make_prediction(model, self.drs)
        loss = loss_fn(self.training_energies, self.training_forces, energies, forces)
        loss.backward()
        optimizer.step()

    def eval(self) -> Tuple[torch.Tensor]:
        model = self.model
        model.eval()
        with torch.no_grad():
            out = make_prediction(model, self.drs)
        return out
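# Illustrative only: a self-contained example of the vmap(jacrev(...)) pattern used in
# make_prediction above -- a per-sample derivative of a scalar "energy" function with
# respect to its 1-d input, checked against the analytic derivative of x**2.
import torch
from functorch import vmap, jacrev

def _example_per_sample_derivative():
    energy = lambda x: (x ** 2).sum(-1, keepdim=True)   # stand-in for the MLP energy model
    xs = torch.linspace(0.5, 1.0, steps=8).reshape(-1, 1)
    dfdx = vmap(jacrev(energy))(xs).squeeze(-1)          # analogous to network_derivs above
    assert torch.allclose(dfdx, 2 * xs)                  # d/dx x^2 = 2x
    return dfdx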
import subprocess import sys def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements()
from torchbenchmark.tasks import NLP from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel class Model(HuggingFaceModel): task = NLP.LANGUAGE_MODELING DEFAULT_TRAIN_BSIZE = 4 DEFAULT_EVAL_BSIZE = 1 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(name="hf_Bert_large", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements() patch_transformers() model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) cache_model(model_name)
""" fastNLP model (TorchBenchmark Version) This model resembles the "BertEmedding Q&A" task in [fastNLP Tutorial](https://fastnlp.readthedocs.io/zh/latest/tutorials/extend_1_bert_embedding.html). Input data simulates [CMRC2018 dataset](https://ymcui.com/cmrc2018/). The program runs only for benchmark purposes and doesn't provide correctness results. """ import logging from typing import Tuple import torch import random import inspect import numpy as np from fastNLP.embeddings import BertEmbedding from fastNLP.models import BertForQuestionAnswering from fastNLP.core.callback import CallbackManager from fastNLP.core.batch import DataSetIter from fastNLP.core.losses import CMRC2018Loss from fastNLP.core.metrics import CMRC2018Metric from fastNLP.io.pipe.qa import CMRC2018BertPipe from fastNLP import WarmupCallback, GradientClipCallback from fastNLP.core.optimizer import AdamW from fastNLP.core import logger # Import CMRC2018 data generator from .cmrc2018_simulator import generate_inputs from .cmrc2018_simulator import CMRC2018_DIR, CMRC2018_CONFIG_DIR # TorchBench imports from torchbenchmark.util.model import BenchmarkModel from torchbenchmark.tasks import NLP torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False logger.setLevel(logging.WARNING) class Model(BenchmarkModel): task = NLP.LANGUAGE_MODELING # Use the train batch size from the original CMRC2018 Q&A task # Source: https://fastnlp.readthedocs.io/zh/latest/tutorials/extend_1_bert_embedding.html DEFAULT_TRAIN_BSIZE = 6 DEFAULT_EVAL_BSIZE = 1 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.input_dir = CMRC2018_DIR # Generate input data files # FastNLP loader requires both train and eval files, so we need to generate both of them if test == "train": generate_inputs(train_batch_size=self.batch_size, eval_batch_size=self.DEFAULT_EVAL_BSIZE) elif test == "eval": generate_inputs(train_batch_size=self.DEFAULT_TRAIN_BSIZE, eval_batch_size=self.batch_size) data_bundle = CMRC2018BertPipe().process_from_file(paths=self.input_dir) data_bundle.rename_field('chars', 'words') self.embed = BertEmbedding(data_bundle.get_vocab('words'), model_dir_or_name=CMRC2018_CONFIG_DIR, requires_grad=True, include_cls_sep=False, auto_truncate=True, dropout=0.5, word_dropout=0.01) self.model = self._move_model_to_device(BertForQuestionAnswering(self.embed), device=device) if self._model_contains_inner_module(self.model): self._forward_func = self.model.module.forward else: self._forward_func = self.model.forward # Do not spawn new processes on small scale of data self.num_workers = 0 if self.test == "train": self.model.train() self.trainer = self.model self.train_data = data_bundle.get_dataset('train') self.data = self.train_data self.losser = CMRC2018Loss() self.metrics = CMRC2018Metric() self.update_every = 10 wm_callback = WarmupCallback(schedule='linear') gc_callback = GradientClipCallback(clip_value=1, clip_type='norm') callbacks = [wm_callback, gc_callback] self.optimizer = AdamW(self.model.parameters(), lr=5e-5) self.callback_manager = CallbackManager(env={"trainer":self}, callbacks=callbacks) elif self.test == "eval": self.model.eval() self.data = data_bundle.get_dataset('dev') example_inputs = DataSetIter(dataset=self.data, batch_size=self.batch_size, sampler=None, num_workers=self.num_workers, drop_last=False) self.example_inputs = self._prefetch(example_inputs) def get_module(self): batch_x, _batch_y = 
list(self.example_inputs)[0] return self.model, (batch_x["words"], ) # Sliced version of fastNLP.Tester._test() def eval(self) -> Tuple[torch.Tensor]: self._mode(self.model, is_test=True) self._predict_func = self.model.forward with torch.no_grad(): for batch_x, _batch_y in self.example_inputs: pred_dict = self._data_forward(self._predict_func, batch_x) # return a tuple of Tensors return (pred_dict['pred_start'], pred_dict['pred_end'] ) # Sliced version of fastNLP.Trainer._train() def train(self): self.step = 0 self.n_epochs = 1 self._mode(self.model, is_test=False) self.callback_manager.on_train_begin() for _epoch in range(self.n_epochs): self.callback_manager.on_epoch_begin() for batch_x, batch_y in self.example_inputs: self.step += 1 prediction = self._data_forward(self.model, batch_x) self.callback_manager.on_loss_begin(batch_y, prediction) loss = self._compute_loss(prediction, batch_y).mean() self.callback_manager.on_backward_begin(loss) self._grad_backward(loss) self.callback_manager.on_backward_end() self._update() self.callback_manager.on_step_end() self.callback_manager.on_batch_end() self.callback_manager.on_epoch_end() self.callback_manager.on_train_end() def _prefetch(self, example_inputs): prefetched_data = [] for batch_x, batch_y in example_inputs: self._move_dict_value_to_device(batch_x, batch_y, device=self.device) prefetched_data.append((batch_x, batch_y)) return prefetched_data # Helper functions def _build_args(self, func, **kwargs): spect = inspect.getfullargspec(func) if spect.varkw is not None: return kwargs needed_args = set(spect.args) defaults = [] if spect.defaults is not None: defaults = [arg for arg in spect.defaults] start_idx = len(spect.args) - len(defaults) output = {name: default for name, default in zip(spect.args[start_idx:], defaults)} output.update({name: val for name, val in kwargs.items() if name in needed_args}) return output def _move_dict_value_to_device(self, *args, device, non_blocking=False): for arg in args: if isinstance(arg, dict): for key, value in arg.items(): if isinstance(value, torch.Tensor): arg[key] = value.to(device, non_blocking=non_blocking) else: raise TypeError("Only support `dict` type right now.") def _model_contains_inner_module(self, model): if isinstance(model, torch.nn.Module): if isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)): return True return False def _move_model_to_device(self, model, device): model = model.to(device) return model def _mode(self, model, is_test=False): r"""Train mode or Test mode. This is for PyTorch currently. :param model: a PyTorch model :param bool is_test: whether in test mode or not. """ if is_test: model.eval() else: model.train() def _update(self): r"""Perform weight update on a model. """ if self.step % self.update_every == 0: self.optimizer.step() def _data_forward(self, network, x): x = self._build_args(self._forward_func, **x) y = network(**x) if not isinstance(y, dict): raise TypeError( f"The return value of {_get_func_signature(self._forward_func)} should be dict, got {type(y)}.") return y def _grad_backward(self, loss): r"""Compute gradient with link rules. :param loss: a scalar where back-prop starts For PyTorch, just do "loss.backward()" """ if (self.step-1) % self.update_every == 0: self.model.zero_grad() loss.backward() def _compute_loss(self, predict, truth): r"""Compute loss given prediction and ground truth. 
:param predict: prediction dict, produced by model.forward :param truth: ground truth dict, produced by batch_y :return: a scalar """ return self.losser(predict, truth) def get_optimizer(self): r"""Gets the optimizer if initiated""" if hasattr(self, "optimizer"): return self.optimizer return None def set_optimizer(self, optimizer) -> None: r"""Sets the optimizer regardless of whether it's been initiated""" self.optimizer = optimizer
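# Illustrative only: a rough sketch of driving this fastNLP Q&A benchmark, assuming
# fastNLP is installed and the simulated CMRC2018 data/config have been generated
# (see install.py and cmrc2018_simulator below).
def _example_fastnlp_eval():
    m = Model(test="eval", device="cpu")
    pred_start, pred_end = m.eval()   # per-position start/end logits for the answer span
    return pred_start.shape, pred_end.shape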
import subprocess import os import sys import patch def patch_fastnlp(): import fastNLP current_dir = os.path.dirname(os.path.abspath(__file__)) patch_file = os.path.join(current_dir, "fastnlp.patch") fastNLP_dir = os.path.dirname(fastNLP.__file__) fastNLP_target_file = os.path.join(fastNLP_dir, "embeddings", "bert_embedding.py") p = patch.fromfile(patch_file) if not p.apply(strip=1, root=fastNLP_dir): print("Failed to patch fastNLP. Exit.") exit(1) def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements() patch_fastnlp()
""" Generator of a simulated CMRC2018 dataset. Use random Chinese characters with the same length as the original dataset. """ import os import pathlib import json import random TRAIN_NUM_BATCH = 1 EVAL_NUM_BATCH = 1 CMRC2018_TRAIN_SPEC = { # Original # "data_size": 2403, # Benchmark "data_size": 6, # placeholder, will be replaced by the true batch size "title_length": 5, "paragraph_size": 1, "context_length": 456, "qas_size": 5, "query_length": 15, "answers_size": 1, "answers_length": 7 } CMRC2018_DEV_SPEC = { # Original # "data_size": 848, # Benchmark "data_size": 1, # placeholder, will be replaced by the true batch size "title_length": 4, "paragraph_size": 1, "context_length": 455, "qas_size": 4, "query_length": 15, "answers_size": 3, "answers_length": 7 } CMRC2018_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".data", "cmrc2018-sim") CMRC2018_CONFIG_DIR = os.path.join(CMRC2018_DIR, "config") CMRC2018_TRAIN_SIM = os.path.join(CMRC2018_DIR, "train.json") CMRC2018_DEV_SIM = os.path.join(CMRC2018_DIR, "dev.json") CMRC2018_VOCAB_SIM = os.path.join(CMRC2018_CONFIG_DIR, "vocab.txt") CMRC2018_BERT_CONFIG = os.path.join(CMRC2018_CONFIG_DIR, "bert_config.json") VOCAB_SET = set() # Generate random Chinese string with length l def _GBK2312(l): head = 0xd7 while head == 0xd7: head = random.randint(0xb0, 0xf7) body = random.randint(0xa1, 0xfe) val = f'{head:x} {body:x}' s = bytes.fromhex(val).decode('gb2312') VOCAB_SET.add(s) if l == 0: return s else: return s + _GBK2312(l-1) def _generate_cmrc2018(spec): simdata = {} simdata["version"] = "v1.0-sim" simdata["data"] = [] for ind in range(spec["data_size"]): item = {} para = {} item["id"] = f"DEV_{ind}" item["title"] = _GBK2312(spec["title_length"]) item["paragraphs"] = [] para["id"] = item["id"] para["context"] = _GBK2312(spec["context_length"]) para["qas"] = [] for qind in range(spec["qas_size"]): q = {} q["question"] = _GBK2312(spec["query_length"]) q["id"] = f"{item['id']}_QUERY_{qind}" q["answers"] = [] for ans in range(spec["answers_size"]): ans = {} ans["text"] = _GBK2312(spec["answers_length"]) ans["answer_start"] = 0 q["answers"].append(ans) para["qas"].append(q) item["paragraphs"].append(para) simdata["data"].append(item) return simdata def _create_dir_if_nonexist(dirpath): pathlib.Path(dirpath).mkdir(parents=True, exist_ok=True) def _dump_data(data, path): with open(path, "w", encoding='utf8') as dp: json.dump(data, dp, indent=4, ensure_ascii=False) def _generate_dev(batch_size): CMRC2018_DEV_SPEC["data_size"] = batch_size * EVAL_NUM_BATCH dev_data = _generate_cmrc2018(CMRC2018_DEV_SPEC) _dump_data(dev_data, CMRC2018_DEV_SIM) def _generate_train(batch_size): CMRC2018_TRAIN_SPEC["data_size"] = batch_size * TRAIN_NUM_BATCH dev_data = _generate_cmrc2018(CMRC2018_TRAIN_SPEC) _dump_data(dev_data, CMRC2018_TRAIN_SIM) # MUST be called after generate_dev() AND generate_train()! 
def _generate_vocab(): never_split = ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"] VOCAB_SET.update(never_split) with open(CMRC2018_VOCAB_SIM, "w") as vf: vf.write("\n".join(list(VOCAB_SET))) def _copy_bert_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(current_dir, "bert_config.json"), "r") as configf: config = configf.read() with open(CMRC2018_BERT_CONFIG, "w") as configf: configf.write(config) def _setup_os_env(): os.environ["TORCHBENCH_FASTNLP_CONFIG_PATH"] = CMRC2018_BERT_CONFIG def _create_empty_bin(): CMRC2018_CONFIG_DIR = os.path.join(CMRC2018_DIR, "config") bin_file = os.path.join(CMRC2018_CONFIG_DIR, "chinese_wwm_pytorch.bin") with open(bin_file, "w") as bf: bf.write("") def generate_inputs(train_batch_size, eval_batch_size): _create_dir_if_nonexist(CMRC2018_DIR) _create_dir_if_nonexist(os.path.join(CMRC2018_DIR, "config")) _generate_dev(eval_batch_size) _generate_train(train_batch_size) _generate_vocab() _create_empty_bin() _copy_bert_config() _setup_os_env()
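# Illustrative only: generating the simulated CMRC2018 files by hand with the same
# defaults the benchmark uses (train batch size 6, eval batch size 1). This writes
# train.json, dev.json, a vocab.txt and a copied bert_config.json under
# .data/cmrc2018-sim/, assuming the bundled bert_config.json sits next to this module.
def _example_generate_sim_data():
    generate_inputs(train_batch_size=6, eval_batch_size=1)
    return CMRC2018_TRAIN_SIM, CMRC2018_DEV_SIM, CMRC2018_VOCAB_SIM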
import torch from . import eos_pytorch from torchbenchmark.tasks import OTHER from ...util.model import BenchmarkModel from typing import Tuple def _generate_inputs(size): import math import numpy as np np.random.seed(17) shape = ( math.ceil(2 * size ** (1/3)), math.ceil(2 * size ** (1/3)), math.ceil(0.25 * size ** (1/3)), ) s = np.random.uniform(1e-2, 10, size=shape) t = np.random.uniform(-12, 20, size=shape) p = np.random.uniform(0, 1000, size=(1, 1, shape[-1])) return s, t, p class EquationOfState(torch.nn.Module): def __init__(self): super(EquationOfState, self).__init__() def forward(self, s, t, p): return eos_pytorch.gsw_dHdT(s, t, p) class Model(BenchmarkModel): task = OTHER.OTHER_TASKS # Original size: [2 ** i for i in range(12, 23, 2) # Source: https://github.com/dionhaefner/pyhpc-benchmarks/blob/650ecc650e394df829944ffcf09e9d646ec69691/run.py#L25 # Pick data point: i = 20, size = 1048576 DEFAULT_EVAL_BSIZE = 1048576 CANNOT_SET_CUSTOM_OPTIMIZER = True def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.model = EquationOfState().to(device=self.device) input_size = self.batch_size raw_inputs = _generate_inputs(input_size) if hasattr(eos_pytorch, "prepare_inputs"): inputs = eos_pytorch.prepare_inputs(*raw_inputs, device=device) self.example_inputs = inputs def get_module(self): return self.model, self.example_inputs def train(self): raise NotImplementedError("Training not supported") def eval(self) -> Tuple[torch.Tensor]: model, example_inputs = self.get_module() with torch.no_grad(): out = model(*example_inputs) return (out, )
""" ========================================================================== in-situ density, dynamic enthalpy and derivatives from Absolute Salinity and Conservative Temperature, using the computationally-efficient 48-term expression for density in terms of SA, CT and p (IOC et al., 2010). ========================================================================== """ import torch def gsw_dHdT(sa, ct, p): """ d/dT of dynamic enthalpy, analytical derivative sa : Absolute Salinity [g/kg] ct : Conservative Temperature [deg C] p : sea pressure [dbar] """ v01 = 9.998420897506056e2 v02 = 2.839940833161907e0 v03 = -3.147759265588511e-2 v04 = 1.181805545074306e-3 v05 = -6.698001071123802e0 v06 = -2.986498947203215e-2 v07 = 2.327859407479162e-4 v08 = -3.988822378968490e-2 v09 = 5.095422573880500e-4 v10 = -1.426984671633621e-5 v11 = 1.645039373682922e-7 v12 = -2.233269627352527e-2 v13 = -3.436090079851880e-4 v14 = 3.726050720345733e-6 v15 = -1.806789763745328e-4 v16 = 6.876837219536232e-7 v17 = -3.087032500374211e-7 v18 = -1.988366587925593e-8 v19 = -1.061519070296458e-11 v20 = 1.550932729220080e-10 v21 = 1.0e0 v22 = 2.775927747785646e-3 v23 = -2.349607444135925e-5 v24 = 1.119513357486743e-6 v25 = 6.743689325042773e-10 v26 = -7.521448093615448e-3 v27 = -2.764306979894411e-5 v28 = 1.262937315098546e-7 v29 = 9.527875081696435e-10 v30 = -1.811147201949891e-11 v31 = -3.303308871386421e-5 v32 = 3.801564588876298e-7 v33 = -7.672876869259043e-9 v34 = -4.634182341116144e-11 v35 = 2.681097235569143e-12 v36 = 5.419326551148740e-6 v37 = -2.742185394906099e-5 v38 = -3.212746477974189e-7 v39 = 3.191413910561627e-9 v40 = -1.931012931541776e-12 v41 = -1.105097577149576e-7 v42 = 6.211426728363857e-10 v43 = -1.119011592875110e-10 v44 = -1.941660213148725e-11 v45 = -1.864826425365600e-14 v46 = 1.119522344879478e-14 v47 = -1.200507748551599e-15 v48 = 6.057902487546866e-17 t1 = v45 * ct t2 = 0.2e1 * t1 t3 = v46 * sa t4 = 0.5 * v12 t5 = v14 * ct t7 = ct * (v13 + t5) t8 = 0.5 * t7 t11 = sa * (v15 + v16 * ct) t12 = 0.5 * t11 t13 = t4 + t8 + t12 t15 = v19 * ct t19 = v17 + ct * (v18 + t15) + v20 * sa t20 = 1.0 / t19 t24 = v47 + v48 * ct t25 = 0.5 * v13 t26 = 1.0 * t5 t27 = sa * v16 t28 = 0.5 * t27 t29 = t25 + t26 + t28 t33 = t24 * t13 t34 = t19 ** 2 t35 = 1.0 / t34 t37 = v18 + 2.0 * t15 t38 = t35 * t37 t48 = ct * (v44 + t1 + t3) t57 = v40 * ct t59 = ct * (v39 + t57) t64 = t13 ** 2 t68 = t20 * t29 t71 = t24 * t64 t74 = v04 * ct t76 = ct * (v03 + t74) t79 = v07 * ct t82 = torch.sqrt(sa) t83 = v11 * ct t85 = ct * (v10 + t83) t92 = ( v01 + ct * (v02 + t76) + sa * (v05 + ct * (v06 + t79) + t82 * (v08 + ct * (v09 + t85))) ) t93 = v48 * t92 t105 = ( v02 + t76 + ct * (v03 + 2.0 * t74) + sa * (v06 + 2.0 * t79 + t82 * (v09 + t85 + ct * (v10 + 2.0 * t83))) ) t106 = t24 * t105 t107 = v44 + t2 + t3 t110 = v43 + t48 t117 = t24 * t92 t120 = 4.0 * t71 * t20 - t117 - 2.0 * t110 * t13 t123 = ( v38 + t59 + ct * (v39 + 2.0 * t57) + sa * v42 + ( 4.0 * v48 * t64 * t20 + 8.0 * t33 * t68 - 4.0 * t71 * t38 - t93 - t106 - 2.0 * t107 * t13 - 2.0 * t110 * t29 ) * t20 - t120 * t35 * t37 ) t128 = t19 * p t130 = p * (1.0 * v12 + 1.0 * t7 + 1.0 * t11 + t128) t131 = 1.0 / t92 t133 = 1.0 + t130 * t131 t134 = torch.log(t133) t143 = v37 + ct * (v38 + t59) + sa * (v41 + v42 * ct) + t120 * t20 t152 = t37 * p t156 = t92 ** 2 t165 = v25 * ct t167 = ct * (v24 + t165) t169 = ct * (v23 + t167) t175 = v30 * ct t177 = ct * (v29 + t175) t179 = ct * (v28 + t177) t185 = v35 * ct t187 = ct * (v34 + t185) t189 = ct * (v33 + t187) t199 = t13 * t20 t217 = 2.0 * t117 * t199 
- t110 * t92 t234 = ( v21 + ct * (v22 + t169) + sa * (v26 + ct * (v27 + t179) + v36 * sa + t82 * (v31 + ct * (v32 + t189))) + t217 * t20 ) t241 = t64 - t92 * t19 t242 = torch.sqrt(t241) t243 = 1.0 / t242 t244 = t4 + t8 + t12 - t242 t245 = 1.0 / t244 t247 = t4 + t8 + t12 + t242 + t128 t248 = 1.0 / t247 t249 = t242 * t245 * t248 t252 = 1.0 + 2.0 * t128 * t249 t253 = torch.log(t252) t254 = t243 * t253 t259 = t234 * t19 - t143 * t13 t264 = t259 * t20 t272 = 2.0 * t13 * t29 - t105 * t19 - t92 * t37 t282 = t128 * t242 t283 = t244 ** 2 t287 = t243 * t272 / 2.0 t292 = t247 ** 2 t305 = ( 0.1e5 * p * ( v44 + t2 + t3 - 2.0 * v48 * t13 * t20 - 2.0 * t24 * t29 * t20 + 2.0 * t33 * t38 + 0.5 * v48 * p ) * t20 - 0.1e5 * p * (v43 + t48 - 2.0 * t33 * t20 + 0.5 * t24 * p) * t38 + 0.5e4 * t123 * t20 * t134 - 0.5e4 * t143 * t35 * t134 * t37 + 0.5e4 * t143 * t20 * (p * (1.0 * v13 + 2.0 * t5 + 1.0 * t27 + t152) * t131 - t130 / t156 * t105) / t133 + 0.5e4 * ( ( v22 + t169 + ct * (v23 + t167 + ct * (v24 + 2.0 * t165)) + sa * ( v27 + t179 + ct * (v28 + t177 + ct * (v29 + 2.0 * t175)) + t82 * (v32 + t189 + ct * (v33 + t187 + ct * (v34 + 2.0 * t185))) ) + ( 2.0 * t93 * t199 + 2.0 * t106 * t199 + 2.0 * t117 * t68 - 2.0 * t117 * t13 * t35 * t37 - t107 * t92 - t110 * t105 ) * t20 - t217 * t35 * t37 ) * t19 + t234 * t37 - t123 * t13 - t143 * t29 ) * t20 * t254 - 0.5e4 * t259 * t35 * t254 * t37 - 0.25e4 * t264 / t242 / t241 * t253 * t272 + 0.5e4 * t264 * t243 * ( 2.0 * t152 * t249 + t128 * t243 * t245 * t248 * t272 - 2.0 * t282 / t283 * t248 * (t25 + t26 + t28 - t287) - 2.0 * t282 * t245 / t292 * (t25 + t26 + t28 + t287 + t152) ) / t252 ) return t305 def prepare_inputs(sa, ct, p, device): out = [ torch.as_tensor(a, device=device) for a in (sa, ct, p) ] if device == "gpu": torch.cuda.synchronize() return out def run(sa, ct, p, device="cpu"): with torch.no_grad(): out = gsw_dHdT(sa, ct, p) if device == "gpu": torch.cuda.synchronize() return out
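# Illustrative only: a small standalone check of gsw_dHdT on random but physically
# plausible salinity / temperature / pressure fields, mirroring the shapes produced by
# the benchmark's input generator (pressure broadcasts over the two horizontal axes).
import torch

def _example_gsw_dHdT():
    torch.manual_seed(0)
    sa = torch.empty(8, 8, 4).uniform_(1e-2, 10)   # Absolute Salinity [g/kg]
    ct = torch.empty(8, 8, 4).uniform_(-12, 20)    # Conservative Temperature [deg C]
    p = torch.empty(1, 1, 4).uniform_(0, 1000)     # sea pressure [dbar]
    out = run(sa, ct, p, device="cpu")
    return out.shape                               # broadcasts to (8, 8, 4)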
if __name__ == "__main__": pass
# Generated by gen_torchvision_benchmark.py import torch import torch.optim as optim import torchvision.models as models from torch.quantization import quantize_fx from torchbenchmark.tasks import COMPUTER_VISION from ...util.model import BenchmarkModel from typing import Tuple class Model(BenchmarkModel): task = COMPUTER_VISION.CLASSIFICATION # Train batch size: 96 # Source: https://arxiv.org/pdf/1801.04381.pdf DEFAULT_TRAIN_BSIZE = 96 DEFAULT_EVAL_BSIZE = 96 def __init__(self, test, device, batch_size=None, extra_args=[]): if test == "eval" and device != "cpu": raise NotImplementedError("The eval test only supports CPU.") super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.model = models.mobilenet_v2().to(self.device) self.example_inputs = (torch.randn((self.batch_size, 3, 224, 224)).to(self.device),) self.prep_qat_train() # config+prepare steps are required for both train and eval if self.test == "eval": self.prep_qat_eval() self.optimizer = None def prep_qat_train(self): qconfig_dict = {"": torch.quantization.get_default_qat_qconfig('fbgemm')} self.model.train() self.model = quantize_fx.prepare_qat_fx(self.model, qconfig_dict, self.example_inputs) def train(self): if self.get_optimizer() is None: self.set_optimizer(optim.Adam(self.model.parameters())) loss = torch.nn.CrossEntropyLoss() self.optimizer.zero_grad() pred = self.model(*self.example_inputs) y = torch.empty(pred.shape[0], dtype=torch.long, device=self.device).random_(pred.shape[1]) loss(pred, y).backward() self.optimizer.step() def prep_qat_eval(self): self.model = quantize_fx.convert_fx(self.model) self.model.eval() def eval(self) -> Tuple[torch.Tensor]: example_inputs = self.example_inputs[0][0].unsqueeze(0) out = self.model(example_inputs) return (out, ) def get_module(self): return self.model, self.example_inputs def get_optimizer(self): return self.optimizer def set_optimizer(self, optimizer) -> None: self.optimizer = optimizer
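# Illustrative only: the eval path of this quantization-aware-training benchmark,
# assuming torchvision's MobileNetV2 remains FX-traceable with the fbgemm qconfig.
# eval is limited to CPU above, which matches the fbgemm int8 backend.
def _example_qat_eval():
    m = Model(test="eval", device="cpu")
    out, = m.eval()      # logits for a single 3x224x224 image
    return out.shape     # expected (1, 1000)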
import argparse import numpy as np import random import torch import torch.nn as nn import torch.nn.functional as F from torch import optim from typing import Tuple torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False from .pytorch_unet.unet import UNet from .pytorch_unet.utils.dice_score import dice_loss from ...util.model import BenchmarkModel from torchbenchmark.tasks import COMPUTER_VISION class Model(BenchmarkModel): task = COMPUTER_VISION.SEGMENTATION DEFAULT_TRAIN_BSIZE = 1 DEFAULT_EVAL_BSIZE = 1 DEFAULT_TRAIN_CUDA_PRECISION = "amp" DEFAULT_EVAL_CUDA_PRECISION = "amp" def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.args = self._get_args() # The sample inputs shape used here mimic the setting of the original repo # Source image link: https://www.kaggle.com/c/carvana-image-masking-challenge/code # Source images are 1280 x 1918, but the original code scales it in half to 640 x 959 # The batch size is 1 and there are 3 channels for the image inputs and 1 for the mask self.example_inputs = torch.rand((self.batch_size, 3, 640, 959), dtype=torch.float32).to(self.device) self.model = UNet(n_channels=3, n_classes=2, bilinear=True).to(self.device) if test == "train": self.sample_masks = torch.randint(0, 1, (self.batch_size, 640, 959), dtype=torch.int64).to(self.device) self.model.train() elif test == "eval": self.model.eval() self.optimizer = optim.RMSprop(self.model.parameters(), lr=self.args.lr, weight_decay=1e-8, momentum=0.9) def get_module(self): return self.model, (self.example_inputs,) def enable_amp(self): self.args.amp = True def train(self): grad_scaler = torch.cuda.amp.GradScaler(enabled=self.args.amp) criterion = nn.CrossEntropyLoss() self.model.train() if True: with torch.cuda.amp.autocast(enabled=self.args.amp): masks_pred = self.model(self.example_inputs) masks_true = self.sample_masks loss = criterion(masks_pred, masks_true) + \ dice_loss( F.softmax(masks_pred, dim=1).float(), F.one_hot(masks_true, self.model.n_classes).permute(0, 3, 1, 2).float(), multiclass=True) self.optimizer.zero_grad(set_to_none=True) grad_scaler.scale(loss).backward() grad_scaler.step(self.optimizer) grad_scaler.update() def jit_callback(self): if self.test == 'eval': self.model = torch.jit.optimize_for_inference( \ torch.jit.freeze(torch.jit.script(self.model.eval()), preserved_attrs=["n_classes"])) else: self.model = torch.jit.script(self.model) def eval(self) -> Tuple[torch.Tensor]: self.model.eval() with torch.no_grad(): with torch.cuda.amp.autocast(enabled=self.args.amp): mask_pred = self.model(self.example_inputs) if self.model.n_classes == 1: mask_pred = (F.sigmoid(mask_pred) > 0.5).float() else: mask_pred = F.one_hot(mask_pred.argmax(dim=1), self.model.n_classes).permute(0, 3, 1, 2).float() return (mask_pred, ) def _get_args(self): parser = argparse.ArgumentParser(description='Train the UNet on images and target masks') parser.add_argument('--learning-rate', '-l', metavar='LR', type=float, default=0.00001, help='Learning rate', dest='lr') parser.add_argument('--amp', action='store_true', default=False, help='Use mixed precision') return parser.parse_args([])
import subprocess import sys def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'pytorch_unet/requirements.txt']) if __name__ == '__main__': pip_install_requirements()
import argparse import logging import os import numpy as np import torch import torch.nn.functional as F from PIL import Image from torchvision import transforms from utils.data_loading import BasicDataset from unet import UNet from utils.utils import plot_img_and_mask def predict_img(net, full_img, device, scale_factor=1, out_threshold=0.5): net.eval() img = torch.from_numpy(BasicDataset.preprocess(full_img, scale_factor, is_mask=False)) img = img.unsqueeze(0) img = img.to(device=device, dtype=torch.float32) with torch.no_grad(): output = net(img) if net.n_classes > 1: probs = F.softmax(output, dim=1)[0] else: probs = torch.sigmoid(output)[0] tf = transforms.Compose([ transforms.ToPILImage(), transforms.Resize((full_img.size[1], full_img.size[0])), transforms.ToTensor() ]) full_mask = tf(probs.cpu()).squeeze() if net.n_classes == 1: return (full_mask > out_threshold).numpy() else: return F.one_hot(full_mask.argmax(dim=0), net.n_classes).permute(2, 0, 1).numpy() def get_args(): parser = argparse.ArgumentParser(description='Predict masks from input images') parser.add_argument('--model', '-m', default='MODEL.pth', metavar='FILE', help='Specify the file in which the model is stored') parser.add_argument('--input', '-i', metavar='INPUT', nargs='+', help='Filenames of input images', required=True) parser.add_argument('--output', '-o', metavar='INPUT', nargs='+', help='Filenames of output images') parser.add_argument('--viz', '-v', action='store_true', help='Visualize the images as they are processed') parser.add_argument('--no-save', '-n', action='store_true', help='Do not save the output masks') parser.add_argument('--mask-threshold', '-t', type=float, default=0.5, help='Minimum probability value to consider a mask pixel white') parser.add_argument('--scale', '-s', type=float, default=0.5, help='Scale factor for the input images') return parser.parse_args() def get_output_filenames(args): def _generate_name(fn): split = os.path.splitext(fn) return f'{split[0]}_OUT{split[1]}' return args.output or list(map(_generate_name, args.input)) def mask_to_image(mask: np.ndarray): if mask.ndim == 2: return Image.fromarray((mask * 255).astype(np.uint8)) elif mask.ndim == 3: return Image.fromarray((np.argmax(mask, axis=0) * 255 / mask.shape[0]).astype(np.uint8)) if __name__ == '__main__': args = get_args() in_files = args.input out_files = get_output_filenames(args) net = UNet(n_channels=3, n_classes=2) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'Loading model {args.model}') logging.info(f'Using device {device}') net.to(device=device) net.load_state_dict(torch.load(args.model, map_location=device)) logging.info('Model loaded!') for i, filename in enumerate(in_files): logging.info(f'\nPredicting image {filename} ...') img = Image.open(filename) mask = predict_img(net=net, full_img=img, scale_factor=args.scale, out_threshold=args.mask_threshold, device=device) if not args.no_save: out_filename = out_files[i] result = mask_to_image(mask) result.save(out_filename) logging.info(f'Mask saved to {out_filename}') if args.viz: logging.info(f'Visualizing results for image {filename}, close to continue...') plot_img_and_mask(img, mask)
import argparse import logging import sys from pathlib import Path import torch import torch.nn as nn import torch.nn.functional as F import wandb from torch import optim from torch.utils.data import DataLoader, random_split from tqdm import tqdm from utils.data_loading import BasicDataset, CarvanaDataset from utils.dice_score import dice_loss from evaluate import evaluate from unet import UNet dir_img = Path('./data/imgs/') dir_mask = Path('./data/masks/') dir_checkpoint = Path('./checkpoints/') def train_net(net, device, epochs: int = 5, batch_size: int = 1, learning_rate: float = 0.001, val_percent: float = 0.1, save_checkpoint: bool = True, img_scale: float = 0.5, amp: bool = False): # 1. Create dataset try: dataset = CarvanaDataset(dir_img, dir_mask, img_scale) except (AssertionError, RuntimeError): dataset = BasicDataset(dir_img, dir_mask, img_scale) # 2. Split into train / validation partitions n_val = int(len(dataset) * val_percent) n_train = len(dataset) - n_val train_set, val_set = random_split(dataset, [n_train, n_val], generator=torch.Generator().manual_seed(0)) # 3. Create data loaders loader_args = dict(batch_size=batch_size, num_workers=4, pin_memory=True) train_loader = DataLoader(train_set, shuffle=True, **loader_args) val_loader = DataLoader(val_set, shuffle=False, drop_last=True, **loader_args) # (Initialize logging) experiment = wandb.init(project='U-Net', resume='allow', anonymous='must') experiment.config.update(dict(epochs=epochs, batch_size=batch_size, learning_rate=learning_rate, val_percent=val_percent, save_checkpoint=save_checkpoint, img_scale=img_scale, amp=amp)) logging.info(f'''Starting training: Epochs: {epochs} Batch size: {batch_size} Learning rate: {learning_rate} Training size: {n_train} Validation size: {n_val} Checkpoints: {save_checkpoint} Device: {device.type} Images scaling: {img_scale} Mixed Precision: {amp} ''') # 4. Set up the optimizer, the loss, the learning rate scheduler and the loss scaling for AMP optimizer = optim.RMSprop(net.parameters(), lr=learning_rate, weight_decay=1e-8, momentum=0.9) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=2) # goal: maximize Dice score grad_scaler = torch.cuda.amp.GradScaler(enabled=amp) criterion = nn.CrossEntropyLoss() global_step = 0 # 5. Begin training for epoch in range(epochs): net.train() epoch_loss = 0 with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar: for batch in train_loader: images = batch['image'] true_masks = batch['mask'] assert images.shape[1] == net.n_channels, \ f'Network has been defined with {net.n_channels} input channels, ' \ f'but loaded images have {images.shape[1]} channels. Please check that ' \ 'the images are loaded correctly.' 
images = images.to(device=device, dtype=torch.float32) true_masks = true_masks.to(device=device, dtype=torch.long) with torch.cuda.amp.autocast(enabled=amp): masks_pred = net(images) loss = criterion(masks_pred, true_masks) \ + dice_loss(F.softmax(masks_pred, dim=1).float(), F.one_hot(true_masks, net.n_classes).permute(0, 3, 1, 2).float(), multiclass=True) optimizer.zero_grad(set_to_none=True) grad_scaler.scale(loss).backward() grad_scaler.step(optimizer) grad_scaler.update() pbar.update(images.shape[0]) global_step += 1 epoch_loss += loss.item() experiment.log({ 'train loss': loss.item(), 'step': global_step, 'epoch': epoch }) pbar.set_postfix(**{'loss (batch)': loss.item()}) # Evaluation round if global_step % (n_train // (10 * batch_size)) == 0: histograms = {} for tag, value in net.named_parameters(): tag = tag.replace('/', '.') histograms['Weights/' + tag] = wandb.Histogram(value.data.cpu()) histograms['Gradients/' + tag] = wandb.Histogram(value.grad.data.cpu()) val_score = evaluate(net, val_loader, device) scheduler.step(val_score) logging.info('Validation Dice score: {}'.format(val_score)) experiment.log({ 'learning rate': optimizer.param_groups[0]['lr'], 'validation Dice': val_score, 'images': wandb.Image(images[0].cpu()), 'masks': { 'true': wandb.Image(true_masks[0].float().cpu()), 'pred': wandb.Image(torch.softmax(masks_pred, dim=1)[0].float().cpu()), }, 'step': global_step, 'epoch': epoch, **histograms }) if save_checkpoint: Path(dir_checkpoint).mkdir(parents=True, exist_ok=True) torch.save(net.state_dict(), str(dir_checkpoint / 'checkpoint_epoch{}.pth'.format(epoch + 1))) logging.info(f'Checkpoint {epoch + 1} saved!') def get_args(): parser = argparse.ArgumentParser(description='Train the UNet on images and target masks') parser.add_argument('--epochs', '-e', metavar='E', type=int, default=5, help='Number of epochs') parser.add_argument('--batch-size', '-b', dest='batch_size', metavar='B', type=int, default=1, help='Batch size') parser.add_argument('--learning-rate', '-l', metavar='LR', type=float, default=0.00001, help='Learning rate', dest='lr') parser.add_argument('--load', '-f', type=str, default=False, help='Load model from a .pth file') parser.add_argument('--scale', '-s', type=float, default=0.5, help='Downscaling factor of the images') parser.add_argument('--validation', '-v', dest='val', type=float, default=10.0, help='Percent of the data that is used as validation (0-100)') parser.add_argument('--amp', action='store_true', default=False, help='Use mixed precision') return parser.parse_args() if __name__ == '__main__': args = get_args() logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'Using device {device}') # Change here to adapt to your data # n_channels=3 for RGB images # n_classes is the number of probabilities you want to get per pixel net = UNet(n_channels=3, n_classes=2, bilinear=True) logging.info(f'Network:\n' f'\t{net.n_channels} input channels\n' f'\t{net.n_classes} output channels (classes)\n' f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling') if args.load: net.load_state_dict(torch.load(args.load, map_location=device)) logging.info(f'Model loaded from {args.load}') net.to(device=device) try: train_net(net=net, epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.lr, device=device, img_scale=args.scale, val_percent=args.val / 100, amp=args.amp) except KeyboardInterrupt: torch.save(net.state_dict(), 
'INTERRUPTED.pth') logging.info('Saved interrupt') sys.exit(0)
import torch import torch.nn.functional as F from tqdm import tqdm from utils.dice_score import multiclass_dice_coeff, dice_coeff def evaluate(net, dataloader, device): net.eval() num_val_batches = len(dataloader) dice_score = 0 # iterate over the validation set for batch in tqdm(dataloader, total=num_val_batches, desc='Validation round', unit='batch', leave=False): image, mask_true = batch['image'], batch['mask'] # move images and labels to correct device and type image = image.to(device=device, dtype=torch.float32) mask_true = mask_true.to(device=device, dtype=torch.long) mask_true = F.one_hot(mask_true, net.n_classes).permute(0, 3, 1, 2).float() with torch.no_grad(): # predict the mask mask_pred = net(image) # convert to one-hot format if net.n_classes == 1: mask_pred = (F.sigmoid(mask_pred) > 0.5).float() # compute the Dice score dice_score += dice_coeff(mask_pred, mask_true, reduce_batch_first=False) else: mask_pred = F.one_hot(mask_pred.argmax(dim=1), net.n_classes).permute(0, 3, 1, 2).float() # compute the Dice score, ignoring background dice_score += multiclass_dice_coeff(mask_pred[:, 1:, ...], mask_true[:, 1:, ...], reduce_batch_first=False) net.train() return dice_score / num_val_batches
import torch from unet import UNet as _UNet def unet_carvana(pretrained=False): """ UNet model trained on the Carvana dataset ( https://www.kaggle.com/c/carvana-image-masking-challenge/data ). Set the scale to 0.5 (50%) when predicting. """ net = _UNet(n_channels=3, n_classes=2, bilinear=True) if pretrained: checkpoint = 'https://github.com/milesial/Pytorch-UNet/releases/download/v2.0/unet_carvana_scale0.5_epoch1.pth' net.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=True)) return net
import logging
from os import listdir
from os.path import splitext
from pathlib import Path

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset


class BasicDataset(Dataset):
    def __init__(self, images_dir: str, masks_dir: str, scale: float = 1.0, mask_suffix: str = ''):
        self.images_dir = Path(images_dir)
        self.masks_dir = Path(masks_dir)
        assert 0 < scale <= 1, 'Scale must be between 0 and 1'
        self.scale = scale
        self.mask_suffix = mask_suffix

        self.ids = [splitext(file)[0] for file in listdir(images_dir) if not file.startswith('.')]
        if not self.ids:
            raise RuntimeError(f'No input file found in {images_dir}, make sure you put your images there')
        logging.info(f'Creating dataset with {len(self.ids)} examples')

    def __len__(self):
        return len(self.ids)

    @classmethod
    def preprocess(cls, pil_img, scale, is_mask):
        w, h = pil_img.size
        newW, newH = int(scale * w), int(scale * h)
        assert newW > 0 and newH > 0, 'Scale is too small, resized images would have no pixel'
        pil_img = pil_img.resize((newW, newH))
        img_ndarray = np.asarray(pil_img)

        if img_ndarray.ndim == 2 and not is_mask:
            img_ndarray = img_ndarray[np.newaxis, ...]
        elif not is_mask:
            img_ndarray = img_ndarray.transpose((2, 0, 1))

        if not is_mask:
            img_ndarray = img_ndarray / 255

        return img_ndarray

    @classmethod
    def load(cls, filename):
        ext = splitext(filename)[1]
        if ext in ['.npz', '.npy']:
            return Image.fromarray(np.load(filename))
        elif ext in ['.pt', '.pth']:
            return Image.fromarray(torch.load(filename).numpy())
        else:
            return Image.open(filename)

    def __getitem__(self, idx):
        name = self.ids[idx]
        mask_file = list(self.masks_dir.glob(name + self.mask_suffix + '.*'))
        img_file = list(self.images_dir.glob(name + '.*'))

        assert len(mask_file) == 1, f'Either no mask or multiple masks found for the ID {name}: {mask_file}'
        assert len(img_file) == 1, f'Either no image or multiple images found for the ID {name}: {img_file}'
        mask = self.load(mask_file[0])
        img = self.load(img_file[0])

        assert img.size == mask.size, \
            f'Image and mask {name} should be the same size, but are {img.size} and {mask.size}'

        img = self.preprocess(img, self.scale, is_mask=False)
        mask = self.preprocess(mask, self.scale, is_mask=True)

        return {
            'image': torch.as_tensor(img.copy()).float().contiguous(),
            'mask': torch.as_tensor(mask.copy()).long().contiguous()
        }


class CarvanaDataset(BasicDataset):
    def __init__(self, images_dir, masks_dir, scale=1):
        super().__init__(images_dir, masks_dir, scale, mask_suffix='_mask')
import torch from torch import Tensor def dice_coeff(input: Tensor, target: Tensor, reduce_batch_first: bool = False, epsilon=1e-6): # Average of Dice coefficient for all batches, or for a single mask assert input.size() == target.size() if input.dim() == 2 and reduce_batch_first: raise ValueError(f'Dice: asked to reduce batch but got tensor without batch dimension (shape {input.shape})') if input.dim() == 2 or reduce_batch_first: inter = torch.dot(input.reshape(-1), target.reshape(-1)) sets_sum = torch.sum(input) + torch.sum(target) if sets_sum.item() == 0: sets_sum = 2 * inter return (2 * inter + epsilon) / (sets_sum + epsilon) else: # compute and average metric for each batch element dice = 0 for i in range(input.shape[0]): dice += dice_coeff(input[i, ...], target[i, ...]) return dice / input.shape[0] def multiclass_dice_coeff(input: Tensor, target: Tensor, reduce_batch_first: bool = False, epsilon=1e-6): # Average of Dice coefficient for all classes assert input.size() == target.size() dice = 0 for channel in range(input.shape[1]): dice += dice_coeff(input[:, channel, ...], target[:, channel, ...], reduce_batch_first, epsilon) return dice / input.shape[1] def dice_loss(input: Tensor, target: Tensor, multiclass: bool = False): # Dice loss (objective to minimize) between 0 and 1 assert input.size() == target.size() fn = multiclass_dice_coeff if multiclass else dice_coeff return 1 - fn(input, target, reduce_batch_first=True)
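# Illustrative only: a quick sanity check of the Dice helpers above. A prediction that
# exactly matches the one-hot target scores ~1.0, so dice_loss is ~0.0.
import torch

def _example_dice_sanity_check():
    target = torch.zeros(2, 3, 4, 4)
    target[:, 0] = 1.0                              # every pixel labelled class 0
    pred = target.clone()
    score = multiclass_dice_coeff(pred, target, reduce_batch_first=True)
    loss = dice_loss(pred, target, multiclass=True)
    return float(score), float(loss)                # approximately (1.0, 0.0)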
import matplotlib.pyplot as plt


def plot_img_and_mask(img, mask):
    classes = mask.shape[0] if len(mask.shape) > 2 else 1
    fig, ax = plt.subplots(1, classes + 1)
    ax[0].set_title('Input image')
    ax[0].imshow(img)
    if classes > 1:
        for i in range(classes):
            ax[i + 1].set_title(f'Output mask (class {i + 1})')
            ax[i + 1].imshow(mask[i, :, :])
    else:
        ax[1].set_title('Output mask')
        ax[1].imshow(mask)
    plt.xticks([]), plt.yticks([])
    plt.show()
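For reference, a small sketch of how this helper might be called with a single-class mask on dummy data:

import numpy as np

img = np.random.rand(128, 128, 3)        # HWC image
mask = np.random.rand(128, 128) > 0.5    # 2-D mask -> treated as a single class
plot_img_and_mask(img, mask)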
from .unet_model import UNet
""" Parts of the U-Net model """ import torch import torch.nn as nn import torch.nn.functional as F class DoubleConv(nn.Module): """(convolution => [BN] => ReLU) * 2""" def __init__(self, in_channels, out_channels, mid_channels=None): super().__init__() if not mid_channels: mid_channels = out_channels self.double_conv = nn.Sequential( nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1), nn.BatchNorm2d(mid_channels), nn.ReLU(inplace=True), nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True) ) def forward(self, x): return self.double_conv(x) class Down(nn.Module): """Downscaling with maxpool then double conv""" def __init__(self, in_channels, out_channels): super().__init__() self.maxpool_conv = nn.Sequential( nn.MaxPool2d(2), DoubleConv(in_channels, out_channels) ) def forward(self, x): return self.maxpool_conv(x) class Up(nn.Module): """Upscaling then double conv""" def __init__(self, in_channels, out_channels, bilinear=True): super().__init__() # if bilinear, use the normal convolutions to reduce the number of channels if bilinear: self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv = DoubleConv(in_channels, out_channels, in_channels // 2) else: self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2) self.conv = DoubleConv(in_channels, out_channels) def forward(self, x1, x2): x1 = self.up(x1) # input is CHW diffY = x2.size()[2] - x1.size()[2] diffX = x2.size()[3] - x1.size()[3] x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2]) # if you have padding issues, see # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd x = torch.cat([x2, x1], dim=1) return self.conv(x) class OutConv(nn.Module): def __init__(self, in_channels, out_channels): super(OutConv, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, x): return self.conv(x)
""" Full assembly of the parts to form the complete network """ from .unet_parts import * class UNet(nn.Module): def __init__(self, n_channels, n_classes, bilinear=True): super(UNet, self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear self.inc = DoubleConv(n_channels, 64) self.down1 = Down(64, 128) self.down2 = Down(128, 256) self.down3 = Down(256, 512) factor = 2 if bilinear else 1 self.down4 = Down(512, 1024 // factor) self.up1 = Up(1024, 512 // factor, bilinear) self.up2 = Up(512, 256 // factor, bilinear) self.up3 = Up(256, 128 // factor, bilinear) self.up4 = Up(128, 64, bilinear) self.outc = OutConv(64, n_classes) def forward(self, x): x1 = self.inc(x) x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x = self.up1(x5, x4) x = self.up2(x, x3) x = self.up3(x, x2) x = self.up4(x, x1) logits = self.outc(x) return logits
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel from torchbenchmark.tasks import COMPUTER_VISION import torchvision.models as models class Model(TorchVisionModel): task = COMPUTER_VISION.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 32 DEFAULT_EVAL_BSIZE = 32 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="resnet152", test=test, device=device, batch_size=batch_size, weights=models.ResNet152_Weights.IMAGENET1K_V1, extra_args=extra_args)
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceGenerationModel class Model(HuggingFaceGenerationModel): def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(name="hf_T5_generate", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel from torchbenchmark.tasks import COMPUTER_VISION import torchvision.models as models class Model(TorchVisionModel): task = COMPUTER_VISION.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 16 DEFAULT_EVAL_BSIZE = 8 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="resnet18", test=test, device=device, batch_size=batch_size, weights=models.ResNet18_Weights.IMAGENET1K_V1, extra_args=extra_args)
import numpy as np import torch import torchvision import cv2, pdb def composite4(fg, bg, a): fg = np.array(fg, np.float32) alpha= np.expand_dims(a / 255,axis=2) im = alpha * fg + (1 - alpha) * bg im = im.astype(np.uint8) return im def compose_image_withshift(alpha_pred,fg_pred,bg,seg): image_sh=torch.zeros(fg_pred.shape) if alpha_pred.is_cuda: image_sh = image_sh.cuda() for t in range(0,fg_pred.shape[0]): al_tmp=to_image(seg[t,...]).squeeze(2) where = np.array(np.where((al_tmp>0.1).astype(np.float32))) x1, y1 = np.amin(where, axis=1) x2, y2 = np.amax(where, axis=1) #select shift n=np.random.randint(-(y1-10),al_tmp.shape[1]-y2-10) #n positive indicates shift to right alpha_pred_sh=torch.cat((alpha_pred[t,:,:,-n:],alpha_pred[t,:,:,:-n]),dim=2) fg_pred_sh=torch.cat((fg_pred[t,:,:,-n:],fg_pred[t,:,:,:-n]),dim=2) alpha_pred_sh=(alpha_pred_sh+1)/2 image_sh[t,...]=fg_pred_sh*alpha_pred_sh + (1-alpha_pred_sh)*bg[t,...] if alpha_pred.is_cuda: image_sh = image_sh.cuda() return torch.autograd.Variable(image_sh) def get_bbox(mask,R,C): where = np.array(np.where(mask)) x1, y1 = np.amin(where, axis=1) x2, y2 = np.amax(where, axis=1) bbox_init=[x1,y1,np.maximum(x2-x1,y2-y1),np.maximum(x2-x1,y2-y1)] bbox=create_bbox(bbox_init,(R,C)) return bbox def crop_images(crop_list,reso,bbox): for i in range(0,len(crop_list)): img=crop_list[i] if img.ndim>=3: img_crop=img[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3],...]; img_crop=cv2.resize(img_crop,reso) else: img_crop=img[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3]]; img_crop=cv2.resize(img_crop,reso) crop_list[i]=img_crop return crop_list def create_bbox(bbox_init,sh): w=np.maximum(bbox_init[2],bbox_init[3]) x1=bbox_init[0]-0.1*w y1=bbox_init[1]-0.1*w x2=bbox_init[0]+1.1*w y2=bbox_init[1]+1.1*w if x1<0: x1=0 if y1<0: y1=0 if x2>=sh[0]: x2=sh[0]-1 if y2>=sh[1]: y2=sh[1]-1 bbox=np.around([x1,y1,x2-x1,y2-y1]).astype('int') return bbox def uncrop(alpha,bbox,R=720,C=1280): alpha=cv2.resize(alpha,(bbox[3],bbox[2])) if alpha.ndim==2: alpha_uncrop=np.zeros((R,C)) alpha_uncrop[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3]]=alpha else: alpha_uncrop=np.zeros((R,C,3)) alpha_uncrop[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3],:]=alpha return alpha_uncrop.astype(np.uint8) def to_image(rec0): rec0=((rec0.data).cpu()).numpy() rec0=(rec0+1)/2 rec0=rec0.transpose((1,2,0)) rec0[rec0>1]=1 rec0[rec0<0]=0 return rec0 def write_tb_log(image,tag,log_writer,i): # image1 output_to_show = image.cpu().data[0:4,...] output_to_show = (output_to_show + 1)/2.0 grid = torchvision.utils.make_grid(output_to_show,nrow=4) log_writer.add_image(tag, grid, i + 1)
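A hand-checkable example of composite4 above: a roughly 50%-opacity alpha blends foreground and background about half and half.

import numpy as np

fg = np.full((4, 4, 3), 200, dtype=np.uint8)
bg = np.zeros((4, 4, 3), dtype=np.uint8)
alpha = np.full((4, 4), 128, dtype=np.uint8)   # ~50% opacity

out = composite4(fg, bg, alpha)
print(out.dtype, out[0, 0])   # uint8 [100 100 100]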
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np #import matplotlib.pyplot as plt import pdb from torch.nn.modules.loss import _Loss from torch.autograd import Function, Variable #import scipy.io as sio class alpha_loss(_Loss): def __init__(self): super(alpha_loss,self).__init__() def forward(self,alpha,alpha_pred,mask): return normalized_l1_loss(alpha,alpha_pred,mask) class compose_loss(_Loss): def __init__(self): super(compose_loss,self).__init__() def forward(self,image,alpha_pred,fg,bg,mask): alpha_pred=(alpha_pred+1)/2 comp=fg*alpha_pred + (1-alpha_pred)*bg return normalized_l1_loss(image,comp,mask) class alpha_gradient_loss(_Loss): def __init__(self): super(alpha_gradient_loss,self).__init__() def forward(self,alpha,alpha_pred,mask): if alpha.is_cuda: fx = torch.Tensor([[1, 0, -1],[2, 0, -2],[1, 0, -1]]); fx=fx.view((1,1,3,3)); fx=Variable(fx.cuda()) fy = torch.Tensor([[1, 2, 1],[0, 0, 0],[-1, -2, -1]]); fy=fy.view((1,1,3,3)); fy=Variable(fy.cuda()) else: fx = torch.Tensor([[1, 0, -1],[2, 0, -2],[1, 0, -1]]); fx=fx.view((1,1,3,3)); fx=Variable(fx) fy = torch.Tensor([[1, 2, 1],[0, 0, 0],[-1, -2, -1]]); fy=fy.view((1,1,3,3)); fy=Variable(fy) G_x = F.conv2d(alpha,fx,padding=1); G_y = F.conv2d(alpha,fy,padding=1) G_x_pred = F.conv2d(alpha_pred,fx,padding=1); G_y_pred = F.conv2d(alpha_pred,fy,padding=1) loss=normalized_l1_loss(G_x,G_x_pred,mask) + normalized_l1_loss(G_y,G_y_pred,mask) return loss class alpha_gradient_reg_loss(_Loss): def __init__(self): super(alpha_gradient_reg_loss,self).__init__() def forward(self,alpha,mask): if alpha.is_cuda: fx = torch.Tensor([[1, 0, -1],[2, 0, -2],[1, 0, -1]]); fx=fx.view((1,1,3,3)); fx=Variable(fx.cuda()) fy = torch.Tensor([[1, 2, 1],[0, 0, 0],[-1, -2, -1]]); fy=fy.view((1,1,3,3)); fy=Variable(fy.cuda()) else: fx = torch.Tensor([[1, 0, -1],[2, 0, -2],[1, 0, -1]]); fx=fx.view((1,1,3,3)); fx=Variable(fx) fy = torch.Tensor([[1, 2, 1],[0, 0, 0],[-1, -2, -1]]); fy=fy.view((1,1,3,3)); fy=Variable(fy) G_x = F.conv2d(alpha,fx,padding=1); G_y = F.conv2d(alpha,fy,padding=1) loss=(torch.sum(torch.abs(G_x))+torch.sum(torch.abs(G_y)))/torch.sum(mask) return loss class GANloss(_Loss): def __init__(self): super(GANloss,self).__init__() def forward(self,pred,label_type): MSE=nn.MSELoss() loss=0 for i in range(0,len(pred)): if label_type: labels=torch.ones(pred[i][0].shape) else: labels=torch.zeros(pred[i][0].shape) if pred[i][0].is_cuda: labels=Variable(labels.cuda()) else: labels=Variable(labels) loss += MSE(pred[i][0],labels) return loss/len(pred) def normalized_l1_loss(alpha,alpha_pred,mask): loss=0; eps=1e-6; for i in range(alpha.shape[0]): if mask[i,...].sum()>0: loss = loss + torch.sum(torch.abs(alpha[i,...]*mask[i,...]-alpha_pred[i,...]*mask[i,...]))/(torch.sum(mask[i,...])+eps) loss=loss/alpha.shape[0] return loss
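A CPU-only sanity check of the masked L1 loss above: with a ground-truth alpha of 0, a constant prediction of 0.5, and a full mask, the normalized error is exactly 0.5.

import torch

alpha_gt   = torch.zeros(2, 1, 8, 8)
alpha_pred = torch.full((2, 1, 8, 8), 0.5)
mask       = torch.ones(2, 1, 8, 8)

crit = alpha_loss()
print(crit(alpha_gt, alpha_pred, mask))   # tensor(0.5000)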
import os from io import BytesIO import tarfile import tempfile from six.moves import urllib import numpy as np from PIL import Image import cv2, pdb, glob, argparse import tensorflow as tf class DeepLabModel(object): """Class to load deeplab model and run inference.""" INPUT_TENSOR_NAME = 'ImageTensor:0' OUTPUT_TENSOR_NAME = 'SemanticPredictions:0' INPUT_SIZE = 513 FROZEN_GRAPH_NAME = 'frozen_inference_graph' def __init__(self, tarball_path): #"""Creates and loads pretrained deeplab model.""" self.graph = tf.Graph() graph_def = None # Extract frozen graph from tar archive. tar_file = tarfile.open(tarball_path) for tar_info in tar_file.getmembers(): if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name): file_handle = tar_file.extractfile(tar_info) graph_def = tf.GraphDef.FromString(file_handle.read()) break tar_file.close() if graph_def is None: raise RuntimeError('Cannot find inference graph in tar archive.') with self.graph.as_default(): tf.import_graph_def(graph_def, name='') self.sess = tf.Session(graph=self.graph) def run(self, image): """Runs inference on a single image. Args: image: A PIL.Image object, raw input image. Returns: resized_image: RGB image resized from original input image. seg_map: Segmentation map of `resized_image`. """ width, height = image.size resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height) target_size = (int(resize_ratio * width), int(resize_ratio * height)) resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS) batch_seg_map = self.sess.run( self.OUTPUT_TENSOR_NAME, feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]}) seg_map = batch_seg_map[0] return resized_image, seg_map def create_pascal_label_colormap(): """Creates a label colormap used in PASCAL VOC segmentation benchmark. Returns: A Colormap for visualizing segmentation results. """ colormap = np.zeros((256, 3), dtype=int) ind = np.arange(256, dtype=int) for shift in reversed(range(8)): for channel in range(3): colormap[:, channel] |= ((ind >> channel) & 1) << shift ind >>= 3 return colormap def label_to_color_image(label): """Adds color defined by the dataset colormap to the label. Args: label: A 2D array with integer type, storing the segmentation label. Returns: result: A 2D array with floating type. The element of the array is the color indexed by the corresponding element in the input label to the PASCAL color map. Raises: ValueError: If label is not of rank 2 or its value is larger than color map maximum entry. """ if label.ndim != 2: raise ValueError('Expect 2-D input label') colormap = create_pascal_label_colormap() if np.max(label) >= len(colormap): raise ValueError('label value too large.') return colormap[label] parser = argparse.ArgumentParser(description='Deeplab Segmentation') parser.add_argument('-i', '--input_dir', type=str, required=True,help='Directory to save the output results. 
(required)') args=parser.parse_args() dir_name=args.input_dir; ## setup #################### LABEL_NAMES = np.asarray([ 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv' ]) FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1) FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP) MODEL_NAME = 'xception_coco_voctrainval' # @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval'] _DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/' _MODEL_URLS = { 'mobilenetv2_coco_voctrainaug': 'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz', 'mobilenetv2_coco_voctrainval': 'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz', 'xception_coco_voctrainaug': 'deeplabv3_pascal_train_aug_2018_01_04.tar.gz', 'xception_coco_voctrainval': 'deeplabv3_pascal_trainval_2018_01_04.tar.gz', } _TARBALL_NAME = _MODEL_URLS[MODEL_NAME] model_dir = 'deeplab_model' if not os.path.exists(model_dir): tf.gfile.MakeDirs(model_dir) download_path = os.path.join(model_dir, _TARBALL_NAME) if not os.path.exists(download_path): print('downloading model to %s, this might take a while...' % download_path) urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME], download_path) print('download completed! loading DeepLab model...') MODEL = DeepLabModel(download_path) print('model loaded successfully!') ####################################################################################### list_im=glob.glob(dir_name + '/*_img.png'); list_im.sort() for i in range(0,len(list_im)): image = Image.open(list_im[i]) res_im,seg=MODEL.run(image) seg=cv2.resize(seg.astype(np.uint8),image.size) mask_sel=(seg==15).astype(np.float32) name=list_im[i].replace('img','masksDL') cv2.imwrite(name,(255*mask_sel).astype(np.uint8)) str_msg='\nDone: ' + dir_name print(str_msg)
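The loop above keeps only PASCAL VOC class 15 ('person') from the DeepLab output; the masking step itself is plain NumPy and can be sanity-checked without TensorFlow:

import numpy as np

# seg as returned by MODEL.run(): one integer class ID per pixel
seg = np.array([[0, 15],
                [15, 8]], dtype=np.uint8)
mask_sel = (seg == 15).astype(np.float32)
print((255 * mask_sel).astype(np.uint8))   # 255 where a person was detected, 0 elsewhere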
import numpy as np import cv2, pdb, glob, argparse MAX_FEATURES = 500 GOOD_MATCH_PERCENT = 0.15 def alignImages(im1, im2,masksDL): # Convert images to grayscale im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY) im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY) akaze = cv2.AKAZE_create() keypoints1, descriptors1 = akaze.detectAndCompute(im1, None) keypoints2, descriptors2 = akaze.detectAndCompute(im2, None) # Match features. matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE) matches = matcher.match(descriptors1, descriptors2, None) # Sort matches by score matches.sort(key=lambda x: x.distance, reverse=False) # Remove not so good matches numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT) matches = matches[:numGoodMatches] # Extract location of good matches points1 = np.zeros((len(matches), 2), dtype=np.float32) points2 = np.zeros((len(matches), 2), dtype=np.float32) for i, match in enumerate(matches): points1[i, :] = keypoints1[match.queryIdx].pt points2[i, :] = keypoints2[match.trainIdx].pt # Find homography h, mask = cv2.findHomography(points1, points2, cv2.RANSAC) # Use homography height, width, channels = im2.shape im1Reg = cv2.warpPerspective(im1, h, (width, height)) # copy image in the empty region, unless it is a foreground. Then copy background mask_rep=(np.sum(im1Reg.astype('float32'),axis=2)==0) im1Reg[mask_rep,0]=im2[mask_rep,0] im1Reg[mask_rep,1]=im2[mask_rep,1] im1Reg[mask_rep,2]=im2[mask_rep,2] mask_rep1=np.logical_and(mask_rep , masksDL[...,0]==255) im1Reg[mask_rep1,0]=im1[mask_rep1,0] im1Reg[mask_rep1,1]=im1[mask_rep1,1] im1Reg[mask_rep1,2]=im1[mask_rep1,2] return im1Reg def adjustExposure(img,back,mask): kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) mask = cv2.dilate(mask, kernel, iterations=10) mask1 = cv2.dilate(mask, kernel, iterations=300) msk=mask1.astype(np.float32)/255-mask.astype(np.float32)/255; msk=msk.astype(np.bool) back_tr=back back_tr[...,0]=bias_gain(img[...,0],back[...,0],msk) back_tr[...,1]=bias_gain(img[...,1],back[...,1],msk) back_tr[...,2]=bias_gain(img[...,2],back[...,2],msk) return back_tr def bias_gain(orgR,capR,cap_mask): capR=capR.astype('float32') orgR=orgR.astype('float32') xR=capR[cap_mask] yR=orgR[cap_mask] gainR=np.nanstd(yR)/np.nanstd(xR); biasR=np.nanmean(yR)-gainR*np.nanmean(xR); cap_tran=capR*gainR+biasR; return cap_tran.astype('float32') parser = argparse.ArgumentParser(description='Deeplab Segmentation') parser.add_argument('-i', '--input_dir', type=str, required=True,help='Directory to save the output results. (required)') args=parser.parse_args() dir_name=args.input_dir list_im=glob.glob(dir_name + '/*_img.png'); list_im.sort() for i in range(0,len(list_im)): image = cv2.imread(list_im[i],cv2.IMREAD_COLOR) back = cv2.imread(list_im[i].replace('img','back'),cv2.IMREAD_COLOR) mask = cv2.imread(list_im[i].replace('img','masksDL')) #back_new = adjustExposure(image,back,mask[...,0]) back_align = alignImages(back, image,mask) cv2.imwrite(list_im[i].replace('img','back'),back_align) str_msg='\nDone: ' + dir_name print(str_msg)
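The bias_gain helper above fits a per-channel gain and bias from the masked pixels and returns the corrected channel; a toy check where the captured channel is an exact affine transform of the original recovers it.

import numpy as np

org = np.array([[10., 20.], [30., 40.]])
cap = org * 0.5 + 3                       # captured = affine-distorted original
msk = np.ones_like(org, dtype=bool)
print(bias_gain(org, cap, msk))           # [[10. 20.] [30. 40.]] (up to float error)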
from __future__ import print_function import torch from torch.autograd import Variable import torch.nn as nn import torch.optim as optim from tensorboardX import SummaryWriter import os import time import argparse from data_loader import AdobeDataAffineHR from functions import * from networks import ResnetConditionHR, conv_init from loss_functions import alpha_loss, compose_loss, alpha_gradient_loss #CUDA #os.environ["CUDA_VISIBLE_DEVICES"]="4" print('CUDA Device: ' + os.environ["CUDA_VISIBLE_DEVICES"]) """Parses arguments.""" parser = argparse.ArgumentParser(description='Training Background Matting on Adobe Dataset.') parser.add_argument('-n', '--name', type=str, help='Name of tensorboard and model saving folders.') parser.add_argument('-bs', '--batch_size', type=int, help='Batch Size.') parser.add_argument('-res', '--reso', type=int, help='Input image resolution') parser.add_argument('-epoch', '--epoch', type=int, default=60,help='Maximum Epoch') parser.add_argument('-n_blocks1', '--n_blocks1', type=int, default=7,help='Number of residual blocks after Context Switching.') parser.add_argument('-n_blocks2', '--n_blocks2', type=int, default=3,help='Number of residual blocks for Fg and alpha each.') args=parser.parse_args() ##Directories tb_dir='TB_Summary/' + args.name model_dir='Models/' + args.name if not os.path.exists(model_dir): os.makedirs(model_dir) if not os.path.exists(tb_dir): os.makedirs(tb_dir) ## Input list data_config_train = {'reso': [args.reso,args.reso], 'trimapK': [5,5], 'noise': True} # choice for data loading parameters # DATA LOADING print('\n[Phase 1] : Data Preparation') def collate_filter_none(batch): batch = list(filter(lambda x: x is not None, batch)) return torch.utils.data.dataloader.default_collate(batch) #Original Data traindata = AdobeDataAffineHR(csv_file='Data_adobe/Adobe_train_data.csv',data_config=data_config_train,transform=None) #Write a dataloader function that can read the database provided by .csv file train_loader = torch.utils.data.DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size, collate_fn=collate_filter_none) print('\n[Phase 2] : Initialization') net=ResnetConditionHR(input_nc=(3,3,1,4), output_nc=4, n_blocks1=7, n_blocks2=3, norm_layer=nn.BatchNorm2d) net.apply(conv_init) net=nn.DataParallel(net) #net.load_state_dict(torch.load(model_dir + 'net_epoch_X')) #uncomment this if you are initializing your model net.cuda() torch.backends.cudnn.benchmark=True #Loss l1_loss=alpha_loss() c_loss=compose_loss() g_loss=alpha_gradient_loss() optimizer = optim.Adam(net.parameters(), lr=1e-4) #optimizer.load_state_dict(torch.load(model_dir + 'optim_epoch_X')) #uncomment this if you are initializing your model log_writer=SummaryWriter(tb_dir) print('Starting Training') step=50 #steps to visualize training images in tensorboard KK=len(train_loader) for epoch in range(0,args.epoch): net.train(); netL, alL, fgL, fg_cL, al_fg_cL, elapse_run, elapse=0,0,0,0,0,0,0 t0=time.time(); testL=0; ct_tst=0; for i,data in enumerate(train_loader): #Initiating fg, bg, alpha, image, seg, bg_tr, multi_fr = data['fg'], data['bg'], data['alpha'], data['image'], data['seg'], data['bg_tr'], data['multi_fr'] fg, bg, alpha, image, seg, bg_tr, multi_fr = Variable(fg.cuda()), Variable(bg.cuda()), Variable(alpha.cuda()), Variable(image.cuda()), Variable(seg.cuda()), Variable(bg_tr.cuda()), Variable(multi_fr.cuda()) mask=(alpha>-0.99).type(torch.cuda.FloatTensor) mask0=Variable(torch.ones(alpha.shape).cuda()) tr0=time.time() 
alpha_pred,fg_pred=net(image,bg_tr,seg,multi_fr) ## Put needed loss here al_loss=l1_loss(alpha,alpha_pred,mask0) fg_loss=l1_loss(fg,fg_pred,mask) al_mask=(alpha_pred>0.95).type(torch.cuda.FloatTensor) fg_pred_c=image*al_mask + fg_pred*(1-al_mask) fg_c_loss= c_loss(image,alpha_pred,fg_pred_c,bg,mask0) al_fg_c_loss=g_loss(alpha,alpha_pred,mask0) loss=al_loss + 2*fg_loss + fg_c_loss + al_fg_c_loss optimizer.zero_grad() loss.backward() optimizer.step() netL += loss.data alL += al_loss.data fgL += fg_loss.data fg_cL += fg_c_loss.data al_fg_cL += al_fg_c_loss.data log_writer.add_scalar('training_loss', loss.data, epoch*KK + i + 1) log_writer.add_scalar('alpha_loss', al_loss.data, epoch*KK + i + 1) log_writer.add_scalar('fg_loss', fg_loss.data, epoch*KK + i + 1) log_writer.add_scalar('comp_loss', fg_c_loss.data, epoch*KK + i + 1) log_writer.add_scalar('alpha_gradient_loss', al_fg_c_loss.data, epoch*KK + i + 1) t1=time.time() elapse +=t1 -t0 elapse_run += t1-tr0 t0=t1 testL+=loss.data ct_tst+=1 if i % step == (step-1): print('[%d, %5d] Total-loss: %.4f Alpha-loss: %.4f Fg-loss: %.4f Comp-loss: %.4f Alpha-gradient-loss: %.4f Time-all: %.4f Time-fwbw: %.4f' % (epoch + 1, i + 1, netL/step, alL/step, fgL/step, fg_cL/step, al_fg_cL/step, elapse/step, elapse_run/step)) netL, alL, fgL, fg_cL, al_fg_cL, elapse_run, elapse=0,0,0,0,0,0,0 write_tb_log(image,'image',log_writer,i) write_tb_log(seg,'seg',log_writer,i) write_tb_log(alpha,'alpha',log_writer,i) write_tb_log(alpha_pred,'alpha_pred',log_writer,i) write_tb_log(fg*mask,'fg',log_writer,i) write_tb_log(fg_pred*mask,'fg_pred',log_writer,i) write_tb_log(multi_fr[0:4,0,...].unsqueeze(1),'multi_fr',log_writer,i) #composition alpha_pred=(alpha_pred+1)/2 comp=fg_pred*alpha_pred + (1-alpha_pred)*bg write_tb_log(comp,'composite',log_writer,i) del comp del fg, bg, alpha, image, alpha_pred, fg_pred, seg, multi_fr #Saving torch.save(net.state_dict(), model_dir + 'net_epoch_%d_%.4f.pth' %(epoch,testL/ct_tst)) torch.save(optimizer.state_dict(), model_dir + 'optim_epoch_%d_%.4f.pth' %(epoch,testL/ct_tst))
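A CPU-only sketch of how the per-iteration loss above is assembled, using dummy tensors instead of network outputs and assuming the loss classes from loss_functions.py are importable from the working directory:

import torch
from loss_functions import alpha_loss, compose_loss, alpha_gradient_loss

l1_loss, c_loss, g_loss = alpha_loss(), compose_loss(), alpha_gradient_loss()

alpha      = torch.rand(2, 1, 64, 64) * 2 - 1     # ground truth in [-1, 1]
alpha_pred = torch.rand(2, 1, 64, 64) * 2 - 1
image      = torch.rand(2, 3, 64, 64)
fg, fg_pred, bg = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)

mask0 = torch.ones(alpha.shape)
mask  = (alpha > -0.99).float()

# composite the predicted foreground with the image where alpha is confidently foreground
al_mask   = (alpha_pred > 0.95).float()
fg_pred_c = image * al_mask + fg_pred * (1 - al_mask)

loss = (l1_loss(alpha, alpha_pred, mask0)
        + 2 * l1_loss(fg, fg_pred, mask)
        + c_loss(image, alpha_pred, fg_pred_c, bg, mask0)
        + g_loss(alpha, alpha_pred, mask0))
print(loss)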
import numpy as np import cv2, pdb, glob, argparse MAX_FEATURES = 500 GOOD_MATCH_PERCENT = 0.15 def alignImages(im1, im2,masksDL): # Convert images to grayscale im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY) im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY) akaze = cv2.AKAZE_create() keypoints1, descriptors1 = akaze.detectAndCompute(im1, None) keypoints2, descriptors2 = akaze.detectAndCompute(im2, None) # Match features. matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE) matches = matcher.match(descriptors1, descriptors2, None) # Sort matches by score matches.sort(key=lambda x: x.distance, reverse=False) # Remove not so good matches numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT) matches = matches[:numGoodMatches] # Extract location of good matches points1 = np.zeros((len(matches), 2), dtype=np.float32) points2 = np.zeros((len(matches), 2), dtype=np.float32) for i, match in enumerate(matches): points1[i, :] = keypoints1[match.queryIdx].pt points2[i, :] = keypoints2[match.trainIdx].pt # Find homography h, mask = cv2.findHomography(points1, points2, cv2.RANSAC) # Use homography height, width, channels = im2.shape im1Reg = cv2.warpPerspective(im1, h, (width, height)) # copy image in the empty region, unless it is a foreground. Then copy background mask_rep=(np.sum(im1Reg.astype('float32'),axis=2)==0) im1Reg[mask_rep,0]=im2[mask_rep,0] im1Reg[mask_rep,1]=im2[mask_rep,1] im1Reg[mask_rep,2]=im2[mask_rep,2] mask_rep1=np.logical_and(mask_rep , masksDL[...,0]==255) im1Reg[mask_rep1,0]=im1[mask_rep1,0] im1Reg[mask_rep1,1]=im1[mask_rep1,1] im1Reg[mask_rep1,2]=im1[mask_rep1,2] return im1Reg def adjustExposure(img,back,mask): kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) mask = cv2.dilate(mask, kernel, iterations=10) mask1 = cv2.dilate(mask, kernel, iterations=300) msk=mask1.astype(np.float32)/255-mask.astype(np.float32)/255; msk=msk.astype(np.bool) bias=np.zeros((1,3)); gain=np.ones((1,3)) bias[0,0],gain[0,0]=bias_gain(img[...,0],back[...,0],msk) bias[0,1],gain[0,1]=bias_gain(img[...,1],back[...,1],msk) bias[0,2],gain[0,2]=bias_gain(img[...,2],back[...,2],msk) return bias,gain def bias_gain(orgR,capR,cap_mask): xR=capR[cap_mask] yR=orgR[cap_mask] gainR=np.nanstd(yR)/np.nanstd(xR); biasR=np.nanmean(yR)-gainR*np.nanmean(xR); return biasR,gainR parser = argparse.ArgumentParser(description='Deeplab Segmentation') parser.add_argument('-i', '--input_dir', type=str, required=True,help='Directory to save the output results. 
(required)') parser.add_argument('-v_name','--video_name',type=str, default=None,help='Name of the video') args=parser.parse_args() dir_name=args.input_dir list_im=glob.glob(dir_name + '/*_img.png'); list_im.sort() back=cv2.imread(args.video_name); # back=back.astype('float32')/255 # #adjust bias-gain # bias=[]; gain=[] # for i in range(0,len(list_im),30): # image = cv2.imread(list_im[i]); image=image.astype('float32')/255 # mask = cv2.imread(list_im[i].replace('img','masksDL')) # b,g=adjustExposure(image,back,mask[...,0]) # bias.append(b); gain.append(g) # Bias=np.median(np.asarray(bias),axis=0).squeeze(0); # Gain=np.median(np.asarray(gain),axis=0).squeeze(0) # back_new=back # back_new[...,0]=Gain[0]*back[...,0]+Bias[0] # back_new[...,1]=Gain[1]*back[...,1]+Bias[1] # back_new[...,2]=Gain[2]*back[...,2]+Bias[2] # back_new=(255*back_new).astype(np.uint8) for i in range(0,len(list_im)): image = cv2.imread(list_im[i]) mask = cv2.imread(list_im[i].replace('img','masksDL')) back_align = alignImages(back, image,mask) cv2.imwrite(list_im[i].replace('img','back'),back_align) print('Done: ' + str(i+1) + '/' + str(len(list_im)))
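In this variant bias_gain returns the fitted (bias, gain) pair instead of the corrected channel; the commented-out block above would take the median of these over sampled frames. A toy check:

import numpy as np

org = np.array([[10., 20.], [30., 40.]])
cap = org * 0.5 + 3
msk = np.ones_like(org, dtype=bool)
bias, gain = bias_gain(org, cap, msk)
print(bias, gain)   # ≈ -6.0 and 2.0, i.e. org ≈ cap * gain + bias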
from __future__ import print_function, division import os import torch import pandas as pd import skimage from skimage import io import numpy as np import matplotlib.pyplot as plt import pdb, random from torch.utils.data import Dataset, DataLoader import random, os, cv2 unknown_code=128 class VideoData(Dataset): def __init__(self,csv_file,data_config,transform=None): self.frames = pd.read_csv(csv_file,sep=';') self.transform = transform self.resolution=data_config['reso'] def __len__(self): return len(self.frames) def __getitem__(self,idx): img = io.imread(self.frames.iloc[idx, 0]) back = io.imread(self.frames.iloc[idx, 1]) seg = io.imread(self.frames.iloc[idx, 2]) fr1 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 3]), cv2.COLOR_BGR2GRAY) fr2 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 4]), cv2.COLOR_BGR2GRAY) fr3 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 5]), cv2.COLOR_BGR2GRAY) fr4 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 6]), cv2.COLOR_BGR2GRAY) back_rnd = io.imread(self.frames.iloc[idx, 7]) sz=self.resolution if np.random.random_sample() > 0.5: img = cv2.flip(img,1) seg = cv2.flip(seg,1) back = cv2.flip(back,1) back_rnd = cv2.flip(back_rnd,1) fr1=cv2.flip(fr1,1); fr2=cv2.flip(fr2,1); fr3=cv2.flip(fr3,1); fr4=cv2.flip(fr4,1) #make frames together multi_fr=np.zeros((img.shape[0],img.shape[1],4)) multi_fr[...,0]=fr1; multi_fr[...,1]=fr2; multi_fr[...,2]=fr3; multi_fr[...,3]=fr4; #allow random cropping centered on the segmentation map bbox=create_bbox(seg,seg.shape[0],seg.shape[1]) img=apply_crop(img,bbox,self.resolution) seg=apply_crop(seg,bbox,self.resolution) back=apply_crop(back,bbox,self.resolution) back_rnd=apply_crop(back_rnd,bbox,self.resolution) multi_fr=apply_crop(multi_fr,bbox,self.resolution) #convert seg to guidance map #segg=create_seg_guide(seg,self.resolution) sample = {'image': to_tensor(img), 'seg': to_tensor(create_seg_guide(seg,self.resolution)), 'bg': to_tensor(back), 'multi_fr': to_tensor(multi_fr), 'seg-gt':to_tensor(seg), 'back-rnd': to_tensor(back_rnd)} if self.transform: sample = self.transform(sample) return sample class AdobeDataAffineHR(Dataset): def __init__(self,csv_file,data_config,transform=None): self.frames = pd.read_csv(csv_file,sep=';') self.transform = transform self.resolution=data_config['reso'] self.trimapK=data_config['trimapK'] self.noise=data_config['noise'] def __len__(self): return len(self.frames) def __getitem__(self,idx): try: #load fg = io.imread(self.frames.iloc[idx, 0]) alpha = io.imread(self.frames.iloc[idx, 1]) image = io.imread(self.frames.iloc[idx, 2]) back = io.imread(self.frames.iloc[idx, 3]) fg = cv2.resize(fg, dsize=(800,800)) alpha = cv2.resize(alpha, dsize=(800,800)) back = cv2.resize(back, dsize=(800,800)) image = cv2.resize(image, dsize=(800,800)) sz=self.resolution #random flip if np.random.random_sample() > 0.5: alpha = cv2.flip(alpha,1) fg = cv2.flip(fg,1) back = cv2.flip(back,1) image = cv2.flip(image,1) trimap=generate_trimap(alpha,self.trimapK[0],self.trimapK[1],False) #randcom crop+scale different_sizes = [(576,576),(608,608),(640,640),(672,672),(704,704),(736,736),(768,768),(800,800)] crop_size = random.choice(different_sizes) x, y = random_choice(trimap, crop_size) fg = safe_crop(fg, x, y, crop_size,sz) alpha = safe_crop(alpha, x, y, crop_size,sz) image = safe_crop(image, x, y, crop_size,sz) back = safe_crop(back, x, y, crop_size,sz) trimap = safe_crop(trimap, x, y, crop_size,sz) #Perturb Background: random noise addition or gamma change if self.noise: if np.random.random_sample() > 0.6: 
sigma=np.random.randint(low=2, high=6) mu=np.random.randint(low=0, high=14)-7 back_tr=add_noise(back,mu,sigma) else: back_tr=skimage.exposure.adjust_gamma(back,np.random.normal(1,0.12)) #Create motion cues: transform foreground and create 4 additional images affine_fr=np.zeros((fg.shape[0],fg.shape[1],4)) for t in range(0,4): T=np.random.normal(0,5,(2,1)); theta=np.random.normal(0,7); R=np.array([[np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))],[np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta))]]) sc=np.array([[1+np.random.normal(0,0.05), 0],[0,1]]); sh=np.array([[1, np.random.normal(0,0.05)*(np.random.random_sample() > 0.5)],[np.random.normal(0,0.05)*(np.random.random_sample() > 0.5), 1]]); A=np.concatenate((sc*sh*R, T), axis=1); fg_tr = cv2.warpAffine(fg.astype(np.uint8),A,(fg.shape[1],fg.shape[0]),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_REFLECT) alpha_tr = cv2.warpAffine(alpha.astype(np.uint8),A,(fg.shape[1],fg.shape[0]),flags=cv2.INTER_NEAREST,borderMode=cv2.BORDER_REFLECT) sigma=np.random.randint(low=2, high=6) mu=np.random.randint(low=0, high=14)-7 back_tr0=add_noise(back,mu,sigma) affine_fr[...,t]=cv2.cvtColor(composite(fg_tr,back_tr0,alpha_tr), cv2.COLOR_BGR2GRAY) sample = {'image': to_tensor(image), 'fg': to_tensor(fg), 'alpha': to_tensor(alpha), 'bg': to_tensor(back), 'trimap': to_tensor(trimap), 'bg_tr': to_tensor(back_tr), 'seg': to_tensor(create_seg(alpha,trimap)), 'multi_fr': to_tensor(affine_fr)} if self.transform: sample = self.transform(sample) return sample except Exception as e: print("Error loading: " + self.frames.iloc[idx, 0]) print(e) #Functions def create_seg_guide(rcnn,reso): kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) rcnn=rcnn.astype(np.float32)/255; rcnn[rcnn>0.2]=1; K=25 zero_id=np.nonzero(np.sum(rcnn,axis=1)==0) del_id=zero_id[0][zero_id[0]>250] if len(del_id)>0: del_id=[del_id[0]-2,del_id[0]-1,*del_id] rcnn=np.delete(rcnn,del_id,0) rcnn = cv2.copyMakeBorder( rcnn, 0, K + len(del_id), 0, 0, cv2.BORDER_REPLICATE) rcnn = cv2.erode(rcnn, kernel_er, iterations=np.random.randint(10,20)) rcnn = cv2.dilate(rcnn, kernel_dil, iterations=np.random.randint(3,7)) k_size_list=[(21,21),(31,31),(41,41)] rcnn=cv2.GaussianBlur(rcnn.astype(np.float32),random.choice(k_size_list),0) rcnn=(255*rcnn).astype(np.uint8) rcnn=np.delete(rcnn, range(reso[0],reso[0]+K), 0) return rcnn def crop_holes(img,cx,cy,crop_size): img[cy:cy+crop_size[0],cx:cx+crop_size[1]]=0 return img def create_seg(alpha,trimap): #old num_holes=np.random.randint(low=0, high=3) crop_size_list=[(15,15),(25,25),(35,35),(45,45)] kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) seg = (alpha>0.5).astype(np.float32) #print('Before %.4f max: %.4f' %(seg.sum(),seg.max())) #old seg = cv2.erode(seg, kernel_er, iterations=np.random.randint(low=10,high=20)) seg = cv2.dilate(seg, kernel_dil, iterations=np.random.randint(low=15,high=30)) #print('After %.4f max: %.4f' %(seg.sum(),seg.max())) seg=seg.astype(np.float32) seg=(255*seg).astype(np.uint8) for i in range(num_holes): crop_size=random.choice(crop_size_list) cx,cy = random_choice(trimap,crop_size) seg=crop_holes(seg,cx,cy,crop_size) trimap=crop_holes(trimap,cx,cy,crop_size) k_size_list=[(21,21),(31,31),(41,41)] seg=cv2.GaussianBlur(seg.astype(np.float32),random.choice(k_size_list),0) return seg.astype(np.uint8) def apply_crop(img,bbox,reso): 
img_crop=img[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3],...]; img_crop=cv2.resize(img_crop,reso) return img_crop def create_bbox(mask,R,C): where = np.array(np.where(mask)) x1, y1 = np.amin(where, axis=1) x2, y2 = np.amax(where, axis=1) w=np.maximum(y2-y1,x2-x1); bd=np.random.uniform(0.1,0.4) x1=x1-np.round(bd*w) y1=y1-np.round(bd*w) y2=y2+np.round(bd*w) if x1<0: x1=0 if y1<0: y1=0 if y2>=C: y2=C if x2>=R: x2=R-1 bbox=np.around([x1,y1,x2-x1,y2-y1]).astype('int') return bbox def composite(fg, bg, a): fg = fg.astype(np.float32); bg=bg.astype(np.float32); a=a.astype(np.float32); alpha= np.expand_dims(a / 255,axis=2) im = alpha * fg + (1 - alpha) * bg im = im.astype(np.uint8) return im def add_noise(back,mean,sigma): back=back.astype(np.float32) row,col,ch= back.shape gauss = np.random.normal(mean,sigma,(row,col,ch)) gauss = gauss.reshape(row,col,ch) #gauss = np.repeat(gauss[:, :, np.newaxis], ch, axis=2) noisy = back + gauss noisy[noisy<0]=0; noisy[noisy>255]=255; return noisy.astype(np.uint8) def safe_crop(mat, x, y, crop_size,img_size,cubic=True): img_rows, img_cols = img_size crop_height, crop_width = crop_size if len(mat.shape) == 2: ret = np.zeros((crop_height, crop_width), np.float32) else: ret = np.zeros((crop_height, crop_width, 3), np.float32) crop = mat[y:y + crop_height, x:x + crop_width] h, w = crop.shape[:2] ret[0:h, 0:w] = crop if crop_size != (img_rows, img_cols): if cubic: ret = cv2.resize(ret, dsize=(img_rows, img_cols)) else: ret = cv2.resize(ret, dsize=(img_rows, img_cols), interpolation=cv2.INTER_NEAREST) return ret def generate_trimap(alpha,K1,K2,train_mode): kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) fg = np.array(np.equal(alpha, 255).astype(np.float32)) if train_mode: K=np.random.randint(K1,K2) else: K=np.round((K1+K2)/2).astype('int') fg = cv2.erode(fg, kernel, iterations=K) unknown = np.array(np.not_equal(alpha, 0).astype(np.float32)) unknown = cv2.dilate(unknown, kernel, iterations=2*K) trimap = fg * 255 + (unknown - fg) * 128 return trimap.astype(np.uint8) def random_choice(trimap, crop_size=(320, 320)): img_height, img_width = trimap.shape[0:2] crop_height, crop_width = crop_size val_idx=np.zeros((img_height,img_width)) val_idx[int(crop_height/2):int(img_height-crop_height/2),int(crop_width/2):int(img_width-crop_width/2)]=1 y_indices, x_indices = np.where(np.logical_and(trimap == unknown_code,val_idx==1)) num_unknowns = len(y_indices) x, y = 0, 0 if num_unknowns > 0: ix = np.random.choice(range(num_unknowns)) center_x = x_indices[ix] center_y = y_indices[ix] x = max(0, center_x - int(crop_width / 2)) y = max(0, center_y - int(crop_height / 2)) #added extra return x, y def to_tensor(pic): if len(pic.shape)>=3: img = torch.from_numpy(pic.transpose((2, 0, 1))) else: img=torch.from_numpy(pic) img=img.unsqueeze(0) # backward compatibility return 2*(img.float().div(255))-1
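A small check of generate_trimap above on a synthetic alpha matte: the eroded foreground stays 255, the dilated unknown band becomes 128, and the rest remains background.

import numpy as np

alpha = np.zeros((64, 64), dtype=np.uint8)
alpha[16:48, 16:48] = 255                       # square foreground
trimap = generate_trimap(alpha, 5, 5, train_mode=False)
print(np.unique(trimap))                        # [  0 128 255]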