| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| string (lengths 5 to 22M) | string (lengths 12 to 177) | dict | int64 (0 to 1.37k) |
import logging
import torch.nn as nn
from ..runner import load_checkpoint
class AlexNet(nn.Module):
"""AlexNet backbone.
Args:
num_classes (int): Number of classes for classification. A value <= 0
means no classifier head is built and only conv features are returned.
"""
def __init__(self, num_classes=-1):
super(AlexNet, self).__init__()
self.num_classes = num_classes
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
if self.num_classes > 0:
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
# use default initializer
pass
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.features(x)
if self.num_classes > 0:
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
|
Cream/CDARTS/CDARTS_detection/mmcv/cnn/alexnet.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/cnn/alexnet.py",
"repo_id": "Cream",
"token_count": 1038
}
| 267 |
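A minimal usage sketch of the backbone above. The import path (`from mmcv.cnn import AlexNet`) is assumed from upstream mmcv and is not shown in the record itself.

```python
import torch
from mmcv.cnn import AlexNet   # assumed export path

model = AlexNet(num_classes=-1)     # num_classes <= 0: no classifier head
model.init_weights(pretrained=None)
x = torch.randn(1, 3, 224, 224)
feats = model(x)                    # (1, 256, 6, 6) conv features

clf = AlexNet(num_classes=10)       # with the fully connected classifier
logits = clf(x)                     # (1, 10)
```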
from __future__ import division
import cv2
import numpy as np
def imflip(img, direction='horizontal'):
"""Flip an image horizontally or vertically.
Args:
img (ndarray): Image to be flipped.
direction (str): The flip direction, either "horizontal" or "vertical".
Returns:
ndarray: The flipped image.
"""
assert direction in ['horizontal', 'vertical']
if direction == 'horizontal':
return np.flip(img, axis=1)
else:
return np.flip(img, axis=0)
def imrotate(img,
angle,
center=None,
scale=1.0,
border_value=0,
auto_bound=False):
"""Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
"""
if center is not None and auto_bound:
raise ValueError('`auto_bound` conflicts with `center`')
h, w = img.shape[:2]
if center is None:
center = ((w - 1) * 0.5, (h - 1) * 0.5)
assert isinstance(center, tuple)
matrix = cv2.getRotationMatrix2D(center, -angle, scale)
if auto_bound:
cos = np.abs(matrix[0, 0])
sin = np.abs(matrix[0, 1])
new_w = h * sin + w * cos
new_h = h * cos + w * sin
matrix[0, 2] += (new_w - w) * 0.5
matrix[1, 2] += (new_h - h) * 0.5
w = int(np.round(new_w))
h = int(np.round(new_h))
rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
return rotated
def bbox_clip(bboxes, img_shape):
"""Clip bboxes to fit the image shape.
Args:
bboxes (ndarray): Shape (..., 4*k)
img_shape (tuple): (height, width) of the image.
Returns:
ndarray: Clipped bboxes.
"""
assert bboxes.shape[-1] % 4 == 0
clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)
clipped_bboxes[..., 0::2] = np.maximum(
np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)
clipped_bboxes[..., 1::2] = np.maximum(
np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)
return clipped_bboxes
def bbox_scaling(bboxes, scale, clip_shape=None):
"""Scaling bboxes w.r.t the box center.
Args:
bboxes (ndarray): Shape(..., 4).
scale (float): Scaling factor.
clip_shape (tuple, optional): If specified, bboxes that exceed the
boundary will be clipped according to the given shape (h, w).
Returns:
ndarray: Scaled bboxes.
"""
if float(scale) == 1.0:
scaled_bboxes = bboxes.copy()
else:
w = bboxes[..., 2] - bboxes[..., 0] + 1
h = bboxes[..., 3] - bboxes[..., 1] + 1
dw = (w * (scale - 1)) * 0.5
dh = (h * (scale - 1)) * 0.5
scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
if clip_shape is not None:
return bbox_clip(scaled_bboxes, clip_shape)
else:
return scaled_bboxes
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
"""Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of bboxes, the default value
1.0 means no scaling.
pad_fill (number or list): Value to be filled for padding, None for
no padding.
Returns:
list or ndarray: The cropped image patches.
"""
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
patches = []
for i in range(clipped_bbox.shape[0]):
x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
if pad_fill is None:
patch = img[y1:y2 + 1, x1:x2 + 1, ...]
else:
_x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
if chn == 1:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
else:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
patch = np.array(
pad_fill, dtype=img.dtype) * np.ones(
patch_shape, dtype=img.dtype)
x_start = 0 if _x1 >= 0 else -_x1
y_start = 0 if _y1 >= 0 else -_y1
w = x2 - x1 + 1
h = y2 - y1 + 1
patch[y_start:y_start + h, x_start:x_start +
w, ...] = img[y1:y1 + h, x1:x1 + w, ...]
patches.append(patch)
if bboxes.ndim == 1:
return patches[0]
else:
return patches
def impad(img, shape, pad_val=0):
"""Pad an image to a certain shape.
Args:
img (ndarray): Image to be padded.
shape (tuple): Expected padding shape.
pad_val (number or sequence): Values to be filled in padding areas.
Returns:
ndarray: The padded image.
"""
if not isinstance(pad_val, (int, float)):
assert len(pad_val) == img.shape[-1]
if len(shape) < len(img.shape):
shape = shape + (img.shape[-1], )
assert len(shape) == len(img.shape)
for i in range(len(shape) - 1):
assert shape[i] >= img.shape[i]
pad = np.empty(shape, dtype=img.dtype)
pad[...] = pad_val
pad[:img.shape[0], :img.shape[1], ...] = img
return pad
def impad_to_multiple(img, divisor, pad_val=0):
"""Pad an image to ensure each edge to be multiple to some number.
Args:
img (ndarray): Image to be padded.
divisor (int): Padded image edges will be made multiples of this divisor.
pad_val (number or sequence): Same as :func:`impad`.
Returns:
ndarray: The padded image.
"""
pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
return impad(img, (pad_h, pad_w), pad_val)
|
Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/geometry.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/geometry.py",
"repo_id": "Cream",
"token_count": 3037
}
| 268 |
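A short, hedged sketch of the helpers above. In mmcv of this vintage they are re-exported at the package top level (`mmcv.imrotate`, `mmcv.imcrop`, `mmcv.impad_to_multiple`); that export path is an assumption.

```python
import numpy as np
import mmcv   # imrotate / imcrop / impad_to_multiple assumed re-exported at top level

img = np.zeros((100, 200, 3), dtype=np.uint8)
rot = mmcv.imrotate(img, 30, auto_bound=True)          # canvas grows to fit the rotation
patch = mmcv.imcrop(img, np.array([10, 10, 60, 60]),
                    scale=1.2, pad_fill=0)             # scaled crop, zero-padded at borders
padded = mmcv.impad_to_multiple(img, 32)               # (128, 224, 3): both edges multiples of 32
```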
from .hook import Hook
class ClosureHook(Hook):
def __init__(self, fn_name, fn):
assert hasattr(self, fn_name)
assert callable(fn)
setattr(self, fn_name, fn)
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/closure.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/closure.py",
"repo_id": "Cream",
"token_count": 85
}
| 269 |
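A sketch of how ClosureHook can be used. `after_train_epoch` is one of the no-op stages defined on the Hook base class; the import path is assumed.

```python
from mmcv.runner.hooks import ClosureHook   # assumed import path

def print_epoch(runner):
    print('finished epoch', runner.epoch)

# Overrides the hook's after_train_epoch stage with the closure above; the
# hook would then be registered on a Runner via runner.register_hook(hook).
hook = ClosureHook('after_train_epoch', print_epoch)
```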
import functools
import sys
import time
from getpass import getuser
from socket import gethostname
import torch
import torch.distributed as dist
import mmcv
def get_host_info():
return '{}@{}'.format(getuser(), gethostname())
def get_dist_info():
if torch.__version__ < '1.0':
initialized = dist._initialized
else:
initialized = dist.is_initialized()
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
def get_time_str():
return time.strftime('%Y%m%d_%H%M%S', time.localtime())
def obj_from_dict(info, parent=None, default_args=None):
"""Initialize an object from dict.
The dict must contain the key "type", which indicates the object type; it
can be either a string or a type, such as "list" or ``list``. The remaining
fields are treated as the arguments for constructing the object.
Args:
info (dict): Object types and arguments.
parent (:class:`module`): Module which may contain the expected object
classes.
default_args (dict, optional): Default arguments for initializing the
object.
Returns:
any type: Object built from the dict.
"""
assert isinstance(info, dict) and 'type' in info
assert isinstance(default_args, dict) or default_args is None
args = info.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if parent is not None:
obj_type = getattr(parent, obj_type)
else:
obj_type = sys.modules[obj_type]
elif not isinstance(obj_type, type):
raise TypeError('type must be a str or valid type, but got {}'.format(
type(obj_type)))
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_type(**args)
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/utils.py",
"repo_id": "Cream",
"token_count": 840
}
| 270 |
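A sketch of `obj_from_dict`, using `torch.optim` as the parent module purely for illustration; the export path from mmcv.runner is assumed.

```python
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict   # assumed export

model = nn.Linear(4, 2)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9)
# "type" selects torch.optim.SGD; remaining keys plus default_args become kwargs
optimizer = obj_from_dict(optim_cfg, torch.optim,
                          default_args=dict(params=model.parameters()))
```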
import os
import os.path as osp
import subprocess
import tempfile
from mmcv.utils import requires_executable
@requires_executable('ffmpeg')
def convert_video(in_file, out_file, print_cmd=False, pre_options='',
**kwargs):
"""Convert a video with ffmpeg.
This provides a general API to ffmpeg; the executed command is::
`ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
Options(kwargs) are mapped to ffmpeg commands with the following rules:
- key=val: "-key val"
- key=True: "-key"
- key=False: ""
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
pre_options (str): Options appearing before "-i <in_file>".
print_cmd (bool): Whether to print the final ffmpeg command.
"""
options = []
for k, v in kwargs.items():
if isinstance(v, bool):
if v:
options.append('-{}'.format(k))
elif k == 'log_level':
assert v in [
'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
'verbose', 'debug', 'trace'
]
options.append('-loglevel {}'.format(v))
else:
options.append('-{} {}'.format(k, v))
cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file,
' '.join(options), out_file)
if print_cmd:
print(cmd)
subprocess.call(cmd, shell=True)
@requires_executable('ffmpeg')
def resize_video(in_file,
out_file,
size=None,
ratio=None,
keep_ar=False,
log_level='info',
print_cmd=False,
**kwargs):
"""Resize a video.
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
size (tuple): Expected size (w, h), e.g., (320, 240) or (320, -1).
ratio (tuple or float): Expected resize ratio, (2, 0.5) means
(w*2, h*0.5).
keep_ar (bool): Whether to keep original aspect ratio.
log_level (str): Logging level of ffmpeg.
print_cmd (bool): Whether to print the final ffmpeg command.
"""
if size is None and ratio is None:
raise ValueError('expected size or ratio must be specified')
elif size is not None and ratio is not None:
raise ValueError('size and ratio cannot be specified at the same time')
options = {'log_level': log_level}
if size:
if not keep_ar:
options['vf'] = 'scale={}:{}'.format(size[0], size[1])
else:
options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio'
'=decrease'.format(size[0], size[1]))
else:
if not isinstance(ratio, tuple):
ratio = (ratio, ratio)
options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format(
ratio[0], ratio[1])
convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg')
def cut_video(in_file,
out_file,
start=None,
end=None,
vcodec=None,
acodec=None,
log_level='info',
print_cmd=False,
**kwargs):
"""Cut a clip from a video.
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
start (None or float): Start time (in seconds).
end (None or float): End time (in seconds).
vcodec (None or str): Output video codec, None for unchanged.
acodec (None or str): Output audio codec, None for unchanged.
log_level (str): Logging level of ffmpeg.
print_cmd (bool): Whether to print the final ffmpeg command.
"""
options = {'log_level': log_level}
if vcodec is None:
options['vcodec'] = 'copy'
if acodec is None:
options['acodec'] = 'copy'
if start:
options['ss'] = start
else:
start = 0
if end:
options['t'] = end - start
convert_video(in_file, out_file, print_cmd, **options)
@requires_executable('ffmpeg')
def concat_video(video_list,
out_file,
vcodec=None,
acodec=None,
log_level='info',
print_cmd=False,
**kwargs):
"""Concatenate multiple videos into a single one.
Args:
video_list (list): A list of video filenames
out_file (str): Output video filename
vcodec (None or str): Output video codec, None for unchanged
acodec (None or str): Output audio codec, None for unchanged
log_level (str): Logging level of ffmpeg.
print_cmd (bool): Whether to print the final ffmpeg command.
"""
_, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
with open(tmp_filename, 'w') as f:
for filename in video_list:
f.write('file {}\n'.format(osp.abspath(filename)))
options = {'log_level': log_level}
if vcodec is None:
options['vcodec'] = 'copy'
if acodec is None:
options['acodec'] = 'copy'
convert_video(
tmp_filename,
out_file,
print_cmd,
pre_options='-f concat -safe 0',
**options)
os.remove(tmp_filename)
|
Cream/CDARTS/CDARTS_detection/mmcv/video/processing.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/video/processing.py",
"repo_id": "Cream",
"token_count": 2503
}
| 271 |
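A hedged sketch of the ffmpeg wrappers above. All filenames are placeholders, the import path is assumed, and ffmpeg must be installed (the decorator checks for the executable).

```python
from mmcv.video import resize_video, cut_video, concat_video   # assumed exports

resize_video('in.mp4', 'small.mp4', size=(320, -1))            # maps to -vf scale=320:-1
cut_video('in.mp4', 'clip.mp4', start=2.0, end=10.0)           # stream copy by default
concat_video(['clip.mp4', 'small.mp4'], 'merged.mp4', print_cmd=True)
```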
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .fp16 import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
Cream/CDARTS/CDARTS_detection/mmdet/core/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/__init__.py",
"repo_id": "Cream",
"token_count": 118
}
| 272 |
from .base_sampler import BaseSampler
from ..assign_sampling import build_sampler
class CombinedSampler(BaseSampler):
def __init__(self, pos_sampler, neg_sampler, **kwargs):
super(CombinedSampler, self).__init__(**kwargs)
self.pos_sampler = build_sampler(pos_sampler, **kwargs)
self.neg_sampler = build_sampler(neg_sampler, **kwargs)
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/combined_sampler.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/combined_sampler.py",
"repo_id": "Cream",
"token_count": 203
}
| 273 |
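An illustrative sampler config in the style used by mmdet train configs. The sub-sampler types and numbers are placeholders in the spirit of Libra R-CNN settings, not values taken from this file.

```python
# Illustrative values only; CombinedSampler builds pos_sampler / neg_sampler
# from these sub-configs via build_sampler.
sampler_cfg = dict(
    type='CombinedSampler',
    num=512,
    pos_fraction=0.25,
    add_gt_as_proposals=True,
    pos_sampler=dict(type='InstanceBalancedPosSampler'),
    neg_sampler=dict(type='IoUBalancedNegSampler', floor_thr=-1,
                     floor_fraction=0, num_bins=3))
```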
import functools
from inspect import getfullargspec
import torch
from .utils import cast_tensor_type
def auto_fp16(apply_to=None, out_fp32=False):
"""Decorator to enable fp16 training automatically.
This decorator is useful when you write custom modules and want to support
mixed precision training. If input arguments are fp32 tensors, they will
be converted to fp16 automatically. Arguments other than fp32 tensors are
ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp32 (bool): Whether to convert the output back to fp32.
:Example:
class MyModule1(nn.Module):
# Convert x and y to fp16
@auto_fp16()
def forward(self, x, y):
pass
class MyModule2(nn.Module):
# convert pred to fp16
@auto_fp16(apply_to=('pred', ))
def do_something(self, pred, others):
pass
"""
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@auto_fp16 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
# NOTE: default args are not taken into consideration
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = {}
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper
def force_fp32(apply_to=None, out_fp16=False):
"""Decorator to convert input arguments to fp32 in force.
This decorator is useful when you write custom modules and want to support
mixed precision training. If there are some inputs that must be processed
in fp32 mode, then this decorator can handle it. If input arguments are
fp16 tensors, they will be converted to fp32 automatically. Arguments other
than fp16 tensors are ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp16 (bool): Whether to convert the output back to fp16.
:Example:
class MyModule1(nn.Module):
# Convert x and y to fp32
@force_fp32()
def loss(self, x, y):
pass
class MyModule2(nn.Module):
# convert pred to fp32
@force_fp32(apply_to=('pred', ))
def post_process(self, pred, others):
pass
"""
def force_fp32_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@force_fp32 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.half, torch.float))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = dict()
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.half, torch.float)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp16:
output = cast_tensor_type(output, torch.float, torch.half)
return output
return new_func
return force_fp32_wrapper
|
Cream/CDARTS/CDARTS_detection/mmdet/core/fp16/decorators.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/fp16/decorators.py",
"repo_id": "Cream",
"token_count": 2998
}
| 274 |
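A minimal sketch showing where the two decorators are typically attached. `fp16_enabled` is the attribute the guards above check; the import path and the class are illustrative, not part of the original file.

```python
import torch.nn as nn
from mmdet.core import auto_fp16, force_fp32   # assumed exports

class BBoxHeadSketch(nn.Module):
    def __init__(self):
        super(BBoxHeadSketch, self).__init__()
        self.fp16_enabled = False      # set to True by the fp16 training hook

    @auto_fp16(apply_to=('feats', ))
    def forward(self, feats):
        ...                            # runs on fp16 inputs when enabled

    @force_fp32(apply_to=('cls_score', 'bbox_pred'))
    def loss(self, cls_score, bbox_pred, targets):
        ...                            # always computed on fp32 inputs
```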
import os.path as osp
import mmcv
import numpy as np
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .pipelines import Compose
from .registry import DATASETS
@DATASETS.register_module
class CustomDataset(Dataset):
"""Custom dataset for detection.
Annotation format:
[
{
'filename': 'a.jpg',
'width': 1280,
'height': 720,
'ann': {
'bboxes': <np.ndarray> (n, 4),
'labels': <np.ndarray> (n, ),
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
'labels_ignore': <np.ndarray> (k, ) (optional field)
}
},
...
]
The `ann` field is optional for testing.
"""
CLASSES = None
def __init__(self,
ann_file,
pipeline,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
# load annotations (and proposals)
self.img_infos = self.load_annotations(self.ann_file)
if self.proposal_file is not None:
self.proposals = self.load_proposals(self.proposal_file)
else:
self.proposals = None
# filter images too small
if not test_mode:
valid_inds = self._filter_imgs()
self.img_infos = [self.img_infos[i] for i in valid_inds]
if self.proposals is not None:
self.proposals = [self.proposals[i] for i in valid_inds]
# set group flag for the sampler
if not self.test_mode:
self._set_group_flag()
# processing pipeline
self.pipeline = Compose(pipeline)
def __len__(self):
return len(self.img_infos)
def load_annotations(self, ann_file):
return mmcv.load(ann_file)
def load_proposals(self, proposal_file):
return mmcv.load(proposal_file)
def get_ann_info(self, idx):
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
valid_inds = []
for i, img_info in enumerate(self.img_infos):
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.img_infos[i]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. It must be a float
when evaluating mAP, and can be a list when evaluating recall.
Default: 0.5.
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
Default: None.
"""
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError('metric {} is not supported'.format(metric))
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = {}
if metric == 'mAP':
assert isinstance(iou_thr, float)
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=scale_ranges,
iou_thr=iou_thr,
dataset=self.CLASSES,
logger=logger)
eval_results['mAP'] = mean_ap
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
if isinstance(iou_thr, float):
iou_thr = [iou_thr]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thr,
print_summary=False)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thr):
eval_results['recall@{}@{}'.format(num, iou)] = recalls[i,
j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results['AR@{}'.format(num)] = ar[i]
return eval_results
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/custom.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/custom.py",
"repo_id": "Cream",
"token_count": 3973
}
| 275 |
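A hypothetical subclass sketch: `MyDataset`, its class names, and the annotation file are illustrative, following the format documented in the class docstring above. The import paths are assumptions.

```python
import mmcv
from mmdet.datasets import DATASETS, CustomDataset   # assumed exports

@DATASETS.register_module
class MyDataset(CustomDataset):
    CLASSES = ('cat', 'dog')

    def load_annotations(self, ann_file):
        # ann_file is assumed to hold a list of dicts with 'filename',
        # 'width', 'height' and an 'ann' dict, as described above.
        return mmcv.load(ann_file)
```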
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from .custom import CustomDataset
from .registry import DATASETS
@DATASETS.register_module
class XMLDataset(CustomDataset):
def __init__(self, min_size=None, **kwargs):
super(XMLDataset, self).__init__(**kwargs)
self.cat2label = {cat: i + 1 for i, cat in enumerate(self.CLASSES)}
self.min_size = min_size
def load_annotations(self, ann_file):
img_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = 'JPEGImages/{}.jpg'.format(img_id)
xml_path = osp.join(self.img_prefix, 'Annotations',
'{}.xml'.format(img_id))
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
img_infos.append(
dict(id=img_id, filename=filename, width=width, height=height))
return img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
xml_path = osp.join(self.img_prefix, 'Annotations',
'{}.xml'.format(img_id))
tree = ET.parse(xml_path)
root = tree.getroot()
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
label = self.cat2label[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [
int(bnd_box.find('xmin').text),
int(bnd_box.find('ymin').text),
int(bnd_box.find('xmax').text),
int(bnd_box.find('ymax').text)
]
ignore = False
if self.min_size:
assert not self.test_mode
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if w < self.min_size or h < self.min_size:
ignore = True
if difficult or ignore:
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if not bboxes:
bboxes = np.zeros((0, 4))
labels = np.zeros((0, ))
else:
bboxes = np.array(bboxes, ndmin=2) - 1
labels = np.array(labels)
if not bboxes_ignore:
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0, ))
else:
bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
labels_ignore = np.array(labels_ignore)
ann = dict(
bboxes=bboxes.astype(np.float32),
labels=labels.astype(np.int64),
bboxes_ignore=bboxes_ignore.astype(np.float32),
labels_ignore=labels_ignore.astype(np.int64))
return ann
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/xml_style.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/xml_style.py",
"repo_id": "Cream",
"token_count": 1657
}
| 276 |
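A sketch of a PASCAL-VOC-style dataset built on XMLDataset; the class list is truncated and purely illustrative, and the registry import is assumed.

```python
from mmdet.datasets import DATASETS                  # assumed export
from mmdet.datasets.xml_style import XMLDataset

@DATASETS.register_module
class TinyVOCDataset(XMLDataset):
    # XMLDataset reads JPEGImages/*.jpg and Annotations/*.xml under img_prefix
    CLASSES = ('aeroplane', 'bicycle', 'bird')        # truncated for illustration
```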
# --------------------------------------------------------
# Copyright (c) 2019 Jianyuan Guo ([email protected])
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from .mbblock_ops import OPS
PRIMITIVES = [
'ir_k3_e3',
'ir_k3_e6',
'ir_k3_e6_r2',
'ir_k5_e3',
'ir_k5_e6',
'ir_k7_e6'
]
norm_cfg_ = {
'BN': nn.BatchNorm2d,
'SyncBN': nn.SyncBatchNorm,
'GN': nn.GroupNorm,
}
norm_layer = norm_cfg_['BN']
class MbblockHead(nn.Module):
def __init__(self, latency=None, gamma=0.02, genotype=None, **kwargs):
super(MbblockHead, self).__init__()
self.latency = latency
self.gamma = gamma
self.genotype = genotype
self.last_dim = kwargs.get('out_channels', [256])[-1]
self.strides = kwargs.get('strides')
self.out_channels = kwargs.get('out_channels')
bn_type = kwargs.get('bn_type', 'BN')
self.cells = nn.ModuleList()
input_size = 7
_in_channel = self.last_dim # usually the same as input channel in detector
for _genotype, _stride, _out_channel in zip(genotype, self.strides, self.out_channels):
self.cells.append(OPS[_genotype](input_size, _in_channel, _out_channel, _stride, bn=bn_type))
input_size = input_size // _stride
_in_channel = _out_channel
for m in self.modules():
if isinstance(m, nn.SyncBatchNorm):
m._specify_ddp_gpu_num(1)
def forward(self, x):
for cell in self.cells:
x = cell(x)
return x, None
|
Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_head_search.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_head_search.py",
"repo_id": "Cream",
"token_count": 747
}
| 277 |
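Illustrative constructor arguments only (not executed here): genotype entries must be keys of OPS, i.e. drawn from PRIMITIVES above, and are consumed pairwise with strides and out_channels in the constructor loop.

```python
# Hypothetical arguments; building the head also requires the OPS defined in
# mbblock_ops, which is not shown in this file.
head_cfg = dict(
    genotype=['ir_k3_e6', 'ir_k5_e6', 'ir_k7_e6'],
    strides=[1, 2, 1],
    out_channels=[256, 256, 256],
    bn_type='BN')
# MbblockHead(**head_cfg) would stack one OPS cell per genotype entry.
```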
import torch
from mmdet.core import bbox2roi, build_assigner, build_sampler
from .two_stage import TwoStageDetector
from .. import builder
from ..registry import DETECTORS
@DETECTORS.register_module
class MaskScoringRCNN(TwoStageDetector):
"""Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self,
backbone,
rpn_head,
bbox_roi_extractor,
bbox_head,
mask_roi_extractor,
mask_head,
train_cfg,
test_cfg,
neck=None,
shared_head=None,
mask_iou_head=None,
pretrained=None):
super(MaskScoringRCNN, self).__init__(
backbone=backbone,
neck=neck,
shared_head=shared_head,
rpn_head=rpn_head,
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
mask_roi_extractor=mask_roi_extractor,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
self.mask_iou_head = builder.build_head(mask_iou_head)
self.mask_iou_head.init_weights()
# TODO: refactor forward_train in two stage to reduce code redundancy
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i],
gt_bboxes[i],
gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
if self.with_bbox:
rois = bbox2roi([res.bboxes for res in sampling_results])
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_targets = self.bbox_head.get_target(sampling_results,
gt_bboxes, gt_labels,
self.train_cfg.rcnn)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
*bbox_targets)
losses.update(loss_bbox)
# mask head forward and loss
if self.with_mask:
if not self.share_roi_extractor:
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_targets = self.mask_head.get_target(sampling_results,
gt_masks,
self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_pred, mask_targets,
pos_labels)
losses.update(loss_mask)
# mask iou head forward and loss
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = self.mask_iou_head(mask_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)
), pos_labels]
mask_iou_targets = self.mask_iou_head.get_target(
sampling_results, gt_masks, pos_mask_pred, mask_targets,
self.train_cfg.rcnn)
loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
mask_iou_targets)
losses.update(loss_mask_iou)
return losses
def simple_test_mask(self,
x,
img_meta,
det_bboxes,
det_labels,
rescale=False):
# image shape of the first image in the batch (only one)
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
mask_scores = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
_bboxes = (
det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,
det_labels,
self.test_cfg.rcnn,
ori_shape, scale_factor,
rescale)
# get mask scores with mask iou head
mask_iou_pred = self.mask_iou_head(
mask_feats,
mask_pred[range(det_labels.size(0)), det_labels + 1])
mask_scores = self.mask_iou_head.get_mask_scores(
mask_iou_pred, det_bboxes, det_labels)
return segm_result, mask_scores
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/mask_scoring_rcnn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/mask_scoring_rcnn.py",
"repo_id": "Cream",
"token_count": 5128
}
| 278 |
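A generic construction sketch in mmdet style. The config path is a placeholder, not a file shipped with this record; detectors registered via @DETECTORS.register_module are normally built through the model builder rather than instantiated directly.

```python
import mmcv
from mmdet.models import build_detector   # assumed export

cfg = mmcv.Config.fromfile('configs/ms_rcnn_r50_fpn_1x.py')   # placeholder path
model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
```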
from .fcn_mask_head import FCNMaskHead
from .fused_semantic_head import FusedSemanticHead
from .grid_head import GridHead
from .htc_mask_head import HTCMaskHead
from .maskiou_head import MaskIoUHead
__all__ = [
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/__init__.py",
"repo_id": "Cream",
"token_count": 113
}
| 279 |
# --------------------------------------------------------
# Copyright (c) 2019 Jianyuan Guo ([email protected])
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import kaiming_init, constant_init, xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
from .auto_neck.build_neck import build_search_neck
@NECKS.register_module
class SearchPAFPN(nn.Module):
r""" PAFPN Arch
TBS TD TBS BU
C5 -----> C5 P5 -----> N5 N5
C4 -----> C4 P4 -----> N4 N4
C3 -----> C3 P3 -----> N3 N3
C2 -----> C2 P2 -----> N2 N2
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
activation=None,
pa_kernel=3,
search_neck=None):
super(SearchPAFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.fp16_enabled = False
self.pa_kernel = pa_kernel
self.SearchNeck = build_search_neck(search_neck)
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.pa_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level - 1): # Faster (0,3) one-stage (1,3)
if pa_kernel > 0:
pa_conv = ConvModule(
out_channels, out_channels, pa_kernel,
padding=(pa_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
activation=activation, inplace=True)
self.pa_convs.append(pa_conv)
# add extra conv layers (e.g., RetinaNet); one-stage 5-4+1
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
self.fpn_convs = nn.ModuleList()
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channel = self.in_channels[self.backbone_end_level - 1]
else:
in_channel = out_channels
extra_fpn_conv = ConvModule(
in_channel, out_channels, 3,
stride=2, padding=1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg, activation=self.activation, inplace=True)
self.fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
# inputs [C2, C3, C4, C5]
assert len(inputs) == len(self.in_channels)
# build top-down laterals
laterals = self.SearchNeck(inputs[self.start_level:], 1)
used_backbone_levels = len(laterals) # Faster rcnn:4; one-stage:3
# Top-down path
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i - 1] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')
laterals_mid = self.SearchNeck(laterals, 2)
# Bottom-up path
# build outputs
if self.pa_kernel > 0:
outs = [laterals_mid[0]]
for i in range(0, self.backbone_end_level - self.start_level - 1): # Faster: [0,3]
tmp = F.max_pool2d(outs[i], 2, stride=2) + laterals_mid[i + 1]
outs.append(self.pa_convs[i](tmp))
else:
outs = laterals_mid
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[0](orig))
else:
outs.append(self.fpn_convs[0](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i-used_backbone_levels](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i-used_backbone_levels](outs[-1]))
return tuple(outs), None
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/search_pafpn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/search_pafpn.py",
"repo_id": "Cream",
"token_count": 2998
}
| 280 |
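An illustrative neck config: the channel numbers mirror a ResNet-50 backbone, and `search_neck` is a placeholder whose contents depend on build_search_neck, which is not shown here.

```python
# Illustrative values only, not a config shipped with the repository.
neck_cfg = dict(
    type='SearchPAFPN',
    in_channels=[256, 512, 1024, 2048],   # C2-C5 channels of a ResNet-50
    out_channels=256,
    num_outs=5,
    add_extra_convs=True,
    pa_kernel=3,
    search_neck=dict())                   # placeholder sub-config
```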
from .dcn import (DeformConv, DeformConvPack, ModulatedDeformConv,
ModulatedDeformConvPack, DeformRoIPooling,
DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack,
deform_conv, modulated_deform_conv, deform_roi_pooling)
from .gcb import ContextBlock
from .nms import nms, soft_nms
from .roi_align import RoIAlign, roi_align
from .roi_pool import RoIPool, roi_pool
from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
from .masked_conv import MaskedConv2d
__all__ = [
'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool',
'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss',
'MaskedConv2d', 'ContextBlock'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/__init__.py",
"repo_id": "Cream",
"token_count": 408
}
| 281 |
#include <torch/extension.h>
#include <cmath>
#include <vector>
int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois,
const float spatial_scale, const int channels,
const int height, const int width, const int num_rois,
const int pooled_h, const int pooled_w,
at::Tensor output, at::Tensor argmax);
int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
const at::Tensor argmax, const float spatial_scale,
const int batch_size, const int channels,
const int height, const int width,
const int num_rois, const int pooled_h,
const int pooled_w, at::Tensor bottom_grad);
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
int roi_pooling_forward_cuda(at::Tensor features, at::Tensor rois,
int pooled_height, int pooled_width,
float spatial_scale, at::Tensor output,
at::Tensor argmax) {
CHECK_INPUT(features);
CHECK_INPUT(rois);
CHECK_INPUT(output);
CHECK_INPUT(argmax);
// Number of ROIs
int num_rois = rois.size(0);
int size_rois = rois.size(1);
if (size_rois != 5) {
printf("wrong roi size\n");
return 0;
}
int channels = features.size(1);
int height = features.size(2);
int width = features.size(3);
ROIPoolForwardLaucher(features, rois, spatial_scale, channels, height, width,
num_rois, pooled_height, pooled_width, output, argmax);
return 1;
}
int roi_pooling_backward_cuda(at::Tensor top_grad, at::Tensor rois,
at::Tensor argmax, float spatial_scale,
at::Tensor bottom_grad) {
CHECK_INPUT(top_grad);
CHECK_INPUT(rois);
CHECK_INPUT(argmax);
CHECK_INPUT(bottom_grad);
int pooled_height = top_grad.size(2);
int pooled_width = top_grad.size(3);
int num_rois = rois.size(0);
int size_rois = rois.size(1);
if (size_rois != 5) {
printf("wrong roi size\n");
return 0;
}
int batch_size = bottom_grad.size(0);
int channels = bottom_grad.size(1);
int height = bottom_grad.size(2);
int width = bottom_grad.size(3);
ROIPoolBackwardLaucher(top_grad, rois, argmax, spatial_scale, batch_size,
channels, height, width, num_rois, pooled_height,
pooled_width, bottom_grad);
return 1;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &roi_pooling_forward_cuda, "Roi_Pooling forward (CUDA)");
m.def("backward", &roi_pooling_backward_cuda, "Roi_Pooling backward (CUDA)");
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp",
"repo_id": "Cream",
"token_count": 1416
}
| 282 |
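A Python-side sketch of the op bound above; it assumes the compiled extension is wrapped by `mmdet.ops.roi_pool` and that a CUDA device is available.

```python
import torch
from mmdet.ops import roi_pool   # assumed wrapper around the CUDA extension

feats = torch.randn(1, 16, 32, 32, device='cuda')
# rois are (batch_idx, x1, y1, x2, y2), matching the size-5 check in the C++ code
rois = torch.tensor([[0., 4., 4., 20., 20.]], device='cuda')
out = roi_pool(feats, rois, (7, 7), 1.0)   # -> (1, 16, 7, 7)
```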
import inspect
import mmcv
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
def __repr__(self):
format_str = self.__class__.__name__ + '(name={}, items={})'.format(
self._name, list(self._module_dict.keys()))
return format_str
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
def get(self, key):
return self._module_dict.get(key, None)
def _register_module(self, module_class):
"""Register a module.
Args:
module_class (type): Module class to be registered.
"""
if not inspect.isclass(module_class):
raise TypeError('module must be a class, but got {}'.format(
type(module_class)))
module_name = module_class.__name__
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
self._module_dict[module_name] = module_class
def register_module(self, cls):
self._register_module(cls)
return cls
def build_from_cfg(cfg, registry, default_args=None):
"""Build a module from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
registry (:obj:`Registry`): The registry to search the type from.
default_args (dict, optional): Default initialization arguments.
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and 'type' in cfg
assert isinstance(default_args, dict) or default_args is None
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
obj_type = registry.get(obj_type)
if obj_type is None:
raise KeyError('{} is not in the {} registry'.format(
obj_type, registry.name))
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError('type must be a str or valid type, but got {}'.format(
type(obj_type)))
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_type(**args)
|
Cream/CDARTS/CDARTS_detection/mmdet/utils/registry.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/utils/registry.py",
"repo_id": "Cream",
"token_count": 975
}
| 283 |
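A minimal sketch of the Registry / build_from_cfg pattern defined above; the import path from mmdet.utils and the toy class are assumptions.

```python
from mmdet.utils import Registry, build_from_cfg   # assumed exports

BACKBONES = Registry('backbone')

@BACKBONES.register_module
class ToyBackbone(object):
    def __init__(self, depth=18):
        self.depth = depth

# "type" is looked up in the registry; the rest become constructor kwargs
model = build_from_cfg(dict(type='ToyBackbone', depth=50), BACKBONES)
assert model.depth == 50
```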
## Prerequisites
- Ubuntu 16.04
- Python 3.7
- CUDA 11.1 (lower versions may work but were not tested)
- NVIDIA GPU (>= 11G graphic memory) + CuDNN v7.3
This repository has been tested on RTX 3090. Configurations (e.g. batch size, image patch size) may need to be changed on different platforms.
## Installation
* Clone this repo:
```bash
cd CDARTS_segmentation
```
* Install dependencies:
```bash
bash install.sh
```
## Usage
### 0. Prepare the dataset
* Download the [leftImg8bit_trainvaltest.zip](https://www.cityscapes-dataset.com/file-handling/?packageID=3) and [gtFine_trainvaltest.zip](https://www.cityscapes-dataset.com/file-handling/?packageID=1) from the Cityscapes.
* Prepare the annotations by using the [createTrainIdLabelImgs.py](https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createTrainIdLabelImgs.py).
* Put the [file of image list](tools/datasets/cityscapes/) into where you save the dataset.
### 1. Train from scratch
* `cd HRTNet/train`
* Set the dataset path via `ln -s $YOUR_DATA_PATH ../DATASET`
* Set the output path via `mkdir ../OUTPUT`
* Train from scratch
```
export DETECTRON2_DATASETS="$Your_DATA_PATH"
NGPUS=8
python -m torch.distributed.launch --nproc_per_node=$NGPUS train.py --world_size $NGPUS --seed 12367 --config ../configs/cityscapes/cydas.yaml
```
### 2. Evaluation
We provide training models and logs, which can be downloaded from [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox?usp=sharing).
```bash
cd train
```
* Download the pretrained model weights from [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox?usp=sharing).
* Set `config.model_path = $YOUR_MODEL_PATH` in `cydas.yaml`.
* Set `config.json_file = $CDARTS_MODEL` in `cydas.yaml`.
* Start the evaluation process:
```bash
CUDA_VISIBLE_DEVICES=0 python test.py
```
|
Cream/CDARTS/CDARTS_segmentation/README.md/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/README.md",
"repo_id": "Cream",
"token_count": 670
}
| 284 |
# ------------------------------------------------------------------------------
# Loads Cityscapes panoptic dataset.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import json
import os
import numpy as np
from .cityscapes import Cityscapes
from .utils import DatasetDescriptor
from ..transforms import build_transforms, PanopticTargetGenerator, SemanticTargetGenerator
_CITYSCAPES_INFORMATION = DatasetDescriptor(
splits_to_sizes={'train': 2975,
'trainval': 3475,
'val': 500,
'test': 1525},
num_classes=19,
ignore_label=255,
)
# Add 1 void label.
_CITYSCAPES_PANOPTIC_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33, 0]
_CITYSCAPES_THING_LIST = [11, 12, 13, 14, 15, 16, 17, 18]
class CityscapesPanoptic(Cityscapes):
"""
Cityscapes panoptic segmentation dataset.
Arguments:
root: Str, root directory.
split: Str, data split, e.g. train/val/test.
is_train: Bool, for training or testing.
crop_size: Tuple, crop size.
mirror: Bool, whether to apply random horizontal flip.
min_scale: Float, min scale in scale augmentation.
max_scale: Float, max scale in scale augmentation.
scale_step_size: Float, step size to select random scale.
mean: Tuple, image mean.
std: Tuple, image std.
semantic_only: Bool, only use semantic segmentation label.
ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch.
small_instance_area: Integer, indicates largest area for small instances.
small_instance_weight: Integer, indicates semantic loss weights for small instances.
"""
def __init__(self,
root,
split,
is_train=True,
crop_size=(513, 1025),
mirror=True,
min_scale=0.5,
max_scale=2.,
scale_step_size=0.25,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
semantic_only=False,
ignore_stuff_in_offset=False,
small_instance_area=0,
small_instance_weight=1,
**kwargs):
super(CityscapesPanoptic, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale,
scale_step_size, mean, std)
self.num_classes = _CITYSCAPES_INFORMATION.num_classes
self.ignore_label = _CITYSCAPES_INFORMATION.ignore_label
self.label_pad_value = (0, 0, 0)
self.has_instance = True
self.label_divisor = 1000
self.label_dtype = np.float32
self.thing_list = _CITYSCAPES_THING_LIST
# Get image and annotation list.
if split == 'test':
self.img_list = self._get_files('image', self.split)
self.ann_list = None
self.ins_list = None
else:
self.img_list = []
self.ann_list = []
self.ins_list = []
json_filename = os.path.join(self.root, 'gtFine', 'cityscapes_panoptic_{}_trainId.json'.format(self.split))
dataset = json.load(open(json_filename))
for img in dataset['images']:
img_file_name = img['file_name']
self.img_list.append(os.path.join(
self.root, 'leftImg8bit', self.split, img_file_name.split('_')[0],
img_file_name.replace('_gtFine', '')))
for ann in dataset['annotations']:
ann_file_name = ann['file_name']
self.ann_list.append(os.path.join(
self.root, 'gtFine', 'cityscapes_panoptic_{}_trainId'.format(self.split), ann_file_name))
self.ins_list.append(ann['segments_info'])
assert len(self) == _CITYSCAPES_INFORMATION.splits_to_sizes[self.split]
self.transform = build_transforms(self, is_train)
if semantic_only:
self.target_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id)
else:
self.target_transform = PanopticTargetGenerator(self.ignore_label, self.rgb2id, _CITYSCAPES_THING_LIST,
sigma=8, ignore_stuff_in_offset=ignore_stuff_in_offset,
small_instance_area=small_instance_area,
small_instance_weight=small_instance_weight)
# Generates semantic label for evaluation.
self.raw_label_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id)
@staticmethod
def train_id_to_eval_id():
return _CITYSCAPES_PANOPTIC_TRAIN_ID_TO_EVAL_ID
@staticmethod
def rgb2id(color):
"""Converts the color to panoptic label.
Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`.
Args:
color: Ndarray or a tuple, color encoded image.
Returns:
Panoptic label.
"""
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/cityscapes_panoptic.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/cityscapes_panoptic.py",
"repo_id": "Cream",
"token_count": 2718
}
| 285 |
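A construction sketch only: the root path is a placeholder and the split size is validated against `_CITYSCAPES_INFORMATION`, so the dataset must exist on disk. The import path assumes this repository's package layout.

```python
from dataloaders.segdatasets import CityscapesPanoptic   # assumed import path

dataset = CityscapesPanoptic(
    root='DATASET/cityscapes',        # placeholder path to the Cityscapes root
    split='train',
    is_train=True,
    crop_size=(513, 1025),
    semantic_only=False,
    ignore_stuff_in_offset=True,
    small_instance_area=4096,
    small_instance_weight=3)
# Indexing the dataset yields an image plus panoptic training targets
# produced by PanopticTargetGenerator.
```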
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/sem_seg_evaluation.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import logging
from collections import OrderedDict
import numpy as np
from fvcore.common.file_io import PathManager
from segmentation.utils import save_annotation
class SemanticEvaluator:
"""
Evaluate semantic segmentation
"""
def __init__(self, num_classes, ignore_label=255, output_dir=None, train_id_to_eval_id=None):
"""
Args:
num_classes (int): number of classes
ignore_label (int): value in semantic segmentation ground truth. Predictions for the
corresponding pixels should be ignored.
output_dir (str): an output directory to dump results.
train_id_to_eval_id (list): maps training id to evaluation id.
"""
self._output_dir = output_dir
if self._output_dir:
PathManager.mkdirs(self._output_dir)
self._num_classes = num_classes
self._ignore_label = ignore_label
self._N = num_classes + 1 # store ignore label in the last class
self._train_id_to_eval_id = train_id_to_eval_id
self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64)
self._logger = logging.getLogger(__name__)
@staticmethod
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id (list): maps training id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
def update(self, pred, gt, image_filename=None):
pred = pred.astype(np.int)
gt = gt.astype(np.int)
gt[gt == self._ignore_label] = self._num_classes
self._conf_matrix += np.bincount(
self._N * pred.reshape(-1) + gt.reshape(-1), minlength=self._N ** 2
).reshape(self._N, self._N)
if self._output_dir:
if self._train_id_to_eval_id is not None:
pred = self._convert_train_id_to_eval_id(pred, self._train_id_to_eval_id)
if image_filename is None:
raise ValueError('Need to provide filename to save.')
save_annotation(
pred, self._output_dir, image_filename, add_colormap=False)
def evaluate(self):
"""
Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
* Mean intersection-over-union averaged across classes (mIoU)
* Frequency Weighted IoU (fwIoU)
* Mean pixel accuracy averaged across classes (mACC)
* Pixel Accuracy (pACC)
"""
acc = np.zeros(self._num_classes, dtype=np.float)
iou = np.zeros(self._num_classes, dtype=np.float)
tp = self._conf_matrix.diagonal()[:-1].astype(np.float)
pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float)
class_weights = pos_gt / np.sum(pos_gt)
pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float)
acc_valid = pos_gt > 0
acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
iou_valid = (pos_gt + pos_pred) > 0
union = pos_gt + pos_pred - tp
iou[acc_valid] = tp[acc_valid] / union[acc_valid]
macc = np.sum(acc) / np.sum(acc_valid)
miou = np.sum(iou) / np.sum(iou_valid)
fiou = np.sum(iou * class_weights)
pacc = np.sum(tp) / np.sum(pos_gt)
res = {}
res["mIoU"] = 100 * miou
res["fwIoU"] = 100 * fiou
res["mACC"] = 100 * macc
res["pACC"] = 100 * pacc
results = OrderedDict({"sem_seg": res})
self._logger.info(results)
return results
|
Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/semantic.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/semantic.py",
"repo_id": "Cream",
"token_count": 1892
}
| 286 |
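A usage sketch of SemanticEvaluator with tiny dummy arrays (2 classes); the import path is assumed from this repository's package layout.

```python
import numpy as np
from segmentation.evaluation.semantic import SemanticEvaluator   # assumed path

evaluator = SemanticEvaluator(num_classes=2, ignore_label=255)
gt = np.array([[0, 0, 1, 1], [0, 255, 1, 1]])       # 255 pixels are ignored
pred = np.array([[0, 1, 1, 1], [0, 0, 1, 1]])
evaluator.update(pred, gt)
results = evaluator.evaluate()   # OrderedDict with mIoU / fwIoU / mACC / pACC
```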
# ------------------------------------------------------------------------------
# Loss functions.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import torch
import torch.nn as nn
from torch.nn import functional as F
class RegularCE(nn.Module):
"""
Regular cross entropy loss for semantic segmentation, support pixel-wise loss weight.
Arguments:
ignore_label: Integer, label to ignore.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, weight=None):
super(RegularCE, self).__init__()
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight,
ignore_index=ignore_label,
reduction='none')
def forward(self, logits, labels, **kwargs):
if 'semantic_weights' in kwargs:
pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
pixel_losses = pixel_losses.contiguous().view(-1)
else:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
mask = labels.contiguous().view(-1) != self.ignore_label
pixel_losses = pixel_losses[mask]
return pixel_losses.mean()
class OhemCE(nn.Module):
"""
Online hard example mining with cross entropy loss, for semantic segmentation.
This is widely used in PyTorch semantic segmentation frameworks.
Reference: https://github.com/HRNet/HRNet-Semantic-Segmentation/blob/1b3ae72f6025bde4ea404305d502abea3c2f5266/lib/core/criterion.py#L29
Arguments:
ignore_label: Integer, label to ignore.
threshold: Float, threshold for softmax score (of gt class), only predictions with softmax score
below this threshold will be kept.
min_kept: Integer, minimum number of pixels to be kept, it is used to adjust the
threshold value to avoid number of examples being too small.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, threshold=0.7,
min_kept=100000, weight=None):
super(OhemCE, self).__init__()
self.threshold = threshold
self.min_kept = max(1, min_kept)
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight,
ignore_index=ignore_label,
reduction='none')
def forward(self, logits, labels, **kwargs):
predictions = F.softmax(logits, dim=1)
if 'semantic_weights' in kwargs:
pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
pixel_losses = pixel_losses.contiguous().view(-1)
else:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
mask = labels.contiguous().view(-1) != self.ignore_label
tmp_labels = labels.clone()
tmp_labels[tmp_labels == self.ignore_label] = 0
# Get the score for gt class at each pixel location.
predictions = predictions.gather(1, tmp_labels.unsqueeze(1))
predictions, indices = predictions.contiguous().view(-1, )[mask].contiguous().sort()
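        # predictions now holds the sorted (ascending) gt-class softmax scores of the valid pixels;
        # raising the threshold to the min_kept-th lowest score keeps roughly min_kept hard pixels.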
min_value = predictions[min(self.min_kept, predictions.numel() - 1)]
threshold = max(min_value, self.threshold)
pixel_losses = pixel_losses[mask][indices]
pixel_losses = pixel_losses[predictions < threshold]
return pixel_losses.mean()
class DeepLabCE(nn.Module):
"""
    Hard pixel mining with cross entropy loss, for semantic segmentation.
This is used in TensorFlow DeepLab frameworks.
Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33
Arguments:
ignore_label: Integer, label to ignore.
top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its value < 1.0, only compute the loss for
the top k percent pixels (e.g., the top 20% pixels). This is useful for hard pixel mining.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None):
super(DeepLabCE, self).__init__()
self.top_k_percent_pixels = top_k_percent_pixels
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight,
ignore_index=ignore_label,
reduction='none')
def forward(self, logits, labels, **kwargs):
if 'semantic_weights' in kwargs:
pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
pixel_losses = pixel_losses.contiguous().view(-1)
else:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
if self.top_k_percent_pixels == 1.0:
return pixel_losses.mean()
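        # Hard pixel mining: keep only the top-k highest-loss pixels (e.g. the hardest 20%
        # when top_k_percent_pixels=0.2) and average the loss over them.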
top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel())
pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels)
return pixel_losses.mean()
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/loss/criterion.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/loss/criterion.py",
"repo_id": "Cream",
"token_count": 2230
}
| 287 |
# ------------------------------------------------------------------------------
# Saves raw outputs and targets.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import os
import numpy as np
import PIL.Image as img
import torch
from .save_annotation import label_to_color_image
from .flow_vis import flow_compute_color
def save_debug_images(dataset, batch_images, batch_targets, batch_outputs, out_dir=None, iteration=0,
target_keys=('semantic', 'center', 'offset', 'center_weights', 'offset_weights'),
output_keys=('semantic', 'center', 'offset'),
iteration_to_remove=-1, is_train=True):
"""Saves a mini-batch of images for debugging purpose.
- image: the augmented input image
- label: the augmented labels including
- semantic: semantic segmentation label
- center: center heatmap
- offset: offset field
- instance_ignore_mask: ignore mask
- prediction: the raw output of the model (without post-processing)
- semantic: semantic segmentation label
- center: center heatmap
- offset: offset field
Args:
dataset: The Dataset.
batch_images: Tensor of shape [N, 3, H, W], a batch of input images.
batch_targets: Dict, a dict containing batch of targets.
- semantic: a Tensor of shape [N, H, W]
- center: a Tensor of shape [N, 1, H, W]
- offset: a Tensor of shape [N, 2, H, W]
- semantic_weights: a Tensor of shape [N, H, W]
- center_weights: a Tensor of shape [N, H, W]
- offset_weights: a Tensor of shape [N, H, W]
batch_outputs: Dict, a dict containing batch of outputs.
- semantic: a Tensor of shape [N, H, W]
- center: a Tensor of shape [N, 1, H, W]
- offset: a Tensor of shape [N, 2, H, W]
out_dir: String, the directory to which the results will be saved.
iteration: Integer, iteration number.
target_keys: List, target keys to save.
output_keys: List, output keys to save.
iteration_to_remove: Integer, iteration number to remove.
is_train: Boolean, save train or test debugging image.
"""
batch_size = batch_images.size(0)
map_height = batch_images.size(2)
map_width = batch_images.size(3)
grid_image = np.zeros(
(map_height, batch_size * map_width, 3), dtype=np.uint8
)
num_targets = len(target_keys)
grid_target = np.zeros(
(num_targets * map_height, batch_size * map_width, 3), dtype=np.uint8
)
num_outputs = len(output_keys)
grid_output = np.zeros(
(num_outputs * map_height, batch_size * map_width, 3), dtype=np.uint8
)
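    # Grid layout: the batch is tiled left-to-right (one column per sample); grid_target and
    # grid_output stack one row of height map_height per target/output key.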
semantic_pred = torch.argmax(batch_outputs['semantic'].detach(), dim=1)
if 'foreground' in batch_outputs:
foreground_pred = torch.argmax(batch_outputs['foreground'].detach(), dim=1)
else:
foreground_pred = None
for i in range(batch_size):
width_begin = map_width * i
width_end = map_width * (i + 1)
# save images
image = dataset.reverse_transform(batch_images[i])
grid_image[:, width_begin:width_end, :] = image
if 'semantic' in target_keys:
# save gt semantic
gt_sem = batch_targets['semantic'][i].cpu().numpy()
gt_sem = label_to_color_image(gt_sem, dataset.create_label_colormap())
grid_target[:map_height, width_begin:width_end, :] = gt_sem
if 'center' in target_keys:
# save gt center
gt_ctr = batch_targets['center'][i].squeeze().cpu().numpy()
gt_ctr = gt_ctr[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3))
gt_ctr = gt_ctr.clip(0, 255)
# gt_ctr = 0.7 * gt_ctr + (1 - 0.3) * image
grid_target[map_height:2 * map_height, width_begin:width_end, :] = gt_ctr
if 'offset' in target_keys:
# save gt offset
gt_off = batch_targets['offset'][i].permute(1, 2, 0).cpu().numpy()
gt_off = flow_compute_color(gt_off[:, :, 1], gt_off[:, :, 0])
grid_target[2 * map_height:3 * map_height, width_begin:width_end, :] = gt_off
if 'semantic_weights' in target_keys:
# save ignore mask
gt_ign = batch_targets['semantic_weights'][i].cpu().numpy()
gt_ign = gt_ign[:, :, None] / np.max(gt_ign) * 255
gt_ign = np.tile(gt_ign, (1, 1, 3))
grid_target[3 * map_height:4 * map_height, width_begin:width_end, :] = gt_ign
if 'center_weights' in target_keys:
# save ignore mask
gt_ign = batch_targets['center_weights'][i].cpu().numpy()
gt_ign = gt_ign[:, :, None] * 255
gt_ign = np.tile(gt_ign, (1, 1, 3))
grid_target[4 * map_height:5 * map_height, width_begin:width_end, :] = gt_ign
if 'offset_weights' in target_keys:
# save ignore mask
gt_ign = batch_targets['offset_weights'][i].cpu().numpy()
gt_ign = gt_ign[:, :, None] * 255
gt_ign = np.tile(gt_ign, (1, 1, 3))
grid_target[5 * map_height:6 * map_height, width_begin:width_end, :] = gt_ign
if 'foreground' in target_keys:
# save gt foreground
gt_fg = batch_targets['foreground'][i].cpu().numpy()
gt_fg = gt_fg[:, :, None] * 255
grid_target[6 * map_height:7 * map_height, width_begin:width_end, :] = gt_fg
if 'semantic' in output_keys:
# save pred semantic
pred_sem = semantic_pred[i].cpu().numpy()
pred_sem = label_to_color_image(pred_sem, dataset.create_label_colormap())
grid_output[:map_height, width_begin:width_end, :] = pred_sem
if 'center' in output_keys:
# save pred center
pred_ctr = batch_outputs['center'][i].detach().squeeze().cpu().numpy()
pred_ctr = pred_ctr[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3))
pred_ctr = pred_ctr.clip(0, 255)
# pred_ctr = 0.7 * pred_ctr + (1 - 0.3) * image
grid_output[map_height:2 * map_height, width_begin:width_end, :] = pred_ctr
if 'offset' in output_keys:
# save pred offset
pred_ctr = batch_outputs['offset'][i].detach().permute(1, 2, 0).cpu().numpy()
pred_ctr = flow_compute_color(pred_ctr[:, :, 1], pred_ctr[:, :, 0])
grid_output[2 * map_height:3 * map_height, width_begin:width_end, :] = pred_ctr
if 'foreground' in output_keys:
# save pred foreground
if foreground_pred is not None:
pred_fg = foreground_pred[i].cpu().numpy()
pred_fg = pred_fg[:, :, None] * 255
grid_output[3 * map_height:4 * map_height, width_begin:width_end, :] = pred_fg
if out_dir is not None:
if is_train:
pil_image = img.fromarray(grid_image.astype(dtype=np.uint8))
with open('%s/%s_%d.png' % (out_dir, 'debug_batch_images', iteration), mode='wb') as f:
pil_image.save(f, 'PNG')
pil_image = img.fromarray(grid_target.astype(dtype=np.uint8))
with open('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', iteration), mode='wb') as f:
pil_image.save(f, 'PNG')
pil_image = img.fromarray(grid_output.astype(dtype=np.uint8))
with open('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', iteration), mode='wb') as f:
pil_image.save(f, 'PNG')
else:
pil_image = img.fromarray(grid_image.astype(dtype=np.uint8))
with open('%s/%s_%d.png' % (out_dir, 'debug_test_images', iteration), mode='wb') as f:
pil_image.save(f, 'PNG')
if grid_target.size:
pil_image = img.fromarray(grid_target.astype(dtype=np.uint8))
with open('%s/%s_%d.png' % (out_dir, 'debug_test_targets', iteration), mode='wb') as f:
pil_image.save(f, 'PNG')
pil_image = img.fromarray(grid_output.astype(dtype=np.uint8))
with open('%s/%s_%d.png' % (out_dir, 'debug_test_outputs', iteration), mode='wb') as f:
pil_image.save(f, 'PNG')
if is_train:
if iteration_to_remove >= 0:
if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_images', iteration_to_remove)):
os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_images', iteration_to_remove))
if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', iteration_to_remove)):
os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', iteration_to_remove))
if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', iteration_to_remove)):
os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', iteration_to_remove))
# 0 is a special iter
if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_images', 0)):
os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_images', 0))
if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', 0)):
os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', 0))
if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', 0)):
os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', 0))
|
Cream/CDARTS/CDARTS_segmentation/segmentation/utils/debug.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/utils/debug.py",
"repo_id": "Cream",
"token_count": 4617
}
| 288 |
from collections import namedtuple
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
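# Convention: each entry of `normal`/`reduce` is a pair (operation name, index of the input node);
# `normal_concat`/`reduce_concat` list the intermediate nodes whose outputs are concatenated to form the cell output.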
PRIMITIVES = [
'skip',
'conv',
'conv_di',
'conv_2x',
'conv_2x_di',
]
NASNet = Genotype(
normal = [
('sep_conv_5x5', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 0),
('sep_conv_3x3', 0),
('avg_pool_3x3', 1),
('skip_connect', 0),
('avg_pool_3x3', 0),
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('skip_connect', 1),
],
normal_concat = [2, 3, 4, 5, 6],
reduce = [
('sep_conv_5x5', 1),
('sep_conv_7x7', 0),
('max_pool_3x3', 1),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('sep_conv_5x5', 0),
('skip_connect', 3),
('avg_pool_3x3', 2),
('sep_conv_3x3', 2),
('max_pool_3x3', 1),
],
reduce_concat = [4, 5, 6],
)
AmoebaNet = Genotype(
normal = [
('avg_pool_3x3', 0),
('max_pool_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 2),
('sep_conv_3x3', 0),
('avg_pool_3x3', 3),
('sep_conv_3x3', 1),
('skip_connect', 1),
('skip_connect', 0),
('avg_pool_3x3', 1),
],
normal_concat = [4, 5, 6],
reduce = [
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('max_pool_3x3', 0),
('sep_conv_7x7', 2),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('conv_7x1_1x7', 0),
('sep_conv_3x3', 5),
],
reduce_concat = [3, 4, 6]
)
DARTS_V1 = Genotype(normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])
DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
DARTS = DARTS_V2
|
Cream/CDARTS/CDARTS_segmentation/tools/utils/genotypes.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/utils/genotypes.py",
"repo_id": "Cream",
"token_count": 1235
}
| 289 |
_BASE_: ../Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml
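# ADE20K semantic-segmentation config: extends the Cityscapes Panoptic-DeepLab base config and trains with 512x512 crops.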
MODEL:
WEIGHTS: "detectron2://DeepLab/R-52.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
BACKBONE:
NAME: "build_resnet_deeplab_backbone"
RESNETS:
DEPTH: 50
NORM: "SyncBN"
RES5_MULTI_GRID: [1, 2, 4]
STEM_TYPE: "deeplab"
STEM_OUT_CHANNELS: 128
STRIDE_IN_1X1: False
SEM_SEG_HEAD:
NUM_CLASSES: 133
LOSS_TOP_K: 1.0
USE_DEPTHWISE_SEPARABLE_CONV: True
PANOPTIC_DEEPLAB:
STUFF_AREA: 4096
NMS_KERNEL: 41
SIZE_DIVISIBILITY: 640
USE_DEPTHWISE_SEPARABLE_CONV: True
DATASETS:
TRAIN: ("ade20k_sem_seg_train",)
TEST: ("ade20k_sem_seg_val",)
SOLVER:
BASE_LR: 0.0005
MAX_ITER: 200000000
IMS_PER_BATCH: 32
INPUT:
FORMAT: "RGB"
GAUSSIAN_SIGMA: 8
MIN_SIZE_TRAIN: (256, 320, 352, 416, 448, 512, 576, 608, 672, 704, 768, 832, 864, 928, 960, 1024)
MIN_SIZE_TRAIN_SAMPLING: "choice"
MIN_SIZE_TEST: 512
MAX_SIZE_TRAIN: 1024
MAX_SIZE_TEST: 512
CROP:
ENABLED: True
TYPE: "absolute"
SIZE: (512, 512)
|
Cream/CDARTS/CDARTS_segmentation/train/configs/ADE20K/512.yaml/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/configs/ADE20K/512.yaml",
"repo_id": "Cream",
"token_count": 565
}
| 290 |
import numpy as np
try:
from utils.darts_utils import compute_latency_ms_tensorrt as compute_latency
print("use TensorRT for latency test")
except Exception:
from utils.darts_utils import compute_latency_ms_pytorch as compute_latency
print("use PyTorch for latency test")
import torch
import torch.nn as nn
import os.path as osp
latency_lookup_table = {}
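# Memoizes measured latencies keyed by module type and input shape; forward_latency() fills it
# lazily and persists it to latency_lookup_table.npy.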
# table_file_name = "latency_lookup_table.npy"
# if osp.isfile(table_file_name):
# latency_lookup_table = np.load(table_file_name).item()
import torch.nn.functional as F
from collections import OrderedDict
from layers import NaiveSyncBatchNorm
from operations import ConvNorm
from att_sa import Self_Attn
BatchNorm2d = NaiveSyncBatchNorm
class ConvBnRelu(nn.Module):
def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
groups=1, has_bn=True, norm_layer=nn.BatchNorm2d, bn_eps=1e-5,
has_relu=True, inplace=True, has_bias=False):
super(ConvBnRelu, self).__init__()
groups = 1
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
stride=stride, padding=pad,
dilation=dilation, groups=groups, bias=has_bias)
self.has_bn = has_bn
if self.has_bn:
self.bn = norm_layer(out_planes, eps=bn_eps)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
class SeparableConvBnRelu(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size=1, stride=1, padding=0, dilation=1,
has_relu=True, norm_layer=nn.BatchNorm2d):
super(SeparableConvBnRelu, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
padding, dilation, groups=in_channels,
bias=False)
self.bn = norm_layer(in_channels)
self.point_wise_cbr = ConvBnRelu(in_channels, out_channels, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=has_relu, has_bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.bn(x)
x = self.point_wise_cbr(x)
return x
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
inputs = inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
inputs = inputs.view(in_size[0], in_size[1], 1, 1)
return inputs
class SELayer(nn.Module):
def __init__(self, in_planes, out_planes, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_planes, out_planes // reduction),
nn.ReLU(inplace=True),
nn.Linear(out_planes // reduction, out_planes),
nn.Sigmoid()
)
self.out_planes = out_planes
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, self.out_planes, 1, 1)
return y
# For DFN
class ChannelAttention(nn.Module):
def __init__(self, in_planes, out_planes, reduction):
super(ChannelAttention, self).__init__()
self.channel_attention = SELayer(in_planes, out_planes, reduction)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], 1)
        channel_attention = self.channel_attention(fm)
        fm = x1 * channel_attention + x2
return fm
class BNRefine(nn.Module):
def __init__(self, in_planes, out_planes, ksize, has_bias=False,
has_relu=False, norm_layer=nn.BatchNorm2d, bn_eps=1e-5):
super(BNRefine, self).__init__()
self.conv_bn_relu = ConvBnRelu(in_planes, out_planes, ksize, 1,
ksize // 2, has_bias=has_bias,
norm_layer=norm_layer, bn_eps=bn_eps)
self.conv_refine = nn.Conv2d(out_planes, out_planes, kernel_size=ksize,
stride=1, padding=ksize // 2, dilation=1,
bias=has_bias)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
t = self.conv_bn_relu(x)
t = self.conv_refine(t)
if self.has_relu:
return self.relu(t + x)
return t + x
class RefineResidual(nn.Module):
def __init__(self, in_planes, out_planes, ksize, has_bias=False,
has_relu=False, norm_layer=nn.BatchNorm2d, bn_eps=1e-5):
super(RefineResidual, self).__init__()
self.conv_1x1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=1, padding=0, dilation=1,
bias=has_bias)
self.cbr = ConvBnRelu(out_planes, out_planes, ksize, 1,
ksize // 2, has_bias=has_bias,
norm_layer=norm_layer, bn_eps=bn_eps)
self.conv_refine = nn.Conv2d(out_planes, out_planes, kernel_size=ksize,
stride=1, padding=ksize // 2, dilation=1,
bias=has_bias)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv_1x1(x)
t = self.cbr(x)
t = self.conv_refine(t)
if self.has_relu:
return self.relu(t + x)
return t + x
# For BiSeNet
class AttentionRefinement(nn.Module):
def __init__(self, in_planes, out_planes,
norm_layer=nn.BatchNorm2d):
super(AttentionRefinement, self).__init__()
self.conv_3x3 = ConvBnRelu(in_planes, out_planes, 3, 1, 1,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvBnRelu(out_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=False, has_bias=False),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.channel_attention(fm)
fm = fm * fm_se
return fm
class FeatureFusion(nn.Module):
def __init__(self, in_planes, out_planes, reduction=1, Fch=16, scale=4, branch=2, norm_layer=nn.BatchNorm2d):
super(FeatureFusion, self).__init__()
self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
# self.channel_attention = nn.Sequential(
# nn.AdaptiveAvgPool2d(1),
# ConvBnRelu(out_planes, out_planes // reduction, 1, 1, 0,
# has_bn=False, norm_layer=norm_layer,
# has_relu=True, has_bias=False),
# ConvBnRelu(out_planes // reduction, out_planes, 1, 1, 0,
# has_bn=False, norm_layer=norm_layer,
# has_relu=False, has_bias=False),
# nn.Sigmoid()
# )
self._Fch = Fch
self._scale = scale
self._branch = branch
@staticmethod
def _latency(h, w, C_in, C_out):
layer = FeatureFusion(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
name = "ff_H%d_W%d_C%d"%(size[1], size[2], size[0])
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, size
else:
print("not found in latency_lookup_table:", name)
latency = FeatureFusion._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._scale*self._Fch*self._branch)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, size
def forward(self, fm):
# fm is already a concatenation of multiple scales
fm = self.conv_1x1(fm)
return fm
# fm_se = self.channel_attention(fm)
# output = fm + fm * fm_se
# return output
class Head(nn.Module):
def __init__(self, in_planes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(Head, self).__init__()
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=mid_planes, proj_factor=4, downsample=False)
# self.conv_3x3 = ConvBnRelu(in_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = Head(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "head_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x):
# fm = self.conv_3x3(x)
fm = self.att_sa(x)
output = self.conv_1x1(fm)
return output
class Decoder(nn.Module):
def __init__(self, in_planes, low_level_inplanes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(Decoder, self).__init__()
C_low = 48
self.feature_projection = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False)
# in_planes = in_planes + C_low
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=mid_planes, proj_factor=4, downsample=False)
self.conv_3x3 = ConvBnRelu(mid_planes + C_low, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = Head(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "head_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x, low_level_feat):
low_level_feat = self.feature_projection(low_level_feat)
x = self.att_sa(x)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False)
x = torch.cat((x, low_level_feat), dim=1)
# x = self.att_sa(x)
x = self.conv_3x3(x)
output = self.conv_1x1(x)
return output
class BasicResidual_downup_2x(nn.Module):
def __init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1):
super(BasicResidual_downup_2x, self).__init__()
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
groups = 1
self.C_in = C_in
self.C_out = C_out
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
assert stride in [1, 2]
if self.stride == 2: self.dilation = 1
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False)
# self.bn1 = nn.BatchNorm2d(C_out)
self.bn1 = BatchNorm2d(C_out)
self.conv2 = nn.Conv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False)
# self.bn2 = nn.BatchNorm2d(C_out)
self.bn2 = BatchNorm2d(C_out)
if self.stride==1:
self.downsample = nn.Sequential(
nn.Conv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False),
BatchNorm2d(C_out)
)
def forward(self, x):
out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=False)
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.stride == 1:
out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=False)
out = out + self.downsample(x)
out = self.relu(out)
return out
class PanopticHead(nn.Module):
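    # Panoptic-DeepLab style head: one decoder branch predicts semantic logits, a second branch
    # predicts a class-agnostic instance-center heatmap and a 2-channel offset field used to
    # group pixels into instances.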
def __init__(self, in_planes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(PanopticHead, self).__init__()
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
decoder2_planes = mid_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=in_planes, proj_factor=4, downsample=False)
self.decoder1 = BasicResidual_downup_2x(in_planes, mid_planes, 3, 1, 1)
self.conv_3x3 = ConvBnRelu(mid_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
# self.att_sa2 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
self.decoder2 = BasicResidual_downup_2x(in_planes, decoder2_planes, 3, 1, 1)
self.center_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.center_conv_1x1 = nn.Conv2d(mid_planes, 1, kernel_size=1, stride=1, padding=0)
self.offset_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.offset_conv_1x1 = nn.Conv2d(mid_planes, 2, kernel_size=1, stride=1, padding=0)
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = PanopticHead(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "panoptichead%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x):
output_dict = OrderedDict()
xs = self.att_sa(x)
# semantic = self.att_sa1(x)
semantic = self.decoder1(xs)
semantic = self.conv_3x3(semantic)
semantic = self.conv_1x1(semantic)
# other = self.att_sa2(x)
other = self.decoder2(x)
center = self.center_conv_3x3(other)
center = self.center_conv_1x1(center)
offset = self.offset_conv_3x3(other)
offset = self.offset_conv_1x1(offset)
output_dict['semantic'] = semantic
output_dict['center'] = center
output_dict['offset'] = offset
return output_dict
class PanopticHeadDecoder(nn.Module):
def __init__(self, in_planes, low_level_inplanes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(PanopticHeadDecoder, self).__init__()
C_low = 48
self.feature_projection = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False)
self.feature_projection_sem = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False)
# in_planes = in_planes + C_low
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
decoder2_planes = mid_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=in_planes, proj_factor=4, downsample=False)
# self.att_sa1 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
self.decoder1 = BasicResidual_downup_2x(in_planes+C_low, mid_planes, 3, 1, 1)
self.conv_3x3 = ConvBnRelu(mid_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
# self.att_sa2 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
self.decoder2 = BasicResidual_downup_2x(in_planes+C_low, decoder2_planes, 3, 1, 1)
self.center_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.center_conv_1x1 = nn.Conv2d(mid_planes, 1, kernel_size=1, stride=1, padding=0)
self.offset_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.offset_conv_1x1 = nn.Conv2d(mid_planes, 2, kernel_size=1, stride=1, padding=0)
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = PanopticHead(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "panopticheaddecoder%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x, low_level_feat):
output_dict = OrderedDict()
xs = self.att_sa(x)
low_level_feat_sem = self.feature_projection_sem(low_level_feat)
xs = F.interpolate(xs, size=low_level_feat_sem.size()[2:], mode='bilinear', align_corners=False)
xs = torch.cat((xs, low_level_feat_sem), dim=1)
semantic = self.decoder1(xs)
semantic = self.conv_3x3(semantic)
semantic = self.conv_1x1(semantic)
low_level_feat = self.feature_projection(low_level_feat)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False)
x = torch.cat((x, low_level_feat), dim=1)
other = self.decoder2(x)
center = self.center_conv_3x3(other)
center = self.center_conv_1x1(center)
offset = self.offset_conv_3x3(other)
offset = self.offset_conv_1x1(offset)
output_dict['semantic'] = semantic
output_dict['center'] = center
output_dict['offset'] = offset
return output_dict
|
Cream/CDARTS/CDARTS_segmentation/train/seg_oprs.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/seg_oprs.py",
"repo_id": "Cream",
"token_count": 11850
}
| 291 |
import math
import torch
import random
import numpy as np
import torch.distributed as dist
from torch.utils.data import Sampler
from PIL import Image, ImageEnhance, ImageOps
class SubsetDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
shuffle (optional): If true (default), sampler will shuffle the indices
"""
def __init__(self, dataset, indices, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.indices = indices
self.num_samples = int(math.ceil(len(self.indices) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
# indices = torch.randperm(len(self.dataset), generator=g).tolist()
            indices = list(self.indices[i] for i in torch.randperm(len(self.indices), generator=g))
else:
# indices = list(range(len(self.dataset)))
indices = self.indices
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
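        # ImageNet mean/std scaled to the 0-255 range; normalization runs on the GPU inside
        # preload() on a side CUDA stream so host-to-device copies overlap with compute.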
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class ImageNetPolicy(object):
""" Randomly choose one of the best 24 Sub-policies on ImageNet.
Example:
>>> policy = ImageNetPolicy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> ImageNetPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor),
SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor),
SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor),
SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor),
SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor),
SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor),
SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor),
SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor),
SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor),
SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor),
SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment ImageNet Policy"
class CIFAR10Policy(object):
""" Randomly choose one of the best 25 Sub-policies on CIFAR10.
Example:
>>> policy = CIFAR10Policy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> CIFAR10Policy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor),
SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor),
SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor),
SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor),
SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor),
SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor),
SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor),
SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor),
SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor),
SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor),
SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor),
SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor),
SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor),
SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor),
SubPolicy(0.2, "equalize", 8, 0.6, "equalize", 4, fillcolor),
SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor),
SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor),
SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor),
SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor),
SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor),
SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment CIFAR10 Policy"
class SVHNPolicy(object):
""" Randomly choose one of the best 25 Sub-policies on SVHN.
Example:
>>> policy = SVHNPolicy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> SVHNPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor),
SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor),
SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor),
SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor),
SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor),
SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor),
SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor),
SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor),
SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor),
SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor),
SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor),
SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor),
SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor),
SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor),
SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor),
SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor),
SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor),
SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor),
SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor),
SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor),
SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor),
SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment SVHN Policy"
class SubPolicy(object):
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
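        # A sub-policy applies operation1 with probability p1 at magnitude
        # ranges[operation1][magnitude_idx1], then operation2 with probability p2 at its own magnitude.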
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
# from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
func = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img)
}
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
self.p2 = p2
self.operation2 = func[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
if random.random() < self.p1: img = self.operation1(img, self.magnitude1)
if random.random() < self.p2: img = self.operation2(img, self.magnitude2)
return img
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
def mixup_data(x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
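# Sketch of how the two helpers are typically combined inside a training step:
#   mixed_x, y_a, y_b, lam = mixup_data(images, targets, alpha=1.0)
#   loss = mixup_criterion(criterion, model(mixed_x), y_a, y_b, lam)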
|
Cream/CDARTS/benchmark201/datasets/data_utils.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/datasets/data_utils.py",
"repo_id": "Cream",
"token_count": 8183
}
| 292 |
import os
import argparse
parser = argparse.ArgumentParser(description='parse accuracies from a search/eval log')
parser.add_argument('path', type=str, default='train',
                    help='path to the log file to parse')
args = parser.parse_args()
def main():
file_path = args.path
info = {}
cnt = 0
dataset_idx = 0
dataset = ['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120']
acc = [['train', 'valid'], ['train', 'test'], ['train', 'valid', 'test'], ['train', 'valid', 'test']]
with open(file_path, 'r') as f:
for line in f:
line = line.split(' ')
if 'datasets' in line:
cnt = cnt + 1
info[cnt] = {}
dataset_idx = 0
if line[0] in dataset:
top1 = []
info[cnt][line[0]] = {}
for item in line:
if '%' in item:
item = item.split("%")[0]
top1.append(float(item))
if len(top1) > 0:
for value, name in zip(top1, acc[dataset_idx]):
info[cnt][line[0]][name] = value
dataset_idx = dataset_idx + 1
for key in info.keys():
print(key, info[key])
if __name__ == '__main__':
main()
|
Cream/CDARTS/benchmark201/utils/get_info.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/utils/get_info.py",
"repo_id": "Cream",
"token_count": 690
}
| 293 |
import torch
import torch.nn as nn
import torch.nn.functional as F
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
mse = nn.MSELoss()
smooth_l1 = nn.SmoothL1Loss()
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
def Loss_interactive(outputs, teacher_outputs, T=2, interactive_type=0):
if interactive_type==0:
loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1), F.softmax(teacher_outputs/T, dim=1))
elif interactive_type==1:
# Cosine distance
loss = -torch.mean(cos(outputs, teacher_outputs))
elif interactive_type==2:
loss = mse(outputs, teacher_outputs)
elif interactive_type == 3:
loss = smooth_l1(outputs, teacher_outputs)
else:
raise Exception("Wrong interactive type!")
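    # Scale by T^2 so gradient magnitudes of the temperature-softened loss stay comparable
    # across temperatures (standard knowledge-distillation practice).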
return loss * (T * T)
|
Cream/CDARTS/lib/models/loss.py/0
|
{
"file_path": "Cream/CDARTS/lib/models/loss.py",
"repo_id": "Cream",
"token_count": 573
}
| 294 |
import os
import time
import torch
import torchvision
from collections import OrderedDict
from lib.utils.util import AverageMeter, accuracy, reduce_tensor
# retrain function
def train_epoch(
epoch, model, loader, optimizer, loss_fn, cfg,
lr_scheduler=None, saver=None, output_dir='', use_amp=False,
model_ema=None, logger=None, writer=None, local_rank=0):
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
prec1_m = AverageMeter()
prec5_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
optimizer.zero_grad()
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
input = input.cuda()
target = target.cuda()
output = model(input)
loss = loss_fn(output, target)
prec1, prec5 = accuracy(output, target, topk=(1, 5))
if cfg.NUM_GPU > 1:
reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU)
prec1 = reduce_tensor(prec1, cfg.NUM_GPU)
prec5 = reduce_tensor(prec5, cfg.NUM_GPU)
else:
reduced_loss = loss.data
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
prec1_m.update(prec1.item(), output.size(0))
prec5_m.update(prec5.item(), output.size(0))
if model_ema is not None:
model_ema.update(model)
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % cfg.LOG_INTERVAL == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if local_rank == 0:
logger.info(
'Train: {} [{:>4d}/{}] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e}'
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx,
len(loader),
loss=losses_m,
top1=prec1_m,
top5=prec5_m,
batch_time=batch_time_m,
rate=input.size(0) *
cfg.NUM_GPU /
batch_time_m.val,
rate_avg=input.size(0) *
cfg.NUM_GPU /
batch_time_m.avg,
lr=lr,
data_time=data_time_m))
                writer.add_scalar(
                    'Loss/train', losses_m.avg,
                    epoch * len(loader) + batch_idx)
                writer.add_scalar(
                    'Accuracy/train', prec1_m.avg,
                    epoch * len(loader) + batch_idx)
                writer.add_scalar(
                    'Learning_Rate', optimizer.param_groups[0]['lr'],
                    epoch * len(loader) + batch_idx)
if cfg.SAVE_IMAGES and output_dir:
torchvision.utils.save_image(
input, os.path.join(
output_dir, 'train-batch-%d.jpg' %
batch_idx), padding=0, normalize=True)
if saver is not None and cfg.RECOVERY_INTERVAL and (
last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0):
saver.save_recovery(
model,
optimizer,
cfg,
epoch,
model_ema=model_ema,
use_amp=use_amp,
batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(
num_updates=num_updates,
metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
|
Cream/Cream/lib/core/retrain.py/0
|
{
"file_path": "Cream/Cream/lib/core/retrain.py",
"repo_id": "Cream",
"token_count": 2740
}
| 295 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import sys
import logging
import argparse
from copy import deepcopy
import torch
import torch.nn as nn
from torch import optim as optim
from thop import profile, clever_format
from timm.utils import *
from lib.config import cfg
def get_path_acc(model, path, val_loader, args, val_iters=50):
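    # Evaluates one sampled sub-network (architecture path) on at most val_iters validation
    # batches and returns its (top-1, top-5) accuracy.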
prec1_m = AverageMeter()
prec5_m = AverageMeter()
with torch.no_grad():
for batch_idx, (input, target) in enumerate(val_loader):
if batch_idx >= val_iters:
break
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
output = model(input, path)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(
0,
reduce_factor,
reduce_factor).mean(
dim=2)
target = target[0:target.size(0):reduce_factor]
prec1, prec5 = accuracy(output, target, topk=(1, 5))
torch.cuda.synchronize()
prec1_m.update(prec1.item(), output.size(0))
prec5_m.update(prec5.item(), output.size(0))
return (prec1_m.avg, prec5_m.avg)
def get_logger(file_path):
""" Make python logger """
log_format = '%(asctime)s | %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
logger = logging.getLogger('')
formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')
file_handler = logging.FileHandler(file_path)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def add_weight_decay_supernet(model, args, weight_decay=1e-5, skip_list=()):
decay = []
no_decay = []
meta_layer_no_decay = []
meta_layer_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(
".bias") or name in skip_list:
if 'meta_layer' in name:
meta_layer_no_decay.append(param)
else:
no_decay.append(param)
else:
if 'meta_layer' in name:
meta_layer_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0., 'lr': args.lr},
{'params': decay, 'weight_decay': weight_decay, 'lr': args.lr},
{'params': meta_layer_no_decay, 'weight_decay': 0., 'lr': args.meta_lr},
{'params': meta_layer_decay, 'weight_decay': 0, 'lr': args.meta_lr},
]
def create_optimizer_supernet(args, model, has_apex, filter_bias_and_bn=True):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if 'adamw' in opt_lower or 'radam' in opt_lower:
weight_decay /= args.lr
if weight_decay and filter_bias_and_bn:
parameters = add_weight_decay_supernet(model, args, weight_decay)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(
), 'APEX and CUDA required for fused optimizers'
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
optimizer = optim.SGD(
parameters,
momentum=args.momentum,
weight_decay=weight_decay,
nesterov=True)
elif opt_lower == 'momentum':
optimizer = optim.SGD(
parameters,
momentum=args.momentum,
weight_decay=weight_decay,
nesterov=False)
elif opt_lower == 'adam':
optimizer = optim.Adam(
parameters, weight_decay=weight_decay, eps=args.opt_eps)
    else:
        raise ValueError('Invalid optimizer: {}'.format(opt_lower))
return optimizer
def convert_lowercase(cfg):
keys = cfg.keys()
lowercase_keys = [key.lower() for key in keys]
values = [cfg.get(key) for key in keys]
for lowercase_key, value in zip(lowercase_keys, values):
cfg.setdefault(lowercase_key, value)
return cfg
def parse_config_args(exp_name):
parser = argparse.ArgumentParser(description=exp_name)
parser.add_argument('--cfg', type=str,
default='../experiments/workspace/retrain/retrain.yaml',
help='configuration of cream')
parser.add_argument('--local_rank', type=int, default=0,
help='local_rank')
args = parser.parse_args()
cfg.merge_from_file(args.cfg)
converted_cfg = convert_lowercase(cfg)
return args, converted_cfg
def get_model_flops_params(model, input_size=(1, 3, 224, 224)):
input = torch.randn(input_size)
macs, params = profile(deepcopy(model), inputs=(input,), verbose=False)
macs, params = clever_format([macs, params], "%.3f")
return macs, params
def cross_entropy_loss_with_soft_target(pred, soft_target):
logsoftmax = nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
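# Illustrative sketch (not called anywhere in this file): how the soft-target
# loss above is typically combined with a teacher network for distillation.
# `student_logits`, `teacher_logits` and `temperature` are hypothetical inputs.
def _soft_target_loss_example(student_logits, teacher_logits, temperature=1.0):
    soft_target = nn.functional.softmax(teacher_logits / temperature, dim=1)
    return cross_entropy_loss_with_soft_target(student_logits, soft_target)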
def create_supernet_scheduler(cfg, optimizer):
ITERS = cfg.EPOCHS * \
(1280000 / (cfg.NUM_GPU * cfg.DATASET.BATCH_SIZE))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: (
cfg.LR - step / ITERS) if step <= ITERS else 0, last_epoch=-1)
return lr_scheduler, cfg.EPOCHS
|
Cream/Cream/lib/utils/util.py/0
|
{
"file_path": "Cream/Cream/lib/utils/util.py",
"repo_id": "Cream",
"token_count": 2691
}
| 296 |
'''
Build training/testing datasets
'''
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
import torch
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
try:
from timm.data import TimmDatasetTar
except ImportError:
# for higher version of timm
from timm.data import ImageDataset as TimmDatasetTar
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(
root, f'{"train" if train else "val"}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(
args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
prefix = 'train' if is_train else 'val'
data_dir = os.path.join(args.data_path, f'{prefix}.tar')
if os.path.exists(data_dir):
dataset = TimmDatasetTar(data_dir, transform=transform)
else:
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'IMNETEE':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 10
elif args.data_set == 'FLOWERS':
root = os.path.join(args.data_path, 'train' if is_train else 'test')
dataset = datasets.ImageFolder(root, transform=transform)
if is_train:
dataset = torch.utils.data.ConcatDataset(
[dataset for _ in range(100)])
nb_classes = 102
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if args.finetune:
t.append(
transforms.Resize((args.input_size, args.input_size),
interpolation=3)
)
else:
if resize_im:
size = int((256 / 224) * args.input_size)
t.append(
# to maintain same ratio w.r.t. 224 images
transforms.Resize(size, interpolation=3),
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t)
|
Cream/EfficientViT/classification/data/datasets.py/0
|
{
"file_path": "Cream/EfficientViT/classification/data/datasets.py",
"repo_id": "Cream",
"token_count": 2417
}
| 297 |
#!/usr/bin/env bash
CONFIG=$1
GPUS=$2
PORT=${PORT:-29500}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
$(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3}
|
Cream/EfficientViT/downstream/dist_train.sh/0
|
{
"file_path": "Cream/EfficientViT/downstream/dist_train.sh",
"repo_id": "Cream",
"token_count": 108
}
| 298 |
# Mini-DeiT
This repo contains MiniViT for DeiT models (Mini-DeiT).
## Model Zoo
Model | Params. | Input | Top-1 Acc. % | Top-5 Acc. % | Download link
--- |:---:|:---:|:---:|:---:|:---:
Mini-DeiT-Ti | 3M | 224x224 | 73.0 | 91.6 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_tiny_patch16_224.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_tiny.txt)
Mini-DeiT-S | 11M | 224x224 | 80.9 | 95.6 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_small_patch16_224.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_small.txt)
Mini-DeiT-B | 44M | 224x224 | 83.2 | 96.5 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_base_patch16_224.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_base.txt)
Mini-DeiT-B| 44M | 384x384 | 84.9 | 97.2 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_base_patch16_384.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_base_384.txt)
## Usage
Create the environment:
```bash
pip install -r requirements.txt
```
Compile operations:
```bash
cd rpe_ops
python setup.py install --user
```
## Data Preparation
You can download the ImageNet-1K dataset from [`http://www.image-net.org/`](http://www.image-net.org/).
The train set and validation set should be saved as `*.tar` archives:
```
ImageNet/
├── train.tar
└── val.tar
```
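If you start from the standard folder layout, one way to create these archives (assuming `train/` and `val/` contain the usual class sub-folders) is:
```bash
cd ImageNet
tar -cf train.tar -C train .
tar -cf val.tar -C val .
```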
Our code also supports storing images as individual files as follows:
```
ImageNet/
├── train
│ ├── n01440764
│ │ ├── n01440764_10026.JPEG
│ │ ├── n01440764_10027.JPEG
...
├── val
│ ├── n01440764
│ │ ├── ILSVRC2012_val_00000293.JPEG
```
## Training
Training Mini-DeiT-Ti
```bash
python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_tiny_patch16_224 --batch-size 128 --data-path ./ImageNet --output_dir ./outputs --teacher-model regnety_160 --distillation-type soft --distillation-alpha 1.0 --drop-path 0.0
```
<details>
<summary>Training Mini-DeiT-S</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_small_patch16_224 --batch-size 128 --data-path ./ImageNet --output_dir ./outputs --teacher-model regnety_160 --distillation-type soft --distillation-alpha 1.0 --drop-path 0.0
</code></pre>
</details>
<details>
<summary>Training Mini-DeiT-B</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_base_patch16_224 --batch-size 128 --data-path ./ImageNet --output_dir ./outputs --teacher-model regnety_160 --distillation-type soft --distillation-alpha 1.0 --drop-path 0.1
</code></pre>
</details>
<details>
<summary>Finetune Mini-DeiT-B with resolution 384</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_base_patch16_384 --batch-size 32 --data-path ./ImageNet --output_dir ./outputs --finetune checkpoints/mini_deit_base_patch16_224.pth --input-size 384 --lr 5e-6 --min-lr 5e-6 --weight-decay 1e-8 --epochs 30
</code></pre>
</details>
## Evaluation
Run the following commands for evaluation:
Evaluate Mini-DeiT-Ti
```bash
python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_tiny_patch16_224 --batch-size 128 --data-path ./ImageNet --output_dir ./outputs --resume ./checkpoints/mini_deit_tiny_patch16_224.pth --eval
```
<details>
<summary>Evaluate Mini-DeiT-S</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_small_patch16_224 --batch-size 128 --data-path ./ImageNet --output_dir ./outputs --resume ./checkpoints/mini_deit_small_patch16_224.pth --eval
</code></pre>
</details>
<details>
<summary>Evaluate Mini-DeiT-B</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_base_patch16_224 --batch-size 128 --data-path ./ImageNet --output_dir ./outputs --resume ./checkpoints/mini_deit_base_patch16_224.pth --eval
</code></pre>
</details>
<details>
<summary>Evaluate Mini-DeiT-B-384</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model mini_deit_base_patch16_384 --batch-size 32 --data-path ./ImageNet --output_dir ./outputs --resume ./checkpoints/mini_deit_base_patch16_384.pth --input-size 384 --eval
</code></pre>
</details>
## Bibtex
If this repo is helpful for you, please consider citing it. Thank you! :)
```bibtex
@InProceedings{MiniViT,
title = {MiniViT: Compressing Vision Transformers With Weight Multiplexing},
author = {Zhang, Jinnian and Peng, Houwen and Wu, Kan and Liu, Mengchen and Xiao, Bin and Fu, Jianlong and Yuan, Lu},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2022},
pages = {12145-12154}
}
```
## License
Our code is based on [DeiT](https://github.com/facebookresearch/deit). Thank you!
[Apache License](./LICENSE)
|
Cream/MiniViT/Mini-DeiT/README.md/0
|
{
"file_path": "Cream/MiniViT/Mini-DeiT/README.md",
"repo_id": "Cream",
"token_count": 2032
}
| 299 |
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
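# Illustrative usage sketch (hypothetical values; in this codebase SmoothedValue
# is normally driven through MetricLogger below rather than called directly).
def _smoothed_value_example():
    meter = SmoothedValue(window_size=3, fmt='{avg:.2f}')
    for v in (1.0, 2.0, 3.0, 4.0):
        meter.update(v)
    # avg is the mean over the last 3 values (3.0), global_avg over all 4 (2.5)
    return meter.avg, meter.global_avg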
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
|
Cream/MiniViT/Mini-DeiT/utils.py/0
|
{
"file_path": "Cream/MiniViT/Mini-DeiT/utils.py",
"repo_id": "Cream",
"token_count": 3386
}
| 300 |
import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx, extensions):
images = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if has_file_allowed_extension(fname, extensions):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
def make_dataset_with_ann(ann_file, img_prefix, extensions):
images = []
with open(ann_file, "r") as f:
contents = f.readlines()
for line_str in contents:
path_contents = [c for c in line_str.split('\t')]
im_file_name = path_contents[0]
class_index = int(path_contents[1])
assert str.lower(os.path.splitext(im_file_name)[-1]) in extensions
item = (os.path.join(img_prefix, im_file_name), class_index)
images.append(item)
return images
class DatasetFolder(data.Dataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (list[string]): A list of allowed extensions.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
Attributes:
samples (list): List of (sample path, class_index) tuples
"""
def __init__(self, root, loader, extensions, ann_file='', img_prefix='', transform=None, target_transform=None,
cache_mode="no"):
# image folder mode
if ann_file == '':
_, class_to_idx = find_classes(root)
samples = make_dataset(root, class_to_idx, extensions)
# zip mode
else:
samples = make_dataset_with_ann(os.path.join(root, ann_file),
os.path.join(root, img_prefix),
extensions)
if len(samples) == 0:
raise (RuntimeError("Found 0 files in subfolders of: " + root + "\n" +
"Supported extensions are: " + ",".join(extensions)))
self.root = root
self.loader = loader
self.extensions = extensions
self.samples = samples
self.labels = [y_1k for _, y_1k in samples]
self.classes = list(set(self.labels))
self.transform = transform
self.target_transform = target_transform
self.cache_mode = cache_mode
if self.cache_mode != "no":
self.init_cache()
def init_cache(self):
assert self.cache_mode in ["part", "full"]
n_sample = len(self.samples)
global_rank = dist.get_rank()
world_size = dist.get_world_size()
samples_bytes = [None for _ in range(n_sample)]
start_time = time.time()
for index in range(n_sample):
if index % (n_sample // 10) == 0:
t = time.time() - start_time
print(f'global_rank {dist.get_rank()} cached {index}/{n_sample} takes {t:.2f}s per block')
start_time = time.time()
path, target = self.samples[index]
if self.cache_mode == "full":
samples_bytes[index] = (ZipReader.read(path), target)
elif self.cache_mode == "part" and index % world_size == global_rank:
samples_bytes[index] = (ZipReader.read(path), target)
else:
samples_bytes[index] = (path, target)
self.samples = samples_bytes
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
if isinstance(path, bytes):
img = Image.open(io.BytesIO(path))
elif is_zip_path(path):
data = ZipReader.read(path)
img = Image.open(io.BytesIO(data))
else:
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_img_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class CachedImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, ann_file='', img_prefix='', transform=None, target_transform=None,
loader=default_img_loader, cache_mode="no"):
super(CachedImageFolder, self).__init__(root, loader, IMG_EXTENSIONS,
ann_file=ann_file, img_prefix=img_prefix,
transform=transform, target_transform=target_transform,
cache_mode=cache_mode)
self.imgs = self.samples
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
image = self.loader(path)
if self.transform is not None:
img = self.transform(image)
else:
img = image
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
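# Illustrative usage sketch (hypothetical paths; the zip-mode convention with an
# annotation file and a `train.zip@/` prefix follows the Swin Transformer setup).
def _cached_image_folder_example():
    from torchvision import transforms
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    dataset = CachedImageFolder('ImageNet/', ann_file='train_map.txt',
                                img_prefix='train.zip@/', transform=transform,
                                cache_mode='no')
    return dataset[0]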
|
Cream/MiniViT/Mini-Swin/data/cached_image_folder.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/data/cached_image_folder.py",
"repo_id": "Cream",
"token_count": 3975
}
| 301 |
import os
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
import argparse
from config import get_config
def parse_option():
parser = argparse.ArgumentParser('Mini-Swin training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--load_tar', action='store_true', help='whether to load data from tar files')
# distributed training
parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
# Training
parser.add_argument('--total_train_epoch', default=-1, type=int, help='the total number of epochs for training')
parser.add_argument('--resume_weight_only', action='store_true', help='whether to only restore weight, used for initialization of multi-stage training')
parser.add_argument('--base_lr', default=-1.0, type=float, help='the base learning rate')
parser.add_argument('--weight_decay', default=-1.0, type=float, help='the weight decay value!')
parser.add_argument('--drop_path_rate', default=-1.0, type=float, help='the value for drop path rate!')
parser.add_argument('--train_224to384', action='store_true', help='whether finetuning from resolution 224 to 384')
# MiniViT - Weight Distillation
parser.add_argument('--do_distill', action='store_true', help='start distillation')
parser.add_argument('--teacher', default='', type=str, metavar='PATH', help='the path for teacher model')
parser.add_argument('--temperature', default=1.0, type=float,
help='the temperature for distillation loss')
parser.add_argument('--alpha', default=0.0, type=float, help='the weight to balance the soft label loss and ground-truth label loss')
parser.add_argument('--ar', default=1, type=int, help='The number of relative heads')
parser.add_argument('--student_layer_list', default='11', type=str, help='The index of layer in the student to be used for distillation loss')
parser.add_argument('--teacher_layer_list', default='23', type=str, help='The index of layer in the teacher to be used for distillation loss')
parser.add_argument('--attn_loss', action='store_true', help='whether to use the attention loss')
parser.add_argument('--hidden_loss', action='store_true', help='whether to use hidden loss along with the attention loss!')
parser.add_argument('--hidden_weight', default=1.0, type=float, help='the weight for hidden loss!')
parser.add_argument('--hidden_relation', action='store_true', help='whether to use the hidden relation loss!')
parser.add_argument('--qkv_weight', default=1.0, type=float, help='the weight for qkv loss!')
parser.add_argument('--is_student', action='store_true', help='if True, additional linear layers are created for hidden MSE loss')
parser.add_argument('--fit_size_c', default=-1, type=int, help='when this number is positive, then the output dimension of the linear layers created for hidden MSE loss will be set to this number')
## MiniViT - Weight Transformation
parser.add_argument('--is_sep_layernorm', action='store_true', help='whether to use separate layer normalization in each shared layer')
parser.add_argument('--is_transform_ffn', action='store_true', help='whether to use transformations for FFN')
parser.add_argument('--is_transform_heads', action='store_true', help='whether to use transformations for MSA')
args = parser.parse_args()
config = get_config(args)
return args, config
def load_checkpoint(config, model, optimizer, lr_scheduler, logger):
    logger.info(f"==============> Resuming from {config.MODEL.RESUME}....................")
if config.MODEL.RESUME.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
config.MODEL.RESUME, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
model_state = checkpoint['model']
if config.TRAIN.TRAIN_224TO384:
mnames = ['head.weight', 'head.bias'] # (cls, 1024), (cls, )
now_model_state = model.state_dict()
if mnames[-1] in model_state:
ckpt_head_bias = model_state[mnames[-1]]
if ckpt_head_bias.shape != model.head.bias.shape:
for mname in mnames:
p = model_state[mname].new_zeros(now_model_state[mname].shape)
if mname.endswith('.weight'):
trunc_normal_(p, std=.02)
elif mname.endswith('.bias'):
nn.init.constant_(p, 0)
else:
assert 0
model_state[mname] = p
# drop attn mask
for k in list(model_state.keys()):
if 'attn_mask' in k or 'relative_position_index' in k:
model_state.pop(k)
mode = 'interpolate'
for key in list(model_state.keys()):
value = model_state[key]
if 'relative_position_bias_table' in key:
l, nh = value.size()
l2, nh2 = now_model_state[key].size()
l2 = int(l2 ** 0.5)
sl = int(l ** 0.5)
if sl == 13:
pad = 5
elif sl == 27:
pad = 10
else:
assert sl in [23, 47], sl
continue
if mode == "interpolate":
# table: (L, num_heads)
value = F.interpolate(value.permute(1, 0).view(1, nh, sl, sl),size=(l2, l2), mode='bicubic') # (1, nh, l2, l2)
value = value.reshape(nh, l2 * l2).permute(1, 0)
model_state[key] = value
if config.TRAIN.TRAIN_224TO384:
model.load_state_dict(model_state, strict=False)
else:
model.load_state_dict(model_state, strict=True)
max_accuracy = 0.0
if config.EVAL_MODE or config.DISTILL.RESUME_WEIGHT_ONLY:
logger.info(f"==============> RESUME_WEIGHT_ONLY mode is on....................")
del checkpoint
torch.cuda.empty_cache()
return max_accuracy
if not config.EVAL_MODE and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
if optimizer is not None and not config.DISTILL.STOP_LOADING_OPTIMIZER:
try:
                optimizer.load_state_dict(checkpoint['optimizer'])
except:
logger.info('==============> Inconsistency occurred! Skipping loading optimizer...')
if lr_scheduler is not None:
try:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
except:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'][0])
config.defrost()
config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
config.freeze()
if 'amp' in checkpoint and config.AMP_OPT_LEVEL != "O0" and checkpoint['config'].AMP_OPT_LEVEL != "O0":
amp.load_state_dict(checkpoint['amp'])
logger.info(f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
if 'max_accuracy' in checkpoint:
max_accuracy = checkpoint['max_accuracy']
if not config.EVAL_MODE and 'epoch' in checkpoint:
config.defrost()
config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
config.freeze()
if not config.EVAL_MODE and 'epoch' in checkpoint:
if optimizer is not None and not config.DISTILL.STOP_LOADING_OPTIMIZER:
try:
logger.info('Try loading optimizer (2nd trial)')
optimizer.load_state_dict(checkpoint['optimizer'])
logger.info('=> optimizer loaded successfully')
except:
logger.info('==============> Inconsistency occurred! Skipping loading optimizer...')
del checkpoint
torch.cuda.empty_cache()
return max_accuracy
def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, logger):
save_state = {'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'max_accuracy': max_accuracy,
'epoch': epoch,
'config': config}
if config.AMP_OPT_LEVEL != "O0":
save_state['amp'] = amp.state_dict()
if lr_scheduler is not None:
        save_state['lr_scheduler'] = lr_scheduler.state_dict()
save_path = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')
logger.info(f"{save_path} saving......")
torch.save(save_state, save_path)
logger.info(f"{save_path} saved !!!")
def get_grad_norm(parameters, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
return total_norm
def auto_resume_helper(output_dir):
checkpoints = os.listdir(output_dir)
checkpoints = [ckpt for ckpt in checkpoints if ckpt.endswith('pth')]
    print(f"All checkpoints found in {output_dir}: {checkpoints}")
if len(checkpoints) > 0:
latest_checkpoint = max([os.path.join(output_dir, d) for d in checkpoints], key=os.path.getmtime)
        print(f"The latest checkpoint found: {latest_checkpoint}")
resume_file = latest_checkpoint
else:
resume_file = None
return resume_file
def reduce_tensor(tensor, n=None):
if n is None:
n = dist.get_world_size()
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt = rt / n
return rt
|
Cream/MiniViT/Mini-Swin/utils.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/utils.py",
"repo_id": "Cream",
"token_count": 4868
}
| 302 |
import torch
import torch.nn as nn
from torch.nn import functional as F
try:
import torch.distributed.nn
from torch import distributed as dist
has_distributed = True
except ImportError:
has_distributed = False
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def gather_features(
image_features,
text_features,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False
):
assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
if use_horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features = list(
all_image_features.chunk(world_size, dim=0))
gathered_text_features = list(
all_text_features.chunk(world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_image_features = torch.cat(
torch.distributed.nn.all_gather(image_features), dim=0)
all_text_features = torch.cat(
torch.distributed.nn.all_gather(text_features), dim=0)
else:
gathered_image_features = [torch.zeros_like(
image_features) for _ in range(world_size)]
gathered_text_features = [torch.zeros_like(
text_features) for _ in range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return all_image_features, all_text_features
def gather_feature(
image_features,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False
):
if use_horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features = list(
all_image_features.chunk(world_size, dim=0))
gathered_image_features[rank] = image_features
all_image_features = torch.cat(gathered_image_features, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_image_features = torch.cat(
torch.distributed.nn.all_gather(image_features), dim=0)
else:
gathered_image_features = [torch.zeros_like(
image_features) for _ in range(world_size)]
dist.all_gather(gathered_image_features, image_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features[rank] = image_features
all_image_features = torch.cat(gathered_image_features, dim=0)
return all_image_features
class ClipLoss(nn.Module):
def __init__(
self,
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=0,
world_size=1,
use_horovod=False,
):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.cache_labels = cache_labels
self.rank = rank
self.world_size = world_size
self.use_horovod = use_horovod
# cache state
self.prev_num_logits = 0
self.labels = {}
def forward(self, image_features, text_features, logit_scale):
device = image_features.device
if self.world_size > 1:
all_image_features, all_text_features = gather_features(
image_features, text_features,
self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
if self.local_loss:
logits_per_image = logit_scale * image_features @ all_text_features.T
logits_per_text = logit_scale * text_features @ all_image_features.T
else:
logits_per_image = logit_scale * all_image_features @ all_text_features.T
logits_per_text = logits_per_image.T
else:
logits_per_image = logit_scale * image_features @ text_features.T
logits_per_text = logit_scale * text_features @ image_features.T
# calculated ground-truth and cache if enabled
num_logits = logits_per_image.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
if self.cache_labels:
self.labels[device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[device]
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2
return total_loss
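# Illustrative single-process usage sketch (hypothetical feature tensors; in
# training the features come from the CLIP image/text towers and logit_scale
# is a learned scalar, typically initialised to 1 / 0.07).
def _clip_loss_example():
    loss_fn = ClipLoss(local_loss=False, gather_with_grad=False,
                       cache_labels=True, rank=0, world_size=1)
    image_features = F.normalize(torch.randn(8, 512), dim=-1)
    text_features = F.normalize(torch.randn(8, 512), dim=-1)
    logit_scale = torch.tensor(1.0 / 0.07)
    return loss_fn(image_features, text_features, logit_scale)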
|
Cream/TinyCLIP/src/open_clip/loss.py/0
|
{
"file_path": "Cream/TinyCLIP/src/open_clip/loss.py",
"repo_id": "Cream",
"token_count": 3078
}
| 303 |
from typing import Optional, Sequence, Tuple
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
class ResizeMaxSize(nn.Module):
def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
super().__init__()
if not isinstance(max_size, int):
raise TypeError(f"Size should be int. Got {type(max_size)}")
self.max_size = max_size
self.interpolation = interpolation
        self.fn = min if fn == 'min' else max
self.fill = fill
def forward(self, img):
if isinstance(img, torch.Tensor):
height, width = img.shape[:2]
else:
width, height = img.size
scale = self.max_size / float(max(height, width))
if scale != 1.0:
new_size = tuple(round(dim * scale) for dim in (height, width))
img = F.resize(img, new_size, self.interpolation)
pad_h = self.max_size - new_size[0]
pad_w = self.max_size - new_size[1]
img = F.pad(img, padding=[
pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2], fill=self.fill)
return img
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
val_keep_ratio: bool = True,
):
mean = mean or OPENAI_DATASET_MEAN
if not isinstance(mean, (list, tuple)):
mean = (mean,) * 3
std = std or OPENAI_DATASET_STD
if not isinstance(std, (list, tuple)):
std = (std,) * 3
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0),
interpolation=InterpolationMode.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
if resize_longest_max:
transforms = [
ResizeMaxSize(image_size, fill=fill_color)
]
else:
if val_keep_ratio:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
else:
transforms = [
Resize((image_size, image_size),
interpolation=InterpolationMode.BICUBIC),
]
transforms.extend([
_convert_to_rgb,
ToTensor(),
normalize,
])
return Compose(transforms)
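# Illustrative usage sketch (224 is an assumed resolution; mean/std default to
# the OpenAI CLIP statistics imported from constants above).
def _image_transform_example():
    train_preprocess = image_transform(224, is_train=True)
    val_preprocess = image_transform(224, is_train=False, resize_longest_max=True)
    return train_preprocess, val_preprocess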
|
Cream/TinyCLIP/src/open_clip/transform.py/0
|
{
"file_path": "Cream/TinyCLIP/src/open_clip/transform.py",
"repo_id": "Cream",
"token_count": 2065
}
| 304 |
import numpy as np
def assign_learning_rate(optimizer, new_lr):
if isinstance(optimizer, list):
for opt in optimizer:
assign_learning_rate(opt, new_lr)
else:
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def _warmup_lr(base_lr, warmup_length, step):
return base_lr * (step + 1) / warmup_length
def cosine_lr(optimizer, base_lr, warmup_length, steps):
def _lr_adjuster(step):
if step < warmup_length:
lr = _warmup_lr(base_lr, warmup_length, step)
else:
e = step - warmup_length
es = steps - warmup_length
lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
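# Illustrative usage sketch (hypothetical optimizer and step counts): the
# returned adjuster is called once per optimization step with the global step.
def _cosine_lr_example(optimizer, total_steps=1000, warmup_steps=100, base_lr=5e-4):
    scheduler = cosine_lr(optimizer, base_lr, warmup_steps, total_steps)
    lr = base_lr
    for step in range(total_steps):
        lr = scheduler(step)  # linear warmup, then cosine decay towards 0
        # ... forward / backward / optimizer.step() would happen here ...
    return lr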
def cosine_lr_start(optimizer, base_lr, warmup_length, steps, start_steps):
def _lr_adjuster(step):
if step < start_steps:
# lr = 0.0001
lr = 0.00005
elif step < warmup_length + start_steps:
lr = _warmup_lr(base_lr, warmup_length, step - start_steps)
else:
e = step - warmup_length - start_steps
es = steps - warmup_length - start_steps
lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
def cosine_lr_start_nowarmup(optimizer, base_lr, steps, start_steps):
def _lr_adjuster(step):
if step < start_steps:
lr = 0.0001
else:
e = step - start_steps
es = steps - start_steps
lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
def step_lr(optimizer, start_steps):
def _lr_adjuster(step):
if step > start_steps:
lr = 0
assign_learning_rate(optimizer, lr)
return lr
else:
return None
return _lr_adjuster
def exponential_lr(optimizer, base_lr, warmup_length, steps, gamma, w):
def _lr_adjuster(step):
if step < warmup_length:
lr = _warmup_lr(base_lr, warmup_length, step)
else:
e = step - warmup_length
es = steps - warmup_length
# lr = base_lr * gamma ** (e / es * w)
# min_lr = base_lr * gamma ** (w)
# w = np.log(min_lr / base_lr) / np.log(gamma)
lr = base_lr * gamma ** (e / es * w)
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
|
Cream/TinyCLIP/src/training/scheduler.py/0
|
{
"file_path": "Cream/TinyCLIP/src/training/scheduler.py",
"repo_id": "Cream",
"token_count": 1286
}
| 305 |
import os
# from torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder
from torchvision.datasets import CIFAR100, CIFAR10, MNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder
try:
from torchvision.datasets import Places365
has_places365 = True
except ImportError:
has_places365 = False
try:
from torchvision.datasets import INaturalist
has_inaturalist = True
except ImportError:
has_inaturalist = False
from .dataset import IterableImageDataset, ImageDataset
_TORCH_BASIC_DS = dict(
cifar10=CIFAR10,
cifar100=CIFAR100,
mnist=MNIST,
#qmist=QMNIST,
kmnist=KMNIST,
fashion_mnist=FashionMNIST,
)
_TRAIN_SYNONYM = {'train', 'training'}
_EVAL_SYNONYM = {'val', 'valid', 'validation', 'eval', 'evaluation'}
def _search_split(root, split):
# look for sub-folder with name of split in root and use that if it exists
split_name = split.split('[')[0]
try_root = os.path.join(root, split_name)
if os.path.exists(try_root):
return try_root
def _try(syn):
for s in syn:
try_root = os.path.join(root, s)
if os.path.exists(try_root):
return try_root
return root
if split_name in _TRAIN_SYNONYM:
root = _try(_TRAIN_SYNONYM)
elif split_name in _EVAL_SYNONYM:
root = _try(_EVAL_SYNONYM)
return root
def create_dataset(
name,
root,
split='validation',
search_split=True,
class_map=None,
load_bytes=False,
is_training=False,
download=False,
batch_size=None,
repeats=0,
**kwargs
):
""" Dataset factory method
In parenthesis after each arg are the type of dataset supported for each arg, one of:
* folder - default, timm folder (or tar) based ImageDataset
* torch - torchvision based datasets
    * TFDS - Tensorflow-datasets wrapper in IterableDataset interface via IterableImageDataset
* all - any of the above
Args:
name: dataset name, empty is okay for folder based datasets
root: root folder of dataset (all)
split: dataset split (all)
        search_split: search for split specific child folder from root so one can specify
            `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
class_map: specify class -> index mapping via text file or dict (folder)
load_bytes: load data, return images as undecoded bytes (folder)
download: download dataset if not present and supported (TFDS, torch)
is_training: create dataset in train mode, this is different from the split.
            For Iterable / TFDS it enables shuffle, ignored for other datasets. (TFDS)
batch_size: batch size hint for (TFDS)
repeats: dataset repeats per iteration i.e. epoch (TFDS)
**kwargs: other args to pass to dataset
Returns:
Dataset object
"""
name = name.lower()
if name.startswith('torch/'):
name = name.split('/', 2)[-1]
torch_kwargs = dict(root=root, download=download, **kwargs)
if name in _TORCH_BASIC_DS:
ds_class = _TORCH_BASIC_DS[name]
use_train = split in _TRAIN_SYNONYM
ds = ds_class(train=use_train, **torch_kwargs)
elif name == 'inaturalist' or name == 'inat':
assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist'
target_type = 'full'
split_split = split.split('/')
if len(split_split) > 1:
target_type = split_split[0].split('_')
if len(target_type) == 1:
target_type = target_type[0]
split = split_split[-1]
if split in _TRAIN_SYNONYM:
split = '2021_train'
elif split in _EVAL_SYNONYM:
split = '2021_valid'
ds = INaturalist(version=split, target_type=target_type, **torch_kwargs)
elif name == 'places365':
assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.'
if split in _TRAIN_SYNONYM:
split = 'train-standard'
elif split in _EVAL_SYNONYM:
split = 'val'
ds = Places365(split=split, **torch_kwargs)
elif name == 'imagenet':
if split in _EVAL_SYNONYM:
split = 'val'
ds = ImageNet(split=split, **torch_kwargs)
elif name == 'image_folder' or name == 'folder':
# in case torchvision ImageFolder is preferred over timm ImageDataset for some reason
if search_split and os.path.isdir(root):
# look for split specific sub-folder in root
root = _search_split(root, split)
ds = ImageFolder(root, **kwargs)
else:
assert False, f"Unknown torchvision dataset {name}"
elif name.startswith('tfds/'):
ds = IterableImageDataset(
root, parser=name, split=split, is_training=is_training,
download=download, batch_size=batch_size, repeats=repeats, **kwargs)
else:
# FIXME support more advance split cfg for ImageFolder/Tar datasets in the future
if search_split and os.path.isdir(root):
# look for split specific sub-folder in root
root = _search_split(root, split)
ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs)
return ds
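# Illustrative usage sketch (hypothetical root paths): a torchvision CIFAR-100
# validation split and a plain folder-based dataset through the same factory.
def _create_dataset_example():
    cifar_val = create_dataset('torch/cifar100', root='./data',
                               split='validation', download=True)
    folder_train = create_dataset('', root='./imagenet', split='train',
                                  is_training=True)
    return cifar_val, folder_train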
|
Cream/TinyViT/data/augmentation/dataset_factory.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/dataset_factory.py",
"repo_id": "Cream",
"token_count": 2475
}
| 306 |
""" Real labels evaluator for ImageNet
Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159
Based on Numpy example at https://github.com/google-research/reassessed-imagenet
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import json
import numpy as np
class RealLabelsImagenet:
def __init__(self, filenames, real_json='real.json', topk=(1, 5)):
with open(real_json) as real_labels:
real_labels = json.load(real_labels)
real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)}
self.real_labels = real_labels
self.filenames = filenames
assert len(self.filenames) == len(self.real_labels)
self.topk = topk
self.is_correct = {k: [] for k in topk}
self.sample_idx = 0
def add_result(self, output):
maxk = max(self.topk)
_, pred_batch = output.topk(maxk, 1, True, True)
pred_batch = pred_batch.cpu().numpy()
for pred in pred_batch:
filename = self.filenames[self.sample_idx]
filename = os.path.basename(filename)
if self.real_labels[filename]:
for k in self.topk:
self.is_correct[k].append(
any([p in self.real_labels[filename] for p in pred[:k]]))
self.sample_idx += 1
def get_accuracy(self, k=None):
if k is None:
return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk}
else:
return float(np.mean(self.is_correct[k])) * 100
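# Illustrative usage sketch (hypothetical model/loader; `real.json` must be the
# ReaL label file from the repository linked above, and `filenames` must follow
# the order in which the validation loader yields images).
def _real_labels_example(model, loader, filenames):
    real_labels = RealLabelsImagenet(filenames, real_json='real.json', topk=(1, 5))
    for images, _ in loader:
        real_labels.add_result(model(images))
    return real_labels.get_accuracy(k=1)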
|
Cream/TinyViT/data/augmentation/real_labels.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/real_labels.py",
"repo_id": "Cream",
"token_count": 742
}
| 307 |
# --------------------------------------------------------
# TinyViT Main (train/validate)
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Add distillation with saved teacher logits
# --------------------------------------------------------
import os
import time
import random
import argparse
import datetime
from collections import defaultdict
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy
from my_meter import AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, load_pretrained, save_checkpoint,\
NativeScalerWithGradNormCount,\
auto_resume_helper, is_main_process,\
add_common_args,\
get_git_info
from models.remap_layer import RemapLayer
remap_layer_22kto1k = RemapLayer('./imagenet_1kto22k.txt')
try:
import wandb
except ImportError:
wandb = None
NORM_ITER_LEN = 100
def parse_option():
parser = argparse.ArgumentParser(
'TinyViT training and evaluation script', add_help=False)
add_common_args(parser)
args = parser.parse_args()
config = get_config(args)
return args, config
def main(args, config):
dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(
config)
logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
model = build_model(config)
if not args.only_cpu:
model.cuda()
if args.use_sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
logger.info(str(model))
optimizer = build_optimizer(config, model)
if not args.only_cpu:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False)
model_without_ddp = model.module
else:
model_without_ddp = model
loss_scaler = NativeScalerWithGradNormCount(grad_scaler_enabled=config.AMP_ENABLE)
n_parameters = sum(p.numel()
for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = build_scheduler(config, optimizer, len(
data_loader_train) // config.TRAIN.ACCUMULATION_STEPS)
if config.DISTILL.ENABLED:
        # we disable MIXUP and CUTMIX when knowledge distillation is enabled
assert len(
config.DISTILL.TEACHER_LOGITS_PATH) > 0, "Please fill in DISTILL.TEACHER_LOGITS_PATH"
criterion = SoftTargetCrossEntropy()
else:
if config.AUG.MIXUP > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.:
criterion = LabelSmoothingCrossEntropy(
smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = torch.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
if resume_file:
if config.MODEL.RESUME:
logger.warning(
f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}")
config.defrost()
config.MODEL.RESUME = resume_file
config.freeze()
logger.info(f'auto resuming from {resume_file}')
else:
logger.info(
f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')
if config.MODEL.RESUME:
max_accuracy = load_checkpoint(
config, model_without_ddp, optimizer, lr_scheduler, loss_scaler, logger)
acc1, acc5, loss = validate(args, config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if config.EVAL_MODE:
return
if config.MODEL.PRETRAINED and (not config.MODEL.RESUME):
load_pretrained(config, model_without_ddp, logger)
acc1, acc5, loss = validate(args, config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
logger.info("Start training")
start_time = time.time()
for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):
# set_epoch for dataset_train when distillation
if hasattr(dataset_train, 'set_epoch'):
dataset_train.set_epoch(epoch)
data_loader_train.sampler.set_epoch(epoch)
if config.DISTILL.ENABLED:
train_one_epoch_distill_using_saved_logits(
args, config, model, criterion, data_loader_train, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler)
else:
train_one_epoch(args, config, model, criterion,
data_loader_train, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler)
if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):
save_checkpoint(config, epoch, model_without_ddp,
max_accuracy, optimizer, lr_scheduler, loss_scaler, logger)
acc1, acc5, loss = validate(args, config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
max_accuracy = max(max_accuracy, acc1)
logger.info(f'Max accuracy: {max_accuracy:.2f}%')
if is_main_process() and args.use_wandb:
wandb.log({
f"val/acc@1": acc1,
f"val/acc@5": acc5,
f"val/loss": loss,
"epoch": epoch,
})
wandb.run.summary['epoch'] = epoch
wandb.run.summary['best_acc@1'] = max_accuracy
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time {}'.format(total_time_str))
def is_valid_grad_norm(num):
if num is None:
return False
return not bool(torch.isinf(num)) and not bool(torch.isnan(num))
def set_bn_state(config, model):
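    # when TRAIN.EVAL_BN_WHEN_TRAINING is set, keep all BatchNorm layers in eval
    # mode during training so that their running statistics are not updated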
if config.TRAIN.EVAL_BN_WHEN_TRAINING:
for m in model.modules():
if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
m.eval()
def train_one_epoch(args, config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler):
model.train()
set_bn_state(config, model)
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
scaler_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader):
normal_global_idx = epoch * NORM_ITER_LEN + \
(idx * NORM_ITER_LEN // num_steps)
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
original_targets = targets.argmax(dim=1)
else:
original_targets = targets
with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
outputs = model(samples)
loss = criterion(outputs, targets)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
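        # the loss is scaled down so that gradients accumulated over
        # ACCUMULATION_STEPS micro-batches sum to the gradient of one effective
        # batch; the optimizer is only stepped when update_grad is True below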
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(
optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=config.TRAIN.CLIP_GRAD,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0)
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.zero_grad()
lr_scheduler.step_update(
(epoch * num_steps + idx) // config.TRAIN.ACCUMULATION_STEPS)
loss_scale_value = loss_scaler.state_dict().get("scale", 1.0)
with torch.no_grad():
acc1, acc5 = accuracy(outputs, original_targets, topk=(1, 5))
acc1_meter.update(acc1.item(), targets.size(0))
acc5_meter.update(acc5.item(), targets.size(0))
torch.cuda.synchronize()
loss_meter.update(loss.item(), targets.size(0))
if is_valid_grad_norm(grad_norm):
norm_meter.update(grad_norm)
scaler_meter.update(loss_scale_value)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]['lr']
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
etas = batch_time.avg * (num_steps - idx)
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
f'loss_scale {scaler_meter.val:.4f} ({scaler_meter.avg:.4f})\t'
f'mem {memory_used:.0f}MB')
if is_main_process() and args.use_wandb:
wandb.log({
"train/acc@1": acc1_meter.val,
"train/acc@5": acc5_meter.val,
"train/loss": loss_meter.val,
"train/grad_norm": norm_meter.val,
"train/loss_scale": scaler_meter.val,
"train/lr": lr,
}, step=normal_global_idx)
epoch_time = time.time() - start
logger.info(
f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
def train_one_epoch_distill_using_saved_logits(args, config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler):
model.train()
set_bn_state(config, model)
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
scaler_meter = AverageMeter()
meters = defaultdict(AverageMeter)
start = time.time()
end = time.time()
data_tic = time.time()
num_classes = config.MODEL.NUM_CLASSES
topk = config.DISTILL.LOGITS_TOPK
for idx, ((samples, targets), (logits_index, logits_value, seeds)) in enumerate(data_loader):
normal_global_idx = epoch * NORM_ITER_LEN + \
(idx * NORM_ITER_LEN // num_steps)
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets, seeds)
original_targets = targets.argmax(dim=1)
else:
original_targets = targets
meters['data_time'].update(time.time() - data_tic)
with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
outputs = model(samples)
# recover teacher logits
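            # the teacher distribution is stored sparsely as its top-k
            # (index, value) pairs; the leftover probability mass
            # (1 - sum of the top-k values) is spread uniformly over the
            # remaining (num_classes - topk) classes before the soft-target loss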
logits_index = logits_index.long()
logits_value = logits_value.float()
logits_index = logits_index.cuda(non_blocking=True)
logits_value = logits_value.cuda(non_blocking=True)
minor_value = (1.0 - logits_value.sum(-1, keepdim=True)
) / (num_classes - topk)
minor_value = minor_value.repeat_interleave(num_classes, dim=-1)
outputs_teacher = minor_value.scatter_(-1, logits_index, logits_value)
loss = criterion(outputs, outputs_teacher)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(
optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=config.TRAIN.CLIP_GRAD,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0)
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.zero_grad()
lr_scheduler.step_update(
(epoch * num_steps + idx) // config.TRAIN.ACCUMULATION_STEPS)
loss_scale_value = loss_scaler.state_dict().get("scale", 1.0)
# compute accuracy
real_batch_size = len(original_targets)
acc1, acc5 = accuracy(outputs, original_targets, topk=(1, 5))
meters['train_acc1'].update(acc1.item(), real_batch_size)
meters['train_acc5'].update(acc5.item(), real_batch_size)
teacher_acc1, teacher_acc5 = accuracy(
outputs_teacher, original_targets, topk=(1, 5))
meters['teacher_acc1'].update(teacher_acc1.item(), real_batch_size)
meters['teacher_acc5'].update(teacher_acc5.item(), real_batch_size)
torch.cuda.synchronize()
loss_meter.update(loss.item(), real_batch_size)
if is_valid_grad_norm(grad_norm):
norm_meter.update(grad_norm)
scaler_meter.update(loss_scale_value)
batch_time.update(time.time() - end)
end = time.time()
data_tic = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]['lr']
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
etas = batch_time.avg * (num_steps - idx)
extra_meters_str = ''
for k, v in meters.items():
extra_meters_str += f'{k} {v.val:.4f} ({v.avg:.4f})\t'
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
f'loss_scale {scaler_meter.val:.4f} ({scaler_meter.avg:.4f})\t'
f'{extra_meters_str}'
f'mem {memory_used:.0f}MB')
if is_main_process() and args.use_wandb:
acc1_meter, acc5_meter = meters['train_acc1'], meters['train_acc5']
wandb.log({
"train/acc@1": acc1_meter.val,
"train/acc@5": acc5_meter.val,
"train/loss": loss_meter.val,
"train/grad_norm": norm_meter.val,
"train/loss_scale": scaler_meter.val,
"train/lr": lr,
}, step=normal_global_idx)
epoch_time = time.time() - start
extra_meters_str = f'Train-Summary: [{epoch}/{config.TRAIN.EPOCHS}]\t'
for k, v in meters.items():
v.sync()
extra_meters_str += f'{k} {v.val:.4f} ({v.avg:.4f})\t'
logger.info(extra_meters_str)
logger.info(
f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
@torch.no_grad()
def validate(args, config, data_loader, model, num_classes=1000):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (images, target) in enumerate(data_loader):
if not args.only_cpu:
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
output = model(images)
if num_classes == 1000:
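                # a checkpoint pre-trained on ImageNet-22k outputs 21841 logits;
                # remap them to the ImageNet-1k label space before scoring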
output_num_classes = output.size(-1)
if output_num_classes == 21841:
output = remap_layer_22kto1k(output)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
acc1_meter.sync()
acc5_meter.sync()
logger.info(
f' The number of validation samples is {int(acc1_meter.count)}')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def throughput(data_loader, model, logger):
# we follow the throughput measurement of LeViT repo (https://github.com/facebookresearch/LeViT/blob/main/speed_test.py)
model.eval()
T0, T1 = 10, 60
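    # run warm-up iterations for T0 seconds, then measure for roughly T1 seconds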
images, _ = next(iter(data_loader))
batch_size, _, H, W = images.shape
inputs = torch.randn(batch_size, 3, H, W).cuda(non_blocking=True)
# trace model to avoid python overhead
model = torch.jit.trace(model, inputs)
torch.cuda.empty_cache()
torch.cuda.synchronize()
start = time.time()
with torch.cuda.amp.autocast():
while time.time() - start < T0:
model(inputs)
timing = []
torch.cuda.synchronize()
with torch.cuda.amp.autocast():
while sum(timing) < T1:
start = time.time()
model(inputs)
torch.cuda.synchronize()
timing.append(time.time() - start)
timing = torch.as_tensor(timing, dtype=torch.float32)
throughput = batch_size / timing.mean().item()
logger.info(f"batch_size {batch_size} throughput {throughput}")
if __name__ == '__main__':
args, config = parse_option()
config.defrost()
if config.DISTILL.TEACHER_LOGITS_PATH:
config.DISTILL.ENABLED = True
config.freeze()
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
if args.only_cpu:
ddp_backend = 'gloo'
else:
torch.cuda.set_device(config.LOCAL_RANK)
ddp_backend = 'nccl'
torch.distributed.init_process_group(
backend=ddp_backend, init_method='env://', world_size=world_size, rank=rank)
torch.distributed.barrier()
seed = config.SEED + dist.get_rank()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
    # linearly scale the learning rate according to the total batch size; may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * \
config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
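    # e.g. with DATA.BATCH_SIZE=128 per GPU on 8 GPUs the effective batch size is
    # 1024, so BASE_LR, WARMUP_LR and MIN_LR are each multiplied by 1024 / 512 = 2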
    # gradient accumulation also needs to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(output_dir=config.OUTPUT,
dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}")
if is_main_process():
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
config_dict = dict(config)
config_dict['git'] = get_git_info()
if args.use_wandb:
wandb_output_path = config.OUTPUT
wandb.init(project="TinyViT", config=config_dict,
dir=wandb_output_path)
# print git info
logger.info('===== git =====')
logger.info(str(get_git_info()))
# print config
logger.info(config.dump())
main(args, config)
|
Cream/TinyViT/main.py/0
|
{
"file_path": "Cream/TinyViT/main.py",
"repo_id": "Cream",
"token_count": 10302
}
| 308 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
from .coco import build as build_coco
def get_coco_api_from_dataset(dataset):
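    # unwrap up to 10 levels of torch.utils.data.Subset to reach the underlying
    # torchvision CocoDetection dataset and return its pycocotools COCO object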
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
def build_dataset(image_set, args):
if args.dataset_file == 'coco':
return build_coco(image_set, args)
if args.dataset_file == 'coco_panoptic':
# to avoid making panopticapi required for coco
from .coco_panoptic import build as build_coco_panoptic
return build_coco_panoptic(image_set, args)
raise ValueError(f'dataset {args.dataset_file} not supported')
|
Cream/iRPE/DETR-with-iRPE/datasets/__init__.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/datasets/__init__.py",
"repo_id": "Cream",
"token_count": 365
}
| 309 |
"""Functional interface"""
import warnings
import math
import torch
from torch._C import _infer_size, _add_docstr
from torch.nn import _reduction as _Reduction
from torch.nn.modules import utils
from torch.nn.modules.utils import _single, _pair, _triple, _list_with_default
from torch.nn import grad # noqa: F401
from torch import _VF
from torch._jit_internal import boolean_dispatch, List, Optional, _overload, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import linear, softmax, dropout, pad
Tensor = torch.Tensor
def rpe_multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
# 2D-RPE
rpe_q=None,
rpe_k=None,
rpe_v=None,
hw=None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
rpe_q, rpe_k, rpe_v: 2D relative position encoding on queries, keys and values
hw: (height, width) of the feature map
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tens_ops = (query, key, value, in_proj_weight, in_proj_bias,
bias_k, bias_v, out_proj_weight, out_proj_bias)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if (query is key or torch.equal(query, key)) and (key is value or torch.equal(key, value)):
# self-attention
q, k, v = linear(query, in_proj_weight,
in_proj_bias).chunk(3, dim=-1)
elif key is value or torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt,
in_proj_bias[embed_dim: (embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt,
in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert (
attn_mask.dtype == torch.float32
or attn_mask.dtype == torch.float64
or attn_mask.dtype == torch.float16
or attn_mask.dtype == torch.uint8
or attn_mask.dtype == torch.bool
), "Only float, byte, and bool types are supported for attn_mask, not {}".format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError(
"The size of the 2D attn_mask is not correct.")
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError(
"The size of the 3D attn_mask is not correct.")
else:
raise RuntimeError(
"attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()
[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()
[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
'''
q @ k
q: (batch_size * num_heads, length_query, head_dim)
k: (batch_size * num_heads, length_memory, head_dim)
head_dim = embed_dim // num_heads
attn_output_weights: (batch_size * num_heads, length_query, length_memory)
'''
q = q.contiguous()
k = k.contiguous()
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights_view = attn_output_weights.view(
bsz, num_heads, *attn_output_weights.shape[-2:])
# 2D relative position encoding on keys
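    # rpe_k maps the queries and the 2D relative positions of the image tokens to
    # a positional bias that is added to the attention logits before the softmax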
if rpe_k is not None:
q_view = q.view(bsz, num_heads, tgt_len, head_dim)
attn_output_weights_view += rpe_k(q_view, height=hw[0], width=hw[1])
# 2D relative position encoding on queries
if rpe_q is not None:
k_view = k.view(bsz, num_heads, tgt_len, head_dim)
attn_output_weights_view += rpe_q(k_view * scaling,
height=hw[0], width=hw[1]).\
transpose(-2, -1)
assert list(attn_output_weights.size()) == [
bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float("-inf"))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(
bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float("-inf"),
)
attn_output_weights = attn_output_weights.view(
bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(
attn_output_weights, p=dropout_p, training=training)
'''
attn_output_weights: (batch_size * num_heads, length_query, length_memory)
v: (batch_size * num_heads, length_memory, head_dim)
attn_output: (batch_size * num_heads, length_query, head_dim)
'''
attn_output = torch.bmm(attn_output_weights, v)
# 2D relative position encoding on values
if rpe_v is not None:
attn_output_weights = attn_output_weights.contiguous()
attn = attn_output_weights.view(
bsz, num_heads, *attn_output_weights.shape[-2:])
attn_output_view = attn_output.view(
bsz, num_heads, *attn_output.shape[-2:])
attn_output_view += rpe_v(attn, height=hw[0], width=hw[1])
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(
0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(
bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
|
Cream/iRPE/DETR-with-iRPE/models/rpe_attention/rpe_attention_function.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/models/rpe_attention/rpe_attention_function.py",
"repo_id": "Cream",
"token_count": 7847
}
| 310 |
Hiring research interns for neural architecture search projects: [email protected]
# Rethinking and Improving Relative Position Encoding for Vision Transformer
[[Paper]](https://openaccess.thecvf.com/content/ICCV2021/html/Wu_Rethinking_and_Improving_Relative_Position_Encoding_for_Vision_Transformer_ICCV_2021_paper.html)
Image Classification: DeiT with iRPE
# Model Zoo
We equip DeiT models with contextual product shared-head RPE with 50 buckets, and report their accuracy on the ImageNet-1K validation set.
Resolution: `224 x 224`
Model | RPE-Q | RPE-K | RPE-V | #Params(M) | MACs(M) | Top-1 Acc.(%) | Top-5 Acc.(%) | Link | Log
----- | ----- | ----- | ----- | ---------- | ------- | ------------- | ------------- | ---- | ---
tiny | | ✔ | | 5.76 | 1284 | 73.7 | 92.0 | [link](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/deit_tiny_patch16_224_ctx_product_50_shared_k.pth) | [log](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/log_deit_tiny_patch16_224_ctx_product_50_shared_k.txt), [detail](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/detail_deit_tiny_patch16_224_ctx_product_50_shared_k.log)
small | | ✔ | | 22.09 | 4659 | 80.9 | 95.4 | [link](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/deit_small_patch16_224_ctx_product_50_shared_k.pth) | [log](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/log_deit_small_patch16_224_ctx_product_50_shared_k.txt), [detail](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/detail_deit_small_patch16_224_ctx_product_50_shared_k.log)
small | ✔ | ✔ | | 22.13 | 4706 | 81.0 | 95.5 | [link](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/deit_small_patch16_224_ctx_product_50_shared_qk.pth) | [log](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/log_deit_small_patch16_224_ctx_product_50_shared_qk.txt), [detail](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/detail_deit_small_patch16_224_ctx_product_50_shared_qk.log)
small | ✔ | ✔ | ✔ | 22.17 | 4885 | 81.2 | 95.5 | [link](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/deit_small_patch16_224_ctx_product_50_shared_qkv.pth) | [log](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/log_deit_small_patch16_224_ctx_product_50_shared_qkv.txt), [detail](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/detail_deit_small_patch16_224_ctx_product_50_shared_qkv.log)
base | | ✔ | | 86.61 | 17684 | 82.3 | 95.9 | [link](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/deit_base_patch16_224_ctx_product_50_shared_k.pth) | [log](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/log_deit_base_patch16_224_ctx_product_50_shared_k.txt), [detail](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/detail_deit_base_patch16_224_ctx_product_50_shared_k.log)
base | ✔ | ✔ | ✔ | 86.68 | 18137 | 82.8 | 96.1 | [link](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/deit_base_patch16_224_ctx_product_50_shared_qkv.pth) | [log](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/log_deit_base_patch16_224_ctx_product_50_shared_qkv.txt), [detail](https://github.com/wkcn/iRPE-model-zoo/releases/download/1.0/detail_deit_base_patch16_224_ctx_product_50_shared_qkv.log)
# Usage
## Setup
1. Install 3rd-party packages from [requirements.txt](./requirements.txt).
Note that the version of timm should be **0.3.2** or higher, and the version of PyTorch should be **1.7.0** or higher.
```bash
pip install -r ./requirements.txt
```
2. **[Optional, Recommended]** Build the iRPE operators implemented in CUDA.
Although iRPE can be implemented with native PyTorch functions, the backward pass of the PyTorch indexing function is very slow. We implement CUDA operators for more efficient training and recommend building them.
`nvcc` is necessary to build CUDA operators.
```bash
cd rpe_ops/
python setup.py install --user
```
## Data Preparation
You can download the ImageNet-1K dataset from [`http://www.image-net.org/`](http://www.image-net.org/).
The train set and validation set should be saved as `*.tar` archives:
```
ImageNet/
├── train.tar
└── val.tar
```
Our code also supports storing images as individual files as follows:
```
ImageNet/
├── train
│ ├── n01440764
│ │ ├── n01440764_10026.JPEG
│ │ ├── n01440764_10027.JPEG
...
├── val
│ ├── n01440764
│ │ ├── ILSVRC2012_val_00000293.JPEG
```
## Training
We define the models with iRPE in [`rpe_models.py`](./rpe_models.py).
For example, we train DeiT-S with contextual product relative position encoding on keys with 50 buckets, the model's name is `deit_small_patch16_224_ctx_product_50_shared_k`.
Run the following command:
```bash
python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model deit_small_patch16_224_ctx_product_50_shared_k --batch-size 128 --data-path ./ImageNet/ --output_dir ./outputs/ --load-tar
```
You can remove the flag `--load-tar` if images are stored as individual files : )
## Evaluation
The procedure is similar to training: add `--eval --resume <the checkpoint path>`.
```bash
python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --model deit_small_patch16_224_ctx_product_50_shared_k --batch-size 128 --data-path ./ImageNet/ --output_dir ./outputs/ --load-tar --eval --resume deit_small_patch16_224_ctx_product_50_shared_k.pth
```
`--resume <the checkpoint path>` can be replaced by `--pretrained`, then the checkpoint will be downloaded automatically. The download directory is usually `$HOME/.cache/torch/hub/checkpoints`.
## Code Structure
Our code is based on [DeiT](https://github.com/facebookresearch/deit) and [pytorch-image-models](https://github.com/rwightman/pytorch-image-models). Thank you!
File | Description
-----|------------
[`irpe.py`](./irpe.py) | The implementation of image relative position encoding
[`rpe_models.py`](./rpe_models.py) | The implementation of models with iRPE
[`rpe_vision_transformer.py`](./rpe_vision_transformer.py) | We equip iRPE on `Attention`, `Block`, and `VisionTransformer` modules
[`rpe_ops`](./rpe_ops) | The CUDA implementation of iRPE operators for efficient training
# Citing iRPE
If this project is helpful for you, please cite it. Thank you! : )
```bibtex
@InProceedings{iRPE,
title = {Rethinking and Improving Relative Position Encoding for Vision Transformer},
author = {Wu, Kan and Peng, Houwen and Chen, Minghao and Fu, Jianlong and Chao, Hongyang},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2021},
pages = {10033-10041}
}
```
# License
[Apache License](./LICENSE)
|
Cream/iRPE/DeiT-with-iRPE/README.md/0
|
{
"file_path": "Cream/iRPE/DeiT-with-iRPE/README.md",
"repo_id": "Cream",
"token_count": 2509
}
| 311 |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for DeiT", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
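        # called by submitit when the job is preempted or times out: requeue the
        # same trainer, resuming from the latest checkpoint if one already exists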
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="deit")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
|
Cream/iRPE/DeiT-with-iRPE/run_with_submitit.py/0
|
{
"file_path": "Cream/iRPE/DeiT-with-iRPE/run_with_submitit.py",
"repo_id": "Cream",
"token_count": 1646
}
| 312 |
import torch as th
import torch.nn as nn
import torch.nn.functional as F
def linear_combination(x, y, epsilon):
return epsilon*x + (1-epsilon)*y
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction == 'mean' \
else loss.sum() if reduction == 'sum' else loss
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, epsilon=0.1, reduction='mean'):
super().__init__()
self.epsilon = epsilon
self.reduction = reduction
def forward(self, preds, target):
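        # label-smoothed cross entropy:
        #   (1 - epsilon) * NLL(target) + epsilon * mean_over_classes(-log p)
        # i.e. epsilon of the target probability mass is spread uniformly over all classes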
n = preds.size()[-1]
log_preds = F.log_softmax(preds, dim=-1)
loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)
nll = F.nll_loss(log_preds, target, reduction=self.reduction)
return linear_combination(loss/n, nll, self.epsilon)
class SoftTargetCrossEntropy(nn.Module):
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x, target):
loss = th.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
def build_criterion(config, train=True):
if config.AUG.MIXUP_PROB > 0.0 and config.LOSS.LOSS == 'softmax':
criterion = SoftTargetCrossEntropy() \
if train else nn.CrossEntropyLoss()
elif config.LOSS.LABEL_SMOOTHING > 0.0 and config.LOSS.LOSS == 'softmax':
criterion = LabelSmoothingCrossEntropy(config.LOSS.LABEL_SMOOTHING)
elif config.LOSS.LOSS == 'softmax':
criterion = nn.CrossEntropyLoss()
else:
        raise ValueError('Unknown loss {}'.format(config.LOSS.LOSS))
return criterion
|
CvT/lib/core/loss.py/0
|
{
"file_path": "CvT/lib/core/loss.py",
"repo_id": "CvT",
"token_count": 685
}
| 313 |
import pickle
import torch
import torch.distributed as dist
class Comm(object):
    def __init__(self, local_rank=0):
        self.local_rank = local_rank
@property
def world_size(self):
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
@property
def rank(self):
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
@property
def local_rank(self):
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return self._local_rank
@local_rank.setter
def local_rank(self, value):
if not dist.is_available():
self._local_rank = 0
if not dist.is_initialized():
self._local_rank = 0
self._local_rank = value
@property
def head(self):
return 'Rank[{}/{}]'.format(self.rank, self.world_size)
def is_main_process(self):
return self.rank == 0
def synchronize(self):
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if self.world_size == 1:
return
dist.barrier()
comm = Comm()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = comm.world_size
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = comm.world_size
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
|
CvT/lib/utils/comm.py/0
|
{
"file_path": "CvT/lib/utils/comm.py",
"repo_id": "CvT",
"token_count": 1625
}
| 314 |
import sys
sys.path.append('../')
import unittest
import numpy as np
import pandas as pd
import shutil
import os
import invoker
class TestErrorInput(unittest.TestCase):
def setUp(self):
self.__input_path = './error_test_input_file.csv'
self.__detect_mode = 'AnomalyOnly'
self.__timestamp_column = 'timestamp'
self.__value_column = 'value'
self.__batch_size = 2000
self.__threshold = 0.3
self.__sensitivity = 99
self.__append_mode = True
self.compute_stats_in_visualization = False
self.__output_path = './error_test_output_directory'
def tearDown(self):
self.deleteDataFrameDirectory()
def deleteDataFrameDirectory(self):
if os.path.exists(self.__input_path):
os.remove(self.__input_path)
if os.path.exists(self.__output_path):
shutil.rmtree(self.__output_path)
def test_empty_input(self):
df = pd.DataFrame()
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, "The dataset should contain at least 12 points to run this module.",
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_invalid_timestamp(self):
df = pd.DataFrame()
df['timestamp'] = 'invalid'
df['value'] = np.ones(20)
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, "The timestamp column specified is malformed.",
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_invalid_series_order(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')[::-1]
df['timestamp'] = timestamps
df['value'] = np.ones(20)
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, "The timestamp column specified is not in ascending order.",
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
    def test_duplicate_series(self):
df = pd.DataFrame()
df['value'] = np.ones(20)
df['timestamp'] = '2020-01-01'
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, "The timestamp column specified has duplicated timestamps.",
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_invalid_value_format(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')
df['timestamp'] = timestamps
df['value'] = 'invalid'
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, 'The data in column "value" can not be parsed as float values.',
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_invalid_series_value(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')
df['timestamp'] = timestamps
df['value'] = np.nan
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, 'The data in column "value" contains nan values.',
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_value_overflow(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')
df['timestamp'] = timestamps
df['value'] = 1e200
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, 'The magnitude of data in column "value" exceeds limitation.',
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_not_enough_points(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=10, freq='1D')
df['timestamp'] = timestamps
df['value'] = np.sin(np.linspace(1, 10, 10))
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, "The dataset should contain at least 12 points to run this module.",
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_invalid_batch_size(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')
df['timestamp'] = timestamps
df['value'] = np.sin(np.linspace(1, 10, 20))
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, 'The "batchSize" parameter should be at least 12 or 0 that indicates to run all data in a batch',
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
5, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path)
def test_timestamp_column_missing(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')
df['time'] = timestamps
df['value'] = np.sin(np.linspace(1, 10, 20))
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, 'Column with name or index "timestamp" not found.',
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
def test_value_column_missing(self):
df = pd.DataFrame()
timestamps = pd.date_range(start='2020-01-01', periods=20, freq='1D')
df['timestamp'] = timestamps
df['missed'] = np.sin(np.linspace(1, 10, 20))
df.to_csv(self.__input_path)
self.assertRaisesRegexp(Exception, 'Column with name or index "value" not found.',
invoker.invoke,
self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.__output_path)
if __name__ == '__main__':
unittest.main()
|
anomalydetector/aml_component/tests/test_error_input.py/0
|
{
"file_path": "anomalydetector/aml_component/tests/test_error_input.py",
"repo_id": "anomalydetector",
"token_count": 4170
}
| 315 |
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation ("Microsoft") grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
configs = [()]
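# placeholder configuration; each entry is unpacked in make_layers() below as a
# (kernel_size, stride, out_channels) tuple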
def make_layers(Bn=True, input=256):
global configs
layers = []
layer = nn.Conv2d(input, input, kernel_size=1, stride=1, padding=0)
layers.append(layer)
if Bn:
layers.append(nn.BatchNorm2d(input))
for k, s, c in configs:
if c == -1:
            layer = nn.Conv2d(input, input, kernel_size=k, stride=s, padding=0)  # keep channel count when c == -1
else:
now = []
now.append(nn.Conv1d(input, c, kernel_size=k, stride=s, padding=0))
input = c
if Bn:
now.append(nn.BatchNorm2d(input))
            now.append(nn.ReLU(inplace=True))
layer = nn.Sequential(*now)
layers.append(layer)
return nn.Sequential(*layers), input
class trynet(nn.Module):
def __init__(self):
super(trynet, self).__init__()
self.layer1 = nn.Conv1d(1, 128, kernel_size=128, stride=0, padding=0)
self.layer2 = nn.BatchNorm1d(128)
self.feature = make_layers()
class Anomaly(nn.Module):
def __init__(self, window=1024):
self.window = window
super(Anomaly, self).__init__()
self.layer1 = nn.Conv1d(window, window, kernel_size=1, stride=1, padding=0)
self.layer2 = nn.Conv1d(window, 2 * window, kernel_size=1, stride=1, padding=0)
self.fc1 = nn.Linear(2 * window, 4 * window)
self.fc2 = nn.Linear(4 * window, window)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
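        # x: (batch, window) input windows, reshaped to (batch, window, 1) for the
        # 1x1 convolutions, then scored per position; the output is (batch, window)
        # values in [0, 1]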
x = x.view(x.size(0), self.window, 1)
x = self.layer1(x)
x = self.relu(x)
x = self.layer2(x)
x = x.view(x.size(0), -1)
x = self.relu(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return torch.sigmoid(x)
def save_model(model, model_path):
try:
torch.save(model.state_dict(), model_path)
except:
torch.save(model, model_path)
def load_model(model, path):
print("loading %s" % path)
with open(path, 'rb') as f:
pretrained = torch.load(f, map_location=lambda storage, loc: storage)
model_dict = model.state_dict()
pretrained = {k: v for k, v in pretrained.items() if k in model_dict}
model_dict.update(pretrained)
model.load_state_dict(model_dict)
return model
|
anomalydetector/srcnn/net.py/0
|
{
"file_path": "anomalydetector/srcnn/net.py",
"repo_id": "anomalydetector",
"token_count": 1578
}
| 316 |
{
"python.testing.pytestArgs": [
"tests"
],
"python.testing.unittestEnabled": false,
"python.testing.nosetestsEnabled": false,
"python.testing.pytestEnabled": true,
"cmake.configureOnOpen": false
}
|
archai/.vscode/settings.json/0
|
{
"file_path": "archai/.vscode/settings.json",
"repo_id": "archai",
"token_count": 94
}
| 317 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List
from logging import Handler
import os
import time
from overrides import overrides
from threading import Lock
class AtomicFileHandler(Handler):
"""
    This handler buffers log records and writes them to the file in batches instead of appending one line at a time
"""
terminator = '\n'
def __init__(self, filename, encoding=None, save_delay=30.0):
super().__init__()
filename = os.fspath(filename)
self.baseFilename = os.path.abspath(filename)
self.encoding = encoding
self._buffer:List[str] = []
self.mutex = Lock()
self._last_flush = 0.0
self.save_delay = save_delay
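        # emit() only appends records to an in-memory buffer; the buffer is written
        # to disk at most once every save_delay seconds (or when flush/close is called)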
@overrides
def flush(self):
self._flush_buffer(force=True)
@overrides
def close(self):
try:
self._flush_buffer(force=True)
finally:
super().close()
def _open(self):
return open(self.baseFilename, 'a', encoding=self.encoding)
def _flush_buffer(self, force=False)->None:
if force or (time.time() - self._last_flush >= self.save_delay):
try:
self.mutex.acquire()
with self._open() as f:
f.writelines(self._buffer)
self._buffer.clear()
self._last_flush = time.time()
finally:
self.mutex.release()
def emit(self, record):
msg = self.format(record)
try:
self.mutex.acquire()
self._buffer.append(msg + self.terminator)
finally:
self.mutex.release()
self._flush_buffer()
|
archai/archai/common/atomic_file_handler.py/0
|
{
"file_path": "archai/archai/common/atomic_file_handler.py",
"repo_id": "archai",
"token_count": 755
}
| 318 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import glob
import sys
import logging
import datetime
import platform
import time
import numpy as np
import re
from torch import Tensor
from azure.data.tables import TableServiceClient, UpdateMode, EntityProperty, EdmType
from azure.storage.blob import BlobClient, ContainerClient
from azure.core.exceptions import ResourceNotFoundError
from shutil import rmtree
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
class ArchaiStore:
"""ArchaiStore wraps an Azure 'status' Table and associated Blob Storage used to provide a backing
store for models and an associated table for collating status of long running jobs. This is actually a general
purpose utility class that could be used for anything.
The naming scheme is such that each Entity in the table has a 'name' property which is a simple friendly name or a
guid, and this row will have an associated folder in the blob storage container with the same name where models and
other peripheral files can be stored.
The 'status' table supports a locking concept that allows the status table to be used as a way of coordinating jobs
across multiple machines where each machine grabs free work, locks that row until the work is done, uploads new
files, and updates the status to 'complete' then unlocks that row. So this ArchaiStore can be used as the backing
store for a simple distributed job scheduler.
This also has a convenient command line interface provided below.
"""
def __init__(self, storage_account_name, storage_account_key, blob_container_name='models', table_name='status', partition_key='main'):
self.storage_account_key = storage_account_key
self.storage_account_name = storage_account_name
self.storage_connection_string = f'DefaultEndpointsProtocol=https;AccountName={storage_account_name};AccountKey={storage_account_key};EndpointSuffix=core.windows.net'
self.blob_container_name = blob_container_name
self.status_table_name = table_name
self.partition_key = partition_key
self.service = None
self.table_client = None
self.container_client = None
@staticmethod
def parse_connection_string(storage_connection_string):
""" This helper method extracts the storage account name and key pair from a connection string
and returns that pair in a tuple. This pair can then be used to construct an ArchaiStore object """
parts = storage_connection_string.split(";")
storage_account_name = None
storage_account_key = None
for part in parts:
i = part.find('=')
key = part[0:i]
value = part[i + 1:]
if key == "AccountName":
storage_account_name = value
elif key == "AccountKey":
storage_account_key = value
if not storage_account_name:
raise Exception("storage_connection_string is missing AccountName part")
if not storage_account_key:
raise Exception("storage_connection_string is missing AccountKey part")
return (storage_account_name, storage_account_key)
def get_utc_date(self):
""" This handy function can be used to put a UTC timestamp column in your entity, like a 'model_date' column, for example. """
current_date = datetime.datetime.now()
current_date = current_date.replace(tzinfo=datetime.timezone.utc)
return current_date.isoformat()
def _get_node_id(self):
""" Return a unique name for the current machine which is used as the lock identity """
return platform.node()
def _get_status_table_service(self):
logger = logging.getLogger('azure.core.pipeline.policies.http_logging_policy')
logger.setLevel(logging.ERROR)
return TableServiceClient.from_connection_string(conn_str=self.storage_connection_string, logger=logger, logging_enable=False)
def _get_table_client(self):
if not self.table_client:
for i in range(6):
try:
if not self.service:
self.service = self._get_status_table_service()
self.table_client = self.service.create_table_if_not_exists(self.status_table_name)
return self.table_client
except Exception as e:
self.service = None
if i == 5:
raise e
else:
print(f"### error getting table client, sleeping 1 second and trying again: {e}")
time.sleep(1)
return self.table_client
def _get_container_client(self, name):
if not self.container_client:
logger = logging.getLogger('azure.core.pipeline.policies.http_logging_policy')
logger.setLevel(logging.ERROR)
self.container_client = ContainerClient.from_connection_string(
self.storage_connection_string,
container_name=name,
logger=logger,
logging_enable=False)
if not self.container_client.exists():
self.container_client.create_container()
return self.container_client
def _get_blob_client(self, name):
container = self._get_container_client(self.blob_container_name) # make sure container exists.
return BlobClient.from_connection_string(self.storage_connection_string, container_name=container.container_name, blob_name=name)
def _retry_table_operation(self, function, label, retries=5, expected=[]):
for i in range(retries + 1):
try:
self._get_table_client()
result = function()
return result
except Exception as e:
if "Bad Request" in str(e):
raise e
for t in expected:
if isinstance(e, t):
return None
print(f"error {label}: {e}")
                if i == retries:
raise e
time.sleep(1)
print("trying again in 1 second")
self.table_client = None
self.service = None
def _query(self, query):
entities = []
for e in self.table_client.query_entities(query_filter=query):
entities += [e]
return entities
def get_all_status_entities(self, status=None, not_equal=False):
""" Get all status entities with optional status column filter.
For example, pass "status=complete" to find all status rows that
have the status "complete". Pass not_equal of True if you want
to check the status is not equal to the given value.
"""
query = f"PartitionKey eq '{self.partition_key}'"
if status:
if not_equal:
query += f" and status ne '{status}'"
else:
query += f" and status eq '{status}'"
results = self._retry_table_operation(lambda: self._query(query),
label='reading table')
unwrapped = []
for e in results:
unwrapped += [self._unwrap_numeric_types(e)]
return unwrapped
def get_status(self, name):
""" Get or create a new status entity with the given name.
The returned entity is a python dictionary where the name can be retrieved
using e['name'], you can then add keys to that dictionary and call update_status_entity. """
entity = self._retry_table_operation(lambda: self.table_client.get_entity(partition_key=self.partition_key, row_key=name),
label='reading entity',
expected=[ResourceNotFoundError])
if entity is None:
entity = {
'PartitionKey': self.partition_key,
'RowKey': name,
'name': name,
'status': 'new'
}
self.update_status_entity(entity)
return self._unwrap_numeric_types(entity)
def _wrap_numeric_types(self, entity):
e = {}
for k in entity.keys():
v = entity[k]
if isinstance(v, bool):
e[k] = v
elif isinstance(v, int):
e[k] = EntityProperty(v, EdmType.INT64)
elif isinstance(v, float):
e[k] = float(v) # this is casting np.float to float.
else:
e[k] = v
return e
def _unwrap_numeric_types(self, entity):
e = {}
for k in entity.keys():
v = entity[k]
if isinstance(v, EntityProperty):
e[k] = v.value
else:
e[k] = v
return e
def get_existing_status(self, name):
""" Find the given entity by name, and return it, or return None if the name is not found."""
entity = self._retry_table_operation(lambda: self.table_client.get_entity(partition_key=self.partition_key, row_key=name),
label='reading entity',
expected=[ResourceNotFoundError])
if entity is not None:
return self._unwrap_numeric_types(entity)
return None
def get_updated_status(self, e):
""" Return an updated version of the entity by querying the table again, this way you
can pick up any changes that another process may have made. """
return self.get_existing_status(e['RowKey'])
def update_status_entity(self, entity):
""" This method replaces everything in the entity store with what you have here.
The entity can store strings, bool, float, int, datetime, so anything like a python list
        is best serialized using json.dumps and stored as a string, then you can use json.loads to
parse it later. """
entity = self._wrap_numeric_types(entity)
self._retry_table_operation(lambda: self.table_client.upsert_entity(entity=entity, mode=UpdateMode.REPLACE),
label='update entity')
def merge_status_entity(self, entity):
""" This method merges everything in the entity store with what you have here. So you can
add a property without clobbering any other new properties other processes have added in
parallel. Merge cannot delete properties, for that you have to use update_status_entity.
The entity can store strings, bool, float, int, datetime, so anything like a python list
        is best serialized using json.dumps and stored as a string, then you can use json.loads to
parse it later."""
entity = self._wrap_numeric_types(entity)
self._retry_table_operation(lambda: self.table_client.update_entity(entity=entity, mode=UpdateMode.MERGE),
label='merge entity')
def update_status(self, name, status, priority=None):
""" This is a simple wrapper that gets the entity by name, and updates the status field.
If you already have the entity then use update_status_entity."""
entity = self.get_existing_status(name)
if entity is None:
entity = self.get_status(name)
entity['status'] = status
if priority:
entity['priority'] = priority
self.merge_status_entity(entity)
return entity
def delete_status(self, name):
""" Delete the status entry with this name, note this does not delete any associated blobs.
See delete_blobs for that. """
e = self.get_existing_status(name)
if e is not None:
self.delete_status_entity(e)
def delete_status_entity(self, e):
""" Delete the status entry with this name, note this does not delete any associated blobs.
See delete_blobs for that. """
self._retry_table_operation(lambda: self.table_client.delete_entity(e),
label='deleting status',
expected=[ResourceNotFoundError])
def upload_blob(self, folder_name, file, blob_name=None):
""" Upload the given file to the blob store, under the given folder name.
The folder name could have multiple parts like 'project/experiment/foo'.
By default the blob will use the base file name, but you can override
that with the given blob_name if you want to. """
filename = os.path.basename(file)
if blob_name:
blob = f"{folder_name}/{blob_name}"
else:
blob = f"{folder_name}/{filename}"
blob_client = self._get_blob_client(blob)
with open(file, "rb") as data:
blob_client.upload_blob(data, overwrite=True)
def lock(self, name, status):
""" Lock the named entity to this computer identified by platform.node()
and set the status to the given status. This way you can use this ArchaiStore as
        a way of coordinating the parallel execution of a number of jobs, where each long
running job is allocated to a particular node in a distributed cluster using this
locking mechanism. Be sure to call unlock when done, preferably in a try/finally block. """
e = self.get_existing_status(name)
if e is None:
e = self.get_status(name)
return self.lock_entity(e, status)
def lock_entity(self, e, status):
""" Lock the given entity to this computer identified by platform.node()
and set the status to the given status. This way you can use this ArchaiStore as
        a way of coordinating the parallel execution of a number of jobs, where each long
running job is allocated to a particular node in a distributed cluster using this
locking mechanism. Be sure to call unlock when done, preferably in a try/finally block. """
node_id = self._get_node_id()
name = e['name']
if self.is_locked_by_other(name):
print(f"The model {name} is locked by {e['node']}")
return None
e['status'] = status
e['node'] = node_id # lock the row until upload complete
self.merge_status_entity(e)
return e
def is_locked(self, name):
""" Return true if the entity exists and is locked by anyone (including this computer). """
e = self.get_existing_status(name)
if e is None:
return False
return 'node' in e
def is_locked_by_self(self, name):
""" Return true if the entity exists and is locked this computer. This is handy if the
computer restarts and wants to continue processing rows it has already claimed. """
e = self.get_existing_status(name)
if e is None:
return False
node_id = self._get_node_id()
return 'node' in e and e['node'] == node_id
def is_locked_by_other(self, name):
""" Return true if the entity exists and is locked some other computer. This will tell
the local computer not to touch this row of the table as someone else has it. """
e = self.get_existing_status(name)
if e is None:
return False
node_id = self._get_node_id()
return 'node' in e and e['node'] and e['node'] != node_id
def unlock(self, name):
""" Unlock the entity (regardless of who owns it - so use carefully, preferably only
when is_locked_by_self is true). """
e = self.get_status(name)
self.unlock_entity(e)
return e
def unlock_entity(self, e):
""" Unlock the entity (regardless of who owns it - so use carefully, preferably only
when is_locked_by_self is true). """
if 'node' in e:
del e['node']
self.update_status_entity(e)
else:
self.merge_status_entity(e)
return e
def get_lock(self, entity):
""" Find out what computer has the entity locked. """
if 'node' in entity and entity['node']:
return entity['node']
return None
def unlock_all(self, node_name):
""" This is a sledge hammer for unlocking all entities, use carefully.
This might be necessary if you are moving everything to a new cluster. """
for e in self.get_all_status_entities():
name = e['name']
node = e['node'] if 'node' in e else None
changed = False
if 'node' in e:
if node_name and node_name != node:
continue
e['node'] = ''
changed = True
if changed:
print(f"Unlocking job {name} on node {node}")
self.merge_status_entity(e)
def reset(self, name, except_list=[]):
""" This resets all properties on the given entity that are not primary keys,
'name' or 'status' and are not in the given except_list.
This will not touch a node that is locked by another computer. """
e = self.get_existing_status(name)
if not e:
print(f"Entity {name} not found")
else:
self._reset(e, except_list)
def _reset(self, e, except_list=[]):
name = e['name']
if self.is_locked_by_other(name):
node = self.get_lock(e)
print(f"Skipping {e['RowKey']} as it is locked by {node}")
elif self._reset_metrics(e, except_list):
e['status'] = 'reset'
print(f"Resetting entity {e['RowKey']}")
self.update_status_entity(e)
    def reset_all(self, name=None):
""" This resets all properties on all entities that are not locked by another. """
for e in self.get_all_status_entities():
self._reset(e)
def _reset_metrics(self, entity, except_list=[]):
# now clear all data to force a full re-run of everything.
modified = False
for key in list(entity.keys()):
if key != 'PartitionKey' and key != 'RowKey' and key != 'name' and key != 'status' and key != 'node' and key not in except_list:
del entity[key]
modified = True
return modified
def upload(self, name, path, reset, priority=0, **kwargs):
""" Upload a file to the named folder in the blob store associated with this ArchaiStore and
add the given named status row in our status table. It also locks the row with 'uploading'
status until the upload is complete which ensures another machine does not try
processing work until the upload is finished. The path points to a file or a folder.
If a folder it uploads everything in that folder. This can also optionally reset
the row, since sometimes you want to upload a new model for training, then reset
all the metrics computed on the previous model. The optional priority is just a
added as a property on the row which can be used by a distributed job scheduler to
prioritize the work that is being queued up in this table. """
if not name:
raise Exception('Entity name is missing')
if '/' in name:
raise Exception('Entity name cannot contain a slash')
e = self.get_status(name)
e = self.lock(name, 'uploading')
if not e:
return
e['priority'] = priority
self.merge_status_entity(e)
try:
to_upload = []
if os.path.isdir(path):
to_upload = [os.path.join(path, f) for f in os.listdir(path)]
elif os.path.isfile(path):
to_upload = [path]
else:
raise Exception(f'Path not found: {path}')
for f in to_upload:
# upload the file
print(f'Uploading file: {f} to blob: {name}')
self.upload_blob(name, f)
except Exception as ex:
print(f'### upload failed: {ex}')
# record uploaded status and unlock it.
if reset:
self._reset_metrics(e)
e['status'] = 'uploaded'
for k in kwargs:
e[k] = kwargs[k]
self.unlock_entity(e)
def batch_upload(self, path, glob_pattern='*.onnx', override=False, reset=False, priority=0, **kwargs):
""" Upload all the matching files in the given path to the blob store
where the status table 'name' will be the base name of the files found
by the given non-recursive glob_pattern.
"""
if not os.path.isdir(path):
raise Exception(f'Path is not a directory: {path}')
models = glob.glob(os.path.join(path, glob_pattern))
if len(models) == 0:
print(f"No *.onnx models found in {path}")
for file in models:
name = os.path.splitext(os.path.basename(file))[0]
if override or not self.get_existing_status(name):
self.upload(name, file, reset, priority, **kwargs)
else:
print(f"Skipping {name} as it already exists")
def download(self, name, folder, specific_file=None):
""" Download files from the given folder name from our associated blob container
and return a list of the local paths to all downloaded files. If an optional specific_file is
given then it tries to find and download that file only. Returns a list of local files created.
        The specific_file can be a regular expression like '.*\.onnx'. """
container = self._get_container_client(self.blob_container_name)
if not container.exists():
return []
if not os.path.isdir(folder):
os.makedirs(folder)
local_file = None
prefix = f'{name}/'
downloaded = []
if specific_file:
specific_file_re = re.compile(specific_file)
for blob in container.list_blobs(name_starts_with=prefix):
file_name = blob.name[len(prefix):]
download = False
if specific_file:
if not specific_file_re.match(file_name):
continue
else:
download = True
local_file = os.path.join(folder, file_name)
else:
download = True
local_file = os.path.join(folder, file_name)
if download:
local_file = os.path.realpath(local_file)
dir = os.path.dirname(local_file)
if os.path.isfile(dir):
os.unlink(dir)
elif os.path.isdir(local_file):
rmtree(local_file)
os.makedirs(dir, exist_ok=True)
blob_client = container.get_blob_client(blob)
try:
with open(local_file, 'wb') as f:
data = blob_client.download_blob()
f.write(data.readall())
downloaded += [local_file]
except Exception as e:
print(f"### Error downloading blob '{blob}' to local file: {e}")
return downloaded
def delete_blobs(self, name, specific_file=None):
""" Delete all the blobs associated with the given entity name. """
container = self._get_container_client(self.blob_container_name)
prefix = f'{name}/'
for blob in container.list_blobs(name_starts_with=prefix):
file_name = blob.name[len(prefix):]
if specific_file and file_name != specific_file:
continue
container.delete_blob(blob)
def list_blobs(self, prefix=None):
""" List all the blobs associated with the given prefix. """
container = self._get_container_client(self.blob_container_name)
return [blob.name for blob in container.list_blobs(name_starts_with=prefix)]
def print_entities(self, entities, columns=None):
keys = []
for e in entities:
for k in e:
if k not in keys and k != 'PartitionKey' and k != 'RowKey':
if columns is None or k in columns:
keys += [k]
# so we can convert to .csv format
print(", ".join(keys))
for e in entities:
for k in keys:
if k in e:
x = e[k]
if isinstance(x, EntityProperty) and x.edm_type is EdmType.INT64:
x = x.value
v = str(x).replace(',', ' ').replace('\r\n', ' ').replace('\n', ' ').replace('\r', ' ')
print(f"{v}", end='')
print(', ', end='')
print()
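# A minimal sketch of the lock/unlock worker pattern described in the class
# docstring, assuming a valid connection string and an existing status row.
# The status values and the 'epochs' column below are illustrative.
def _example_worker_loop(con_str, name):
    storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
    store = ArchaiStore(storage_account_name, storage_account_key)
    e = store.lock(name, 'training')
    if e is None:
        return  # another node already owns this row
    try:
        e['epochs'] = 10  # record arbitrary progress/metric columns
        e['status'] = 'complete'
        store.merge_status_entity(e)
    finally:
        store.unlock_entity(e)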
def status(con_str, args):
parser = argparse.ArgumentParser(description='Print status in .csv format')
parser.add_argument('--status', help='Optional match for the status column (default None).')
parser.add_argument('--name', help='Optional name of single status row to return (default None).')
parser.add_argument('--not_equal', '-ne', help='Switch the match to a not-equal comparison.', action="store_true")
parser.add_argument('--locked', help='Find entities that are locked by a node.', action="store_true")
parser.add_argument('--cols', help='Comma separated list of columns to report (default is to print all)')
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
if args.name is not None:
e = store.get_existing_status(args.name)
if e is None:
entities = []
else:
entities = [e]
else:
entities = store.get_all_status_entities(args.status, args.not_equal)
if args.locked:
entities = [e for e in entities if 'node' in e and e['node']]
if args.name:
entities = [e for e in entities if 'name' in e and e['name'] == args.name]
columns = None
if args.cols:
columns = [x.strip() for x in args.cols.split(',')]
store.print_entities(entities, columns)
def upload(con_str, args):
parser = argparse.ArgumentParser(description='Upload a named model (and optional accompanying files) to azure blob store')
parser.add_argument('name', help='Friendly name of the folder to put this in.')
parser.add_argument('file', help='Path to the file to upload to Azure ' +
'or a folder to upload all files in that folder to the same azure blob folder.')
parser.add_argument('--priority', type=int, help='Optional priority override for this job. ' +
'Larger numbers mean lower priority')
parser.add_argument('--reset', help='Reset stats for the model if it exists already.', action="store_true")
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
store.upload(args.name, args.file, args.reset, priority=args.priority)
def batch_upload(con_str, args):
    parser = argparse.ArgumentParser(description='Upload a set of *.onnx files to the azure blob store ' +
'using the file name of each onnx file as the friendly folder name in the store.')
parser.add_argument('path', help='Path to the folder containing onnx files to upload to Azure ')
parser.add_argument('--override', help='Allow overriding existing models in the store.', action="store_true")
parser.add_argument('--reset', help='Reset stats for any models we are overriding.', action="store_true")
parser.add_argument('--priority', type=int, help='Optional priority override for these jobs. ' +
'Larger numbers mean lower priority')
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
    store.batch_upload(args.path, override=args.override, reset=args.reset, priority=args.priority)
def download(con_str, args):
parser = argparse.ArgumentParser(
description="Download assets from azure blob store using friendly name.")
    parser.add_argument('--name', help='Friendly name of model to download (if not provided it downloads them all)')
parser.add_argument('--file', help='The optional name of the files to download instead of getting them all.')
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
friendly_name = args.name
if not friendly_name:
friendly_names = [e['name'] for e in store.get_all_status_entities()]
else:
friendly_names = [friendly_name]
specific_file = args.file
for friendly_name in friendly_names:
downloaded = store.download(friendly_name, friendly_name, specific_file)
if len(downloaded) == 0 and specific_file:
print(f"file {specific_file} not found")
def delete(con_str, args):
parser = argparse.ArgumentParser(description='Delete a model from azure using its friendly name')
parser.add_argument('name', help='The friendly name allocated by the upload script.')
parser.add_argument('--file', help='Delete just the one file associated with the friendly name.')
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
store.delete_blobs(args.name, args.file)
if not args.file:
store.delete_status(args.name)
def reset(con_str, args):
parser = argparse.ArgumentParser(
description='Reset the named entity.')
parser.add_argument('name', help='The friendly name to reset or "*" to reset all rows', default=None)
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
if args.name == "*":
store.reset_all()
else:
store.reset(args.name)
def unlock(con_str, args):
parser = argparse.ArgumentParser(
description='Unlock all jobs for given node or unlock all jobs.')
parser.add_argument('--node', help='Optional node name (default None).')
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
store.unlock_all(args.node)
def lock(con_str, args):
parser = argparse.ArgumentParser(
description='Lock the named entity.')
parser.add_argument('name', help='The name of the entity to lock')
parser.add_argument('--status', help='The new status of the entity (default "busy")', default='busy')
parser.add_argument('--table', help='Table name to use (default "status")', default='status')
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=args.table)
store.lock(args.name, args.status)
if __name__ == '__main__':
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
if len(sys.argv) <= 1:
print("Expecting a command, one of 'status', 'upload', 'batch_upload', 'delete', 'download', 'reset', 'unlock'")
sys.exit(1)
cmd = sys.argv[1]
args = sys.argv[2:]
if cmd == 'status':
status(con_str, args)
elif cmd == 'upload':
upload(con_str, args)
elif cmd == 'batch_upload':
batch_upload(con_str, args)
elif cmd == 'download':
download(con_str, args)
elif cmd == 'delete':
delete(con_str, args)
elif cmd == 'reset':
reset(con_str, args)
elif cmd == 'unlock':
unlock(con_str, args)
elif cmd == 'lock':
lock(con_str, args)
else:
print(f"Unknown command: {cmd}, expecting one of status, upload, download, delete, lock, unlock")
sys.exit(1)
|
archai/archai/common/store.py/0
|
{
"file_path": "archai/archai/common/store.py",
"repo_id": "archai",
"token_count": 13684
}
| 319 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import KMNIST, MNIST, QMNIST, FashionMNIST
from torchvision.transforms import ToTensor
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
class MnistDatasetProvider(DatasetProvider):
"""MNIST-based dataset provider."""
SUPPORTED_DATASETS = {
"fashion_mnist": FashionMNIST,
"kmnist": KMNIST,
"mnist": MNIST,
"qmnist": QMNIST,
}
def __init__(
self,
dataset: Optional[str] = "mnist",
root: Optional[str] = "dataroot",
) -> None:
"""Initialize MNIST-based dataset provider.
Args:
dataset: Name of dataset.
            root: Root directory where the dataset is saved.
"""
super().__init__()
assert dataset in self.SUPPORTED_DATASETS, f"`dataset` should be one of: {list(self.SUPPORTED_DATASETS)}"
self.dataset = dataset
self.root = root
@overrides
def get_train_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return self.SUPPORTED_DATASETS[self.dataset](
self.root,
train=True,
download=True,
transform=transform or ToTensor(),
target_transform=target_transform,
)
@overrides
def get_val_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return self.SUPPORTED_DATASETS[self.dataset](
self.root,
train=False,
download=True,
transform=transform or ToTensor(),
target_transform=target_transform,
)
@overrides
def get_test_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn(f"Testing set not available for `{self.dataset}`. Returning validation set ...")
return self.get_val_dataset(transform=transform, target_transform=target_transform)
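# A minimal usage sketch; the dataset name and root directory below are
# illustrative, and torchvision downloads the data on first use.
def _example_mnist_provider():
    provider = MnistDatasetProvider(dataset="mnist", root="dataroot")
    train_dataset = provider.get_train_dataset()
    val_dataset = provider.get_val_dataset()
    image, label = train_dataset[0]  # ToTensor() is applied by default
    return image.shape, label, len(val_dataset)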
|
archai/archai/datasets/cv/mnist_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/cv/mnist_dataset_provider.py",
"repo_id": "archai",
"token_count": 1031
}
| 320 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
from overrides import overrides
from archai.api.dataset_provider import DatasetProvider
from archai.common.distributed_utils import sync_workers
from archai.datasets.nlp.nvidia_dataset_provider_utils import Corpus
class NvidiaDatasetProvider(DatasetProvider):
"""NVIDIA dataset provider."""
def __init__(
self,
dataset_name: Optional[str] = "wt103",
dataset_dir: Optional[str] = "",
cache_dir: Optional[str] = "cache",
vocab_type: Optional[str] = "gpt2",
vocab_size: Optional[int] = None,
refresh_cache: Optional[bool] = False,
) -> None:
"""Initialize NVIDIA dataset provider.
Args:
dataset_name: Name of the dataset.
dataset_dir: Dataset folder.
cache_dir: Path to the cache folder.
vocab_type: Type of vocabulary/tokenizer.
vocab_size: Vocabulary size.
refresh_cache: Whether cache should be refreshed.
"""
super().__init__()
self.corpus = Corpus(
dataset_name, dataset_dir, cache_dir, vocab_type, vocab_size=vocab_size, refresh_cache=refresh_cache
)
if not self.corpus.load():
self.corpus.train_and_encode()
with sync_workers() as rank:
if rank == 0 and dataset_name != "lm1b":
self.corpus.save_cache()
@overrides
def get_train_dataset(self) -> List[int]:
return self.corpus.train
@overrides
def get_val_dataset(self) -> List[int]:
return self.corpus.valid
@overrides
def get_test_dataset(self) -> List[int]:
return self.corpus.test
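# A minimal usage sketch; the dataset name and directories below are illustrative
# assumptions and must point at a corpus prepared in the expected NVIDIA data layout.
def _example_nvidia_provider():
    provider = NvidiaDatasetProvider(
        dataset_name="wt103", dataset_dir="dataroot/wikitext-103", cache_dir="cache"
    )
    train_ids = provider.get_train_dataset()  # token ids encoded with the corpus vocab
    return len(train_ids)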
|
archai/archai/datasets/nlp/nvidia_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/nlp/nvidia_dataset_provider.py",
"repo_id": "archai",
"token_count": 773
}
| 321 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.api.dataset_provider import DatasetProvider
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import ModelEvaluator, AsyncModelEvaluator
from archai.discrete_search.api.predictor import MeanVar, Predictor
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.api.searcher import Searcher
from archai.discrete_search.api.search_space import (
DiscreteSearchSpace, EvolutionarySearchSpace,
BayesOptSearchSpace
)
__all__ = [
'DatasetProvider', 'ArchaiModel', 'ModelEvaluator', 'AsyncModelEvaluator', 'MeanVar',
'Predictor', 'SearchObjectives', 'Searcher', 'DiscreteSearchSpace',
'EvolutionarySearchSpace', 'BayesOptSearchSpace'
]
|
archai/archai/discrete_search/api/__init__.py/0
|
{
"file_path": "archai/archai/discrete_search/api/__init__.py",
"repo_id": "archai",
"token_count": 275
}
| 322 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any, Dict, List, Optional, Tuple, Union
import onnxruntime as rt
import torch
from overrides import overrides
from archai.common.timing import MeasureBlockTime
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import ModelEvaluator
from archai.common.file_utils import TemporaryFiles
class AvgOnnxLatency(ModelEvaluator):
"""Evaluate the average ONNX Latency (in seconds) of an architecture.
The latency is measured by running the model on random inputs and averaging the latency over
`num_trials` trials.
"""
def __init__(
self,
input_shape: Union[Tuple[int, ...], List[Tuple[int, ...]]],
num_trials: Optional[int] = 1,
input_dtype: Optional[str] = "torch.FloatTensor",
rand_range: Optional[Tuple[float, float]] = (0.0, 1.0),
export_kwargs: Optional[Dict[str, Any]] = None,
device: Optional[str] = 'cpu',
inf_session_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the evaluator.
Args:
input_shape: Input shape(s) of the model. If a list of shapes is provided,
the model is assumed to have multiple inputs.
num_trials: Number of trials to run.
input_dtype: Data type of the input.
rand_range: Range of random values to use for the input.
            export_kwargs: Keyword arguments to pass to `torch.onnx.export`.
            device: Device to run the benchmarked model on ('cpu' or 'gpu').
            inf_session_kwargs: Keyword arguments to pass to `onnxruntime.InferenceSession`.
"""
input_shapes = [input_shape] if isinstance(input_shape, tuple) else input_shape
rand_min, rand_max = rand_range
self.sample_input = tuple(
[
((rand_max - rand_min) * torch.rand(*input_shape) + rand_min).type(input_dtype)
for input_shape in input_shapes
]
)
self.input_dtype = input_dtype
self.rand_range = rand_range
self.num_trials = num_trials
self.export_kwargs = export_kwargs or dict()
self.inf_session_kwargs = inf_session_kwargs or dict()
self.device = device
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
model.arch.to("cpu")
# Exports model to ONNX
with TemporaryFiles() as tmp_file:
onnx_file = tmp_file.get_temp_file()
torch.onnx.export(
model.arch,
self.sample_input,
onnx_file,
input_names=[f"input_{i}" for i in range(len(self.sample_input))],
**self.export_kwargs,
)
# Benchmarks ONNX model
onnx_device = "CUDAExecutionProvider" if self.device == 'gpu' else "CPUExecutionProvider"
onnx_session = rt.InferenceSession(onnx_file, providers=[onnx_device], **self.inf_session_kwargs)
sample_input = {f"input_{i}": inp.numpy() for i, inp in enumerate(self.sample_input)}
inf_times = []
for _ in range(self.num_trials):
with MeasureBlockTime("onnx_inference") as t:
onnx_session.run(None, input_feed=sample_input)
inf_times.append(t.elapsed)
return sum(inf_times) / self.num_trials
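# A minimal usage sketch; the torchvision model and input shape below are
# illustrative stand-ins for an ArchaiModel produced by a search space.
def _example_onnx_latency():
    from torchvision.models import resnet18
    model = ArchaiModel(arch=resnet18(), archid="resnet18-example")
    evaluator = AvgOnnxLatency(input_shape=(1, 3, 224, 224), num_trials=3)
    return evaluator.evaluate(model)  # average latency in seconds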
|
archai/archai/discrete_search/evaluators/onnx_model.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/onnx_model.py",
"repo_id": "archai",
"token_count": 1514
}
| 323 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import itertools
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from random import Random
from typing import Any, Dict, List, Optional, Tuple
from archai.discrete_search.search_spaces.config.arch_config import (
ArchConfig,
build_arch_config,
)
from archai.discrete_search.search_spaces.config.discrete_choice import DiscreteChoice
from archai.discrete_search.search_spaces.config.utils import flatten_dict, replace_ptree_choices, order_dict_keys
class ArchParamTree:
"""Tree of architecture parameters."""
def __init__(self, config_tree: Dict[str, Any]) -> None:
"""Initialize the class.
Args:
config_tree: Tree of architecture parameters.
"""
self.config_tree = deepcopy(config_tree)
self.params, self.constants = self._init_tree(config_tree)
@property
def num_archs(self) -> int:
"""Return the number of architectures in the search space."""
param_dict = self.to_dict(flatten=True, deduplicate_params=True, remove_constants=True)
num_options = [float(len(p.choices)) for p in param_dict.values()]
return reduce(lambda a, b: a*b, num_options, 1)
def _init_tree(self, config_tree: Dict[str, Any]) -> Tuple[OrderedDict, OrderedDict]:
params, constants = OrderedDict(), OrderedDict()
for param_name, param in config_tree.items():
if isinstance(param, DiscreteChoice):
params[param_name] = param
elif isinstance(param, dict):
params[param_name] = ArchParamTree(param)
else:
constants[param_name] = param
return params, constants
def _to_dict(
self, prefix: str, flatten: bool, dedup_param_ids: Optional[set] = None, remove_constants: Optional[bool] = True
) -> OrderedDict:
prefix = f"{prefix}." if prefix else prefix
output_dict = OrderedDict()
# if not `remove_constants`, initializes the output
# dictionary with constants first
if not remove_constants:
output_dict = OrderedDict(
[
(prefix + c_name if flatten else c_name, c_value)
for c_name, c_value in deepcopy(self.constants).items()
]
)
# Adds architecture parameters to the output dictionary
for param_name, param in self.params.items():
param_name = prefix + str(param_name) if flatten else str(param_name)
if isinstance(param, ArchParamTree):
param_dict = param._to_dict(param_name, flatten, dedup_param_ids, remove_constants)
if flatten:
output_dict.update(param_dict)
else:
output_dict[param_name] = param_dict
elif isinstance(param, DiscreteChoice):
if dedup_param_ids is None:
output_dict[param_name] = param
elif id(param) not in dedup_param_ids:
output_dict[param_name] = param
dedup_param_ids.add(id(param))
return output_dict
def to_dict(
self,
flatten: Optional[bool] = False,
deduplicate_params: Optional[bool] = False,
remove_constants: Optional[bool] = False,
) -> OrderedDict:
"""Convert the `ArchParamTree` to an ordered dictionary.
Args:
flatten: If the output dictionary should be flattened.
deduplicate_params: Removes duplicate architecture parameters.
remove_constants: Removes attributes that are not architecture params from
the output dictionary.
Returns:
Ordered dictionary of architecture parameters.
"""
return self._to_dict("", flatten, set() if deduplicate_params else None, remove_constants)
def sample_config(self, rng: Optional[Random] = None) -> ArchConfig:
"""Sample an architecture config from the search param tree.
Args:
rng: Random number generator used during sampling. If set to `None`,
`random.Random()` is used.
Returns:
Sampled architecture config.
"""
rng = rng or Random()
choices_dict = replace_ptree_choices(self.to_dict(), lambda x: x.random_sample(rng))
return build_arch_config(choices_dict)
def get_param_name_list(self) -> List[str]:
"""Get list of parameter names in the search space.
Returns:
List of parameter names.
"""
param_dict = self.to_dict(flatten=True, deduplicate_params=True, remove_constants=True)
return list(param_dict.keys())
def encode_config(self, config: ArchConfig, track_unused_params: Optional[bool] = True) -> List[float]:
"""Encode an `ArchConfig` object into a fixed-length vector of features.
This method should be used after the model object is created.
Args:
config: Architecture configuration.
track_unused_params: If `track_unused_params=True`, parameters not used during
model creation (by calling `config.pick`) will be represented as `float("NaN")`.
Returns:
List of features.
"""
deduped_features = self.to_dict(flatten=True, deduplicate_params=True, remove_constants=True)
flat_config = flatten_dict(config._config_dict)
flat_used_params = flatten_dict(config.get_used_params())
# Reorder `flat_config` and `flat_used_params` to follow the order of `deduped_features`
flat_config = order_dict_keys(deduped_features, flat_config)
flat_used_params = order_dict_keys(deduped_features, flat_used_params)
# Build feature array
features = OrderedDict([
(k, deduped_features[k].encode(v))
for k, v in flat_config.items()
if k in deduped_features
])
# Replaces unused params with NaNs if necessary
if track_unused_params:
for feature_name, enc_param in features.items():
if not flat_used_params[feature_name]:
features[feature_name] = [float("NaN") for _ in enc_param]
# Flattens the feature array
return list(itertools.chain(*features.values()))
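# A minimal sketch of building a parameter tree, sampling a configuration and
# encoding it back into a feature vector; the parameter names and choices below
# are illustrative.
def _example_param_tree():
    tree = ArchParamTree({
        "num_layers": DiscreteChoice([2, 4, 8]),
        "block": {
            "kernel_size": DiscreteChoice([3, 5, 7]),
            "activation": "relu",  # a constant, not an architecture parameter
        },
    })
    config = tree.sample_config(rng=Random(1))
    num_layers = config.pick("num_layers")  # marks the parameter as used
    features = tree.encode_config(config, track_unused_params=True)
    return num_layers, features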
|
archai/archai/discrete_search/search_spaces/config/arch_param_tree.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/config/arch_param_tree.py",
"repo_id": "archai",
"token_count": 2712
}
| 324 |
'''
Adapted from https://github.com/ctlllll/SGConv
'''
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig
from einops import rearrange
import opt_einsum as oe
from archai.discrete_search.search_spaces.config import ArchConfig
from ..utils import get_optim_flag
try:
from .fftconv_ import fftconv_func
except ImportError:
fftconv_func = None
optimized = True
if optimized:
contract = oe.contract
else:
contract = torch.einsum
def get_initializer(name, activation=None):
if activation in [ None, 'id', 'identity', 'linear', 'modrelu' ]:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
elif activation in ['gelu', 'swish', 'silu']:
nonlinearity = 'relu' # Close to ReLU so approximate with ReLU's gain
else:
raise NotImplementedError(f"get_initializer: activation {activation} not supported")
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
return initializer
class modrelu(nn.Module):
def __init__(self, features):
# For now we just support square layers
super(modrelu, self).__init__()
self.features = features
self.b = nn.Parameter(torch.Tensor(self.features))
self.reset_parameters()
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.01)
def forward(self, inputs):
norm = torch.abs(inputs)
biased_norm = norm + self.b
magnitude = nn.functional.relu(biased_norm)
phase = torch.sign(inputs)
return phase * magnitude
class Modrelu(modrelu):
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.01)
class TransposedLinear(nn.Module):
""" Linear module on the second-to-last dimension
Assumes shape (B, D, L), where L can be 1 or more axis
"""
def __init__(self, d_input, d_output, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.empty(d_output, d_input))
# nn.Linear default init
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
# nn.init.kaiming_uniform_(self.weight, nonlinearity='linear') # should be equivalent
if bias:
self.bias = nn.Parameter(torch.empty(d_output))
bound = 1 / math.sqrt(d_input)
nn.init.uniform_(self.bias, -bound, bound)
setattr(self.bias, "_optim", {"weight_decay": 0.0})
else:
self.bias = 0.0
def forward(self, x):
num_axis = len(x.shape[2:]) # num_axis in L, for broadcasting bias
y = contract('b u ..., v u -> b v ...', x, self.weight) + \
self.bias.view(-1, *[1]*num_axis)
return y
class TransposedLN(nn.Module):
""" LayerNorm module over second dimension
Assumes shape (B, D, L), where L can be 1 or more axis
    This is slow and a dedicated CUDA/Triton implementation should provide substantial end-to-end speedup
"""
def __init__(self, d, scalar=True):
super().__init__()
self.scalar = scalar
if self.scalar:
self.m = nn.Parameter(torch.zeros(1))
self.s = nn.Parameter(torch.ones(1))
setattr(self.m, "_optim", {"weight_decay": 0.0})
setattr(self.s, "_optim", {"weight_decay": 0.0})
else:
self.ln = nn.LayerNorm(d)
def forward(self, x):
if self.scalar:
# calc. stats over D dim / channels
s, m = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
y = (self.s/s) * (x-m+self.m)
else:
# move channel to last axis, apply layer_norm, then move channel back to second axis
_x = self.ln(rearrange(x, 'b d ... -> b ... d'))
y = rearrange(_x, 'b ... d -> b d ...')
return y
def Activation(activation=None, size=None, dim=-1):
if activation in [None, 'id', 'identity', 'linear']:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU(approximate='none')
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation == 'sigmoid':
return nn.Sigmoid()
elif activation == 'modrelu':
return Modrelu(size)
elif activation == 'ln':
return TransposedLN(dim)
else:
raise NotImplementedError(
"hidden activation '{}' is not implemented".format(activation))
def LinearActivation(
d_input, d_output, bias=True,
zero_bias_init=False,
transposed=False,
initializer=None,
activation=None,
activate=False, # Apply activation as part of this module
weight_norm=False,
**kwargs,
):
""" Returns a linear nn.Module with control over axes order, initialization, and activation """
# Construct core module
# linear_cls = partial(nn.Conv1d, kernel_size=1) if transposed else nn.Linear
linear_cls = TransposedLinear if transposed else nn.Linear
if activation == 'glu':
d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
# Initialize weight
if initializer is not None:
get_initializer(initializer, activation)(linear.weight)
# Initialize bias
if bias and zero_bias_init:
nn.init.zeros_(linear.bias)
# Weight norm
if weight_norm:
linear = nn.utils.weight_norm(linear)
if activate and activation is not None:
activation = Activation(activation, d_output,
dim=1 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
class Normalization(nn.Module):
def __init__(
self,
d,
transposed=False, # Length dimension is -1 or -2
_name_='layer',
**kwargs
):
super().__init__()
self.transposed = transposed
self._name_ = _name_
if _name_ == 'layer':
self.channel = True # Normalize over channel dimension
if self.transposed:
self.norm = TransposedLN(d, **kwargs)
else:
self.norm = nn.LayerNorm(d, **kwargs)
elif _name_ == 'instance':
self.channel = False
norm_args = {'affine': False, 'track_running_stats': False}
norm_args.update(kwargs)
self.norm = nn.InstanceNorm1d(d, **norm_args) # (True, True) performs very poorly
elif _name_ == 'batch':
self.channel = False
norm_args = {'affine': True, 'track_running_stats': True}
norm_args.update(kwargs)
self.norm = nn.BatchNorm1d(d, **norm_args)
elif _name_ == 'group':
self.channel = False
            self.norm = nn.GroupNorm(1, d, **kwargs)
elif _name_ == 'none':
self.channel = True
self.norm = nn.Identity()
else: raise NotImplementedError
def forward(self, x):
# Handle higher dimension logic
shape = x.shape
if self.transposed:
x = rearrange(x, 'b d ... -> b d (...)')
else:
            x = rearrange(x, 'b ... d -> b (...) d')
# The cases of LayerNorm / no normalization are automatically handled in all cases
# Instance/Batch Norm work automatically with transposed axes
if self.channel or self.transposed:
x = self.norm(x)
else:
x = x.transpose(-1, -2)
x = self.norm(x)
x = x.transpose(-1, -2)
x = x.view(shape)
return x
def step(self, x, **kwargs):
assert self._name_ in ["layer", "none"]
if self.transposed: x = x.unsqueeze(-1)
x = self.forward(x)
if self.transposed: x = x.squeeze(-1)
return x
class GConv(nn.Module):
requires_length = True
def __init__(
self,
d_model,
d_state=64,
l_max=1, # Maximum length of sequence. Fine if not provided: the kernel will keep doubling in length until longer than sequence. However, this can be marginally slower if the true length is not a power of 2
channels=1, # maps 1-dim to C-dim
bidirectional=False,
# Arguments for FF
activation='gelu', # activation in between SS and FF
ln=False, # Extra normalization
postact=None, # activation after FF
initializer=None, # initializer on FF
weight_norm=False, # weight normalization on FF
hyper_act=None, # Use a "hypernetwork" multiplication
use_fast_fftconv=False,
dropout=0.0,
transposed=True, # axis ordering (B, L, D) or (B, D, L)
verbose=False,
shift=False,
linear=False,
mode="cat_randn",
# SSM Kernel arguments
**kernel_args,
):
"""
d_state: the dimension of the state, also denoted by N
l_max: the maximum sequence length, also denoted by L
if this is not known at model creation, set l_max=1
channels: can be interpreted as a number of "heads"
bidirectional: bidirectional
dropout: standard dropout argument
transposed: choose backbone axis ordering of (B, L, H) or (B, H, L) [B=batch size, L=sequence length, H=hidden dimension]
Other options are all experimental and should not need to be configured
"""
super().__init__()
self.h = d_model
self.n = d_state
self.bidirectional = bidirectional
self.ln = ln
self.channels = channels
self.transposed = transposed
self.shift = shift
self.linear = linear
self.mode = mode
self.l_max = l_max
self.verbose = verbose
self.use_fast_fftconv = use_fast_fftconv
if self.use_fast_fftconv:
assert fftconv_func is not None, 'Need to install fftconv'
assert self.channels == 1, 'channel must be 1 for fast FFTConv'
# optional multiplicative modulation GLU-style
# https://arxiv.org/abs/2002.05202
self.hyper = hyper_act is not None
if self.hyper:
channels *= 2
self.hyper_activation = Activation(hyper_act)
self.D = nn.Parameter(torch.randn(channels, self.h))
if self.bidirectional:
channels *= 2
# Pointwise
if not self.linear:
self.activation = Activation(activation)
dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout
self.dropout = dropout_fn(
dropout) if dropout > 0.0 else nn.Identity()
if self.ln:
self.norm = Normalization(
self.h*self.channels, transposed=transposed)
else:
self.norm = nn.Identity()
# position-wise output transform to mix features
if not self.linear:
self.output_linear = LinearActivation(
self.h*self.channels,
self.h,
transposed=self.transposed,
initializer=initializer,
activation=postact,
activate=True,
weight_norm=weight_norm,
)
self.init_scale = kernel_args.get('init_scale', 0)
self.kernel_dim = kernel_args.get('kernel_dim', 64)
self.num_scales = kernel_args.get(
'n_scales', 1+math.ceil(math.log2(l_max/self.kernel_dim))-self.init_scale)
if self.num_scales is None:
self.num_scales = 1 + \
math.ceil(math.log2(l_max/self.kernel_dim)) - self.init_scale
self.kernel_list = nn.ParameterList()
decay_min = kernel_args.get('decay_min', 2)
decay_max = kernel_args.get('decay_max', 2)
for _ in range(self.num_scales):
if 'randn' in mode:
kernel = nn.Parameter(torch.randn(
channels, self.h, self.kernel_dim))
elif 'cos' in mode:
kernel = nn.Parameter(torch.cat([torch.cos(torch.linspace(0, 2*i*math.pi, self.kernel_dim)).expand(
channels, 1, self.kernel_dim) for i in range(self.h)], dim=1)[:, torch.randperm(self.h), :])
else:
raise ValueError(f"Unknown mode {mode}")
kernel._optim = {
'lr': kernel_args.get('lr', 0.001),
}
self.kernel_list.append(kernel)
if 'learnable' in mode:
self.decay = nn.Parameter(torch.rand(
self.h) * (decay_max - decay_min) + decay_min)
if 'fixed' in mode:
self.decay.requires_grad = False
else:
self.decay._optim = {
'lr': kernel_args.get('lr', 0.001),
}
self.register_buffer('multiplier', torch.tensor(1.0))
else:
self.register_buffer('multiplier', torch.linspace(
decay_min, decay_max, self.h).view(1, -1, 1))
self.register_buffer('kernel_norm', torch.ones(self.channels, self.h, 1))
self.register_buffer('kernel_norm_initialized',
torch.tensor(0, dtype=torch.bool))
def fft_conv(self, u, k, L):
if self.use_fast_fftconv:
k = rearrange(k, '1 h l -> h l')
dropout_mask = None
# No GeLU after the SSM
# We want output_hbl=True so that y has the same layout as u
y = fftconv_func(u, k, self.D.squeeze(0), dropout_mask, False, False, True)
y = rearrange(rearrange(y, 'b h l -> h b l'), 'h b l -> b h l')
# y = rearrange(y, 'b h l -> b 1 h l')
return y
k_f = torch.fft.rfft(k, n=2*L) # (C H L)
u_f = torch.fft.rfft(u, n=2*L) # (B H L)
# k_f.unsqueeze(-4) * u_f.unsqueeze(-3) # (B C H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
y = torch.fft.irfft(y_f, n=2*L)[..., :L] # (B C H L)
# Compute D term in state space equation - essentially a skip connection
y = y + contract('bhl,ch->bchl', u, self.D)
# Reshape to flatten channels
return rearrange(y, '... c h l -> ... (c h) l')
def forward(self, u, return_kernel=False):
"""
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed unless you know what you're doing
Returns: same shape as u
"""
if not self.transposed:
u = u.transpose(-1, -2)
L = u.size(-1)
if self.use_fast_fftconv and L % 2 != 0:
u = F.pad(u, (0, 1))
kernel_list = []
interpolate_mode = 'nearest' if 'nearest' in self.mode else 'linear'
multiplier = self.multiplier
if 'sum' in self.mode:
for i in range(self.num_scales):
kernel = F.pad(
F.interpolate(
self.kernel_list[i],
scale_factor=2**(i+self.init_scale),
mode=interpolate_mode,
),
(0, self.kernel_dim*2**(self.num_scales-1+self.init_scale) -
self.kernel_dim*2**(i+self.init_scale)),
) * multiplier ** (self.num_scales - i - 1)
kernel_list.append(kernel)
k = sum(kernel_list)
elif 'cat' in self.mode:
for i in range(self.num_scales):
kernel = F.interpolate(
self.kernel_list[i],
scale_factor=2**(max(0, i-1)+self.init_scale),
mode=interpolate_mode,
) * multiplier ** (self.num_scales - i - 1)
kernel_list.append(kernel)
k = torch.cat(kernel_list, dim=-1)
else:
raise ValueError(f"Unknown mode {self.mode}")
if 'learnable' in self.mode:
k = k * torch.exp(-self.decay.view(1, -1, 1)*torch.log(
torch.arange(k.size(-1), device=k.device)+1).view(1, 1, -1))
if not self.kernel_norm_initialized:
self.kernel_norm = k.norm(dim=-1, keepdim=True).detach()
self.kernel_norm_initialized = torch.tensor(
1, dtype=torch.bool, device=k.device)
if self.verbose:
print(f"Kernel norm: {self.kernel_norm.mean()}")
print(f"Kernel size: {k.size()}")
if k.size(-1) > L:
k = k[..., :L]
elif k.size(-1) < L:
k = F.pad(k, (0, L - k.size(-1)))
k = k / self.kernel_norm # * (L / self.l_max) ** 0.5
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
k = F.pad(k0, (0, L)) \
                + F.pad(k1.flip(-1), (L, 0))
y = self.fft_conv(u, k, L)
if not self.linear:
y = self.dropout(self.activation(y))
if not self.transposed:
y = y.transpose(-1, -2)
if not self.linear:
y = self.norm(y)
y = self.output_linear(y)
if return_kernel:
return y, k
return y, None
@property
def d_state(self):
return self.h * self.n
@property
def d_output(self):
return self.h
@property
def state_to_tensor(self):
        return lambda state: rearrange(state, '... h n -> ... (h n)')
class SGConv(nn.Module):
def __init__(self, arch_config: ArchConfig, hidden_size: int,
total_heads: int, op_heads: int,
hf_config: PretrainedConfig, **kwargs):
super().__init__()
assert hidden_size % total_heads == 0
self.hidden_size = hidden_size
self.total_heads = total_heads
self.op_heads = op_heads
# Architecture params
self.kernel_size = arch_config.pick('kernel_size')
self.use_fast_fftconv = get_optim_flag(hf_config, 'fast_fftconv')
self.channels = 1
self.op_size = op_heads * (hidden_size // total_heads)
self.in_proj = nn.Sequential(
nn.Linear(hidden_size, self.op_size * 2),
nn.GLU(dim=-1)
)
self.sgconv = GConv(
self.op_size, l_max=hf_config.max_position_embeddings,
channels=self.channels, kernel_dim=self.kernel_size,
use_fast_fftconv=self.use_fast_fftconv,
transposed=False, verbose=False
)
self.act = nn.GELU(approximate='none')
def forward(self, x: torch.Tensor, **kwargs):
output, _ = self.sgconv(self.in_proj(x))
return self.act(output), None
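# A minimal, self-contained sketch of the FFT-based causal convolution that
# GConv.fft_conv performs (without the multi-scale kernel assembly or the D
# skip term); the shapes below are illustrative and a direct convolution is
# used for comparison.
def _example_fft_causal_conv():
    B, H, L = 2, 4, 16
    u = torch.randn(B, H, L)
    k = torch.randn(H, L)
    # zero-padded FFT convolution, truncated back to length L (causal part)
    u_f = torch.fft.rfft(u, n=2 * L)
    k_f = torch.fft.rfft(k, n=2 * L)
    y = torch.fft.irfft(u_f * k_f, n=2 * L)[..., :L]
    # direct causal convolution for comparison: y[b, h, t] = sum_s k[h, s] * u[b, h, t - s]
    y_direct = torch.stack([
        F.conv1d(u[:, h:h + 1], k[h].flip(-1).view(1, 1, -1), padding=L - 1)[..., :L].squeeze(1)
        for h in range(H)
    ], dim=1)
    assert torch.allclose(y, y_direct, atol=1e-4)
    return y.shape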
if __name__ == '__main__':
B = 2 # batch size
H = 768 # d_model
L = 2048 # sequence length
device = 'cuda'
import torch.utils.benchmark as benchmark
flash_layer = GConv(d_model=H, l_max=L, kernel_dim=128, use_fast_fftconv=True, transposed=False).to(device)
layer = GConv(d_model=H, l_max=L, kernel_dim=128, use_fast_fftconv=False, transposed=False).to(device)
u = torch.randn(B, L, H, device=device, dtype=torch.float32, requires_grad=True)
t0 = benchmark.Timer(
stmt='flash_layer(u)',
globals={'flash_layer': flash_layer, 'u': u})
t1 = benchmark.Timer(
stmt='layer(u)',
globals={'layer': layer, 'u': u})
print(t0.timeit(100))
print(t1.timeit(100))
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/sgconv.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/sgconv.py",
"repo_id": "archai",
"token_count": 9690
}
| 325 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from typing import Any, Dict, Optional
import torch
import transformers
from archai.quantization.quantizers import FakeDynamicQuant
class FakeDynamicQuantHFConv1D(transformers.modeling_utils.Conv1D):
"""Translate a huggingface/transformers Conv1D layer into a QAT-ready Conv1D layer."""
_FLOAT_MODULE = transformers.modeling_utils.Conv1D
def __init__(
self,
*args,
dynamic_weight: Optional[bool] = True,
activation_reduce_range: Optional[bool] = True,
bits: Optional[int] = 8,
onnx_compatible: Optional[bool] = False,
qconfig: Optional[Dict[torch.nn.Module, Any]] = None,
**kwargs,
) -> None:
"""Initialize a fake quantized Conv1D layer.
Args:
dynamic_weight: Whether to use dynamic weights.
activation_reduce_range: Whether to reduce the range of activations.
bits: Number of quantization bits.
onnx_compatible: Whether quantization is compatible with ONNX.
qconfig: Quantization configuration.
"""
super().__init__(*args, **kwargs)
self.dynamic_weight = dynamic_weight
if dynamic_weight:
self.weight_fake_quant = FakeDynamicQuant(
dtype=torch.qint8,
reduce_range=False,
bits=bits,
onnx_compatible=onnx_compatible,
)
self.input_pre_process = FakeDynamicQuant(
reduce_range=activation_reduce_range,
bits=bits,
onnx_compatible=onnx_compatible,
)
@property
def fake_quant_weight(self) -> torch.Tensor:
"""Return a fake quantization over the weight matrix."""
return self.weight_fake_quant(self.weight)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.input_pre_process(x)
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.fake_quant_weight)
x = x.view(*size_out)
return x
@classmethod
def from_float(
cls: FakeDynamicQuantHFConv1D,
mod: torch.nn.Module,
qconfig: Optional[Dict[torch.nn.Module, Any]] = None,
activation_reduce_range: Optional[bool] = True,
**kwargs,
) -> FakeDynamicQuantHFConv1D:
"""Map module from float to QAT-ready.
Args:
mod: Module to be mapped.
qconfig: Quantization configuration.
activation_reduce_range: Whether to reduce the range of activations.
Returns:
QAT-ready module.
"""
assert type(mod) == cls._FLOAT_MODULE, (
" qat." + cls.__name__ + ".from_float only works for " + cls._FLOAT_MODULE.__name__
)
if not qconfig:
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
assert mod.qconfig, "Input float module must have a valid qconfig"
qconfig = mod.qconfig
qat_conv1d = cls(
mod.nf,
mod.weight.shape[0],
activation_reduce_range=activation_reduce_range,
qconfig=qconfig,
**kwargs,
)
qat_conv1d.weight = mod.weight
qat_conv1d.bias = mod.bias
return qat_conv1d
def to_float(self) -> torch.nn.Module:
"""Map module from QAT-ready to float.
Returns:
Float-based module.
"""
weight = self.weight_fake_quant(self.weight)
float_conv1d = transformers.modeling_utils.Conv1D(self.nf, self.weight.shape[0])
float_conv1d.weight = torch.nn.Parameter(weight)
float_conv1d.bias = self.bias
return float_conv1d
class FakeDynamicQuantHFConv1DForOnnx(FakeDynamicQuantHFConv1D):
"""Allow a QAT-ready huggingface/transformers Conv1D layer to be exported with ONNX."""
def __init__(self, *args, **kwargs):
"""Initialize a fake quantized Conv1D layer compatible with ONNX."""
kwargs["activation_reduce_range"] = False
kwargs["onnx_compatible"] = True
super().__init__(*args, **kwargs)
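# Illustrative usage sketch (not part of the original module): mapping a float Conv1D to its
# QAT-ready counterpart and back. The qconfig below is an assumption -- `from_float` only checks
# that a `qconfig` attribute exists on the module when none is passed explicitly.
#
#   float_conv1d = transformers.modeling_utils.Conv1D(nf=768, nx=768)
#   float_conv1d.qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")
#   qat_conv1d = FakeDynamicQuantHFConv1D.from_float(float_conv1d)
#   y = qat_conv1d(torch.randn(1, 4, 768))  # forward pass with fake-quantized weights/activations
#   restored = qat_conv1d.to_float()        # back to a plain Conv1D holding quantize-dequantized weights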
|
archai/archai/quantization/nlp/modules.py/0
|
{
"file_path": "archai/archai/quantization/nlp/modules.py",
"repo_id": "archai",
"token_count": 1899
}
| 326 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
from overrides import overrides
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.nas.arch_trainer import ArchTrainer
from archai.supergraph.nas.model import Model
from archai.supergraph.utils.checkpoint import CheckPoint
from archai.supergraph.utils.multi_optim import MultiOptim, OptimSched
logger = get_global_logger()
class DidartsArchTrainer(ArchTrainer):
"""Train network using different optimizers for alphas and other parameters"""
def __init__(self, conf_train: Config, model: Model,
checkpoint:Optional[CheckPoint]) -> None:
super().__init__(conf_train, model, checkpoint)
self._conf_alpha_optim = conf_train['alpha_optimizer']
self._conf_alpha_sched = conf_train['alpha_lr_schedule']
@overrides
def create_multi_optim(self, train_len:int)->MultiOptim:
        # optimizers, schedulers need to be recreated for each fit call
# as they have state specific to each run
optim = self.create_optimizer(self.conf_optim, self.model.nonarch_params(recurse=True))
# create scheduler for optim before applying amp
sched, sched_on_epoch = self.create_scheduler(self.conf_sched, optim, train_len)
alpha_optim = self.create_optimizer(self._conf_alpha_optim,
self.model.all_owned().param_by_kind(None))
alpha_sched, alpha_sched_on_epoch = self.create_scheduler(self._conf_alpha_sched, alpha_optim, train_len)
multi_optim = MultiOptim()
multi_optim.append(OptimSched(optim, sched, sched_on_epoch))
multi_optim.append(OptimSched(alpha_optim, alpha_sched, alpha_sched_on_epoch))
logger.info({'multi_optim_len': len(multi_optim)})
return multi_optim
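# Note (illustrative, not part of the original file): the weight optimizer/scheduler pair is
# appended first and the alpha pair second, so index 0 of the returned MultiOptim drives the
# non-architecture parameters while index 1 drives the architecture (alpha) parameters.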
|
archai/archai/supergraph/algos/didarts/didarts_arch_trainer.py/0
|
{
"file_path": "archai/archai/supergraph/algos/didarts/didarts_arch_trainer.py",
"repo_id": "archai",
"token_count": 719
}
| 327 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List, Tuple
from overrides import overrides
from archai.common.config import Config
from archai.supergraph.algos.gumbelsoftmax.gs_op import GsOp
from archai.supergraph.nas.model_desc import (
CellType,
ConvMacroParams,
EdgeDesc,
NodeDesc,
OpDesc,
TensorShape,
TensorShapes,
)
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.operations import Op
class GsModelDescBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('gs_op',
lambda op_desc, arch_params, affine:
GsOp(op_desc, arch_params, affine))
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
reduction = (cell_type==CellType.Reduction)
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
gs_num_sample = conf_cell['gs']['num_sample']
# add gs op for each edge
for i in range(node_count):
edges=[]
for j in range(i+2):
op_desc = OpDesc('gs_op',
params={
'conv': conv_params,
'stride': 2 if reduction and j < 2 else 1,
'gs_num_sample': gs_num_sample
}, in_len=1, trainables=None, children=None)
edge = EdgeDesc(op_desc, input_ids=[j])
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
|
archai/archai/supergraph/algos/gumbelsoftmax/gs_model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/gumbelsoftmax/gs_model_desc_builder.py",
"repo_id": "archai",
"token_count": 1058
}
| 328 |
"""Model specification for module connectivity individuals.
This module handles pruning the unused parts of the computation graph but should
avoid creating any TensorFlow models (this is done inside model_builder.py).
"""
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
from archai.supergraph.algos.nasbench101 import graph_util
# Graphviz is optional and only required for visualization.
try:
import graphviz # pylint: disable=g-import-not-at-top
except ImportError:
pass
class ModelSpec(object):
"""Model specification given adjacency matrix and labeling."""
def __init__(self, matrix, ops, data_format='channels_last'):
"""Initialize the module spec.
Args:
matrix: ndarray or nested list with shape [V, V] for the adjacency matrix.
ops: V-length list of labels for the base ops used. The first and last
elements are ignored because they are the input and output vertices
which have no operations. The elements are retained to keep consistent
indexing.
data_format: channels_last or channels_first.
Raises:
ValueError: invalid matrix or ops
"""
if not isinstance(matrix, np.ndarray):
matrix = np.array(matrix)
shape = np.shape(matrix)
if len(shape) != 2 or shape[0] != shape[1]:
raise ValueError('matrix must be square')
if shape[0] != len(ops):
raise ValueError('length of ops must match matrix dimensions')
if not is_upper_triangular(matrix):
raise ValueError('matrix must be upper triangular')
# Both the original and pruned matrices are deep copies of the matrix and
# ops so any changes to those after initialization are not recognized by the
# spec.
self.original_matrix = copy.deepcopy(matrix)
self.original_ops = copy.deepcopy(ops)
self.matrix = copy.deepcopy(matrix)
self.ops = copy.deepcopy(ops)
self.valid_spec = True
self._prune()
self.data_format = data_format
def _prune(self):
"""Prune the extraneous parts of the graph.
General procedure:
1) Remove parts of graph not connected to input.
2) Remove parts of graph not connected to output.
3) Reorder the vertices so that they are consecutive after steps 1 and 2.
These 3 steps can be combined by deleting the rows and columns of the
vertices that are not reachable from both the input and output (in reverse).
"""
num_vertices = np.shape(self.original_matrix)[0]
# DFS forward from input
visited_from_input = set([0])
frontier = [0]
while frontier:
top = frontier.pop()
for v in range(top + 1, num_vertices):
if self.original_matrix[top, v] and v not in visited_from_input:
visited_from_input.add(v)
frontier.append(v)
# DFS backward from output
visited_from_output = set([num_vertices - 1])
frontier = [num_vertices - 1]
while frontier:
top = frontier.pop()
for v in range(0, top):
if self.original_matrix[v, top] and v not in visited_from_output:
visited_from_output.add(v)
frontier.append(v)
# Any vertex that isn't connected to both input and output is extraneous to
# the computation graph.
extraneous = set(range(num_vertices)).difference(
visited_from_input.intersection(visited_from_output))
# If the non-extraneous graph is less than 2 vertices, the input is not
# connected to the output and the spec is invalid.
if len(extraneous) > num_vertices - 2:
self.matrix = None
self.ops = None
self.valid_spec = False
return
self.matrix = np.delete(self.matrix, list(extraneous), axis=0)
self.matrix = np.delete(self.matrix, list(extraneous), axis=1)
for index in sorted(extraneous, reverse=True):
del self.ops[index]
def hash_spec(self, canonical_ops):
"""Computes the isomorphism-invariant graph hash of this spec.
Args:
canonical_ops: list of operations in the canonical ordering which they
were assigned (i.e. the order provided in the config['available_ops']).
Returns:
MD5 hash of this spec which can be used to query the dataset.
"""
# Invert the operations back to integer label indices used in graph gen.
labeling = [-1] + [canonical_ops.index(op) for op in self.ops[1:-1]] + [-2]
return graph_util.hash_module(self.matrix, labeling)
def visualize(self):
"""Creates a dot graph. Can be visualized in colab directly."""
num_vertices = np.shape(self.matrix)[0]
g = graphviz.Digraph()
g.node(str(0), 'input')
for v in range(1, num_vertices - 1):
g.node(str(v), self.ops[v])
g.node(str(num_vertices - 1), 'output')
for src in range(num_vertices - 1):
for dst in range(src + 1, num_vertices):
if self.matrix[src, dst]:
g.edge(str(src), str(dst))
return g
def is_upper_triangular(matrix):
"""True if matrix is 0 on diagonal and below."""
for src in range(np.shape(matrix)[0]):
for dst in range(0, src + 1):
if matrix[src, dst] != 0:
return False
return True
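# Illustrative usage sketch (not part of the original file): building a tiny spec by hand.
# The op labels are assumptions -- any labels consistent with the benchmark's available ops work,
# as long as the first and last entries stand in for the input and output vertices.
#
#   matrix = [[0, 1, 1, 0],
#             [0, 0, 0, 1],
#             [0, 0, 0, 1],
#             [0, 0, 0, 0]]
#   ops = ['input', 'conv3x3-bn-relu', 'maxpool3x3', 'output']
#   spec = ModelSpec(matrix, ops)
#   assert spec.valid_spec  # vertices unreachable from input/output would have been pruned away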
|
archai/archai/supergraph/algos/nasbench101/model_spec.py/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/model_spec.py",
"repo_id": "archai",
"token_count": 1836
}
| 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math as ma
from typing import Optional
import torch
from overrides import overrides
from torch import Tensor, nn
from torch.optim.optimizer import Optimizer
from archai.common import ml_utils
from archai.common.common import get_conf
from archai.common.config import Config
from archai.supergraph.algos.xnas.xnas_op import XnasOp
from archai.supergraph.datasets import data
from archai.supergraph.nas.arch_trainer import ArchTrainer
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc import CellType
from archai.supergraph.utils.checkpoint import CheckPoint
class XnasArchTrainer(ArchTrainer):
def __init__(self, conf_train: Config, model: Model,
checkpoint: Optional[CheckPoint]) -> None:
super().__init__(conf_train, model, checkpoint)
self._conf_w_lossfn = conf_train['lossfn']
@overrides
def create_optimizer(self, conf_optim: Config, params) -> Optimizer:
# return optim that only operates on w, not alphas
return ml_utils.create_optimizer(conf_optim,
self.model.nonarch_params(recurse=True))
@overrides
def pre_fit(self, data_loaders:data.DataLoaders) -> None:
super().pre_fit(data_loaders)
        # optimizers, schedulers need to be recreated for each fit call
# as they have state
assert data_loaders.val_dl is not None
conf = get_conf()
self._train_batch = conf['nas']['search']['loader']['train_batch']
num_val_examples = len(data_loaders.val_dl) * self._train_batch
num_cells = conf['nas']['search']['model_desc']['n_cells']
num_reduction_cells = conf['nas']['search']['model_desc']['n_reductions']
num_normal_cells = num_cells - num_reduction_cells
num_primitives = len(XnasOp.PRIMITIVES)
assert num_cells > 0
assert num_reduction_cells > 0
assert num_normal_cells > 0
assert num_primitives > 0
self._normal_cell_effective_t = num_val_examples * self._epochs * num_normal_cells
self._reduction_cell_effective_t = num_val_examples * \
self._epochs * num_reduction_cells
self._normal_cell_lr = ma.sqrt(2 * ma.log(num_primitives) / (
self._normal_cell_effective_t * self._grad_clip * self._grad_clip))
self._reduction_cell_lr = ma.sqrt(2 * ma.log(num_primitives) / (
self._reduction_cell_effective_t * self._grad_clip * self._grad_clip))
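        # note: these step sizes follow the exponentiated-gradient schedule
        # eta = sqrt(2 * ln(K) / (T * G^2)), where K is the number of candidate primitives,
        # T the effective number of alpha updates and G the gradient bound enforced by clipping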
self._xnas_optim = _XnasOptimizer(self._normal_cell_lr, self._reduction_cell_lr, self._normal_cell_effective_t,
self._reduction_cell_effective_t, self._train_batch, self._grad_clip,
self._multi_optim, self._apex, self.model)
@overrides
def post_fit(self, data_loaders:data.DataLoaders) -> None:
# delete state we created in pre_fit
del self._xnas_optim
return super().post_fit(data_loaders)
@overrides
def pre_epoch(self, data_loaders:data.DataLoaders)->None:
super().pre_epoch(data_loaders)
# prep val set to train alphas
assert data_loaders.val_dl is not None
self._val_dl = data_loaders.val_dl
self._valid_iter = iter(data_loaders.val_dl) # type: ignore
@overrides
def post_epoch(self, data_loaders:data.DataLoaders)->None:
del self._val_dl
del self._valid_iter # clean up
super().post_epoch(data_loaders)
@overrides
def pre_step(self, x: Tensor, y: Tensor) -> None:
super().pre_step(x, y)
# reset val loader if we exhausted it
try:
x_val, y_val = next(self._valid_iter)
except StopIteration:
# reinit iterator
self._valid_iter = iter(self._val_dl)
x_val, y_val = next(self._valid_iter)
x_val, y_val = x_val.to(self.get_device()), y_val.to(
self.get_device(), non_blocking=True)
# update alphas
self._xnas_optim.step(x, y, x_val, y_val)
@overrides
def update_checkpoint(self, checkpoint: CheckPoint) -> None:
super().update_checkpoint(checkpoint)
class _XnasOptimizer:
def __init__(self, ncell_lr: float, rcell_lr: float,
ncell_effective_t: float, rcell_effective_t: float, train_batch: int,
grad_clip: float, optim, apex, model: Model) -> None:
self._ncell_lr = ncell_lr
self._rcell_lr = rcell_lr
self._ncell_effective_t = ncell_effective_t
self._rcell_effective_t = rcell_effective_t
self._train_batch = train_batch
self._grad_clip = grad_clip
self._optim = optim
self._apex = apex
self._lossfn = nn.CrossEntropyLoss()
# to keep track of where we are in effective updates
self._t_rcell = 0
self._t_ncell = 0
self._model = model # main model with respect to w and alpha
@staticmethod
def _get_loss(model, lossfn, x, y):
logits, *_ = model(x) # might also return aux tower logits
return lossfn(logits, y)
def step(self, x_train: Tensor, y_train: Tensor, x_valid: Tensor, y_valid: Tensor) -> None:
# put model in train mode just to be safe
self._model.train()
# XNAS authors told Liam Li et al that
# the updates are made per data point instead
# of at a batch level. While nn.CrossEntropyLoss
# can give back per data point losses by using reduction='none' option,
# loss.backward() can only deal with scalar losses. So for now trying
# to do this one data point at a time to see if that
# runs reasonably fast. If not the next thing to try is
# to get the per data point loss all at once and then
# try to do loss[i].backward() and update alphas
batch_size = x_valid.shape[0]
for i in range(batch_size):
x = torch.unsqueeze(x_valid[i,:], 0)
y = torch.unsqueeze(y_valid[i], 0)
# zero out gradients for safety
self._optim.zero_grad()
# put model through val data
loss = self._get_loss(self._model, self._lossfn, x, y)
# compute gradients
loss.backward()
# do grad clip
self._apex.clip_grad(self._grad_clip, self._model, self._optim)
# for each op in the model update alphas
for cell in self._model.cells:
if cell.desc.cell_type == CellType.Reduction:
lr = self._rcell_lr
T = self._rcell_effective_t
self._t_rcell += 1
t = self._t_rcell
elif cell.desc.cell_type == CellType.Regular:
lr = self._ncell_lr
T = self._ncell_effective_t
self._t_ncell += 1
t = self._t_ncell
else:
raise NotImplementedError
                # BUG: t needs to be corrected
for op in cell.ops():
op.update_alphas(lr, t, T, self._grad_clip)
|
archai/archai/supergraph/algos/xnas/xnas_arch_trainer.py/0
|
{
"file_path": "archai/archai/supergraph/algos/xnas/xnas_arch_trainer.py",
"repo_id": "archai",
"token_count": 3257
}
| 330 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torchvision
from overrides import overrides
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class FashionMnistProvider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainset = torchvision.datasets.FashionMNIST(root=self._dataroot,
train=True, download=True, transform=transform_train)
if load_test:
testset = torchvision.datasets.FashionMNIST(root=self._dataroot,
train=False, download=True, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
MEAN = [0.28604063146254594]
STD = [0.35302426207299326]
transf = [
transforms.RandomAffine(degrees=15, translate=(0.1, 0.1),
scale=(0.9, 1.1), shear=0.1),
transforms.RandomVerticalFlip()
]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(transf + normalize)
test_transform = transforms.Compose(normalize)
return train_transform, test_transform
register_dataset_provider('fashion_mnist', FashionMnistProvider)
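# Illustrative usage sketch (not part of the original file): `conf_dataset` only needs to behave
# like a dict-style Config exposing a 'dataroot' key, so how it is constructed is an assumption.
#
#   provider = FashionMnistProvider(conf_dataset)
#   train_tf, test_tf = provider.get_transforms(img_size=28)
#   trainset, testset = provider.get_datasets(True, True, train_tf, test_tf)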
|
archai/archai/supergraph/datasets/providers/fashion_mnist_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/fashion_mnist_provider.py",
"repo_id": "archai",
"token_count": 784
}
| 331 |
import os
import torch
import torch.nn as nn
__all__ = ['MobileNetV2', 'mobilenet_v2']
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, num_classes=10, width_mult=1.0):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
## CIFAR10
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 1], # Stride 2 -> 1 for CIFAR-10
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
## END
# building first layer
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * max(1.0, width_mult))
# CIFAR10: stride 2 -> 1
features = [ConvBNReLU(3, input_channel, stride=1)]
# END
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.features(x)
x = x.mean([2, 3])
x = self.classifier(x)
return x
def mobilenet_v2(pretrained=False, progress=True, device='cpu', **kwargs):
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MobileNetV2(**kwargs)
if pretrained:
script_dir = os.path.dirname(__file__)
state_dict = torch.load(script_dir+'/state_dicts/mobilenet_v2.pt', map_location=device)
model.load_state_dict(state_dict)
return model
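# Illustrative usage sketch (not part of the original file), assuming the CIFAR-style 32x32
# inputs implied by the stride changes above and no local pretrained state_dict:
#
#   model = mobilenet_v2(pretrained=False, num_classes=10)
#   logits = model(torch.randn(8, 3, 32, 32))  # -> tensor of shape (8, 10)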
|
archai/archai/supergraph/models/mobilenetv2.py/0
|
{
"file_path": "archai/archai/supergraph/models/mobilenetv2.py",
"repo_id": "archai",
"token_count": 2133
}
| 332 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Iterable, List, Optional
from overrides import EnforceOverrides, overrides
from torch import nn
from archai.supergraph.nas.arch_module import ArchModule
from archai.supergraph.nas.dag_edge import DagEdge
from archai.supergraph.nas.model_desc import CellDesc, NodeDesc
from archai.supergraph.nas.operations import Op
class Cell(ArchModule, EnforceOverrides):
def __init__(self, desc:CellDesc,
affine:bool, droppath:bool,
trainables_from:Optional['Cell']): # template cell, if any, to use for arch params
super().__init__()
# some of these members are public as finalizer needs access
self.desc = desc
# TODO: support any number of stems
assert len(desc.stems)==2, "Cell compiler currently only supports 2 stems"
self.s0_op = Op.create(desc.stems[0], affine=affine)
self.s1_op = Op.create(desc.stems[1], affine=affine)
self.dag = Cell._create_dag(desc.nodes(),
affine=affine, droppath=droppath,
trainables_from=trainables_from)
self.post_op = Op.create(desc.post_op, affine=affine)
@staticmethod
def _create_dag(nodes_desc:List[NodeDesc],
affine:bool, droppath:bool,
trainables_from:Optional['Cell'])->nn.ModuleList:
dag = nn.ModuleList()
for i, node_desc in enumerate(nodes_desc):
edges:nn.ModuleList = nn.ModuleList()
dag.append(edges)
# assert len(node_desc.edges) > 0
for j, edge_desc in enumerate(node_desc.edges):
edges.append(DagEdge(edge_desc,
affine=affine, droppath=droppath,
template_edge=trainables_from.dag[i][j] if trainables_from else None))
return dag
def ops(self)->Iterable[Op]:
for node in self.dag:
for edge in node:
yield edge.op()
@overrides
def forward(self, s0, s1):
s0 = self.s0_op(s0)
s1 = self.s1_op(s1)
states = [s0, s1]
for node in self.dag:
# TODO: we should probably do average here otherwise output will
# blow up as number of primitives grows
# TODO: Current assumption is that each edge has k channel
# output so node output is k channel as well
# This won't allow for arbitrary edges.
if len(node):
o = sum(edge(states) for edge in node)
else:
# support zero edges node by assuming zero op from last state
o = states[-1] + 0.0
states.append(o)
# TODO: Below assumes same shape except for channels but this won't
# happen for max pool etc shapes? Also, remove hard coded 2.
return self.post_op(states)
|
archai/archai/supergraph/nas/cell.py/0
|
{
"file_path": "archai/archai/supergraph/nas/cell.py",
"repo_id": "archai",
"token_count": 1305
}
| 333 |
import itertools
import math
import os
from collections import OrderedDict
import torch
from torch import nn
from torch.nn.parallel.data_parallel import DataParallel
from tqdm import tqdm
from archai.common import ml_utils, utils
from archai.common.common import get_tb_writer
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.datasets.data import get_dataloaders
from archai.supergraph.models import get_model, num_class
from archai.supergraph.utils.metrics import Accumulator
logger = get_global_logger()
# TODO: remove scheduler parameter?
def run_epoch(
conf, logger, model: nn.Module, loader, loss_fn, optimizer, split_type: str, epoch=0, verbose=1, scheduler=None
):
"""Runs epoch for given dataloader and model. If optimizer is supplied
then backprop and model update is done as well. This can be called from
test to train modes.
"""
writer = get_tb_writer()
# region conf vars
conf_loader = conf["autoaug"]["loader"]
epochs = conf_loader["epochs"]
conf_opt = conf["autoaug"]["optimizer"]
grad_clip = conf_opt["clip"]
# endregion
tqdm_disable = bool(os.environ.get("TASK_NAME", "")) # TODO: remove?
if verbose:
loader = tqdm(loader, disable=tqdm_disable)
loader.set_description("[%s %04d/%04d]" % (split_type, epoch, epochs))
metrics = Accumulator()
cnt = 0
steps = 0
for data, label in loader:
steps += 1
data, label = data.cuda(), label.cuda()
if optimizer:
optimizer.zero_grad()
preds = model(data)
loss = loss_fn(preds, label)
if optimizer:
loss.backward()
if getattr(optimizer, "synchronize", None):
optimizer.synchronize() # for horovod
# grad clipping defaults to 5 (same as Darts)
if grad_clip > 0.0:
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
top1, top5 = ml_utils.accuracy(preds, label, (1, 5))
metrics.add_dict(
{
"loss": loss.item() * len(data),
"top1": top1.item() * len(data),
"top5": top5.item() * len(data),
}
)
cnt += len(data)
if verbose:
postfix = metrics / cnt
if optimizer:
if "lr" in optimizer.param_groups[0]:
postfix["lr"] = optimizer.param_groups[0]["lr"]
loader.set_postfix(postfix)
# below changes LR for every batch in epoch
# TODO: should we do LR step at epoch start only?
# if scheduler is not None:
# scheduler.step(epoch - 1 + float(steps) / total_steps)
del preds, loss, top1, top5, data, label
if tqdm_disable:
if optimizer:
logger.info(
"[%s %03d/%03d] %s lr=%.6f", split_type, epoch, epochs, metrics / cnt, optimizer.param_groups[0]["lr"]
)
else:
logger.info("[%s %03d/%03d] %s", split_type, epoch, epochs, metrics / cnt)
metrics /= cnt
if optimizer:
if "lr" in optimizer.param_groups[0]:
metrics.metrics["lr"] = optimizer.param_groups[0]["lr"]
if verbose:
for key, value in metrics.items():
writer.add_scalar("{}/{}".format(key, split_type), value, epoch)
return metrics
# NOTE that 'eval' is overloaded in this code base. 'eval' here means
# taking a trained model and running it on val or test sets. In NAS 'eval'
# often means taking a found model and training it fully (often termed 'final training').
# metric could be 'last', 'test', 'val', 'train'.
def train_and_eval(conf, val_ratio, val_fold, save_path, only_eval, reporter=None, metric="test"):
writer = get_tb_writer()
# region conf vars
conf_dataset = conf["dataset"]
dataroot = utils.full_path(conf_dataset["dataroot"])
horovod = conf["common"]["horovod"]
checkpoint_freq = conf["common"]["checkpoint"]["freq"]
conf_loader = conf["autoaug"]["loader"]
conf_model = conf["autoaug"]["model"]
ds_name = conf_dataset["name"]
aug = conf_loader["aug"]
cutout = conf_loader["cutout"]
batch_size = conf_loader["batch"]
max_batches = conf_dataset["max_batches"]
epochs = conf_loader["epochs"]
conf_model = conf["autoaug"]["model"]
conf_opt = conf["autoaug"]["optimizer"]
conf_lr_sched = conf["autoaug"]["lr_schedule"]
n_workers = conf_loader["n_workers"]
# endregion
# initialize horovod
# TODO: move to common init
if horovod:
import horovod.torch as hvd
hvd.init()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(device)
if not reporter:
def reporter(**kwargs):
return 0
# get dataloaders with transformations and splits applied
train_dl, valid_dl, test_dl = get_dataloaders(
ds_name,
batch_size,
dataroot,
aug,
cutout,
load_train=True,
load_test=True,
val_ratio=val_ratio,
val_fold=val_fold,
horovod=horovod,
n_workers=n_workers,
max_batches=max_batches,
)
# create a model & an optimizer
model = get_model(conf_model, num_class(ds_name), data_parallel=(not horovod))
# select loss function and optimizer
lossfn = nn.CrossEntropyLoss()
optimizer = ml_utils.create_optimizer(conf_opt, model.parameters())
# distributed optimizer if horovod is used
is_master = True
if horovod:
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
# issue : https://github.com/horovod/horovod/issues/1099
optimizer._requires_update = set()
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
if hvd.rank() != 0:
is_master = False
logger.debug("is_master=%s" % is_master)
# select LR schedule
scheduler = ml_utils.create_lr_scheduler(conf_lr_sched, epochs, optimizer, len(train_dl))
result = OrderedDict()
epoch_start = 1
    # if a model is available from a previous checkpoint then load it
if save_path and os.path.exists(save_path):
logger.info("%s checkpoint found. loading..." % save_path)
data = torch.load(save_path)
# when checkpointing we do add 'model' key so other cases are special cases
if "model" in data or "state_dict" in data:
key = "model" if "model" in data else "state_dict"
logger.info("checkpoint epoch@%d" % data["epoch"])
# TODO: do we need change here?
if not isinstance(model, DataParallel):
# for non-dataparallel models, remove default 'module.' prefix
model.load_state_dict({k.replace("module.", ""): v for k, v in data[key].items()})
else:
# for dataparallel models, make sure 'module.' prefix exist
model.load_state_dict({k if "module." in k else "module." + k: v for k, v in data[key].items()})
# load optimizer
optimizer.load_state_dict(data["optimizer"])
# restore epoch count
if data["epoch"] < epochs:
epoch_start = data["epoch"]
else:
# epochs finished, switch to eval mode
                only_eval = True
else:
model.load_state_dict({k: v for k, v in data.items()})
del data
else:
        logger.info(
            'model checkpoint does not exist at "%s", skipping to pretrained weights...'
            % save_path
        )
        only_eval = False  # we attempted to load the checkpoint but it does not exist, so switch to train mode
# if eval only then run model on train, test and val sets
if only_eval:
logger.info("evaluation only+")
model.eval()
rs = dict() # stores metrics for each set
rs["train"] = run_epoch(conf, logger, model, train_dl, lossfn, None, split_type="train", epoch=0)
if valid_dl:
rs["valid"] = run_epoch(conf, logger, model, valid_dl, lossfn, None, split_type="valid", epoch=0)
rs["test"] = run_epoch(conf, logger, model, test_dl, lossfn, None, split_type="test", epoch=0)
for key, setname in itertools.product(["loss", "top1", "top5"], ["train", "valid", "test"]):
result["%s_%s" % (key, setname)] = rs[setname][key]
result["epoch"] = 0
return result
# train loop
best_top1, best_valid_loss = 0, 10.0e10
max_epoch = epochs
for epoch in range(epoch_start, max_epoch + 1):
# if horovod:
# trainsampler.set_epoch(epoch)
# run train epoch and update the model
model.train()
rs = dict()
rs["train"] = run_epoch(
conf,
logger,
model,
train_dl,
lossfn,
optimizer,
split_type="train",
epoch=epoch,
verbose=is_master,
scheduler=scheduler,
)
if scheduler[0]:
scheduler[0].step()
model.eval()
# check for nan loss
if math.isnan(rs["train"]["loss"]):
raise Exception("train loss is NaN.")
# collect metrics on val and test set, checkpoint
if epoch % checkpoint_freq == 0 or epoch == max_epoch:
if valid_dl:
rs["valid"] = run_epoch(
conf, logger, model, valid_dl, lossfn, None, split_type="valid", epoch=epoch, verbose=is_master
)
rs["test"] = run_epoch(
conf, logger, model, test_dl, lossfn, None, split_type="test", epoch=epoch, verbose=is_master
)
# TODO: is this good enough condition?
if rs[metric]["loss"] < best_valid_loss or rs[metric]["top1"] > best_top1:
best_top1 = rs[metric]["top1"]
best_valid_loss = rs[metric]["loss"]
for key, setname in itertools.product(["loss", "top1", "top5"], ["train", "valid", "test"]):
result["%s_%s" % (key, setname)] = rs[setname][key]
result["epoch"] = epoch
writer.add_scalar("best_top1/valid", rs["valid"]["top1"], epoch)
writer.add_scalar("best_top1/test", rs["test"]["top1"], epoch)
reporter(
loss_valid=rs["valid"]["loss"],
top1_valid=rs["valid"]["top1"],
loss_test=rs["test"]["loss"],
top1_test=rs["test"]["top1"],
)
# save checkpoint
if is_master and save_path:
logger.info("save model@%d to %s" % (epoch, save_path))
torch.save(
{
"epoch": epoch,
"log": {
"train": rs["train"].get_dict(),
"valid": rs["valid"].get_dict(),
"test": rs["test"].get_dict(),
},
"optimizer": optimizer.state_dict(),
"model": model.state_dict(),
},
save_path,
)
del model
result["top1_test"] = best_top1
return result
|
archai/archai/supergraph/utils/augmented_trainer.py/0
|
{
"file_path": "archai/archai/supergraph/utils/augmented_trainer.py",
"repo_id": "archai",
"token_count": 5463
}
| 334 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import math
import os
import time
from typing import Any, Dict, Iterable, Iterator, Optional, Tuple, Union
import deepspeed
import mlflow
import torch
from deepspeed.pipe import PipelineModule
from deepspeed.utils import RepeatingLoader
from overrides import overrides
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset, Sampler
from torch.utils.data.distributed import DistributedSampler
from archai.api.trainer_base import TrainerBase
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.trainers.nlp.ds_training_args import DsTrainingArguments
logger = OrderedDictLogger(source=__name__)
def _create_base_config() -> Dict[str, Any]:
return {
"train_batch_size": 256,
"train_micro_batch_size_per_gpu": 2,
"fp16": {"enabled": True, "initial_scale_power": 12},
"zero_optimization": {"stage": 0},
"optimizer": {"type": "AdamW", "params": {"lr": 5e-5, "betas": [0.9, 0.999], "eps": 1e-8}},
}
class StatefulDistributedSampler(DistributedSampler):
"""Distributed sampler that supports resuming from a given step."""
def __init__(
self,
dataset: Dataset,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: Optional[bool] = True,
seed: Optional[int] = 0,
drop_last: Optional[bool] = False,
total_consumed_samples: Optional[int] = 0,
) -> None:
"""Initialize the sampler.
Args:
dataset: Dataset to be sampled.
num_replicas: Number of replicas.
rank: Rank of the current process.
shuffle: Whether to shuffle the dataset.
seed: Random seed.
drop_last: Whether to drop the last batch if it is smaller than the batch size.
total_consumed_samples: Total number of samples consumed.
"""
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle, seed=seed, drop_last=drop_last)
self.total_consumed_samples = total_consumed_samples
def __iter__(self) -> Iterator:
indices = list(super().__iter__())
return iter(indices[((self.total_consumed_samples // self.num_replicas) % self.num_samples) :])
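# Worked example (illustrative, not part of the original file): with a 100-sample dataset,
# num_replicas=4 and total_consumed_samples=40, each replica owns num_samples=25 indices and
# __iter__ skips (40 // 4) % 25 = 10 of them, so training resumes 10 per-rank samples into the epoch.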
class DsTrainer(TrainerBase):
"""DeepSpeed trainer."""
def __init__(
self,
model: torch.nn.Module,
args: Optional[DsTrainingArguments] = None,
optimizer: Optional[Optimizer] = None,
model_parameters: Optional[Union[Iterable[torch.Tensor], Dict[str, torch.Tensor]]] = None,
lr_scheduler: Optional[_LRScheduler] = None,
mpu: Optional[Any] = None,
dist_init_required: Optional[bool] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
) -> None:
"""Initialize by creating the DeepSpeed engine.
Args:
model: Model to be trained or evaluated.
args: DeepSpeed training arguments. If not provided, a default instance
of `DsTrainingArguments` will be used.
optimizer: Optimizer to be used for training.
model_parameters: Model parameters to be used for training.
lr_scheduler: Learning rate scheduler to be used for training.
mpu: Model parallelism unit.
dist_init_required: Whether distributed initialization is required.
train_dataset: Training dataset.
eval_dataset: Evaluation dataset.
"""
deepspeed.init_distributed()
if args is None:
args = DsTrainingArguments("tmp", ds_config=_create_base_config())
assert isinstance(args, DsTrainingArguments), "`args` should be an instance of `DsTrainingArguments`."
self.args = args
if self.args.pipe_parallel_size > 0:
assert isinstance(
model, torch.nn.Sequential
), "`model` should be an instance of `torch.nn.Sequential` for Pipeline Parallelism."
model = PipelineModule(
layers=model,
num_stages=self.args.pipe_parallel_size,
loss_fn=self.args.pipe_parallel_loss_fn,
partition_method=self.args.pipe_parallel_partition_method,
activation_checkpoint_interval=self.args.pipe_parallel_activation_checkpoint_steps,
)
self.engine, _, _, _ = deepspeed.initialize(
model=model,
optimizer=optimizer,
model_parameters=model_parameters or [p for p in model.parameters() if p.requires_grad],
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
config=self.args.ds_config,
)
if self.engine.global_rank == 0:
mlflow.start_run()
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.client_state = {"global_step": 0, "total_consumed_samples": 0, "log_history": []}
@property
def data_parallel_world_size(self) -> int:
"""Return the data parallel world size."""
if self.engine.mpu:
return self.engine.mpu.get_data_parallel_world_size()
return None
@property
def data_parallel_rank(self) -> int:
"""Return the data parallel rank of the current process."""
if self.engine.mpu:
return self.engine.mpu.get_data_parallel_rank()
return None
def _get_dataloader(
self,
dataset: Dataset,
sampler: Optional[Sampler] = None,
shuffle: Optional[bool] = False,
total_consumed_samples: Optional[int] = 0,
) -> DataLoader:
if sampler is None:
sampler = StatefulDistributedSampler(
dataset,
num_replicas=self.data_parallel_world_size,
rank=self.data_parallel_rank,
shuffle=shuffle,
total_consumed_samples=total_consumed_samples,
)
return DataLoader(
dataset,
batch_size=self.engine.train_micro_batch_size_per_gpu(),
sampler=sampler,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
drop_last=True,
)
def train_batch_without_pipe_parallel(self, data_iter: Optional[Iterator] = None) -> torch.Tensor:
"""Train a batch without pipeline parallelism.
Args:
data_iter: Data iterator.
Returns:
Loss tensor.
"""
gradient_accumulation_steps = self.engine.gradient_accumulation_steps()
total_loss = 0.0
for _ in range(gradient_accumulation_steps):
input_ids, _ = next(data_iter)
input_ids = input_ids.to(self.engine.device)
outputs = self.engine(input_ids, labels=input_ids)
loss = outputs[0].mean()
self.engine.backward(loss)
self.engine.step()
total_loss += loss
return total_loss / gradient_accumulation_steps
def eval_batch_without_pipe_parallel(self, data_iter: Optional[Iterator] = None) -> torch.Tensor:
"""Evaluate a batch without pipeline parallelism.
Args:
data_iter: Data iterator.
Returns:
Loss tensor.
"""
with torch.no_grad():
gradient_accumulation_steps = self.engine.gradient_accumulation_steps()
total_loss = 0.0
for _ in range(gradient_accumulation_steps):
input_ids, _ = next(data_iter)
input_ids = input_ids.to(self.engine.device)
outputs = self.engine(input_ids, labels=input_ids)
loss = outputs[0].mean()
total_loss += loss
return total_loss / gradient_accumulation_steps
@overrides
def train(
self,
resume_from_checkpoint: Optional[str] = None,
resume_optimizer_state: Optional[bool] = True,
resume_lr_scheduler_state: Optional[bool] = True,
) -> None:
"""Train a model.
Args:
resume_from_checkpoint: Path to checkpoint to resume training from.
resume_optimizer_state: Whether to resume optimizer state.
resume_lr_scheduler_state: Whether to resume learning rate scheduler state.
"""
logger.info("Starting training ...")
logger.debug(f"Training arguments: {self.args.to_dict()}")
global_step = 0
total_consumed_samples = 0
log_history = []
if resume_from_checkpoint:
logger.info(f"Loading from checkpoint: {resume_from_checkpoint}")
try:
_, self.client_state = self.engine.load_checkpoint(
resume_from_checkpoint,
load_optimizer_states=resume_optimizer_state,
load_lr_scheduler_states=resume_lr_scheduler_state,
)
global_step = self.client_state["global_step"]
total_consumed_samples = self.client_state["total_consumed_samples"]
log_history = self.client_state["log_history"]
except:
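                # the checkpoint may be missing or incompatible with the current engine;
                # in that case fall back to starting the run from scratch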
pass
train_dataloader = self._get_dataloader(
self.train_dataset,
shuffle=True,
total_consumed_samples=total_consumed_samples,
)
train_iterator = iter(RepeatingLoader(train_dataloader))
train_time = time.time()
for step in range(global_step, self.args.max_steps):
step_time = time.time()
if self.args.pipe_parallel_size > 0:
loss = self.engine.train_batch(data_iter=train_iterator)
else:
loss = self.train_batch_without_pipe_parallel(data_iter=train_iterator)
step_time = time.time() - step_time
if self.engine.global_rank == 0:
float_loss = loss.mean().item()
samples_per_second = self.engine.train_batch_size() / step_time
learning_rate = self.engine.get_lr()[0]
metrics = {
"train/step": step + 1,
"train/loss": float_loss,
"train/ppl": math.exp(float_loss),
"train/learning_rate": learning_rate,
"train/samples_per_second": samples_per_second,
"train/step_runtime": step_time,
}
log_history.append(metrics)
mlflow.log_metrics(metrics, step=step + 1)
do_periodic_logging = (step + 1) % self.args.logging_steps == 0
if do_periodic_logging:
logger.info(
f"Step: {step + 1} | LR: {learning_rate} | "
+ f"Loss: {float_loss:.3f} | Samples/s: {samples_per_second:.3f} | "
+ f"PPL: {math.exp(float_loss):.3f}"
)
do_periodic_eval = (step + 1) % self.args.eval_steps == 0
if do_periodic_eval and self.args.do_eval:
assert self.eval_dataset, "`eval_dataset` must be supplied if `args.do_eval` is True."
eval_loss, eval_time, eval_samples_per_second, eval_steps_per_second = self.evaluate(self.eval_dataset)
if self.engine.global_rank == 0:
eval_idx = (step + 1) // self.args.eval_steps
metrics = {
"eval/idx": eval_idx,
"eval/loss": eval_loss,
"eval/ppl": math.exp(eval_loss),
"eval/runtime": eval_time,
"eval/samples_per_second": eval_samples_per_second,
"eval/steps_per_second": eval_steps_per_second,
}
log_history.append(metrics)
mlflow.log_metrics(metrics, step=eval_idx)
logger.info(
f"Eval: {eval_idx} | Seconds: {eval_time:.3f} | "
+ f"Samples/s: {eval_samples_per_second:.3f} | Loss: {eval_loss:.3f} | "
+ f"PPL: {math.exp(eval_loss):.3f}"
)
do_periodic_checkpoint = (step + 1) % self.args.save_steps == 0
if do_periodic_checkpoint:
self.client_state["global_step"] = step + 1
self.client_state["total_consumed_samples"] = self.engine.global_samples
self.client_state["log_history"] = log_history
self.engine.save_checkpoint(self.args.output_dir, step + 1, client_state=self.client_state)
with open(os.path.join(self.args.output_dir, "trainer_state.json"), "w") as f:
json.dump(self.client_state, f)
train_time = time.time() - train_time
if self.engine.global_rank == 0:
mlflow.log_metric("train/time", train_time)
mlflow.end_run()
@overrides
def evaluate(self, eval_dataset: Dataset) -> Tuple[float, float, float, float]:
"""Evaluate a model.
Args:
eval_dataset: Evaluation dataset.
Returns:
Evaluation loss, time, samples per second and steps per second.
"""
eval_dataloader = self._get_dataloader(eval_dataset, shuffle=False)
eval_iterator = iter(RepeatingLoader(eval_dataloader))
n_eval_steps = self.args.eval_max_steps or len(eval_dataloader)
eval_loss, eval_time = 0.0, time.time()
for _ in range(n_eval_steps):
if self.args.pipe_parallel_size > 0:
loss = self.engine.eval_batch(data_iter=eval_iterator)
else:
loss = self.eval_batch_without_pipe_parallel(data_iter=eval_iterator)
eval_loss += loss.mean().item()
eval_loss /= n_eval_steps
eval_time = time.time() - eval_time
eval_samples_per_second = (n_eval_steps * self.engine.train_batch_size()) / eval_time
eval_steps_per_second = n_eval_steps / eval_time
return eval_loss, eval_time, eval_samples_per_second, eval_steps_per_second
@overrides
def predict(self) -> None:
"""Predict with a model."""
raise NotImplementedError
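# Illustrative usage sketch (not part of the original file): minimal single-node run where the
# datasets yield (input_ids, labels) pairs as expected by train_batch_without_pipe_parallel;
# `my_model`, `train_ds` and `eval_ds` are placeholders.
#
#   args = DsTrainingArguments("checkpoints", ds_config=_create_base_config())
#   trainer = DsTrainer(my_model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
#   trainer.train()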
|
archai/archai/trainers/nlp/ds_trainer.py/0
|
{
"file_path": "archai/archai/trainers/nlp/ds_trainer.py",
"repo_id": "archai",
"token_count": 6868
}
| 335 |
autoaug:
model:
type: shakeshake26_2x112d
loader:
aug: fa_reduced_cifar10
cutout: 16
batch: 512
epochs: 1800
lr_schedule:
type: 'cosine'
warmup:
multiplier: 4
epochs: 5
optimizer:
type: sgd
lr: 0.01
nesterov: True
decay: 0.002
|
archai/confs/aug/shake26_2x112d_cifar_b512.yaml/0
|
{
"file_path": "archai/confs/aug/shake26_2x112d_cifar_b512.yaml",
"repo_id": "archai",
"token_count": 150
}
| 336 |
dataset:
  dataroot: '$default_dataroot' # folder where a directory for each dataset exists; an empty string means choose a default based on the OS, typically ~/dataroot
  # Typically, create a symbolic link ~/dataroot pointing to your dataset location:
# cd %USERPROFILE%
# mklink /D dataroot E:\datasets
dataset_eval:
  dataroot: '$default_dataroot' # folder where a directory for each dataset exists; an empty string means choose a default based on the OS, typically ~/dataroot
|
archai/confs/datasets/dataroot.yaml/0
|
{
"file_path": "archai/confs/datasets/dataroot.yaml",
"repo_id": "archai",
"token_count": 128
}
| 337 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Root image to be based
# Available images: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch/tags
FROM nvcr.io/nvidia/pytorch:22.10-py3
# Labels for the docker
LABEL description="NVIDIA Docker with Archai" \
repository="archai" \
tag="latest" \
creator="microsoft" \
tooltype="archai" \
createtime="01/30/2023"
# Exports environment variables
ENV PATH="/root/.local/bin:$PATH"
# Installs basic utilities
RUN set -e -o xtrace
RUN apt-get update && apt-get install -y
RUN apt-get install -y apt-utils git
# Installs Archai
# Note that APEX is not needed because it comes with NVIDIA's image
RUN git clone -b main --single-branch https://github.com/microsoft/archai.git
WORKDIR /workspace/archai
RUN pip install --user --no-cache-dir .[dev]
|
archai/docker/Dockerfile/0
|
{
"file_path": "archai/docker/Dockerfile",
"repo_id": "archai",
"token_count": 302
}
| 338 |
<jupyter_start><jupyter_text>QuickStartIn this Notebook we run Archai's [Quickstart](https://microsoft.github.io/archai/getting_started/quick_start.html) example on Azure Machine Learning. Prerequisites- Python 3.7 or later- An Azure subscription- An Azure Resource Group- An Azure Machine Learning [Workspace](https://learn.microsoft.com/en-us/azure/machine-learning/quickstart-create-resourcescreate-the-workspace)This notebook also assumes you have a python environment setup using `pip install -e .[aml]` in your Archai repository root<jupyter_code>from pathlib import Path
from IPython.display import display, Image
from IPython.core.display import HTML
from azure.ai.ml import Output, command
import archai.common.azureml_helper as aml_helper
import archai.common.notebook_helper as nb_helper<jupyter_output><empty_output><jupyter_text>Get a handle to the workspaceWe load the workspace from a workspace [configuration file](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-configure-environmentlocal-and-dsvm-only-create-a-workspace-configuration-file).<jupyter_code>ml_client = aml_helper.get_aml_client_from_file("../.azureml/config.json")
print(f'Using workspace: {ml_client.workspace_name} in resource group: {ml_client.resource_group_name}')<jupyter_output><empty_output><jupyter_text>Create a compute clusterWe provision a Linux [compute cluster](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-create-attach-compute-cluster?tabs=python) for this Notebook. See the [full list](https://azure.microsoft.com/en-ca/pricing/details/machine-learning/) on VM sizes and prices.<jupyter_code>cpu_compute_name = "nas-cpu-cluster-D14-v2"
compute_cluster = aml_helper.create_compute_cluster(ml_client, cpu_compute_name)<jupyter_output>You already have a cluster named nas-cpu-cluster-D14-v2, we'll reuse it as is.<jupyter_text>Create an environment based on a YAML fileAzure Machine Learning maintains a set of CPU and GPU Ubuntu Linux-based base images with common system dependencies. For the set of base images and their corresponding Dockerfiles, see the [AzureML Containers](https://github.com/Azure/AzureML-Containers) repo.<jupyter_code>archai_job_env = aml_helper.create_environment_from_file(ml_client,
image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:latest",
conda_file="conda.yaml",
version="0.0.1")<jupyter_output>Environment with name aml-archai is registered to workspace, the environment version is 0.0.1<jupyter_text>Create job<jupyter_code>job = command(experiment_name="archai_quickstart",
display_name="Archai's QuickStart",
compute=cpu_compute_name,
environment=f"{archai_job_env.name}:{archai_job_env.version}",
code="main.py",
outputs=dict(
output_path=Output(type="uri_folder", mode="rw_mount")
),
command="python main.py --output_dir ${{outputs.output_path}}"
)<jupyter_output><empty_output><jupyter_text>Run job<jupyter_code>quickstart_job = ml_client.create_or_update(job)<jupyter_output>[32mUploading main.py[32m (< 1 MB): 100%|##########| 1.74k/1.74k [00:00<00:00, 6.91kB/s]
[39m<jupyter_text>Open the job overview on Azure ML Studio in your web browser (this works when you are running this notebook in VS code).<jupyter_code>import webbrowser
webbrowser.open(quickstart_job.services["Studio"].endpoint)
job_name = quickstart_job.name
print(f'Started job: {job_name}')<jupyter_output>Started job: busy_shampoo_cqjgwy28gc<jupyter_text>Download job's output<jupyter_code>output_name = "output_path"
download_path = "output"
aml_helper.download_job_output(ml_client, job_name=quickstart_job.name, output_name=output_name, download_path=download_path)
downloaded_folder = Path(download_path) / "named-outputs" / output_name<jupyter_output><empty_output><jupyter_text>Show Pareto Frontiers<jupyter_code>param_vs_latency_img = Image(filename=downloaded_folder / "pareto_non_embedding_params_vs_onnx_latency.png")
display(param_vs_latency_img)
param_vs_memory_img = Image(filename=downloaded_folder / "pareto_non_embedding_params_vs_onnx_memory.png")
display(param_vs_memory_img)
latency_vs_memory_img = Image(filename=downloaded_folder / "pareto_onnx_latency_vs_onnx_memory.png")
display(latency_vs_memory_img)<jupyter_output><empty_output><jupyter_text>Show search state of the last iteration<jupyter_code>df = nb_helper.get_search_csv(downloaded_folder)
df = df[['archid', 'non_embedding_params', 'onnx_latency', 'onnx_memory', 'is_pareto']]
df[(df['onnx_latency'] < 0.1) & (df['is_pareto'] == True)]<jupyter_output><empty_output>
|
archai/docs/advanced_guide/cloud/azure/notebooks/quickstart/quickstart.ipynb/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/quickstart/quickstart.ipynb",
"repo_id": "archai",
"token_count": 1859
}
| 339 |
<jupyter_start><jupyter_text>Training a CV-based ModelTraining a CV-based model with PyTorch-Lightning is a simplified process, where the model architecture, loss function, and training process are defined using the `LightningModule`. Archai offers a set of dataset providers to load and pre-process the data. Additionally, Archai provides a `PlTrainer` which wraps the `TrainerBase` abstraction and renames methods so they fit in the search interface. Loading the DataWhen using a dataset provider, the data loading process is simplified, as the provider takes care of downloading and pre-processing the required dataset.This step is accomplished in the same way as the [previous notebook](./cv_dataset_provider.ipynb):<jupyter_code>from archai.datasets.cv.mnist_dataset_provider import MnistDatasetProvider
dataset_provider = MnistDatasetProvider()
train_dataset = dataset_provider.get_train_dataset()
val_dataset = dataset_provider.get_val_dataset()<jupyter_output><empty_output><jupyter_text>Defining the ModelOnce the data is loaded, we can define any CV-based model. In this example, we will create a simple linear model using PyTorch and wrapping it with `LightningModule` from PyTorch-Lightning.Additionally, PyTorch-Lightning requires that some methods are implemented, such as:* `forward`: Defines the forward pass of the model.* `training_step`: Defines the training step (loop) of the model.* `test_step`: If using `evaluate`, it defines the evaluation step (loop) of the model.* `configure_optimizers`: Defines the optimizer and attaches the model's parameters.<jupyter_code>import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch import nn
class Model(pl.LightningModule):
def __init__(self):
super().__init__()
self.linear = nn.Linear(28 * 28, 10)
def forward(self, x):
return self.linear(x)
def training_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
x_hat = self.linear(x)
loss = F.cross_entropy(x_hat, y)
self.log("train_loss", loss)
return loss
def test_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
x_hat = self.linear(x)
loss = F.cross_entropy(x_hat, y)
self.log("val_loss", loss)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer<jupyter_output><empty_output><jupyter_text>Running the TrainerThe final step is to use the PyTorch-Lightning trainer abstraction (`PlTrainer`) to conduct the training process, which involves optimizing the model's parameters using a pre-defined optimization algorithm and loss function, and updating the model's parameters based on the training data. This process is repeated until the model converges to a satisfactory accuracy or performance level.<jupyter_code>from torch.utils.data import DataLoader
from archai.trainers.cv.pl_trainer import PlTrainer
model = Model()
trainer = PlTrainer(max_steps=1, limit_train_batches=1, limit_test_batches=1, limit_predict_batches=1)
trainer.train(model, DataLoader(train_dataset))
trainer.evaluate(model, DataLoader(val_dataset))<jupyter_output>GPU available: False, used: False
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
`Trainer(limit_train_batches=1)` was configured so 1 batch per epoch will be used.
`Trainer(limit_test_batches=1)` was configured so 1 batch will be used.
`Trainer(limit_predict_batches=1)` was configured so 1 batch will be used.
Missing logger folder: c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\cv\lightning_logs
| Name | Type | Params
----------------------------------
0 | linear | Linear | 7.9 K
----------------------------------
7.9 K Trainable params
0 Non-trainable params
7.9 K Total params
0.031 Total estimated model params size (MB)
c:\Users\gderosa\Anaconda3\envs\archai\lib\site-packages\pytorch_lightning\trainer\connectors\data_connector.py:229: PossibleUserWarning: The dataloader, train_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of [...]
|
archai/docs/getting_started/notebooks/cv/pl_trainer.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/cv/pl_trainer.ipynb",
"repo_id": "archai",
"token_count": 1388
}
| 340 |
<jupyter_start><jupyter_text>Training NLP-based Models with NVIDIA Defining the Model<jupyter_code>from transformers import GPT2Config, GPT2LMHeadModel
config = GPT2Config(
vocab_size=50257,
n_positions=16,
n_embd=512,
n_layer=4,
n_head=8,
embd_pdrop=0.0,
attn_pdrop=0.0,
use_cache=False,
)
model = GPT2LMHeadModel(config=config)<jupyter_output><empty_output><jupyter_text>Running the Trainer<jupyter_code>import os
from archai.trainers.nlp.nvidia_trainer import NvidiaTrainer
from archai.trainers.nlp.nvidia_training_args import NvidiaTrainingArguments
# In this example, we will create a dummy dataset with 3 splits
data_path = "dataroot/textpred/olx_tmp/"
os.makedirs(data_path, exist_ok=True)
with open(data_path + "train.txt", "w") as f:
f.write("train")
with open(data_path + "valid.txt", "w") as f:
f.write("valid")
with open(data_path + "test.txt", "w") as f:
f.write("test")
training_args = NvidiaTrainingArguments(
"nvidia-gpt2",
seed=1234,
no_cuda=True,
logging_steps=1,
do_eval=False,
dataset_name="olx_tmp",
dataset_dir="./dataroot",
vocab_type="gpt2",
vocab_size=None,
global_batch_size=1,
seq_len=16,
strategy="dp",
max_steps=1,
optim="adam",
)
trainer = NvidiaTrainer(model=model, args=training_args)
trainer.train()<jupyter_output>2023-03-21 15:15:49,613 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO — Clearing and rebuilding cache ...
2023-03-21 15:15:49,617 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO — Corpus: dataset = olx_tmp | vocab_type = gpt2 | vocab_size = None
2023-03-21 15:15:49,619 - archai.datasets.nlp.nvidia_dataset_provider_utils — INFO — Training vocabulary ...
2023-03-21 15:15:49,619 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — INFO — Training tokenizer with size = 50257 at c:\Users\gderosa\Projects\archai\docs\getting_started\notebooks\nlp\dataroot\textpred\olx_tmp\cache\olx_tmp\gpt2\None\vocab\bbpe_tokenizer.json ...
2023-03-21 15:15:49,619 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — INFO — Training tokenizer ...
2023-03-21 15:15:49,692 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — DEBUG — Tokenizer length: 264
2023-03-21 15:15:49,700 - archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer — DEBUG — Tokenizer file path: c:\Users\gdero[...]
|
archai/docs/getting_started/notebooks/nlp/nvidia_trainer.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/nlp/nvidia_trainer.ipynb",
"repo_id": "archai",
"token_count": 983
}
| 341 |
Datasets
========
.. toctree::
:maxdepth: 2
archai.datasets.cv
archai.datasets.nlp
|
archai/docs/reference/api/archai.datasets.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.datasets.rst",
"repo_id": "archai",
"token_count": 44
}
| 342 |
Search Spaces
=============
.. toctree::
:maxdepth: 2
archai.discrete_search.search_spaces.benchmark
archai.discrete_search.search_spaces.config
archai.discrete_search.search_spaces.cv
archai.discrete_search.search_spaces.nlp
|
archai/docs/reference/api/archai.discrete_search.search_spaces.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.search_spaces.rst",
"repo_id": "archai",
"token_count": 91
}
| 343 |
XNAS
====
Architecture Trainer
--------------------
.. automodule:: archai.supergraph.algos.xnas.xnas_arch_trainer
:members:
:undoc-members:
Experiment Runner
-----------------
.. automodule:: archai.supergraph.algos.xnas.xnas_exp_runner
:members:
:undoc-members:
Model Description Builder
-------------------------
.. automodule:: archai.supergraph.algos.xnas.xnas_model_desc_builder
:members:
:undoc-members:
Operators
---------
.. automodule:: archai.supergraph.algos.xnas.xnas_op
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.algos.xnas.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.algos.xnas.rst",
"repo_id": "archai",
"token_count": 196
}
| 344 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
[tool.black]
line-length = 120
|
archai/pyproject.toml/0
|
{
"file_path": "archai/pyproject.toml",
"repo_id": "archai",
"token_count": 29
}
| 345 |
<jupyter_start><jupyter_text>How-To Evaluate a Custom Task with LM-Eval HarnessEven though the `lm_eval` framework supports more than 200 tasks, one might want to implement an additional one. With that in mind, this tutorial walks through the process of creating a custom task, including it in the registry and evaluating models with it. InstallationThe `lm_eval_harness` project is designed to be an installable module, which allows users to call it from outside its package. Thus, one can install it as follows:<jupyter_code>try:
import lm_eval_harness
except ModuleNotFoundError:
    !pip install git+https://github.com/microsoft/archai.git@pre-release#subdirectory=research/lm_eval_harness<jupyter_output><empty_output><jupyter_text>Creating a Custom TaskTasks always inherit from the base class `Task`, which is implemented by the `lm_eval.base` module. When defining a custom task, there are some constants and methods that need to be overridden: Constants* `VERSION`: Indicates the version of the task for reproducibility.* `DATASET_PATH`: Name of the dataset from the Hugging Face Hub.* `DATASET_NAME`: Configuration name of the dataset from the Hugging Face Hub. Methods* `should_decontaminate()`: Whether the task can be decontaminated with an n-grams file.* `has_training_docs()`: Whether the dataset supports a training set.* `has_validation_docs()`: Whether the dataset supports a validation set.* `has_test_docs()`: Whether the dataset supports a testing set.* `test_docs()`: Indicates the `DatasetDict` key to be used for the testing samples.* `doc_to_text()`: Defines the task input.* `doc_to_target()`: Defines the task target.* `construct_requests()`: Creates a tuple of requests that defines the core computation of the task (e.g., zero-shot evaluation is usually conducted using log-likelihood over the desired target token).* `process_results()`: Processes the output of the requests and calculates their metric (e.g., accuracy).* `aggregation()`: Defines how multiple outputs should be aggregated (e.g., mean).* `higher_is_better()`: Defines whether a higher metric value corresponds to a better metric.*One can refer to the tasks implemented in `lm-eval` if additional information is needed: https://github.com/EleutherAI/lm-evaluation-harness/tree/master/lm_eval/tasks.*In this example, we will be implementing the AX-b task from the SuperGLUE benchmark:<jupyter_code>from typing import Any, Dict, List
from datasets.arrow_dataset import Dataset
from lm_eval_harness.utils.request_factory import Request, rf
from lm_eval.base import Task
from lm_eval.metrics import mean
class AXb(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "axb"
def should_decontaminate(self) -> bool:
return False
def has_training_docs(self) -> bool:
return False
def has_validation_docs(self) -> bool:
return False
def has_test_docs(self) -> bool:
return True
def test_docs(self) -> Dataset:
return self.dataset["test"]
def doc_to_text(self, doc: Dict[str, Any]) -> str:
return f"{doc['sentence1']}\nQuestion: {doc['sentence2']} True or False?\nAnswer:"
def doc_to_target(self, doc: Dict[str, Any]) -> str:
available_labels = {0: "True", 1: "False"}
label = doc["label"]
return f" {available_labels[label]}"
def construct_requests(self, doc: Dict[str, Any], ctx: str) -> List[Request]:
ll_true = rf.loglikelihood(ctx, " True")
ll_false = rf.loglikelihood(ctx, " False")
return ll_true, ll_false
def process_results(self, doc: Dict[str, Any], results: List[str]) -> Dict[str, Any]:
ll_true, ll_false = results
prediction = int(ll_false > ll_true)
reference = doc["label"]
        acc = 1.0 if prediction == reference else 0.0
return {"acc": acc}
def aggregation(self) -> Dict[str, Any]:
return {"acc": mean}
def higher_is_better(self) -> Dict[str, Any]:
return {"acc": True}<jupyter_output><empty_output><jupyter_text>Adding Task to RegistryAfter a custom task has been defined, it needs to be added to two constants that enables its usability:* `ALL_TASKS`: List of available tasks (useful when parsing from the command line).* `TASK_REGISTRY`: Dictionary mapping the task identifier and its class.<jupyter_code>from lm_eval.tasks import ALL_TASKS, TASK_REGISTRY
ALL_TASKS.append("axb")
TASK_REGISTRY.update({"axb": AXb})<jupyter_output><empty_output><jupyter_text>Evaluate using Custom TaskFinally, the custom task evaluation follows the same protocol defined by the `simple_evaluation.ipynb` example, as follows:<jupyter_code>from transformers import AutoModelForCausalLM, AutoTokenizer
from lm_eval.evaluator import make_table
from lm_eval_harness.lm_eval_evaluator import evaluate_wrapper
from lm_eval_harness.lm_eval_hf_model import HFEvalModel
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
hf_model = HFEvalModel(model, tokenizer)
outputs = evaluate_wrapper(
hf_model,
["axb"],
num_fewshot=0,
no_cache=True,
)
print(make_table(outputs))<jupyter_output>Reusing dataset super_glue (C:\Users\gderosa\.cache\huggingface\datasets\super_glue\axb\1.0.2\d040c658e2ddef6934fdd97deb45c777b6ff50c524781ea434e7219b56a428a7)
|
archai/research/lm_eval_harness/tutorials/custom_task_evaluation.ipynb/0
|
{
"file_path": "archai/research/lm_eval_harness/tutorials/custom_task_evaluation.ipynb",
"repo_id": "archai",
"token_count": 1837
}
| 346 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
""" Script to prepare flower102 dataset for pytorch dataloader.
"""
import argparse
import os
import tempfile
from collections import defaultdict
from typing import Dict, List
from torchvision.datasets.utils import download_and_extract_archive, download_url
from archai.common import utils
def check_flower102(dataroot: str) -> bool:
flower102 = os.path.join(dataroot, "flower102")
train = os.path.join(flower102, "train")
test = os.path.join(flower102, "test")
meta = os.path.join(flower102, "meta")
if not os.path.isdir(flower102) or not os.path.isdir(train) or not os.path.isdir(test) or not os.path.isdir(meta):
return False
num_train_files = 0
for base, dirs, files in os.walk(train):
for file in files:
num_train_files += 1
if num_train_files != 6507:
return False
num_test_files = 0
for base, dirs, files in os.walk(test):
for file in files:
num_test_files += 1
if num_test_files != 1682:
return False
# all checks passed
return True
def download(dataroot: str):
DOWNLOAD_URL = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz"
with tempfile.TemporaryDirectory() as tempdir:
download_and_extract_archive(DOWNLOAD_URL, tempdir, extract_root=dataroot, remove_finished=True)
def load_test_csv_data(filename: str) -> Dict[str, List[str]]:
"""Loads the data in csv files into a dictionary with
class names as keys and list of image names as values. Works only for test data csv"""
data_dict = defaultdict(list)
with open(filename, "r") as f:
lines = f.readlines()
assert len(lines) > 0
for line in lines[1:]:
words = line.rstrip().split(",")
assert len(words) > 0
data_dict[words[0]] = words[1:]
return data_dict
def load_train_csv_data(filename: str) -> Dict[str, List[str]]:
"""Loads the data in csv files into a dictionary with
class names as keys and list of image names as values. Works only for train data csv"""
data_dict = defaultdict(list)
with open(filename, "r") as f:
lines = f.readlines()
assert len(lines) > 0
for line in lines[1:]:
words = line.rstrip().split(",")
assert len(words) > 0
data_dict[words[1]] = words[2:]
return data_dict
def copy_data_helper(data: Dict[str, List[str]], imagesroot: str, foldername: str) -> None:
for key in data.keys():
images = data[key]
for im in images:
if not im:
continue
source = os.path.join(imagesroot, im)
target = os.path.join(foldername, key, im)
if not os.path.isfile(target):
utils.copy_file(source, target)
def prepare_data(flower102_root: str):
test_file = os.path.join(flower102_root, "meta", "flowers102_test.csv")
test_data = load_test_csv_data(test_file)
# train data is split into 2 files for some reason
train1_file = os.path.join(flower102_root, "meta", "flowers102_train1.csv")
train2_file = os.path.join(flower102_root, "meta", "flowers102_train2.csv")
train_files = [train1_file, train2_file]
train_data = defaultdict(list)
for tf in train_files:
this_data = load_train_csv_data(tf)
train_data.update(this_data)
# make classname directories for train and test
for key in test_data.keys():
os.makedirs(os.path.join(flower102_root, "test", key), exist_ok=True)
os.makedirs(os.path.join(flower102_root, "train", key), exist_ok=True)
# copy images to the right locations
imagesroot = os.path.join(flower102_root, "jpg")
testfoldername = os.path.join(flower102_root, "test")
copy_data_helper(test_data, imagesroot, testfoldername)
trainfoldername = os.path.join(flower102_root, "train")
copy_data_helper(train_data, imagesroot, trainfoldername)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataroot",
type=str,
default="C:\\Users\\dedey\\dataroot",
help="root directory where flower102 folder is intended to exist. If it already exists in the format required this script will skip downloading",
)
args = parser.parse_args()
# check that dataset is in format required
# else download and prepare dataset
if not check_flower102(args.dataroot):
# make flower102 directory
flower102 = os.path.join(args.dataroot, "flower102")
train = os.path.join(flower102, "train")
test = os.path.join(flower102, "test")
meta = os.path.join(flower102, "meta")
os.makedirs(flower102, exist_ok=True)
os.makedirs(train, exist_ok=True)
os.makedirs(test, exist_ok=True)
os.makedirs(meta, exist_ok=True)
# this step will create folder jpg
# which has all the images
download(flower102)
# download the csv files for the train and test split
# from 'NAS Evaluation is Frustrating' repo
# note that download_url doesn't work in vscode debug mode
test_file_url = "https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/flowers102_test.csv"
train_file_urls = [
"https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/flowers102_train1.csv",
"https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/flowers102_train2.csv",
]
download_url(test_file_url, meta, filename=None, md5=None)
for tu in train_file_urls:
download_url(tu, meta, filename=None, md5=None)
prepare_data(flower102)
|
archai/scripts/supergraph/download_datasets/flower102_install.py/0
|
{
"file_path": "archai/scripts/supergraph/download_datasets/flower102_install.py",
"repo_id": "archai",
"token_count": 2349
}
| 347 |
from archai.common.common import common_init
from archai.supergraph.algos.nasbench101.nasbench101_dataset import Nasbench101Dataset
from archai.supergraph.datasets import data
from archai.supergraph.utils.trainer import Trainer
def main():
# 6, 7, 9, 10, 16
# model = model_builder.build(model_builder.EXAMPLE_DESC_MATRIX, model_builder.EXAMPLE_VERTEX_OPS)
nsds = Nasbench101Dataset("~/dataroot/nasbench_ds/nasbench_full.pkl")
conf = common_init(config_filepath="confs/algos/nasbench101.yaml")
conf_eval = conf["nas"]["eval"]
conf_loader = conf_eval["loader"]
conf_trainer = conf_eval["trainer"]
model = nsds.create_model(5) # 401277 is same model as example
data_loaders = data.get_data(conf_loader)
trainer = Trainer(conf_trainer, model)
trainer.fit(data_loaders)
if __name__ == "__main__":
main()
|
archai/scripts/supergraph/nasbench101/pytorch_train.py/0
|
{
"file_path": "archai/scripts/supergraph/nasbench101/pytorch_train.py",
"repo_id": "archai",
"token_count": 324
}
| 348 |
# Training Models with Archai
This folder contains the necessary files and instructions to train models using Archai.
## Installation
Before you can start training models, you need to install Archai. To do so, you can follow these instructions:
1. Open your terminal and run the following command:
```bash
pip install --user git+https://github.com/microsoft/archai.git#egg=archai[dev]
```
2. If you plan to use DeepSpeed and Flash-Attention, run this command instead:
```bash
pip install --user git+https://github.com/microsoft/archai.git#egg=archai[dev,deepspeed,flash-attn]
```
*Please note that DeepSpeed is not compatible with Windows.*
Alternatively, you can use Docker to build a Docker image with Archai and all the necessary dependencies. Simply follow the instructions in the [Dockerfile](https://github.com/microsoft/archai/blob/main/docker/Dockerfile.flash).
## Data Preparation
To prepare the data, you can use the `FastHfDatasetProvider` class to load and encode datasets from the Hugging Face Hub. This is recommended as it offers a faster way to load and encode datasets. Here is an example code:
```Python
dataset_provider = FastHfDatasetProvider.from_hub(
"wikitext",
dataset_config_name="wikitext-103-raw-v1",
tokenizer_name="Salesforce/codegen-350M-mono",
cache_dir="wikitext_cache",
)
train_dataset = dataset_provider.get_train_dataset(seq_len=2048)
eval_dataset = dataset_provider.get_val_dataset(seq_len=2048)
```
Once the dataset is encoded, it can be cached and loaded from disk later as follows:
```Python
dataset_provider = FastHfDatasetProvider.cache("wikitext_cache")
```
However, please note that this method does not apply to NVIDIA-related training, as datasets are automatically created and encoded.
## DeepSpeed
If you are using DeepSpeed, run the following command to begin training:
```bash
deepspeed deepspeed/train_codegen.py --help
```
You can customize the training by modifying the arguments defined in `CodeGenFlashConfig`, `DsTrainingArguments`, and `ds_config.json`. By default, the arguments are set to perform a toy training and explain how the pipeline works.
Additionally, if you have a model that has been previously trained with DeepSpeed, you can continue its training or fine-tune as follows:
```bash
deepspeed deepspeed/train_codegen.py --pre_trained_model_path <path_to_checkpoint>
```
## Hugging Face
If you are using Hugging Face, run the following command to begin training:
```bash
python -m torch.distributed.run --nproc_per_node=4 hf/train_codegen.py --help
```
You can customize the training by modifying the arguments defined in `CodeGenConfig` and `TrainingArguments`. By default, the arguments are set to perform a toy training and explain how the pipeline works.
## NVIDIA
If you are using NVIDIA, run the following command to begin training:
```bash
python -m torch.distributed.run --nproc_per_node=4 nvidia/train_gpt2.py --help
```
You can customize the training by modifying the arguments defined in `GPT2Config` and `NvidiaTrainingArguments`. By default, the arguments are set to perform a toy training and explain how the pipeline works.
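If you prefer to drive the NVIDIA trainer from Python instead of the launcher script, the same classes can be used directly. The sketch below mirrors the `NvidiaTrainer` usage from Archai's getting-started notebooks; the model configuration and argument values are illustrative only, and `dataset_name`/`dataset_dir` are assumptions that must point to a dataset prepared in the layout the trainer expects:
```python
from transformers import GPT2Config, GPT2LMHeadModel
from archai.trainers.nlp.nvidia_trainer import NvidiaTrainer
from archai.trainers.nlp.nvidia_training_args import NvidiaTrainingArguments
# Illustrative model size; adjust to the architecture you actually want to train.
config = GPT2Config(vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12)
model = GPT2LMHeadModel(config=config)
# Illustrative arguments; dataset_name/dataset_dir are placeholders and must match
# a dataset already prepared under dataset_dir.
training_args = NvidiaTrainingArguments(
    "nvidia-gpt2",
    seed=42,
    logging_steps=10,
    dataset_name="olx_tmp",
    dataset_dir="./dataroot",
    vocab_type="gpt2",
    global_batch_size=256,
    seq_len=1024,
    max_steps=1000,
    optim="adam",
)
trainer = NvidiaTrainer(model=model, args=training_args)
trainer.train()
```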
|
archai/scripts/trainers/README.md/0
|
{
"file_path": "archai/scripts/trainers/README.md",
"repo_id": "archai",
"token_count": 914
}
| 349 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
import sys
from archai.common.store import ArchaiStore
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
def cleanup_stale_pods(store: ArchaiStore):
""" This script looks for kubernetes pods that are no longer running (e.g. the pod may have run out of
memory or may have been stopped for whatever reason) and cleans up the state in our status table to
ensure the job doesn't get zombied, it will be picked up by the next available pod. """
SCRIPT_DIR = os.path.dirname(__file__)
sys.path += [os.path.join(SCRIPT_DIR, '..', 'util')]
from shell import Shell
shell = Shell()
podinfo = shell.run(os.getcwd(), "kubectl get pods -n snpe -o json", print_output=False)
podinfo = json.loads(podinfo)
running = []
for row in podinfo['items']:
name = row['metadata']['name']
status = row['status']['phase']
if status == 'Running':
running += [name]
print(name, status)
# unlock rows that belong to non-existent kubernetes pods.
for e in store.get_all_status_entities(status='completed', not_equal=True):
name = e['name']
if 'node' in e and e['node']:
node = e['node']
status = e['status'] if 'status' in e else 'none'
print(f"Found lock by {node} with status {status}")
if node.startswith('snpe-quantizer') and node not in running:
print(f"Clearing lock on non-existant pod: {node}")
del e['node']
store.update_status_entity(e)
if __name__ == '__main__':
experiment_name = os.getenv("EXPERIMENT_NAME", "facesynthetics")
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
cleanup_stale_pods(store)
|
archai/tasks/face_segmentation/aml/azure/cleanup_stale_pods.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/cleanup_stale_pods.py",
"repo_id": "archai",
"token_count": 824
}
| 350 |
<#
.SYNOPSIS
.
.DESCRIPTION
This is a handy powershell script that can cleanup old images from your azure container registry.
You can find the password in your Azure portal for the container registry under the tab named Access Keys.
.PARAMETER password
Specifies a password.
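.EXAMPLE
./cleanup.ps1 -password <registry-admin-password>
Illustrative invocation; replace the placeholder with the admin password for the
container registry, found under Access Keys in the Azure portal.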
#>
param(
[Parameter(Mandatory=$true)]
[string]$password
)
$registry_name = "snpecontainerregistry001"
$tags = &az acr repository show-tags -n $registry_name --repository quantizer | ConvertFrom-JSON
if ($tags.GetType().Name -eq "String"){
# there is only one tag
Write-Host "Your registry is already clean, it contains only one image quantizer:$tags"
Exit 0
}
$latest = [Version]"0"
foreach ($t in $tags) {
Write-Host "Found tag $t"
$v = [Version]$t
if ($v -gt $latest){
$latest = $v
}
}
$a = Read-Host "Do you want to delete all images except the latest version $latest (y/n)? "
if ($a -ne "y") {
Exit 1
}
foreach ($t in $tags) {
$v = [Version]$t
if ($v -ne $latest) {
Write-Host "Deleting image quantizer:$t"
Write-Host "az acr repository delete --name $registry_name --image quantizer:$v -u $registry_name -p $password"
$rc = &az acr repository delete --name $registry_name --image quantizer:$v -u $registry_name -p $password --yes
}
}
|
archai/tasks/face_segmentation/aml/docker/quantizer/cleanup.ps1/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/docker/quantizer/cleanup.ps1",
"repo_id": "archai",
"token_count": 492
}
| 351 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
from pathlib import Path
from typing import List, Optional, Union
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import AsyncModelEvaluator
from archai.common.config import Config
from shutil import copyfile
from archai.common.monitor import JobCompletionMonitor
from aml.training.training_pipeline import start_training_pipeline
from azure.identity import DefaultAzureCredential
from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
from azure.ai.ml import MLClient
from aml.util.setup import configure_store, get_valid_arch_id
def _get_entity_value(entity, key, default_value=''):
if key in entity:
return entity[key]
return default_value
class AmlPartialTrainingEvaluator(AsyncModelEvaluator):
""" The AmlPartialTrainingEvaluator launches partial training jobs"""
def __init__(self,
config : Config,
local_output: Path,
tr_epochs: int = 1,
timeout_seconds=3600):
self.config = config
self.tr_epochs = int(tr_epochs)
aml_config = config['aml']
workspace_name = aml_config['workspace_name']
subscription_id = aml_config['subscription_id']
resource_group_name = aml_config['resource_group']
identity = DefaultAzureCredential()
if os.getenv('AZUREML_ROOT_RUN_ID'):
identity = AzureMLOnBehalfOfCredential()
self.ml_client = MLClient(
credential=identity,
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name
)
self.local_output = local_output
self.models = []
self.timeout = timeout_seconds
self.store = configure_store(aml_config)
self.results = []
self.metric_key = self.config['training'].get('metric_key', 'val_iou')
self.failure_rate = 0.25
@overrides
def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
self.models += [arch]
model_id = get_valid_arch_id(arch)
e = self.store.get_status(model_id)
if self.metric_key in e and e[self.metric_key]:
metric = float(e[self.metric_key])
self.results += [{
'id': model_id,
self.metric_key: metric,
'status': _get_entity_value(e, 'status'),
'error': _get_entity_value(e, 'error')
}]
@overrides
def fetch_all(self) -> List[Union[float, None]]:
if len(self.results) > 0:
            print(f'AmlPartialTrainingEvaluator: found {len(self.results)} models that were already trained.')
index = {}
for existing in self.results:
id = existing['id']
index[id] = existing
# pull out the models that have not yet been trained.
pending = []
for arch in self.models:
model_id = get_valid_arch_id(arch)
if model_id not in index:
pending += [arch]
if len(pending) > 0:
print(f"AmlPartialTrainingEvaluator: Starting training on {len(pending)} models")
            # train all the models listed in `pending` on a GPU cluster so that as much training
            # as possible happens in parallel, which greatly reduces the overall Archai search time.
description = f"AmlPartialTrainingEvaluator training {self.tr_epochs} epochs"
pipeline_job, model_names = start_training_pipeline(
description, self.ml_client, self.store, pending, self.config, self.tr_epochs, self.local_output)
job_id = pipeline_job.name
print(f'AmlPartialTrainingEvaluator: Started training pipeline: {job_id}')
# wait for all the parallel training jobs to finish
keys = [self.metric_key]
monitor = JobCompletionMonitor(self.store, self.ml_client, keys, job_id, self.timeout, throw_on_failure_rate=self.failure_rate)
models = monitor.wait(model_names)['models']
for m in models:
id = m['id']
index[id] = m
# now reassemble all results in the right order (order of the send method calls)
models = []
for arch in self.models:
model_id = get_valid_arch_id(arch)
result = index[model_id]
models += [result]
results = {'models': models}
# save the results to the output folder (which is mapped by the AML pipeline to our
# blob store under the container 'models' in the folder named the same as the
# experiment_name)
results_path = f'{self.local_output}/models.json'
summary = json.dumps(results, indent=2)
with open(results_path, 'w') as f:
f.write(summary)
# save the archai log also which can be handy for debugging later.
log = 'archai.log'
if os.path.isfile(log):
copyfile(log, f'{self.local_output}/{log}')
        # extract the array of results for our return value; this is the metric that the
        # Archai search needs to figure out which models to continue to evolve and which are
        # not so good.
metrics = []
for m in results['models']:
if self.metric_key in m:
metric = m[self.metric_key]
else:
metric = None
metrics += [metric]
self.models = [] # reset for next run.
print(f'AmlPartialTrainingEvaluator: fetch_all returning : {summary}')
return metrics
|
archai/tasks/face_segmentation/aml/training/aml_training_evaluator.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/training/aml_training_evaluator.py",
"repo_id": "archai",
"token_count": 2468
}
| 352 |
search:
search_space:
name: hgnet
params:
num_classes: 18
img_size: [256, 256] # (w, h)
in_channels: 3
op_subset: ['conv3x3', 'conv5x5', 'conv7x7']
stem_strides: [2]
# Number of downsampling blocks (without counting stem conv)
num_blocks: 5
# Maximum number of layers in downsampling blocks
downsample_block_max_ops: 4
# Maximum number of layers in skip blocks
skip_block_max_ops: 2
# Maximum number of layers in upsampling blocks
upsample_block_max_ops: 4
# Maximum number of layers after the final upsampling layer
post_upsample_max_ops: 2
algorithm:
name: evolution_pareto
params:
num_iters: 20
init_num_models: 20
mutations_per_parent: 5
num_crossovers: 10
max_unseen_population: 50
num_random_mix: 5
target:
name: cpu
|
archai/tasks/face_segmentation/confs/cpu_search.yaml/0
|
{
"file_path": "archai/tasks/face_segmentation/confs/cpu_search.yaml",
"repo_id": "archai",
"token_count": 393
}
| 353 |
# Text Generation
At Archai, we recognize the significance of discovering the right neural architecture to attain the highest performance in text generation. For this purpose, we have created a neural architecture search method called the Lightweight Transformer Search (LTS). LTS identifies the architectures that lie on the Pareto frontier, where trade-offs are made between several objectives, such as latency and memory usage.
## Model Gallery
We utilized GPT-2 as our base model and applied LTS on top of it to find the best performing architectures given a set of constraints. The following table showcases the results of our search:
| Model | Non-Embedding Parameters (M) | Latency (s) | Memory (MB) |
| - | - | - | - |
[gpt2_a9e3147996070fda25af4b39ed95b6a18d6d0402](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_a9e3147996070fda25af4b39ed95b6a18d6d0402) | 1.06 | 0.008 | 29.06
[gpt2_80fabe4acddff0dc796e287588e40d86e79df4b2](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_80fabe4acddff0dc796e287588e40d86e79df4b2) | 2.08 | 0.013 | 45.46
[gpt2_90682823835acabd965294775983a1d5a2c2fa43](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_90682823835acabd965294775983a1d5a2c2fa43) | 3.13 | 0.021 | 74.50
[gpt2_c76bdddb5cf59275711672daa5b8c70e6c78bf4e](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_c76bdddb5cf59275711672daa5b8c70e6c78bf4e) | 3.95 | 0.024 | 77.62
[gpt2_8f5159304179c77ecdc69c953b71a3f8fa528564](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_8f5159304179c77ecdc69c953b71a3f8fa528564) | 5.13 | 0.030 | 94.64
[gpt2_131845381012a68c3a358514fdffc12b09db1ed8](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_131845381012a68c3a358514fdffc12b09db1ed8) | 6.44 | 0.036 | 112.16
[gpt2_917c2f9601a1c29d1f280bb172015e5fb210b6b3](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_917c2f9601a1c29d1f280bb172015e5fb210b6b3) | 7.41 | 0.042 | 90.76
[gpt2_538d4b101df48595a935d90dbf4a7fb2ac09ac01](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_538d4b101df48595a935d90dbf4a7fb2ac09ac01) | 8.23 | 0.047 | 93.88
[gpt2_c679fa01f00dd6f584614c6d9784eb233b047283](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_c679fa01f00dd6f584614c6d9784eb233b047283) | 9.46 | 0.053 | 148.71
[gpt2_39563367097004cfd771d76d8822e51ad79b56d6](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_39563367097004cfd771d76d8822e51ad79b56d6) | 10.65 | 0.051 | 190.77
[gpt2_ddf63c1125f1fed5a7dd3537f640834187719996](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_ddf63c1125f1fed5a7dd3537f640834187719996) | 13.32 | 0.069 | 125.78
[gpt2_0e1b5a3c867d6473da270799061f3089a1df5afd](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_0e1b5a3c867d6473da270799061f3089a1df5afd) | 16.04 | 0.084 | 173.74
[gpt2_3b30c85ac08c6b12b0ea46cb832270ba52b7fcd8](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_3b30c85ac08c6b12b0ea46cb832270ba52b7fcd8) | 18.97 | 0.096 | 209.94
[gpt2_1e9d92f0fed7288facc68cb448863e8120ccca9c](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_1e9d92f0fed7288facc68cb448863e8120ccca9c) | 20.96 | 0.105 | 217.50
[gpt2_0e8c86e6babd924ff8b511c94cc1647bf61f81a2](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_0e8c86e6babd924ff8b511c94cc1647bf61f81a2) | 24.83 | 0.121 | 244.77
[gpt2_5fea22df661ad91676709da7a334505f15765659](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_5fea22df661ad91676709da7a334505f15765659) | 26.89 | 0.131 | 252.65
[gpt2_46e7c68a025417e20a7e13bd4c1ee71438d28069](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_46e7c68a025417e20a7e13bd4c1ee71438d28069) | 30.07 | 0.146 | 252.23
[gpt2_98b0196b5a865ba76f31723646f33e0461dc910d](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_98b0196b5a865ba76f31723646f33e0461dc910d) | 33.24 | 0.160 | 314.39
[gpt2_4352a56f3fa9e7ba6d291867d356a08022753658](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_4352a56f3fa9e7ba6d291867d356a08022753658) | 40.34 | 0.195 | 328.88
[gpt2_6c6e63116ff74ba444ff5a08cef54380073ebea3](https://huggingface.co/microsoft/lts-gpt2-sm/tree/main/gpt2_6c6e63116ff74ba444ff5a08cef54380073ebea3) | 49.85 | 0.230 | 377.68
For a straightforward usage with the `transformers` package, please refer to [microsoft/lts-gpt2-sm](https://huggingface.co/microsoft/lts-gpt2-sm) on the Hugging Face Hub.
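For example, a checkpoint from the gallery can be loaded with `transformers` directly. This is a minimal sketch: it assumes the tokenizer is published at the root of the `microsoft/lts-gpt2-sm` repository and each checkpoint lives in the subfolder named in the table above, and the sampling settings are illustrative:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
# Assumption: tokenizer at the repository root, checkpoints in per-model subfolders.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/lts-gpt2-sm",
    subfolder="gpt2_ddf63c1125f1fed5a7dd3537f640834187719996",
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/lts-gpt2-sm")
# Generate a short continuation from a prompt (sampling parameters are illustrative).
inputs = tokenizer("# Halo Infinite Review", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.95)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```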
## Searching for Pareto-optimal Architectures
We ran LTS for a total of 10 generations and discovered multiple architectures that perform well with regards to non-embedding parameters, latency, and memory. To reproduce the search, the following command can be used:
```python
python search.py -h
```
*The default arguments provided by the script were used in this task.*
### Results
The best-performing architectures with respect to non-embedding parameters and ONNX-based latency are depicted by the points in the bottom-left corner of the plot:

The best-performing architectures with respect to non-embedding parameters and ONNX-based memory are shown by the points in the bottom-left corner of the plot:

## Training the Architectures
Once the Pareto-optimal architectures have been found (located in the `models` folder), they can be trained using the following script:
```python
python train.py -h
```
*The default arguments provided by the script were used in this task. The training dataset consisted of 7.8 billion tokens from a pre-encoded version of ThePile.*
### Results
After pre-training the architectures, we performed a zero-shot evaluation over 16 tasks, and created two Pareto frontiers between the average performance (across all tasks) and ONNX-based metrics (latency and memory).

It is worth noting that the model labeled *gpt2_4352a56f3fa9e7ba6d291867d356a08022753658* (represented by the "yellow" dot) achieved the highest average performance with lower latency and memory usage than *gpt2_6c6e63116ff74ba444ff5a08cef54380073ebea3*, despite having 20% fewer parameters.
Furthermore, *gpt2_ddf63c1125f1fed5a7dd3537f640834187719996* (represented by the "medium green" dot) used only 13.32M non-embedding parameters, 0.069s of latency, and 125.78MB of memory, yet it attained an average performance of 0.3867. This level of performance was only 2.89% lower than that of the highest-performing model ("yellow" dot), but it utilized roughly one-third of the non-embedding parameters, latency, and memory.
## Generating Text with Pre-Trained Architectures
With our pre-trained architectures, text can be generated with ease using just a few lines of code. Simply use one of the models from our Model Gallery and start generating text:
```python
python generate_text.py "microsoft/lts-gpt2-sm" "# Halo Infinite Review" --pre_trained_model_subfolder "gpt2_ddf63c1125f1fed5a7dd3537f640834187719996"
```
|
archai/tasks/text_generation/README.md/0
|
{
"file_path": "archai/tasks/text_generation/README.md",
"repo_id": "archai",
"token_count": 2944
}
| 354 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from archai.common.ordered_dict_logger import OrderedDictLogger
def test_ordered_dict_logger():
# Assert that the default attributes are defined
logger = OrderedDictLogger(file_path="log.yaml", delay=0.0)
assert logger.file_path == "log.yaml"
assert logger.delay == 0.0
assert isinstance(logger.root_node, dict)
assert len(logger.root_node) == 0
assert logger.current_path == ""
# Assert that the updated key is defined
logger._update_key("test_key", "test_value")
assert len(logger.root_node) == 1
assert logger.root_node["test_key"] == "test_value"
assert logger.current_path == ""
# Assert that the logger can be saved
logger.save()
assert os.path.exists("log.yaml")
# Assert that the logger can be loaded
logger = OrderedDictLogger(delay=0.0)
logger.load("log.yaml")
assert len(logger.root_node) == 1
assert logger.root_node["test_key"] == "test_value"
if os.path.exists("log.yaml"):
os.remove("log.yaml")
|
archai/tests/common/test_ordered_dict_logger.py/0
|
{
"file_path": "archai/tests/common/test_ordered_dict_logger.py",
"repo_id": "archai",
"token_count": 405
}
| 355 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import pytest
from overrides import overrides
from archai.discrete_search.api.predictor import MeanVar, Predictor
@pytest.fixture
def surrogate_model(search_objectives):
class DummyPredictor(Predictor):
def __init__(self, n_objs: int, seed1: int = 10, seed2: int = 20) -> None:
self.n_objs = n_objs
self.mean_rng = np.random.RandomState(seed1)
self.var_rng = np.random.RandomState(seed2)
@overrides
def fit(self, encoded_archs: np.ndarray, y: np.ndarray) -> None:
pass
@overrides
def predict(self, encoded_archs: np.ndarray) -> MeanVar:
n = len(encoded_archs)
return MeanVar(self.mean_rng.random(size=(n, self.n_objs)), self.var_rng.random(size=(n, self.n_objs)))
return DummyPredictor(len(search_objectives.expensive_objectives))
|
archai/tests/discrete_search/algos/fixtures/surrogate_model.py/0
|
{
"file_path": "archai/tests/discrete_search/algos/fixtures/surrogate_model.py",
"repo_id": "archai",
"token_count": 406
}
| 356 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
from archai.discrete_search.evaluators.nlp.transformer_flex_memory import (
TransformerFlexOnnxMemory,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import (
TransformerFlexSearchSpace,
)
@pytest.fixture
def search_space():
return TransformerFlexSearchSpace("gpt2")
def test_transformer_flex_onnx_memory(search_space):
arch = search_space.random_sample()
objective = TransformerFlexOnnxMemory(search_space)
# Assert that the returned memory is valid
memory = objective.evaluate(arch)
assert memory > 0.0
|
archai/tests/discrete_search/evaluators/nlp/test_transformer_flex_memory.py/0
|
{
"file_path": "archai/tests/discrete_search/evaluators/nlp/test_transformer_flex_memory.py",
"repo_id": "archai",
"token_count": 222
}
| 357 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.common import utils
class A:
def __init__(self):
self.a1 = 3.14
class B:
def __init__(self):
self.a = A()
self.i = 3
self.s = "eeee"
self.d = {"k": {"kk": 5}}
def test_state_dict():
b = B()
sd = utils.state_dict(b)
b.a.a1 = 0.0
b.i = 0
b.s = ""
b.d = {"0": 0}
utils.load_state_dict(b, sd)
b0 = B()
assert utils.deep_comp(b, b0)
|
archai/tests/supergraph/test_state_dict.py/0
|
{
"file_path": "archai/tests/supergraph/test_state_dict.py",
"repo_id": "archai",
"token_count": 257
}
| 358 |
include LICENSE.txt
|
azure-devops-python-api/azure-devops/MANIFEST.in/0
|
{
"file_path": "azure-devops-python-api/azure-devops/MANIFEST.in",
"repo_id": "azure-devops-python-api",
"token_count": 6
}
| 359 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from ...v7_0.member_entitlement_management.models import *
from .member_entitlement_management_client import MemberEntitlementManagementClient
__all__ = [
'AccessLevel',
'BaseOperationResult',
'Extension',
'ExtensionSummaryData',
'GraphGroup',
'GraphMember',
'GraphSubject',
'GraphSubjectBase',
'GraphUser',
'Group',
'GroupEntitlement',
'GroupEntitlementOperationReference',
'GroupOperationResult',
'GroupOption',
'JsonPatchOperation',
'LicenseSummaryData',
'MemberEntitlement',
'MemberEntitlementOperationReference',
'MemberEntitlementsPatchResponse',
'MemberEntitlementsPostResponse',
'MemberEntitlementsResponseBase',
'OperationReference',
'OperationResult',
'PagedGraphMemberList',
'PagedList',
'ProjectEntitlement',
'ProjectRef',
'ReferenceLinks',
'SummaryData',
'TeamRef',
'UserEntitlement',
'UserEntitlementOperationReference',
'UserEntitlementOperationResult',
'UserEntitlementsPatchResponse',
'UserEntitlementsPostResponse',
'UserEntitlementsResponseBase',
'UsersSummary',
'MemberEntitlementManagementClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/released/member_entitlement_management/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/member_entitlement_management/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 493
}
| 360 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from ...v7_0.work_item_tracking.models import *
from .work_item_tracking_client import WorkItemTrackingClient
__all__ = [
'AccountMyWorkResult',
'AccountRecentActivityWorkItemModel',
'AccountRecentActivityWorkItemModel2',
'AccountRecentActivityWorkItemModelBase',
'AccountRecentMentionWorkItemModel',
'AccountWorkWorkItemModel',
'ArtifactUriQuery',
'ArtifactUriQueryResult',
'AttachmentReference',
'Comment',
'CommentCreate',
'CommentList',
'CommentMention',
'CommentReaction',
'CommentUpdate',
'CommentVersion',
'EmailRecipients',
'ExternalDeployment',
'ExternalEnvironment',
'ExternalPipeline',
'FieldDependentRule',
'GraphSubjectBase',
'IdentityRef',
'IdentityReference',
'JsonPatchOperation',
'Link',
'MailMessage',
'ProcessIdModel',
'ProcessMigrationResultModel',
'ProjectWorkItemStateColors',
'ProvisioningResult',
'QueryBatchGetRequest',
'QueryHierarchyItem',
'QueryHierarchyItemsResult',
'ReferenceLinks',
'ReportingWorkItemLinksBatch',
'ReportingWorkItemRevisionsBatch',
'ReportingWorkItemRevisionsFilter',
'SendMailBody',
'StreamedBatch',
'TeamContext',
'UpdateWorkItemField',
'Wiql',
'WorkArtifactLink',
'WorkItem',
'WorkItemBatchGetRequest',
'WorkItemClassificationNode',
'WorkItemComment',
'WorkItemComments',
'WorkItemCommentVersionRef',
'WorkItemDelete',
'WorkItemDeleteReference',
'WorkItemDeleteShallowReference',
'WorkItemDeleteUpdate',
'WorkItemField',
'WorkItemFieldOperation',
'WorkItemFieldReference',
'WorkItemFieldUpdate',
'WorkItemHistory',
'WorkItemIcon',
'WorkItemLink',
'WorkItemNextStateOnTransition',
'WorkItemQueryClause',
'WorkItemQueryResult',
'WorkItemQuerySortColumn',
'WorkItemReference',
'WorkItemRelation',
'WorkItemRelationType',
'WorkItemRelationUpdates',
'WorkItemStateColor',
'WorkItemStateTransition',
'WorkItemTagDefinition',
'WorkItemTemplate',
'WorkItemTemplateReference',
'WorkItemTrackingReference',
'WorkItemTrackingResource',
'WorkItemTrackingResourceReference',
'WorkItemType',
'WorkItemTypeCategory',
'WorkItemTypeColor',
'WorkItemTypeColorAndIcon',
'WorkItemTypeFieldInstance',
'WorkItemTypeFieldInstanceBase',
'WorkItemTypeFieldWithReferences',
'WorkItemTypeReference',
'WorkItemTypeStateColors',
'WorkItemTypeTemplate',
'WorkItemTypeTemplateUpdateModel',
'WorkItemUpdate',
'WorkItemTrackingClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/released/work_item_tracking/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/work_item_tracking/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 1035
}
| 361 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class NpmClient(Client):
"""Npm
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(NpmClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '4c83cfc1-f33a-477e-a789-29d38ffca52e'
def get_content_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, project=None, **kwargs):
"""GetContentScopedPackage.
:param str feed_id:
:param str package_scope:
:param str unscoped_package_name:
:param str package_version:
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='09a4eafd-123a-495c-979c-0eda7bdb9a14',
version='7.0',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_content_unscoped_package(self, feed_id, package_name, package_version, project=None, **kwargs):
"""GetContentUnscopedPackage.
Get an unscoped npm package.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='75caa482-cb1e-47cd-9f2c-c048a4b7a43e',
version='7.0',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_packages(self, batch_request, feed_id, project=None):
"""UpdatePackages.
Update several packages from a single feed in a single request. The updates to the packages do not happen atomically.
:param :class:`<NpmPackagesBatchRequest> <azure.devops.v7_0.npm.models.NpmPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'NpmPackagesBatchRequest')
self._send(http_method='POST',
location_id='06f34005-bbb2-41f4-88f5-23e03a99bb12',
version='7.0',
route_values=route_values,
content=content)
def get_readme_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, project=None, **kwargs):
"""GetReadmeScopedPackage.
Get the Readme for a package version with an npm scope.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope\name)
:param str unscoped_package_name: Name of the package (the 'name' part of @scope\name)
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='6d4db777-7e4a-43b2-afad-779a1d197301',
version='7.0',
route_values=route_values,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_readme_unscoped_package(self, feed_id, package_name, package_version, project=None, **kwargs):
"""GetReadmeUnscopedPackage.
Get the Readme for a package version that has no npm scope.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='1099a396-b310-41d4-a4b6-33d134ce3fcf',
version='7.0',
route_values=route_values,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_recycle_bin_packages(self, batch_request, feed_id, project=None):
"""UpdateRecycleBinPackages.
Delete or restore several package versions from the recycle bin.
:param :class:`<NpmPackagesBatchRequest> <azure.devops.v7_0.npm.models.NpmPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'NpmPackagesBatchRequest')
self._send(http_method='POST',
location_id='eefe03ef-a6a2-4a7a-a0ec-2e65a5efd64c',
version='7.0',
route_values=route_values,
content=content)
def delete_scoped_package_version_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""DeleteScopedPackageVersionFromRecycleBin.
Delete a package version with an npm scope from the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
self._send(http_method='DELETE',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='7.0',
route_values=route_values)
def get_scoped_package_version_metadata_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""GetScopedPackageVersionMetadataFromRecycleBin.
Get information about a scoped package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name)
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v7_0.npm.models.NpmPackageVersionDeletionState>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='7.0',
route_values=route_values)
return self._deserialize('NpmPackageVersionDeletionState', response)
def restore_scoped_package_version_from_recycle_bin(self, package_version_details, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""RestoreScopedPackageVersionFromRecycleBin.
Restore a package version with an npm scope from the recycle bin to its feed.
:param :class:`<NpmRecycleBinPackageVersionDetails> <azure.devops.v7_0.npm.models.NpmRecycleBinPackageVersionDetails>` package_version_details:
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'NpmRecycleBinPackageVersionDetails')
self._send(http_method='PATCH',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='7.0',
route_values=route_values,
content=content)
def delete_package_version_from_recycle_bin(self, feed_id, package_name, package_version, project=None):
"""DeletePackageVersionFromRecycleBin.
Delete a package version without an npm scope from the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
self._send(http_method='DELETE',
location_id='63a4f31f-e92b-4ee4-bf92-22d485e73bef',
version='7.0',
route_values=route_values)
def get_package_version_metadata_from_recycle_bin(self, feed_id, package_name, package_version, project=None):
"""GetPackageVersionMetadataFromRecycleBin.
Get information about an unscoped package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v7_0.npm.models.NpmPackageVersionDeletionState>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='63a4f31f-e92b-4ee4-bf92-22d485e73bef',
version='7.0',
route_values=route_values)
return self._deserialize('NpmPackageVersionDeletionState', response)
def restore_package_version_from_recycle_bin(self, package_version_details, feed_id, package_name, package_version, project=None):
"""RestorePackageVersionFromRecycleBin.
Restore a package version without an npm scope from the recycle bin to its feed.
:param :class:`<NpmRecycleBinPackageVersionDetails> <azure.devops.v7_0.npm.models.NpmRecycleBinPackageVersionDetails>` package_version_details:
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'NpmRecycleBinPackageVersionDetails')
self._send(http_method='PATCH',
location_id='63a4f31f-e92b-4ee4-bf92-22d485e73bef',
version='7.0',
route_values=route_values,
content=content)
def get_scoped_upstreaming_behavior(self, feed_id, package_scope, unscoped_package_name, project=None):
"""GetScopedUpstreamingBehavior.
Get the upstreaming behavior of the (scoped) package within the context of a feed
:param str feed_id: The name or id of the feed
:param str package_scope: The scope of the package
        :param str unscoped_package_name: The name of the scoped package (the 'name' part of @scope/name)
:param str project: Project ID or project name
:rtype: :class:`<UpstreamingBehavior> <azure.devops.v7_0.npm.models.UpstreamingBehavior>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
response = self._send(http_method='GET',
location_id='9859c187-f6ec-41b0-862d-8003b3b404e0',
version='7.0',
route_values=route_values)
return self._deserialize('UpstreamingBehavior', response)
def set_scoped_upstreaming_behavior(self, feed_id, package_scope, unscoped_package_name, behavior, project=None):
"""SetScopedUpstreamingBehavior.
Set the upstreaming behavior of a (scoped) package within the context of a feed
:param str feed_id: The name or id of the feed
:param str package_scope: The scope of the package
        :param str unscoped_package_name: The name of the scoped package (the 'name' part of @scope/name)
:param :class:`<UpstreamingBehavior> <azure.devops.v7_0.npm.models.UpstreamingBehavior>` behavior: The behavior to apply to the scoped package within the scope of the feed
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
content = self._serialize.body(behavior, 'UpstreamingBehavior')
self._send(http_method='PATCH',
location_id='9859c187-f6ec-41b0-862d-8003b3b404e0',
version='7.0',
route_values=route_values,
content=content)
def get_upstreaming_behavior(self, feed_id, package_name, project=None):
"""GetUpstreamingBehavior.
Get the upstreaming behavior of the (unscoped) package within the context of a feed
:param str feed_id: The name or id of the feed
:param str package_name: The name of the package
:param str project: Project ID or project name
:rtype: :class:`<UpstreamingBehavior> <azure.devops.v7_0.npm.models.UpstreamingBehavior>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
response = self._send(http_method='GET',
location_id='e27a45d3-711b-41cb-a47a-ae669b6e9076',
version='7.0',
route_values=route_values)
return self._deserialize('UpstreamingBehavior', response)
def set_upstreaming_behavior(self, feed_id, package_name, behavior, project=None):
"""SetUpstreamingBehavior.
        Set the upstreaming behavior of an (unscoped) package within the context of a feed
        :param str feed_id: The name or id of the feed
        :param str package_name: The name of the package
        :param :class:`<UpstreamingBehavior> <azure.devops.v7_0.npm.models.UpstreamingBehavior>` behavior: The behavior to apply to the package within the scope of the feed
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
content = self._serialize.body(behavior, 'UpstreamingBehavior')
self._send(http_method='PATCH',
location_id='e27a45d3-711b-41cb-a47a-ae669b6e9076',
version='7.0',
route_values=route_values,
content=content)
def get_scoped_package_info(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""GetScopedPackageInfo.
Get information about a scoped package version (such as @scope/name).
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_0.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='e93d9ec3-4022-401e-96b0-83ea5d911e09',
version='7.0',
route_values=route_values)
return self._deserialize('Package', response)
def unpublish_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""UnpublishScopedPackage.
Unpublish a scoped package version (such as @scope/name).
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_0.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='DELETE',
location_id='e93d9ec3-4022-401e-96b0-83ea5d911e09',
version='7.0',
route_values=route_values)
return self._deserialize('Package', response)
def update_scoped_package(self, package_version_details, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""UpdateScopedPackage.
:param :class:`<PackageVersionDetails> <azure.devops.v7_0.npm.models.PackageVersionDetails>` package_version_details:
:param str feed_id:
:param str package_scope:
:param str unscoped_package_name:
:param str package_version:
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_0.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
response = self._send(http_method='PATCH',
location_id='e93d9ec3-4022-401e-96b0-83ea5d911e09',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Package', response)
def get_package_info(self, feed_id, package_name, package_version, project=None):
"""GetPackageInfo.
Get information about an unscoped package version.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_0.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='7.0',
route_values=route_values)
return self._deserialize('Package', response)
def unpublish_package(self, feed_id, package_name, package_version, project=None):
"""UnpublishPackage.
Unpublish an unscoped package version.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_0.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='DELETE',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='7.0',
route_values=route_values)
return self._deserialize('Package', response)
def update_package(self, package_version_details, feed_id, package_name, package_version, project=None):
"""UpdatePackage.
:param :class:`<PackageVersionDetails> <azure.devops.v7_0.npm.models.PackageVersionDetails>` package_version_details:
:param str feed_id:
:param str package_name:
:param str package_version:
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_0.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
response = self._send(http_method='PATCH',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Package', response)
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/npm/npm_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/npm/npm_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 14386
}
| 362 |
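# --- Hedged usage sketch (editorial addition, not part of the generated SDK files) ---
# One way the v7.0 NpmClient above might be obtained and called. The organization URL,
# personal access token, feed, project, and package names are placeholder assumptions;
# Connection.get_client is the same call the SDK's client factories use internally.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-personal-access-token'))
npm_client = connection.get_client('azure.devops.v7_0.npm.npm_client.NpmClient')

# Read the upstreaming behavior configured for an unscoped package in a feed.
behavior = npm_client.get_upstreaming_behavior(feed_id='MyFeed',
                                               package_name='my-package',
                                               project='MyProject')

# Fetch metadata for a scoped package version (@scope/name).
package = npm_client.get_scoped_package_info(feed_id='MyFeed',
                                             package_scope='scope',
                                             unscoped_package_name='name',
                                             package_version='1.0.0',
                                             project='MyProject')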
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
class ClientFactoryV7_1(object):
"""ClientFactoryV7_1.
A factory class to get the 7.1 preview clients.
"""
def __init__(self, connection):
self._connection = connection
def get_accounts_client(self):
"""get_accounts_client.
Gets the 7.1 version of the AccountsClient
:rtype: :class:`<AccountsClient> <azure.devops.v7_1.accounts.accounts_client.AccountsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.accounts.accounts_client.AccountsClient')
def get_audit_client(self):
"""get_audit_client.
Gets the 7.1 version of the AuditClient
:rtype: :class:`<AuditClient> <azure.devops.v7_1.audit.audit_client.AuditClient>`
"""
return self._connection.get_client('azure.devops.v7_1.audit.audit_client.AuditClient')
def get_build_client(self):
"""get_build_client.
Gets the 7.1 version of the BuildClient
:rtype: :class:`<BuildClient> <azure.devops.v7_1.build.build_client.BuildClient>`
"""
return self._connection.get_client('azure.devops.v7_1.build.build_client.BuildClient')
def get_cix_client(self):
"""get_cix_client.
Gets the 7.1 version of the CixClient
:rtype: :class:`<CixClient> <azure.devops.v7_1.cix.cix_client.CixClient>`
"""
return self._connection.get_client('azure.devops.v7_1.cix.cix_client.CixClient')
def get_client_trace_client(self):
"""get_client_trace_client.
Gets the 7.1 version of the ClientTraceClient
:rtype: :class:`<ClientTraceClient> <azure.devops.v7_1.client_trace.client_trace_client.ClientTraceClient>`
"""
return self._connection.get_client('azure.devops.v7_1.client_trace.client_trace_client.ClientTraceClient')
def get_contributions_client(self):
"""get_contributions_client.
Gets the 7.1 version of the ContributionsClient
:rtype: :class:`<ContributionsClient> <azure.devops.v7_1.contributions.contributions_client.ContributionsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.contributions.contributions_client.ContributionsClient')
def get_core_client(self):
"""get_core_client.
Gets the 7.1 version of the CoreClient
:rtype: :class:`<CoreClient> <azure.devops.v7_1.core.core_client.CoreClient>`
"""
return self._connection.get_client('azure.devops.v7_1.core.core_client.CoreClient')
def get_customer_intelligence_client(self):
"""get_customer_intelligence_client.
Gets the 7.1 version of the CustomerIntelligenceClient
:rtype: :class:`<CustomerIntelligenceClient> <azure.devops.v7_1.customer_intelligence.customer_intelligence_client.CustomerIntelligenceClient>`
"""
return self._connection.get_client('azure.devops.v7_1.customer_intelligence.customer_intelligence_client.CustomerIntelligenceClient')
def get_dashboard_client(self):
"""get_dashboard_client.
Gets the 7.1 version of the DashboardClient
:rtype: :class:`<DashboardClient> <azure.devops.v7_1.dashboard.dashboard_client.DashboardClient>`
"""
return self._connection.get_client('azure.devops.v7_1.dashboard.dashboard_client.DashboardClient')
def get_elastic_client(self):
"""get_elastic_client.
Gets the 7.1 version of the ElasticClient
:rtype: :class:`<ElasticClient> <azure.devops.v7_1.elastic.elastic_client.ElasticClient>`
"""
return self._connection.get_client('azure.devops.v7_1.elastic.elastic_client.ElasticClient')
def get_extension_management_client(self):
"""get_extension_management_client.
Gets the 7.1 version of the ExtensionManagementClient
:rtype: :class:`<ExtensionManagementClient> <azure.devops.v7_1.extension_management.extension_management_client.ExtensionManagementClient>`
"""
return self._connection.get_client('azure.devops.v7_1.extension_management.extension_management_client.ExtensionManagementClient')
def get_feature_availability_client(self):
"""get_feature_availability_client.
Gets the 7.1 version of the FeatureAvailabilityClient
:rtype: :class:`<FeatureAvailabilityClient> <azure.devops.v7_1.feature_availability.feature_availability_client.FeatureAvailabilityClient>`
"""
return self._connection.get_client('azure.devops.v7_1.feature_availability.feature_availability_client.FeatureAvailabilityClient')
def get_feature_management_client(self):
"""get_feature_management_client.
Gets the 7.1 version of the FeatureManagementClient
:rtype: :class:`<FeatureManagementClient> <azure.devops.v7_1.feature_management.feature_management_client.FeatureManagementClient>`
"""
return self._connection.get_client('azure.devops.v7_1.feature_management.feature_management_client.FeatureManagementClient')
def get_feed_client(self):
"""get_feed_client.
Gets the 7.1 version of the FeedClient
:rtype: :class:`<FeedClient> <azure.devops.v7_1.feed.feed_client.FeedClient>`
"""
return self._connection.get_client('azure.devops.v7_1.feed.feed_client.FeedClient')
def get_file_container_client(self):
"""get_file_container_client.
Gets the 7.1 version of the FileContainerClient
:rtype: :class:`<FileContainerClient> <azure.devops.v7_1.file_container.file_container_client.FileContainerClient>`
"""
return self._connection.get_client('azure.devops.v7_1.file_container.file_container_client.FileContainerClient')
def get_gallery_client(self):
"""get_gallery_client.
Gets the 7.1 version of the GalleryClient
:rtype: :class:`<GalleryClient> <azure.devops.v7_1.gallery.gallery_client.GalleryClient>`
"""
return self._connection.get_client('azure.devops.v7_1.gallery.gallery_client.GalleryClient')
def get_git_client(self):
"""get_git_client.
Gets the 7.1 version of the GitClient
:rtype: :class:`<GitClient> <azure.devops.v7_1.git.git_client.GitClient>`
"""
return self._connection.get_client('azure.devops.v7_1.git.git_client.GitClient')
def get_graph_client(self):
"""get_graph_client.
Gets the 7.1 version of the GraphClient
:rtype: :class:`<GraphClient> <azure.devops.v7_1.graph.graph_client.GraphClient>`
"""
return self._connection.get_client('azure.devops.v7_1.graph.graph_client.GraphClient')
def get_identity_client(self):
"""get_identity_client.
Gets the 7.1 version of the IdentityClient
:rtype: :class:`<IdentityClient> <azure.devops.v7_1.identity.identity_client.IdentityClient>`
"""
return self._connection.get_client('azure.devops.v7_1.identity.identity_client.IdentityClient')
def get_location_client(self):
"""get_location_client.
Gets the 7.1 version of the LocationClient
:rtype: :class:`<LocationClient> <azure.devops.v7_1.location.location_client.LocationClient>`
"""
return self._connection.get_client('azure.devops.v7_1.location.location_client.LocationClient')
def get_maven_client(self):
"""get_maven_client.
Gets the 7.1 version of the MavenClient
:rtype: :class:`<MavenClient> <azure.devops.v7_1.maven.maven_client.MavenClient>`
"""
return self._connection.get_client('azure.devops.v7_1.maven.maven_client.MavenClient')
def get_member_entitlement_management_client(self):
"""get_member_entitlement_management_client.
Gets the 7.1 version of the MemberEntitlementManagementClient
:rtype: :class:`<MemberEntitlementManagementClient> <azure.devops.v7_1.member_entitlement_management.member_entitlement_management_client.MemberEntitlementManagementClient>`
"""
return self._connection.get_client('azure.devops.v7_1.member_entitlement_management.member_entitlement_management_client.MemberEntitlementManagementClient')
def get_notification_client(self):
"""get_notification_client.
Gets the 7.1 version of the NotificationClient
:rtype: :class:`<NotificationClient> <azure.devops.v7_1.notification.notification_client.NotificationClient>`
"""
return self._connection.get_client('azure.devops.v7_1.notification.notification_client.NotificationClient')
def get_npm_client(self):
"""get_npm_client.
Gets the 7.1 version of the NpmClient
:rtype: :class:`<NpmClient> <azure.devops.v7_1.npm.npm_client.NpmClient>`
"""
return self._connection.get_client('azure.devops.v7_1.npm.npm_client.NpmClient')
def get_nuget_client(self):
"""get_nuget_client.
Gets the 7.1 version of the NuGetClient
:rtype: :class:`<NuGetClient> <azure.devops.v7_1.nuget.nuget_client.NuGetClient>`
"""
return self._connection.get_client('azure.devops.v7_1.nuget.nuget_client.NuGetClient')
def get_operations_client(self):
"""get_operations_client.
Gets the 7.1 version of the OperationsClient
:rtype: :class:`<OperationsClient> <azure.devops.v7_1.operations.operations_client.OperationsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.operations.operations_client.OperationsClient')
def get_pipeline_permissions_client(self):
"""get_pipeline_permissions_client.
Gets the 7.1 version of the PipelinePermissionsClient
:rtype: :class:`<PipelinePermissionsClient> <azure.devops.v7_1.pipeline_permissions.pipeline_permissions_client.PipelinePermissionsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.pipeline_permissions.pipeline_permissions_client.PipelinePermissionsClient')
def get_pipelines_client(self):
"""get_pipelines_client.
Gets the 7.1 version of the PipelinesClient
:rtype: :class:`<PipelinesClient> <azure.devops.v7_1.pipelines.pipelines_client.PipelinesClient>`
"""
return self._connection.get_client('azure.devops.v7_1.pipelines.pipelines_client.PipelinesClient')
def get_pipelines_checks_client(self):
"""get_pipelines_checks_client.
Gets the 7.1 version of the PipelinesChecksClient
:rtype: :class:`<PipelinesChecksClient> <azure.devops.v7_1.pipelines_checks.pipelines_checks_client.PipelinesChecksClient>`
"""
return self._connection.get_client('azure.devops.v7_1.pipelines_checks.pipelines_checks_client.PipelinesChecksClient')
def get_policy_client(self):
"""get_policy_client.
Gets the 7.1 version of the PolicyClient
:rtype: :class:`<PolicyClient> <azure.devops.v7_1.policy.policy_client.PolicyClient>`
"""
return self._connection.get_client('azure.devops.v7_1.policy.policy_client.PolicyClient')
def get_profile_client(self):
"""get_profile_client.
Gets the 7.1 version of the ProfileClient
:rtype: :class:`<ProfileClient> <azure.devops.v7_1.profile.profile_client.ProfileClient>`
"""
return self._connection.get_client('azure.devops.v7_1.profile.profile_client.ProfileClient')
def get_profile_regions_client(self):
"""get_profile_regions_client.
Gets the 7.1 version of the ProfileRegionsClient
:rtype: :class:`<ProfileRegionsClient> <azure.devops.v7_1.profile_regions.profile_regions_client.ProfileRegionsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.profile_regions.profile_regions_client.ProfileRegionsClient')
def get_project_analysis_client(self):
"""get_project_analysis_client.
Gets the 7.1 version of the ProjectAnalysisClient
:rtype: :class:`<ProjectAnalysisClient> <azure.devops.v7_1.project_analysis.project_analysis_client.ProjectAnalysisClient>`
"""
return self._connection.get_client('azure.devops.v7_1.project_analysis.project_analysis_client.ProjectAnalysisClient')
def get_provenance_client(self):
"""get_provenance_client.
Gets the 7.1 version of the ProvenanceClient
:rtype: :class:`<ProvenanceClient> <azure.devops.v7_1.provenance.provenance_client.ProvenanceClient>`
"""
return self._connection.get_client('azure.devops.v7_1.provenance.provenance_client.ProvenanceClient')
def get_py_pi_api_client(self):
"""get_py_pi_api_client.
Gets the 7.1 version of the PyPiApiClient
:rtype: :class:`<PyPiApiClient> <azure.devops.v7_1.py_pi_api.py_pi_api_client.PyPiApiClient>`
"""
return self._connection.get_client('azure.devops.v7_1.py_pi_api.py_pi_api_client.PyPiApiClient')
def get_release_client(self):
"""get_release_client.
Gets the 7.1 version of the ReleaseClient
:rtype: :class:`<ReleaseClient> <azure.devops.v7_1.release.release_client.ReleaseClient>`
"""
return self._connection.get_client('azure.devops.v7_1.release.release_client.ReleaseClient')
def get_sbom_client(self):
"""get_sbom_client.
Gets the 7.1 version of the SBOMClient
:rtype: :class:`<SBOMClient> <azure.devops.v7_1.sbom.sbom_client.SBOMClient>`
"""
return self._connection.get_client('azure.devops.v7_1.sbom.sbom_client.SBOMClient')
def get_search_client(self):
"""get_search_client.
Gets the 7.1 version of the SearchClient
:rtype: :class:`<SearchClient> <azure.devops.v7_1.search.search_client.SearchClient>`
"""
return self._connection.get_client('azure.devops.v7_1.search.search_client.SearchClient')
def get_security_client(self):
"""get_security_client.
Gets the 7.1 version of the SecurityClient
:rtype: :class:`<SecurityClient> <azure.devops.v7_1.security.security_client.SecurityClient>`
"""
return self._connection.get_client('azure.devops.v7_1.security.security_client.SecurityClient')
def get_service_endpoint_client(self):
"""get_service_endpoint_client.
Gets the 7.1 version of the ServiceEndpointClient
:rtype: :class:`<ServiceEndpointClient> <azure.devops.v7_1.service_endpoint.service_endpoint_client.ServiceEndpointClient>`
"""
return self._connection.get_client('azure.devops.v7_1.service_endpoint.service_endpoint_client.ServiceEndpointClient')
def get_service_hooks_client(self):
"""get_service_hooks_client.
Gets the 7.1 version of the ServiceHooksClient
:rtype: :class:`<ServiceHooksClient> <azure.devops.v7_1.service_hooks.service_hooks_client.ServiceHooksClient>`
"""
return self._connection.get_client('azure.devops.v7_1.service_hooks.service_hooks_client.ServiceHooksClient')
def get_settings_client(self):
"""get_settings_client.
Gets the 7.1 version of the SettingsClient
:rtype: :class:`<SettingsClient> <azure.devops.v7_1.settings.settings_client.SettingsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.settings.settings_client.SettingsClient')
def get_symbol_client(self):
"""get_symbol_client.
Gets the 7.1 version of the SymbolClient
:rtype: :class:`<SymbolClient> <azure.devops.v7_1.symbol.symbol_client.SymbolClient>`
"""
return self._connection.get_client('azure.devops.v7_1.symbol.symbol_client.SymbolClient')
def get_task_client(self):
"""get_task_client.
Gets the 7.1 version of the TaskClient
:rtype: :class:`<TaskClient> <azure.devops.v7_1.task.task_client.TaskClient>`
"""
return self._connection.get_client('azure.devops.v7_1.task.task_client.TaskClient')
def get_task_agent_client(self):
"""get_task_agent_client.
Gets the 7.1 version of the TaskAgentClient
:rtype: :class:`<TaskAgentClient> <azure.devops.v7_1.task_agent.task_agent_client.TaskAgentClient>`
"""
return self._connection.get_client('azure.devops.v7_1.task_agent.task_agent_client.TaskAgentClient')
def get_test_client(self):
"""get_test_client.
Gets the 7.1 version of the TestClient
:rtype: :class:`<TestClient> <azure.devops.v7_1.test.test_client.TestClient>`
"""
return self._connection.get_client('azure.devops.v7_1.test.test_client.TestClient')
def get_test_plan_client(self):
"""get_test_plan_client.
Gets the 7.1 version of the TestPlanClient
:rtype: :class:`<TestPlanClient> <azure.devops.v7_1.test_plan.test_plan_client.TestPlanClient>`
"""
return self._connection.get_client('azure.devops.v7_1.test_plan.test_plan_client.TestPlanClient')
def get_test_results_client(self):
"""get_test_results_client.
Gets the 7.1 version of the TestResultsClient
:rtype: :class:`<TestResultsClient> <azure.devops.v7_1.test_results.test_results_client.TestResultsClient>`
"""
return self._connection.get_client('azure.devops.v7_1.test_results.test_results_client.TestResultsClient')
def get_tfvc_client(self):
"""get_tfvc_client.
Gets the 7.1 version of the TfvcClient
:rtype: :class:`<TfvcClient> <azure.devops.v7_1.tfvc.tfvc_client.TfvcClient>`
"""
return self._connection.get_client('azure.devops.v7_1.tfvc.tfvc_client.TfvcClient')
def get_token_admin_client(self):
"""get_token_admin_client.
Gets the 7.1 version of the TokenAdminClient
:rtype: :class:`<TokenAdminClient> <azure.devops.v7_1.token_admin.token_admin_client.TokenAdminClient>`
"""
return self._connection.get_client('azure.devops.v7_1.token_admin.token_admin_client.TokenAdminClient')
def get_upack_api_client(self):
"""get_upack_api_client.
Gets the 7.1 version of the UPackApiClient
:rtype: :class:`<UPackApiClient> <azure.devops.v7_1.upack_api.upack_api_client.UPackApiClient>`
"""
return self._connection.get_client('azure.devops.v7_1.upack_api.upack_api_client.UPackApiClient')
def get_upack_packaging_client(self):
"""get_upack_packaging_client.
Gets the 7.1 version of the UPackPackagingClient
:rtype: :class:`<UPackPackagingClient> <azure.devops.v7_1.upack_packaging.upack_packaging_client.UPackPackagingClient>`
"""
return self._connection.get_client('azure.devops.v7_1.upack_packaging.upack_packaging_client.UPackPackagingClient')
def get_wiki_client(self):
"""get_wiki_client.
Gets the 7.1 version of the WikiClient
:rtype: :class:`<WikiClient> <azure.devops.v7_1.wiki.wiki_client.WikiClient>`
"""
return self._connection.get_client('azure.devops.v7_1.wiki.wiki_client.WikiClient')
def get_work_client(self):
"""get_work_client.
Gets the 7.1 version of the WorkClient
:rtype: :class:`<WorkClient> <azure.devops.v7_1.work.work_client.WorkClient>`
"""
return self._connection.get_client('azure.devops.v7_1.work.work_client.WorkClient')
def get_work_item_tracking_client(self):
"""get_work_item_tracking_client.
Gets the 7.1 version of the WorkItemTrackingClient
:rtype: :class:`<WorkItemTrackingClient> <azure.devops.v7_1.work_item_tracking.work_item_tracking_client.WorkItemTrackingClient>`
"""
return self._connection.get_client('azure.devops.v7_1.work_item_tracking.work_item_tracking_client.WorkItemTrackingClient')
def get_work_item_tracking_process_client(self):
"""get_work_item_tracking_process_client.
Gets the 7.1 version of the WorkItemTrackingProcessClient
:rtype: :class:`<WorkItemTrackingProcessClient> <azure.devops.v7_1.work_item_tracking_process.work_item_tracking_process_client.WorkItemTrackingProcessClient>`
"""
return self._connection.get_client('azure.devops.v7_1.work_item_tracking_process.work_item_tracking_process_client.WorkItemTrackingProcessClient')
def get_work_item_tracking_process_template_client(self):
"""get_work_item_tracking_process_template_client.
Gets the 7.1 version of the WorkItemTrackingProcessTemplateClient
:rtype: :class:`<WorkItemTrackingProcessTemplateClient> <azure.devops.v7_1.work_item_tracking_process_template.work_item_tracking_process_template_client.WorkItemTrackingProcessTemplateClient>`
"""
return self._connection.get_client('azure.devops.v7_1.work_item_tracking_process_template.work_item_tracking_process_template_client.WorkItemTrackingProcessTemplateClient')
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/client_factory.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/client_factory.py",
"repo_id": "azure-devops-python-api",
"token_count": 8654
}
| 363 |
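# --- Hedged usage sketch (editorial addition) ---
# ClientFactoryV7_1 only forwards to connection.get_client, so it can be constructed
# directly from an authenticated Connection. The URL and token below are placeholders.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
from azure.devops.v7_1.client_factory import ClientFactoryV7_1

connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-personal-access-token'))
clients = ClientFactoryV7_1(connection)
core_client = clients.get_core_client()  # azure.devops.v7_1.core.core_client.CoreClient
git_client = clients.get_git_client()    # azure.devops.v7_1.git.git_client.GitClient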
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .elastic_client import ElasticClient
__all__ = [
'ElasticNode',
'ElasticNodeSettings',
'ElasticPool',
'ElasticPoolCreationResult',
'ElasticPoolLog',
'ElasticPoolSettings',
'GraphSubjectBase',
'IdentityRef',
'ReferenceLinks',
'TaskAgentPool',
'TaskAgentPoolReference',
'TaskAgentQueue',
'ElasticClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/elastic/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/elastic/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 227
}
| 364 |
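# --- Hedged usage sketch (editorial addition) ---
# The __init__ module above re-exports the elastic models alongside ElasticClient, so
# every name listed in __all__ can be imported from the package root, for example:
from azure.devops.v7_1.elastic import ElasticClient, ElasticPool, ElasticPoolSettings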
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class FileContainerClient(Client):
"""FileContainer
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(FileContainerClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def create_items(self, items, container_id, scope=None):
"""CreateItems.
[Preview API] Creates the specified items in the referenced container.
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v7_1.file_container.models.VssJsonCollectionWrapper>` items:
:param int container_id:
:param str scope: A guid representing the scope of the container. This is often the project id.
:rtype: [FileContainerItem]
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'int')
query_parameters = {}
if scope is not None:
query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
content = self._serialize.body(items, 'VssJsonCollectionWrapper')
response = self._send(http_method='POST',
location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[FileContainerItem]', self._unwrap_collection(response))
def delete_item(self, container_id, item_path, scope=None):
"""DeleteItem.
[Preview API] Deletes the specified items in a container.
:param long container_id: Container Id.
:param str item_path: Path to delete.
:param str scope: A guid representing the scope of the container. This is often the project id.
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
query_parameters = {}
if item_path is not None:
query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
if scope is not None:
query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
self._send(http_method='DELETE',
location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters)
def get_containers(self, scope=None, artifact_uris=None):
"""GetContainers.
        [Preview API] Gets containers filtered by a comma-separated list of artifact URIs within the same scope; if not specified, returns all containers.
:param str scope: A guid representing the scope of the container. This is often the project id.
:param str artifact_uris:
:rtype: [FileContainer]
"""
query_parameters = {}
if scope is not None:
query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
if artifact_uris is not None:
query_parameters['artifactUris'] = self._serialize.query('artifact_uris', artifact_uris, 'str')
response = self._send(http_method='GET',
location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
version='7.1-preview.4',
query_parameters=query_parameters)
return self._deserialize('[FileContainer]', self._unwrap_collection(response))
def get_items(self, container_id, scope=None, item_path=None, metadata=None, format=None, download_file_name=None, include_download_tickets=None, is_shallow=None, ignore_requested_media_type=None, include_blob_metadata=None, save_absolute_path=None):
"""GetItems.
[Preview API]
:param long container_id:
:param str scope:
:param str item_path:
:param bool metadata:
:param str format:
:param str download_file_name:
:param bool include_download_tickets:
:param bool is_shallow:
:param bool ignore_requested_media_type:
:param bool include_blob_metadata:
:param bool save_absolute_path:
:rtype: [FileContainerItem]
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
query_parameters = {}
if scope is not None:
query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
if item_path is not None:
query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
if metadata is not None:
query_parameters['metadata'] = self._serialize.query('metadata', metadata, 'bool')
if format is not None:
query_parameters['$format'] = self._serialize.query('format', format, 'str')
if download_file_name is not None:
query_parameters['downloadFileName'] = self._serialize.query('download_file_name', download_file_name, 'str')
if include_download_tickets is not None:
query_parameters['includeDownloadTickets'] = self._serialize.query('include_download_tickets', include_download_tickets, 'bool')
if is_shallow is not None:
query_parameters['isShallow'] = self._serialize.query('is_shallow', is_shallow, 'bool')
if ignore_requested_media_type is not None:
query_parameters['ignoreRequestedMediaType'] = self._serialize.query('ignore_requested_media_type', ignore_requested_media_type, 'bool')
if include_blob_metadata is not None:
query_parameters['includeBlobMetadata'] = self._serialize.query('include_blob_metadata', include_blob_metadata, 'bool')
if save_absolute_path is not None:
query_parameters['saveAbsolutePath'] = self._serialize.query('save_absolute_path', save_absolute_path, 'bool')
response = self._send(http_method='GET',
location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[FileContainerItem]', self._unwrap_collection(response))
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/file_container/file_container_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/file_container/file_container_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 3019
}
| 365 |
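# --- Hedged usage sketch (editorial addition) ---
# FileContainerClient declares a plain (base_url, creds) constructor, so it can be built
# directly. The container id, project GUID scope, and item path are placeholder
# assumptions; get_items returns FileContainerItem models per the docstring above.
from msrest.authentication import BasicAuthentication
from azure.devops.v7_1.file_container.file_container_client import FileContainerClient

creds = BasicAuthentication('', 'your-personal-access-token')
container_client = FileContainerClient(base_url='https://dev.azure.com/your-org', creds=creds)
items = container_client.get_items(container_id=123456,
                                   scope='00000000-0000-0000-0000-000000000000',
                                   item_path='drop',
                                   include_download_tickets=True)
for item in items:
    print(item.path)  # 'path' is the expected FileContainerItem attribute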
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class LocationClient(Client):
"""Location
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(LocationClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def get_connection_data(self, connect_options=None, last_change_id=None, last_change_id64=None):
"""GetConnectionData.
[Preview API] This was copied and adapted from TeamFoundationConnectionService.Connect()
:param str connect_options:
:param int last_change_id: Obsolete 32-bit LastChangeId
:param long last_change_id64: Non-truncated 64-bit LastChangeId
:rtype: :class:`<ConnectionData> <azure.devops.v7_1.location.models.ConnectionData>`
"""
query_parameters = {}
if connect_options is not None:
query_parameters['connectOptions'] = self._serialize.query('connect_options', connect_options, 'str')
if last_change_id is not None:
query_parameters['lastChangeId'] = self._serialize.query('last_change_id', last_change_id, 'int')
if last_change_id64 is not None:
query_parameters['lastChangeId64'] = self._serialize.query('last_change_id64', last_change_id64, 'long')
response = self._send(http_method='GET',
location_id='00d9565f-ed9c-4a06-9a50-00e7896ccab4',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('ConnectionData', response)
def get_resource_area(self, area_id, enterprise_name=None, organization_name=None):
"""GetResourceArea.
[Preview API]
:param str area_id:
:param str enterprise_name:
:param str organization_name:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v7_1.location.models.ResourceAreaInfo>`
"""
route_values = {}
if area_id is not None:
route_values['areaId'] = self._serialize.url('area_id', area_id, 'str')
query_parameters = {}
if enterprise_name is not None:
query_parameters['enterpriseName'] = self._serialize.query('enterprise_name', enterprise_name, 'str')
if organization_name is not None:
query_parameters['organizationName'] = self._serialize.query('organization_name', organization_name, 'str')
response = self._send(http_method='GET',
location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ResourceAreaInfo', response)
def get_resource_area_by_host(self, area_id, host_id):
"""GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v7_1.location.models.ResourceAreaInfo>`
"""
route_values = {}
if area_id is not None:
route_values['areaId'] = self._serialize.url('area_id', area_id, 'str')
query_parameters = {}
if host_id is not None:
query_parameters['hostId'] = self._serialize.query('host_id', host_id, 'str')
response = self._send(http_method='GET',
location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ResourceAreaInfo', response)
def get_resource_areas(self, enterprise_name=None, organization_name=None):
"""GetResourceAreas.
[Preview API]
:param str enterprise_name:
:param str organization_name:
:rtype: [ResourceAreaInfo]
"""
query_parameters = {}
if enterprise_name is not None:
query_parameters['enterpriseName'] = self._serialize.query('enterprise_name', enterprise_name, 'str')
if organization_name is not None:
query_parameters['organizationName'] = self._serialize.query('organization_name', organization_name, 'str')
response = self._send(http_method='GET',
location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[ResourceAreaInfo]', self._unwrap_collection(response))
def get_resource_areas_by_host(self, host_id):
"""GetResourceAreasByHost.
[Preview API]
:param str host_id:
:rtype: [ResourceAreaInfo]
"""
query_parameters = {}
if host_id is not None:
query_parameters['hostId'] = self._serialize.query('host_id', host_id, 'str')
response = self._send(http_method='GET',
location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[ResourceAreaInfo]', self._unwrap_collection(response))
def delete_service_definition(self, service_type, identifier):
"""DeleteServiceDefinition.
[Preview API]
:param str service_type:
:param str identifier:
"""
route_values = {}
if service_type is not None:
route_values['serviceType'] = self._serialize.url('service_type', service_type, 'str')
if identifier is not None:
route_values['identifier'] = self._serialize.url('identifier', identifier, 'str')
self._send(http_method='DELETE',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='7.1-preview.1',
route_values=route_values)
def get_service_definition(self, service_type, identifier, allow_fault_in=None, preview_fault_in=None):
"""GetServiceDefinition.
[Preview API] Finds a given service definition.
:param str service_type:
:param str identifier:
:param bool allow_fault_in: If true, we will attempt to fault in a host instance mapping if in SPS.
:param bool preview_fault_in: If true, we will calculate and return a host instance mapping, but not persist it.
:rtype: :class:`<ServiceDefinition> <azure.devops.v7_1.location.models.ServiceDefinition>`
"""
route_values = {}
if service_type is not None:
route_values['serviceType'] = self._serialize.url('service_type', service_type, 'str')
if identifier is not None:
route_values['identifier'] = self._serialize.url('identifier', identifier, 'str')
query_parameters = {}
if allow_fault_in is not None:
query_parameters['allowFaultIn'] = self._serialize.query('allow_fault_in', allow_fault_in, 'bool')
if preview_fault_in is not None:
query_parameters['previewFaultIn'] = self._serialize.query('preview_fault_in', preview_fault_in, 'bool')
response = self._send(http_method='GET',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ServiceDefinition', response)
def get_service_definitions(self, service_type=None):
"""GetServiceDefinitions.
[Preview API]
:param str service_type:
:rtype: [ServiceDefinition]
"""
route_values = {}
if service_type is not None:
route_values['serviceType'] = self._serialize.url('service_type', service_type, 'str')
response = self._send(http_method='GET',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('[ServiceDefinition]', self._unwrap_collection(response))
def update_service_definitions(self, service_definitions):
"""UpdateServiceDefinitions.
[Preview API]
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v7_1.location.models.VssJsonCollectionWrapper>` service_definitions:
"""
content = self._serialize.body(service_definitions, 'VssJsonCollectionWrapper')
self._send(http_method='PATCH',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='7.1-preview.1',
content=content)
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/location/location_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/location/location_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 4313
}
| 366 |
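# --- Hedged usage sketch (editorial addition) ---
# LocationClient exposes the same plain (base_url, creds) constructor shown above.
# The URL and token are placeholders; ResourceAreaInfo is assumed to expose id and
# location_url attributes as declared in the corresponding models module.
from msrest.authentication import BasicAuthentication
from azure.devops.v7_1.location.location_client import LocationClient

creds = BasicAuthentication('', 'your-personal-access-token')
location_client = LocationClient(base_url='https://dev.azure.com/your-org', creds=creds)

# Enumerate the resource areas known to the organization.
for area in location_client.get_resource_areas():
    print(area.id, area.location_url)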