# ------------------------------------------------------------------------------
# DeepLabV3+ decoder.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
from .aspp import ASPP
from .conv_module import stacked_conv
__all__ = ["DeepLabV3PlusDecoder"]
class DeepLabV3PlusDecoder(nn.Module):
def __init__(self, in_channels, feature_key, low_level_channels, low_level_key, low_level_channels_project,
decoder_channels, atrous_rates, num_classes):
super(DeepLabV3PlusDecoder, self).__init__()
self.aspp = ASPP(in_channels, out_channels=decoder_channels, atrous_rates=atrous_rates)
self.feature_key = feature_key
self.low_level_key = low_level_key
# Transform low-level feature
# low_level_channels_project = 48
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, low_level_channels_project, 1, bias=False),
nn.BatchNorm2d(low_level_channels_project),
nn.ReLU()
)
# Fuse
self.fuse = stacked_conv(
decoder_channels + low_level_channels_project,
decoder_channels,
kernel_size=3,
padding=1,
num_stack=2,
conv_type='depthwise_separable_conv'
)
self.classifier = nn.Conv2d(decoder_channels, num_classes, 1)
def set_image_pooling(self, pool_size):
self.aspp.set_image_pooling(pool_size)
def forward(self, features):
pred = OrderedDict()
l = features[self.low_level_key]
x = features[self.feature_key]
x = self.aspp(x)
# low-level feature
l = self.project(l)
x = F.interpolate(x, size=l.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x, l), dim=1)
x = self.fuse(x)
x = self.classifier(x)
pred['semantic'] = x
return pred
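# Hypothetical usage sketch (not from the repo): channel sizes, feature keys
# and atrous rates below are illustrative assumptions about the backbone.
if __name__ == '__main__':
    decoder = DeepLabV3PlusDecoder(
        in_channels=2048, feature_key='res5',
        low_level_channels=256, low_level_key='res2',
        low_level_channels_project=48,
        decoder_channels=256, atrous_rates=(6, 12, 18),
        num_classes=19)
    features = {'res5': torch.randn(2, 2048, 33, 33),    # high-level feature
                'res2': torch.randn(2, 256, 129, 129)}   # low-level feature
    out = decoder(features)
    print(out['semantic'].shape)  # -> torch.Size([2, 19, 129, 129])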
Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/deeplabv3plus.py
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/hooks.py#L195
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import Counter
def get_lr_group_id(optimizer):
"""
Returns the group id with majority of lr.
"""
# Get the correct parameter group id to access to lr info.
largest_group = max(len(g["params"]) for g in optimizer.param_groups)
if largest_group == 1:
# If all groups have one parameter,
# then find the most common initial LR, and use it for summary
lr_count = Counter([g["lr"] for g in optimizer.param_groups])
lr = lr_count.most_common()[0][0]
for i, g in enumerate(optimizer.param_groups):
if g["lr"] == lr:
best_param_group_id = i
break
else:
for i, g in enumerate(optimizer.param_groups):
if len(g["params"]) == largest_group:
best_param_group_id = i
break
return best_param_group_id
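# Hypothetical sanity check (toy optimizer; values are illustrative):
if __name__ == '__main__':
    import torch
    params = [torch.nn.Parameter(torch.zeros(3)) for _ in range(3)]
    opt = torch.optim.SGD([{'params': [params[0]], 'lr': 0.1},
                           {'params': [params[1]], 'lr': 0.01},
                           {'params': [params[2]], 'lr': 0.1}], lr=0.1)
    # every group holds one parameter, so the most common lr (0.1) wins
    assert get_lr_group_id(opt) == 0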
Cream/CDARTS/CDARTS_segmentation/segmentation/solver/utils.py
from datasets.BaseDataset import BaseDataset
class CamVid(BaseDataset):
@classmethod
def get_class_colors(*args):
return [[128, 0, 0], [128, 128, 0], [128, 128, 128], [64, 0, 128],
[192, 128, 128], [128, 64, 128], [64, 64, 0], [64, 64, 128],
[192, 192, 128], [0, 0, 192], [0, 128, 192]]
@classmethod
def get_class_names(*args):
# class counting(gtFine)
# 2953 2811 2934 970 1296 2949 1658 2808 2891 1654 2686 2343 1023 2832
# 359 274 142 513 1646
return ['Building', 'Tree', 'Sky', 'Car', 'Sign-Symbol', 'Road',
'Pedestrian', 'Fence', 'Column-Pole', 'Side-Walk', 'Bicyclist', 'Void']
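# Hypothetical consistency check: the palette has 11 entries while the name
# list has 12, presumably because 'Void' carries no color of its own.
if __name__ == '__main__':
    assert len(CamVid.get_class_colors()) == 11
    assert len(CamVid.get_class_names()) == 12  # includes 'Void'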
Cream/CDARTS/CDARTS_segmentation/tools/datasets/camvid/camvid.py
from __future__ import division
import os
import sys
import time
import glob
import json
import logging
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from tensorboardX import SummaryWriter
import numpy as np
from thop import profile
from ptflops import get_model_complexity_info
from config_train import config
# if config.is_eval:
# config.save = '../OUTPUT/eval-{}-{}'.format(config.save, time.strftime("%Y%m%d-%H%M%S"))
# else:
# config.save = '../OUTPUT/train-{}-{}'.format(config.save, time.strftime("%Y%m%d-%H%M%S"))
from dataloader import get_train_loader, CyclicIterator
from datasets import Cityscapes
import dataloaders
from utils.init_func import init_weight
from utils.lr_scheduler import Iter_LR_Scheduler
from seg_opr.loss_opr import ProbOhemCrossEntropy2d
from eval import SegEvaluator
from test import SegTester
from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat
from utils.dist_utils import reduce_tensor, ModelEma
from model_seg import Network_Multi_Path_Infer_SPOS as Network
import seg_metrics
import yaml
import timm
from timm.optim import create_optimizer
from utils.pyt_utils import AverageMeter, to_cuda, get_loss_info_str, compute_hist, compute_hist_np, load_pretrain
def adjust_learning_rate(base_lr, power, optimizer, epoch, total_epoch):
    # NOTE: simply scales every group's current lr by `power`;
    # base_lr, epoch and total_epoch are accepted but unused.
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * power
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='../configs/auto2/sz512drop0.2.yaml', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--det2_cfg', type=str, default='configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml', help='')
parser.add_argument('--save', type=str, default='../OUTPUT/train', help='')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--world_size", default=1, type=int)
parser.add_argument("--eval_height", default=1025, type=int, help='train height')
parser.add_argument("--eval_width", default=2049, type=int, help='train width')
parser.add_argument("--test_epoch", default=250, type=int, help='Epochs for test')
parser.add_argument("--batch_size", default=12, type=int, help='batch size')
parser.add_argument("--Fch", default=12, type=int, help='Fch')
parser.add_argument('--stem_head_width', type=float, default=1.0, help='stem and head width multiplier')
parser.add_argument('--resume', type=str, default='../OUTPUT/train/', help='resume')
## new retrain ###
parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "step")')
parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs')
parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes')
parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate')
parser.add_argument('--warmup_start_lr', type=float, default=5e-6, help='warm up learning rate')
parser.add_argument('--lr-step', type=float, default=None)
parser.add_argument('--warmup-iters', type=int, default=1000)
parser.add_argument('--min-lr', type=float, default=None)
parser.add_argument('--crop_size', type=int, default=769, help='image crop size')
parser.add_argument('--resize', type=int, default=769, help='image resize size')
parser.add_argument("--image_height", default=513, type=int, help='train height')
parser.add_argument("--image_width", default=1025, type=int, help='train width')
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--dist', type=bool, default=True)
parser.add_argument('--autodeeplab', type=str, default='train_seg')
parser.add_argument('--max-iteration', default=1000000, type=int)
parser.add_argument('--mode', default='poly', type=str, help='how lr decline')
parser.add_argument('--train_mode', type=str, default='iter', choices=['iter', 'epoch'])
parser.add_argument("--data_path", default='/home/t-hongyuanyu/data/cityscapes', type=str, help='If specified, replace config.load_path')
parser.add_argument("--load_path", default='', type=str, help='If specified, replace config.load_path')
parser.add_argument("--json_file", default='jsons/0.json', type=str, help='model_arch')
parser.add_argument("--seed", default=12345, type=int, help="random seed")
parser.add_argument('--sync_bn', action='store_false',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--random_sample', action='store_true',
help='Random sample path.')
parser.add_argument('--drop_path_prob', type=float, default=0.0, help='drop path prob')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# train val
parser.add_argument('--ignore', type=int, default=255, help='semantic ignore')
parser.add_argument('--eval_flip', action='store_true', default=False,
help='semantic eval flip')
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def main():
args, args_text = _parse_args()
if args.load_path:
config.load_path = args.load_path
config.batch_size = args.batch_size
config.image_height = args.image_height
config.image_width = args.image_width
config.eval_height = args.eval_height
config.eval_width = args.eval_width
config.Fch = args.Fch
config.dataset_path = args.data_path
config.save = args.save
# preparation ################
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
model_files = glob.glob("Search/1paths/*.json") + glob.glob("Search/2paths/*.json") + glob.glob("Search/3paths/*.json")
for model_file in model_files:
with open(model_file, 'r') as f:
# dict_a = json.loads(f, cls=NpEncoder)
model_dict = json.loads(f.read())
model = Network(
model_dict["ops"], model_dict["paths"], model_dict["downs"], model_dict["widths"], model_dict["lasts"],
num_classes=config.num_classes, layers=config.layers, Fch=config.Fch, width_mult_list=config.width_mult_list, stem_head_width=(args.stem_head_width, args.stem_head_width))
if args.local_rank == 0:
print("net: " + str(model))
# with torch.cuda.device(0):
# macs, params = get_model_complexity_info(model, (3, 1024, 2048), as_strings=True,
# print_per_layer_stat=True, verbose=True)
# logging.info('{:<30} {:<8}'.format('Computational complexity: ', macs))
# logging.info('{:<30} {:<8}'.format('Number of parameters: ', params))
flops, params = profile(model, inputs=(torch.randn(1, 3, 1024, 2048),), verbose=False)
flops = flops / 1e9
params = params / 1e6
model_dict['flops'] = flops
model_dict['params'] = params
print("params = %fMB, FLOPs = %fGB", params, flops)
with open(model_file, 'w') as f:
json.dump(model_dict, f, cls=NpEncoder)
if __name__ == '__main__':
main()
#launch(
# main,
# 2,
# num_machines=1,
# machine_rank=0,
# dist_url='auto',
#)
Cream/CDARTS/CDARTS_segmentation/tools/utils/cal_model.py
from __future__ import division
import os
import sys
import time
import glob
import json
import logging
import argparse
import numpy as np
import pandas as pd  # needed below for pd.read_csv; missing from the original imports
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.filterwarnings(action='once')
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def main():
# Import Data
model_files = glob.glob("train_samples/*.json")
for model_file in model_files:
with open(model_file, 'r') as f:
# dict_a = json.loads(f, cls=NpEncoder)
model_dict = json.loads(f.read())
df = pd.read_csv("mpg_ggplot2.csv")
# Prepare data
x_var = 'manufacturer'
groupby_var = 'class'
df_agg = df.loc[:, [x_var, groupby_var]].groupby(groupby_var)
vals = [df[x_var].values.tolist() for i, df in df_agg]
# Draw
plt.figure(figsize=(16,9), dpi= 80)
colors = [plt.cm.Spectral(i/float(len(vals)-1)) for i in range(len(vals))]
n, bins, patches = plt.hist(vals, df[x_var].unique().__len__(), stacked=True, density=False, color=colors[:len(vals)])
# Decoration
plt.legend({group:col for group, col in zip(np.unique(df[groupby_var]).tolist(), colors[:len(vals)])})
plt.title(f"Stacked Histogram of ${x_var}$ colored by ${groupby_var}$", fontsize=22)
plt.xlabel(x_var)
plt.ylabel("Frequency")
plt.ylim(0, 40)
plt.xticks(rotation=90, horizontalalignment='left')
plt.show()
if __name__ == '__main__':
main()
Cream/CDARTS/CDARTS_segmentation/train/cal_model.py
import torch.nn as nn
import torch.nn.functional as F
import torch
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True, ignore_index=-100):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss(weight, size_average, ignore_index)
def forward(self, inputs, targets):
return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
def one_hot(index, classes):
    # index is not flattened (bypass ignore) ############
# size = index.size()[:1] + (classes,) + index.size()[1:]
# view = index.size()[:1] + (1,) + index.size()[1:]
#####################################################
    # index is flattened (during ignore) ################
size = index.size()[:1] + (classes,)
view = index.size()[:1] + (1,)
#####################################################
mask = torch.Tensor(size).fill_(0).cuda()
index = index.view(view)
ones = 1.
return mask.scatter_(1, index, ones)
class FocalLoss(nn.CrossEntropyLoss):
''' Focal loss for classification tasks on imbalanced datasets '''
def __init__(self, gamma=2, alpha=None, ignore_index=-100, reduction='mean'):
super().__init__(weight=alpha, ignore_index=ignore_index, reduction='mean')
self.reduction = reduction
self.gamma = gamma
def forward(self, input_, target):
cross_entropy = super().forward(input_, target)
# Temporarily mask out ignore index to '0' for valid gather-indices input.
# This won't contribute final loss as the cross_entropy contribution
# for these would be zero.
target = target * (target != self.ignore_index).long()
input_prob = torch.gather(F.softmax(input_, 1), 1, target.unsqueeze(1))
loss = torch.pow(1 - input_prob, self.gamma) * cross_entropy
if self.reduction == 'mean': return torch.mean(loss)
elif self.reduction == 'sum': return torch.sum(loss)
else: return loss
class SoftCrossEntropyLoss2d(nn.Module):
def __init__(self):
super(SoftCrossEntropyLoss2d, self).__init__()
def forward(self, inputs, targets):
loss = 0
inputs = -F.log_softmax(inputs, dim=1)
for index in range(inputs.size()[0]):
loss += F.conv2d(inputs[range(index, index+1)], targets[range(index, index+1)])/(targets.size()[2] *
targets.size()[3])
return loss
class OhemCELoss(nn.Module):
def __init__(self, thresh, n_min=0.1, ignore_lb=255, *args, **kwargs):
super(OhemCELoss, self).__init__()
self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float)).cuda()
self.n_min = n_min
self.ignore_lb = ignore_lb
self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_lb, reduction='none')
def forward(self, logits, labels):
loss = self.criteria(logits, labels).view(-1)
loss, _ = torch.sort(loss, descending=True)
n_min = int(self.n_min * len(loss))
if loss[n_min] > self.thresh:
loss = loss[loss>self.thresh]
else:
loss = loss[:n_min]
return torch.mean(loss)
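# Hypothetical usage sketch (requires a GPU, since __init__ moves the
# threshold tensor to CUDA; shapes and the 19-class setting are illustrative):
if __name__ == '__main__' and torch.cuda.is_available():
    criterion = OhemCELoss(thresh=0.7, n_min=0.16)
    logits = torch.randn(2, 19, 64, 64, device='cuda')       # N, C, H, W
    labels = torch.randint(0, 19, (2, 64, 64), device='cuda')
    print(criterion(logits, labels))  # scalar loss over the hardest pixels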
Cream/CDARTS/CDARTS_segmentation/train/loss.py
import torch
import torch.nn as nn
from utils import utils
from datasets import data_utils
from models.loss import CrossEntropyLabelSmooth
def train(train_loader, model, optimizer, epoch, writer, logger, config):
device = torch.device("cuda")
if config.label_smooth > 0:
criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device)
else:
criterion = nn.CrossEntropyLoss().to(device)
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
step_num = len(train_loader)
cur_step = epoch*step_num
cur_lr = optimizer.param_groups[0]['lr']
if config.local_rank == 0:
logger.info("Train Epoch {} LR {}".format(epoch, cur_lr))
writer.add_scalar('train/lr', cur_lr, cur_step)
model.train()
for step, (X, y) in enumerate(train_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True)
optimizer.zero_grad()
logits, logits_aux = model(X, layer_idx=0, super_flag=True, pretrain_flag=True)
loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam)
if config.aux_weight > 0:
# loss_aux = criterion(logits_aux, y)
loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam)
loss = loss + config.aux_weight * loss_aux
if config.use_amp:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
torch.cuda.synchronize()
        if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num - 1):
logger.info(
"Train: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step,
step_num, losses=losses, top1=top1, top5=top5))
if config.local_rank == 0:
writer.add_scalar('train/loss', reduced_loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
cur_step += 1
if config.local_rank == 0:
logger.info("Train: Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
epoch+1, config.epochs, top1.avg))
def validate(valid_loader, model, epoch, cur_step, writer, logger, config):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
device = torch.device("cuda")
criterion = nn.CrossEntropyLoss().to(device)
with torch.no_grad():
for step, (X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
torch.cuda.synchronize()
step_num = len(valid_loader)
if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0:
logger.info(
"Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, step_num,
losses=losses, top1=top1, top5=top5))
if config.local_rank == 0:
writer.add_scalar('val/loss', losses.avg, cur_step)
writer.add_scalar('val/top1', top1.avg, cur_step)
writer.add_scalar('val/top5', top5.avg, cur_step)
logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
epoch+1, config.epochs, top1.avg))
return top1.avg, top5.avg
def sample_train(train_loader, model, optimizer, epoch, writer, logger, config):
device = torch.device("cuda")
if config.label_smooth > 0:
criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device)
else:
criterion = nn.CrossEntropyLoss().to(device)
step_num = len(train_loader)
cur_step = epoch*step_num
cur_lr = optimizer.param_groups[0]['lr']
if config.local_rank == 0:
logger.info("Train Epoch {} LR {}".format(epoch, cur_lr))
writer.add_scalar('train/lr', cur_lr, cur_step)
model.train()
for step, (X, y) in enumerate(train_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True)
optimizer.zero_grad()
all_losses = []
all_logits = []
for i in range(config.sample_archs):
### sample new arch ###
model.module.init_arch_params(layer_idx=0)
genotypes = []
            for j in range(config.layer_num):
                genotype, connect = model.module.generate_genotype(j)
                genotypes.append(genotype)
                model.module.genotypes[j] = genotype
                model.module.connects[j] = connect
logits, logits_aux = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True)
all_logits.append(logits)
loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam)
if config.aux_weight > 0:
# loss_aux = criterion(logits_aux, y)
loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam)
loss = loss + config.aux_weight * loss_aux
all_losses.append(loss)
'''
for j, genotype in enumerate(genotypes):
if config.local_rank == 0:
logger.info("Random stage: {} layer: {} genotype = {}".format(i, j, genotype))
'''
loss = torch.sum(torch.stack(all_losses))
if config.use_amp:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# tricks
for p in model.module.parameters():
if p.grad is not None and p.grad.sum() == 0:
p.grad = None
# gradient clipping
nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip)
optimizer.step()
for i, logits in enumerate(all_logits):
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(all_losses[i].data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = all_losses[i].data
torch.cuda.synchronize()
            if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num - 1):
logger.info(
"Train: Epoch {:2d}/{} Step {:03d}/{:03d} Sample idx {} Loss {:.3f} "
"Prec@(1,5) ({:.1%}, {:.1%})".format(
epoch+1, config.epochs, step, step_num, i,
reduced_loss.item(), prec1.item(), prec5.item()))
if config.local_rank == 0:
writer.add_scalar('train/loss', reduced_loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
cur_step += 1
def sample_validate(valid_loader, model, epoch, cur_step, writer, logger, config):
model.eval()
device = torch.device("cuda")
criterion = nn.CrossEntropyLoss().to(device)
with torch.no_grad():
for step, (X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
for i in range(config.sample_archs):
### sample new arch ###
model.module.init_arch_params(layer_idx=0)
genotypes = []
                for j in range(config.layer_num):
                    genotype, connect = model.module.generate_genotype(j)
                    genotypes.append(genotype)
                    model.module.genotypes[j] = genotype
                    model.module.connects[j] = connect
logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
step_num = len(valid_loader)
if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0:
logger.info(
"Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Sample_index {} Loss {:.3f} "
"Prec@(1,5) ({:.1%}, {:.1%})".format(
epoch+1, config.epochs, step, step_num, i,
reduced_loss.item(), prec1.item(), prec5.item()))
if config.local_rank == 0:
writer.add_scalar('val/loss', reduced_loss.item(), cur_step)
writer.add_scalar('val/top1', prec1.item(), cur_step)
writer.add_scalar('val/top5', prec5.item(), cur_step)
return prec1.item(), prec5.item()
def test_sample(valid_loader, model, epoch, cur_step, writer, logger, config):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
device = torch.device("cuda")
criterion = nn.CrossEntropyLoss().to(device)
model.module.init_arch_params(layer_idx=0)
genotypes = []
for i in range(config.layer_num):
genotype, connect = model.module.generate_genotype(i)
genotypes.append(genotype)
model.module.genotypes[i] = genotype
model.module.connects[i] = connect
with torch.no_grad():
for step, (X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
# logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True)
logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
torch.cuda.synchronize()
step_num = len(valid_loader)
if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0:
logger.info(
"Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, step_num,
losses=losses, top1=top1, top5=top5))
if config.local_rank == 0:
writer.add_scalar('val/loss', losses.avg, cur_step)
writer.add_scalar('val/top1', top1.avg, cur_step)
writer.add_scalar('val/top5', top5.avg, cur_step)
logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
epoch+1, config.epochs, top1.avg))
return top1.avg, top5.avg
Cream/CDARTS/benchmark201/core/pretrain_function.py
""" CNN cell for network augmentation """
import torch
import torch.nn as nn
from lib.models import ops
import lib.utils.genotypes as gt
class AugmentCell(nn.Module):
""" Cell for augmentation
Each edge is discrete.
"""
def __init__(self, genotype, C_pp, C_p, C, reduction_p, reduction, bn_affine=True):
super().__init__()
self.reduction = reduction
self.n_nodes = len(genotype.normal)
if reduction_p:
self.preproc0 = ops.FactorizedReduce(C_pp, C, affine=bn_affine)
else:
self.preproc0 = ops.StdConv(C_pp, C, 1, 1, 0, affine=bn_affine)
self.preproc1 = ops.StdConv(C_p, C, 1, 1, 0, affine=bn_affine)
# generate dag
if reduction:
gene = genotype.reduce
self.concat = genotype.reduce_concat
else:
gene = genotype.normal
self.concat = genotype.normal_concat
self.dag = gt.to_dag(C, gene, reduction, bn_affine)
def forward(self, s0, s1):
s0 = self.preproc0(s0)
s1 = self.preproc1(s1)
states = [s0, s1]
for edges in self.dag:
s_cur = sum(op(states[op.s_idx]) for op in edges)
states.append(s_cur)
s_out = torch.cat([states[i] for i in self.concat], dim=1)
return s_out
Cream/CDARTS/lib/models/augment_cells.py
# Test Workspace
Cream/Cream/experiments/workspace/test/README.md
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import torch
from ptflops import get_model_complexity_info
class FlopsEst(object):
def __init__(self, model, input_shape=(2, 3, 224, 224), device='cpu'):
self.block_num = len(model.blocks)
self.choice_num = len(model.blocks[0])
self.flops_dict = {}
self.params_dict = {}
if device == 'cpu':
model = model.cpu()
else:
model = model.cuda()
self.params_fixed = 0
self.flops_fixed = 0
input = torch.randn(input_shape)
flops, params = get_model_complexity_info(
model.conv_stem, (3, 224, 224), as_strings=False, print_per_layer_stat=False)
self.params_fixed += params / 1e6
self.flops_fixed += flops / 1e6
input = model.conv_stem(input)
for block_id, block in enumerate(model.blocks):
self.flops_dict[block_id] = {}
self.params_dict[block_id] = {}
for module_id, module in enumerate(block):
self.flops_dict[block_id][module_id] = {}
self.params_dict[block_id][module_id] = {}
for choice_id, choice in enumerate(module):
flops, params = get_model_complexity_info(choice, tuple(
input.shape[1:]), as_strings=False, print_per_layer_stat=False)
# Flops(M)
self.flops_dict[block_id][module_id][choice_id] = flops / 1e6
# Params(M)
self.params_dict[block_id][module_id][choice_id] = params / 1e6
input = choice(input)
        # global_pool
flops, params = get_model_complexity_info(model.global_pool, tuple(
input.shape[1:]), as_strings=False, print_per_layer_stat=False)
self.params_fixed += params / 1e6
self.flops_fixed += flops / 1e6
input = model.global_pool(input)
        # conv_head
flops, params = get_model_complexity_info(model.conv_head, tuple(
input.shape[1:]), as_strings=False, print_per_layer_stat=False)
self.params_fixed += params / 1e6
self.flops_fixed += flops / 1e6
# return params (M)
def get_params(self, arch):
params = 0
for block_id, block in enumerate(arch):
for module_id, choice in enumerate(block):
if choice == -1:
continue
params += self.params_dict[block_id][module_id][choice]
return params + self.params_fixed
# return flops (M)
def get_flops(self, arch):
flops = 0
for block_id, block in enumerate(arch):
for module_id, choice in enumerate(block):
if choice == -1:
continue
flops += self.flops_dict[block_id][module_id][choice]
return flops + self.flops_fixed
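# Hypothetical sketch of the model interface FlopsEst expects (a real supernet
# comes from the Cream codebase; this toy module only illustrates the contract
# of conv_stem / blocks[block][module][choice] / global_pool / conv_head):
if __name__ == '__main__':
    import torch.nn as nn

    class ToySupernet(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv_stem = nn.Conv2d(3, 16, 3, stride=2, padding=1)
            # one block with one module offering two candidate ops
            self.blocks = nn.ModuleList([nn.ModuleList([nn.ModuleList([
                nn.Conv2d(16, 16, 3, padding=1),   # choice 0
                nn.Conv2d(16, 16, 5, padding=2),   # choice 1
            ])])])
            self.global_pool = nn.AdaptiveAvgPool2d(1)
            self.conv_head = nn.Conv2d(16, 32, 1)

    est = FlopsEst(ToySupernet())
    # an arch is a list of per-block choice indices, one per module
    print(est.get_flops([[0]]), est.get_params([[1]]))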
Cream/Cream/lib/utils/flops_table.py
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_train.json',
img_prefix=data_root + 'leftImg8bit/train/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
img_prefix=data_root + 'leftImg8bit/val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_test.json',
img_prefix=data_root + 'leftImg8bit/test/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
Cream/EfficientViT/downstream/configs/_base_/datasets/cityscapes_detection.py
_base_ = [
'./_base_/models/mask_rcnn_efficientvit_fpn.py',
'./_base_/datasets/coco_instance.py',
'./_base_/schedules/schedule_1x.py',
'./_base_/default_runtime.py'
]
model = dict(
pretrained=None,
backbone=dict(
type='EfficientViT_M4',
pretrained="/root/efficientvit_m4.pth",
frozen_stages=-1,
),
neck=dict(
type='EfficientViTFPN',
in_channels=[128, 256, 384],
out_channels=256,
start_level=0,
num_outs=5,
num_extra_trans_convs=2,
))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg=dict(custom_keys={'attention_biases': dict(decay_mult=0.),
'attention_bias_idxs': dict(decay_mult=0.),
}))
# optimizer_config = dict(grad_clip=None)
# do not use mmdet version fp16
# fp16 = None
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
total_epochs = 12
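# Hypothetical: this config file is materialized by mmcv's Config loader from
# application code (kept commented here so the config itself stays loadable):
# from mmcv import Config
# cfg = Config.fromfile('configs/mask_rcnn_efficientvit_m4_fpn_1x_coco.py')
# print(cfg.optimizer.type)  # 'AdamW'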
Cream/EfficientViT/downstream/configs/mask_rcnn_efficientvit_m4_fpn_1x_coco.py
#include <c10/cuda/CUDAGuard.h>
#include <torch/extension.h>
#include <THC/THCAtomics.cuh>
#include <vector>
using index_t = int;
const int HIP_MAX_GRID_NUM = 65535;
const int HIP_MAX_NUM_THREADS = 512;
inline int HIP_GET_NUM_THREADS(const int n) {
return std::min(HIP_MAX_NUM_THREADS, ((n + 31) / 32) * 32);
}
inline int HIP_GET_BLOCKS(const int n, const int num_threads) {
return std::min(HIP_MAX_GRID_NUM, n + num_threads - 1) / num_threads;
}
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename scalar_t>
__global__ void rpe_index_forward_gpu_kernel(
index_t n, scalar_t *p_Y, const scalar_t *__restrict__ p_input,
const index_t *__restrict__ p_index, index_t num_buckets, index_t H,
index_t L_query, index_t L_key, index_t L_qk, index_t s0, index_t s1,
index_t s2, index_t s3) {
CUDA_KERNEL_LOOP(i, n) {
index_t gi = i / L_key;
const index_t qi = gi % L_query;
gi /= L_query;
const index_t hi = gi % H;
gi /= H;
const index_t bi = gi;
const index_t ind = bi * s0 + hi * s1 + qi * s2 + p_index[i % L_qk] * s3;
p_Y[i] = __ldg(&p_input[ind]);
}
}
template <typename scalar_t>
__global__ void rpe_index_backward_gpu_kernel(
index_t n, scalar_t *p_grad_input, const index_t *__restrict__ p_index,
const scalar_t *__restrict__ p_grad_output, index_t num_buckets,
index_t L_key, index_t L_qk) {
CUDA_KERNEL_LOOP(i, n) {
const index_t input_i = i / L_key * num_buckets + p_index[i % L_qk];
const scalar_t v = p_grad_output[i];
gpuAtomicAdd(p_grad_input + input_i, v);
}
}
at::Tensor rpe_index_forward_gpu(torch::Tensor input, torch::Tensor index) {
/*
- Inputs
input: float32 (B, H, L_query, num_buckets)
index: index_t (L_query, L_key)
- Outputs
Y: float32 (B, H, L_query, L_key)
*/
AT_ASSERTM(input.device().is_cuda(), "input must be a GPU tensor");
AT_ASSERTM(index.device().is_cuda(), "index must be a GPU tensor");
AT_ASSERTM(input.ndimension() == 4, "input must be a 4D tensor");
AT_ASSERTM(index.ndimension() == 2, "index must be a 2D tensor");
AT_ASSERTM(index.scalar_type() == at::kInt, "index must be Int type");
AT_ASSERTM(index.is_contiguous(), "index should be contiguous");
const index_t B = input.size(0);
const index_t H = input.size(1);
const index_t num_buckets = input.size(3);
const index_t L_query = index.size(0);
const index_t L_key = index.size(1);
const index_t L_qk = L_query * L_key;
at::Tensor Y = at::empty({B, H, L_query, L_key}, input.options());
const index_t numel = Y.numel();
const at::IntArrayRef strides = input.strides();
const int threadsPerBlock = HIP_GET_NUM_THREADS(numel);
const int blocks = HIP_GET_BLOCKS(numel, threadsPerBlock);
at::cuda::CUDAGuard device_guard(input.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "rpe_index_forward_gpu", [&] {
const scalar_t *p_input = input.data_ptr<scalar_t>();
const index_t *p_index = index.data_ptr<index_t>();
scalar_t *p_Y = Y.data_ptr<scalar_t>();
rpe_index_forward_gpu_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
numel, p_Y, p_input, p_index, num_buckets, H, L_query, L_key, L_qk,
strides[0], strides[1], strides[2], strides[3]);
});
return Y;
}
void rpe_index_backward_gpu(torch::Tensor grad_input, torch::Tensor grad_output,
torch::Tensor index) {
/*
- Inputs
grad_output: float32 (B, H, L_query, L_key)
index: index_t (L_query, L_key)
- Outputs
grad_input: float32 (B, H, L_query, num_buckets)
*/
AT_ASSERTM(grad_input.device().is_cuda(), "grad_input must be a GPU tensor");
AT_ASSERTM(grad_output.device().is_cuda(),
"grad_output must be a GPU tensor");
AT_ASSERTM(index.device().is_cuda(), "grad_index must be a GPU tensor");
AT_ASSERTM(grad_input.ndimension() == 4, "input must be a 4D tensor");
AT_ASSERTM(grad_output.ndimension() == 4, "input must be a 4D tensor");
AT_ASSERTM(index.ndimension() == 2, "index must be a 2D tensor");
AT_ASSERTM(index.scalar_type() == at::kInt, "index must be Int type");
const index_t num_buckets = grad_input.size(3);
const index_t L_query = grad_output.size(2);
const index_t L_key = grad_output.size(3);
const index_t L_qk = L_query * L_key;
auto grad_input_ = grad_input.contiguous();
auto grad_output_ = grad_output.contiguous();
auto index_ = index.contiguous();
const index_t numel = grad_output.numel();
const int threadsPerBlock = HIP_GET_NUM_THREADS(numel);
const int blocks = HIP_GET_BLOCKS(numel, threadsPerBlock);
at::cuda::CUDAGuard device_guard(grad_output.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "rpe_index_backward_gpu", [&] {
scalar_t *p_grad_input = grad_input_.data_ptr<scalar_t>();
const index_t *p_index = index_.data_ptr<index_t>();
const scalar_t *p_grad_output = grad_output_.data_ptr<scalar_t>();
rpe_index_backward_gpu_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
numel, p_grad_input, p_index, p_grad_output, num_buckets, L_key,
L_qk);
});
}
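// For reference, the forward kernel above implements a batched gather:
//   Y[b][h][q][k] = input[b][h][q][index[q][k]]
// A hypothetical host-side ATen sketch of the same semantics (for clarity;
// not part of the repo, and without the custom kernel's performance):
at::Tensor rpe_index_forward_reference(torch::Tensor input,
                                       torch::Tensor index) {
  const auto B = input.size(0);
  const auto H = input.size(1);
  auto idx = index.to(at::kLong)
                 .unsqueeze(0)
                 .unsqueeze(0)
                 .expand({B, H, index.size(0), index.size(1)});
  return input.gather(3, idx);
}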
Cream/MiniViT/Mini-DeiT/rpe_ops/rpe_index_cuda.cu
MODEL:
TYPE: swin_minivit_distill
NAME: swin_tiny_patch4_window7_224_minivit
DROP_PATH_RATE: 0.0
SWIN:
EMBED_DIM: 96
DEPTHS: [ 2, 2, 6, 2 ]
NUM_HEADS: [ 3, 6, 12, 24 ]
WINDOW_SIZE: 7
MINIVIT:
SEPARATE_LAYERNUM_LIST: [1, 1, 1, 1]
Cream/MiniViT/Mini-Swin/configs/swin_tiny_patch4_window7_224_minivit_sharenum6.yaml
import torch
import torch.distributed as dist
from utils import reduce_tensor
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def sync(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
val = torch.tensor(self.val).cuda()
sum_v = torch.tensor(self.sum).cuda()
count = torch.tensor(self.count).cuda()
self.val = reduce_tensor(val, world_size).item()
self.sum = reduce_tensor(sum_v, 1).item()
self.count = reduce_tensor(count, 1).item()
self.avg = self.sum / max(1, self.count)
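# Hypothetical single-process usage; sync() additionally requires an
# initialized torch.distributed process group:
if __name__ == '__main__':
    meter = AverageMeter()
    meter.update(0.5, n=8)   # e.g. mean loss 0.5 over a batch of 8
    meter.update(0.7, n=8)
    print(meter.avg)         # 0.6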
Cream/MiniViT/Mini-Swin/my_meter.py
import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, convert_weights_to_fp16, resize_pos_embed
from .openai import load_openai_model
from .pretrained import get_pretrained_cfg, download_pretrained
from .transform import image_transform
from .tokenizer import HFTokenizer, tokenize
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
# registry of model architecture configs (model_name -> config dict)
_MODEL_CONFIGS = {}
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(
_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(
config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
cache_dir: Optional[str] = None,
args=None,
):
# for callers using old naming with / in ViT names
model_name = model_name.replace('/', '-')
if pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name, device=device, jit=jit, cache_dir=cache_dir)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if model_name in _MODEL_CONFIGS:
logging.info(f'Loading {model_name} model config.')
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(
f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
if args is not None:
model_cfg['mask_image'] = getattr(args, 'prune_image', False)
model_cfg['mask_text'] = getattr(args, 'prune_text', False)
model_cfg['sparsity_warmup'] = getattr(
args, 'sparsity_warmup', 1000)
model_cfg['start_sparsity'] = getattr(args, 'start_sparsity', 0.0)
model_cfg['sparsity'] = getattr(args, 'target_sparsity', 0.25)
logging.info(
f'model sparsity varies from {model_cfg["start_sparsity"]} to {model_cfg["sparsity"]}, sparsity warmup steps: {model_cfg["sparsity_warmup"]}')
logging.info(str(model_cfg))
model = CLIP(**model_cfg)
pretrained_cfg = {}
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(
pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(
f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
logging.warning(
f'Pretrained weights ({pretrained}) not found for model {model_name}.')
raise RuntimeError(
f'Pretrained weights ({pretrained}) not found for model {model_name}.')
model.to(device=device)
if precision == "fp16":
assert device.type != 'cpu'
convert_weights_to_fp16(model)
# set image / mean metadata from pretrained_cfg if available, or use default
if 'davit' in model_name.lower():
pretrained_cfg['mean'] = [0.485, 0.456, 0.406]
pretrained_cfg['std'] = [0.229, 0.224, 0.225]
model.visual.image_mean = pretrained_cfg.get(
'mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get(
'std', None) or OPENAI_DATASET_STD
if jit:
model = torch.jit.script(model)
return model
def create_model_and_transforms(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
args=None,
):
model = create_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image,
cache_dir=cache_dir,
args=args)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
val_keep_ratio = 'davit' not in model_name.lower()
preprocess_train = image_transform(
model.visual.image_size, is_train=True, mean=image_mean, std=image_std)
preprocess_val = image_transform(model.visual.image_size, is_train=False,
mean=image_mean, std=image_std, val_keep_ratio=val_keep_ratio)
return model, preprocess_train, preprocess_val
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def load_exp(name, device='cpu'):
assert '@' in name
teacher_model_name, teacher_pretrained = name.split('@')
return create_model_and_transforms(teacher_model_name, pretrained=teacher_pretrained)
def load_model(name, device='cpu'):
return load_exp(name, device)[0]
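# Hypothetical usage from application code (this module uses relative imports,
# so it must be imported as part of the package). The model name must match a
# JSON file under model_configs/ and the tag an entry in the pretrained registry:
if __name__ == '__main__':
    model, preprocess_train, preprocess_val = create_model_and_transforms(
        'ViT-B-32', pretrained='openai', device=torch.device('cpu'))
    tokenizer = get_tokenizer('ViT-B-32')
    print(type(model).__name__)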
Cream/TinyCLIP/src/open_clip/factory.py
import torch
from torch import nn
import torch.nn.functional as F
from collections import OrderedDict
# NOTE: freeze_batch_norm_2d is used in ModifiedResNet.lock() below but was not
# imported; assumed to live in .utils as in upstream open_clip.
from .utils import freeze_batch_norm_2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.act2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.act3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes *
self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.act1(self.bn1(self.conv1(x)))
out = self.act2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.act3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(
spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2]
* x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3,
stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(width // 2, width // 2,
kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.act2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(
width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.act3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.head_dim = embed_dim // heads
self.attnpool = AttentionPool2d(
image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.act1(self.bn1(self.conv1(x)))
x = self.act2(self.bn2(self.conv2(x)))
x = self.act3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
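# Hypothetical shape check with an RN50-style configuration
# (layers=(3, 4, 6, 3), width=64 gives a 2048-d feature before attnpool):
if __name__ == '__main__':
    net = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32,
                         image_size=224, width=64)
    print(net(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1024])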
Cream/TinyCLIP/src/open_clip/resnet.py
from torch import optim
import logging
class EmptyOptimizer:
def __init__(self):
self.param_groups = []
def step(self, *args, **kwargs):
pass
def state_dict(self):
return dict()
def load_state_dict(self, *args, **kwargs):
pass
def zero_grad(self):
pass
def build_optimizer(args, model):
    def exclude(n, p):
        return p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n

    def include(n, p):
        return not exclude(n, p)
named_parameters = list(model.named_parameters())
# we create three optimizer for image encode, text encoder, and jointly part
model_parts = [
list(model.image_named_params()),
list(model.text_named_params()),
list(model.joint_named_params()),
]
cnt1 = sum(v.numel() for k, v in named_parameters if v.requires_grad)
cnt2 = sum(sum(v.numel() for k, v in part if v.requires_grad)
for part in model_parts)
assert cnt1 == cnt2, f"cnt1 {cnt1} != cnt2 {cnt2}"
optimizer = []
part_names = ['image', 'text', 'joint']
assert len(model_parts) == len(part_names)
for name, named_parameters in zip(part_names, model_parts):
gain_or_bias_params = [p for n, p in named_parameters if exclude(
n, p) and p.requires_grad and "l0_module" not in n]
rest_params = [p for n, p in named_parameters if include(
n, p) and p.requires_grad and "l0_module" not in n]
params_groups = [
{"params": gain_or_bias_params, "weight_decay": 0.},
{"params": rest_params, "weight_decay": args.wd},
]
num_opt_params = 0
for pg in params_groups:
num_opt_params += sum(p.numel() for p in pg['params'])
logging.info(f'number of optimizer ({name}) params: {num_opt_params}')
if num_opt_params > 0:
optimizer_i = optim.AdamW(
params_groups,
lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.eps,
)
else:
optimizer_i = EmptyOptimizer()
optimizer.append(optimizer_i)
if args.prune_image or args.prune_text:
lr_l0 = 0.02
lr_lamda = args.l0lr
l0_params = []
# add l0 optimizer
if args.prune_image:
l0_params.extend([
{
"params": [p for n, p in model.image_named_params() if p.requires_grad and "lambda" not in n and "l0_module" in n],
"weight_decay": 0.0,
"lr": lr_l0
}, {
"params": [p for n, p in model.image_named_params() if p.requires_grad and "lambda" in n and "l0_module" in n],
"weight_decay": 0.0,
"lr": lr_lamda
}])
if args.prune_text:
l0_params.extend([
{
"params": [p for n, p in model.text_named_params() if p.requires_grad and "lambda" not in n and "l0_module" in n],
"weight_decay": 0.0,
"lr": lr_l0
}, {
"params": [p for n, p in model.text_named_params() if p.requires_grad and "lambda" in n and "l0_module" in n],
"weight_decay": 0.0,
"lr": lr_lamda
}])
l0_optimizer = optim.AdamW(l0_params)
optimizer.append(l0_optimizer)
return optimizer
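# --- Minimal usage sketch (illustrative; not part of the original file) ---
# `args` below is a hypothetical namespace; real values come from the TinyCLIP
# argument parser. `model` must expose image_named_params(), text_named_params()
# and joint_named_params(), as assumed by build_optimizer above.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(lr=5e-4, beta1=0.9, beta2=0.98, eps=1e-6, wd=0.1,
#                          prune_image=False, prune_text=False, l0lr=0.02)
#   image_opt, text_opt, joint_opt = build_optimizer(args, model)
#   for opt in (image_opt, text_opt, joint_opt):
#       opt.zero_grad()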
|
Cream/TinyCLIP/src/training/optimizer.py/0
|
{
"file_path": "Cream/TinyCLIP/src/training/optimizer.py",
"repo_id": "Cream",
"token_count": 1798
}
| 328 |
import logging
from .constants import *
_logger = logging.getLogger(__name__)
def resolve_data_config(args, default_cfg={}, model=None, use_test_size=False, verbose=False):
new_config = {}
if not default_cfg and model is not None and hasattr(model, 'default_cfg'):
default_cfg = model.default_cfg
# Resolve input/image size
in_chans = 3
if 'chans' in args and args['chans'] is not None:
in_chans = args['chans']
input_size = (in_chans, 224, 224)
if 'input_size' in args and args['input_size'] is not None:
assert isinstance(args['input_size'], (tuple, list))
assert len(args['input_size']) == 3
input_size = tuple(args['input_size'])
in_chans = input_size[0] # input_size overrides in_chans
elif 'img_size' in args and args['img_size'] is not None:
assert isinstance(args['img_size'], int)
input_size = (in_chans, args['img_size'], args['img_size'])
else:
if use_test_size and 'test_input_size' in default_cfg:
input_size = default_cfg['test_input_size']
elif 'input_size' in default_cfg:
input_size = default_cfg['input_size']
new_config['input_size'] = input_size
# resolve interpolation method
new_config['interpolation'] = 'bicubic'
if 'interpolation' in args and args['interpolation']:
new_config['interpolation'] = args['interpolation']
elif 'interpolation' in default_cfg:
new_config['interpolation'] = default_cfg['interpolation']
# resolve dataset + model mean for normalization
new_config['mean'] = IMAGENET_DEFAULT_MEAN
if 'mean' in args and args['mean'] is not None:
mean = tuple(args['mean'])
if len(mean) == 1:
mean = tuple(list(mean) * in_chans)
else:
assert len(mean) == in_chans
new_config['mean'] = mean
elif 'mean' in default_cfg:
new_config['mean'] = default_cfg['mean']
# resolve dataset + model std deviation for normalization
new_config['std'] = IMAGENET_DEFAULT_STD
if 'std' in args and args['std'] is not None:
std = tuple(args['std'])
if len(std) == 1:
std = tuple(list(std) * in_chans)
else:
assert len(std) == in_chans
new_config['std'] = std
elif 'std' in default_cfg:
new_config['std'] = default_cfg['std']
# resolve default crop percentage
new_config['crop_pct'] = DEFAULT_CROP_PCT
if 'crop_pct' in args and args['crop_pct'] is not None:
new_config['crop_pct'] = args['crop_pct']
elif 'crop_pct' in default_cfg:
new_config['crop_pct'] = default_cfg['crop_pct']
if verbose:
_logger.info('Data processing configuration for current model + dataset:')
for n, v in new_config.items():
_logger.info('\t%s: %s' % (n, str(v)))
return new_config
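if __name__ == '__main__':
    # Minimal demo (not part of the original file): resolve a data config from
    # explicit args only, without a model-provided default_cfg.
    cfg = resolve_data_config({'img_size': 192, 'interpolation': 'bilinear'}, verbose=True)
    assert cfg['input_size'] == (3, 192, 192)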
|
Cream/TinyViT/data/augmentation/config.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/config.py",
"repo_id": "Cream",
"token_count": 1235
}
| 329 |
""" A dataset parser that reads single tarfile based datasets
This parser can read datasets consisting if a single tarfile containing images.
I am planning to deprecated it in favour of ParerImageInTar.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import tarfile
from .parser import Parser
from .class_map import load_class_map
from .constants import IMG_EXTENSIONS
from timm.utils.misc import natural_key
def extract_tarinfo(tarfile, class_to_idx=None, sort=True):
files = []
labels = []
for ti in tarfile.getmembers():
if not ti.isfile():
continue
dirname, basename = os.path.split(ti.path)
label = os.path.basename(dirname)
ext = os.path.splitext(basename)[1]
if ext.lower() in IMG_EXTENSIONS:
files.append(ti)
labels.append(label)
if class_to_idx is None:
unique_labels = set(labels)
sorted_labels = list(sorted(unique_labels, key=natural_key))
class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx]
if sort:
tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path))
return tarinfo_and_targets, class_to_idx
class ParserImageTar(Parser):
""" Single tarfile dataset where classes are mapped to folders within tar
NOTE: This class is being deprecated in favour of the more capable ParserImageInTar that can
operate on folders of tars or tars in tars.
"""
def __init__(self, root, class_map=''):
super().__init__()
class_to_idx = None
if class_map:
class_to_idx = load_class_map(class_map, root)
assert os.path.isfile(root)
self.root = root
with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later
self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx)
self.imgs = self.samples
self.tarfile = None # lazy init in __getitem__
def __getitem__(self, index):
if self.tarfile is None:
self.tarfile = tarfile.open(self.root)
tarinfo, target = self.samples[index]
fileobj = self.tarfile.extractfile(tarinfo)
return fileobj, target
def __len__(self):
return len(self.samples)
def _filename(self, index, basename=False, absolute=False):
filename = self.samples[index][0].name
if basename:
filename = os.path.basename(filename)
return filename
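# --- Minimal usage sketch (illustrative; not part of the original file) ---
# `imagenet_val.tar` is a hypothetical tar whose members are laid out as
# <class_name>/<image>.jpg:
#
#   parser = ParserImageTar('imagenet_val.tar')
#   fileobj, target = parser[0]  # file-like object and integer class index
#   print(len(parser), parser._filename(0, basename=True))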
|
Cream/TinyViT/data/augmentation/parsers/parser_image_tar.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/parsers/parser_image_tar.py",
"repo_id": "Cream",
"token_count": 1057
}
| 330 |
"""Model Inference."""
import torch
import numpy as np
from PIL import Image
from models.tiny_vit import tiny_vit_21m_224
from data import build_transform, imagenet_classnames
from config import get_config
config = get_config()
# Build model
model = tiny_vit_21m_224(pretrained=True)
model.eval()
# Load Image
fname = './.figure/cat.jpg'
image = Image.open(fname)
transform = build_transform(is_train=False, config=config)
# (1, 3, img_size, img_size)
batch = transform(image)[None]
with torch.no_grad():
logits = model(batch)
# print top-5 classification names
probs = torch.softmax(logits, -1)
scores, inds = probs.topk(5, largest=True, sorted=True)
print('=' * 30)
print(fname)
for score, ind in zip(scores[0].numpy(), inds[0].numpy()):
print(f'{imagenet_classnames[ind]}: {score:.2f}')
|
Cream/TinyViT/inference.py/0
|
{
"file_path": "Cream/TinyViT/inference.py",
"repo_id": "Cream",
"token_count": 299
}
| 331 |
from .multi_head_attention import RPEMultiheadAttention
from . import irpe
|
Cream/iRPE/DETR-with-iRPE/models/rpe_attention/__init__.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/models/rpe_attention/__init__.py",
"repo_id": "Cream",
"token_count": 23
}
| 332 |
"""
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
'''
Function to plot specific fields from training log(s). Plots both training and test results.
:: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
- fields = which results to plot from each log file - plots both training and test for each field.
- ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
              - log_name = optional, name of log file if different from the default 'log.txt'.
:: Outputs - matplotlib plots of results in fields, color coded for each log file.
- solid lines are training results, dashed lines are test results.
'''
func_name = "plot_utils.py::plot_logs"
# verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
# convert single Path to list to avoid 'not iterable' error
if not isinstance(logs, list):
if isinstance(logs, PurePath):
logs = [logs]
print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
else:
raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
Expect list[Path] or single Path obj, received {type(logs)}")
# Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
for i, dir in enumerate(logs):
if not isinstance(dir, PurePath):
raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
if not dir.exists():
raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
# verify log_name exists
fn = Path(dir / log_name)
if not fn.exists():
print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
print(f"--> full path of missing log file: {fn}")
return
# load log file(s) and plot
dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
for j, field in enumerate(fields):
if field == 'mAP':
coco_eval = pd.DataFrame(
np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1]
).ewm(com=ewm_col).mean()
axs[j].plot(coco_eval, c=color)
else:
df.interpolate().ewm(com=ewm_col).mean().plot(
y=[f'train_{field}', f'test_{field}'],
ax=axs[j],
color=[color] * 2,
style=['-', '--']
)
for ax, field in zip(axs, fields):
ax.legend([Path(p).name for p in logs])
ax.set_title(field)
def plot_precision_recall(files, naming_scheme='iter'):
if naming_scheme == 'exp_id':
# name becomes exp_id
names = [f.parts[-3] for f in files]
elif naming_scheme == 'iter':
names = [f.stem for f in files]
else:
raise ValueError(f'not supported {naming_scheme}')
fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
data = torch.load(f)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision = data['precision']
recall = data['params'].recThrs
scores = data['scores']
# take precision for all classes, all areas and 100 detections
precision = precision[0, :, :, 0, -1].mean(1)
scores = scores[0, :, :, 0, -1].mean(1)
prec = precision.mean()
rec = data['recall'][0, :, 0, -1].mean()
print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
f'score={scores.mean():0.3f}, ' +
f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
)
axs[0].plot(recall, precision, c=color)
axs[1].plot(recall, scores, c=color)
axs[0].set_title('Precision / Recall')
axs[0].legend(names)
axs[1].set_title('Scores / Recall')
axs[1].legend(names)
return fig, axs
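# --- Minimal usage sketch (illustrative; not part of the original file) ---
# `outputs/exp1` is a hypothetical run directory containing a DETR-style log.txt:
#
#   from pathlib import Path
#   plot_logs([Path('outputs/exp1')], fields=('class_error', 'loss_bbox_unscaled', 'mAP'))
#   plt.show()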
|
Cream/iRPE/DETR-with-iRPE/util/plot_utils.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/util/plot_utils.py",
"repo_id": "Cream",
"token_count": 2006
}
| 333 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as op
import yaml
from yacs.config import CfgNode as CN
from lib.utils.comm import comm
_C = CN()
_C.BASE = ['']
_C.NAME = ''
_C.DATA_DIR = ''
_C.DIST_BACKEND = 'nccl'
_C.GPUS = (0,)
# _C.LOG_DIR = ''
_C.MULTIPROCESSING_DISTRIBUTED = True
_C.OUTPUT_DIR = ''
_C.PIN_MEMORY = True
_C.PRINT_FREQ = 20
_C.RANK = 0
_C.VERBOSE = True
_C.WORKERS = 4
_C.MODEL_SUMMARY = False
_C.AMP = CN()
_C.AMP.ENABLED = False
_C.AMP.MEMORY_FORMAT = 'nchw'
# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = 'cls_hrnet'
_C.MODEL.INIT_WEIGHTS = True
_C.MODEL.PRETRAINED = ''
_C.MODEL.PRETRAINED_LAYERS = ['*']
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.SPEC = CN(new_allowed=True)
_C.LOSS = CN(new_allowed=True)
_C.LOSS.LABEL_SMOOTHING = 0.0
_C.LOSS.LOSS = 'softmax'
# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ''
_C.DATASET.DATASET = 'imagenet'
_C.DATASET.TRAIN_SET = 'train'
_C.DATASET.TEST_SET = 'val'
_C.DATASET.DATA_FORMAT = 'jpg'
_C.DATASET.LABELMAP = ''
_C.DATASET.TRAIN_TSV_LIST = []
_C.DATASET.TEST_TSV_LIST = []
_C.DATASET.SAMPLER = 'default'
_C.DATASET.TARGET_SIZE = -1
# training data augmentation
_C.INPUT = CN()
_C.INPUT.MEAN = [0.485, 0.456, 0.406]
_C.INPUT.STD = [0.229, 0.224, 0.225]
# data augmentation
_C.AUG = CN()
_C.AUG.SCALE = (0.08, 1.0)
_C.AUG.RATIO = (3.0/4.0, 4.0/3.0)
_C.AUG.COLOR_JITTER = [0.4, 0.4, 0.4, 0.1, 0.0]
_C.AUG.GRAY_SCALE = 0.0
_C.AUG.GAUSSIAN_BLUR = 0.0
_C.AUG.DROPBLOCK_LAYERS = [3, 4]
_C.AUG.DROPBLOCK_KEEP_PROB = 1.0
_C.AUG.DROPBLOCK_BLOCK_SIZE = 7
_C.AUG.MIXUP_PROB = 0.0
_C.AUG.MIXUP = 0.0
_C.AUG.MIXCUT = 0.0
_C.AUG.MIXCUT_MINMAX = []
_C.AUG.MIXUP_SWITCH_PROB = 0.5
_C.AUG.MIXUP_MODE = 'batch'
_C.AUG.MIXCUT_AND_MIXUP = False
_C.AUG.INTERPOLATION = 2
_C.AUG.TIMM_AUG = CN(new_allowed=True)
_C.AUG.TIMM_AUG.USE_LOADER = False
_C.AUG.TIMM_AUG.USE_TRANSFORM = False
# train
_C.TRAIN = CN()
_C.TRAIN.AUTO_RESUME = True
_C.TRAIN.CHECKPOINT = ''
_C.TRAIN.LR_SCHEDULER = CN(new_allowed=True)
_C.TRAIN.SCALE_LR = True
_C.TRAIN.LR = 0.001
_C.TRAIN.OPTIMIZER = 'sgd'
_C.TRAIN.OPTIMIZER_ARGS = CN(new_allowed=True)
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.WD = 0.0001
_C.TRAIN.WITHOUT_WD_LIST = []
_C.TRAIN.NESTEROV = True
# for adam
_C.TRAIN.GAMMA1 = 0.99
_C.TRAIN.GAMMA2 = 0.0
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 100
_C.TRAIN.IMAGE_SIZE = [224, 224] # width * height, ex: 192 * 256
_C.TRAIN.BATCH_SIZE_PER_GPU = 32
_C.TRAIN.SHUFFLE = True
_C.TRAIN.EVAL_BEGIN_EPOCH = 0
_C.TRAIN.DETECT_ANOMALY = False
_C.TRAIN.CLIP_GRAD_NORM = 0.0
_C.TRAIN.SAVE_ALL_MODELS = False
# testing
_C.TEST = CN()
# size of images for each device
_C.TEST.BATCH_SIZE_PER_GPU = 32
_C.TEST.CENTER_CROP = True
_C.TEST.IMAGE_SIZE = [224, 224] # width * height, ex: 192 * 256
_C.TEST.INTERPOLATION = 2
_C.TEST.MODEL_FILE = ''
_C.TEST.REAL_LABELS = False
_C.TEST.VALID_LABELS = ''
_C.FINETUNE = CN()
_C.FINETUNE.FINETUNE = False
_C.FINETUNE.USE_TRAIN_AUG = False
_C.FINETUNE.BASE_LR = 0.003
_C.FINETUNE.BATCH_SIZE = 512
_C.FINETUNE.EVAL_EVERY = 3000
_C.FINETUNE.TRAIN_MODE = True
# _C.FINETUNE.MODEL_FILE = ''
_C.FINETUNE.FROZEN_LAYERS = []
_C.FINETUNE.LR_SCHEDULER = CN(new_allowed=True)
_C.FINETUNE.LR_SCHEDULER.DECAY_TYPE = 'step'
# debug
_C.DEBUG = CN()
_C.DEBUG.DEBUG = False
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, op.join(op.dirname(cfg_file), cfg)
)
print('=> merge config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
_update_config_from_file(config, args.cfg)
config.defrost()
config.merge_from_list(args.opts)
if config.TRAIN.SCALE_LR:
config.TRAIN.LR *= comm.world_size
file_name, _ = op.splitext(op.basename(args.cfg))
config.NAME = file_name + config.NAME
config.RANK = comm.rank
if 'timm' == config.TRAIN.LR_SCHEDULER.METHOD:
config.TRAIN.LR_SCHEDULER.ARGS.epochs = config.TRAIN.END_EPOCH
if 'timm' == config.TRAIN.OPTIMIZER:
config.TRAIN.OPTIMIZER_ARGS.lr = config.TRAIN.LR
aug = config.AUG
if aug.MIXUP > 0.0 or aug.MIXCUT > 0.0 or aug.MIXCUT_MINMAX:
aug.MIXUP_PROB = 1.0
config.freeze()
def save_config(cfg, path):
if comm.is_main_process():
with open(path, 'w') as f:
f.write(cfg.dump())
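# --- Minimal usage sketch (illustrative; not part of the original file) ---
# `args` is a hypothetical namespace mimicking the training script's CLI;
# the YAML path is an assumption:
#
#   import argparse
#   args = argparse.Namespace(cfg='experiments/imagenet/cvt/cvt-13-224x224.yaml', opts=[])
#   update_config(_C, args)
#   save_config(_C, 'config_snapshot.yaml')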
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'w') as f:
print(_C, file=f)
|
CvT/lib/config/default.py/0
|
{
"file_path": "CvT/lib/config/default.py",
"repo_id": "CvT",
"token_count": 2530
}
| 334 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch.optim as optim
from timm.optim import create_optimizer
def _is_depthwise(m):
return (
isinstance(m, nn.Conv2d)
and m.groups == m.in_channels
and m.groups == m.out_channels
)
def set_wd(cfg, model):
without_decay_list = cfg.TRAIN.WITHOUT_WD_LIST
without_decay_depthwise = []
without_decay_norm = []
for m in model.modules():
if _is_depthwise(m) and 'dw' in without_decay_list:
without_decay_depthwise.append(m.weight)
elif isinstance(m, nn.BatchNorm2d) and 'bn' in without_decay_list:
without_decay_norm.append(m.weight)
without_decay_norm.append(m.bias)
elif isinstance(m, nn.GroupNorm) and 'gn' in without_decay_list:
without_decay_norm.append(m.weight)
without_decay_norm.append(m.bias)
elif isinstance(m, nn.LayerNorm) and 'ln' in without_decay_list:
without_decay_norm.append(m.weight)
without_decay_norm.append(m.bias)
with_decay = []
without_decay = []
skip = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
skip_keys = {}
if hasattr(model, 'no_weight_decay_keywords'):
skip_keys = model.no_weight_decay_keywords()
for n, p in model.named_parameters():
ever_set = False
if p.requires_grad is False:
continue
skip_flag = False
if n in skip:
print('=> set {} wd to 0'.format(n))
without_decay.append(p)
skip_flag = True
else:
for i in skip:
if i in n:
print('=> set {} wd to 0'.format(n))
without_decay.append(p)
skip_flag = True
if skip_flag:
continue
        for i in skip_keys:
            if i in n:
                print('=> set {} wd to 0'.format(n))
                without_decay.append(p)
                skip_flag = True
        if skip_flag:
            continue
for pp in without_decay_depthwise:
if p is pp:
if cfg.VERBOSE:
print('=> set depthwise({}) wd to 0'.format(n))
without_decay.append(p)
ever_set = True
break
for pp in without_decay_norm:
if p is pp:
if cfg.VERBOSE:
print('=> set norm({}) wd to 0'.format(n))
without_decay.append(p)
ever_set = True
break
if (
(not ever_set)
and 'bias' in without_decay_list
and n.endswith('.bias')
):
if cfg.VERBOSE:
print('=> set bias({}) wd to 0'.format(n))
without_decay.append(p)
elif not ever_set:
with_decay.append(p)
# assert (len(with_decay) + len(without_decay) == len(list(model.parameters())))
params = [
{'params': with_decay},
{'params': without_decay, 'weight_decay': 0.}
]
return params
def build_optimizer(cfg, model):
if cfg.TRAIN.OPTIMIZER == 'timm':
args = cfg.TRAIN.OPTIMIZER_ARGS
        print(f'=> using timm optimizer args: {cfg.TRAIN.OPTIMIZER_ARGS}')
optimizer = create_optimizer(args, model)
return optimizer
optimizer = None
params = set_wd(cfg, model)
if cfg.TRAIN.OPTIMIZER == 'sgd':
optimizer = optim.SGD(
params,
# filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WD,
nesterov=cfg.TRAIN.NESTEROV
)
elif cfg.TRAIN.OPTIMIZER == 'adam':
optimizer = optim.Adam(
params,
# filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
weight_decay=cfg.TRAIN.WD,
)
elif cfg.TRAIN.OPTIMIZER == 'adamW':
optimizer = optim.AdamW(
params,
lr=cfg.TRAIN.LR,
weight_decay=cfg.TRAIN.WD,
)
elif cfg.TRAIN.OPTIMIZER == 'rmsprop':
optimizer = optim.RMSprop(
params,
# filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WD,
alpha=cfg.TRAIN.RMSPROP_ALPHA,
centered=cfg.TRAIN.RMSPROP_CENTERED
)
return optimizer
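# --- Minimal usage sketch (illustrative; not part of the original file) ---
# Assumes `cfg` is a config node from lib/config/default.py and `model` is any
# nn.Module; entries in cfg.TRAIN.WITHOUT_WD_LIST (e.g. 'bn', 'gn', 'ln',
# 'bias', 'dw') select the parameter groups that get weight decay 0.
#
#   optimizer = build_optimizer(cfg, model)
#   optimizer.zero_grad()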
|
CvT/lib/optim/build.py/0
|
{
"file_path": "CvT/lib/optim/build.py",
"repo_id": "CvT",
"token_count": 2459
}
| 335 |
import argparse
import logging
import os
import pathlib
import sr_detector
import numpy as np
import pandas as pd
from error_messages import *
from constants import *
from azureml.studio.core.io.data_frame_directory import load_data_frame_from_directory, save_data_frame_to_directory
PACKAGE_NAME = 'spectral_residual_anomaly_detection_module'
VERSION = '1.0.0'
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def is_timestamp_ascending(timestamps):
count = len(timestamps)
for i in range(count - 1):
if timestamps[i] > timestamps[i + 1]:
return -1
elif timestamps[i] == timestamps[i + 1]:
return -2
return 0
def invoke(input_path, detect_mode, timestamp_column, value_column, batch_size, threshold, sensitivity,
appendMode, compute_stats_in_visualization, output_path):
df = load_data_frame_from_directory(input_path).data
logging.info(f"Shape of loaded DataFrame: {df.shape}")
if df.shape[0] < MIN_POINTS:
raise Exception(NotEnoughPoints.format(MIN_POINTS))
if 0 < batch_size < MIN_POINTS:
raise Exception(InvalidBatchSize.format(MIN_POINTS))
if timestamp_column not in list(df.columns):
raise Exception(ColumnNotFoundError.format(timestamp_column))
if value_column not in list(df.columns):
raise Exception(ColumnNotFoundError.format(value_column))
timestamp = pd.DataFrame(df, columns=[timestamp_column])
timestamps = pd.to_datetime(timestamp.iloc[:, 0].values)
if np.any(np.isnat(timestamps)):
raise Exception(InvalidTimestamps)
res = is_timestamp_ascending(timestamps)
if res == -1:
raise Exception(InvalidSeriesOrder)
elif res == -2:
raise Exception(DuplicateSeriesTimestamp)
data_columns = pd.DataFrame(df, columns=[value_column])
for col in data_columns:
try:
float_data = data_columns[col].apply(float)
except Exception as e:
raise Exception(InvalidValueFormat.format(col))
if not np.all(np.isfinite(float_data)):
raise Exception(InvalidSeriesValue.format(col))
if np.any(np.less(float_data, VALUE_LOWER_BOUND)) or np.any(np.greater(float_data, VALUE_UPPER_BOUND)):
raise Exception(ValueOverflow.format(col))
data_columns[col] = float_data
result = sr_detector.detect(timestamps, data_columns, detect_mode=detect_mode,
batch_size=batch_size, threshold=threshold, sensitivity=sensitivity)
if appendMode is True:
result = pd.merge(df, result, left_index=True, right_index=True)
save_data_frame_to_directory(output_path, result, compute_stats_in_visualization=compute_stats_in_visualization)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-path',
help='Input Dataframe path'
)
parser.add_argument(
'--detect-mode',
choices=['AnomalyOnly', 'AnomalyAndMargin'],
help='Specify the detect mode.'
)
parser.add_argument(
'--timestamp-column',
help='This parameter specifies the column that contains timestamps.'
)
parser.add_argument(
'--value-column',
help='This parameter specifies the column that contains values.'
)
parser.add_argument(
'--batch-size', type=int,
        help='This parameter specifies the size of each batch on which the detection is performed.'
)
parser.add_argument(
'--threshold', type=float,
help='This parameter specifies the threshold anomaly score that a point is judged as anomaly.'
)
parser.add_argument(
'--sensitivity', type=float,
help='This parameter is used in AnomalyAndMargin mode to control the width of margin.'
)
parser.add_argument(
'--append-mode', type=str2bool, default=False,
        help='This parameter specifies whether to append the detection results to the input columns.'
)
parser.add_argument(
'--compute-stats-in-visualization', type=str2bool, default=True,
help='Enable this parameter to get stats visualization.'
)
parser.add_argument(
'--output-path',
help='Output Dataframe path'
)
args, _ = parser.parse_known_args()
logging.info(f"Hello world from {PACKAGE_NAME} {VERSION}")
logging.debug("Received parameters:")
logging.debug(f"input: {args.input_path}")
logging.debug(f"detect mode: {args.detect_mode}")
logging.debug(f"timestamp column: {args.timestamp_column}")
logging.debug(f"value column: {args.value_column}")
logging.debug(f"batch size: {args.batch_size}")
logging.debug(f"threshold: {args.threshold}")
logging.debug(f"sensitivity: {args.sensitivity}")
logging.debug(f"appendMode: {args.append_mode}")
logging.debug(f"output path: {args.output_path}")
invoke(args.input_path, args.detect_mode, args.timestamp_column, args.value_column,
args.batch_size, args.threshold, args.sensitivity, args.append_mode,
args.compute_stats_in_visualization, args.output_path)
if __name__ == '__main__':
main()
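# --- Minimal usage sketch (illustrative; not part of the original file) ---
# Hypothetical invocation; both paths must be Azure ML DataFrameDirectory folders:
#
#   python invoker.py --input-path ./input_dfd --output-path ./output_dfd \
#       --detect-mode AnomalyAndMargin --timestamp-column timestamp \
#       --value-column value --batch-size 0 --threshold 0.3 \
#       --sensitivity 99 --append-mode true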
|
anomalydetector/aml_component/invoker.py/0
|
{
"file_path": "anomalydetector/aml_component/invoker.py",
"repo_id": "anomalydetector",
"token_count": 2135
}
| 336 |
"""
This file is referenced from https://github.com/iopsai/iops/blob/master/evaluation/evaluation.py
"""
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score
def get_range_proba(predict, label, delay=7):
predict = np.array(predict)
label = np.array(label)
splits = np.where(label[1:] != label[:-1])[0] + 1
is_anomaly = label[0] == 1
new_predict = np.array(predict)
pos = 0
for sp in splits:
if is_anomaly:
if 1 in predict[pos:min(pos + delay + 1, sp)]:
new_predict[pos: sp] = 1
else:
new_predict[pos: sp] = 0
is_anomaly = not is_anomaly
pos = sp
sp = len(label)
if is_anomaly:
if 1 in predict[pos: min(pos + delay + 1, sp)]:
new_predict[pos: sp] = 1
else:
new_predict[pos: sp] = 0
return new_predict
def reconstruct_label(timestamp, label):
timestamp = np.asarray(timestamp, np.int64)
index = np.argsort(timestamp)
timestamp_sorted = np.asarray(timestamp[index])
interval = np.min(np.diff(timestamp_sorted))
label = np.asarray(label, np.int64)
label = np.asarray(label[index])
idx = (timestamp_sorted - timestamp_sorted[0]) // interval
    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int64)
new_label[idx] = label
return new_label
def reconstruct_series(timestamp, label, predict, delay=7):
label = reconstruct_label(timestamp, label)
predict = reconstruct_label(timestamp, predict)
predict = get_range_proba(predict, label, delay)
return label.tolist(), predict.tolist()
def calc(pred, true):
TP = 0
FP = 0
TN = 0
FN = 0
for pre, gt in zip(pred, true):
if gt == 1:
if pre == 1:
TP += 1
else:
FN += 1
if gt == 0:
if pre == 1:
FP += 1
else:
TN += 1
return TP, FP, TN, FN
def evaluate_for_all_series(lst_timestamp_label_predict, delay=7, prt=True):
labels, predicts = [], []
for timestamp, label, predict, _ in lst_timestamp_label_predict:
if timestamp == []:
continue
lbl, pdt = reconstruct_series(timestamp, label, predict, delay)
labels += lbl
predicts += pdt
f1 = f1_score(labels, predicts)
pre = precision_score(labels, predicts)
rec = recall_score(labels, predicts)
TP, FP, TN, FN = calc(predicts, labels)
if prt:
print('precision', pre)
print('recall', rec)
print('f1', f1)
print('-------------------------------')
return f1, pre, rec, TP, FP, TN, FN
def bi_get_range_proba(predict, label, left, right):
i = 1
rs = predict[:]
while i < len(label):
if label[i] == 1 and label[i - 1] == 0:
start = max(0, i - left)
end = min(i + right + 1, len(label))
if 1 in predict[start: end]:
j = i
while j < len(label) and label[j] == 1:
rs[j] = 1
j += 1
i = j
rs[start: end] = label[start: end]
else:
j = i
while j < len(label) and label[j] == 1:
rs[j] = 0
j += 1
i = j
i += 1
return rs
def bi_reconstruct_series(timestamp, label, predict, left, right):
label = reconstruct_label(timestamp, label).tolist()
predict = reconstruct_label(timestamp, predict).tolist()
predict = bi_get_range_proba(predict, label, left, right)
return label, predict
def bi_evaluate_for_all_series(lst_timestamp_label_predict, left, right, prt=True):
import json
labels, predicts = [], []
save = []
for timestamp, label, predict in lst_timestamp_label_predict:
if timestamp == []:
continue
try:
lbl, pdt = bi_reconstruct_series(timestamp, label, predict, left, right)
        except Exception:
continue
ifi = f1_score(lbl, pdt)
save.append(ifi)
labels += lbl
predicts += pdt
with open('eachscore.json', 'w+') as fout:
json.dump(save, fout)
f1 = f1_score(labels, predicts)
pre = precision_score(labels, predicts)
rec = recall_score(labels, predicts)
if prt:
print('precision', pre)
print('recall', rec)
print('f1', f1)
print('-------------------------------')
return f1, pre, rec
def get_variance(f_score, all_fscore):
va = 0.0
for i in range(len(all_fscore)):
va += 1.0 * (all_fscore[i] - f_score) * (all_fscore[i] - f_score)
return va / len(all_fscore)
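if __name__ == '__main__':
    # Minimal demo (not part of the original file): a detection at offset 2
    # inside the anomaly segment is accepted when delay >= 2, and the whole
    # segment is then relabelled as detected.
    label = [0, 1, 1, 1, 0, 0]
    predict = [0, 0, 0, 1, 0, 0]
    print(get_range_proba(predict, label, delay=2))  # -> [0 1 1 1 0 0]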
|
anomalydetector/srcnn/competition_metric.py/0
|
{
"file_path": "anomalydetector/srcnn/competition_metric.py",
"repo_id": "anomalydetector",
"token_count": 2271
}
| 337 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from overrides import EnforceOverrides
class TrainerBase(EnforceOverrides):
"""Abstract class for trainers.
The `TrainerBase` class provides an abstract interface for training a model. The user
is required to implement the `train`, `evaluate`, and `predict` methods. The `train` method
should contain the logic for training the model, the `evaluate` method should contain
the logic for evaluating the model, and the `predict` method should contain the logic
for making predictions with the model.
Note:
This class is inherited from `EnforceOverrides` and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
Examples:
>>> class MyTrainer(TrainerBase):
>>> def __init__(self) -> None:
>>> super().__init__()
>>>
>>> @overrides
>>> def train(self) -> None:
        >>> return pytorch_lightning.trainer.Trainer().fit(model, train_dataloaders=train_dataloader)
>>>
>>> @overrides
>>> def evaluate(self) -> None:
        >>> return pytorch_lightning.trainer.Trainer().test(model, dataloaders=val_dataloader)
>>>
>>> @overrides
>>> def predict(self) -> None:
        >>> return pytorch_lightning.trainer.Trainer().predict(model, dataloaders=predict_dataloader)
"""
def __init__(self) -> None:
"""Initialize the trainer."""
pass
@abstractmethod
def train(self) -> None:
"""Train a model.
This method should contain the logic for training the model.
"""
pass
@abstractmethod
def evaluate(self) -> None:
"""Evaluate a model.
This method should contain the logic for evaluating the model.
"""
pass
@abstractmethod
def predict(self) -> None:
"""Predict with a model.
This method should contain the logic for making predictions with the model.
"""
pass
|
archai/archai/api/trainer_base.py/0
|
{
"file_path": "archai/archai/api/trainer_base.py",
"repo_id": "archai",
"token_count": 835
}
| 338 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import itertools
import logging
import os
import pathlib
import time
from collections import OrderedDict
from types import TracebackType
from typing import Any, Dict, List, Optional, Union
import yaml
from archai.common.ordered_dict_logger_utils import get_logger
class OrderedDictLogger:
"""Log and save data in a hierarchical YAML structure.
    The purpose of structured logging is to store logs as key-value pairs. However,
    with loops and subroutine calls, what is needed is a hierarchy of dictionaries,
    where the value for a key can itself be a dictionary. The idea is that one node
    in the tree is set as the current node and values are logged to it; `pushd` creates
    and moves to a child node, while `popd` moves back to the parent.
    To implement this mechanism we use two main variables: `stack` allows us to push each
    node onto a stack when `pushd` is called (each node is an OrderedDict). As a convenience,
    a child path can be specified in `pushd`, in which case the child hierarchy is created
    and the current node is set to the last node in the specified path. When `popd` is
    called, we go back to the original parent instead of the parent of the current node.
    To implement this we use the `paths` variable, which stores the subpath recorded at
    each `pushd` call.
"""
def __init__(
self, source: Optional[str] = None, file_path: Optional[str] = None, delay: Optional[float] = 60.0
) -> None:
"""Initialize the logger.
Args:
source: Source of the logger.
file_path: File path of the log file.
delay: Delay between log saves.
"""
self.logger = get_logger(source or __name__)
self.file_path = file_path
self.delay = delay
self.call_count = 0
self.timestamp = time.time()
self.paths = [[""]]
self.stack = [OrderedDict()]
if self.file_path:
if os.path.exists(self.file_path):
backup_file_path = pathlib.Path(self.file_path)
backup_file_path.rename(backup_file_path.with_suffix(f".{str(int(time.time()))}.yaml"))
def __enter__(self) -> OrderedDictLogger:
return self
def __exit__(self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: TracebackType) -> None:
self.popd()
def __contains__(self, key: str) -> bool:
return key in self.current_node
def __len__(self) -> int:
return len(self.current_node)
@property
def root_node(self) -> OrderedDict:
"""Return the root node of the current stack."""
return self.stack[0]
@property
def current_node(self) -> OrderedDict:
"""Return the current node of the current stack.
Raises:
RuntimeError: If a `key` stores a scalar value and is trying to store
new information.
"""
last_obj = None
for i, (path, obj) in enumerate(zip(self.paths, self.stack)):
if obj is None:
obj = last_obj
for key in path:
if key not in obj:
obj[key] = OrderedDict()
if not isinstance(obj[key], OrderedDict):
raise RuntimeError(f"`{key}` is being used to store a scalar value.")
obj = obj[key]
self.stack[i] = obj
last_obj = obj
return self.stack[-1]
@property
def current_path(self) -> str:
"""Return the current path of the current stack."""
return "/".join(itertools.chain.from_iterable(self.paths[1:]))
def save(self) -> None:
"""Save the current log data to an output file.
This method only saves to a file if a valid `file_path` has been provided
in the constructor.
"""
if self.file_path:
with open(self.file_path, "w") as f:
yaml.dump(self.root_node, f)
def load(self, file_path: str) -> None:
"""Load log data from an input file.
Args:
file_path: File path to load data from.
"""
with open(file_path, "r") as f:
obj = yaml.load(f, Loader=yaml.Loader)
self.stack = [obj]
def close(self) -> None:
"""Close the logger."""
self.save()
for handler in self.logger.handlers:
handler.flush()
def _update_key(
self,
key: Any,
value: Any,
node: Optional[OrderedDict] = None,
path: Optional[List[str]] = None,
override_key: Optional[bool] = True,
) -> None:
if not override_key and key in self.current_node:
raise KeyError(f"`{key}` is already being used. Cannot use it again, unless popd() is called.")
current_node = node or self.current_node
current_path = path or []
for p in current_path:
if p not in current_node:
current_node[p] = OrderedDict()
current_node = current_node[p]
current_node[str(key)] = value
def _update(self, obj: Dict[str, Any], override_key: Optional[bool] = True) -> None:
for k, v in obj.items():
self._update_key(k, v, override_key=override_key)
def log(
self, obj: Union[Dict[str, Any], str], level: Optional[int] = None, override_key: Optional[bool] = True
) -> None:
"""Log the provided dictionary/string at the specified level.
Args:
obj: Object to log.
level: Logging level.
override_key: Whether key can be overridden if it's already in current node.
"""
self.call_count += 1
if isinstance(obj, dict):
self._update(obj, override_key=override_key)
message = ", ".join(f"{k}={v}" for k, v in obj.items())
else:
message = obj
path = {
logging.INFO: ["messages"],
logging.DEBUG: ["debugs"],
logging.WARNING: ["warnings"],
logging.ERROR: ["errors"],
}
self._update_key(self.call_count, message, node=self.root_node, path=path[level], override_key=override_key)
self.logger.log(msg=self.current_path + " " + message, level=level)
if time.time() - self.timestamp > self.delay:
self.save()
self.timestamp = time.time()
def info(self, obj: Union[Dict[str, Any], str], override_key: Optional[bool] = True) -> None:
"""Log the provided dictionary/string at the `info` level.
Args:
obj: Object to log.
override_key: Whether key can be overridden if it's already in current node.
"""
self.log(obj, level=logging.INFO, override_key=override_key)
def debug(self, obj: Union[Dict[str, Any], str], override_key: Optional[bool] = True) -> None:
"""Log the provided dictionary/string at the `debug` level.
Args:
obj: Object to log.
override_key: Whether key can be overridden if it's already in current node.
"""
self.log(obj, level=logging.DEBUG, override_key=override_key)
def warn(self, obj: Union[Dict[str, Any], str], override_key: Optional[bool] = True) -> None:
"""Log the provided dictionary/string at the `warning` level.
Args:
obj: Object to log.
override_key: Whether key can be overridden if it's already in current node.
"""
self.log(obj, level=logging.WARNING, override_key=override_key)
def error(self, obj: Union[Dict[str, Any], str], override_key: Optional[bool] = True) -> None:
"""Log the provided dictionary/string at the `error` level.
Args:
obj: Object to log.
override_key: Whether key can be overridden if it's already in current node.
"""
self.log(obj, level=logging.ERROR, override_key=override_key)
def pushd(self, *keys: Any) -> OrderedDictLogger:
"""Push the provided keys onto the current path stack.
Returns:
Instance of current logger.
"""
self.paths.append([str(k) for k in keys])
self.stack.append(None) # Delays creation of node until it is needed
return self # Allows to call __enter__
def popd(self) -> None:
"""Pop the last path and node off the stack."""
if len(self.stack) == 1:
self.warn("Invalid call. No available child in the stack.")
return
self.stack.pop()
self.paths.pop()
@staticmethod
def set_global_instance(instance: OrderedDictLogger) -> None:
"""Set a global logger instance.
Args:
instance: Instance to be set globally.
"""
global _logger
_logger = instance
@staticmethod
def get_global_instance() -> OrderedDictLogger:
"""Get a global logger instance.
Returns:
Global logger.
"""
global _logger
return _logger
def get_global_logger() -> OrderedDictLogger:
"""Get a global logger instance.
This method assures that if a global logger instance does not exist,
it will be created and set as the global logger instance.
Returns:
Global logger.
"""
try:
logger = OrderedDictLogger.get_global_instance()
except:
OrderedDictLogger.set_global_instance(OrderedDictLogger(file_path="archai.log.yaml", delay=30.0))
logger = OrderedDictLogger.get_global_instance()
return logger
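if __name__ == "__main__":
    # Minimal demo (not part of the original file): hierarchical logging with
    # pushd()/popd(). No file is written because `file_path` is None.
    demo_logger = OrderedDictLogger(file_path=None, delay=0.0)
    demo_logger.info({"start": True})
    with demo_logger.pushd("epoch", "0"):
        demo_logger.info({"loss": 0.5})
    demo_logger.info({"done": True})
    print(demo_logger.root_node)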
|
archai/archai/common/ordered_dict_logger.py/0
|
{
"file_path": "archai/archai/common/ordered_dict_logger.py",
"repo_id": "archai",
"token_count": 4092
}
| 339 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import Food101
from torchvision.transforms import ToTensor
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
class Food101DatasetProvider(DatasetProvider):
"""Food-101 dataset provider."""
def __init__(
self,
root: Optional[str] = "dataroot",
) -> None:
"""Initialize Food-101 dataset provider.
Args:
            root: Root directory where the dataset is saved.
"""
super().__init__()
self.root = root
@overrides
def get_train_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return Food101(
self.root,
split="train",
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
@overrides
def get_val_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn("Validation set not available. Returning training set ...")
return self.get_train_dataset(transform=transform, target_transform=target_transform)
@overrides
def get_test_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return Food101(
self.root,
split="test",
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
|
archai/archai/datasets/cv/food101_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/cv/food101_dataset_provider.py",
"repo_id": "archai",
"token_count": 793
}
| 340 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict, List, Optional, Union
from datasets import load_dataset as hf_load_dataset
from datasets import load_from_disk as hf_load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.splits import Split
from datasets.utils.version import Version
from overrides import overrides
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.datasets.nlp.hf_dataset_provider_utils import should_refresh_cache
logger = OrderedDictLogger(source=__name__)
class HfHubDatasetProvider(DatasetProvider):
"""Hugging Face Hub dataset provider."""
def __init__(
self,
dataset_name: str,
dataset_config_name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List[str], Dict[str, Union[str, List[str]]]]] = None,
cache_dir: Optional[str] = None,
revision: Optional[Union[str, Version]] = None,
) -> None:
"""Initialize Hugging Face Hub dataset provider.
Args:
dataset_name: Name of the dataset.
dataset_config_name: Name of the dataset configuration.
data_dir: Path to the data directory.
data_files: Path(s) to the data file(s).
cache_dir: Path to the read/write cache directory.
revision: Version of the dataset to load.
"""
super().__init__()
self.dataset_name = dataset_name
self.dataset_config_name = dataset_config_name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.revision = revision
def get_dataset(
self,
split: Optional[Union[str, Split]] = None,
refresh_cache: Optional[bool] = False,
keep_in_memory: Optional[bool] = False,
streaming: Optional[bool] = False,
) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
return hf_load_dataset(
self.dataset_name,
name=self.dataset_config_name,
data_dir=self.data_dir,
data_files=self.data_files,
split=split,
cache_dir=self.cache_dir,
download_mode=should_refresh_cache(refresh_cache),
keep_in_memory=keep_in_memory,
revision=self.revision,
streaming=streaming,
)
@overrides
def get_train_dataset(
self,
split: Optional[Union[str, Split]] = "train",
refresh_cache: Optional[bool] = False,
keep_in_memory: Optional[bool] = False,
streaming: Optional[bool] = False,
) -> Union[Dataset, IterableDataset]:
return self.get_dataset(
split=split, refresh_cache=refresh_cache, keep_in_memory=keep_in_memory, streaming=streaming
)
@overrides
def get_val_dataset(
self,
split: Optional[Union[str, Split]] = "validation",
refresh_cache: Optional[bool] = False,
keep_in_memory: Optional[bool] = False,
streaming: Optional[bool] = False,
) -> Union[Dataset, IterableDataset]:
try:
return self.get_dataset(
split=split, refresh_cache=refresh_cache, keep_in_memory=keep_in_memory, streaming=streaming
)
except ValueError:
logger.warn(f"Validation set not available for `{self.dataset}`. Returning full training set ...")
return self.get_dataset(
split="train", refresh_cache=refresh_cache, keep_in_memory=keep_in_memory, streaming=streaming
)
@overrides
def get_test_dataset(
self,
split: Optional[Union[str, Split]] = "test",
refresh_cache: Optional[bool] = False,
keep_in_memory: Optional[bool] = False,
streaming: Optional[bool] = False,
) -> Union[Dataset, IterableDataset]:
try:
return self.get_dataset(
split=split, refresh_cache=refresh_cache, keep_in_memory=keep_in_memory, streaming=streaming
)
except ValueError:
logger.warn(f"Testing set not available for `{self.dataset}`. Returning full validation set ...")
return self.get_dataset(
split="validation", refresh_cache=refresh_cache, keep_in_memory=keep_in_memory, streaming=streaming
)
class HfDiskDatasetProvider(DatasetProvider):
"""Hugging Face disk-saved dataset provider."""
def __init__(
self,
data_dir: str,
keep_in_memory: Optional[bool] = False,
) -> None:
"""Initialize Hugging Face disk-saved dataset provider.
Args:
data_dir: Path to the disk-saved dataset.
keep_in_memory: Whether to keep the dataset in memory.
"""
super().__init__()
self.data_dir = data_dir
self.keep_in_memory = keep_in_memory
# Pre-loads the dataset when class is instantiated to avoid loading it multiple times
self.dataset = hf_load_from_disk(self.data_dir, keep_in_memory=keep_in_memory)
@overrides
def get_train_dataset(self) -> Dataset:
if isinstance(self.dataset, DatasetDict):
return self.dataset["train"]
return self.dataset
@overrides
def get_val_dataset(self) -> Dataset:
try:
if isinstance(self.dataset, DatasetDict):
return self.dataset["validation"]
        except KeyError:
logger.warn("Validation set not available. Returning training set ...")
return self.get_train_dataset()
@overrides
def get_test_dataset(self) -> Dataset:
try:
if isinstance(self.dataset, DatasetDict):
return self.dataset["test"]
        except KeyError:
logger.warn("Testing set not available. Returning validation set ...")
return self.get_val_dataset()
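# --- Minimal usage sketch (illustrative; not part of the original file) ---
# Downloads the raw WikiText-103 splits from the Hugging Face Hub:
#
#   provider = HfHubDatasetProvider("wikitext", dataset_config_name="wikitext-103-raw-v1")
#   train_dataset = provider.get_train_dataset()
#   val_dataset = provider.get_val_dataset()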
|
archai/archai/datasets/nlp/hf_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/nlp/hf_dataset_provider.py",
"repo_id": "archai",
"token_count": 2674
}
| 341 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
from pathlib import Path
from typing import List, Optional
from overrides import overrides
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.api.search_results import SearchResults
from archai.discrete_search.api.search_space import DiscreteSearchSpace
from archai.discrete_search.api.searcher import Searcher
logger = OrderedDictLogger(source=__name__)
class RandomSearch(Searcher):
"""Random search algorithm.
It evaluates random samples from the search space in each iteration until
`num_iters` is reached.
"""
def __init__(
self,
search_space: DiscreteSearchSpace,
search_objectives: SearchObjectives,
output_dir: str,
num_iters: Optional[int] = 10,
samples_per_iter: Optional[int] = 10,
clear_evaluated_models: Optional[bool] = True,
save_pareto_model_weights: bool = True,
seed: Optional[int] = 1,
):
"""Initialize the random search algorithm.
Args:
search_space: Discrete search space.
search_objectives: Search objectives.
output_dir: Output directory.
num_iters: Number of iterations.
samples_per_iter: Number of samples per iteration.
            clear_evaluated_models: Optimizes memory usage by clearing the architecture
                of `ArchaiModel` after each iteration. Defaults to True.
            save_pareto_model_weights: If `True`, saves the weights of the Pareto models. Defaults to True.
seed: Random seed.
"""
super(RandomSearch, self).__init__()
assert isinstance(
search_space, DiscreteSearchSpace
), f"{str(search_space.__class__)} is not compatible with {str(self.__class__)}"
self.iter_num = 0
self.search_space = search_space
self.so = search_objectives
self.output_dir = Path(output_dir)
self.output_dir.mkdir(exist_ok=True, parents=True)
# Algorithm settings
self.num_iters = num_iters
self.samples_per_iter = samples_per_iter
# Utils
self.clear_evaluated_models = clear_evaluated_models
self.save_pareto_model_weights = save_pareto_model_weights
self.search_state = SearchResults(search_space, self.so)
self.seed = seed
self.rng = random.Random(seed)
self.seen_archs = set()
self.num_sampled_archs = 0
assert self.samples_per_iter > 0
assert self.num_iters > 0
def sample_models(self, num_models: int, patience: Optional[int] = 5) -> List[ArchaiModel]:
"""Sample models from the search space.
Args:
num_models: Number of models to sample.
patience: Number of tries to sample a valid model.
Returns:
List of sampled models.
"""
nb_tries, valid_sample = 0, []
while len(valid_sample) < num_models and nb_tries < patience:
sample = [self.search_space.random_sample() for _ in range(num_models)]
_, valid_indices = self.so.validate_constraints(sample)
valid_sample += [sample[i] for i in valid_indices if sample[i].archid not in self.seen_archs]
return valid_sample[:num_models]
@overrides
def search(self) -> SearchResults:
for i in range(self.num_iters):
self.iter_num = i + 1
self.on_start_iteration(self.iter_num)
logger.info(f"Iteration {i+1}/{self.num_iters}")
logger.info(f"Sampling {self.samples_per_iter} random models ...")
unseen_pop = self.sample_models(self.samples_per_iter)
# Calculates objectives
logger.info(f"Calculating search objectives {list(self.so.objective_names)} for {len(unseen_pop)} models ...")
results = self.so.eval_all_objs(unseen_pop)
self.search_state.add_iteration_results(unseen_pop, results)
# Records evaluated archs to avoid computing the same architecture twice
self.seen_archs.update([m.archid for m in unseen_pop])
# update the pareto frontier
logger.info("Updating Pareto frontier ...")
pareto = self.search_state.get_pareto_frontier()["models"]
logger.info(f"Found {len(pareto)} members.")
# Saves search iteration results
self.search_state.save_search_state(str(self.output_dir / f"search_state_{self.iter_num}.csv"))
self.search_state.save_pareto_frontier_models(
str(self.output_dir / f"pareto_models_iter_{self.iter_num}"),
save_weights=self.save_pareto_model_weights
)
self.search_state.save_all_2d_pareto_evolution_plots(str(self.output_dir))
# Clears models from memory if needed
if self.clear_evaluated_models:
logger.info("Optimzing memory usage ...")
[model.clear() for model in unseen_pop]
return self.search_state
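# --- Minimal usage sketch (illustrative; not part of the original file) ---
# Assumes a concrete DiscreteSearchSpace implementation `ss` and a
# SearchObjectives instance `so` with at least one objective registered:
#
#   searcher = RandomSearch(ss, so, output_dir="./random_search_out",
#                           num_iters=5, samples_per_iter=8)
#   results = searcher.search()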
|
archai/archai/discrete_search/algos/random_search.py/0
|
{
"file_path": "archai/archai/discrete_search/algos/random_search.py",
"repo_id": "archai",
"token_count": 2212
}
| 342 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
from overrides import overrides
from torch import nn
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import ModelEvaluator
class NonEmbeddingParamsProxy(ModelEvaluator):
"""Total number of non-embedding parameters."""
def __init__(self, exclude_cls: Optional[List[nn.Module]] = None, trainable_only: Optional[bool] = True) -> None:
"""Initialize the evaluator.
Used as a proxy for the perplexity of decoder-only transformer LMs.
Args:
exclude_cls: List of PyTorch module classes to exclude from parameter counting.
If `None`, defaults to `[torch.nn.Embedding]`.
trainable_only: Whether only trainable parameters should be counted.
Reference:
"LiteTransformerSearch: Training-free Neural Architecture Search for
Efficient Language Models", Javaheripi et. al, 2022
"""
self.exclude_cls = exclude_cls or [nn.Embedding]
self.trainable_only = trainable_only
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
total_params = sum(
param.numel() for param in model.arch.parameters() if not self.trainable_only or param.requires_grad
)
embed_params = sum(
sum(param.numel() for param in module.parameters())
for module in model.arch.modules()
if isinstance(module, tuple(self.exclude_cls))
)
return total_params - embed_params
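if __name__ == "__main__":
    # Minimal demo (not part of the original file): count the non-embedding
    # parameters of a toy model wrapped in an ArchaiModel.
    toy = nn.Sequential(nn.Embedding(10, 4), nn.Linear(4, 2))
    archai_model = ArchaiModel(arch=toy, archid="toy")
    print(NonEmbeddingParamsProxy().evaluate(archai_model))  # 4 * 2 + 2 = 10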
|
archai/archai/discrete_search/evaluators/nlp/parameters.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/nlp/parameters.py",
"repo_id": "archai",
"token_count": 636
}
| 343 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
import re
import warnings
from pathlib import Path
from typing import Any, List, Optional
import nats_bench
import numpy as np
import torch
import yaml
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_space import (
BayesOptSearchSpace,
EvolutionarySearchSpace,
)
class NatsbenchTssSearchSpace(EvolutionarySearchSpace, BayesOptSearchSpace):
"""Search space for NATS-Bench-TSS."""
OPS = ["none", "avg_pool_3x3", "nor_conv_1x1", "nor_conv_3x3", "skip_connect"]
    try:
        from xautodl.models import get_cell_based_tiny_net

        # Wrap as a staticmethod so that attribute access on an instance does
        # not bind `self` as the imported function's first argument
        get_cell_based_tiny_net = staticmethod(get_cell_based_tiny_net)
    except ImportError:
        raise ImportError(
            "xautodl installation not found. "
            "Please install it using `pip install git+https://github.com/D-X-Y/AutoDL-Projects/`"
        )
def __init__(self, natsbench_location: str, base_dataset: str, seed: Optional[int] = 1) -> None:
"""Initialize the search space.
Args:
natsbench_location: Path to the NATS-Bench-TSS dataset.
base_dataset: Base dataset used for training the models.
seed: Random seed.
"""
self.natsbench_location = Path(natsbench_location)
self.base_dataset = base_dataset
assert base_dataset in [
"cifar10",
"cifar100",
"ImageNet16-120",
], "`base_dataset` must be one of ['cifar10', 'cifar100', 'ImageNet16-120']"
if not self.natsbench_location.exists():
raise FileNotFoundError(
"The provided path to `natsbench_location` (" f"{self.natsbench_location.absolute()}) does not exist"
)
self.api = nats_bench.create(natsbench_location, "tss", fast_mode=True, verbose=False)
self.rng = random.Random(seed)
self.archid_pattern = re.compile("natsbench-tss-([0-9]+)")
def _get_op_list(self, string: str) -> List[str]:
"""Reused from https://github.com/naszilla/naszilla/blob/master/naszilla/nas_bench_201/cell_201.py."""
# Given a string, get the list of operations
tokens = string.split("|")
ops = [t.split("~")[0] for i, t in enumerate(tokens) if i not in [0, 2, 5, 9]]
return ops
def _get_string_from_ops(self, ops: List[str]) -> str:
"""Reused from https://github.com/naszilla/naszilla/blob/master/naszilla/nas_bench_201/cell_201.py."""
# Given a list of operations, get the string
strings = ["|"]
nodes = [0, 0, 1, 0, 1, 2]
for i, op in enumerate(ops):
strings.append(op + "~{}|".format(nodes[i]))
if i < len(nodes) - 1 and nodes[i + 1] == 0:
strings.append("+|")
return "".join(strings)
def model_from_natsbench_tss(self, natsbench_id: int) -> Any:
"""Get a model from NATS-Bench-TSS dataset.
Args:
natsbench_id: NATS-Bench-TSS identifier.
Returns:
Model from NATS-Bench-TSS dataset.
"""
config = self.api.get_net_config(natsbench_id, self.base_dataset)
return self.get_cell_based_tiny_net(config)
@overrides
def save_arch(self, model: ArchaiModel, path: str) -> None:
yaml.safe_dump({"archid": model.archid, **model.metadata}, open(path, "w", encoding="utf-8"))
@overrides
def load_arch(self, path: str) -> ArchaiModel:
metadata = yaml.safe_load(open(path, encoding="utf-8"))
natsbenchid = self.archid_pattern.match(metadata["archid"])
if not natsbenchid:
raise ValueError(f'Architecture {metadata["archid"]} does not belong to `NatsbenchTssSearchSpace`. ')
if metadata["dataset"] != self.base_dataset:
warnings.warn(
f'Architecture loaded from {path} was saved using a different dataset ({metadata["dataset"]})'
f" than `NatsbenchTssSearchSpace` base dataset ({self.base_dataset})"
)
idx = int(natsbenchid.group(1))
return ArchaiModel(
arch=self.model_from_natsbench_tss(idx),
archid=f"natsbench-tss-{idx}",
metadata={"dataset": self.base_dataset},
)
@overrides
def load_model_weights(self, model: ArchaiModel, path: str) -> None:
model.arch.load_state_dict(torch.load(path))
@overrides
def save_model_weights(self, model: ArchaiModel, path: str) -> None:
torch.save(model.arch.state_dict(), path)
@overrides
def random_sample(self) -> ArchaiModel:
        idx = self.rng.randint(0, len(self.api) - 1)  # randint is inclusive on both ends
return ArchaiModel(
arch=self.model_from_natsbench_tss(idx),
archid=f"natsbench-tss-{idx}",
metadata={"dataset": self.base_dataset},
)
@overrides
def mutate(self, model: ArchaiModel) -> ArchaiModel:
"""Reused from https://github.com/naszilla/naszilla/blob/master/naszilla/nas_bench_201/cell_201.py."""
# First get the string representation of the current architecture
natsbenchid = self.archid_pattern.match(model.archid)
if not natsbenchid:
raise ValueError(f"Architecture {model.archid} does not belong to the `NatsbenchTssSearchSpace`. ")
natsbenchid = int(natsbenchid.group(1))
string_rep = self.api.get_net_config(natsbenchid, self.base_dataset)["arch_str"]
nbhd_strs = []
ops = self._get_op_list(string_rep)
for i in range(len(ops)):
available = [op for op in self.OPS if op != ops[i]]
for op in available:
new_ops = ops.copy()
new_ops[i] = op
new_arch_str = self._get_string_from_ops(new_ops)
nbhd_strs.append(new_arch_str)
# Picks one neighbor architecture as the mutation
        mutation_str = self.rng.choice(nbhd_strs)  # use the seeded RNG for reproducibility
mutation_natsbenchid = self.api.archstr2index[mutation_str]
return ArchaiModel(
arch=self.model_from_natsbench_tss(mutation_natsbenchid),
archid=f"natsbench-tss-{mutation_natsbenchid}",
metadata={"dataset": self.base_dataset},
)
@overrides
def crossover(self, arch_list: List[ArchaiModel]) -> ArchaiModel:
raise NotImplementedError
@overrides
def encode(self, arch: ArchaiModel) -> np.ndarray:
enc_dict = {
"none": [0, 0, 0, 0],
"avg_pool_3x3": [1, 0, 0, 0],
"nor_conv_1x1": [0, 1, 0, 0],
"nor_conv_3x3": [0, 0, 1, 0],
"skip_connect": [0, 0, 0, 1],
}
# Gets string repr for `arch`
natsbenchid = self.archid_pattern.match(arch.archid)
if not natsbenchid:
raise ValueError(f"Architecture {arch.archid} does not belong" " to `NatsbenchTssSearchSpace`. ")
arch_str = self.api[int(natsbenchid.group(1))]
arch_ops = re.findall(r"([^\|\~\+]+)~\d", arch_str)
return np.hstack([np.array(enc_dict[op_name]) for op_name in arch_ops])
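# Hedged illustration (assumption, not in the original file): since each of the
# six cell ops maps to a 4-dim indicator vector ("none" maps to all zeros), the
# encoding returned by `encode` is always a 24-dim binary vector.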
|
archai/archai/discrete_search/search_spaces/benchmark/natsbench_tss.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/benchmark/natsbench_tss.py",
"repo_id": "archai",
"token_count": 3256
}
| 344 |
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model."""
import sys
from collections import namedtuple
from dataclasses import dataclass
from functools import reduce
from operator import mul
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.autograd.function import Function
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import apply_chunking_to_forward
from transformers.utils import (
DUMMY_INPUTS,
DUMMY_MASK,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.reformer.configuration_reformer import ReformerConfig
from ..causal_self_attn import apply_rotary_pos_emb, fixed_pos_embedding
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/reformer-crime-and-punishment"
_CONFIG_FOR_DOC = "ReformerConfig"
_TOKENIZER_FOR_DOC = "ReformerTokenizer"
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/reformer-crime-and-punishment",
"google/reformer-enwik8",
# See all Reformer models at https://huggingface.co/models?filter=reformer
]
# Define named tuples for nn.Modules here
LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"])
LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"])
AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"])
ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"])
ReformerBackwardOutput = namedtuple(
"ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"]
)
ReformerEncoderOutput = namedtuple(
"ReformerEncoderOutput",
["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"],
)
def _stable_argsort(vector, dim):
# this function scales the vector so that torch.argsort is stable.
# torch.argsort is not stable on its own
scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)
scale_offset = scale_offset.expand(vector.shape)
scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])
return torch.argsort(scaled_vector, dim=dim)
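# Hedged numeric sketch (assumption, not in the original file): for a 1x1x3
# input [[[2., 1., 2.]]] sorted along dim=-1, the scaled vector is
# [[[6., 4., 8.]]] (3 * v + index), so argsort returns [1, 0, 2] and the tie
# between the two 2s is broken deterministically by original position.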
def _get_least_common_mult_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
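# Hedged example (assumption, not in the original file): with
# lsh_attn_chunk_length=64 and local_attn_chunk_length=96 in a mixed
# ["lsh", "local"] stack, this returns np.lcm(64, 96) == 192, the smallest
# chunk length compatible with both attention types.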
def _get_min_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
class AxialPositionEmbeddings(nn.Module):
"""
Constructs axial position embeddings. Useful for very long input sequences to save memory and time.
"""
def __init__(self, config):
super().__init__()
self.axial_pos_shape = config.axial_pos_shape
self.axial_pos_embds_dim = config.axial_pos_embds_dim
self.dropout = config.hidden_dropout_prob
self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)
self.weights = nn.ParameterList()
if sum(self.axial_pos_embds_dim) != config.hidden_size:
raise ValueError(
f"Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to "
f"config.hidden_size: {config.hidden_size}"
)
# create weights
for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):
# create expanded shapes
ax_shape = [1] * len(self.axial_pos_shape)
ax_shape[axis] = self.axial_pos_shape[axis]
ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)
# create tensor and init
self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))
def forward(self, position_ids):
# broadcast weights to correct shape
batch_size = position_ids.shape[0]
sequence_length = position_ids.shape[1]
broadcasted_weights = [
weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights
]
if self.training is True:
if reduce(mul, self.axial_pos_shape) != sequence_length:
raise ValueError(
f"If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to "
f"sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. "
f"You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} "
"or changing config.axial_pos_shape."
)
if self.dropout > 0:
weights = torch.cat(broadcasted_weights, dim=-1)
# permute weights so that 2D correctly drops dims 1 and 2
transposed_weights = weights.transpose(2, 1)
# drop entire matrix of last two dims (prev dims 1 and 2)
dropped_transposed_weights = nn.functional.dropout2d(
transposed_weights, p=self.dropout, training=self.training
)
dropped_weights = dropped_transposed_weights.transpose(2, 1)
position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))
else:
position_encodings = torch.cat(
[torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights],
dim=-1,
)
else:
if reduce(mul, self.axial_pos_shape) < sequence_length:
raise ValueError(
f"Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to "
f"max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, "
f"{self.least_common_mult_chunk_length})."
)
# compute how many columns are needed
max_position_id = position_ids.max().item()
required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])
# cut to columns that are needed
position_encodings = torch.cat(
[weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1
)
position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))
# select correct position encodings
position_encodings = torch.cat(
[
torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0)
for i in range(batch_size)
],
dim=0,
)
return position_encodings
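# Hedged configuration sketch (assumption, not in the original file): with
# config.axial_pos_shape = (128, 512) and config.axial_pos_embds_dim = (256, 768)
# (summing to hidden_size=1024), the two factored weight tensors have shapes
# (128, 1, 256) and (1, 512, 768) and replace a full (65536, 1024) embedding table.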
class PositionEmbeddings(nn.Module):
"""Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`."""
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
def forward(self, position_ids):
position_embeddings = self.embedding(position_ids)
position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training)
return position_embeddings
class ReformerEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.hidden_dropout_prob
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = (
AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)
)
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(
start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if position_ids.shape[-1] > self.max_position_embeddings:
raise ValueError(
f"Sequence Length: {position_ids.shape[-1]} has to be less or equal than "
f"config.max_position_embeddings {self.max_position_embeddings}."
)
# dropout
embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)
# add positional embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class EfficientAttentionMixin:
"""
A few utilities for nn.Modules in Reformer, to be used as a mixin.
"""
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
"""
Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
Returns:
tensor of shape [batch_size, num_attention_heads, n_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).
"""
if num_chunks_before == 0 and num_chunks_after == 0:
return vectors
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
if i == 0:
slices.append(vectors)
else:
slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
return torch.cat(slices, dim=3)
def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
"""
splits hidden_size dim into attn_head_size and num_attn_heads
"""
new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
x = x.view(*new_x_shape)
return x.transpose(2, 1)
def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
"""
merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
x = x.permute(0, 2, 1, 3)
return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))
def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
"""
splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
"""
batch_size = vectors.shape[0]
split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)
if len(vectors.shape) == 4:
return torch.reshape(vectors, split_dim_shape + (attn_head_size,))
elif len(vectors.shape) == 3:
return torch.reshape(vectors, split_dim_shape)
else:
raise ValueError(f"Input vector rank should be one of [3, 4], but is: {len(vectors.shape)}")
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_length = config.lsh_attn_chunk_length
self.num_hashes = config.num_hashes
self.num_buckets = config.num_buckets
self.num_chunks_before = config.lsh_num_chunks_before
self.num_chunks_after = config.lsh_num_chunks_after
self.hash_seed = config.hash_seed
self.is_decoder = config.is_decoder
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.lsh_attention_probs_dropout_prob
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
# projection matrices
self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
# save mask value here. Need fp32 and fp16 mask values
self.register_buffer("self_mask_value_float16", torch.tensor(-1e3))
self.register_buffer("self_mask_value_float32", torch.tensor(-1e5))
self.register_buffer("mask_value_float16", torch.tensor(-1e4))
self.register_buffer("mask_value_float32", torch.tensor(-1e9))
def _split_hidden_size_and_add_rotary(self, x, num_attn_heads, attn_head_size, seq_len):
"""
splits hidden_size dim into attn_head_size and num_attn_heads
"""
new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
x = x.view(*new_x_shape)
# Applies rotary embeddings
sincos = fixed_pos_embedding(x, 1, seq_len=seq_len)
x = apply_rotary_pos_emb(x, sincos, offset=0)
return x.transpose(2, 1)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
buckets=None,
past_buckets_states=None,
use_cache=False,
output_attentions=False,
**kwargs,
):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
# num hashes can optionally be overwritten by user
num_hashes = num_hashes if num_hashes is not None else self.num_hashes
do_cached_attention = use_cache and past_buckets_states[1] is not None
# check if cache shall be used and that hidden states are already cached
if do_cached_attention:
assert sequence_length == 1, (
"At the moment, auto-regressive language generation is only possible one word at a time. Make sure"
f" that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed."
)
past_buckets = past_buckets_states[0]
past_states = past_buckets_states[1]
# get query vector
query_vectors = self.query_key(hidden_states)
query_vectors = self._split_hidden_size_and_add_rotary(
query_vectors, self.num_attention_heads, self.attention_head_size, seq_len=sequence_length
)
if past_buckets is not None:
key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(
query_vectors=query_vectors,
attention_mask=attention_mask,
num_hashes=num_hashes,
hidden_states=hidden_states,
past_states=past_states,
past_buckets=past_buckets,
)
query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
value_vectors = self._value_per_attn_head(key_value_hidden_states)
# split key & value vectors by num hashes to apply
# self attention on each separately
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
num_hashes,
-1,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
num_hashes,
-1,
self.num_attention_heads,
self.attention_head_size,
)
# repeat query vectors across hash dimension
query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
else:
key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)
query_key_vectors = self.query_key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
else:
# project hidden_states to query_key and value
query_vectors = None
query_key_vectors = self.query_key(hidden_states)
value_vectors = self.value(hidden_states)
# if query key is not already split
if not do_cached_attention or past_buckets is None:
query_key_vectors = self._split_hidden_size_and_add_rotary(
query_key_vectors, self.num_attention_heads, self.attention_head_size, seq_len=sequence_length
)
value_vectors = self._split_hidden_size_dim(
value_vectors, self.num_attention_heads, self.attention_head_size
)
# cache buckets for next incremental decoding
if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length:
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
# free memory
del hidden_states
assert (
query_key_vectors.shape[-1] == self.attention_head_size
), f"last dim of query_key_vectors is {query_key_vectors.shape[-1]} but should be {self.attention_head_size}."
assert (
value_vectors.shape[-1] == self.attention_head_size
), f"last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}."
do_standard_self_attention = (sequence_length <= self.chunk_length) or (
use_cache and past_buckets_states[1] is not None
)
# LSH attention only makes sense if chunked attention should be performed
if not do_standard_self_attention:
# set `num_buckets` on the fly, recommended way to do it
if self.num_buckets is None:
self._set_num_buckets(sequence_length)
# use cached buckets for backprop only
if buckets is None:
# hash query key vectors into buckets
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
else:
# make sure buckets has correct shape for LSH attention
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)
assert (
int(buckets.shape[-1]) == num_hashes * sequence_length
), f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}"
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
sequence_length, buckets, num_hashes
)
# make sure bucket idx is not longer than sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
# cluster query key value vectors according to hashed buckets
query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
if self.chunk_length is None:
assert self.num_chunks_before == 0 and self.num_chunks_after == 0, (
"If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and"
" `config.num_chunks_before` are set to 0."
)
elif do_cached_attention and past_buckets is not None:
# use max sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx
else:
# get sequence length indices
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# scale key vectors
key_vectors = self._len_and_dim_norm(query_key_vectors)
# set query_vectors to query key vectors if LSH self attention
query_vectors = query_vectors if query_vectors is not None else query_key_vectors
# free memory
del query_key_vectors
# get attention probs
out_vectors, logits, attention_probs = self._attend(
query_vectors=query_vectors,
key_vectors=key_vectors,
value_vectors=value_vectors,
sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
attention_mask=attention_mask,
head_mask=head_mask,
do_standard_self_attention=do_standard_self_attention,
do_cached_attention=do_cached_attention,
)
# free memory
del key_vectors, value_vectors
# re-order out_vectors and logits
if not do_standard_self_attention:
# sort clusters back to correct ordering
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention or (do_cached_attention and past_buckets is not None):
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(
out_vectors,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
)
logits = self._split_seq_length_dim_to(
logits,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
).unsqueeze(-1)
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
# free memory
del probs_vectors
# free memory
del logits
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
), (
"out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length,"
" config.attention_head_size]`."
)
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
if buckets is not None:
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)
return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
def _query_per_attn_head(self, hidden_states):
per_head_query_key = self.query_key.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key)
return query_key_vectors
def _value_per_attn_head(self, hidden_states):
per_head_value = self.value.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value)
return value_vectors
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
batch_size = vectors.shape[0]
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
if isinstance(self.num_buckets, int):
assert (
self.num_buckets % 2 == 0
), f"There should be an even number of buckets, but `self.num_buckets`: {self.num_buckets}"
rotation_size = self.num_buckets
num_buckets = self.num_buckets
else:
# Factorize the hash if self.num_buckets is a list or tuple
rotation_size, num_buckets = 0, 1
for bucket_factor in self.num_buckets:
assert (
bucket_factor % 2 == 0
), f"The number of buckets should be even, but `num_bucket`: {bucket_factor}"
rotation_size = rotation_size + bucket_factor
num_buckets = num_buckets * bucket_factor
# remove gradient
vectors = vectors.detach()
if self.hash_seed is not None:
# for determinism
torch.manual_seed(self.hash_seed)
rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
# create random rotations of shape (num_attn_heads, head_size, num_hashes, rotation_size // 2)
random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
# Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2
rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations)
if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
buckets = torch.argmax(rotated_vectors, dim=-1)
else:
# Get the buckets for them and combine.
buckets, cur_sum, cur_product = None, 0, 1
for bucket_factor in self.num_buckets:
rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]
cur_sum = cur_sum + bucket_factor // 2
rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)
if buckets is None:
buckets = torch.argmax(rotated_vectors_factor, dim=-1)
else:
buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))
cur_product = cur_product * bucket_factor
if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]):
# add an extra bucket for padding tokens only
num_buckets = num_buckets + 1
# assign padding tokens extra bucket
buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)
buckets = torch.where(
buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)
)
elif increase_num_buckets:
num_buckets = num_buckets + 1
# buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).
# Next we add offsets so that bucket numbers from different hashing rounds don't overlap.
offsets = torch.arange(num_hashes, device=vectors.device)
offsets = (offsets * num_buckets).view((1, 1, -1, 1))
# expand to batch size and num attention heads
offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])
offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
return offset_buckets
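    # Hedged numeric sketch (assumption, not in the original file): with
    # num_buckets=4 and num_hashes=2, `offsets` above is [0, 4], so round-0
    # buckets stay in 0..3 while round-1 buckets shift to 4..7, keeping the
    # hashing rounds disjoint after the final flatten.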
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
# no gradients are needed
with torch.no_grad():
# hash-based sort
sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
# create simple indices to scatter to, to have undo sort
indices = (
torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
.view(1, 1, -1)
.expand(sorted_bucket_idx.shape)
)
# get undo sort
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return sorted_bucket_idx, undo_sorted_bucket_idx
def _set_num_buckets(self, sequence_length):
# `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper
num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
# make sure num_buckets is a power of 2
num_buckets = 2**num_buckets_pow_2
# factorize `num_buckets` if `num_buckets` becomes too large
num_buckets_limit = 2 * max(
int((self.max_position_embeddings // self.chunk_length) ** (0.5)),
self.chunk_length,
)
if num_buckets > num_buckets_limit:
num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]
logger.warning(f"config.num_buckets is not set. Setting config.num_buckets to {num_buckets}...")
# set num buckets in config to be properly saved
self.config.num_buckets = num_buckets
self.num_buckets = num_buckets
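    # Hedged worked example (assumption, not in the original file): with
    # sequence_length=4096, chunk_length=64 and max_position_embeddings=65536,
    # num_buckets_pow_2 = (2 * 64).bit_length() - 1 = 7, so num_buckets = 128;
    # the limit 2 * max(int(1024 ** 0.5), 64) = 128 is not exceeded, so no
    # factorization into a list is needed.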
def _attend(
self,
query_vectors,
key_vectors,
value_vectors,
sorted_bucket_idx_per_hash,
attention_mask,
head_mask,
do_standard_self_attention,
do_cached_attention,
):
# look at previous and following chunks if chunked attention
if not do_standard_self_attention:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
# get logits and dots
# (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft))
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# free memory
del query_vectors, key_vectors
# if chunked attention split bucket idxs to query and key
if not do_standard_self_attention:
query_bucket_idx = self._split_seq_length_dim_to(
sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads
)
key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
elif do_cached_attention and query_key_dots.ndim > 4:
key_value_bucket_idx = sorted_bucket_idx_per_hash
query_bucket_idx = (
key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()
)
elif do_cached_attention and query_key_dots.ndim <= 4:
query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]
key_value_bucket_idx = torch.arange(
query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device
)[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
else:
query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
# get correct mask values depending on precision
if query_key_dots.dtype == torch.float16:
self_mask_value = self.self_mask_value_float16.half()
mask_value = self.mask_value_float16.half()
else:
self_mask_value = self.self_mask_value_float32
mask_value = self.mask_value_float32
if not do_cached_attention:
mask = self._compute_attn_mask(
query_bucket_idx,
key_value_bucket_idx,
attention_mask,
query_key_dots.shape,
do_standard_self_attention,
)
if mask is not None:
query_key_dots = torch.where(mask, query_key_dots, mask_value)
# free memory
del mask
# Self mask is ALWAYS applied.
# From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):
# " While attention to the future is not allowed, typical implementations of the
# Transformer do allow a position to attend to itself.
# Such behavior is undesirable in a shared-QK formulation because the dot-product
# of a query vector with itself will almost always be greater than the dot product of a
# query vector with a vector at another position. We therefore modify the masking
# to forbid a token from attending to itself, except in situations
# where a token has no other valid attention targets (e.g. the first token in a sequence) "
self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(
query_bucket_idx.device
)
# apply self_mask
query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
# free memory
del self_mask
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
# dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`
attention_probs = torch.exp(query_key_dots - logits)
# free memory
del query_key_dots
# dropout
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# attend values
out_vectors = torch.matmul(attention_probs, value_vectors)
# free memory
del value_vectors
# merge chunk length
if out_vectors.ndim > 4:
logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
return out_vectors, logits, attention_probs
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention
):
# attention mask for LSH
if attention_mask is not None:
# if chunked attention, the attention mask has to correspond to LSH order
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
# expand attn_mask to fit with key_value_bucket_idx shape
attention_mask = attention_mask[:, None, :]
attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))
# extract attention mask from LSH sorted key_indices
attention_mask = torch.gather(attention_mask, -1, key_indices)
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
def _get_relevant_hid_states_and_buckets(
self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets
):
# concat hidden states
hidden_states = torch.cat([past_states, hidden_states], dim=1)
# batch size and sequence length of the concatenated hidden states
batch_size = hidden_states.shape[0]
sequence_length = hidden_states.shape[1]
# check if cached buckets include pad bucket
max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)
# if pad bucket was cached => need to increase num buckets for caching
increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1
# retrieve query buckets
query_buckets = self._hash_vectors(
query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets
)
# concat buckets
concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)
# hash-based sort
bucket_idx = _stable_argsort(concat_buckets, dim=-1)
# bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength
assert bucket_idx.shape == (
batch_size,
self.num_attention_heads,
num_hashes,
sequence_length,
), (
f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but"
f" has shape {bucket_idx.shape}."
)
# find indices of new bucket indices
relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero()
# expand relevant bucket indices to its chunks
relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length)
relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]
# adapt bucket_idx for batch and hidden states for index select
bucket_idx_batch_offset = sequence_length * (
batch_size
* torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long)
// relevant_bucket_idx_chunk.shape[-1]
)
# add batch offset
relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset
hidden_states = hidden_states.reshape((-1, self.hidden_size))
# select all relevant hidden states
relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch)
# reshape hidden states and bucket_idx to correct output
relevant_hidden_states = relevant_hidden_states.reshape(
batch_size, self.num_attention_heads, -1, self.hidden_size
)
relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(
batch_size, self.num_attention_heads, num_hashes, -1
)
assert (
relevant_hidden_states.shape[2]
== (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes
), (
"There should be"
f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`,"
f" there are {relevant_hidden_states.shape[2]} `hidden_states`."
)
assert (
relevant_bucket_idx_chunk.shape[-1]
== (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length
), (
"There should be"
f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are"
f" {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`."
)
return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets
def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
# get relevant indices of where chunk starts and its size
start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length
total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)
# expand start indices and add correct chunk offset via arange
expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
chunk_sequence_indices = expanded_start_indices + torch.arange(
total_chunk_size, device=indices.device, dtype=torch.long
).unsqueeze(0).expand(indices.shape[0], total_chunk_size)
# make sure that circular logic holds via % seq len
chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length
# expand indices and set indices correctly
indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
indices[:, -1] = chunk_sequence_indices
return indices
def _len_and_dim_norm(self, vectors):
"""
length and attention head size dim normalization
"""
vectors = self._len_norm(vectors)
vectors = vectors * torch.rsqrt(
torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)
)
return vectors
def _len_norm(self, x, epsilon=1e-6):
"""
length normalization
"""
variance = torch.mean(x**2, -1, keepdim=True)
norm_x = x * torch.rsqrt(variance + epsilon)
return norm_x
def _gather_by_expansion(self, vectors, idxs, num_hashes):
"""
expand dims of idxs and vectors for all hashes and gather
"""
expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
vectors = vectors.repeat(1, 1, num_hashes, 1)
return torch.gather(vectors, 2, expanded_idxs)
class ReverseSort(Function):
"""
After chunked attention is applied (which sorts the clusters), the original ordering has to be restored. Since a
customized backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted here.
"""
@staticmethod
def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
# save sorted_bucket_idx for backprop
with torch.no_grad():
ctx.sorted_bucket_idx = sorted_bucket_idx
# undo sort to have correct order for next layer
expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)
logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
return out_vectors, logits
@staticmethod
def backward(ctx, grad_out_vectors, grad_logits):
# get parameters saved in ctx
sorted_bucket_idx = ctx.sorted_bucket_idx
expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
# reverse sort of forward
grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)
grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)
# return grad and `None` fillers for last 2 forward args
return grad_out_vectors, grad_logits, None, None
class ReformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
all_head_size = config.num_attention_heads * config.attention_head_size
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ReformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.layer_id = layer_id
self.attn_layers = config.attn_layers
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh":
self.self_attention = LSHSelfAttention(config)
else:
raise NotImplementedError
self.output = ReformerSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_attentions=False,
buckets=None,
):
hidden_states = self.layer_norm(hidden_states)
# make sure cached hidden states are set to None for the backward pass
if past_buckets_states is not None:
past_buckets_states_layer = past_buckets_states[self.layer_id]
else:
past_buckets_states_layer = None
# use cached buckets for backprop if `buckets` is not None for LSHSelfAttention
self_attention_outputs = self.self_attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states_layer,
use_cache=use_cache,
output_attentions=output_attentions,
buckets=buckets,
)
# add buckets if necessary
if hasattr(self_attention_outputs, "buckets"):
buckets = self_attention_outputs.buckets
else:
buckets = None
# cache hidden states for future use
if use_cache:
if past_buckets_states[self.layer_id][0] is None:
# padded input should not be cached
past_buckets = (
buckets[:, :, :, :orig_sequence_length]
if (buckets is not None and orig_sequence_length > 1)
else buckets
)
else:
past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1)
if past_buckets_states[self.layer_id][1] is None:
# padded input should not be cached
past_states = hidden_states[:, :orig_sequence_length]
else:
past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1)
past_buckets_states[self.layer_id] = (past_buckets, past_states)
# compute attention feed forward output
attention_output = self.output(self_attention_outputs.hidden_states)
return AttentionOutput(
hidden_states=attention_output,
attention_probs=self_attention_outputs.attention_probs,
buckets=buckets,
)
class ReformerFeedForwardDense(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
if isinstance(config.hidden_act, str):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.act_fn(hidden_states)
return hidden_states
class ReformerFeedForwardOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ChunkReformerFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense = ReformerFeedForwardDense(config)
self.output = ReformerFeedForwardOutput(config)
def forward(self, attention_output):
return apply_chunking_to_forward(
self.forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
def forward_chunk(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dense(hidden_states)
return self.output(hidden_states)
class ReformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = ReformerAttention(config, layer_id)
# dropout requires the same seed
# for the forward and backward passes
self.attention_seed = None
self.feed_forward_seed = None
self.feed_forward = ChunkReformerFeedForward(config)
def _init_attention_seed(self):
"""
This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1
normal forward call and 1 forward call in backward to recalculate activations.
"""
# randomize seeds
# use cuda generator if available
if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
# GPU
device_idx = torch.cuda.current_device()
self.attention_seed = torch.cuda.default_generators[device_idx].seed()
else:
# CPU
self.attention_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.attention_seed)
def _init_feed_forward_seed(self):
"""
This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:
1 normal forward call and 1 forward call in backward to recalculate activations.
"""
# randomize seeds
# use cuda generator if available
if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
# GPU
device_idx = torch.cuda.current_device()
self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
else:
# CPU
self.feed_forward_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.feed_forward_seed)
def forward(
self,
prev_attn_output,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_attentions=False,
):
with torch.no_grad():
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
if self.training:
self._init_attention_seed()
attn_outputs = self.attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_attentions=output_attentions,
)
attn_output = attn_outputs.hidden_states
# Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
# Y_1 = X_1 + f(X_2)
attn_output = prev_attn_output + attn_output
# free memory
del prev_attn_output
# every forward pass we sample a different seed
# for dropout and save seed for forward fn in backward
# to have correct dropout
if self.training:
self._init_feed_forward_seed()
# Y_2 = X_2 + g(Y_1)
hidden_states = hidden_states + self.feed_forward(attn_output)
return ReformerOutput(
attn_output=attn_output,
hidden_states=hidden_states,
attention_probs=attn_outputs.attention_probs,
buckets=attn_outputs.buckets,
)
def backward_pass(
self,
next_attn_output,
hidden_states,
grad_attn_output,
grad_hidden_states,
attention_mask=None,
head_mask=None,
buckets=None,
):
# Implements the backward pass for reversible ResNets.
# A good blog post on how this works can be found here:
# Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
# This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
assert self.training, (
"If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the"
" model into training mode."
)
with torch.enable_grad():
next_attn_output.requires_grad = True
# set seed to have correct dropout
torch.manual_seed(self.feed_forward_seed)
# g(Y_1)
res_hidden_states = self.feed_forward(next_attn_output)
res_hidden_states.backward(grad_hidden_states, retain_graph=True)
with torch.no_grad():
# X_2 = Y_2 - g(Y_1)
hidden_states = hidden_states - res_hidden_states
del res_hidden_states
grad_attn_output = grad_attn_output + next_attn_output.grad
next_attn_output.grad = None
with torch.enable_grad():
hidden_states.requires_grad = True
# set seed to have correct dropout
torch.manual_seed(self.attention_seed)
# f(X_2)
# use cached buckets for backprop if `buckets` is not None for LSHSelfAttention
output = self.attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
buckets=buckets,
).hidden_states
output.backward(grad_attn_output, retain_graph=True)
with torch.no_grad():
# X_1 = Y_1 - f(X_2)
attn_output = next_attn_output - output
del output, next_attn_output
grad_hidden_states = grad_hidden_states + hidden_states.grad
hidden_states.grad = None
hidden_states = hidden_states.detach()
return ReformerBackwardOutput(
attn_output=attn_output,
hidden_states=hidden_states,
grad_attn_output=grad_attn_output,
grad_hidden_states=grad_hidden_states,
)
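# Hedged illustration (assumption, not in the original file): the reversible
# coupling is exactly invertible, which is why activations need not be stored:
#   y1 = x1 + f(x2);  y2 = x2 + g(y1)
#   x2 = y2 - g(y1);  x1 = y1 - f(x2)
# `backward_pass` above replays f and g with the saved dropout seeds so the
# recomputed activations match those of the forward pass.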
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/lsh_utils/modeling_reformer.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/lsh_utils/modeling_reformer.py",
"repo_id": "archai",
"token_count": 26144
}
| 345 |
from typing import Union, Tuple, Optional
import numpy as np
from archai.discrete_search.search_spaces.config import (
ArchParamTree, repeat_config, ConfigSearchSpace,
DiscreteChoice
)
from .model import LanguageModel
from .ops import OPS
from .utils import get_attn_head_simplex
def to_tuple(x: Union[Tuple[int], int]) -> Tuple[int]:
if not isinstance(x, (tuple, list)):
return (x, )
return x
class TfppSearchSpace(ConfigSearchSpace):
def __init__(self,
backbone: str = 'codegen',
embed_dims: Union[Tuple[int], int] = (768, ),
inner_dims: Union[Tuple[int], int] = (3072, ),
total_heads: Union[Tuple[int], int] = (12,),
total_layers: Union[Tuple[int], int] = (8, 10, 12, 16, 18),
local_attn_window_sizes: Union[Tuple[int], int] = (256, ),
sgconv_kernel_sizes: Union[Tuple[int], int] = (256, ),
sconv1d_kernel_sizes: Union[Tuple[int], int] = (256, ),
lsh_attn_num_hashes: Union[Tuple[int], int] = (4, 8),
lsh_attn_bucket_size: Union[Tuple[int], int] = (64,),
op_subset: Optional[Tuple[str]] = None,
mixed_ops: bool = True,
homogeneous: bool = False,
seed: Optional[int] = None,
disable_cache: bool = True,
**hf_config_kwargs) -> None:
op_subset = {
op_name: op for op_name, op in OPS.items()
if op_name in (op_subset or list(OPS.keys())) and not op.deprecated
}
if disable_cache:
hf_config_kwargs['use_cache'] = False
if mixed_ops:
op_allocations = get_attn_head_simplex(total_heads, list(op_subset.keys()), grid_scale=2)
else:
op_allocations = [
tuple([
(op_name, float(item)) for op_name, item in zip(op_subset.keys(), alloc)
])
for alloc in np.eye(len(op_subset), dtype=np.uint).tolist()
]
arch_param_tree = ArchParamTree({
'backbone': backbone,
'hidden_size': DiscreteChoice(to_tuple(embed_dims)),
'hidden_layers': repeat_config({
'total_heads': DiscreteChoice(to_tuple(total_heads)),
'op_allocation': DiscreteChoice(op_allocations),
'd_inner': DiscreteChoice(to_tuple(inner_dims)),
'sgconv': {
'kernel_size': DiscreteChoice(to_tuple(sgconv_kernel_sizes))
},
'sgconv3': {
'kernel_size': DiscreteChoice(to_tuple(sgconv_kernel_sizes))
},
'sep_conv1d': {
'kernel_size': DiscreteChoice(to_tuple(sconv1d_kernel_sizes))
},
'local_attn': {
'window_size': DiscreteChoice(to_tuple(local_attn_window_sizes))
},
'lsh_attn': {
'num_hashes': DiscreteChoice(to_tuple(lsh_attn_num_hashes)),
'bucket_size': DiscreteChoice(to_tuple(lsh_attn_bucket_size))
}
}, repeat_times=total_layers, share_arch=homogeneous)
})
super().__init__(
LanguageModel,
arch_param_tree,
model_kwargs=(hf_config_kwargs or {}),
seed=seed,
)
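# Hedged usage sketch (assumption, not in the original file; argument values and
# the extra keyword are illustrative):
#   space = TfppSearchSpace(backbone="codegen", embed_dims=(768,),
#                           total_heads=(12,), total_layers=(2, 4),
#                           mixed_ops=False, vocab_size=50257)
#   model = space.random_sample()  # ArchaiModel wrapping a LanguageModel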
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/search_space.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/search_space.py",
"repo_id": "archai",
"token_count": 1907
}
| 346 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import Optional, Tuple
import torch
from archai.quantization.qat import prepare_with_qat
class MixedQAT(torch.nn.Module):
"""Mixed QAT (Quantization-Aware Training) model, which can be fine-tuned
using a linear combination of regular and QAT losses.
"""
def __init__(self, model: torch.nn.Module, qat_weight: Optional[float] = 0.2) -> None:
"""Initialize the class by creating standard and QAT-based attributes
of the incoming model.
Args:
model: Instance of the model that will be fine-tuned with Mixed QAT.
qat_weight: Amount of QAT-based loss that should be used in the linear combination.
This value should be between 0 and 1.
"""
super().__init__()
if qat_weight < 0.0 or qat_weight > 1.0:
raise ValueError(f"qat_weight: {qat_weight} should be between 0 and 1.")
self.qat_weight = qat_weight
self.regular_weight = 1.0 - qat_weight
self.model = model
self.qat_model = copy.deepcopy(model)
# Shares all parameters
for module, qat_module in zip(self.model.modules(), self.qat_model.modules()):
if hasattr(qat_module, "weight"):
qat_module.weight = module.weight
if hasattr(qat_module, "bias"):
qat_module.bias = module.bias
# Adds fake quantization
prepare_with_qat(self.qat_model, onnx_compatible=True)
for param, qat_param in zip(self.model.parameters(), self.qat_model.parameters()):
assert qat_param is param, "MixedQAT parameters are not fully shared."
def forward(
self, input_ids: torch.LongTensor, labels: torch.LongTensor, *args, **kwargs
) -> Tuple[torch.Tensor, ...]:
outputs = self.model(input_ids=input_ids, labels=labels, *args, **kwargs)
qat_outputs = self.qat_model(input_ids=input_ids, labels=labels, *args, **kwargs)
# If training, returns the linear combination of losses
if self.training:
loss = outputs.loss * self.regular_weight + qat_outputs.loss * self.qat_weight
return (loss,) + outputs[1:]
return qat_outputs
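# Hedged usage sketch (assumption, not in the original file): `model` is any
# causal LM whose forward returns an object with a `.loss` attribute.
#   mixed = MixedQAT(model, qat_weight=0.2)
#   mixed.train()
#   loss = mixed(input_ids, labels=input_ids)[0]  # 0.8*regular + 0.2*QAT loss
#   loss.backward()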
|
archai/archai/quantization/mixed_qat.py/0
|
{
"file_path": "archai/archai/quantization/mixed_qat.py",
"repo_id": "archai",
"token_count": 942
}
| 347 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List, Tuple
from overrides import overrides
from archai.common.config import Config
from archai.supergraph.algos.darts.mixed_op import MixedOp
from archai.supergraph.nas.model_desc import (
CellType,
ConvMacroParams,
EdgeDesc,
NodeDesc,
OpDesc,
TensorShape,
TensorShapes,
)
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.operations import Op
class DartsModelDescBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('mixed_op',
lambda op_desc, arch_params, affine:
MixedOp(op_desc, arch_params, affine))
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
reduction = (cell_type==CellType.Reduction)
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
# add mixed op for each edge in each node
# how does the stride works? For all ops connected to s0 and s1, we apply
# reduction in WxH. All ops connected elsewhere automatically gets
# reduced WxH (because all subsequent states are derived from s0 and s1).
# Note that channel is increased via conv_params for the cell
for i in range(node_count):
edges=[]
for j in range(i+2):
op_desc = OpDesc('mixed_op',
params={
'conv': conv_params,
'stride': 2 if reduction and j < 2 else 1
}, in_len=1, trainables=None, children=None)
edge = EdgeDesc(op_desc, input_ids=[j])
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
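# Hedged illustration (assumption, not in the original file): node i receives
# i+2 incoming edges (from the two cell inputs s0/s1 plus all previous
# intermediate nodes), so node_count=4 yields 2+3+4+5 = 14 mixed-op edges per
# cell, with stride 2 only on the edges reading s0/s1 in reduction cells.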
|
archai/archai/supergraph/algos/darts/darts_model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/darts/darts_model_desc_builder.py",
"repo_id": "archai",
"token_count": 1076
}
| 348 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import torch
import torch.nn.functional as F
from overrides import overrides
from torch import Tensor, nn
from torch.optim.optimizer import Optimizer
from archai.common import ml_utils
from archai.common.common import get_conf
from archai.common.config import Config
from archai.supergraph.algos.gumbelsoftmax.gs_op import GsOp
from archai.supergraph.nas.arch_trainer import ArchTrainer
from archai.supergraph.utils.checkpoint import CheckPoint
class GsArchTrainer(ArchTrainer):
def __init__(self, conf_train: Config, model: nn.Module, checkpoint: Optional[CheckPoint]) -> None:
super().__init__(conf_train, model, checkpoint)
conf = get_conf()
self._gs_num_sample = conf['nas']['search']['model_desc']['cell']['gs']['num_sample']
@overrides
def create_optimizer(self, conf_optim:Config, params) -> Optimizer:
# in this case we don't need to differentiate between arch_params and weights
# as the same optimizer will update both
arch_params = list(self.model.all_owned().param_by_kind('alphas'))
nonarch_params = list(self.model.nonarch_params(recurse=True))
# TODO: do we need different param groups? Check in paper if they are using different optimizers for alphas or not.
param_groups = [{'params': nonarch_params}, {'params': arch_params}]
return ml_utils.create_optimizer(conf_optim, param_groups)
@overrides
def pre_step(self, x:Tensor, y:Tensor)->None:
super().pre_step(x, y)
# TODO: is it a good idea to ensure model is in training mode here?
# for each node in a cell, get the alphas of each incoming edge
# concatenate them all together, sample from them via GS
# push the resulting weights to the corresponding edge ops
# for use in their respective forward
for _, cell in enumerate(self.model.cells):
for _, node in enumerate(cell.dag):
# collect all alphas for all edges in to node
node_alphas = []
for edge in node:
if hasattr(edge._op, 'PRIMITIVES') and type(edge._op) == GsOp:
node_alphas.extend(alpha for op, alpha in edge._op.ops())
# TODO: will creating a tensor from a list of tensors preserve the graph?
# Note: torch.Tensor(...) copies values and detaches from the autograd graph,
# so gradients will not flow back to the alphas here; torch.stack would
# preserve the graph.
node_alphas = torch.Tensor(node_alphas)
if node_alphas.nelement() > 0:
# sample ops via gumbel softmax
sample_storage = []
for _ in range(self._gs_num_sample):
sampled = F.gumbel_softmax(node_alphas, tau=1, hard=False, eps=1e-10, dim=-1)
sample_storage.append(sampled)
samples_summed = torch.sum(torch.stack(sample_storage, dim=0), dim=0)
samples = samples_summed / torch.sum(samples_summed)
# TODO: should we be normalizing the sampled weights?
# TODO: do gradients blow up as number of samples increases?
# send the sampled op weights to their respective edges
# to be used in forward
counter = 0
for _, edge in enumerate(node):
if hasattr(edge._op, 'PRIMITIVES') and type(edge._op) == GsOp:
this_edge_sampled_weights = samples[counter:counter+len(edge._op.PRIMITIVES)]
edge._op.set_op_sampled_weights(this_edge_sampled_weights)
counter += len(edge._op.PRIMITIVES)
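# --- Illustrative sketch (not part of the trainer): the sampling scheme from
# pre_step() reduced to a standalone snippet. `logits` is a hypothetical
# stand-in for the concatenated per-edge alphas of a single node.
if __name__ == '__main__':
    logits = torch.randn(8)
    samples = torch.stack(
        [F.gumbel_softmax(logits, tau=1, hard=False, dim=-1) for _ in range(4)], dim=0
    )
    summed = samples.sum(dim=0)
    weights = summed / summed.sum()  # normalized op weights, summing to 1
    assert torch.isclose(weights.sum(), torch.tensor(1.0))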
|
archai/archai/supergraph/algos/gumbelsoftmax/gs_arch_trainer.py/0
|
{
"file_path": "archai/archai/supergraph/algos/gumbelsoftmax/gs_arch_trainer.py",
"repo_id": "archai",
"token_count": 1598
}
| 349 |
import logging
from typing import List
import torch
from torch import nn
from archai.common import ml_utils
from archai.supergraph.algos.nasbench101.model import Network
from archai.supergraph.algos.nasbench101.model_spec import ModelSpec
EXAMPLE_VERTEX_OPS = ['input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3', 'output']
EXAMPLE_DESC_MATRIX = [[0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0]]
def build(desc_matrix:List[List[int]], vertex_ops:List[str], device=None,
stem_out_channels=128, num_stacks=3, num_modules_per_stack=3, num_labels=10)->nn.Module:
model_spec = ModelSpec(desc_matrix, vertex_ops)
model = Network(model_spec, stem_out_channels, num_stacks, num_modules_per_stack, num_labels)
logging.info(f'Model parameters: {ml_utils.param_size(model)}')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device
model.to(device)
return model
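# Usage sketch: build the example cell defined above and run a forward pass.
# The CIFAR-10-sized input below is an assumption based on num_labels=10.
if __name__ == '__main__':
    net = build(EXAMPLE_DESC_MATRIX, EXAMPLE_VERTEX_OPS, device='cpu')
    out = net(torch.randn(1, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([1, 10])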
|
archai/archai/supergraph/algos/nasbench101/model_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/model_builder.py",
"repo_id": "archai",
"token_count": 605
}
| 350 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from overrides import overrides
from archai.supergraph.algos.random.random_model_desc_builder import (
RandomModelDescBuilder,
)
from archai.supergraph.nas.arch_trainer import TArchTrainer
from archai.supergraph.nas.exp_runner import ExperimentRunner
class RandomExperimentRunner(ExperimentRunner):
@overrides
def model_desc_builder(self)->RandomModelDescBuilder:
return RandomModelDescBuilder()
@overrides
def trainer_class(self)->TArchTrainer:
return None
|
archai/archai/supergraph/algos/random/random_exp_runner.py/0
|
{
"file_path": "archai/archai/supergraph/algos/random/random_exp_runner.py",
"repo_id": "archai",
"token_count": 182
}
| 351 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torchvision
from overrides import overrides
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class AircraftProvider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainpath = os.path.join(self._dataroot, 'aircraft', 'train')
trainset = torchvision.datasets.ImageFolder(trainpath, transform=transform_train)
if load_test:
testpath = os.path.join(self._dataroot, 'aircraft', 'test')
testset = torchvision.datasets.ImageFolder(testpath, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
if isinstance(img_size, int):
img_size = (img_size, img_size)
# TODO: update MEAN, STD, currently mit67 values
MEAN = [0.4893, 0.4270, 0.3625]
STD = [0.2631, 0.2565, 0.2582]
# transformations match that in
# https://github.com/antoyang/NAS-Benchmark/blob/master/DARTS/preproc.py
train_transf = [
transforms.RandomResizedCrop(img_size, scale=(0.75, 1)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2)
]
margin_size = (int(img_size[0] + img_size[0]*0.1), int(img_size[1] + img_size[1]*0.1))
test_transf = [transforms.Resize(margin_size), transforms.CenterCrop(img_size)]
#test_transf = [transforms.Resize(img_size)]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(train_transf + normalize)
test_transform = transforms.Compose(test_transf + normalize)
return train_transform, test_transform
register_dataset_provider('aircraft', AircraftProvider)
|
archai/archai/supergraph/datasets/providers/aircraft_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/aircraft_provider.py",
"repo_id": "archai",
"token_count": 1104
}
| 352 |
import os
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1,
bias=False)),
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate,
bn_size, drop_rate)
self.add_module('denselayer%d' % (i + 1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottleneck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=10):
super(DenseNet, self).__init__()
# First convolution
# CIFAR-10: kernel_size 7 ->3, stride 2->1, padding 3->1
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1,
padding=1, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
## END
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate,
drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
out = self.classifier(out)
return out
def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, device, **kwargs):
model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
if pretrained:
script_dir = os.path.dirname(__file__)
state_dict = torch.load(script_dir + '/state_dicts/'+arch+'.pt', map_location=device)
model.load_state_dict(state_dict)
return model
def densenet121(pretrained=False, progress=True, device='cpu', **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, device,
**kwargs)
def densenet161(pretrained=False, progress=True, device='cpu', **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, device,
**kwargs)
def densenet169(pretrained=False, progress=True, device='cpu', **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, device,
**kwargs)
def densenet201(pretrained=False, progress=True, device='cpu', **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, device,
**kwargs)
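# Usage sketch: the constructors above are CIFAR-10 variants (3x32x32 stem,
# 10 classes by default), so an untrained forward pass looks like this.
if __name__ == '__main__':
    model = densenet121(pretrained=False)
    logits = model(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])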
|
archai/archai/supergraph/models/densenet.py/0
|
{
"file_path": "archai/archai/supergraph/models/densenet.py",
"repo_id": "archai",
"token_count": 3535
}
| 353 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import ABC
from typing import Iterable, Iterator, Optional, Tuple
from overrides import EnforceOverrides
from torch import nn
from archai.supergraph.nas.arch_params import ArchParams, NNTypes
class ArchModule(nn.Module, ABC, EnforceOverrides):
"""ArchModule enahnces nn.Module by making a clear separation between regular
weights and the architecture weights. The architecture parameters can be added
using `create_arch_params()` method and then accessed using `arch_params()` method."""
def __init__(self) -> None:
super().__init__()
# these are params module should use, they may be shared or created by this module
self._arch_params = ArchParams.empty()
        # these are the params created and registered in this module
self._owned_arch_params:Optional[ArchParams] = None
def create_arch_params(self, named_params:Iterable[Tuple[str, NNTypes]])->None:
if len(self._arch_params):
raise RuntimeError('Arch parameters for this module already exist')
self._owned_arch_params = ArchParams(named_params, registrar=self)
self.set_arch_params(self._owned_arch_params)
def set_arch_params(self, arch_params:ArchParams)->None:
if len(self._arch_params):
raise RuntimeError('Arch parameters for this module already exist')
self._arch_params = arch_params
def arch_params(self, recurse=False, only_owned=False)->ArchParams:
# note that we will cache lists on first calls, this doesn't allow
# dynamic parameters but it makes this frequent calls much faster
if not recurse:
if not only_owned:
return self._arch_params
else:
return ArchParams.from_module(self, recurse=False)
else:
if not only_owned:
raise NotImplementedError('Recursively getting shared and owned arch params not implemented yet')
else:
return ArchParams.from_module(self, recurse=True)
def all_owned(self)->ArchParams:
return self.arch_params(recurse=True, only_owned=True)
def nonarch_params(self, recurse:bool)->Iterator[nn.Parameter]:
return ArchParams.nonarch_from_module(self, recurse)
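# Usage sketch (illustrative only; passing nn.Parameter entries to
# create_arch_params() is an assumption based on the NNTypes signature above):
if __name__ == '__main__':
    import torch

    class _ToyOp(ArchModule):
        def __init__(self) -> None:
            super().__init__()
            self.w = nn.Linear(4, 4)  # regular (non-arch) weights
            self.create_arch_params([('alphas', nn.Parameter(torch.zeros(3)))])

    op = _ToyOp()
    print(len(op.arch_params()))                            # owned arch params
    print(sum(1 for _ in op.nonarch_params(recurse=True)))  # Linear weight + bias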
|
archai/archai/supergraph/nas/arch_module.py/0
|
{
"file_path": "archai/archai/supergraph/nas/arch_module.py",
"repo_id": "archai",
"token_count": 854
}
| 354 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional, Tuple
from graphviz import Digraph
from archai.common.ordered_dict_logger import get_global_logger
from archai.common.utils import first_or_default
from archai.supergraph.nas.model_desc import CellDesc, CellType, ModelDesc
logger = get_global_logger()
def draw_model_desc(model_desc:ModelDesc, filepath:str=None, caption:str=None)\
->Tuple[Optional[Digraph],Optional[Digraph]]:
normal_cell_desc = first_or_default((c for c in model_desc.cell_descs() \
if c.cell_type == CellType.Regular), None)
reduced_cell_desc = first_or_default((c for c in model_desc.cell_descs() \
if c.cell_type == CellType.Reduction), None)
g_normal = draw_cell_desc(normal_cell_desc,
filepath+'-normal' if filepath else None,
caption) if normal_cell_desc is not None else None
g_reduct = draw_cell_desc(reduced_cell_desc,
filepath+'-reduced' if filepath else None,
caption) if reduced_cell_desc is not None else None
return g_normal, g_reduct
def draw_cell_desc(cell_desc:CellDesc, filepath:str=None, caption:str=None
)->Digraph:
""" make DAG plot and optionally save to filepath as .png """
edge_attr = {
'fontsize': '20',
'fontname': 'times'
}
node_attr = {
'style': 'filled',
'shape': 'rect',
'align': 'center',
'fontsize': '20',
'height': '0.5',
'width': '0.5',
'penwidth': '2',
'fontname': 'times'
}
g = Digraph(
format='png',
edge_attr=edge_attr,
node_attr=node_attr,
engine='dot')
g.body.extend(['rankdir=LR'])
# input nodes
    # TODO: remove the assumption of exactly two input nodes
g.node("c_{k-2}", fillcolor='darkseagreen2')
g.node("c_{k-1}", fillcolor='darkseagreen2')
# intermediate nodes
n_nodes = len(cell_desc.nodes())
for i in range(n_nodes):
g.node(str(i), fillcolor='lightblue')
for i, node in enumerate(cell_desc.nodes()):
for edge in node.edges:
op, js = edge.op_desc.name, edge.input_ids
for j in js:
if j == 0:
u = "c_{k-2}"
elif j == 1:
u = "c_{k-1}"
else:
u = str(j-2)
v = str(i)
g.edge(u, v, label=op, fillcolor="gray")
# output node
g.node("c_{k}", fillcolor='palegoldenrod')
for i in range(n_nodes):
g.edge(str(i), "c_{k}", fillcolor="gray")
# add image caption
if caption:
g.attr(label=caption, overlap='false', fontsize='20', fontname='times')
if filepath:
g.render(filepath, view=False)
logger.info(f'plot_filename: {filepath}')
return g
|
archai/archai/supergraph/nas/vis_model_desc.py/0
|
{
"file_path": "archai/archai/supergraph/nas/vis_model_desc.py",
"repo_id": "archai",
"token_count": 1381
}
| 355 |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
#
# Copyright (c) 2019 cybertronai.
# Licensed under the MIT license.
from typing import Iterable, Optional, Tuple
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
"""Lamb algorithm for large batch optimization.
It has been proposed in `Large Batch Optimization for Deep Learning:
Training BERT in 76 minutes`.
Reference:
https://arxiv.org/abs/1904.00962
"""
def __init__(
self,
params: Iterable,
lr: Optional[float] = 1e-3,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
eps: Optional[float] = 1e-6,
weight_decay: Optional[float] = 0.0,
adam: Optional[bool] = False,
) -> None:
"""Initialize the optimizer.
Args:
params: An iterable of parameters to optimize.
lr: The learning rate.
betas: Coefficients used for computing running averages.
eps: Term added to the denominator to improve numerical stability.
weight_decay: Weight decay.
adam: Whether to turn current optimizer into Adam.
Raises:
ValueError: If the learning rate, epsilon value, or beta parameters are invalid.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure: Optional[callable] = None) -> torch.FloatTensor:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Lamb does not support sparse gradients.")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast
# * math.sqrt(bias_correction2) / bias_correction1
step_size = group["lr"]
weight_norm = p.data.norm(p=2).clamp_(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"])
if group["weight_decay"] != 0:
adam_step.add_(p.data, alpha=group["weight_decay"])
adam_norm = adam_step.norm(p=2)
if weight_norm == 0.0 or adam_norm == 0.0:
trust_ratio = 1
else:
trust_ratio = weight_norm / (adam_norm + group["eps"])
state["weight_norm"] = weight_norm
state["adam_norm"] = adam_norm
state["trust_ratio"] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(adam_step, alpha=-step_size * trust_ratio)
return loss
@torch.jit.script
def _lamb_kernel(
param: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
exp_avg_sq: torch.Tensor,
beta1: float,
beta2: float,
step_size: float,
eps: float,
weight_decay: float,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
exp_avg = exp_avg * beta1 + (1 - beta1) * grad
exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)
adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
adam_step = adam_step + weight_decay * param
weight_norm = param.norm(p=2).clamp(0, 10)
adam_norm = adam_step.norm(p=2)
trust_ratio = weight_norm / (adam_norm + eps)
trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
trust_ratio = trust_ratio.float()
param = param - step_size * trust_ratio * adam_step
return param, exp_avg, exp_avg_sq
class JITLamb(Optimizer):
"""JIT-based version of the Lamb algorithm for large batch optimization.
It has been proposed in `Large Batch Optimization for Deep Learning:
Training BERT in 76 minutes`.
Reference:
https://arxiv.org/abs/1904.00962
"""
def __init__(
self,
params: Iterable,
lr: Optional[float] = 1e-3,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
eps: Optional[float] = 1e-6,
weight_decay: Optional[float] = 0.0,
adam: Optional[bool] = False,
) -> None:
"""Initialize the optimizer.
Args:
params: An iterable of parameters to optimize.
lr: The learning rate.
betas: Coefficients used for computing running averages.
eps: Term added to the denominator to improve numerical stability.
weight_decay: Weight decay.
adam: Whether to turn current optimizer into Adam.
Raises:
ValueError: If the learning rate, epsilon value, or beta parameters are invalid.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure: Optional[callable] = None) -> torch.FloatTensor:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("JITLamb does not support sparse gradients.")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
step_size = group["lr"]
param, exp_avg, exp_avg_sq = _lamb_kernel(
p.data,
grad,
exp_avg,
exp_avg_sq,
beta1,
beta2,
step_size,
group["eps"],
group["weight_decay"],
)
state["exp_avg"] = exp_avg
state["exp_avg_sq"] = exp_avg_sq
p.data = param
return loss
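# Usage sketch: both optimizers are drop-in torch optimizers, so the standard
# training step applies. The toy model and data here are illustrative only.
if __name__ == "__main__":
    from torch import nn

    model = nn.Linear(10, 1)
    optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(8, 10)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()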
|
archai/archai/trainers/lamb_optimizer.py/0
|
{
"file_path": "archai/archai/trainers/lamb_optimizer.py",
"repo_id": "archai",
"token_count": 4282
}
| 356 |
__include__: "darts.yaml" # just use darts defaults
nas:
eval:
loader:
train_batch: 96
search:
loader:
val_ratio: 0.0 # don't need val during search in gs
trainer:
epochs: 1
model_desc:
max_final_edges: 1
cell:
gs:
num_sample: 1
|
archai/confs/algos/gs.yaml/0
|
{
"file_path": "archai/confs/algos/gs.yaml",
"repo_id": "archai",
"token_count": 147
}
| 357 |
autoaug:
model:
type: pyramid
depth: 272
alpha: 200
bottleneck: True
loader:
aug: fa_reduced_cifar10
cutout: 16
batch: 64
epochs: 1800
lr_schedule:
type: 'cosine'
optimizer:
type: sgd
lr: 0.05
nesterov: True
decay: 0.00005
|
archai/confs/aug/pyramid272_cifar10_b64.yaml/0
|
{
"file_path": "archai/confs/aug/pyramid272_cifar10_b64.yaml",
"repo_id": "archai",
"token_count": 138
}
| 358 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
from glob import glob
from IPython.display import display, Image
from shutil import copyfile, rmtree
from archai.common.store import ArchaiStore
def get_results(store : ArchaiStore, blob_path, output_folder):
""" Fetch the pareto fully trained models and show the results """
os.makedirs(blob_path, exist_ok=True)
store.download(blob_path, output_folder)
def download_models(store : ArchaiStore, blob_folder, output_folder, models):
""" Download the .onnx models from our blob store """
for id in models:
sub_folder = os.path.join(output_folder, id)
if os.path.isdir(sub_folder):
rmtree(sub_folder)
os.makedirs(sub_folder, exist_ok=True)
print(f'Downloading results for model: {id}')
store.download(os.path.join(blob_folder, id), sub_folder)
def show_results(output_folder):
""" Disable .png images in our Jupyter notebook """
for name in os.listdir(output_folder):
if name.endswith(".png"):
display(Image(filename=os.path.join(output_folder, name)))
def download_best_models(store : ArchaiStore, blob_folder, output_folder):
""" Download the models listed in a results.json file """
results_file = os.path.join(output_folder, "results.json")
if os.path.isfile(results_file):
best_models = json.load(open(results_file, "r"))
        for id, model in best_models.items():
            print(f"{id}\t{model['archid']}\t{model['val_acc']}")
download_models(store, blob_folder, output_folder, list(best_models.keys()))
def copy_code_folder():
""" Copies the code folder into a separate folder. This is needed otherwise the pipeline will fail with
UserError: The code snapshot was modified in blob storage, which could indicate tampering.
If this was unintended, you can create a new snapshot for the run. To do so, edit any
content in the local source directory and resubmit the run.
"""
scripts_dir = os.path.dirname(os.path.abspath(__file__))
code_dir = 'temp_code'
if os.path.isdir(code_dir):
rmtree(code_dir) # make sure old files are gone!
os.makedirs(code_dir)
for path in glob(os.path.join(scripts_dir, '*.py')):
file = os.path.basename(path)
print(f"copying source file : {file} to {code_dir}")
copyfile(path, os.path.join(code_dir, file))
return code_dir
|
archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/utils.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/utils.py",
"repo_id": "archai",
"token_count": 906
}
| 359 |
Computer Vision
===============
.. toctree::
:maxdepth: 2
Dataset Provider <cv/cv_dataset_provider.ipynb>
    PyTorch-Lightning Trainer <cv/pl_trainer.ipynb>
|
archai/docs/getting_started/notebooks/cv.rst/0
|
{
"file_path": "archai/docs/getting_started/notebooks/cv.rst",
"repo_id": "archai",
"token_count": 66
}
| 360 |
<jupyter_start><jupyter_text>Creating NLP-based DataIn this notebook, we will use a dataset provider-based abstraction that interfaces with Hugging Face's `datasets`. Such a library provides access to a large number of NLP-based datasets, including text classification, question-answering, and language modeling, among others. Loading the DataThe first step is to create an instance of the `HfHubDatasetProvider`, which pre-loads the dataset and offers three methods to retrieve it: `get_train_dataset()`, `get_val_dataset()` and `get_test_dataset()`.Additionally, extra arguments can be passed to its constructor according to the user's needs:* `dataset_config_name`: Name of the dataset configuration.* `data_dir`: Path to the data directory.* `data_files`: Path(s) to the data file(s).* `cache_dir`: Path to the read/write cache directory.* `revision`: Version of the dataset to load.<jupyter_code>from archai.datasets.nlp.hf_dataset_provider import HfHubDatasetProvider
dataset_provider = HfHubDatasetProvider("glue", dataset_config_name="sst2")
# When loading `train_dataset`, we will override the split argument to only load 1%
# of the data and speed up its encoding
train_dataset = dataset_provider.get_train_dataset(split="train[:1%]")
val_dataset = dataset_provider.get_val_dataset()
print(train_dataset, val_dataset)<jupyter_output>Found cached dataset glue (C:/Users/gderosa/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
Found cached dataset glue (C:/Users/gderosa/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)<jupyter_text>Encoding the DataAfter loading the data, one might need to encode it with a tokenizer before applying it to an NLP-based task. Again, Archai offers a set of functions that ease the process.Inside the `archai.datasets.nlp.hf_dataset_provider_utils` module, the user can find different tokenization functions, such as:* `tokenize_dataset`: Tokenize a list of examples using a specified tokenizer.* `tokenize_contiguous_dataset`: Tokenize a list of examples using a specified tokenizer and with contiguous-length batches (no truncation nor padding).* `tokenize_nsp_dataset`: Tokenize a list of examples using a specified tokenizer and with next-sentence prediction (NSP).<jupyter_code>from transformers import AutoTokenizer
from archai.datasets.nlp.hf_dataset_provider_utils import tokenize_dataset
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
encoded_train_dataset = train_dataset.map(tokenize_dataset, batched=True, fn_kwargs={"tokenizer": tokenizer, "mapping_column_name": ["sentence"]})
encoded_val_dataset = val_dataset.map(tokenize_dataset, batched=True, fn_kwargs={"tokenizer": tokenizer, "mapping_column_name": ["sentence"]})
print(encoded_train_dataset, encoded_val_dataset)<jupyter_output>Loading cached processed dataset at C:\Users\gderosa\.cache\huggingface\datasets\glue\sst2\1.0.0\dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad\cache-c989f437f7c0d7ad.arrow
Loading cached processed dataset at C:\Users\gderosa\.cache\huggingface\datasets\glue\sst2\1.0.0\dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad\cache-31197ec623723cd1.arrow
|
archai/docs/getting_started/notebooks/nlp/hf_dataset_provider.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/nlp/hf_dataset_provider.ipynb",
"repo_id": "archai",
"token_count": 1163
}
| 361 |
Transforms
==========
Brightness
----------
.. automodule:: archai.datasets.cv.transforms.brightness
:members:
:undoc-members:
Custom Cutout
-------------
.. automodule:: archai.datasets.cv.transforms.custom_cutout
:members:
:undoc-members:
Lighting
--------
.. automodule:: archai.datasets.cv.transforms.lighting
:members:
:undoc-members:
|
archai/docs/reference/api/archai.datasets.cv.transforms.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.datasets.cv.transforms.rst",
"repo_id": "archai",
"token_count": 134
}
| 362 |
Natural Language Processing
===========================
.. toctree::
:maxdepth: 2
archai.discrete_search.search_spaces.nlp.tfpp
archai.discrete_search.search_spaces.nlp.transformer_flex
|
archai/docs/reference/api/archai.discrete_search.search_spaces.nlp.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.search_spaces.nlp.rst",
"repo_id": "archai",
"token_count": 67
}
| 363 |
Petridish
=========
Evaluater
---------
.. automodule:: archai.supergraph.algos.petridish.evaluater_petridish
:members:
:undoc-members:
Experiment Runner
-----------------
.. automodule:: archai.supergraph.algos.petridish.petridish_exp_runner
:members:
:undoc-members:
Model Description Builder
-------------------------
.. automodule:: archai.supergraph.algos.petridish.petridish_model_desc_builder
:members:
:undoc-members:
Operators
---------
.. automodule:: archai.supergraph.algos.petridish.petridish_op
:members:
:undoc-members:
Utilities
---------
.. automodule:: archai.supergraph.algos.petridish.petridish_utils
:members:
:undoc-members:
Searcher
--------
.. automodule:: archai.supergraph.algos.petridish.searcher_petridish
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.algos.petridish.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.algos.petridish.rst",
"repo_id": "archai",
"token_count": 299
}
| 364 |
Contact
=======
If you have any questions or feedback about the Archai project or the open problems in Neural Architecture Search (NAS), please feel free to contact us using the following information:
* Email: [email protected]
* Website: https://github.com/microsoft/archai/issues
We welcome any questions, feedback, or suggestions you may have and look forward to hearing from you.
Team
----
Archai has been created and maintained by `Shital Shah <https://shital.com>`_, `Debadeepta Dey <https://debadeepta.com>`_, `Gustavo de Rosa <https://www.microsoft.com/en-us/research/people/gderosa>`_, Caio Mendes, `Piero Kauffmann <https://www.microsoft.com/en-us/research/people/pkauffmann>`_, `Chris Lovett <https://lovettsoftware.com>`_, Allie Del Giorno, Mojan Javaheripi, and `Ofer Dekel <https://www.microsoft.com/en-us/research/people/oferd>`_ at Microsoft Research.
|
archai/docs/support/contact.rst/0
|
{
"file_path": "archai/docs/support/contact.rst",
"repo_id": "archai",
"token_count": 270
}
| 365 |
# Copyright (c) EleutherAI.
# Licensed under the MIT license.
# https://github.com/EleutherAI/lm-evaluation-harness/blob/master/main.py
from __future__ import annotations
from typing import Any, Optional
REQUEST_RETURN_LENGTHS = {
"generate": None,
"greedy_until": None,
"loglikelihood": 2,
"loglikelihood_rolling": None,
}
class Request:
def __init__(self, request_type: str, args: Any, index: Optional[int] = None) -> None:
if request_type not in REQUEST_RETURN_LENGTHS.keys():
raise NotImplementedError("The request type {} is not implemented!".format(request_type))
self.request_type = request_type
self.args = args
self.index = index
def __iter__(self) -> Request:
if REQUEST_RETURN_LENGTHS[self.request_type] is None:
raise IndexError("This request type does not return multiple arguments!")
for i in range(REQUEST_RETURN_LENGTHS[self.request_type]):
yield Request(self.request_type, self.args, i)
def __getitem__(self, i: int) -> Request:
if REQUEST_RETURN_LENGTHS[self.request_type] is None:
raise IndexError("This request type does not return multiple arguments!")
return Request(self.request_type, self.args, i)
def __eq__(self, other: Request) -> bool:
return self.request_type == other.request_type and self.args == other.args and self.index == other.index
def __repr__(self) -> str:
return f"Req_{self.request_type}{self.args}[{self.index}]\n"
class RequestFactory:
def __getattr__(self, attr: str) -> Request:
def fn(*args):
return Request(attr, args)
return fn
rf = RequestFactory()
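# Usage sketch: requests are created through attribute access on `rf`; request
# types with multiple return values (per REQUEST_RETURN_LENGTHS) can be indexed
# or iterated into per-index sub-requests.
if __name__ == "__main__":
    req = rf.loglikelihood("context", "continuation")
    print(req)           # Req_loglikelihood('context', 'continuation')[None]
    first, second = req  # 'loglikelihood' returns 2 values
    print(first.index, second.index)  # 0 1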
|
archai/research/lm_eval_harness/lm_eval_harness/utils/request_factory.py/0
|
{
"file_path": "archai/research/lm_eval_harness/lm_eval_harness/utils/request_factory.py",
"repo_id": "archai",
"token_count": 663
}
| 366 |
set -e -o xtrace
bash dist_main.sh --full --no-search --algos darts --datasets cifar10 --nas.eval.final_desc_filename confs/darts_modelsdarts_genotype.yaml --common.apex.min_world_size 2 --nas.eval.trainer.apex.enabled True
|
archai/scripts/supergraph/dist_test.sh/0
|
{
"file_path": "archai/scripts/supergraph/dist_test.sh",
"repo_id": "archai",
"token_count": 84
}
| 367 |
import logging
import statistics
from archai.supergraph.algos.nasbench101.nasbench101_dataset import Nasbench101Dataset
def main():
logging.getLogger().setLevel(logging.DEBUG)
# create dataset
nsds = Nasbench101Dataset("~/dataroot/nasbench_ds/nasbench_full.pkl")
vars = [
statistics.variance(statistics.mean(nsds.get_test_acc(i, epochs=e)) for e in Nasbench101Dataset.VALID_EPOCHS)
for i in range(len(nsds))
]
bad_archs = list((i, v) for i, v in enumerate(vars) if v < 0.01)
print(bad_archs)
if __name__ == "__main__":
main()
|
archai/scripts/supergraph/nasbench101/bad_data.py/0
|
{
"file_path": "archai/scripts/supergraph/nasbench101/bad_data.py",
"repo_id": "archai",
"token_count": 242
}
| 368 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import pathlib
import subprocess
import sys
try:
from runstats import Statistics
except:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'runstats'])
from runstats import Statistics
def main():
parser = argparse.ArgumentParser(description="NAS E2E Runs")
parser.add_argument("--logdir", type=str, default="D:\\logdir\\azure\\random_cifar_test", help="folder with logs")
args, extra_args = parser.parse_known_args()
lines = []
top1s = []
for filepath in pathlib.Path(args.logdir).rglob("logs.log"):
epoch = 0
for line in pathlib.Path(filepath).read_text().splitlines():
if "[eval_test] Epoch: [ 1/1] " in line:
                if epoch >= len(top1s):
                    top1s.append(Statistics())
top1 = float(line.strip().split("(")[-1].split(",")[0].split("%")[0].strip()) / 100.0
lines.append(f"{epoch}\t{top1}\t{str(filepath)}")
top1s[epoch].push(top1)
epoch += 1
pathlib.Path(os.path.join(args.logdir, "summary.tsv")).write_text("\n".join(lines))
stat_lines = ["epoch\tmean\tstddev\tcount"]
for i, top1 in enumerate(top1s):
stat_lines.append(f'{i}\t{top1.mean()}\t{top1.stddev() if len(top1)>1 else float("NaN")}\t{len(top1)}')
pathlib.Path(os.path.join(args.logdir, "summary_stats.tsv")).write_text("\n".join(stat_lines))
if __name__ == "__main__":
main()
|
archai/scripts/supergraph/reports/old_logs.py/0
|
{
"file_path": "archai/scripts/supergraph/reports/old_logs.py",
"repo_id": "archai",
"token_count": 651
}
| 369 |
[flake8]
ignore = E111,E402,E722,W503,W504,F405,F403
max-line-length = 120
|
archai/tasks/face_segmentation/aml/.flake8/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/.flake8",
"repo_id": "archai",
"token_count": 37
}
| 370 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
from archai.common.store import ArchaiStore
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
def upload(con_str, experiment_name, args):
parser = argparse.ArgumentParser(description='Upload a named model (and optional accompanying files) to your ' +
'azure blob store')
parser.add_argument('name', help='Friendly name of the folder to put this in.')
parser.add_argument('file', help='Path to the file to upload to Azure ' +
'or a folder to upload all files in that folder to the same azure blob folder.')
parser.add_argument('--priority', type=int, help='Optional priority override for this job. ' +
'Larger numbers mean lower priority')
parser.add_argument('--reset', help='Reset stats for the model if it exists already.', action="store_true")
args = parser.parse_args(args)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
    store.upload(f'{experiment_name}/{args.name}', args.file, args.reset, priority=args.priority)
if __name__ == '__main__':
experiment_name = os.getenv("EXPERIMENT_NAME", "facesynthetics")
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
    upload(con_str, experiment_name, sys.argv[1:])
|
archai/tasks/face_segmentation/aml/azure/upload.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/upload.py",
"repo_id": "archai",
"token_count": 565
}
| 371 |
conda activate snap
pushd $SNPE_ROOT
source bin/envsetup.sh -o ~/anaconda3/envs/snap/lib/python3.6/site-packages/onnx
popd
|
archai/tasks/face_segmentation/aml/snpe/snpe_setup.sh/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/snpe/snpe_setup.sh",
"repo_id": "archai",
"token_count": 52
}
| 372 |
from typing import Optional, List, Dict
import torch
@torch.no_grad()
def get_confusion_matrix(pred_labels: torch.LongTensor,
true_labels: torch.LongTensor,
num_labels: int, ignore_index: int = 255) -> torch.LongTensor:
pred_labels, true_labels = pred_labels.view(-1), true_labels.view(-1)
ignore_mask = (true_labels == ignore_index)
pred_labels, true_labels = pred_labels[~ignore_mask], true_labels[~ignore_mask]
confusion_matrix = num_labels * true_labels + pred_labels
return torch.bincount(confusion_matrix, minlength=num_labels**2).reshape(num_labels, num_labels)
@torch.no_grad()
def get_iou(confusion_matrix: torch.LongTensor,
ignore_labels: Optional[List[int]] = None) -> Dict[str, torch.Tensor]:
ignore_labels = ignore_labels or []
ignore_labels = torch.isin(
torch.arange(len(confusion_matrix)), torch.tensor(ignore_labels)
)
diag = confusion_matrix.diag()
row_sum = confusion_matrix.sum(dim=1)
col_sum = confusion_matrix.sum(dim=0)
class_iou = (diag + 1e-7) / (row_sum + col_sum - diag + 1e-7)
return {
'class_iou': class_iou,
'mIOU': class_iou[~ignore_labels].mean(),
}
@torch.no_grad()
def get_f1_scores(confusion_matrix: torch.LongTensor,
ignore_labels: Optional[List[int]] = None) -> Dict[str, torch.Tensor]:
ignore_labels = ignore_labels or []
ignore_labels = torch.isin(
torch.arange(len(confusion_matrix)), torch.tensor(ignore_labels)
)
recall = confusion_matrix.diag() / (confusion_matrix.sum(dim=1) + 1e-7)
prec = confusion_matrix.diag() / (confusion_matrix.sum(dim=0) + 1e-7)
class_f1 = 2 * prec * recall / (prec + recall + 1e-7)
support = confusion_matrix.sum(dim=1)[~ignore_labels]
return {
'class_f1': class_f1,
'macro_f1': class_f1[~ignore_labels].mean(),
'weighted_f1': (class_f1[~ignore_labels] * support).sum() / support.sum()
}
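# Usage sketch on random predictions (3 classes, default ignore_index; class 0
# is excluded from the means purely for illustration):
if __name__ == '__main__':
    preds = torch.randint(0, 3, (2, 8, 8))
    labels = torch.randint(0, 3, (2, 8, 8))
    cm = get_confusion_matrix(preds, labels, num_labels=3)
    print(get_iou(cm, ignore_labels=[0])['mIOU'])
    print(get_f1_scores(cm, ignore_labels=[0])['macro_f1'])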
|
archai/tasks/face_segmentation/training/metrics.py/0
|
{
"file_path": "archai/tasks/face_segmentation/training/metrics.py",
"repo_id": "archai",
"token_count": 900
}
| 373 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import csv
import subprocess
"""Train the models that are in the pareto front"""
# Please change the following variables to your own path
data_dir = "face_synthetics/dataset_100000"
output_dir = "./output"
csv_file = "search_results.csv"
# Read the search results and pick the models in the pareto front
pareto_archids = []
search_results = []
with open(csv_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
search_results.append(row)
if row["is_pareto"] == "True":
pareto_archids.append(row["archid"])
print(f"Models to be trained: {pareto_archids}")
# Train the models with subprocess call
training_accuracy = {}
num_epochs = 100
for arch_id in pareto_archids:
print(f"Training model with arch_id: {arch_id}")
cmd = [
"torchrun",
"--nproc_per_node=4",
"train.py",
"--data-path",
data_dir,
"--output_dir",
output_dir,
"--search_result_archid",
arch_id,
"--search_result_csv",
csv_file,
"--train-crop-size",
"128",
"--epochs",
str(num_epochs),
"--batch-size",
"32",
"--lr",
"0.001",
"--opt",
"adamw",
"--lr-scheduler",
"steplr",
"--lr-step-size",
"100",
"--lr-gamma",
"0.5",
"-wd",
"0.00001",
]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
val_errors = []
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.strip())
if output.startswith("Test:"):
if "Error" in output:
error_str = output.split()[-1]
val_error = float(error_str)
val_errors.append(val_error)
result = process.poll()
assert val_errors and len(val_errors) != 0 # should have at least one error
training_accuracy[arch_id] = val_errors[-1]
# Merge training accuracy to search_results
merged_data = []
for row in search_results:
arch_id = row["archid"]
if arch_id in training_accuracy:
row["Full_Training_Validation_Error"] = training_accuracy[arch_id]
else:
row["Full_training_Validation_Error"] = ""
merged_data.append(row)
# Write to csv
fieldnames = search_results[0].keys()
with open("search_results_with_full_validation_error.csv", "w", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in merged_data:
writer.writerow(row)
|
archai/tasks/facial_landmark_detection/train_candidate_models.py/0
|
{
"file_path": "archai/tasks/facial_landmark_detection/train_candidate_models.py",
"repo_id": "archai",
"token_count": 1265
}
| 374 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from archai.common.config import Config
def test_config():
# Asserts that it can load keys from a YAML file
config_filepath = "config.yaml"
with open(config_filepath, "w") as f:
f.write("test_key: test_value")
config = Config(config_filepath=config_filepath)
assert config["test_key"] == "test_value"
os.remove(config_filepath)
|
archai/tests/common/test_config.py/0
|
{
"file_path": "archai/tests/common/test_config.py",
"repo_id": "archai",
"token_count": 159
}
| 375 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
from overrides import overrides
from archai.datasets.nlp.tokenizer_utils.token_config import SpecialTokenEnum
from archai.datasets.nlp.tokenizer_utils.tokenizer_base import TokenizerBase
@pytest.fixture
def tokenizer_base():
class Tokenizer(TokenizerBase):
def __init__(self):
self.is_trained_value = False
def __len__(self):
return 100
@overrides
def train(self, filepaths):
self.is_trained_value = True
@overrides
def is_trained(self):
return self.is_trained_value
@overrides
def load(self):
self.is_trained_value = True
@overrides
def encode_text(self, text):
return [1, 2, 3]
@overrides
def decode_text(self, ids):
return "decoded"
@overrides
def special_token_id(self, sp):
if sp == SpecialTokenEnum.BOS:
return 1
if sp == SpecialTokenEnum.EOS:
return 2
if sp == SpecialTokenEnum.UNK:
return 3
if sp == SpecialTokenEnum.PAD:
return 4
return None
@overrides
def token_to_id(self, t):
return 5
@overrides
def id_to_token(self, id):
return "token"
return Tokenizer()
def test_tokenizer_base_len(tokenizer_base):
assert len(tokenizer_base) == 100
def test_tokenizer_base_train(tokenizer_base):
tokenizer_base.train(["file1", "file2"])
assert tokenizer_base.is_trained() is True
def test_tokenizer_base_load(tokenizer_base):
tokenizer_base.load()
assert tokenizer_base.is_trained() is True
def test_tokenizer_base_encode_text(tokenizer_base):
assert tokenizer_base.encode_text("test") == [1, 2, 3]
def test_tokenizer_base_decode_text(tokenizer_base):
assert tokenizer_base.decode_text([1, 2, 3]) == "decoded"
def test_tokenizer_base_special_token_id(tokenizer_base):
assert tokenizer_base.special_token_id(SpecialTokenEnum.BOS) == 1
assert tokenizer_base.special_token_id(SpecialTokenEnum.EOS) == 2
assert tokenizer_base.special_token_id(SpecialTokenEnum.UNK) == 3
assert tokenizer_base.special_token_id(SpecialTokenEnum.PAD) == 4
assert tokenizer_base.special_token_id("invalid") is None
def test_tokenizer_base_token_to_id(tokenizer_base):
assert tokenizer_base.token_to_id("test") == 5
def test_tokenizer_base_id_to_token(tokenizer_base):
assert tokenizer_base.id_to_token(5) == "token"
|
archai/tests/datasets/nlp/tokenizer_utils/test_tokenizer_base.py/0
|
{
"file_path": "archai/tests/datasets/nlp/tokenizer_utils/test_tokenizer_base.py",
"repo_id": "archai",
"token_count": 1169
}
| 376 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from overrides import overrides
from archai.discrete_search.api.search_results import SearchResults
from archai.discrete_search.api.searcher import Searcher
class MySearcher(Searcher):
def __init__(self) -> None:
super().__init__()
@overrides
def search(self) -> SearchResults:
return SearchResults(None, None)
def test_searcher():
searcher = MySearcher()
# Assert that mocked method return a `SearchResults`
search_results = searcher.search()
assert isinstance(search_results, SearchResults)
|
archai/tests/discrete_search/api/test_searcher.py/0
|
{
"file_path": "archai/tests/discrete_search/api/test_searcher.py",
"repo_id": "archai",
"token_count": 201
}
| 377 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
import torch
from transformers import PretrainedConfig
from archai.onnx.config_utils.gpt2_onnx_config import GPT2FlexOnnxConfig, GPT2OnnxConfig
@pytest.fixture
def dummy_config_gpt2():
class DummyConfig(PretrainedConfig):
max_position_embeddings = 16
hidden_size = 32
n_layer = 3
num_attention_heads = 4
return DummyConfig()
@pytest.fixture
def dummy_config_gpt2_flex():
class DummyConfig(PretrainedConfig):
max_position_embeddings = 16
hidden_size = 32
n_layer = 3
num_attention_heads = [4, 4, 4]
return DummyConfig()
def test_gpt2_onnx_config(dummy_config_gpt2):
# Assert that default values are set correctly
gpt2_onnx_config = GPT2OnnxConfig(dummy_config_gpt2)
assert gpt2_onnx_config.num_layers == 3
assert gpt2_onnx_config.is_ort_graph_optimizable is True
assert gpt2_onnx_config.ort_graph_optimizer_args == (4, 32)
def test_gpt2_flex_onnx_config(dummy_config_gpt2_flex):
gpt2_flex_onnx_config = GPT2FlexOnnxConfig(dummy_config_gpt2_flex, use_past=True)
# Assert that default values are set correctly
assert gpt2_flex_onnx_config.num_layers == 3
assert gpt2_flex_onnx_config.is_ort_graph_optimizable is True
assert gpt2_flex_onnx_config.ort_graph_optimizer_args == (4, 32)
# Assert that dummy inputs are generated correctly
inputs = gpt2_flex_onnx_config.generate_dummy_inputs(batch_size=3, seq_len=4, past_seq_len=2)
assert torch.equal(inputs["input_ids"], torch.zeros((3, 4), dtype=torch.long))
assert torch.equal(inputs["past_key_values"][0], torch.zeros((2, 3, 4, 2, 8)))
assert torch.equal(inputs["past_key_values"][1], torch.zeros((2, 3, 4, 2, 8)))
assert torch.equal(inputs["past_key_values"][2], torch.zeros((2, 3, 4, 2, 8)))
|
archai/tests/onnx/config_utils/test_gpt2_onnx_config.py/0
|
{
"file_path": "archai/tests/onnx/config_utils/test_gpt2_onnx_config.py",
"repo_id": "archai",
"token_count": 791
}
| 378 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from archai.quantization.quantizers import FakeDynamicQuant
def test_fake_dynamic_quant():
x = torch.randn(4)
# Assert the quint8 quantization type with 8-bit
fake_quant = FakeDynamicQuant(dtype=torch.quint8, bits=8)
y = fake_quant(x)
assert y.dtype == torch.float32
assert torch.equal(
y,
torch.fake_quantize_per_tensor_affine(
x, fake_quant._scale, fake_quant._zero_pointer, fake_quant.qmin, fake_quant.qmax
),
)
# Assert the qint8 quantization type with 8-bit
fake_quant = FakeDynamicQuant(dtype=torch.qint8, bits=8)
y = fake_quant(x)
assert y.dtype == torch.float32
assert torch.equal(
y,
torch.fake_quantize_per_tensor_affine(
x, fake_quant._scale, fake_quant._zero_pointer, fake_quant.qmin, fake_quant.qmax
),
)
# Assert the quint8 quantization type with 16-bit
fake_quant = FakeDynamicQuant(dtype=torch.quint8, bits=16)
y = fake_quant(x)
assert y.dtype == torch.float32
assert torch.equal(
y,
torch.fake_quantize_per_tensor_affine(
x, fake_quant._scale, fake_quant._zero_pointer, fake_quant.qmin, fake_quant.qmax
),
)
# Assert the qint8 quantization type with 16-bit
fake_quant = FakeDynamicQuant(dtype=torch.qint8, bits=16)
y = fake_quant(x)
assert y.dtype == torch.float32
assert torch.equal(
y,
torch.fake_quantize_per_tensor_affine(
x, fake_quant._scale, fake_quant._zero_pointer, fake_quant.qmin, fake_quant.qmax
),
)
# Assert the `onnx_compatible` option for 8-bit
fake_quant = FakeDynamicQuant(dtype=torch.quint8, bits=8, onnx_compatible=True)
y = fake_quant(x)
assert y.dtype == torch.float32
assert torch.equal(
y,
torch.fake_quantize_per_tensor_affine(
x, fake_quant._scale, fake_quant._zero_pointer, fake_quant.qmin, fake_quant.qmax
),
)
fake_quant = FakeDynamicQuant(dtype=torch.qint8, bits=8, onnx_compatible=True)
y = fake_quant(x)
assert y.dtype == torch.float32
assert torch.equal(
y,
torch.fake_quantize_per_tensor_affine(
x, fake_quant._scale, fake_quant._zero_pointer, fake_quant.qmin, fake_quant.qmax
),
)
|
archai/tests/quantization/test_quantizers.py/0
|
{
"file_path": "archai/tests/quantization/test_quantizers.py",
"repo_id": "archai",
"token_count": 1061
}
| 379 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
import torch
from archai.trainers.lamb_optimizer import JITLamb, Lamb
def test_lamb_init():
# Assert default parameter values
lamb = Lamb([torch.randn(10, 5)])
assert lamb.param_groups[0]["lr"] == 1e-3
assert lamb.param_groups[0]["betas"] == (0.9, 0.999)
assert lamb.param_groups[0]["eps"] == 1e-6
assert lamb.param_groups[0]["weight_decay"] == 0.0
assert lamb.adam is False
# Assert custom parameter values
lamb = Lamb([torch.randn(10, 5)], lr=0.5, betas=(0.8, 0.99), eps=1e-5, weight_decay=0.1, adam=True)
assert lamb.param_groups[0]["lr"] == 0.5
assert lamb.param_groups[0]["betas"] == (0.8, 0.99)
assert lamb.param_groups[0]["eps"] == 1e-5
assert lamb.param_groups[0]["weight_decay"] == 0.1
assert lamb.adam is True
# Assert invalid learning rate
with pytest.raises(ValueError):
Lamb([torch.randn(10, 5)], lr=-0.5)
# Assert invalid epsilon value
with pytest.raises(ValueError):
Lamb([torch.randn(10, 5)], eps=-1e-5)
# Assert invalid beta parameter at index 0
with pytest.raises(ValueError):
Lamb([torch.randn(10, 5)], betas=(-0.5, 0.99))
# Assert invalid beta parameter at index 1
with pytest.raises(ValueError):
Lamb([torch.randn(10, 5)], betas=(0.8, 1.5))
def test_lamb_step():
# Assert with closure
def closure():
return torch.tensor(1.0)
lamb = Lamb([torch.randn(10, 5)])
loss = lamb.step(closure)
assert loss.item() == 1.0
# Assert without closure
lamb = Lamb([torch.randn(10, 5)])
loss = lamb.step()
assert loss is None
def test_jit_lamb_init():
# Assert default parameter values
jit_lamb = JITLamb([torch.randn(10, 5)])
assert jit_lamb.param_groups[0]["lr"] == 1e-3
assert jit_lamb.param_groups[0]["betas"] == (0.9, 0.999)
assert jit_lamb.param_groups[0]["eps"] == 1e-6
assert jit_lamb.param_groups[0]["weight_decay"] == 0.0
assert jit_lamb.adam is False
# Assert custom parameter values
jit_lamb = JITLamb([torch.randn(10, 5)], lr=0.5, betas=(0.8, 0.99), eps=1e-5, weight_decay=0.1, adam=True)
assert jit_lamb.param_groups[0]["lr"] == 0.5
assert jit_lamb.param_groups[0]["betas"] == (0.8, 0.99)
assert jit_lamb.param_groups[0]["eps"] == 1e-5
assert jit_lamb.param_groups[0]["weight_decay"] == 0.1
assert jit_lamb.adam is True
# Assert invalid learning rate
with pytest.raises(ValueError):
JITLamb([torch.randn(10, 5)], lr=-0.5)
# Assert invalid epsilon value
with pytest.raises(ValueError):
JITLamb([torch.randn(10, 5)], eps=-1e-5)
# Assert invalid beta parameter at index 0
with pytest.raises(ValueError):
JITLamb([torch.randn(10, 5)], betas=(-0.5, 0.99))
# Assert invalid beta parameter at index 1
with pytest.raises(ValueError):
JITLamb([torch.randn(10, 5)], betas=(0.8, 1.5))
def test_jit_lamb_step():
# Assert with closure
def closure():
return torch.tensor(1.0)
jit_lamb = JITLamb([torch.randn(10, 5)])
loss = jit_lamb.step(closure)
assert loss.item() == 1.0
# Assert without closure
jit_lamb = JITLamb([torch.randn(10, 5)])
loss = jit_lamb.step()
assert loss is None
|
archai/tests/trainers/test_lamb_optimizer.py/0
|
{
"file_path": "archai/tests/trainers/test_lamb_optimizer.py",
"repo_id": "archai",
"token_count": 1482
}
| 380 |
[](https://github.com/microsoft/azure-devops-python-api/actions)
[](https://pypi.python.org/pypi/azure-devops)
# Azure DevOps Python API
This repository contains Python APIs for interacting with and managing Azure DevOps. These APIs power the Azure DevOps Extension for Azure CLI. To learn more about the Azure DevOps Extension for Azure CLI, visit the [Microsoft/azure-devops-cli-extension](https://github.com/Microsoft/azure-devops-cli-extension) repo.
## Install
```
pip install azure-devops
```
## Get started
To use the API, establish a connection using a [personal access token](https://docs.microsoft.com/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=vsts) and the URL to your Azure DevOps organization. Then get a client from the connection and make API calls.
```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
import pprint
# Fill in with your personal access token and org URL
personal_access_token = 'YOURPAT'
organization_url = 'https://dev.azure.com/YOURORG'
# Create a connection to the org
credentials = BasicAuthentication('', personal_access_token)
connection = Connection(base_url=organization_url, creds=credentials)
# Get a client (the "core" client provides access to projects, teams, etc)
core_client = connection.clients.get_core_client()
# Get the first page of projects
get_projects_response = core_client.get_projects()
index = 0
while get_projects_response is not None:
for project in get_projects_response.value:
pprint.pprint("[" + str(index) + "] " + project.name)
index += 1
if get_projects_response.continuation_token is not None and get_projects_response.continuation_token != "":
# Get the next page of projects
get_projects_response = core_client.get_projects(continuation_token=get_projects_response.continuation_token)
else:
# All projects have been retrieved
get_projects_response = None
```
## API documentation
This Python library provides a thin wrapper around the Azure DevOps REST APIs. See the [Azure DevOps REST API reference](https://docs.microsoft.com/en-us/rest/api/azure/devops/?view=azure-devops-rest-5.1) for details on calling different APIs.
## Samples
Learn how to call different APIs by viewing the samples in the [Microsoft/azure-devops-python-samples](https://github.com/Microsoft/azure-devops-python-samples) repo.
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
|
azure-devops-python-api/README.md/0
|
{
"file_path": "azure-devops-python-api/README.md",
"repo_id": "azure-devops-python-api",
"token_count": 1027
}
| 381 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.accounts import models
class AccountsClient(Client):
"""Accounts
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(AccountsClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '0d55247a-1c47-4462-9b1f-5e2125590ee6'
def get_accounts(self, owner_id=None, member_id=None, properties=None):
"""GetAccounts.
Get a list of accounts for a specific owner or a specific member. One of the following parameters is required: ownerId, memberId.
:param str owner_id: ID for the owner of the accounts.
:param str member_id: ID for a member of the accounts.
:param str properties:
:rtype: [Account]
"""
query_parameters = {}
if owner_id is not None:
query_parameters['ownerId'] = self._serialize.query('owner_id', owner_id, 'str')
if member_id is not None:
query_parameters['memberId'] = self._serialize.query('member_id', member_id, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='229a6a53-b428-4ffb-a835-e8f36b5b4b1e',
version='7.0',
query_parameters=query_parameters)
return self._deserialize('[Account]', self._unwrap_collection(response))
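# Example usage (illustrative sketch, not part of the generated client; the
# connection setup and '<member-guid>' placeholder are assumptions):
#
#   from azure.devops.connection import Connection
#   from msrest.authentication import BasicAuthentication
#
#   connection = Connection(base_url='https://dev.azure.com/YOURORG',
#                           creds=BasicAuthentication('', 'YOURPAT'))
#   accounts_client = connection.clients.get_accounts_client()
#   for account in accounts_client.get_accounts(member_id='<member-guid>'):
#       print(account.account_name)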
|
azure-devops-python-api/azure-devops/azure/devops/released/accounts/accounts_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/accounts/accounts_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 840
}
| 382 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.graph import models
class GraphClient(Client):
"""Graph
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(GraphClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'bb1e7ec9-e901-4b68-999a-de7012b920f8'
def delete_avatar(self, subject_descriptor):
"""DeleteAvatar.
:param str subject_descriptor:
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
self._send(http_method='DELETE',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='7.0',
route_values=route_values)
def get_avatar(self, subject_descriptor, size=None, format=None):
"""GetAvatar.
:param str subject_descriptor:
:param str size:
:param str format:
:rtype: :class:`<Avatar> <azure.devops.v7_0.graph.models.Avatar>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
query_parameters = {}
if size is not None:
query_parameters['size'] = self._serialize.query('size', size, 'str')
if format is not None:
query_parameters['format'] = self._serialize.query('format', format, 'str')
response = self._send(http_method='GET',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Avatar', response)
def set_avatar(self, avatar, subject_descriptor):
"""SetAvatar.
:param :class:`<Avatar> <azure.devops.v7_0.graph.models.Avatar>` avatar:
:param str subject_descriptor:
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
content = self._serialize.body(avatar, 'Avatar')
self._send(http_method='PUT',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='7.0',
route_values=route_values,
content=content)
def get_descriptor(self, storage_key):
"""GetDescriptor.
Resolve a storage key to a descriptor
:param str storage_key: Storage key of the subject (user, group, scope, etc.) to resolve
:rtype: :class:`<GraphDescriptorResult> <azure.devops.v7_0.graph.models.GraphDescriptorResult>`
"""
route_values = {}
if storage_key is not None:
route_values['storageKey'] = self._serialize.url('storage_key', storage_key, 'str')
response = self._send(http_method='GET',
location_id='048aee0a-7072-4cde-ab73-7af77b1e0b4e',
version='7.0',
route_values=route_values)
return self._deserialize('GraphDescriptorResult', response)
def get_provider_info(self, user_descriptor):
"""GetProviderInfo.
:param str user_descriptor:
:rtype: :class:`<GraphProviderInfo> <azure.devops.v7_0.graph.models.GraphProviderInfo>`
"""
route_values = {}
if user_descriptor is not None:
route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
response = self._send(http_method='GET',
location_id='1e377995-6fa2-4588-bd64-930186abdcfa',
version='7.0',
route_values=route_values)
return self._deserialize('GraphProviderInfo', response)
def get_storage_key(self, subject_descriptor):
"""GetStorageKey.
Resolve a descriptor to a storage key.
:param str subject_descriptor:
:rtype: :class:`<GraphStorageKeyResult> <azure.devops.v7_0.graph.models.GraphStorageKeyResult>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
response = self._send(http_method='GET',
location_id='eb85f8cc-f0f6-4264-a5b1-ffe2e4d4801f',
version='7.0',
route_values=route_values)
return self._deserialize('GraphStorageKeyResult', response)
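# Example usage (illustrative sketch, not part of the generated client; assumes
# an authenticated Connection and a '<subject-descriptor>' placeholder):
#
#   graph_client = connection.clients.get_graph_client()
#   key_result = graph_client.get_storage_key('<subject-descriptor>')
#   descriptor_result = graph_client.get_descriptor(key_result.value)
#   print(descriptor_result.value)  # round-trips back to the descriptor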
|
azure-devops-python-api/azure-devops/azure/devops/released/graph/graph_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/graph/graph_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 2538
}
| 383 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.policy import models
class PolicyClient(Client):
"""Policy
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(PolicyClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'fb13a388-40dd-4a04-b530-013a739c72ef'
def create_policy_configuration(self, configuration, project, configuration_id=None):
"""CreatePolicyConfiguration.
Create a policy configuration of a given policy type.
:param :class:`<PolicyConfiguration> <azure.devops.v7_0.policy.models.PolicyConfiguration>` configuration: The policy configuration to create.
:param str project: Project ID or project name
:param int configuration_id:
:rtype: :class:`<PolicyConfiguration> <azure.devops.v7_0.policy.models.PolicyConfiguration>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if configuration_id is not None:
route_values['configurationId'] = self._serialize.url('configuration_id', configuration_id, 'int')
content = self._serialize.body(configuration, 'PolicyConfiguration')
response = self._send(http_method='POST',
location_id='dad91cbe-d183-45f8-9c6e-9c1164472121',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('PolicyConfiguration', response)
def delete_policy_configuration(self, project, configuration_id):
"""DeletePolicyConfiguration.
Delete a policy configuration by its ID.
:param str project: Project ID or project name
:param int configuration_id: ID of the policy configuration to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if configuration_id is not None:
route_values['configurationId'] = self._serialize.url('configuration_id', configuration_id, 'int')
self._send(http_method='DELETE',
location_id='dad91cbe-d183-45f8-9c6e-9c1164472121',
version='7.0',
route_values=route_values)
def get_policy_configuration(self, project, configuration_id):
"""GetPolicyConfiguration.
Get a policy configuration by its ID.
:param str project: Project ID or project name
:param int configuration_id: ID of the policy configuration
:rtype: :class:`<PolicyConfiguration> <azure.devops.v7_0.policy.models.PolicyConfiguration>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if configuration_id is not None:
route_values['configurationId'] = self._serialize.url('configuration_id', configuration_id, 'int')
response = self._send(http_method='GET',
location_id='dad91cbe-d183-45f8-9c6e-9c1164472121',
version='7.0',
route_values=route_values)
return self._deserialize('PolicyConfiguration', response)
def get_policy_configurations(self, project, scope=None, top=None, continuation_token=None, policy_type=None):
"""GetPolicyConfigurations.
Get a list of policy configurations in a project.
:param str project: Project ID or project name
:param str scope: [Provided for legacy reasons] The scope on which a subset of policies is defined.
:param int top: Maximum number of policies to return.
:param str continuation_token: The continuation token used for pagination.
:param str policy_type: Filter returned policies to only this type
:rtype: :class:`<[PolicyConfiguration]> <azure.devops.v7_0.policy.models.[PolicyConfiguration]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if scope is not None:
query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if policy_type is not None:
query_parameters['policyType'] = self._serialize.query('policy_type', policy_type, 'str')
response = self._send(http_method='GET',
location_id='dad91cbe-d183-45f8-9c6e-9c1164472121',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[PolicyConfiguration]', self._unwrap_collection(response))
def update_policy_configuration(self, configuration, project, configuration_id):
"""UpdatePolicyConfiguration.
Update a policy configuration by its ID.
:param :class:`<PolicyConfiguration> <azure.devops.v7_0.policy.models.PolicyConfiguration>` configuration: The policy configuration to update.
:param str project: Project ID or project name
:param int configuration_id: ID of the existing policy configuration to be updated.
:rtype: :class:`<PolicyConfiguration> <azure.devops.v7_0.policy.models.PolicyConfiguration>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if configuration_id is not None:
route_values['configurationId'] = self._serialize.url('configuration_id', configuration_id, 'int')
content = self._serialize.body(configuration, 'PolicyConfiguration')
response = self._send(http_method='PUT',
location_id='dad91cbe-d183-45f8-9c6e-9c1164472121',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('PolicyConfiguration', response)
def get_policy_configuration_revision(self, project, configuration_id, revision_id):
"""GetPolicyConfigurationRevision.
Retrieve a specific revision of a given policy by ID.
:param str project: Project ID or project name
:param int configuration_id: The policy configuration ID.
:param int revision_id: The revision ID.
:rtype: :class:`<PolicyConfiguration> <azure.devops.v7_0.policy.models.PolicyConfiguration>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if configuration_id is not None:
route_values['configurationId'] = self._serialize.url('configuration_id', configuration_id, 'int')
if revision_id is not None:
route_values['revisionId'] = self._serialize.url('revision_id', revision_id, 'int')
response = self._send(http_method='GET',
location_id='fe1e68a2-60d3-43cb-855b-85e41ae97c95',
version='7.0',
route_values=route_values)
return self._deserialize('PolicyConfiguration', response)
def get_policy_configuration_revisions(self, project, configuration_id, top=None, skip=None):
"""GetPolicyConfigurationRevisions.
Retrieve all revisions for a given policy.
:param str project: Project ID or project name
:param int configuration_id: The policy configuration ID.
:param int top: The number of revisions to retrieve.
:param int skip: The number of revisions to ignore. For example, to retrieve results 101-150, set top to 50 and skip to 100.
:rtype: [PolicyConfiguration]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if configuration_id is not None:
route_values['configurationId'] = self._serialize.url('configuration_id', configuration_id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='fe1e68a2-60d3-43cb-855b-85e41ae97c95',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[PolicyConfiguration]', self._unwrap_collection(response))
def get_policy_type(self, project, type_id):
"""GetPolicyType.
Retrieve a specific policy type by ID.
:param str project: Project ID or project name
:param str type_id: The policy ID.
:rtype: :class:`<PolicyType> <azure.devops.v7_0.policy.models.PolicyType>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type_id is not None:
route_values['typeId'] = self._serialize.url('type_id', type_id, 'str')
response = self._send(http_method='GET',
location_id='44096322-2d3d-466a-bb30-d1b7de69f61f',
version='7.0',
route_values=route_values)
return self._deserialize('PolicyType', response)
def get_policy_types(self, project):
"""GetPolicyTypes.
Retrieve all available policy types.
:param str project: Project ID or project name
:rtype: [PolicyType]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='44096322-2d3d-466a-bb30-d1b7de69f61f',
version='7.0',
route_values=route_values)
return self._deserialize('[PolicyType]', self._unwrap_collection(response))
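# Example usage (illustrative sketch, not part of the generated client; assumes
# an authenticated Connection and a placeholder project name):
#
#   policy_client = connection.clients.get_policy_client()
#   for policy_type in policy_client.get_policy_types('MyProject'):
#       print(policy_type.id, policy_type.display_name)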
|
azure-devops-python-api/azure-devops/azure/devops/released/policy/policy_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/policy/policy_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 4773
}
| 384 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.symbol import models
class SymbolClient(Client):
"""Symbol
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(SymbolClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'af607f94-69ba-4821-8159-f04e37b66350'
def check_availability(self):
"""CheckAvailability.
        Check the availability of the symbol service. This includes checking for the feature flag, and possibly a license in the future. Note this is NOT an anonymous endpoint, and the caller will be redirected to authentication before hitting it.
"""
self._send(http_method='GET',
location_id='97c893cc-e861-4ef4-8c43-9bad4a963dee',
version='7.0')
def get_client(self, client_type):
"""GetClient.
Get the client package.
:param str client_type: Either "EXE" for a zip file containing a Windows symbol client (a.k.a. symbol.exe) along with dependencies, or "TASK" for a VSTS task that can be run on a VSTS build agent. All the other values are invalid. The parameter is case-insensitive.
:rtype: object
"""
route_values = {}
if client_type is not None:
route_values['clientType'] = self._serialize.url('client_type', client_type, 'str')
response = self._send(http_method='GET',
location_id='79c83865-4de3-460c-8a16-01be238e0818',
version='7.0',
route_values=route_values)
return self._deserialize('object', response)
def head_client(self):
"""HeadClient.
Get client version information.
"""
self._send(http_method='HEAD',
location_id='79c83865-4de3-460c-8a16-01be238e0818',
version='7.0')
def get_debug_entry_content(self, request_id, debug_entry_id):
"""GetDebugEntryContent.
Get a stitched debug entry for a symbol request as specified by symbol request identifier and debug entry identifier.
:param str request_id: The symbol request identifier.
:param str debug_entry_id: The debug entry identifier.
"""
route_values = {}
if request_id is not None:
route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
if debug_entry_id is not None:
route_values['debugEntryId'] = self._serialize.url('debug_entry_id', debug_entry_id, 'str')
self._send(http_method='GET',
location_id='0f98d9f5-caf7-44fd-a5c9-55f3a9b34399',
version='7.0',
route_values=route_values)
def create_requests(self, request_to_create):
"""CreateRequests.
Create a new symbol request.
:param :class:`<Request> <azure.devops.v7_0.symbol.models.Request>` request_to_create: The symbol request to create.
:rtype: :class:`<Request> <azure.devops.v7_0.symbol.models.Request>`
"""
content = self._serialize.body(request_to_create, 'Request')
response = self._send(http_method='POST',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
content=content)
return self._deserialize('Request', response)
def create_requests_request_id_debug_entries(self, batch, request_id, collection):
"""CreateRequestsRequestIdDebugEntries.
Create debug entries for a symbol request as specified by its identifier.
:param :class:`<DebugEntryCreateBatch> <azure.devops.v7_0.symbol.models.DebugEntryCreateBatch>` batch: A batch that contains debug entries to create.
:param str request_id: The symbol request identifier.
:param str collection: A valid debug entry collection name. Must be "debugentries".
:rtype: [DebugEntry]
"""
route_values = {}
if request_id is not None:
route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
query_parameters = {}
if collection is not None:
query_parameters['collection'] = self._serialize.query('collection', collection, 'str')
content = self._serialize.body(batch, 'DebugEntryCreateBatch')
response = self._send(http_method='POST',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[DebugEntry]', self._unwrap_collection(response))
def create_requests_request_name_debug_entries(self, batch, request_name, collection):
"""CreateRequestsRequestNameDebugEntries.
Create debug entries for a symbol request as specified by its name.
:param :class:`<DebugEntryCreateBatch> <azure.devops.v7_0.symbol.models.DebugEntryCreateBatch>` batch: A batch that contains debug entries to create.
:param str request_name: The symbol request name.
:param str collection: A valid debug entry collection name. Must be "debugentries".
:rtype: [DebugEntry]
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
if collection is not None:
query_parameters['collection'] = self._serialize.query('collection', collection, 'str')
content = self._serialize.body(batch, 'DebugEntryCreateBatch')
response = self._send(http_method='POST',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
query_parameters=query_parameters,
content=content)
return self._deserialize('[DebugEntry]', self._unwrap_collection(response))
def delete_requests_request_id(self, request_id, synchronous=None):
"""DeleteRequestsRequestId.
Delete a symbol request by request identifier.
:param str request_id: The symbol request identifier.
:param bool synchronous: If true, delete all the debug entries under this request synchronously in the current session. If false, the deletion will be postponed to a later point and be executed automatically by the system.
"""
route_values = {}
if request_id is not None:
route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
query_parameters = {}
if synchronous is not None:
query_parameters['synchronous'] = self._serialize.query('synchronous', synchronous, 'bool')
self._send(http_method='DELETE',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
def delete_requests_request_name(self, request_name, synchronous=None):
"""DeleteRequestsRequestName.
Delete a symbol request by request name.
:param str request_name: The symbol request name.
:param bool synchronous: If true, delete all the debug entries under this request synchronously in the current session. If false, the deletion will be postponed to a later point and be executed automatically by the system.
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
if synchronous is not None:
query_parameters['synchronous'] = self._serialize.query('synchronous', synchronous, 'bool')
self._send(http_method='DELETE',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
query_parameters=query_parameters)
def get_requests_request_id(self, request_id):
"""GetRequestsRequestId.
Get a symbol request by request identifier.
:param str request_id: The symbol request identifier.
:rtype: :class:`<Request> <azure.devops.v7_0.symbol.models.Request>`
"""
route_values = {}
if request_id is not None:
route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
response = self._send(http_method='GET',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
route_values=route_values)
return self._deserialize('Request', response)
def get_requests_request_name(self, request_name):
"""GetRequestsRequestName.
Get a symbol request by request name.
:param str request_name: The symbol request name.
:rtype: :class:`<Request> <azure.devops.v7_0.symbol.models.Request>`
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
response = self._send(http_method='GET',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
query_parameters=query_parameters)
return self._deserialize('Request', response)
def update_requests_request_id(self, update_request, request_id):
"""UpdateRequestsRequestId.
Update a symbol request by request identifier.
:param :class:`<Request> <azure.devops.v7_0.symbol.models.Request>` update_request: The symbol request.
:param str request_id: The symbol request identifier.
:rtype: :class:`<Request> <azure.devops.v7_0.symbol.models.Request>`
"""
route_values = {}
if request_id is not None:
route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
content = self._serialize.body(update_request, 'Request')
response = self._send(http_method='PATCH',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Request', response)
def update_requests_request_name(self, update_request, request_name):
"""UpdateRequestsRequestName.
Update a symbol request by request name.
:param :class:`<Request> <azure.devops.v7_0.symbol.models.Request>` update_request: The symbol request.
:param str request_name: The symbol request name.
:rtype: :class:`<Request> <azure.devops.v7_0.symbol.models.Request>`
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
content = self._serialize.body(update_request, 'Request')
response = self._send(http_method='PATCH',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='7.0',
query_parameters=query_parameters,
content=content)
return self._deserialize('Request', response)
def get_sym_srv_debug_entry_client_key(self, debug_entry_client_key):
"""GetSymSrvDebugEntryClientKey.
Given a client key, returns the best matched debug entry.
        :param str debug_entry_client_key: A "client key" used by both ends of Microsoft's symbol protocol to identify a debug entry. The semantics of the client key are governed by symsrv and are beyond the scope of this documentation.
"""
route_values = {}
if debug_entry_client_key is not None:
route_values['debugEntryClientKey'] = self._serialize.url('debug_entry_client_key', debug_entry_client_key, 'str')
self._send(http_method='GET',
location_id='9648e256-c9f9-4f16-8a27-630b06396942',
version='7.0',
route_values=route_values)
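# Example usage (illustrative sketch, not part of the generated client; assumes
# an authenticated Connection and a placeholder request name):
#
#   symbol_client = connection.clients.get_symbol_client()
#   symbol_client.check_availability()  # raises if the service is unavailable
#   request = symbol_client.get_requests_request_name('my-request-name')
#   print(request.name)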
|
azure-devops-python-api/azure-devops/azure/devops/released/symbol/symbol_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/symbol/symbol_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 5672
}
| 385 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.wiki import models
class WikiClient(Client):
"""Wiki
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(WikiClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'bf7d82a0-8aa5-4613-94ef-6172a5ea01f3'
def create_attachment(self, upload_stream, project, wiki_identifier, name, version_descriptor=None, **kwargs):
"""CreateAttachment.
Creates an attachment in the wiki.
:param object upload_stream: Stream to upload
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str name: Wiki attachment name.
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. (Optional in case of ProjectWiki).
:rtype: :class:`<WikiAttachmentResponse> <azure.devops.v7_0.wiki.models.WikiAttachmentResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='c4382d8d-fefc-40e0-92c5-49852e9e17c0',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
response_object = models.WikiAttachmentResponse()
response_object.attachment = self._deserialize('WikiAttachment', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def create_page_move(self, page_move_parameters, project, wiki_identifier, comment=None, version_descriptor=None):
"""CreatePageMove.
Creates a page move operation that updates the path and order of the page as provided in the parameters.
        :param :class:`<WikiPageMoveParameters> <azure.devops.v7_0.wiki.models.WikiPageMoveParameters>` page_move_parameters: Page move operation parameters.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str comment: Comment that is to be associated with this page move.
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. (Optional in case of ProjectWiki).
:rtype: :class:`<WikiPageMoveResponse> <azure.devops.v7_0.wiki.models.WikiPageMoveResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
content = self._serialize.body(page_move_parameters, 'WikiPageMoveParameters')
response = self._send(http_method='POST',
location_id='e37bbe71-cbae-49e5-9a4e-949143b9d910',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
response_object = models.WikiPageMoveResponse()
response_object.page_move = self._deserialize('WikiPageMove', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def create_or_update_page(self, parameters, project, wiki_identifier, path, version, comment=None, version_descriptor=None):
"""CreateOrUpdatePage.
Creates or edits a wiki page.
:param :class:`<WikiPageCreateOrUpdateParameters> <azure.devops.v7_0.wiki.models.WikiPageCreateOrUpdateParameters>` parameters: Wiki create or update operation parameters.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str path: Wiki page path.
        :param str version: Version of the page on which the change is to be made. Mandatory for `Edit` scenario. To be populated in the If-Match header of the request.
:param str comment: Comment to be associated with the page operation.
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. (Optional in case of ProjectWiki).
:rtype: :class:`<WikiPageResponse> <azure.devops.v7_0.wiki.models.WikiPageResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
additional_headers = {}
if version is not None:
additional_headers['If-Match'] = version
content = self._serialize.body(parameters, 'WikiPageCreateOrUpdateParameters')
response = self._send(http_method='PUT',
location_id='25d3fbc7-fe3d-46cb-b5a5-0b6f79caf27b',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
content=content)
response_object = models.WikiPageResponse()
response_object.page = self._deserialize('WikiPage', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def delete_page(self, project, wiki_identifier, path, comment=None, version_descriptor=None):
"""DeletePage.
Deletes a wiki page.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str path: Wiki page path.
:param str comment: Comment to be associated with this page delete.
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. (Optional in case of ProjectWiki).
:rtype: :class:`<WikiPageResponse> <azure.devops.v7_0.wiki.models.WikiPageResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
response = self._send(http_method='DELETE',
location_id='25d3fbc7-fe3d-46cb-b5a5-0b6f79caf27b',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
response_object = models.WikiPageResponse()
response_object.page = self._deserialize('WikiPage', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def get_page(self, project, wiki_identifier, path=None, recursion_level=None, version_descriptor=None, include_content=None):
"""GetPage.
Gets metadata or content of the wiki page for the provided path. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str path: Wiki page path.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. Defaults to the default branch (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: :class:`<WikiPageResponse> <azure.devops.v7_0.wiki.models.WikiPageResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='25d3fbc7-fe3d-46cb-b5a5-0b6f79caf27b',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
response_object = models.WikiPageResponse()
response_object.page = self._deserialize('WikiPage', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def get_page_text(self, project, wiki_identifier, path=None, recursion_level=None, version_descriptor=None, include_content=None, **kwargs):
"""GetPageText.
Gets metadata or content of the wiki page for the provided path. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str path: Wiki page path.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. Defaults to the default branch (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='25d3fbc7-fe3d-46cb-b5a5-0b6f79caf27b',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_page_zip(self, project, wiki_identifier, path=None, recursion_level=None, version_descriptor=None, include_content=None, **kwargs):
"""GetPageZip.
Gets metadata or content of the wiki page for the provided path. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param str path: Wiki page path.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. Defaults to the default branch (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='25d3fbc7-fe3d-46cb-b5a5-0b6f79caf27b',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def delete_page_by_id(self, project, wiki_identifier, id, comment=None):
"""DeletePageById.
Deletes a wiki page.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param int id: Wiki page ID.
:param str comment: Comment to be associated with this page delete.
:rtype: :class:`<WikiPageResponse> <azure.devops.v7_0.wiki.models.WikiPageResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
response = self._send(http_method='DELETE',
location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
response_object = models.WikiPageResponse()
response_object.page = self._deserialize('WikiPage', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def get_page_by_id(self, project, wiki_identifier, id, recursion_level=None, include_content=None):
"""GetPageById.
Gets metadata or content of the wiki page for the provided page id. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
        :param str wiki_identifier: Wiki ID or wiki name.
:param int id: Wiki page ID.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: :class:`<WikiPageResponse> <azure.devops.v7_0.wiki.models.WikiPageResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
response_object = models.WikiPageResponse()
response_object.page = self._deserialize('WikiPage', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def get_page_by_id_text(self, project, wiki_identifier, id, recursion_level=None, include_content=None, **kwargs):
"""GetPageByIdText.
Gets metadata or content of the wiki page for the provided page id. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
        :param str wiki_identifier: Wiki ID or wiki name.
:param int id: Wiki page ID.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_page_by_id_zip(self, project, wiki_identifier, id, recursion_level=None, include_content=None, **kwargs):
"""GetPageByIdZip.
Gets metadata or content of the wiki page for the provided page id. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
        :param str wiki_identifier: Wiki ID or wiki name.
:param int id: Wiki page ID.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_page_by_id(self, parameters, project, wiki_identifier, id, version, comment=None):
"""UpdatePageById.
Edits a wiki page.
:param :class:`<WikiPageCreateOrUpdateParameters> <azure.devops.v7_0.wiki.models.WikiPageCreateOrUpdateParameters>` parameters: Wiki update operation parameters.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param int id: Wiki page ID.
        :param str version: Version of the page on which the change is to be made. Mandatory for `Edit` scenario. To be populated in the If-Match header of the request.
:param str comment: Comment to be associated with the page operation.
:rtype: :class:`<WikiPageResponse> <azure.devops.v7_0.wiki.models.WikiPageResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
additional_headers = {}
if version is not None:
additional_headers['If-Match'] = version
content = self._serialize.body(parameters, 'WikiPageCreateOrUpdateParameters')
response = self._send(http_method='PATCH',
location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
content=content)
response_object = models.WikiPageResponse()
response_object.page = self._deserialize('WikiPage', response)
response_object.eTag = response.headers.get('ETag')
return response_object
def get_pages_batch(self, pages_batch_request, project, wiki_identifier, version_descriptor=None):
"""GetPagesBatch.
        Returns a pageable list of wiki pages.
:param :class:`<WikiPagesBatchRequest> <azure.devops.v7_0.wiki.models.WikiPagesBatchRequest>` pages_batch_request: Wiki batch page request.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param :class:`<GitVersionDescriptor> <azure.devops.v7_0.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. (Optional in case of ProjectWiki).
:rtype: :class:`<[WikiPageDetail]> <azure.devops.v7_0.wiki.models.[WikiPageDetail]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
content = self._serialize.body(pages_batch_request, 'WikiPagesBatchRequest')
response = self._send(http_method='POST',
location_id='71323c46-2592-4398-8771-ced73dd87207',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[WikiPageDetail]', self._unwrap_collection(response))
def get_page_data(self, project, wiki_identifier, page_id, page_views_for_days=None):
"""GetPageData.
Returns page detail corresponding to Page ID.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki ID or wiki name.
:param int page_id: Wiki page ID.
        :param int page_views_for_days: Last N days from the current day for which page views are to be returned, inclusive of the current day.
:rtype: :class:`<WikiPageDetail> <azure.devops.v7_0.wiki.models.WikiPageDetail>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
if page_id is not None:
route_values['pageId'] = self._serialize.url('page_id', page_id, 'int')
query_parameters = {}
if page_views_for_days is not None:
query_parameters['pageViewsForDays'] = self._serialize.query('page_views_for_days', page_views_for_days, 'int')
response = self._send(http_method='GET',
location_id='81c4e0fe-7663-4d62-ad46-6ab78459f274',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WikiPageDetail', response)
def create_wiki(self, wiki_create_params, project=None):
"""CreateWiki.
Creates the wiki resource.
:param :class:`<WikiCreateParametersV2> <azure.devops.v7_0.wiki.models.WikiCreateParametersV2>` wiki_create_params: Parameters for the wiki creation.
:param str project: Project ID or project name
:rtype: :class:`<WikiV2> <azure.devops.v7_0.wiki.models.WikiV2>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(wiki_create_params, 'WikiCreateParametersV2')
response = self._send(http_method='POST',
location_id='288d122c-dbd4-451d-aa5f-7dbbba070728',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('WikiV2', response)
def delete_wiki(self, wiki_identifier, project=None):
"""DeleteWiki.
Deletes the wiki corresponding to the wiki ID or wiki name provided.
:param str wiki_identifier: Wiki ID or wiki name.
:param str project: Project ID or project name
:rtype: :class:`<WikiV2> <azure.devops.v7_0.wiki.models.WikiV2>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
response = self._send(http_method='DELETE',
location_id='288d122c-dbd4-451d-aa5f-7dbbba070728',
version='7.0',
route_values=route_values)
return self._deserialize('WikiV2', response)
def get_all_wikis(self, project=None):
"""GetAllWikis.
Gets all wikis in a project or collection.
:param str project: Project ID or project name
:rtype: [WikiV2]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='288d122c-dbd4-451d-aa5f-7dbbba070728',
version='7.0',
route_values=route_values)
return self._deserialize('[WikiV2]', self._unwrap_collection(response))
def get_wiki(self, wiki_identifier, project=None):
"""GetWiki.
Gets the wiki corresponding to the wiki ID or wiki name provided.
:param str wiki_identifier: Wiki ID or wiki name.
:param str project: Project ID or project name
:rtype: :class:`<WikiV2> <azure.devops.v7_0.wiki.models.WikiV2>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
response = self._send(http_method='GET',
location_id='288d122c-dbd4-451d-aa5f-7dbbba070728',
version='7.0',
route_values=route_values)
return self._deserialize('WikiV2', response)
def update_wiki(self, update_parameters, wiki_identifier, project=None):
"""UpdateWiki.
Updates the wiki corresponding to the wiki ID or wiki name provided using the update parameters.
:param :class:`<WikiUpdateParameters> <azure.devops.v7_0.wiki.models.WikiUpdateParameters>` update_parameters: Update parameters.
:param str wiki_identifier: Wiki ID or wiki name.
:param str project: Project ID or project name
:rtype: :class:`<WikiV2> <azure.devops.v7_0.wiki.models.WikiV2>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
content = self._serialize.body(update_parameters, 'WikiUpdateParameters')
response = self._send(http_method='PATCH',
location_id='288d122c-dbd4-451d-aa5f-7dbbba070728',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('WikiV2', response)
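# Editorial usage sketch, not part of the generated client: a minimal
# create/get round trip. The organization URL, PAT, project name and ids are
# placeholders, and `WikiClient` is assumed to be the class defined above.
if __name__ == '__main__':
    from msrest.authentication import BasicAuthentication
    from azure.devops.v7_0.wiki.models import WikiCreateParametersV2
    creds = BasicAuthentication('', 'personal-access-token')
    client = WikiClient(base_url='https://dev.azure.com/yourorg', creds=creds)
    params = WikiCreateParametersV2(name='TeamWiki', project_id='project-guid')
    wiki = client.create_wiki(params, project='MyProject')
    print(client.get_wiki(wiki.id, project='MyProject').name)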
|
azure-devops-python-api/azure-devops/azure/devops/released/wiki/wiki_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/wiki/wiki_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 16066
}
| 386 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .build_client import BuildClient
__all__ = [
'AgentPoolQueue',
'AgentSpecification',
'AggregatedResultsAnalysis',
'AggregatedResultsByOutcome',
'AggregatedResultsDifference',
'AggregatedRunsByOutcome',
'AggregatedRunsByState',
'ArtifactResource',
'AssociatedWorkItem',
'Attachment',
'AuthorizationHeader',
'Build',
'BuildArtifact',
'BuildBadge',
'BuildController',
'BuildDefinition',
'BuildDefinition3_2',
'BuildDefinitionReference',
'BuildDefinitionReference3_2',
'BuildDefinitionRevision',
'BuildDefinitionStep',
'BuildDefinitionTemplate',
'BuildDefinitionTemplate3_2',
'BuildDefinitionVariable',
'BuildLog',
'BuildLogReference',
'BuildMetric',
'BuildOption',
'BuildOptionDefinition',
'BuildOptionDefinitionReference',
'BuildOptionGroupDefinition',
'BuildOptionInputDefinition',
'BuildReportMetadata',
'BuildRepository',
'BuildRequestValidationResult',
'BuildResourceUsage',
'BuildRetentionHistory',
'BuildRetentionSample',
'BuildSettings',
'DataSourceBindingBase',
'DefinitionReference',
'DefinitionResourceReference',
'Deployment',
'Folder',
'GraphSubjectBase',
'Change',
'IdentityRef',
'Issue',
'JobReference',
'JsonPatchOperation',
'MinimalRetentionLease',
'NewRetentionLease',
'PhaseReference',
'PipelineGeneralSettings',
'PipelineReference',
'ProcessParameters',
'ProjectRetentionSetting',
'PullRequest',
'ReferenceLinks',
'ReleaseReference',
'RepositoryWebhook',
'ResourceRef',
'RetentionLease',
'RetentionLeaseUpdate',
'RetentionPolicy',
'RetentionSetting',
'SourceProviderAttributes',
'SourceRepositories',
'SourceRepository',
'SourceRepositoryItem',
'StageReference',
'SupportedTrigger',
'TaskAgentPoolReference',
'TaskDefinitionReference',
'TaskInputDefinitionBase',
'TaskInputValidation',
'TaskOrchestrationPlanReference',
'TaskReference',
'TaskSourceDefinitionBase',
'TeamProjectReference',
'TestResultsContext',
'Timeline',
'TimelineAttempt',
'TimelineRecord',
'TimelineReference',
'UpdateProjectRetentionSettingModel',
'UpdateRetentionSettingModel',
'UpdateStageParameters',
'UpdateTagParameters',
'VariableGroup',
'VariableGroupReference',
'WebApiConnectedServiceRef',
'XamlBuildControllerReference',
'YamlBuild',
'BuildClient'
]
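# Example (editorial note): the flat re-exports above let callers import the
# client and its models from a single package, e.g.
#
#   from azure.devops.v7_0.build import BuildClient, Build, BuildDefinition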
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/build/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/build/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 999
}
| 387 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class FeatureManagementClient(Client):
"""FeatureManagement
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(FeatureManagementClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def get_feature(self, feature_id):
"""GetFeature.
[Preview API] Get a specific feature by its id
:param str feature_id: The contribution id of the feature
:rtype: :class:`<ContributedFeature> <azure.devops.v7_0.feature_management.models.ContributedFeature>`
"""
route_values = {}
if feature_id is not None:
route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str')
response = self._send(http_method='GET',
location_id='c4209f25-7a27-41dd-9f04-06080c7b6afd',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('ContributedFeature', response)
def get_features(self, target_contribution_id=None):
"""GetFeatures.
[Preview API] Get a list of all defined features
        :param str target_contribution_id: Optional target contribution. If null/empty, return all features. If specified, include the features that target the specified contribution.
:rtype: [ContributedFeature]
"""
query_parameters = {}
if target_contribution_id is not None:
query_parameters['targetContributionId'] = self._serialize.query('target_contribution_id', target_contribution_id, 'str')
response = self._send(http_method='GET',
location_id='c4209f25-7a27-41dd-9f04-06080c7b6afd',
version='7.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[ContributedFeature]', self._unwrap_collection(response))
def get_feature_state(self, feature_id, user_scope):
"""GetFeatureState.
[Preview API] Get the state of the specified feature for the given user/all-users scope
:param str feature_id: Contribution id of the feature
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:rtype: :class:`<ContributedFeatureState> <azure.devops.v7_0.feature_management.models.ContributedFeatureState>`
"""
route_values = {}
if feature_id is not None:
route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str')
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
response = self._send(http_method='GET',
location_id='98911314-3f9b-4eaf-80e8-83900d8e85d9',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('ContributedFeatureState', response)
def set_feature_state(self, feature, feature_id, user_scope, reason=None, reason_code=None):
"""SetFeatureState.
[Preview API] Set the state of a feature
:param :class:`<ContributedFeatureState> <azure.devops.v7_0.feature_management.models.ContributedFeatureState>` feature: Posted feature state object. Should specify the effective value.
:param str feature_id: Contribution id of the feature
:param str user_scope: User-Scope at which to set the value. Should be "me" for the current user or "host" for all users.
:param str reason: Reason for changing the state
:param str reason_code: Short reason code
:rtype: :class:`<ContributedFeatureState> <azure.devops.v7_0.feature_management.models.ContributedFeatureState>`
"""
route_values = {}
if feature_id is not None:
route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str')
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
query_parameters = {}
if reason is not None:
query_parameters['reason'] = self._serialize.query('reason', reason, 'str')
if reason_code is not None:
query_parameters['reasonCode'] = self._serialize.query('reason_code', reason_code, 'str')
content = self._serialize.body(feature, 'ContributedFeatureState')
response = self._send(http_method='PATCH',
location_id='98911314-3f9b-4eaf-80e8-83900d8e85d9',
version='7.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ContributedFeatureState', response)
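    # Example (editorial sketch): enabling a feature for the current user.
    # The feature id is a placeholder, and the ContributedFeatureState
    # constructor arguments shown are an assumption about the model's shape.
    #
    #   state = models.ContributedFeatureState(state='enabled')
    #   client.set_feature_state(state, 'ms.vss-example.my-feature', 'me',
    #                            reason='trying the new experience')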
def get_feature_state_for_scope(self, feature_id, user_scope, scope_name, scope_value):
"""GetFeatureStateForScope.
[Preview API] Get the state of the specified feature for the given named scope
:param str feature_id: Contribution id of the feature
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:rtype: :class:`<ContributedFeatureState> <azure.devops.v7_0.feature_management.models.ContributedFeatureState>`
"""
route_values = {}
if feature_id is not None:
route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str')
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if scope_name is not None:
route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str')
if scope_value is not None:
route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str')
response = self._send(http_method='GET',
location_id='dd291e43-aa9f-4cee-8465-a93c78e414a4',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('ContributedFeatureState', response)
def set_feature_state_for_scope(self, feature, feature_id, user_scope, scope_name, scope_value, reason=None, reason_code=None):
"""SetFeatureStateForScope.
[Preview API] Set the state of a feature at a specific scope
:param :class:`<ContributedFeatureState> <azure.devops.v7_0.feature_management.models.ContributedFeatureState>` feature: Posted feature state object. Should specify the effective value.
:param str feature_id: Contribution id of the feature
:param str user_scope: User-Scope at which to set the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:param str reason: Reason for changing the state
:param str reason_code: Short reason code
:rtype: :class:`<ContributedFeatureState> <azure.devops.v7_0.feature_management.models.ContributedFeatureState>`
"""
route_values = {}
if feature_id is not None:
route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str')
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if scope_name is not None:
route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str')
if scope_value is not None:
route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str')
query_parameters = {}
if reason is not None:
query_parameters['reason'] = self._serialize.query('reason', reason, 'str')
if reason_code is not None:
query_parameters['reasonCode'] = self._serialize.query('reason_code', reason_code, 'str')
content = self._serialize.body(feature, 'ContributedFeatureState')
response = self._send(http_method='PATCH',
location_id='dd291e43-aa9f-4cee-8465-a93c78e414a4',
version='7.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ContributedFeatureState', response)
def query_feature_states(self, query):
"""QueryFeatureStates.
[Preview API] Get the effective state for a list of feature ids
:param :class:`<ContributedFeatureStateQuery> <azure.devops.v7_0.feature_management.models.ContributedFeatureStateQuery>` query: Features to query along with current scope values
:rtype: :class:`<ContributedFeatureStateQuery> <azure.devops.v7_0.feature_management.models.ContributedFeatureStateQuery>`
"""
content = self._serialize.body(query, 'ContributedFeatureStateQuery')
response = self._send(http_method='POST',
location_id='2b4486ad-122b-400c-ae65-17b6672c1f9d',
version='7.0-preview.1',
content=content)
return self._deserialize('ContributedFeatureStateQuery', response)
def query_feature_states_for_default_scope(self, query, user_scope):
"""QueryFeatureStatesForDefaultScope.
[Preview API] Get the states of the specified features for the default scope
:param :class:`<ContributedFeatureStateQuery> <azure.devops.v7_0.feature_management.models.ContributedFeatureStateQuery>` query: Query describing the features to query.
:param str user_scope:
:rtype: :class:`<ContributedFeatureStateQuery> <azure.devops.v7_0.feature_management.models.ContributedFeatureStateQuery>`
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
content = self._serialize.body(query, 'ContributedFeatureStateQuery')
response = self._send(http_method='POST',
location_id='3f810f28-03e2-4239-b0bc-788add3005e5',
version='7.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ContributedFeatureStateQuery', response)
def query_feature_states_for_named_scope(self, query, user_scope, scope_name, scope_value):
"""QueryFeatureStatesForNamedScope.
[Preview API] Get the states of the specified features for the specific named scope
:param :class:`<ContributedFeatureStateQuery> <azure.devops.v7_0.feature_management.models.ContributedFeatureStateQuery>` query: Query describing the features to query.
:param str user_scope:
:param str scope_name:
:param str scope_value:
:rtype: :class:`<ContributedFeatureStateQuery> <azure.devops.v7_0.feature_management.models.ContributedFeatureStateQuery>`
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if scope_name is not None:
route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str')
if scope_value is not None:
route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str')
content = self._serialize.body(query, 'ContributedFeatureStateQuery')
response = self._send(http_method='POST',
location_id='f29e997b-c2da-4d15-8380-765788a1a74c',
version='7.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ContributedFeatureStateQuery', response)
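# Editorial usage sketch, not part of the generated client; the organization
# URL and PAT below are placeholders.
if __name__ == '__main__':
    from msrest.authentication import BasicAuthentication
    creds = BasicAuthentication('', 'personal-access-token')
    client = FeatureManagementClient(base_url='https://dev.azure.com/yourorg',
                                     creds=creds)
    for feature in client.get_features():
        print(feature.id)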
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/feature_management/feature_management_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/feature_management/feature_management_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 5488
}
| 388 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class GraphClient(Client):
"""Graph
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(GraphClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'bb1e7ec9-e901-4b68-999a-de7012b920f8'
def delete_avatar(self, subject_descriptor):
"""DeleteAvatar.
:param str subject_descriptor:
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
self._send(http_method='DELETE',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='7.0',
route_values=route_values)
def get_avatar(self, subject_descriptor, size=None, format=None):
"""GetAvatar.
:param str subject_descriptor:
:param str size:
:param str format:
:rtype: :class:`<Avatar> <azure.devops.v7_0.graph.models.Avatar>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
query_parameters = {}
if size is not None:
query_parameters['size'] = self._serialize.query('size', size, 'str')
if format is not None:
query_parameters['format'] = self._serialize.query('format', format, 'str')
response = self._send(http_method='GET',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Avatar', response)
def set_avatar(self, avatar, subject_descriptor):
"""SetAvatar.
:param :class:`<Avatar> <azure.devops.v7_0.graph.models.Avatar>` avatar:
:param str subject_descriptor:
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
content = self._serialize.body(avatar, 'Avatar')
self._send(http_method='PUT',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='7.0',
route_values=route_values,
content=content)
def get_descriptor(self, storage_key):
"""GetDescriptor.
Resolve a storage key to a descriptor
:param str storage_key: Storage key of the subject (user, group, scope, etc.) to resolve
:rtype: :class:`<GraphDescriptorResult> <azure.devops.v7_0.graph.models.GraphDescriptorResult>`
"""
route_values = {}
if storage_key is not None:
route_values['storageKey'] = self._serialize.url('storage_key', storage_key, 'str')
response = self._send(http_method='GET',
location_id='048aee0a-7072-4cde-ab73-7af77b1e0b4e',
version='7.0',
route_values=route_values)
return self._deserialize('GraphDescriptorResult', response)
def create_group(self, creation_context, scope_descriptor=None, group_descriptors=None):
"""CreateGroup.
[Preview API] Create a new Azure DevOps group or materialize an existing AAD group.
:param :class:`<GraphGroupCreationContext> <azure.devops.v7_0.graph.models.GraphGroupCreationContext>` creation_context: The subset of the full graph group used to uniquely find the graph subject in an external provider.
:param str scope_descriptor: A descriptor referencing the scope (collection, project) in which the group should be created. If omitted, will be created in the scope of the enclosing account or organization. Valid only for VSTS groups.
:param [str] group_descriptors: A comma separated list of descriptors referencing groups you want the graph group to join
:rtype: :class:`<GraphGroup> <azure.devops.v7_0.graph.models.GraphGroup>`
"""
query_parameters = {}
if scope_descriptor is not None:
query_parameters['scopeDescriptor'] = self._serialize.query('scope_descriptor', scope_descriptor, 'str')
if group_descriptors is not None:
group_descriptors = ",".join(group_descriptors)
query_parameters['groupDescriptors'] = self._serialize.query('group_descriptors', group_descriptors, 'str')
content = self._serialize.body(creation_context, 'GraphGroupCreationContext')
response = self._send(http_method='POST',
location_id='ebbe6af8-0b91-4c13-8cf1-777c14858188',
version='7.0-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('GraphGroup', response)
def delete_group(self, group_descriptor):
"""DeleteGroup.
[Preview API] Removes an Azure DevOps group from all of its parent groups.
:param str group_descriptor: The descriptor of the group to delete.
"""
route_values = {}
if group_descriptor is not None:
route_values['groupDescriptor'] = self._serialize.url('group_descriptor', group_descriptor, 'str')
self._send(http_method='DELETE',
location_id='ebbe6af8-0b91-4c13-8cf1-777c14858188',
version='7.0-preview.1',
route_values=route_values)
def get_group(self, group_descriptor):
"""GetGroup.
[Preview API] Get a group by its descriptor.
:param str group_descriptor: The descriptor of the desired graph group.
:rtype: :class:`<GraphGroup> <azure.devops.v7_0.graph.models.GraphGroup>`
"""
route_values = {}
if group_descriptor is not None:
route_values['groupDescriptor'] = self._serialize.url('group_descriptor', group_descriptor, 'str')
response = self._send(http_method='GET',
location_id='ebbe6af8-0b91-4c13-8cf1-777c14858188',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('GraphGroup', response)
def list_groups(self, scope_descriptor=None, subject_types=None, continuation_token=None):
"""ListGroups.
[Preview API] Gets a list of all groups in the current scope (usually organization or account).
:param str scope_descriptor: Specify a non-default scope (collection, project) to search for groups.
:param [str] subject_types: A comma separated list of user subject subtypes to reduce the retrieved results, e.g. Microsoft.IdentityModel.Claims.ClaimsIdentity
:param str continuation_token: An opaque data blob that allows the next page of data to resume immediately after where the previous page ended. The only reliable way to know if there is more data left is the presence of a continuation token.
:rtype: :class:`<PagedGraphGroups> <azure.devops.v7_0.graph.models.PagedGraphGroups>`
"""
query_parameters = {}
if scope_descriptor is not None:
query_parameters['scopeDescriptor'] = self._serialize.query('scope_descriptor', scope_descriptor, 'str')
if subject_types is not None:
subject_types = ",".join(subject_types)
query_parameters['subjectTypes'] = self._serialize.query('subject_types', subject_types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
response = self._send(http_method='GET',
location_id='ebbe6af8-0b91-4c13-8cf1-777c14858188',
version='7.0-preview.1',
query_parameters=query_parameters)
response_object = models.PagedGraphGroups()
response_object.graph_groups = self._deserialize('[GraphGroup]', self._unwrap_collection(response))
response_object.continuation_token = response.headers.get('X-MS-ContinuationToken')
return response_object
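    # Example (editorial sketch): draining every page of groups by feeding the
    # X-MS-ContinuationToken value surfaced on the response object back into
    # the next call. `client` is assumed to be an instance of this class.
    #
    #   token = None
    #   while True:
    #       page = client.list_groups(continuation_token=token)
    #       for group in page.graph_groups:
    #           print(group.principal_name)
    #       token = page.continuation_token
    #       if not token:
    #           break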
def update_group(self, group_descriptor, patch_document):
"""UpdateGroup.
[Preview API] Update the properties of an Azure DevOps group.
:param str group_descriptor: The descriptor of the group to modify.
:param :class:`<[JsonPatchOperation]> <azure.devops.v7_0.graph.models.[JsonPatchOperation]>` patch_document: The JSON+Patch document containing the fields to alter.
:rtype: :class:`<GraphGroup> <azure.devops.v7_0.graph.models.GraphGroup>`
"""
route_values = {}
if group_descriptor is not None:
route_values['groupDescriptor'] = self._serialize.url('group_descriptor', group_descriptor, 'str')
content = self._serialize.body(patch_document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='ebbe6af8-0b91-4c13-8cf1-777c14858188',
version='7.0-preview.1',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
return self._deserialize('GraphGroup', response)
def add_membership(self, subject_descriptor, container_descriptor):
"""AddMembership.
[Preview API] Create a new membership between a container and subject.
:param str subject_descriptor: A descriptor to a group or user that can be the child subject in the relationship.
:param str container_descriptor: A descriptor to a group that can be the container in the relationship.
:rtype: :class:`<GraphMembership> <azure.devops.v7_0.graph.models.GraphMembership>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
if container_descriptor is not None:
route_values['containerDescriptor'] = self._serialize.url('container_descriptor', container_descriptor, 'str')
response = self._send(http_method='PUT',
location_id='3fd2e6ca-fb30-443a-b579-95b19ed0934c',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('GraphMembership', response)
def get_membership(self, subject_descriptor, container_descriptor):
"""GetMembership.
[Preview API] Get a membership relationship between a container and subject.
:param str subject_descriptor: A descriptor to the child subject in the relationship.
:param str container_descriptor: A descriptor to the container in the relationship.
:rtype: :class:`<GraphMembership> <azure.devops.v7_0.graph.models.GraphMembership>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
if container_descriptor is not None:
route_values['containerDescriptor'] = self._serialize.url('container_descriptor', container_descriptor, 'str')
response = self._send(http_method='GET',
location_id='3fd2e6ca-fb30-443a-b579-95b19ed0934c',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('GraphMembership', response)
def check_membership_existence(self, subject_descriptor, container_descriptor):
"""CheckMembershipExistence.
[Preview API] Check to see if a membership relationship between a container and subject exists.
:param str subject_descriptor: The group or user that is a child subject of the relationship.
:param str container_descriptor: The group that is the container in the relationship.
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
if container_descriptor is not None:
route_values['containerDescriptor'] = self._serialize.url('container_descriptor', container_descriptor, 'str')
self._send(http_method='HEAD',
location_id='3fd2e6ca-fb30-443a-b579-95b19ed0934c',
version='7.0-preview.1',
route_values=route_values)
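    # Example (editorial sketch): the HEAD-based check raises on a missing
    # membership rather than returning False, so callers typically wrap it.
    # The descriptor variables are placeholders.
    #
    #   from azure.devops.exceptions import AzureDevOpsServiceError
    #   try:
    #       client.check_membership_existence(user_descriptor, group_descriptor)
    #       is_member = True
    #   except AzureDevOpsServiceError:
    #       is_member = False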
def remove_membership(self, subject_descriptor, container_descriptor):
"""RemoveMembership.
[Preview API] Deletes a membership between a container and subject.
:param str subject_descriptor: A descriptor to a group or user that is the child subject in the relationship.
:param str container_descriptor: A descriptor to a group that is the container in the relationship.
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
if container_descriptor is not None:
route_values['containerDescriptor'] = self._serialize.url('container_descriptor', container_descriptor, 'str')
self._send(http_method='DELETE',
location_id='3fd2e6ca-fb30-443a-b579-95b19ed0934c',
version='7.0-preview.1',
route_values=route_values)
def list_memberships(self, subject_descriptor, direction=None, depth=None):
"""ListMemberships.
[Preview API] Get all the memberships where this descriptor is a member in the relationship.
:param str subject_descriptor: Fetch all direct memberships of this descriptor.
:param str direction: Defaults to Up.
:param int depth: The maximum number of edges to traverse up or down the membership tree. Currently the only supported value is '1'.
:rtype: [GraphMembership]
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
query_parameters = {}
if direction is not None:
query_parameters['direction'] = self._serialize.query('direction', direction, 'str')
if depth is not None:
query_parameters['depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='e34b6394-6b30-4435-94a9-409a5eef3e31',
version='7.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GraphMembership]', self._unwrap_collection(response))
def get_membership_state(self, subject_descriptor):
"""GetMembershipState.
[Preview API] Check whether a subject is active or inactive.
:param str subject_descriptor: Descriptor of the subject (user, group, scope, etc.) to check state of
:rtype: :class:`<GraphMembershipState> <azure.devops.v7_0.graph.models.GraphMembershipState>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
response = self._send(http_method='GET',
location_id='1ffe5c94-1144-4191-907b-d0211cad36a8',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('GraphMembershipState', response)
def get_provider_info(self, user_descriptor):
"""GetProviderInfo.
:param str user_descriptor:
:rtype: :class:`<GraphProviderInfo> <azure.devops.v7_0.graph.models.GraphProviderInfo>`
"""
route_values = {}
if user_descriptor is not None:
route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
response = self._send(http_method='GET',
location_id='1e377995-6fa2-4588-bd64-930186abdcfa',
version='7.0',
route_values=route_values)
return self._deserialize('GraphProviderInfo', response)
def request_access(self, jsondocument):
"""RequestAccess.
[Preview API]
:param :class:`<object> <azure.devops.v7_0.graph.models.object>` jsondocument:
"""
content = self._serialize.body(jsondocument, 'object')
self._send(http_method='POST',
location_id='8d54bf92-8c99-47f2-9972-b21341f1722e',
version='7.0-preview.1',
content=content)
def get_storage_key(self, subject_descriptor):
"""GetStorageKey.
Resolve a descriptor to a storage key.
:param str subject_descriptor:
:rtype: :class:`<GraphStorageKeyResult> <azure.devops.v7_0.graph.models.GraphStorageKeyResult>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
response = self._send(http_method='GET',
location_id='eb85f8cc-f0f6-4264-a5b1-ffe2e4d4801f',
version='7.0',
route_values=route_values)
return self._deserialize('GraphStorageKeyResult', response)
def lookup_subjects(self, subject_lookup):
"""LookupSubjects.
[Preview API] Resolve descriptors to users, groups or scopes (Subjects) in a batch.
:param :class:`<GraphSubjectLookup> <azure.devops.v7_0.graph.models.GraphSubjectLookup>` subject_lookup: A list of descriptors that specifies a subset of subjects to retrieve. Each descriptor uniquely identifies the subject across all instance scopes, but only at a single point in time.
:rtype: {GraphSubject}
"""
content = self._serialize.body(subject_lookup, 'GraphSubjectLookup')
response = self._send(http_method='POST',
location_id='4dd4d168-11f2-48c4-83e8-756fa0de027c',
version='7.0-preview.1',
content=content)
return self._deserialize('{GraphSubject}', self._unwrap_collection(response))
def query_subjects(self, subject_query):
"""QuerySubjects.
        [Preview API] Search for Azure DevOps users and/or groups. Results are returned in a batch of no more than 100 graph subjects.
        :param :class:`<GraphSubjectQuery> <azure.devops.v7_0.graph.models.GraphSubjectQuery>` subject_query: The search query, which includes the following: Query: the search term; matching is prefix-only. SubjectKind: "User" or "Group" may be specified, either or both. ScopeDescriptor: a non-default scope may be specified, i.e. a project scope descriptor.
:rtype: [GraphSubject]
"""
content = self._serialize.body(subject_query, 'GraphSubjectQuery')
response = self._send(http_method='POST',
location_id='05942c89-006a-48ce-bb79-baeb8abf99c6',
version='7.0-preview.1',
content=content)
return self._deserialize('[GraphSubject]', self._unwrap_collection(response))
def create_user(self, creation_context, group_descriptors=None):
"""CreateUser.
[Preview API] Materialize an existing AAD or MSA user into the VSTS account.
:param :class:`<GraphUserCreationContext> <azure.devops.v7_0.graph.models.GraphUserCreationContext>` creation_context: The subset of the full graph user used to uniquely find the graph subject in an external provider.
:param [str] group_descriptors: A comma separated list of descriptors of groups you want the graph user to join
:rtype: :class:`<GraphUser> <azure.devops.v7_0.graph.models.GraphUser>`
"""
query_parameters = {}
if group_descriptors is not None:
group_descriptors = ",".join(group_descriptors)
query_parameters['groupDescriptors'] = self._serialize.query('group_descriptors', group_descriptors, 'str')
content = self._serialize.body(creation_context, 'GraphUserCreationContext')
response = self._send(http_method='POST',
location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
version='7.0-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('GraphUser', response)
def delete_user(self, user_descriptor):
"""DeleteUser.
[Preview API] Disables a user.
:param str user_descriptor: The descriptor of the user to delete.
"""
route_values = {}
if user_descriptor is not None:
route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
self._send(http_method='DELETE',
location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
version='7.0-preview.1',
route_values=route_values)
def get_user(self, user_descriptor):
"""GetUser.
[Preview API] Get a user by its descriptor.
:param str user_descriptor: The descriptor of the desired user.
:rtype: :class:`<GraphUser> <azure.devops.v7_0.graph.models.GraphUser>`
"""
route_values = {}
if user_descriptor is not None:
route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
response = self._send(http_method='GET',
location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
version='7.0-preview.1',
route_values=route_values)
return self._deserialize('GraphUser', response)
def list_users(self, subject_types=None, continuation_token=None, scope_descriptor=None):
"""ListUsers.
[Preview API] Get a list of all users in a given scope.
        :param [str] subject_types: A comma separated list of user subject subtypes to reduce the retrieved results, e.g. 'msa', 'aad', 'svc' (service identity), 'imp' (imported identity), etc.
:param str continuation_token: An opaque data blob that allows the next page of data to resume immediately after where the previous page ended. The only reliable way to know if there is more data left is the presence of a continuation token.
:param str scope_descriptor: Specify a non-default scope (collection, project) to search for users.
:rtype: :class:`<PagedGraphUsers> <azure.devops.v7_0.graph.models.PagedGraphUsers>`
"""
query_parameters = {}
if subject_types is not None:
subject_types = ",".join(subject_types)
query_parameters['subjectTypes'] = self._serialize.query('subject_types', subject_types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if scope_descriptor is not None:
query_parameters['scopeDescriptor'] = self._serialize.query('scope_descriptor', scope_descriptor, 'str')
response = self._send(http_method='GET',
location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
version='7.0-preview.1',
query_parameters=query_parameters)
response_object = models.PagedGraphUsers()
response_object.graph_users = self._deserialize('[GraphUser]', self._unwrap_collection(response))
response_object.continuation_token = response.headers.get('X-MS-ContinuationToken')
return response_object
def update_user(self, update_context, user_descriptor):
"""UpdateUser.
[Preview API] Map an existing user to a different identity
:param :class:`<GraphUserUpdateContext> <azure.devops.v7_0.graph.models.GraphUserUpdateContext>` update_context: The subset of the full graph user used to uniquely find the graph subject in an external provider.
:param str user_descriptor: the descriptor of the user to update
:rtype: :class:`<GraphUser> <azure.devops.v7_0.graph.models.GraphUser>`
"""
route_values = {}
if user_descriptor is not None:
route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
content = self._serialize.body(update_context, 'GraphUserUpdateContext')
response = self._send(http_method='PATCH',
location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
version='7.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GraphUser', response)
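# Editorial usage sketch, not part of the generated client. Note that for
# Azure DevOps Services the Graph resource area is served from the vssps
# endpoint; the URL, PAT and subject descriptor below are placeholders.
if __name__ == '__main__':
    from msrest.authentication import BasicAuthentication
    creds = BasicAuthentication('', 'personal-access-token')
    client = GraphClient(base_url='https://vssps.dev.azure.com/yourorg',
                         creds=creds)
    result = client.get_storage_key('subject-descriptor')
    print(result.value)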
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/graph/graph_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/graph/graph_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 11401
}
| 389 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class NotificationClient(Client):
"""Notification
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(NotificationClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def list_logs(self, source, entry_id=None, start_time=None, end_time=None):
"""ListLogs.
Get a list of diagnostic logs for this service.
:param str source: ID specifying which type of logs to check diagnostics for.
:param str entry_id: The ID of the specific log to query for.
:param datetime start_time: Start time for the time range to query in.
:param datetime end_time: End time for the time range to query in.
:rtype: [INotificationDiagnosticLog]
"""
route_values = {}
if source is not None:
route_values['source'] = self._serialize.url('source', source, 'str')
if entry_id is not None:
route_values['entryId'] = self._serialize.url('entry_id', entry_id, 'str')
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query('start_time', start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query('end_time', end_time, 'iso-8601')
response = self._send(http_method='GET',
location_id='991842f3-eb16-4aea-ac81-81353ef2b75c',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[INotificationDiagnosticLog]', self._unwrap_collection(response))
def get_subscription_diagnostics(self, subscription_id):
"""GetSubscriptionDiagnostics.
Get the diagnostics settings for a subscription.
:param str subscription_id: The id of the notifications subscription.
:rtype: :class:`<SubscriptionDiagnostics> <azure.devops.v7_0.notification.models.SubscriptionDiagnostics>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
response = self._send(http_method='GET',
location_id='20f1929d-4be7-4c2e-a74e-d47640ff3418',
version='7.0',
route_values=route_values)
return self._deserialize('SubscriptionDiagnostics', response)
def update_subscription_diagnostics(self, update_parameters, subscription_id):
"""UpdateSubscriptionDiagnostics.
Update the diagnostics settings for a subscription.
:param :class:`<UpdateSubscripitonDiagnosticsParameters> <azure.devops.v7_0.notification.models.UpdateSubscripitonDiagnosticsParameters>` update_parameters:
:param str subscription_id: The id of the notifications subscription.
:rtype: :class:`<SubscriptionDiagnostics> <azure.devops.v7_0.notification.models.SubscriptionDiagnostics>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(update_parameters, 'UpdateSubscripitonDiagnosticsParameters')
response = self._send(http_method='PUT',
location_id='20f1929d-4be7-4c2e-a74e-d47640ff3418',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('SubscriptionDiagnostics', response)
def get_event_type(self, event_type):
"""GetEventType.
Get a specific event type.
:param str event_type: The ID of the event type.
:rtype: :class:`<NotificationEventType> <azure.devops.v7_0.notification.models.NotificationEventType>`
"""
route_values = {}
if event_type is not None:
route_values['eventType'] = self._serialize.url('event_type', event_type, 'str')
response = self._send(http_method='GET',
location_id='cc84fb5f-6247-4c7a-aeae-e5a3c3fddb21',
version='7.0',
route_values=route_values)
return self._deserialize('NotificationEventType', response)
def list_event_types(self, publisher_id=None):
"""ListEventTypes.
List available event types for this service. Optionally filter by only event types for the specified publisher.
:param str publisher_id: Limit to event types for this publisher
:rtype: [NotificationEventType]
"""
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='cc84fb5f-6247-4c7a-aeae-e5a3c3fddb21',
version='7.0',
query_parameters=query_parameters)
return self._deserialize('[NotificationEventType]', self._unwrap_collection(response))
def get_settings(self):
"""GetSettings.
:rtype: :class:`<NotificationAdminSettings> <azure.devops.v7_0.notification.models.NotificationAdminSettings>`
"""
response = self._send(http_method='GET',
location_id='cbe076d8-2803-45ff-8d8d-44653686ea2a',
version='7.0')
return self._deserialize('NotificationAdminSettings', response)
def update_settings(self, update_parameters):
"""UpdateSettings.
:param :class:`<NotificationAdminSettingsUpdateParameters> <azure.devops.v7_0.notification.models.NotificationAdminSettingsUpdateParameters>` update_parameters:
:rtype: :class:`<NotificationAdminSettings> <azure.devops.v7_0.notification.models.NotificationAdminSettings>`
"""
content = self._serialize.body(update_parameters, 'NotificationAdminSettingsUpdateParameters')
response = self._send(http_method='PATCH',
location_id='cbe076d8-2803-45ff-8d8d-44653686ea2a',
version='7.0',
content=content)
return self._deserialize('NotificationAdminSettings', response)
def get_subscriber(self, subscriber_id):
"""GetSubscriber.
Get delivery preferences of a notifications subscriber.
:param str subscriber_id: ID of the user or group.
:rtype: :class:`<NotificationSubscriber> <azure.devops.v7_0.notification.models.NotificationSubscriber>`
"""
route_values = {}
if subscriber_id is not None:
route_values['subscriberId'] = self._serialize.url('subscriber_id', subscriber_id, 'str')
response = self._send(http_method='GET',
location_id='4d5caff1-25ba-430b-b808-7a1f352cc197',
version='7.0',
route_values=route_values)
return self._deserialize('NotificationSubscriber', response)
def update_subscriber(self, update_parameters, subscriber_id):
"""UpdateSubscriber.
Update delivery preferences of a notifications subscriber.
:param :class:`<NotificationSubscriberUpdateParameters> <azure.devops.v7_0.notification.models.NotificationSubscriberUpdateParameters>` update_parameters:
:param str subscriber_id: ID of the user or group.
:rtype: :class:`<NotificationSubscriber> <azure.devops.v7_0.notification.models.NotificationSubscriber>`
"""
route_values = {}
if subscriber_id is not None:
route_values['subscriberId'] = self._serialize.url('subscriber_id', subscriber_id, 'str')
content = self._serialize.body(update_parameters, 'NotificationSubscriberUpdateParameters')
response = self._send(http_method='PATCH',
location_id='4d5caff1-25ba-430b-b808-7a1f352cc197',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('NotificationSubscriber', response)
def query_subscriptions(self, subscription_query):
"""QuerySubscriptions.
Query for subscriptions. A subscription is returned if it matches one or more of the specified conditions.
:param :class:`<SubscriptionQuery> <azure.devops.v7_0.notification.models.SubscriptionQuery>` subscription_query:
:rtype: [NotificationSubscription]
"""
content = self._serialize.body(subscription_query, 'SubscriptionQuery')
response = self._send(http_method='POST',
location_id='6864db85-08c0-4006-8e8e-cc1bebe31675',
version='7.0',
content=content)
return self._deserialize('[NotificationSubscription]', self._unwrap_collection(response))
def create_subscription(self, create_parameters):
"""CreateSubscription.
Create a new subscription.
:param :class:`<NotificationSubscriptionCreateParameters> <azure.devops.v7_0.notification.models.NotificationSubscriptionCreateParameters>` create_parameters:
:rtype: :class:`<NotificationSubscription> <azure.devops.v7_0.notification.models.NotificationSubscription>`
"""
content = self._serialize.body(create_parameters, 'NotificationSubscriptionCreateParameters')
response = self._send(http_method='POST',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='7.0',
content=content)
return self._deserialize('NotificationSubscription', response)
def delete_subscription(self, subscription_id):
"""DeleteSubscription.
Delete a subscription.
:param str subscription_id:
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
self._send(http_method='DELETE',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='7.0',
route_values=route_values)
def get_subscription(self, subscription_id, query_flags=None):
"""GetSubscription.
Get a notification subscription by its ID.
:param str subscription_id:
:param str query_flags:
:rtype: :class:`<NotificationSubscription> <azure.devops.v7_0.notification.models.NotificationSubscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
query_parameters = {}
if query_flags is not None:
query_parameters['queryFlags'] = self._serialize.query('query_flags', query_flags, 'str')
response = self._send(http_method='GET',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('NotificationSubscription', response)
def list_subscriptions(self, target_id=None, ids=None, query_flags=None):
"""ListSubscriptions.
Get a list of notification subscriptions, either by subscription IDs or by all subscriptions for a given user or group.
:param str target_id: User or Group ID
:param [str] ids: List of subscription IDs
:param str query_flags:
:rtype: [NotificationSubscription]
"""
query_parameters = {}
if target_id is not None:
query_parameters['targetId'] = self._serialize.query('target_id', target_id, 'str')
if ids is not None:
ids = ",".join(ids)
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if query_flags is not None:
query_parameters['queryFlags'] = self._serialize.query('query_flags', query_flags, 'str')
response = self._send(http_method='GET',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='7.0',
query_parameters=query_parameters)
return self._deserialize('[NotificationSubscription]', self._unwrap_collection(response))
def update_subscription(self, update_parameters, subscription_id):
"""UpdateSubscription.
Update an existing subscription. Depending on the type of subscription and permissions, the caller can update the description, filter settings, channel (delivery) settings and more.
:param :class:`<NotificationSubscriptionUpdateParameters> <azure.devops.v7_0.notification.models.NotificationSubscriptionUpdateParameters>` update_parameters:
:param str subscription_id:
:rtype: :class:`<NotificationSubscription> <azure.devops.v7_0.notification.models.NotificationSubscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(update_parameters, 'NotificationSubscriptionUpdateParameters')
response = self._send(http_method='PATCH',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('NotificationSubscription', response)
def get_subscription_templates(self):
"""GetSubscriptionTemplates.
Get available subscription templates.
:rtype: [NotificationSubscriptionTemplate]
"""
response = self._send(http_method='GET',
location_id='fa5d24ba-7484-4f3d-888d-4ec6b1974082',
version='7.0')
return self._deserialize('[NotificationSubscriptionTemplate]', self._unwrap_collection(response))
def update_subscription_user_settings(self, user_settings, subscription_id, user_id):
"""UpdateSubscriptionUserSettings.
Update the specified user's settings for the specified subscription. This API is typically used to opt in or out of a shared subscription. User settings can only be applied to shared subscriptions, like team subscriptions or default subscriptions.
:param :class:`<SubscriptionUserSettings> <azure.devops.v7_0.notification.models.SubscriptionUserSettings>` user_settings:
:param str subscription_id:
:param str user_id: ID of the user
:rtype: :class:`<SubscriptionUserSettings> <azure.devops.v7_0.notification.models.SubscriptionUserSettings>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
if user_id is not None:
route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
content = self._serialize.body(user_settings, 'SubscriptionUserSettings')
response = self._send(http_method='PUT',
location_id='ed5a3dff-aeb5-41b1-b4f7-89e66e58b62e',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('SubscriptionUserSettings', response)
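# Editorial usage sketch, not part of the generated client; the URL and PAT
# below are placeholders.
if __name__ == '__main__':
    from msrest.authentication import BasicAuthentication
    creds = BasicAuthentication('', 'personal-access-token')
    client = NotificationClient(base_url='https://dev.azure.com/yourorg',
                                creds=creds)
    for event_type in client.list_event_types():
        print(event_type.id)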
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/notification/notification_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/notification/notification_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 7258
}
| 390 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .pipelines_checks_client import PipelinesChecksClient
__all__ = [
'ApprovalConfig',
'ApprovalConfigSettings',
'GraphSubjectBase',
'CheckConfiguration',
'CheckConfigurationRef',
'CheckRun',
'CheckRunResult',
'CheckSuite',
'CheckSuiteRef',
'CheckSuiteRequest',
'CheckType',
'IdentityRef',
'ReferenceLinks',
'Resource',
'TaskCheckConfig',
'TaskCheckDefinitionReference',
'PipelinesChecksClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/pipelines_checks/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/pipelines_checks/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 266
}
| 391 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SessionRequest(Model):
"""
:param data: Generic property bag to store data about the session
:type data: dict
:param feed: The feed name or id for the session
:type feed: str
    :param source: The type of session. If a known value is provided, the Data dictionary will be validated for the presence of properties required by that type.
:type source: str
"""
_attribute_map = {
'data': {'key': 'data', 'type': '{str}'},
'feed': {'key': 'feed', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'}
}
def __init__(self, data=None, feed=None, source=None):
super(SessionRequest, self).__init__()
self.data = data
self.feed = feed
self.source = source
class SessionResponse(Model):
"""
:param session_id: The unique identifier for the session
:type session_id: str
:param session_name: The name for the session
:type session_name: str
"""
_attribute_map = {
'session_id': {'key': 'sessionId', 'type': 'str'},
'session_name': {'key': 'sessionName', 'type': 'str'}
}
def __init__(self, session_id=None, session_name=None):
super(SessionResponse, self).__init__()
self.session_id = session_id
self.session_name = session_name
__all__ = [
'SessionRequest',
'SessionResponse',
]
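# Editorial sketch: constructing a provenance session request from the model
# defined above. The data keys, feed name and source value are placeholders,
# not documented constants.
if __name__ == '__main__':
    request = SessionRequest(data={'buildId': '1234'},
                             feed='my-feed',
                             source='InternalBuild')
    print(request.feed)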
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/provenance/models.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/provenance/models.py",
"repo_id": "azure-devops-python-api",
"token_count": 616
}
| 392 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class SecurityClient(Client):
"""Security
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(SecurityClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def remove_access_control_entries(self, security_namespace_id, token=None, descriptors=None):
"""RemoveAccessControlEntries.
Remove the specified ACEs from the ACL belonging to the specified token.
:param str security_namespace_id: Security namespace identifier.
:param str token: The token whose ACL should be modified.
:param str descriptors: String containing a list of identity descriptors separated by ',' whose entries should be removed.
:rtype: bool
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
query_parameters = {}
if token is not None:
query_parameters['token'] = self._serialize.query('token', token, 'str')
if descriptors is not None:
query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str')
response = self._send(http_method='DELETE',
location_id='ac08c8ff-4323-4b08-af90-bcd018d380ce',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('bool', response)
def set_access_control_entries(self, container, security_namespace_id):
"""SetAccessControlEntries.
        Add or update ACEs in the ACL for the provided token. The request body contains the target token, a list of [ACEs](https://docs.microsoft.com/en-us/rest/api/azure/devops/security/access%20control%20entries/set%20access%20control%20entries?#accesscontrolentry) and an optional merge parameter. In the case of a collision (by identity descriptor) with an existing ACE in the ACL, the "merge" parameter determines the behavior. If set, the existing ACE has its allow and deny merged with the incoming ACE's allow and deny. If unset, the existing ACE is displaced.
        :param object container:
:param str security_namespace_id: Security namespace identifier.
:rtype: [AccessControlEntry]
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
content = self._serialize.body(container, 'object')
response = self._send(http_method='POST',
location_id='ac08c8ff-4323-4b08-af90-bcd018d380ce',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('[AccessControlEntry]', self._unwrap_collection(response))
def query_access_control_lists(self, security_namespace_id, token=None, descriptors=None, include_extended_info=None, recurse=None):
"""QueryAccessControlLists.
Return a list of access control lists for the specified security namespace and token. All ACLs in the security namespace will be retrieved if no optional parameters are provided.
:param str security_namespace_id: Security namespace identifier.
:param str token: Security token
:param str descriptors: An optional filter string containing a list of identity descriptors separated by ',' whose ACEs should be retrieved. If this is left null, entire ACLs will be returned.
:param bool include_extended_info: If true, populate the extended information properties for the access control entries contained in the returned lists.
:param bool recurse: If true and this is a hierarchical namespace, return child ACLs of the specified token.
:rtype: [AccessControlList]
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
query_parameters = {}
if token is not None:
query_parameters['token'] = self._serialize.query('token', token, 'str')
if descriptors is not None:
query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str')
if include_extended_info is not None:
query_parameters['includeExtendedInfo'] = self._serialize.query('include_extended_info', include_extended_info, 'bool')
if recurse is not None:
query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool')
response = self._send(http_method='GET',
location_id='18a2ad18-7571-46ae-bec7-0c7da1495885',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[AccessControlList]', self._unwrap_collection(response))
def remove_access_control_lists(self, security_namespace_id, tokens=None, recurse=None):
"""RemoveAccessControlLists.
        Remove access control lists under the specified security namespace.
:param str security_namespace_id: Security namespace identifier.
:param str tokens: One or more comma-separated security tokens
:param bool recurse: If true and this is a hierarchical namespace, also remove child ACLs of the specified tokens.
:rtype: bool
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
query_parameters = {}
if tokens is not None:
query_parameters['tokens'] = self._serialize.query('tokens', tokens, 'str')
if recurse is not None:
query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool')
response = self._send(http_method='DELETE',
location_id='18a2ad18-7571-46ae-bec7-0c7da1495885',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('bool', response)
def set_access_control_lists(self, access_control_lists, security_namespace_id):
"""SetAccessControlLists.
Create or update one or more access control lists. All data that currently exists for the ACLs supplied will be overwritten.
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v7_0.security.models.VssJsonCollectionWrapper>` access_control_lists: A list of ACLs to create or update.
:param str security_namespace_id: Security namespace identifier.
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
content = self._serialize.body(access_control_lists, 'VssJsonCollectionWrapper')
self._send(http_method='POST',
location_id='18a2ad18-7571-46ae-bec7-0c7da1495885',
version='7.0',
route_values=route_values,
content=content)
def has_permissions_batch(self, eval_batch):
"""HasPermissionsBatch.
Evaluates multiple permissions for the calling user. Note: This method does not aggregate the results, nor does it short-circuit if one of the permissions evaluates to false.
:param :class:`<PermissionEvaluationBatch> <azure.devops.v7_0.security.models.PermissionEvaluationBatch>` eval_batch: The set of evaluation requests.
:rtype: :class:`<PermissionEvaluationBatch> <azure.devops.v7_0.security.models.PermissionEvaluationBatch>`
"""
content = self._serialize.body(eval_batch, 'PermissionEvaluationBatch')
response = self._send(http_method='POST',
location_id='cf1faa59-1b63-4448-bf04-13d981a46f5d',
version='7.0',
content=content)
return self._deserialize('PermissionEvaluationBatch', response)
def has_permissions(self, security_namespace_id, permissions=None, tokens=None, always_allow_administrators=None, delimiter=None):
"""HasPermissions.
Evaluates whether the caller has the specified permissions on the specified set of security tokens.
:param str security_namespace_id: Security namespace identifier.
:param int permissions: Permissions to evaluate.
:param str tokens: One or more security tokens to evaluate.
:param bool always_allow_administrators: If true and if the caller is an administrator, always return true.
:param str delimiter: Optional security token separator. Defaults to ",".
:rtype: [bool]
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
if permissions is not None:
route_values['permissions'] = self._serialize.url('permissions', permissions, 'int')
query_parameters = {}
if tokens is not None:
query_parameters['tokens'] = self._serialize.query('tokens', tokens, 'str')
if always_allow_administrators is not None:
query_parameters['alwaysAllowAdministrators'] = self._serialize.query('always_allow_administrators', always_allow_administrators, 'bool')
if delimiter is not None:
query_parameters['delimiter'] = self._serialize.query('delimiter', delimiter, 'str')
response = self._send(http_method='GET',
location_id='dd3b8bd6-c7fc-4cbd-929a-933d9c011c9d',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[bool]', self._unwrap_collection(response))
def remove_permission(self, security_namespace_id, descriptor, permissions=None, token=None):
"""RemovePermission.
Removes the specified permissions on a security token for a user or group.
:param str security_namespace_id: Security namespace identifier.
:param str descriptor: Identity descriptor of the user to remove permissions for.
:param int permissions: Permissions to remove.
:param str token: Security token to remove permissions for.
:rtype: :class:`<AccessControlEntry> <azure.devops.v7_0.security.models.AccessControlEntry>`
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
if permissions is not None:
route_values['permissions'] = self._serialize.url('permissions', permissions, 'int')
query_parameters = {}
if descriptor is not None:
query_parameters['descriptor'] = self._serialize.query('descriptor', descriptor, 'str')
if token is not None:
query_parameters['token'] = self._serialize.query('token', token, 'str')
response = self._send(http_method='DELETE',
location_id='dd3b8bd6-c7fc-4cbd-929a-933d9c011c9d',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('AccessControlEntry', response)
def query_security_namespaces(self, security_namespace_id=None, local_only=None):
"""QuerySecurityNamespaces.
List all security namespaces or just the specified namespace.
:param str security_namespace_id: Security namespace identifier.
:param bool local_only: If true, retrieve only local security namespaces.
:rtype: [SecurityNamespaceDescription]
"""
route_values = {}
if security_namespace_id is not None:
route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
query_parameters = {}
if local_only is not None:
query_parameters['localOnly'] = self._serialize.query('local_only', local_only, 'bool')
response = self._send(http_method='GET',
location_id='ce7b9f95-fde9-4be8-a86d-83b366f0b87a',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[SecurityNamespaceDescription]', self._unwrap_collection(response))
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/security/security_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/security/security_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 5587
}
| 393 |
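A usage sketch for the client above, assuming the SDK's released client factory exposes get_security_client the way it does for the other released clients; the organization URL and PAT are placeholders.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Authenticate with a personal access token (placeholder values).
connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-pat'))
security_client = connection.clients.get_security_client()

# Enumerate every security namespace visible to the caller.
for namespace in security_client.query_security_namespaces():
    print(namespace.namespace_id, namespace.name)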
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models


class TokenAdminClient(Client):
"""TokenAdmin
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(TokenAdminClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'af68438b-ed04-4407-9eb6-f1dbae3f922e'
def list_personal_access_tokens(self, subject_descriptor, page_size=None, continuation_token=None, is_public=None):
"""ListPersonalAccessTokens.
        Lists all the session token details of the personal access tokens (PATs) for a particular user.
        :param str subject_descriptor: The descriptor of the target user.
:param int page_size: The maximum number of results to return on each page.
:param str continuation_token: An opaque data blob that allows the next page of data to resume immediately after where the previous page ended. The only reliable way to know if there is more data left is the presence of a continuation token.
:param bool is_public: Set to false for PAT tokens and true for SSH tokens.
:rtype: :class:`<TokenAdminPagedSessionTokens> <azure.devops.v7_0.token_admin.models.TokenAdminPagedSessionTokens>`
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
query_parameters = {}
if page_size is not None:
query_parameters['pageSize'] = self._serialize.query('page_size', page_size, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if is_public is not None:
query_parameters['isPublic'] = self._serialize.query('is_public', is_public, 'bool')
response = self._send(http_method='GET',
location_id='af68438b-ed04-4407-9eb6-f1dbae3f922e',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('TokenAdminPagedSessionTokens', response)
def create_revocation_rule(self, revocation_rule):
"""CreateRevocationRule.
Creates a revocation rule to prevent the further usage of any OAuth authorizations that were created before the current point in time and which match the conditions in the rule.
        :param :class:`<TokenAdminRevocationRule> <azure.devops.v7_0.token_admin.models.TokenAdminRevocationRule>` revocation_rule: The revocation rule to create. The rule must specify a space-separated list of scopes, after which preexisting OAuth authorizations that match any of the scopes will be rejected. For a list of all OAuth scopes supported by VSTS, see: https://docs.microsoft.com/en-us/vsts/integrate/get-started/authentication/oauth?view=vsts#scopes The rule may also specify the time before which to revoke tokens.
"""
content = self._serialize.body(revocation_rule, 'TokenAdminRevocationRule')
self._send(http_method='POST',
location_id='ee4afb16-e7ab-4ed8-9d4b-4ef3e78f97e4',
version='7.0',
content=content)
def revoke_authorizations(self, revocations, is_public=None):
"""RevokeAuthorizations.
Revokes the listed OAuth authorizations.
        :param [TokenAdminRevocation] revocations: The list of objects containing the authorization IDs of the OAuth authorizations, such as session tokens retrieved by listing a user's PATs, that should be revoked.
:param bool is_public: Set to false for PAT tokens and true for SSH tokens.
"""
query_parameters = {}
if is_public is not None:
query_parameters['isPublic'] = self._serialize.query('is_public', is_public, 'bool')
content = self._serialize.body(revocations, '[TokenAdminRevocation]')
self._send(http_method='POST',
location_id='a9c08b2c-5466-4e22-8626-1ff304ffdf0f',
version='7.0',
query_parameters=query_parameters,
content=content)
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/token_admin/token_admin_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/token_admin/token_admin_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 1856
}
| 394 |
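A sketch of paging through a user's PATs with the client above; the vssps host, the subject descriptor, and the value/continuation_token fields on the paged result are assumptions, not confirmed by this file.

from msrest.authentication import BasicAuthentication
from azure.devops.v7_0.token_admin.token_admin_client import TokenAdminClient

# Token administration is an organization-level service (vssps host assumed).
client = TokenAdminClient(base_url='https://vssps.dev.azure.com/your-org',
                          creds=BasicAuthentication('', 'admin-pat'))

page = client.list_personal_access_tokens(
    subject_descriptor='aad.placeholder-descriptor',  # placeholder descriptor
    page_size=100)
# TokenAdminPagedSessionTokens is assumed to carry the tokens in .value and
# the bookmark for the next page in .continuation_token.
for session_token in page.value:
    print(session_token)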
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .work_item_tracking_process_client import WorkItemTrackingProcessClient


__all__ = [
'AddProcessWorkItemTypeFieldRequest',
'Control',
'CreateProcessModel',
'CreateProcessRuleRequest',
'CreateProcessWorkItemTypeRequest',
'Extension',
'FieldModel',
'FieldRuleModel',
'FormLayout',
'Group',
'HideStateModel',
'Page',
'PickList',
'PickListMetadata',
'ProcessBehavior',
'ProcessBehaviorCreateRequest',
'ProcessBehaviorField',
'ProcessBehaviorReference',
'ProcessBehaviorUpdateRequest',
'ProcessInfo',
'ProcessModel',
'ProcessProperties',
'ProcessRule',
'ProcessWorkItemType',
'ProcessWorkItemTypeField',
'ProjectReference',
'RuleAction',
'RuleActionModel',
'RuleCondition',
'RuleConditionModel',
'Section',
'UpdateProcessModel',
'UpdateProcessRuleRequest',
'UpdateProcessWorkItemTypeFieldRequest',
'UpdateProcessWorkItemTypeRequest',
'WitContribution',
'WorkItemBehavior',
'WorkItemBehaviorField',
'WorkItemBehaviorReference',
'WorkItemStateInputModel',
'WorkItemStateResultModel',
'WorkItemTypeBehavior',
'WorkItemTypeModel',
'WorkItemTrackingProcessClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/work_item_tracking_process/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/work_item_tracking_process/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 533
}
| 395 |
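As a quick illustration of the exported models, a sketch that describes a new inherited process; the constructor keywords and the Agile parent-process GUID are assumptions based on the public REST docs, not on this file.

from azure.devops.v7_0.work_item_tracking_process import CreateProcessModel

# Describe a process inheriting from the system Agile process.
new_process = CreateProcessModel(
    name='Custom Agile',                    # display name for the new process
    description='Agile plus custom fields',
    parent_process_type_id='adcc42ab-9882-485e-a3ed-7678f01f66bc')  # assumed Agile GUID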
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .cix_client import CixClient


__all__ = [
'ConfigurationFile',
'CreatedResources',
'CreatePipelineConnectionInputs',
'DetectedBuildFramework',
'DetectedBuildTarget',
'Operation',
'OperationReference',
'OperationResultReference',
'PipelineConnection',
'ReferenceLinks',
'ResourceCreationParameter',
'TeamProject',
'TeamProjectReference',
'WebApiTeamRef',
'CixClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/cix/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/cix/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 244
}
| 396 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .dashboard_client import DashboardClient


__all__ = [
'CopyDashboardOptions',
'CopyDashboardResponse',
'Dashboard',
'DashboardGroup',
'DashboardGroupEntry',
'DashboardGroupEntryResponse',
'DashboardResponse',
'LightboxOptions',
'ReferenceLinks',
'SemanticVersion',
'TeamContext',
'Widget',
'WidgetMetadata',
'WidgetMetadataResponse',
'WidgetPosition',
'WidgetResponse',
'WidgetSize',
'WidgetsVersionedList',
'WidgetTypesResponse',
'DashboardClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/dashboard/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/dashboard/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 286
}
| 397 |
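A sketch of obtaining the 7.1 dashboard client; clients_v7_1 and get_dashboard_client are assumptions consistent with how this SDK names its versioned client factories, and the project/team names are placeholders.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
from azure.devops.v7_1.dashboard.models import TeamContext

connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-pat'))
dashboard_client = connection.clients_v7_1.get_dashboard_client()

# Most dashboard operations are scoped by a TeamContext.
team = TeamContext(project='MyProject', team='MyProject Team')  # placeholder names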
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models


class FeedClient(Client):
"""Feed
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(FeedClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '7ab4e64e-c4d8-4f50-ae73-5ef2e21642a5'
def get_badge(self, feed_id, package_id, project=None, **kwargs):
"""GetBadge.
        [Preview API] Generate an SVG badge for the latest version of a package. The generated SVG is typically used as the image in an HTML link which takes users to the feed containing the package to accelerate discovery and consumption.
:param str feed_id: Name or Id of the feed.
:param str package_id: Id of the package (GUID Id, not name).
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
response = self._send(http_method='GET',
location_id='61d885fd-10f3-4a55-82b6-476d866b673f',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='image/svg+xml')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_feed_change(self, feed_id, project=None):
"""GetFeedChange.
[Preview API] Query a feed to determine its current state.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
:rtype: :class:`<FeedChange> <azure.devops.v7_1.feed.models.FeedChange>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
response = self._send(http_method='GET',
location_id='29ba2dad-389a-4661-b5d3-de76397ca05b',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('FeedChange', response)
def get_feed_changes(self, project=None, include_deleted=None, continuation_token=None, batch_size=None):
"""GetFeedChanges.
[Preview API] Query to determine which feeds have changed since the last call, tracked through the provided continuationToken. Only changes to a feed itself are returned and impact the continuationToken, not additions or alterations to packages within the feeds.
:param str project: Project ID or project name
:param bool include_deleted: If true, get changes for all feeds including deleted feeds. The default value is false.
:param long continuation_token: A continuation token which acts as a bookmark to a previously retrieved change. This token allows the user to continue retrieving changes in batches, picking up where the previous batch left off. If specified, all the changes that occur strictly after the token will be returned. If not specified or 0, iteration will start with the first change.
:param int batch_size: Number of package changes to fetch. The default value is 1000. The maximum value is 2000.
:rtype: :class:`<FeedChangesResponse> <azure.devops.v7_1.feed.models.FeedChangesResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'long')
if batch_size is not None:
query_parameters['batchSize'] = self._serialize.query('batch_size', batch_size, 'int')
response = self._send(http_method='GET',
location_id='29ba2dad-389a-4661-b5d3-de76397ca05b',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('FeedChangesResponse', response)
def get_feeds_from_recycle_bin(self, project=None):
"""GetFeedsFromRecycleBin.
[Preview API] Query for feeds within the recycle bin.
:param str project: Project ID or project name
:rtype: [Feed]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='0cee643d-beb9-41f8-9368-3ada763a8344',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('[Feed]', self._unwrap_collection(response))
def permanent_delete_feed(self, feed_id, project=None):
"""PermanentDeleteFeed.
[Preview API]
:param str feed_id:
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
self._send(http_method='DELETE',
location_id='0cee643d-beb9-41f8-9368-3ada763a8344',
version='7.1-preview.1',
route_values=route_values)
def restore_deleted_feed(self, patch_json, feed_id, project=None):
"""RestoreDeletedFeed.
[Preview API]
        :param [JsonPatchOperation] patch_json:
:param str feed_id:
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(patch_json, '[JsonPatchOperation]')
self._send(http_method='PATCH',
location_id='0cee643d-beb9-41f8-9368-3ada763a8344',
version='7.1-preview.1',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
def create_feed(self, feed, project=None):
"""CreateFeed.
[Preview API] Create a feed, a container for various package types.
:param :class:`<Feed> <azure.devops.v7_1.feed.models.Feed>` feed: A JSON object containing both required and optional attributes for the feed. Name is the only required value.
:param str project: Project ID or project name
:rtype: :class:`<Feed> <azure.devops.v7_1.feed.models.Feed>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(feed, 'Feed')
response = self._send(http_method='POST',
location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Feed', response)
def delete_feed(self, feed_id, project=None):
"""DeleteFeed.
[Preview API] Remove a feed and all its packages. The feed moves to the recycle bin and is reversible.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
self._send(http_method='DELETE',
location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c',
version='7.1-preview.1',
route_values=route_values)
def get_feed(self, feed_id, project=None, include_deleted_upstreams=None):
"""GetFeed.
[Preview API] Get the settings for a specific feed.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:param bool include_deleted_upstreams: Include upstreams that have been deleted in the response.
:rtype: :class:`<Feed> <azure.devops.v7_1.feed.models.Feed>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
query_parameters = {}
if include_deleted_upstreams is not None:
query_parameters['includeDeletedUpstreams'] = self._serialize.query('include_deleted_upstreams', include_deleted_upstreams, 'bool')
response = self._send(http_method='GET',
location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Feed', response)
def get_feeds(self, project=None, feed_role=None, include_deleted_upstreams=None, include_urls=None):
"""GetFeeds.
[Preview API] Get all feeds in an account where you have the provided role access.
:param str project: Project ID or project name
:param str feed_role: Filter by this role, either Administrator(4), Contributor(3), or Reader(2) level permissions.
:param bool include_deleted_upstreams: Include upstreams that have been deleted in the response.
:param bool include_urls: Resolve names if true
:rtype: [Feed]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if feed_role is not None:
query_parameters['feedRole'] = self._serialize.query('feed_role', feed_role, 'str')
if include_deleted_upstreams is not None:
query_parameters['includeDeletedUpstreams'] = self._serialize.query('include_deleted_upstreams', include_deleted_upstreams, 'bool')
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
response = self._send(http_method='GET',
location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Feed]', self._unwrap_collection(response))
def update_feed(self, feed, feed_id, project=None):
"""UpdateFeed.
[Preview API] Change the attributes of a feed.
:param :class:`<FeedUpdate> <azure.devops.v7_1.feed.models.FeedUpdate>` feed: A JSON object containing the feed settings to be updated.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:rtype: :class:`<Feed> <azure.devops.v7_1.feed.models.Feed>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(feed, 'FeedUpdate')
response = self._send(http_method='PATCH',
location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Feed', response)
def get_global_permissions(self, include_ids=None):
"""GetGlobalPermissions.
[Preview API] Get all service-wide feed creation and administration permissions.
:param bool include_ids: Set to true to add IdentityIds to the permission objects.
:rtype: [GlobalPermission]
"""
query_parameters = {}
if include_ids is not None:
query_parameters['includeIds'] = self._serialize.query('include_ids', include_ids, 'bool')
response = self._send(http_method='GET',
location_id='a74419ef-b477-43df-8758-3cd1cd5f56c6',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[GlobalPermission]', self._unwrap_collection(response))
def set_global_permissions(self, global_permissions):
"""SetGlobalPermissions.
[Preview API] Set service-wide permissions that govern feed creation and administration.
:param [GlobalPermission] global_permissions: New permissions for the organization.
:rtype: [GlobalPermission]
"""
content = self._serialize.body(global_permissions, '[GlobalPermission]')
response = self._send(http_method='PATCH',
location_id='a74419ef-b477-43df-8758-3cd1cd5f56c6',
version='7.1-preview.1',
content=content)
return self._deserialize('[GlobalPermission]', self._unwrap_collection(response))
def get_package_changes(self, feed_id, project=None, continuation_token=None, batch_size=None):
"""GetPackageChanges.
[Preview API] Get a batch of package changes made to a feed. The changes returned are 'most recent change' so if an Add is followed by an Update before you begin enumerating, you'll only see one change in the batch. While consuming batches using the continuation token, you may see changes to the same package version multiple times if they are happening as you enumerate.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:param long continuation_token: A continuation token which acts as a bookmark to a previously retrieved change. This token allows the user to continue retrieving changes in batches, picking up where the previous batch left off. If specified, all the changes that occur strictly after the token will be returned. If not specified or 0, iteration will start with the first change.
:param int batch_size: Number of package changes to fetch. The default value is 1000. The maximum value is 2000.
:rtype: :class:`<PackageChangesResponse> <azure.devops.v7_1.feed.models.PackageChangesResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'long')
if batch_size is not None:
query_parameters['batchSize'] = self._serialize.query('batch_size', batch_size, 'int')
response = self._send(http_method='GET',
location_id='323a0631-d083-4005-85ae-035114dfb681',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PackageChangesResponse', response)
def query_package_metrics(self, package_id_query, feed_id, project=None):
"""QueryPackageMetrics.
[Preview API]
:param :class:`<PackageMetricsQuery> <azure.devops.v7_1.feed.models.PackageMetricsQuery>` package_id_query:
:param str feed_id:
:param str project: Project ID or project name
:rtype: [PackageMetrics]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(package_id_query, 'PackageMetricsQuery')
response = self._send(http_method='POST',
location_id='bddc9b3c-8a59-4a9f-9b40-ee1dcaa2cc0d',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[PackageMetrics]', self._unwrap_collection(response))
def get_package(self, feed_id, package_id, project=None, include_all_versions=None, include_urls=None, is_listed=None, is_release=None, include_deleted=None, include_description=None):
"""GetPackage.
[Preview API] Get details about a specific package.
:param str feed_id: Name or Id of the feed.
:param str package_id: The package Id (GUID Id, not the package name).
:param str project: Project ID or project name
:param bool include_all_versions: True to return all versions of the package in the response. Default is false (latest version only).
:param bool include_urls: True to return REST Urls with the response. Default is True.
:param bool is_listed: Only applicable for NuGet packages, setting it for other package types will result in a 404. If false, delisted package versions will be returned. Use this to filter the response when includeAllVersions is set to true. Default is unset (do not return delisted packages).
        :param bool is_release: Only applicable for NuGet packages. Use this to filter the response when includeAllVersions is set to true. Default is True (only return packages without prerelease versioning).
:param bool include_deleted: Return deleted or unpublished versions of packages in the response. Default is False.
:param bool include_description: Return the description for every version of each package in the response. Default is False.
:rtype: :class:`<Package> <azure.devops.v7_1.feed.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
query_parameters = {}
if include_all_versions is not None:
query_parameters['includeAllVersions'] = self._serialize.query('include_all_versions', include_all_versions, 'bool')
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
if is_listed is not None:
query_parameters['isListed'] = self._serialize.query('is_listed', is_listed, 'bool')
if is_release is not None:
query_parameters['isRelease'] = self._serialize.query('is_release', is_release, 'bool')
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if include_description is not None:
query_parameters['includeDescription'] = self._serialize.query('include_description', include_description, 'bool')
response = self._send(http_method='GET',
location_id='7a20d846-c929-4acc-9ea2-0d5a7df1b197',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Package', response)
def get_packages(self, feed_id, project=None, protocol_type=None, package_name_query=None, normalized_package_name=None, include_urls=None, include_all_versions=None, is_listed=None, get_top_package_versions=None, is_release=None, include_description=None, top=None, skip=None, include_deleted=None, is_cached=None, direct_upstream_id=None):
"""GetPackages.
[Preview API] Get details about all of the packages in the feed. Use the various filters to include or exclude information from the result set.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:param str protocol_type: One of the supported artifact package types.
:param str package_name_query: Filter to packages that contain the provided string. Characters in the string must conform to the package name constraints.
:param str normalized_package_name: [Obsolete] Used for legacy scenarios and may be removed in future versions.
:param bool include_urls: True to return REST Urls with the response. Default is True.
:param bool include_all_versions: True to return all versions of the package in the response. Default is false (latest version only).
:param bool is_listed: Only applicable for NuGet packages, setting it for other package types will result in a 404. If false, delisted package versions will be returned. Use this to filter the response when includeAllVersions is set to true. Default is unset (do not return delisted packages).
:param bool get_top_package_versions: Changes the behavior of $top and $skip to return all versions of each package up to $top. Must be used in conjunction with includeAllVersions=true
        :param bool is_release: Only applicable for NuGet packages. Use this to filter the response when includeAllVersions is set to true. Default is True (only return packages without prerelease versioning).
:param bool include_description: Return the description for every version of each package in the response. Default is False.
:param int top: Get the top N packages (or package versions where getTopPackageVersions=true)
:param int skip: Skip the first N packages (or package versions where getTopPackageVersions=true)
:param bool include_deleted: Return deleted or unpublished versions of packages in the response. Default is False.
:param bool is_cached: [Obsolete] Used for legacy scenarios and may be removed in future versions.
:param str direct_upstream_id: Filter results to return packages from a specific upstream.
:rtype: [Package]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
query_parameters = {}
if protocol_type is not None:
query_parameters['protocolType'] = self._serialize.query('protocol_type', protocol_type, 'str')
if package_name_query is not None:
query_parameters['packageNameQuery'] = self._serialize.query('package_name_query', package_name_query, 'str')
if normalized_package_name is not None:
query_parameters['normalizedPackageName'] = self._serialize.query('normalized_package_name', normalized_package_name, 'str')
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
if include_all_versions is not None:
query_parameters['includeAllVersions'] = self._serialize.query('include_all_versions', include_all_versions, 'bool')
if is_listed is not None:
query_parameters['isListed'] = self._serialize.query('is_listed', is_listed, 'bool')
if get_top_package_versions is not None:
query_parameters['getTopPackageVersions'] = self._serialize.query('get_top_package_versions', get_top_package_versions, 'bool')
if is_release is not None:
query_parameters['isRelease'] = self._serialize.query('is_release', is_release, 'bool')
if include_description is not None:
query_parameters['includeDescription'] = self._serialize.query('include_description', include_description, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if is_cached is not None:
query_parameters['isCached'] = self._serialize.query('is_cached', is_cached, 'bool')
if direct_upstream_id is not None:
query_parameters['directUpstreamId'] = self._serialize.query('direct_upstream_id', direct_upstream_id, 'str')
response = self._send(http_method='GET',
location_id='7a20d846-c929-4acc-9ea2-0d5a7df1b197',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Package]', self._unwrap_collection(response))
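    # Usage sketch (illustrative comment, not part of the generated client),
    # assuming the 7.1 client factory exposes get_feed_client:
    #
    #   feed_client = connection.clients_v7_1.get_feed_client()
    #   for feed in feed_client.get_feeds(feed_role='contributor'):
    #       packages = feed_client.get_packages(feed.id, top=10)
    #       print(feed.name, [p.name for p in packages])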
def get_feed_permissions(self, feed_id, project=None, include_ids=None, exclude_inherited_permissions=None, identity_descriptor=None, include_deleted_feeds=None):
"""GetFeedPermissions.
[Preview API] Get the permissions for a feed.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:param bool include_ids: True to include user Ids in the response. Default is false.
:param bool exclude_inherited_permissions: True to only return explicitly set permissions on the feed. Default is false.
:param str identity_descriptor: Filter permissions to the provided identity.
:param bool include_deleted_feeds: If includeDeletedFeeds is true, then feedId must be specified by name and not by Guid.
:rtype: [FeedPermission]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
query_parameters = {}
if include_ids is not None:
query_parameters['includeIds'] = self._serialize.query('include_ids', include_ids, 'bool')
if exclude_inherited_permissions is not None:
query_parameters['excludeInheritedPermissions'] = self._serialize.query('exclude_inherited_permissions', exclude_inherited_permissions, 'bool')
if identity_descriptor is not None:
query_parameters['identityDescriptor'] = self._serialize.query('identity_descriptor', identity_descriptor, 'str')
if include_deleted_feeds is not None:
query_parameters['includeDeletedFeeds'] = self._serialize.query('include_deleted_feeds', include_deleted_feeds, 'bool')
response = self._send(http_method='GET',
location_id='be8c1476-86a7-44ed-b19d-aec0e9275cd8',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[FeedPermission]', self._unwrap_collection(response))
def set_feed_permissions(self, feed_permission, feed_id, project=None):
"""SetFeedPermissions.
[Preview API] Update the permissions on a feed.
:param [FeedPermission] feed_permission: Permissions to set.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:rtype: [FeedPermission]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(feed_permission, '[FeedPermission]')
response = self._send(http_method='PATCH',
location_id='be8c1476-86a7-44ed-b19d-aec0e9275cd8',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[FeedPermission]', self._unwrap_collection(response))
def get_package_version_provenance(self, feed_id, package_id, package_version_id, project=None):
"""GetPackageVersionProvenance.
[Preview API] Gets provenance for a package version.
:param str feed_id: Name or Id of the feed.
:param str package_id: Id of the package (GUID Id, not name).
:param str package_version_id: Id of the package version (GUID Id, not name).
:param str project: Project ID or project name
:rtype: :class:`<PackageVersionProvenance> <azure.devops.v7_1.feed.models.PackageVersionProvenance>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
if package_version_id is not None:
route_values['packageVersionId'] = self._serialize.url('package_version_id', package_version_id, 'str')
response = self._send(http_method='GET',
location_id='0aaeabd4-85cd-4686-8a77-8d31c15690b8',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('PackageVersionProvenance', response)
def empty_recycle_bin(self, feed_id, project=None):
"""EmptyRecycleBin.
        [Preview API] Queues a job to remove all package versions from a feed's recycle bin.
:param str feed_id: Name or Id of the feed
:param str project: Project ID or project name
:rtype: :class:`<OperationReference> <azure.devops.v7_1.feed.models.OperationReference>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
response = self._send(http_method='DELETE',
location_id='2704e72c-f541-4141-99be-2004b50b05fa',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('OperationReference', response)
def get_recycle_bin_package(self, feed_id, package_id, project=None, include_urls=None):
"""GetRecycleBinPackage.
[Preview API] Get information about a package and all its versions within the recycle bin.
:param str feed_id: Name or Id of the feed.
:param str package_id: The package Id (GUID Id, not the package name).
:param str project: Project ID or project name
:param bool include_urls: True to return REST Urls with the response. Default is True.
:rtype: :class:`<Package> <azure.devops.v7_1.feed.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
query_parameters = {}
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
response = self._send(http_method='GET',
location_id='2704e72c-f541-4141-99be-2004b50b05fa',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Package', response)
def get_recycle_bin_packages(self, feed_id, project=None, protocol_type=None, package_name_query=None, include_urls=None, top=None, skip=None, include_all_versions=None):
"""GetRecycleBinPackages.
[Preview API] Query for packages within the recycle bin.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:param str protocol_type: Type of package (e.g. NuGet, npm, ...).
:param str package_name_query: Filter to packages matching this name.
:param bool include_urls: True to return REST Urls with the response. Default is True.
:param int top: Get the top N packages.
:param int skip: Skip the first N packages.
:param bool include_all_versions: True to return all versions of the package in the response. Default is false (latest version only).
:rtype: [Package]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
query_parameters = {}
if protocol_type is not None:
query_parameters['protocolType'] = self._serialize.query('protocol_type', protocol_type, 'str')
if package_name_query is not None:
query_parameters['packageNameQuery'] = self._serialize.query('package_name_query', package_name_query, 'str')
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if include_all_versions is not None:
query_parameters['includeAllVersions'] = self._serialize.query('include_all_versions', include_all_versions, 'bool')
response = self._send(http_method='GET',
location_id='2704e72c-f541-4141-99be-2004b50b05fa',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Package]', self._unwrap_collection(response))
def get_recycle_bin_package_version(self, feed_id, package_id, package_version_id, project=None, include_urls=None):
"""GetRecycleBinPackageVersion.
[Preview API] Get information about a package version within the recycle bin.
:param str feed_id: Name or Id of the feed.
:param str package_id: The package Id (GUID Id, not the package name).
        :param str package_version_id: The package version Id (GUID Id, not the version string).
:param str project: Project ID or project name
:param bool include_urls: True to return REST Urls with the response. Default is True.
:rtype: :class:`<RecycleBinPackageVersion> <azure.devops.v7_1.feed.models.RecycleBinPackageVersion>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
if package_version_id is not None:
route_values['packageVersionId'] = self._serialize.url('package_version_id', package_version_id, 'str')
query_parameters = {}
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
response = self._send(http_method='GET',
location_id='aceb4be7-8737-4820-834c-4c549e10fdc7',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('RecycleBinPackageVersion', response)
def get_recycle_bin_package_versions(self, feed_id, package_id, project=None, include_urls=None):
"""GetRecycleBinPackageVersions.
[Preview API] Get a list of package versions within the recycle bin.
:param str feed_id: Name or Id of the feed.
:param str package_id: The package Id (GUID Id, not the package name).
:param str project: Project ID or project name
:param bool include_urls: True to return REST Urls with the response. Default is True.
:rtype: [RecycleBinPackageVersion]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
query_parameters = {}
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
response = self._send(http_method='GET',
location_id='aceb4be7-8737-4820-834c-4c549e10fdc7',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[RecycleBinPackageVersion]', self._unwrap_collection(response))
def delete_feed_retention_policies(self, feed_id, project=None):
"""DeleteFeedRetentionPolicies.
[Preview API] Delete the retention policy for a feed.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
self._send(http_method='DELETE',
location_id='ed52a011-0112-45b5-9f9e-e14efffb3193',
version='7.1-preview.1',
route_values=route_values)
def get_feed_retention_policies(self, feed_id, project=None):
"""GetFeedRetentionPolicies.
[Preview API] Get the retention policy for a feed.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
:rtype: :class:`<FeedRetentionPolicy> <azure.devops.v7_1.feed.models.FeedRetentionPolicy>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
response = self._send(http_method='GET',
location_id='ed52a011-0112-45b5-9f9e-e14efffb3193',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('FeedRetentionPolicy', response)
def set_feed_retention_policies(self, policy, feed_id, project=None):
"""SetFeedRetentionPolicies.
[Preview API] Set the retention policy for a feed.
:param :class:`<FeedRetentionPolicy> <azure.devops.v7_1.feed.models.FeedRetentionPolicy>` policy: Feed retention policy.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
:rtype: :class:`<FeedRetentionPolicy> <azure.devops.v7_1.feed.models.FeedRetentionPolicy>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(policy, 'FeedRetentionPolicy')
response = self._send(http_method='PUT',
location_id='ed52a011-0112-45b5-9f9e-e14efffb3193',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('FeedRetentionPolicy', response)
def query_package_version_metrics(self, package_version_id_query, feed_id, package_id, project=None):
"""QueryPackageVersionMetrics.
[Preview API]
:param :class:`<PackageVersionMetricsQuery> <azure.devops.v7_1.feed.models.PackageVersionMetricsQuery>` package_version_id_query:
:param str feed_id:
:param str package_id:
:param str project: Project ID or project name
:rtype: [PackageVersionMetrics]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
content = self._serialize.body(package_version_id_query, 'PackageVersionMetricsQuery')
response = self._send(http_method='POST',
location_id='e6ae8caa-b6a8-4809-b840-91b2a42c19ad',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[PackageVersionMetrics]', self._unwrap_collection(response))
def get_package_version(self, feed_id, package_id, package_version_id, project=None, include_urls=None, is_listed=None, is_deleted=None):
"""GetPackageVersion.
[Preview API] Get details about a specific package version.
:param str feed_id: Name or Id of the feed.
:param str package_id: Id of the package (GUID Id, not name).
:param str package_version_id: Id of the package version (GUID Id, not name).
:param str project: Project ID or project name
:param bool include_urls: True to include urls for each version. Default is true.
:param bool is_listed: Only applicable for NuGet packages. If false, delisted package versions will be returned.
        :param bool is_deleted: This does not have any effect on the requested package version; for the other versions returned, it specifies whether to return only deleted or non-deleted versions of packages in the response. Default is unset (return all versions).
:rtype: :class:`<PackageVersion> <azure.devops.v7_1.feed.models.PackageVersion>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
if package_version_id is not None:
route_values['packageVersionId'] = self._serialize.url('package_version_id', package_version_id, 'str')
query_parameters = {}
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
if is_listed is not None:
query_parameters['isListed'] = self._serialize.query('is_listed', is_listed, 'bool')
if is_deleted is not None:
query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool')
response = self._send(http_method='GET',
location_id='3b331909-6a86-44cc-b9ec-c1834c35498f',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PackageVersion', response)
def get_package_versions(self, feed_id, package_id, project=None, include_urls=None, is_listed=None, is_deleted=None):
"""GetPackageVersions.
[Preview API] Get a list of package versions, optionally filtering by state.
:param str feed_id: Name or Id of the feed.
:param str package_id: Id of the package (GUID Id, not name).
:param str project: Project ID or project name
:param bool include_urls: True to include urls for each version. Default is true.
:param bool is_listed: Only applicable for NuGet packages. If false, delisted package versions will be returned.
:param bool is_deleted: If set specifies whether to return only deleted or non-deleted versions of packages in the response. Default is unset (return all versions).
:rtype: [PackageVersion]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
query_parameters = {}
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
if is_listed is not None:
query_parameters['isListed'] = self._serialize.query('is_listed', is_listed, 'bool')
if is_deleted is not None:
query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool')
response = self._send(http_method='GET',
location_id='3b331909-6a86-44cc-b9ec-c1834c35498f',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[PackageVersion]', self._unwrap_collection(response))
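    # Usage sketch (illustrative): enumerate the listed, non-deleted versions of a
    # package. `client` is assumed to be a FeedClient; the package GUID is a
    # placeholder, and the `version` attribute name follows the PackageVersion
    # REST contract.
    #
    #     versions = client.get_package_versions(
    #         feed_id='MyFeed',
    #         package_id='00000000-0000-0000-0000-000000000000',
    #         include_urls=False,
    #         is_listed=True,
    #         is_deleted=False)
    #     for v in versions:
    #         print(v.version)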
def create_feed_view(self, view, feed_id, project=None):
"""CreateFeedView.
[Preview API] Create a new view on the referenced feed.
:param :class:`<FeedView> <azure.devops.v7_1.feed.models.FeedView>` view: View to be created.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:rtype: :class:`<FeedView> <azure.devops.v7_1.feed.models.FeedView>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(view, 'FeedView')
response = self._send(http_method='POST',
location_id='42a8502a-6785-41bc-8c16-89477d930877',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('FeedView', response)
def delete_feed_view(self, feed_id, view_id, project=None):
"""DeleteFeedView.
[Preview API] Delete a feed view.
:param str feed_id: Name or Id of the feed.
:param str view_id: Name or Id of the view.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if view_id is not None:
route_values['viewId'] = self._serialize.url('view_id', view_id, 'str')
self._send(http_method='DELETE',
location_id='42a8502a-6785-41bc-8c16-89477d930877',
version='7.1-preview.1',
route_values=route_values)
def get_feed_view(self, feed_id, view_id, project=None):
"""GetFeedView.
[Preview API] Get a view by Id.
:param str feed_id: Name or Id of the feed.
:param str view_id: Name or Id of the view.
:param str project: Project ID or project name
:rtype: :class:`<FeedView> <azure.devops.v7_1.feed.models.FeedView>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if view_id is not None:
route_values['viewId'] = self._serialize.url('view_id', view_id, 'str')
response = self._send(http_method='GET',
location_id='42a8502a-6785-41bc-8c16-89477d930877',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('FeedView', response)
def get_feed_views(self, feed_id, project=None):
"""GetFeedViews.
[Preview API] Get all views for a feed.
:param str feed_id: Name or Id of the feed.
:param str project: Project ID or project name
:rtype: [FeedView]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
response = self._send(http_method='GET',
location_id='42a8502a-6785-41bc-8c16-89477d930877',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('[FeedView]', self._unwrap_collection(response))
def update_feed_view(self, view, feed_id, view_id, project=None):
"""UpdateFeedView.
[Preview API] Update a view.
:param :class:`<FeedView> <azure.devops.v7_1.feed.models.FeedView>` view: New settings to apply to the specified view.
:param str feed_id: Name or Id of the feed.
:param str view_id: Name or Id of the view.
:param str project: Project ID or project name
:rtype: :class:`<FeedView> <azure.devops.v7_1.feed.models.FeedView>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if view_id is not None:
route_values['viewId'] = self._serialize.url('view_id', view_id, 'str')
content = self._serialize.body(view, 'FeedView')
response = self._send(http_method='PATCH',
location_id='42a8502a-6785-41bc-8c16-89477d930877',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('FeedView', response)
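    # Usage sketch (illustrative): the full feed-view lifecycle using the methods
    # above. The FeedView constructor kwargs (name, type) are assumptions based
    # on the REST contract; `client` is an authenticated FeedClient.
    #
    #     from azure.devops.v7_1.feed.models import FeedView
    #     view = client.create_feed_view(FeedView(name='Staging', type='release'),
    #                                    feed_id='MyFeed', project='MyProject')
    #     view.name = 'PreRelease'
    #     view = client.update_feed_view(view, 'MyFeed', view.id, project='MyProject')
    #     client.delete_feed_view('MyFeed', view.id, project='MyProject')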
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/feed/feed_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/feed/feed_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 23109
}
| 398 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class IdentityClient(Client):
"""Identity
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(IdentityClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '8a3d49b8-91f0-46ef-b33d-dda338c25db3'
def create_or_bind_with_claims(self, source_identity):
"""CreateOrBindWithClaims.
[Preview API]
:param :class:`<Identity> <azure.devops.v7_1.identity.models.Identity>` source_identity:
:rtype: :class:`<Identity> <azure.devops.v7_1.identity.models.Identity>`
"""
content = self._serialize.body(source_identity, 'Identity')
response = self._send(http_method='PUT',
location_id='90ddfe71-171c-446c-bf3b-b597cd562afd',
version='7.1-preview.1',
content=content)
return self._deserialize('Identity', response)
def get_descriptor_by_id(self, id, is_master_id=None):
"""GetDescriptorById.
[Preview API]
:param str id:
:param bool is_master_id:
:rtype: :class:`<str> <azure.devops.v7_1.identity.models.str>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if is_master_id is not None:
query_parameters['isMasterId'] = self._serialize.query('is_master_id', is_master_id, 'bool')
response = self._send(http_method='GET',
location_id='a230389a-94f2-496c-839f-c929787496dd',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def create_groups(self, container):
"""CreateGroups.
[Preview API]
:param :class:`<object> <azure.devops.v7_1.identity.models.object>` container:
:rtype: [Identity]
"""
content = self._serialize.body(container, 'object')
response = self._send(http_method='POST',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='7.1-preview.1',
content=content)
return self._deserialize('[Identity]', self._unwrap_collection(response))
def delete_group(self, group_id):
"""DeleteGroup.
[Preview API]
:param str group_id:
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
self._send(http_method='DELETE',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='7.1-preview.1',
route_values=route_values)
def list_groups(self, scope_ids=None, recurse=None, deleted=None, properties=None):
"""ListGroups.
[Preview API]
:param str scope_ids:
:param bool recurse:
:param bool deleted:
:param str properties:
:rtype: [Identity]
"""
query_parameters = {}
if scope_ids is not None:
query_parameters['scopeIds'] = self._serialize.query('scope_ids', scope_ids, 'str')
if recurse is not None:
query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool')
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[Identity]', self._unwrap_collection(response))
def get_identity_changes(self, identity_sequence_id, group_sequence_id, organization_identity_sequence_id=None, page_size=None, scope_id=None):
"""GetIdentityChanges.
[Preview API]
:param int identity_sequence_id:
:param int group_sequence_id:
:param int organization_identity_sequence_id:
:param int page_size:
:param str scope_id:
:rtype: :class:`<ChangedIdentities> <azure.devops.v7_1.identity.models.ChangedIdentities>`
"""
query_parameters = {}
if identity_sequence_id is not None:
query_parameters['identitySequenceId'] = self._serialize.query('identity_sequence_id', identity_sequence_id, 'int')
if group_sequence_id is not None:
query_parameters['groupSequenceId'] = self._serialize.query('group_sequence_id', group_sequence_id, 'int')
if organization_identity_sequence_id is not None:
query_parameters['organizationIdentitySequenceId'] = self._serialize.query('organization_identity_sequence_id', organization_identity_sequence_id, 'int')
if page_size is not None:
query_parameters['pageSize'] = self._serialize.query('page_size', page_size, 'int')
if scope_id is not None:
query_parameters['scopeId'] = self._serialize.query('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('ChangedIdentities', response)
def get_user_identity_ids_by_domain_id(self, domain_id):
"""GetUserIdentityIdsByDomainId.
[Preview API]
:param str domain_id:
:rtype: [str]
"""
query_parameters = {}
if domain_id is not None:
query_parameters['domainId'] = self._serialize.query('domain_id', domain_id, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
def read_identities(self, descriptors=None, identity_ids=None, subject_descriptors=None, social_descriptors=None, search_filter=None, filter_value=None, query_membership=None, properties=None, include_restricted_visibility=None, options=None):
"""ReadIdentities.
[Preview API] Resolve legacy identity information for use with older APIs such as the Security APIs
:param str descriptors: A comma separated list of identity descriptors to resolve
        :param str identity_ids: A comma separated list of storage keys to resolve
        :param str subject_descriptors: A comma separated list of subject descriptors to resolve
        :param str social_descriptors:
        :param str search_filter: The type of search to perform. Values can be AccountName (domain\alias), DisplayName, MailAddress, General (display name, account name, or unique name), or LocalGroupName (only search Azure DevOps groups).
:param str filter_value: The search value, as specified by the searchFilter.
:param str query_membership: The membership information to include with the identities. Values can be None for no membership data or Direct to include the groups that the identity is a member of and the identities that are a member of this identity (groups only)
:param str properties:
:param bool include_restricted_visibility:
:param str options:
:rtype: [Identity]
"""
query_parameters = {}
if descriptors is not None:
query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str')
if identity_ids is not None:
query_parameters['identityIds'] = self._serialize.query('identity_ids', identity_ids, 'str')
if subject_descriptors is not None:
query_parameters['subjectDescriptors'] = self._serialize.query('subject_descriptors', subject_descriptors, 'str')
if social_descriptors is not None:
query_parameters['socialDescriptors'] = self._serialize.query('social_descriptors', social_descriptors, 'str')
if search_filter is not None:
query_parameters['searchFilter'] = self._serialize.query('search_filter', search_filter, 'str')
if filter_value is not None:
query_parameters['filterValue'] = self._serialize.query('filter_value', filter_value, 'str')
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
if include_restricted_visibility is not None:
query_parameters['includeRestrictedVisibility'] = self._serialize.query('include_restricted_visibility', include_restricted_visibility, 'bool')
if options is not None:
query_parameters['options'] = self._serialize.query('options', options, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[Identity]', self._unwrap_collection(response))
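    # Usage sketch (illustrative): resolve identities by a general search string.
    # `client` is assumed to be an IdentityClient from an authenticated
    # Connection; the `provider_display_name` attribute name follows the Identity
    # REST contract.
    #
    #     matches = client.read_identities(search_filter='General',
    #                                      filter_value='Jamal Hartnett',
    #                                      query_membership='None')
    #     for identity in matches:
    #         print(identity.id, identity.provider_display_name)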
def read_identities_by_scope(self, scope_id, query_membership=None, properties=None):
"""ReadIdentitiesByScope.
[Preview API]
:param str scope_id:
:param str query_membership:
:param str properties:
:rtype: [Identity]
"""
query_parameters = {}
if scope_id is not None:
query_parameters['scopeId'] = self._serialize.query('scope_id', scope_id, 'str')
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[Identity]', self._unwrap_collection(response))
def read_identity(self, identity_id, query_membership=None, properties=None):
"""ReadIdentity.
[Preview API]
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <azure.devops.v7_1.identity.models.Identity>`
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Identity', response)
def update_identities(self, identities, allow_meta_data_update=None):
"""UpdateIdentities.
[Preview API]
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v7_1.identity.models.VssJsonCollectionWrapper>` identities:
:param bool allow_meta_data_update:
:rtype: [IdentityUpdateData]
"""
query_parameters = {}
if allow_meta_data_update is not None:
query_parameters['allowMetaDataUpdate'] = self._serialize.query('allow_meta_data_update', allow_meta_data_update, 'bool')
content = self._serialize.body(identities, 'VssJsonCollectionWrapper')
response = self._send(http_method='PUT',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('[IdentityUpdateData]', self._unwrap_collection(response))
def update_identity(self, identity, identity_id):
"""UpdateIdentity.
[Preview API]
:param :class:`<Identity> <azure.devops.v7_1.identity.models.Identity>` identity:
:param str identity_id:
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
content = self._serialize.body(identity, 'Identity')
self._send(http_method='PUT',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='7.1-preview.1',
route_values=route_values,
content=content)
def create_identity(self, framework_identity_info):
"""CreateIdentity.
[Preview API]
:param :class:`<FrameworkIdentityInfo> <azure.devops.v7_1.identity.models.FrameworkIdentityInfo>` framework_identity_info:
:rtype: :class:`<Identity> <azure.devops.v7_1.identity.models.Identity>`
"""
content = self._serialize.body(framework_identity_info, 'FrameworkIdentityInfo')
response = self._send(http_method='PUT',
location_id='dd55f0eb-6ea2-4fe4-9ebe-919e7dd1dfb4',
version='7.1-preview.1',
content=content)
return self._deserialize('Identity', response)
def read_identity_batch(self, batch_info):
"""ReadIdentityBatch.
[Preview API]
:param :class:`<IdentityBatchInfo> <azure.devops.v7_1.identity.models.IdentityBatchInfo>` batch_info:
:rtype: [Identity]
"""
content = self._serialize.body(batch_info, 'IdentityBatchInfo')
response = self._send(http_method='POST',
location_id='299e50df-fe45-4d3a-8b5b-a5836fac74dc',
version='7.1-preview.1',
content=content)
return self._deserialize('[Identity]', self._unwrap_collection(response))
def get_identity_snapshot(self, scope_id):
"""GetIdentitySnapshot.
[Preview API]
:param str scope_id:
:rtype: :class:`<IdentitySnapshot> <azure.devops.v7_1.identity.models.IdentitySnapshot>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='d56223df-8ccd-45c9-89b4-eddf692400d7',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('IdentitySnapshot', response)
def get_max_sequence_id(self):
"""GetMaxSequenceId.
[Preview API] Read the max sequence id of all the identities.
:rtype: long
"""
response = self._send(http_method='GET',
location_id='e4a70778-cb2c-4e85-b7cc-3f3c7ae2d408',
version='7.1-preview.1')
return self._deserialize('long', response)
def get_self(self):
"""GetSelf.
[Preview API] Read identity of the home tenant request user.
:rtype: :class:`<IdentitySelf> <azure.devops.v7_1.identity.models.IdentitySelf>`
"""
response = self._send(http_method='GET',
location_id='4bb02b5b-c120-4be2-b68e-21f7c50a4b82',
version='7.1-preview.1')
return self._deserialize('IdentitySelf', response)
def add_member(self, container_id, member_id):
"""AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='PUT',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('bool', response)
def force_remove_member(self, container_id, member_id, force_remove):
"""ForceRemoveMember.
[Preview API]
:param str container_id:
:param str member_id:
:param bool force_remove:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if force_remove is not None:
query_parameters['forceRemove'] = self._serialize.query('force_remove', force_remove, 'bool')
response = self._send(http_method='DELETE',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('bool', response)
def read_member(self, container_id, member_id, query_membership=None):
"""ReadMember.
[Preview API]
:param str container_id:
:param str member_id:
:param str query_membership:
:rtype: :class:`<str> <azure.devops.v7_1.identity.models.str>`
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def read_members(self, container_id, query_membership=None):
"""ReadMembers.
[Preview API]
:param str container_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
def remove_member(self, container_id, member_id):
"""RemoveMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='DELETE',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('bool', response)
def read_member_of(self, member_id, container_id, query_membership=None):
"""ReadMemberOf.
[Preview API]
:param str member_id:
:param str container_id:
:param str query_membership:
:rtype: :class:`<str> <azure.devops.v7_1.identity.models.str>`
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def read_members_of(self, member_id, query_membership=None):
"""ReadMembersOf.
[Preview API]
:param str member_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
def refresh_members_of(self, member_id, query_membership=None):
"""RefreshMembersOf.
[Preview API]
:param str member_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='POST',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
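    # Usage sketch (illustrative): manage direct group membership with the member
    # methods above. Both arguments are identity descriptors or storage keys, per
    # the route parameters; the concrete values here are placeholders.
    #
    #     group_id = '<group descriptor or storage key>'
    #     user_id = '<member descriptor or storage key>'
    #     client.add_member(group_id, user_id)
    #     print(client.read_members(group_id))    # direct members of the group
    #     print(client.read_members_of(user_id))  # containers the user belongs to
    #     client.remove_member(group_id, user_id)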
def create_scope(self, info, scope_id):
"""CreateScope.
[Preview API]
:param :class:`<CreateScopeInfo> <azure.devops.v7_1.identity.models.CreateScopeInfo>` info:
:param str scope_id:
:rtype: :class:`<IdentityScope> <azure.devops.v7_1.identity.models.IdentityScope>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
content = self._serialize.body(info, 'CreateScopeInfo')
response = self._send(http_method='PUT',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='7.1-preview.2',
route_values=route_values,
content=content)
return self._deserialize('IdentityScope', response)
def delete_scope(self, scope_id):
"""DeleteScope.
[Preview API]
:param str scope_id:
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
self._send(http_method='DELETE',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='7.1-preview.2',
route_values=route_values)
def get_scope_by_id(self, scope_id):
"""GetScopeById.
[Preview API]
:param str scope_id:
:rtype: :class:`<IdentityScope> <azure.devops.v7_1.identity.models.IdentityScope>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='7.1-preview.2',
route_values=route_values)
return self._deserialize('IdentityScope', response)
def get_scope_by_name(self, scope_name):
"""GetScopeByName.
[Preview API]
:param str scope_name:
:rtype: :class:`<IdentityScope> <azure.devops.v7_1.identity.models.IdentityScope>`
"""
query_parameters = {}
if scope_name is not None:
query_parameters['scopeName'] = self._serialize.query('scope_name', scope_name, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='7.1-preview.2',
query_parameters=query_parameters)
return self._deserialize('IdentityScope', response)
def update_scope(self, patch_document, scope_id):
"""UpdateScope.
[Preview API]
:param :class:`<[JsonPatchOperation]> <azure.devops.v7_1.identity.models.[JsonPatchOperation]>` patch_document:
:param str scope_id:
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
content = self._serialize.body(patch_document, '[JsonPatchOperation]')
self._send(http_method='PATCH',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='7.1-preview.2',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
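    # Usage sketch (illustrative): rename an identity scope with a JSON Patch
    # document, matching the 'application/json-patch+json' media type used above.
    # JsonPatchOperation kwargs (op, path, value) follow the standard JSON Patch
    # shape; the scope id is a placeholder.
    #
    #     from azure.devops.v7_1.identity.models import JsonPatchOperation
    #     patch = [JsonPatchOperation(op='replace', path='/name', value='NewScopeName')]
    #     client.update_scope(patch, scope_id='00000000-0000-0000-0000-000000000000')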
def get_signed_in_token(self):
"""GetSignedInToken.
[Preview API]
:rtype: :class:`<AccessTokenResult> <azure.devops.v7_1.identity.models.AccessTokenResult>`
"""
response = self._send(http_method='GET',
location_id='6074ff18-aaad-4abb-a41e-5c75f6178057',
version='7.1-preview.1')
return self._deserialize('AccessTokenResult', response)
def get_signout_token(self):
"""GetSignoutToken.
[Preview API]
:rtype: :class:`<AccessTokenResult> <azure.devops.v7_1.identity.models.AccessTokenResult>`
"""
response = self._send(http_method='GET',
location_id='be39e83c-7529-45e9-9c67-0410885880da',
version='7.1-preview.1')
return self._deserialize('AccessTokenResult', response)
def get_tenant(self, tenant_id):
"""GetTenant.
[Preview API]
:param str tenant_id:
:rtype: :class:`<TenantInfo> <azure.devops.v7_1.identity.models.TenantInfo>`
"""
route_values = {}
if tenant_id is not None:
route_values['tenantId'] = self._serialize.url('tenant_id', tenant_id, 'str')
response = self._send(http_method='GET',
location_id='5f0a1723-2e2c-4c31-8cae-002d01bdd592',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('TenantInfo', response)
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/identity/identity_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/identity/identity_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 14419
}
| 399 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class NpmClient(Client):
"""Npm
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(NpmClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '4c83cfc1-f33a-477e-a789-29d38ffca52e'
def get_content_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, project=None, **kwargs):
"""GetContentScopedPackage.
[Preview API]
:param str feed_id:
:param str package_scope:
:param str unscoped_package_name:
:param str package_version:
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='09a4eafd-123a-495c-979c-0eda7bdb9a14',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_content_unscoped_package(self, feed_id, package_name, package_version, project=None, **kwargs):
"""GetContentUnscopedPackage.
[Preview API] Get an unscoped npm package.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='75caa482-cb1e-47cd-9f2c-c048a4b7a43e',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
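    # Usage sketch (illustrative): stream an npm tarball to disk. Per the code
    # above, the return value of stream_download is an iterator of byte chunks,
    # and the optional `callback` kwarg is invoked as chunks arrive (its exact
    # signature depends on the underlying msrest version). `client` is assumed
    # to be an NpmClient.
    #
    #     def progress(chunk, response=None):
    #         print('received %d bytes' % len(chunk))
    #
    #     data = client.get_content_unscoped_package(
    #         'MyFeed', 'left-pad', '1.3.0', callback=progress)
    #     with open('left-pad-1.3.0.tgz', 'wb') as f:
    #         for chunk in data:
    #             f.write(chunk)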
def update_packages(self, batch_request, feed_id, project=None):
"""UpdatePackages.
[Preview API] Update several packages from a single feed in a single request. The updates to the packages do not happen atomically.
:param :class:`<NpmPackagesBatchRequest> <azure.devops.v7_1.npm.models.NpmPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'NpmPackagesBatchRequest')
self._send(http_method='POST',
location_id='06f34005-bbb2-41f4-88f5-23e03a99bb12',
version='7.1-preview.1',
route_values=route_values,
content=content)
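    # Usage sketch (illustrative): promote several package versions to a view in
    # a single (non-atomic) batch call. The NpmPackagesBatchRequest field names
    # (data, operation, packages) and the MinimalPackageDetails shape are
    # assumptions based on the REST contract for batch operations.
    #
    #     from azure.devops.v7_1.npm.models import (MinimalPackageDetails,
    #                                               NpmPackagesBatchRequest)
    #     request = NpmPackagesBatchRequest(
    #         data={'viewId': 'Release'},
    #         operation='promote',
    #         packages=[MinimalPackageDetails(id='left-pad', version='1.3.0')])
    #     client.update_packages(request, 'MyFeed', project='MyProject')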
def get_readme_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, project=None, **kwargs):
"""GetReadmeScopedPackage.
[Preview API] Get the Readme for a package version with an npm scope.
:param str feed_id: Name or ID of the feed.
        :param str package_scope: Scope of the package (the 'scope' part of @scope/name).
        :param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='6d4db777-7e4a-43b2-afad-779a1d197301',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_readme_unscoped_package(self, feed_id, package_name, package_version, project=None, **kwargs):
"""GetReadmeUnscopedPackage.
[Preview API] Get the Readme for a package version that has no npm scope.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='1099a396-b310-41d4-a4b6-33d134ce3fcf',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_recycle_bin_packages(self, batch_request, feed_id, project=None):
"""UpdateRecycleBinPackages.
[Preview API] Delete or restore several package versions from the recycle bin.
:param :class:`<NpmPackagesBatchRequest> <azure.devops.v7_1.npm.models.NpmPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'NpmPackagesBatchRequest')
self._send(http_method='POST',
location_id='eefe03ef-a6a2-4a7a-a0ec-2e65a5efd64c',
version='7.1-preview.1',
route_values=route_values,
content=content)
def delete_scoped_package_version_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""DeleteScopedPackageVersionFromRecycleBin.
[Preview API] Delete a package version with an npm scope from the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
self._send(http_method='DELETE',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='7.1-preview.1',
route_values=route_values)
def get_scoped_package_version_metadata_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""GetScopedPackageVersionMetadataFromRecycleBin.
[Preview API] Get information about a scoped package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name)
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v7_1.npm.models.NpmPackageVersionDeletionState>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('NpmPackageVersionDeletionState', response)
def restore_scoped_package_version_from_recycle_bin(self, package_version_details, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""RestoreScopedPackageVersionFromRecycleBin.
[Preview API] Restore a package version with an npm scope from the recycle bin to its feed.
:param :class:`<NpmRecycleBinPackageVersionDetails> <azure.devops.v7_1.npm.models.NpmRecycleBinPackageVersionDetails>` package_version_details:
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'NpmRecycleBinPackageVersionDetails')
self._send(http_method='PATCH',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='7.1-preview.1',
route_values=route_values,
content=content)
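    # Usage sketch (illustrative): restore a deleted scoped package version such
    # as @types/node (scope 'types', name 'node'). Per the REST contract, the
    # restore payload sets deleted to false; the
    # NpmRecycleBinPackageVersionDetails field name is an assumption.
    #
    #     from azure.devops.v7_1.npm.models import NpmRecycleBinPackageVersionDetails
    #     details = NpmRecycleBinPackageVersionDetails(deleted=False)
    #     client.restore_scoped_package_version_from_recycle_bin(
    #         details, 'MyFeed', 'types', 'node', '18.0.0', project='MyProject')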
def delete_package_version_from_recycle_bin(self, feed_id, package_name, package_version, project=None):
"""DeletePackageVersionFromRecycleBin.
[Preview API] Delete a package version without an npm scope from the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
self._send(http_method='DELETE',
location_id='63a4f31f-e92b-4ee4-bf92-22d485e73bef',
version='7.1-preview.1',
route_values=route_values)
def get_package_version_metadata_from_recycle_bin(self, feed_id, package_name, package_version, project=None):
"""GetPackageVersionMetadataFromRecycleBin.
[Preview API] Get information about an unscoped package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v7_1.npm.models.NpmPackageVersionDeletionState>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='63a4f31f-e92b-4ee4-bf92-22d485e73bef',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('NpmPackageVersionDeletionState', response)
def restore_package_version_from_recycle_bin(self, package_version_details, feed_id, package_name, package_version, project=None):
"""RestorePackageVersionFromRecycleBin.
[Preview API] Restore a package version without an npm scope from the recycle bin to its feed.
:param :class:`<NpmRecycleBinPackageVersionDetails> <azure.devops.v7_1.npm.models.NpmRecycleBinPackageVersionDetails>` package_version_details:
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'NpmRecycleBinPackageVersionDetails')
self._send(http_method='PATCH',
location_id='63a4f31f-e92b-4ee4-bf92-22d485e73bef',
version='7.1-preview.1',
route_values=route_values,
content=content)
def get_scoped_upstreaming_behavior(self, feed_id, package_scope, unscoped_package_name, project=None):
"""GetScopedUpstreamingBehavior.
[Preview API] Get the upstreaming behavior of the (scoped) package within the context of a feed
:param str feed_id: The name or id of the feed
:param str package_scope: The scope of the package
:param str unscoped_package_name: The name of the scoped package
:param str project: Project ID or project name
:rtype: :class:`<UpstreamingBehavior> <azure.devops.v7_1.npm.models.UpstreamingBehavior>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
response = self._send(http_method='GET',
location_id='9859c187-f6ec-41b0-862d-8003b3b404e0',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('UpstreamingBehavior', response)
def set_scoped_upstreaming_behavior(self, feed_id, package_scope, unscoped_package_name, behavior, project=None):
"""SetScopedUpstreamingBehavior.
[Preview API] Set the upstreaming behavior of a (scoped) package within the context of a feed
:param str feed_id: The name or id of the feed
:param str package_scope: The scope of the package
:param str unscoped_package_name: The name of the scoped package
:param :class:`<UpstreamingBehavior> <azure.devops.v7_1.npm.models.UpstreamingBehavior>` behavior: The behavior to apply to the scoped package within the scope of the feed
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
content = self._serialize.body(behavior, 'UpstreamingBehavior')
self._send(http_method='PATCH',
location_id='9859c187-f6ec-41b0-862d-8003b3b404e0',
version='7.1-preview.1',
route_values=route_values,
content=content)
def get_upstreaming_behavior(self, feed_id, package_name, project=None):
"""GetUpstreamingBehavior.
[Preview API] Get the upstreaming behavior of the (unscoped) package within the context of a feed
:param str feed_id: The name or id of the feed
:param str package_name: The name of the package
:param str project: Project ID or project name
:rtype: :class:`<UpstreamingBehavior> <azure.devops.v7_1.npm.models.UpstreamingBehavior>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
response = self._send(http_method='GET',
location_id='e27a45d3-711b-41cb-a47a-ae669b6e9076',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('UpstreamingBehavior', response)
def set_upstreaming_behavior(self, feed_id, package_name, behavior, project=None):
"""SetUpstreamingBehavior.
        [Preview API] Set the upstreaming behavior of an (unscoped) package within the context of a feed
        :param str feed_id: The name or id of the feed
        :param str package_name: The name of the package
        :param :class:`<UpstreamingBehavior> <azure.devops.v7_1.npm.models.UpstreamingBehavior>` behavior: The behavior to apply to the package within the scope of the feed
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
content = self._serialize.body(behavior, 'UpstreamingBehavior')
self._send(http_method='PATCH',
location_id='e27a45d3-711b-41cb-a47a-ae669b6e9076',
version='7.1-preview.1',
route_values=route_values,
content=content)
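    # Usage sketch (illustrative): read and reset a package's upstreaming
    # behavior with the two methods above. The UpstreamingBehavior field name
    # (versions_from_external_upstreams) and the 'auto' value follow the REST
    # documentation and are assumptions here.
    #
    #     from azure.devops.v7_1.npm.models import UpstreamingBehavior
    #     behavior = UpstreamingBehavior(versions_from_external_upstreams='auto')
    #     client.set_upstreaming_behavior('MyFeed', 'left-pad', behavior)
    #     current = client.get_upstreaming_behavior('MyFeed', 'left-pad')
    #     print(current.versions_from_external_upstreams)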
def get_scoped_package_info(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""GetScopedPackageInfo.
[Preview API] Get information about a scoped package version (such as @scope/name).
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='e93d9ec3-4022-401e-96b0-83ea5d911e09',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('Package', response)
def unpublish_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""UnpublishScopedPackage.
[Preview API] Unpublish a scoped package version (such as @scope/name).
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='DELETE',
location_id='e93d9ec3-4022-401e-96b0-83ea5d911e09',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('Package', response)
def update_scoped_package(self, package_version_details, feed_id, package_scope, unscoped_package_name, package_version, project=None):
"""UpdateScopedPackage.
[Preview API]
:param :class:`<PackageVersionDetails> <azure.devops.v7_1.npm.models.PackageVersionDetails>` package_version_details:
:param str feed_id:
:param str package_scope:
:param str unscoped_package_name:
:param str package_version:
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
response = self._send(http_method='PATCH',
location_id='e93d9ec3-4022-401e-96b0-83ea5d911e09',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Package', response)
def get_package_info(self, feed_id, package_name, package_version, project=None):
"""GetPackageInfo.
[Preview API] Get information about an unscoped package version.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('Package', response)
def unpublish_package(self, feed_id, package_name, package_version, project=None):
"""UnpublishPackage.
[Preview API] Unpublish an unscoped package version.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='DELETE',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('Package', response)
def update_package(self, package_version_details, feed_id, package_name, package_version, project=None):
"""UpdatePackage.
[Preview API] Update an unscoped package version.
:param :class:`<PackageVersionDetails> <azure.devops.v7_1.npm.models.PackageVersionDetails>` package_version_details: Details to apply to the package version.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.npm.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
response = self._send(http_method='PATCH',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Package', response)
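# ------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated client). It assumes the
# NpmClient class defined above in this module, a hypothetical feed/package and
# personal access token, and that PackageVersionDetails in the accompanying
# models module accepts a deprecate_message field.
# ------------------------------------------------------------------------------
def _example_deprecate_package(organization_url, personal_access_token):
    from msrest.authentication import BasicAuthentication
    from .models import PackageVersionDetails  # assumed to expose deprecate_message
    client = NpmClient(base_url=organization_url,
                       creds=BasicAuthentication('', personal_access_token))
    details = PackageVersionDetails(deprecate_message='Superseded by 2.x')
    # PATCH the version metadata of an unscoped package in a feed.
    return client.update_package(details, feed_id='my-feed',
                                 package_name='my-package',
                                 package_version='1.0.0')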
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/npm/npm_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/npm/npm_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 14608
}
| 400 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class ServiceEndpointClient(Client):
"""ServiceEndpoint
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(ServiceEndpointClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '1814ab31-2f4f-4a9f-8761-f4d77dc5a5d7'
def execute_service_endpoint_request(self, service_endpoint_request, project, endpoint_id):
"""ExecuteServiceEndpointRequest.
[Preview API] Proxy for a GET request defined by a service endpoint.
:param :class:`<ServiceEndpointRequest> <azure.devops.v7_1.service_endpoint.models.ServiceEndpointRequest>` service_endpoint_request: Service endpoint request.
:param str project: Project ID or project name
:param str endpoint_id: Id of the service endpoint.
:rtype: :class:`<ServiceEndpointRequestResult> <azure.devops.v7_1.service_endpoint.models.ServiceEndpointRequestResult>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if endpoint_id is not None:
query_parameters['endpointId'] = self._serialize.query('endpoint_id', endpoint_id, 'str')
content = self._serialize.body(service_endpoint_request, 'ServiceEndpointRequest')
response = self._send(http_method='POST',
location_id='cc63bb57-2a5f-4a7a-b79c-c142d308657e',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ServiceEndpointRequestResult', response)
def create_service_endpoint(self, endpoint):
"""CreateServiceEndpoint.
[Preview API] Creates a new service endpoint
:param :class:`<ServiceEndpoint> <azure.devops.v7_1.service_endpoint.models.ServiceEndpoint>` endpoint: Service endpoint to create
:rtype: :class:`<ServiceEndpoint> <azure.devops.v7_1.service_endpoint.models.ServiceEndpoint>`
"""
content = self._serialize.body(endpoint, 'ServiceEndpoint')
response = self._send(http_method='POST',
location_id='14e48fdc-2c8b-41ce-a0c3-e26f6cc55bd0',
version='7.1-preview.4',
content=content)
return self._deserialize('ServiceEndpoint', response)
def delete_service_endpoint(self, endpoint_id, project_ids, deep=None):
"""DeleteServiceEndpoint.
[Preview API] Delete a service endpoint
:param str endpoint_id: Endpoint Id of endpoint to delete
:param [str] project_ids: project Ids from which endpoint needs to be deleted
:param bool deep: delete the spn created by endpoint
"""
route_values = {}
if endpoint_id is not None:
route_values['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
query_parameters = {}
if project_ids is not None:
project_ids = ",".join(project_ids)
query_parameters['projectIds'] = self._serialize.query('project_ids', project_ids, 'str')
if deep is not None:
query_parameters['deep'] = self._serialize.query('deep', deep, 'bool')
self._send(http_method='DELETE',
location_id='14e48fdc-2c8b-41ce-a0c3-e26f6cc55bd0',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters)
def share_service_endpoint(self, endpoint_project_references, endpoint_id):
"""ShareServiceEndpoint.
[Preview API] Share service endpoint across projects
:param [ServiceEndpointProjectReference] endpoint_project_references: Project reference details of the target project
:param str endpoint_id: Endpoint Id of the endpoint to share
"""
route_values = {}
if endpoint_id is not None:
route_values['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
content = self._serialize.body(endpoint_project_references, '[ServiceEndpointProjectReference]')
self._send(http_method='PATCH',
location_id='14e48fdc-2c8b-41ce-a0c3-e26f6cc55bd0',
version='7.1-preview.4',
route_values=route_values,
content=content)
def update_service_endpoint(self, endpoint, endpoint_id, operation=None):
"""UpdateServiceEndpoint.
[Preview API] Update the service endpoint
:param :class:`<ServiceEndpoint> <azure.devops.v7_1.service_endpoint.models.ServiceEndpoint>` endpoint: Updated data for the endpoint
:param str endpoint_id: Endpoint Id of the endpoint to update
:param str operation: operation type
:rtype: :class:`<ServiceEndpoint> <azure.devops.v7_1.service_endpoint.models.ServiceEndpoint>`
"""
route_values = {}
if endpoint_id is not None:
route_values['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
query_parameters = {}
if operation is not None:
query_parameters['operation'] = self._serialize.query('operation', operation, 'str')
content = self._serialize.body(endpoint, 'ServiceEndpoint')
response = self._send(http_method='PUT',
location_id='14e48fdc-2c8b-41ce-a0c3-e26f6cc55bd0',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ServiceEndpoint', response)
def update_service_endpoints(self, endpoints):
"""UpdateServiceEndpoints.
[Preview API] Update the service endpoints.
:param [ServiceEndpoint] endpoints: The service endpoints to update.
:rtype: [ServiceEndpoint]
"""
content = self._serialize.body(endpoints, '[ServiceEndpoint]')
response = self._send(http_method='PUT',
location_id='14e48fdc-2c8b-41ce-a0c3-e26f6cc55bd0',
version='7.1-preview.4',
content=content)
return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
def get_service_endpoint_details(self, project, endpoint_id, action_filter=None):
"""GetServiceEndpointDetails.
[Preview API] Get the service endpoint details.
:param str project: Project ID or project name
:param str endpoint_id: Id of the service endpoint.
:param str action_filter: Action filter for the service connection. It specifies the action which can be performed on the service connection.
:rtype: :class:`<ServiceEndpoint> <azure.devops.v7_1.service_endpoint.models.ServiceEndpoint>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if endpoint_id is not None:
route_values['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
query_parameters = {}
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ServiceEndpoint', response)
def get_service_endpoints(self, project, type=None, auth_schemes=None, endpoint_ids=None, owner=None, include_failed=None, include_details=None):
"""GetServiceEndpoints.
[Preview API] Get the service endpoints.
:param str project: Project ID or project name
:param str type: Type of the service endpoints.
:param [str] auth_schemes: Authorization schemes used for service endpoints.
:param [str] endpoint_ids: Ids of the service endpoints.
:param str owner: Owner for service endpoints.
:param bool include_failed: Failed flag for service endpoints.
:param bool include_details: Flag to include more details for service endpoints. This is for internal use only and the flag will be treated as false for all other requests
:rtype: [ServiceEndpoint]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if type is not None:
query_parameters['type'] = self._serialize.query('type', type, 'str')
if auth_schemes is not None:
auth_schemes = ",".join(auth_schemes)
query_parameters['authSchemes'] = self._serialize.query('auth_schemes', auth_schemes, 'str')
if endpoint_ids is not None:
endpoint_ids = ",".join(endpoint_ids)
query_parameters['endpointIds'] = self._serialize.query('endpoint_ids', endpoint_ids, 'str')
if owner is not None:
query_parameters['owner'] = self._serialize.query('owner', owner, 'str')
if include_failed is not None:
query_parameters['includeFailed'] = self._serialize.query('include_failed', include_failed, 'bool')
if include_details is not None:
query_parameters['includeDetails'] = self._serialize.query('include_details', include_details, 'bool')
response = self._send(http_method='GET',
location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
def get_service_endpoints_by_names(self, project, endpoint_names, type=None, auth_schemes=None, owner=None, include_failed=None, include_details=None):
"""GetServiceEndpointsByNames.
[Preview API] Get the service endpoints by name.
:param str project: Project ID or project name
:param [str] endpoint_names: Names of the service endpoints.
:param str type: Type of the service endpoints.
:param [str] auth_schemes: Authorization schemes used for service endpoints.
:param str owner: Owner for service endpoints.
:param bool include_failed: Failed flag for service endpoints.
:param bool include_details: Flag to include more details for service endpoints. This is for internal use only and the flag will be treated as false for all other requests
:rtype: [ServiceEndpoint]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if endpoint_names is not None:
endpoint_names = ",".join(endpoint_names)
query_parameters['endpointNames'] = self._serialize.query('endpoint_names', endpoint_names, 'str')
if type is not None:
query_parameters['type'] = self._serialize.query('type', type, 'str')
if auth_schemes is not None:
auth_schemes = ",".join(auth_schemes)
query_parameters['authSchemes'] = self._serialize.query('auth_schemes', auth_schemes, 'str')
if owner is not None:
query_parameters['owner'] = self._serialize.query('owner', owner, 'str')
if include_failed is not None:
query_parameters['includeFailed'] = self._serialize.query('include_failed', include_failed, 'bool')
if include_details is not None:
query_parameters['includeDetails'] = self._serialize.query('include_details', include_details, 'bool')
response = self._send(http_method='GET',
location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
def get_service_endpoints_with_refreshed_authentication(self, refresh_authentication_parameters, project, endpoint_ids):
"""GetServiceEndpointsWithRefreshedAuthentication.
[Preview API] Gets the service endpoints and patches new authorization parameters
:param [RefreshAuthenticationParameters] refresh_authentication_parameters: Scope, Validity of Token requested.
:param str project: Project ID or project name
:param [str] endpoint_ids: Ids of the service endpoints.
:rtype: [ServiceEndpoint]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if endpoint_ids is not None:
endpoint_ids = ",".join(endpoint_ids)
query_parameters['endpointIds'] = self._serialize.query('endpoint_ids', endpoint_ids, 'str')
content = self._serialize.body(refresh_authentication_parameters, '[RefreshAuthenticationParameters]')
response = self._send(http_method='POST',
location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
version='7.1-preview.4',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
def get_service_endpoint_execution_records(self, project, endpoint_id, top=None, continuation_token=None):
"""GetServiceEndpointExecutionRecords.
[Preview API] Get service endpoint execution records.
:param str project: Project ID or project name
:param str endpoint_id: Id of the service endpoint.
:param int top: Number of service endpoint execution records to get.
:param long continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of records
:rtype: :class:`<[ServiceEndpointExecutionRecord]> <azure.devops.v7_1.service_endpoint.models.[ServiceEndpointExecutionRecord]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if endpoint_id is not None:
route_values['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
query_parameters = {}
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'long')
response = self._send(http_method='GET',
location_id='10a16738-9299-4cd1-9a81-fd23ad6200d0',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ServiceEndpointExecutionRecord]', self._unwrap_collection(response))
def get_service_endpoint_types(self, type=None, scheme=None):
"""GetServiceEndpointTypes.
[Preview API] Get service endpoint types.
:param str type: Type of service endpoint.
:param str scheme: Scheme of service endpoint.
:rtype: [ServiceEndpointType]
"""
query_parameters = {}
if type is not None:
query_parameters['type'] = self._serialize.query('type', type, 'str')
if scheme is not None:
query_parameters['scheme'] = self._serialize.query('scheme', scheme, 'str')
response = self._send(http_method='GET',
location_id='5a7938a4-655e-486c-b562-b78c54a7e87b',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[ServiceEndpointType]', self._unwrap_collection(response))
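# ------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated client). Organization
# URL, project and token are hypothetical placeholders; the filters map onto
# the query parameters serialized by get_service_endpoints above.
# ------------------------------------------------------------------------------
def _example_list_github_endpoints(organization_url, personal_access_token, project):
    from msrest.authentication import BasicAuthentication
    client = ServiceEndpointClient(base_url=organization_url,
                                   creds=BasicAuthentication('', personal_access_token))
    # List-valued filters (auth_schemes, endpoint_ids) are joined into
    # comma-separated query strings by the client before sending.
    return client.get_service_endpoints(project, type='github', include_failed=False)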
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/service_endpoint/service_endpoint_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/service_endpoint/service_endpoint_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 7394
}
| 401 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AggregatedDataForResultTrend(Model):
"""
:param duration: This is the test execution duration.
:type duration: object
:param results_by_outcome:
:type results_by_outcome: dict
:param run_summary_by_state:
:type run_summary_by_state: dict
:param test_results_context:
:type test_results_context: :class:`TestResultsContext <azure.devops.v7_1.test.models.TestResultsContext>`
:param total_tests:
:type total_tests: int
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'object'},
'results_by_outcome': {'key': 'resultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'run_summary_by_state': {'key': 'runSummaryByState', 'type': '{AggregatedRunsByState}'},
'test_results_context': {'key': 'testResultsContext', 'type': 'TestResultsContext'},
'total_tests': {'key': 'totalTests', 'type': 'int'}
}
def __init__(self, duration=None, results_by_outcome=None, run_summary_by_state=None, test_results_context=None, total_tests=None):
super(AggregatedDataForResultTrend, self).__init__()
self.duration = duration
self.results_by_outcome = results_by_outcome
self.run_summary_by_state = run_summary_by_state
self.test_results_context = test_results_context
self.total_tests = total_tests
class AggregatedResultsAnalysis(Model):
"""
:param duration:
:type duration: object
:param not_reported_results_by_outcome:
:type not_reported_results_by_outcome: dict
:param previous_context:
:type previous_context: :class:`TestResultsContext <azure.devops.v7_1.test.models.TestResultsContext>`
:param results_by_outcome:
:type results_by_outcome: dict
:param results_difference:
:type results_difference: :class:`AggregatedResultsDifference <azure.devops.v7_1.test.models.AggregatedResultsDifference>`
:param run_summary_by_outcome:
:type run_summary_by_outcome: dict
:param run_summary_by_state:
:type run_summary_by_state: dict
:param total_tests:
:type total_tests: int
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'object'},
'not_reported_results_by_outcome': {'key': 'notReportedResultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'previous_context': {'key': 'previousContext', 'type': 'TestResultsContext'},
'results_by_outcome': {'key': 'resultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'results_difference': {'key': 'resultsDifference', 'type': 'AggregatedResultsDifference'},
'run_summary_by_outcome': {'key': 'runSummaryByOutcome', 'type': '{AggregatedRunsByOutcome}'},
'run_summary_by_state': {'key': 'runSummaryByState', 'type': '{AggregatedRunsByState}'},
'total_tests': {'key': 'totalTests', 'type': 'int'}
}
def __init__(self, duration=None, not_reported_results_by_outcome=None, previous_context=None, results_by_outcome=None, results_difference=None, run_summary_by_outcome=None, run_summary_by_state=None, total_tests=None):
super(AggregatedResultsAnalysis, self).__init__()
self.duration = duration
self.not_reported_results_by_outcome = not_reported_results_by_outcome
self.previous_context = previous_context
self.results_by_outcome = results_by_outcome
self.results_difference = results_difference
self.run_summary_by_outcome = run_summary_by_outcome
self.run_summary_by_state = run_summary_by_state
self.total_tests = total_tests
class AggregatedResultsByOutcome(Model):
"""
:param count:
:type count: int
:param duration:
:type duration: object
:param group_by_field:
:type group_by_field: str
:param group_by_value:
:type group_by_value: object
:param outcome:
:type outcome: object
:param rerun_result_count:
:type rerun_result_count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'duration': {'key': 'duration', 'type': 'object'},
'group_by_field': {'key': 'groupByField', 'type': 'str'},
'group_by_value': {'key': 'groupByValue', 'type': 'object'},
'outcome': {'key': 'outcome', 'type': 'object'},
'rerun_result_count': {'key': 'rerunResultCount', 'type': 'int'}
}
def __init__(self, count=None, duration=None, group_by_field=None, group_by_value=None, outcome=None, rerun_result_count=None):
super(AggregatedResultsByOutcome, self).__init__()
self.count = count
self.duration = duration
self.group_by_field = group_by_field
self.group_by_value = group_by_value
self.outcome = outcome
self.rerun_result_count = rerun_result_count
class AggregatedResultsDifference(Model):
"""
:param increase_in_duration:
:type increase_in_duration: object
:param increase_in_failures:
:type increase_in_failures: int
:param increase_in_non_impacted_tests:
:type increase_in_non_impacted_tests: int
:param increase_in_other_tests:
:type increase_in_other_tests: int
:param increase_in_passed_tests:
:type increase_in_passed_tests: int
:param increase_in_total_tests:
:type increase_in_total_tests: int
"""
_attribute_map = {
'increase_in_duration': {'key': 'increaseInDuration', 'type': 'object'},
'increase_in_failures': {'key': 'increaseInFailures', 'type': 'int'},
'increase_in_non_impacted_tests': {'key': 'increaseInNonImpactedTests', 'type': 'int'},
'increase_in_other_tests': {'key': 'increaseInOtherTests', 'type': 'int'},
'increase_in_passed_tests': {'key': 'increaseInPassedTests', 'type': 'int'},
'increase_in_total_tests': {'key': 'increaseInTotalTests', 'type': 'int'}
}
def __init__(self, increase_in_duration=None, increase_in_failures=None, increase_in_non_impacted_tests=None, increase_in_other_tests=None, increase_in_passed_tests=None, increase_in_total_tests=None):
super(AggregatedResultsDifference, self).__init__()
self.increase_in_duration = increase_in_duration
self.increase_in_failures = increase_in_failures
self.increase_in_non_impacted_tests = increase_in_non_impacted_tests
self.increase_in_other_tests = increase_in_other_tests
self.increase_in_passed_tests = increase_in_passed_tests
self.increase_in_total_tests = increase_in_total_tests
class AggregatedRunsByOutcome(Model):
"""
:param outcome:
:type outcome: object
:param runs_count:
:type runs_count: int
"""
_attribute_map = {
'outcome': {'key': 'outcome', 'type': 'object'},
'runs_count': {'key': 'runsCount', 'type': 'int'}
}
def __init__(self, outcome=None, runs_count=None):
super(AggregatedRunsByOutcome, self).__init__()
self.outcome = outcome
self.runs_count = runs_count
class AggregatedRunsByState(Model):
"""
:param results_by_outcome:
:type results_by_outcome: dict
:param runs_count:
:type runs_count: int
:param state:
:type state: object
"""
_attribute_map = {
'results_by_outcome': {'key': 'resultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'runs_count': {'key': 'runsCount', 'type': 'int'},
'state': {'key': 'state', 'type': 'object'}
}
def __init__(self, results_by_outcome=None, runs_count=None, state=None):
super(AggregatedRunsByState, self).__init__()
self.results_by_outcome = results_by_outcome
self.runs_count = runs_count
self.state = state
class BuildConfiguration(Model):
"""
BuildConfiguration Details.
:param branch_name: Branch name for which build is generated.
:type branch_name: str
:param build_definition_id: BuildDefinitionId for build.
:type build_definition_id: int
:param build_system: Build system.
:type build_system: str
:param creation_date: Build Creation Date.
:type creation_date: datetime
:param flavor: Build flavor (e.g. Build/Release).
:type flavor: str
:param id: BuildConfiguration Id.
:type id: int
:param number: Build Number.
:type number: str
:param platform: BuildConfiguration Platform.
:type platform: str
:param project: Project associated with this BuildConfiguration.
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param repository_guid: Repository Guid for the Build.
:type repository_guid: str
:param repository_id: Repository Id.
:type repository_id: int
:param repository_type: Repository Type (e.g. TFSGit).
:type repository_type: str
:param source_version: Source version (first commit) for which the build was triggered.
:type source_version: str
:param target_branch_name: Target BranchName.
:type target_branch_name: str
:param uri: Build Uri.
:type uri: str
"""
_attribute_map = {
'branch_name': {'key': 'branchName', 'type': 'str'},
'build_definition_id': {'key': 'buildDefinitionId', 'type': 'int'},
'build_system': {'key': 'buildSystem', 'type': 'str'},
'creation_date': {'key': 'creationDate', 'type': 'iso-8601'},
'flavor': {'key': 'flavor', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'number': {'key': 'number', 'type': 'str'},
'platform': {'key': 'platform', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'repository_guid': {'key': 'repositoryGuid', 'type': 'str'},
'repository_id': {'key': 'repositoryId', 'type': 'int'},
'repository_type': {'key': 'repositoryType', 'type': 'str'},
'source_version': {'key': 'sourceVersion', 'type': 'str'},
'target_branch_name': {'key': 'targetBranchName', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'}
}
def __init__(self, branch_name=None, build_definition_id=None, build_system=None, creation_date=None, flavor=None, id=None, number=None, platform=None, project=None, repository_guid=None, repository_id=None, repository_type=None, source_version=None, target_branch_name=None, uri=None):
super(BuildConfiguration, self).__init__()
self.branch_name = branch_name
self.build_definition_id = build_definition_id
self.build_system = build_system
self.creation_date = creation_date
self.flavor = flavor
self.id = id
self.number = number
self.platform = platform
self.project = project
self.repository_guid = repository_guid
self.repository_id = repository_id
self.repository_type = repository_type
self.source_version = source_version
self.target_branch_name = target_branch_name
self.uri = uri
class BuildCoverage(Model):
"""
Build Coverage Detail
:param code_coverage_file_url: Code Coverage File Url
:type code_coverage_file_url: str
:param configuration: Build Configuration
:type configuration: :class:`BuildConfiguration <azure.devops.v7_1.test.models.BuildConfiguration>`
:param last_error: Last Error
:type last_error: str
:param modules: List of Modules
:type modules: list of :class:`ModuleCoverage <azure.devops.v7_1.test.models.ModuleCoverage>`
:param state: State
:type state: str
"""
_attribute_map = {
'code_coverage_file_url': {'key': 'codeCoverageFileUrl', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'BuildConfiguration'},
'last_error': {'key': 'lastError', 'type': 'str'},
'modules': {'key': 'modules', 'type': '[ModuleCoverage]'},
'state': {'key': 'state', 'type': 'str'}
}
def __init__(self, code_coverage_file_url=None, configuration=None, last_error=None, modules=None, state=None):
super(BuildCoverage, self).__init__()
self.code_coverage_file_url = code_coverage_file_url
self.configuration = configuration
self.last_error = last_error
self.modules = modules
self.state = state
class BuildReference(Model):
"""
Reference to a build.
:param branch_name: Branch name.
:type branch_name: str
:param build_system: Build system.
:type build_system: str
:param definition_id: Build Definition ID.
:type definition_id: int
:param id: Build ID.
:type id: int
:param number: Build Number.
:type number: str
:param repository_id: Repository ID.
:type repository_id: str
:param uri: Build URI.
:type uri: str
"""
_attribute_map = {
'branch_name': {'key': 'branchName', 'type': 'str'},
'build_system': {'key': 'buildSystem', 'type': 'str'},
'definition_id': {'key': 'definitionId', 'type': 'int'},
'id': {'key': 'id', 'type': 'int'},
'number': {'key': 'number', 'type': 'str'},
'repository_id': {'key': 'repositoryId', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'}
}
def __init__(self, branch_name=None, build_system=None, definition_id=None, id=None, number=None, repository_id=None, uri=None):
super(BuildReference, self).__init__()
self.branch_name = branch_name
self.build_system = build_system
self.definition_id = definition_id
self.id = id
self.number = number
self.repository_id = repository_id
self.uri = uri
class CloneOperationInformation(Model):
"""
Detail About Clone Operation.
:param clone_statistics: Clone Statistics
:type clone_statistics: :class:`CloneStatistics <azure.devops.v7_1.test.models.CloneStatistics>`
:param completion_date: If the operation is complete, the DateTime of completion. If the operation is not complete, this is DateTime.MaxValue
:type completion_date: datetime
:param creation_date: DateTime when the operation was started
:type creation_date: datetime
:param destination_object: Shallow reference of the destination
:type destination_object: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param destination_plan: Shallow reference of the destination
:type destination_plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param destination_project: Shallow reference of the destination
:type destination_project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param message: If the operation has Failed, Message contains the reason for failure. Null otherwise.
:type message: str
:param op_id: The ID of the operation
:type op_id: int
:param result_object_type: The type of the object generated as a result of the Clone operation
:type result_object_type: object
:param source_object: Shallow reference of the source
:type source_object: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param source_plan: Shallow reference of the source
:type source_plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param source_project: Shallow reference of the source
:type source_project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param state: Current state of the operation. When State reaches Succeeded or Failed, the operation is complete
:type state: object
:param url: Url for getting the clone information
:type url: str
"""
_attribute_map = {
'clone_statistics': {'key': 'cloneStatistics', 'type': 'CloneStatistics'},
'completion_date': {'key': 'completionDate', 'type': 'iso-8601'},
'creation_date': {'key': 'creationDate', 'type': 'iso-8601'},
'destination_object': {'key': 'destinationObject', 'type': 'ShallowReference'},
'destination_plan': {'key': 'destinationPlan', 'type': 'ShallowReference'},
'destination_project': {'key': 'destinationProject', 'type': 'ShallowReference'},
'message': {'key': 'message', 'type': 'str'},
'op_id': {'key': 'opId', 'type': 'int'},
'result_object_type': {'key': 'resultObjectType', 'type': 'object'},
'source_object': {'key': 'sourceObject', 'type': 'ShallowReference'},
'source_plan': {'key': 'sourcePlan', 'type': 'ShallowReference'},
'source_project': {'key': 'sourceProject', 'type': 'ShallowReference'},
'state': {'key': 'state', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, clone_statistics=None, completion_date=None, creation_date=None, destination_object=None, destination_plan=None, destination_project=None, message=None, op_id=None, result_object_type=None, source_object=None, source_plan=None, source_project=None, state=None, url=None):
super(CloneOperationInformation, self).__init__()
self.clone_statistics = clone_statistics
self.completion_date = completion_date
self.creation_date = creation_date
self.destination_object = destination_object
self.destination_plan = destination_plan
self.destination_project = destination_project
self.message = message
self.op_id = op_id
self.result_object_type = result_object_type
self.source_object = source_object
self.source_plan = source_plan
self.source_project = source_project
self.state = state
self.url = url
class CloneOptions(Model):
"""
Clone options for cloning the test suite.
:param clone_requirements: If set to true, requirements will be cloned
:type clone_requirements: bool
:param copy_all_suites: copy all suites from a source plan
:type copy_all_suites: bool
:param copy_ancestor_hierarchy: copy ancestor hierarchy
:type copy_ancestor_hierarchy: bool
:param destination_work_item_type: Name of the workitem type of the clone
:type destination_work_item_type: str
:param override_parameters: Key/value pairs where the parameter named by the key is overridden with the value.
:type override_parameters: dict
:param related_link_comment: Comment on the link that will link the newly cloned test case to the original. Set to null for no comment
:type related_link_comment: str
"""
_attribute_map = {
'clone_requirements': {'key': 'cloneRequirements', 'type': 'bool'},
'copy_all_suites': {'key': 'copyAllSuites', 'type': 'bool'},
'copy_ancestor_hierarchy': {'key': 'copyAncestorHierarchy', 'type': 'bool'},
'destination_work_item_type': {'key': 'destinationWorkItemType', 'type': 'str'},
'override_parameters': {'key': 'overrideParameters', 'type': '{str}'},
'related_link_comment': {'key': 'relatedLinkComment', 'type': 'str'}
}
def __init__(self, clone_requirements=None, copy_all_suites=None, copy_ancestor_hierarchy=None, destination_work_item_type=None, override_parameters=None, related_link_comment=None):
super(CloneOptions, self).__init__()
self.clone_requirements = clone_requirements
self.copy_all_suites = copy_all_suites
self.copy_ancestor_hierarchy = copy_ancestor_hierarchy
self.destination_work_item_type = destination_work_item_type
self.override_parameters = override_parameters
self.related_link_comment = related_link_comment
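# Illustrative sketch (not part of the generated models): a CloneOptions payload
# that clones requirements and overrides one parameter; all values here are
# hypothetical, and the keyword names mirror the attribute map above.
def _example_clone_options():
    return CloneOptions(clone_requirements=True,
                        copy_all_suites=False,
                        copy_ancestor_hierarchy=True,
                        destination_work_item_type='Test Case',
                        override_parameters={'Browser': 'Edge'},
                        related_link_comment='Cloned for the regression pass')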
class CloneStatistics(Model):
"""
Clone Statistics Details.
:param cloned_requirements_count: Number of requirements cloned so far.
:type cloned_requirements_count: int
:param cloned_shared_steps_count: Number of shared steps cloned so far.
:type cloned_shared_steps_count: int
:param cloned_test_cases_count: Number of test cases cloned so far
:type cloned_test_cases_count: int
:param total_requirements_count: Total number of requirements to be cloned
:type total_requirements_count: int
:param total_test_cases_count: Total number of test cases to be cloned
:type total_test_cases_count: int
"""
_attribute_map = {
'cloned_requirements_count': {'key': 'clonedRequirementsCount', 'type': 'int'},
'cloned_shared_steps_count': {'key': 'clonedSharedStepsCount', 'type': 'int'},
'cloned_test_cases_count': {'key': 'clonedTestCasesCount', 'type': 'int'},
'total_requirements_count': {'key': 'totalRequirementsCount', 'type': 'int'},
'total_test_cases_count': {'key': 'totalTestCasesCount', 'type': 'int'}
}
def __init__(self, cloned_requirements_count=None, cloned_shared_steps_count=None, cloned_test_cases_count=None, total_requirements_count=None, total_test_cases_count=None):
super(CloneStatistics, self).__init__()
self.cloned_requirements_count = cloned_requirements_count
self.cloned_shared_steps_count = cloned_shared_steps_count
self.cloned_test_cases_count = cloned_test_cases_count
self.total_requirements_count = total_requirements_count
self.total_test_cases_count = total_test_cases_count
class CodeCoverageData(Model):
"""
Represents the build configuration (platform, flavor) and coverage data for the build
:param build_flavor: Flavor of build for which data is retrieved/published
:type build_flavor: str
:param build_platform: Platform of build for which data is retrieved/published
:type build_platform: str
:param coverage_stats: List of coverage data for the build
:type coverage_stats: list of :class:`CodeCoverageStatistics <azure.devops.v7_1.test.models.CodeCoverageStatistics>`
"""
_attribute_map = {
'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
'build_platform': {'key': 'buildPlatform', 'type': 'str'},
'coverage_stats': {'key': 'coverageStats', 'type': '[CodeCoverageStatistics]'}
}
def __init__(self, build_flavor=None, build_platform=None, coverage_stats=None):
super(CodeCoverageData, self).__init__()
self.build_flavor = build_flavor
self.build_platform = build_platform
self.coverage_stats = coverage_stats
class CodeCoverageStatistics(Model):
"""
Represents the code coverage statistics for a particular coverage label (modules, statements, blocks, etc.)
:param covered: Covered units
:type covered: int
:param delta: Delta of coverage
:type delta: float
:param is_delta_available: Is delta valid
:type is_delta_available: bool
:param label: Label of coverage data ("Blocks", "Statements", "Modules", etc.)
:type label: str
:param position: Position of label
:type position: int
:param total: Total units
:type total: int
"""
_attribute_map = {
'covered': {'key': 'covered', 'type': 'int'},
'delta': {'key': 'delta', 'type': 'float'},
'is_delta_available': {'key': 'isDeltaAvailable', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'position': {'key': 'position', 'type': 'int'},
'total': {'key': 'total', 'type': 'int'}
}
def __init__(self, covered=None, delta=None, is_delta_available=None, label=None, position=None, total=None):
super(CodeCoverageStatistics, self).__init__()
self.covered = covered
self.delta = delta
self.is_delta_available = is_delta_available
self.label = label
self.position = position
self.total = total
class CodeCoverageSummary(Model):
"""
Represents the code coverage summary results. Used to publish or retrieve a code coverage summary against a build.
:param build: Uri of build for which data is retrieved/published
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param coverage_data: List of coverage data and details for the build
:type coverage_data: list of :class:`CodeCoverageData <azure.devops.v7_1.test.models.CodeCoverageData>`
:param delta_build: Uri of build against which difference in coverage is computed
:type delta_build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param status: Status of the code coverage summary.
:type status: object
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'ShallowReference'},
'coverage_data': {'key': 'coverageData', 'type': '[CodeCoverageData]'},
'delta_build': {'key': 'deltaBuild', 'type': 'ShallowReference'},
'status': {'key': 'status', 'type': 'object'}
}
def __init__(self, build=None, coverage_data=None, delta_build=None, status=None):
super(CodeCoverageSummary, self).__init__()
self.build = build
self.coverage_data = coverage_data
self.delta_build = delta_build
self.status = status
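# Illustrative sketch (not part of the generated models): assembling the nested
# payload one would publish as a coverage summary against a build. The counts
# and build id are hypothetical; ShallowReference is defined later in this
# module with an id field.
def _example_coverage_summary():
    lines = CodeCoverageStatistics(label='Lines', position=1,
                                   covered=120, total=150,
                                   delta=2.5, is_delta_available=True)
    data = CodeCoverageData(build_flavor='Debug', build_platform='x64',
                            coverage_stats=[lines])
    return CodeCoverageSummary(build=ShallowReference(id='1234'),
                               coverage_data=[data])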
class CoverageStatistics(Model):
"""
:param blocks_covered:
:type blocks_covered: int
:param blocks_not_covered:
:type blocks_not_covered: int
:param lines_covered:
:type lines_covered: int
:param lines_not_covered:
:type lines_not_covered: int
:param lines_partially_covered:
:type lines_partially_covered: int
"""
_attribute_map = {
'blocks_covered': {'key': 'blocksCovered', 'type': 'int'},
'blocks_not_covered': {'key': 'blocksNotCovered', 'type': 'int'},
'lines_covered': {'key': 'linesCovered', 'type': 'int'},
'lines_not_covered': {'key': 'linesNotCovered', 'type': 'int'},
'lines_partially_covered': {'key': 'linesPartiallyCovered', 'type': 'int'}
}
def __init__(self, blocks_covered=None, blocks_not_covered=None, lines_covered=None, lines_not_covered=None, lines_partially_covered=None):
super(CoverageStatistics, self).__init__()
self.blocks_covered = blocks_covered
self.blocks_not_covered = blocks_not_covered
self.lines_covered = lines_covered
self.lines_not_covered = lines_not_covered
self.lines_partially_covered = lines_partially_covered
class CustomTestField(Model):
"""
Information about a custom field. Allowed key/value pairs: (AttemptId: int, IsTestResultFlaky: bool)
:param field_name: Field Name.
:type field_name: str
:param value: Field value.
:type value: object
"""
_attribute_map = {
'field_name': {'key': 'fieldName', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, field_name=None, value=None):
super(CustomTestField, self).__init__()
self.field_name = field_name
self.value = value
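# Illustrative sketch (not part of the generated models): the two key/value
# pairs the docstring above allows on a custom test field.
def _example_custom_fields():
    return [CustomTestField(field_name='AttemptId', value=2),
            CustomTestField(field_name='IsTestResultFlaky', value=True)]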
class CustomTestFieldDefinition(Model):
"""
:param field_id:
:type field_id: int
:param field_name:
:type field_name: str
:param field_type:
:type field_type: object
:param scope:
:type scope: object
"""
_attribute_map = {
'field_id': {'key': 'fieldId', 'type': 'int'},
'field_name': {'key': 'fieldName', 'type': 'str'},
'field_type': {'key': 'fieldType', 'type': 'object'},
'scope': {'key': 'scope', 'type': 'object'}
}
def __init__(self, field_id=None, field_name=None, field_type=None, scope=None):
super(CustomTestFieldDefinition, self).__init__()
self.field_id = field_id
self.field_name = field_name
self.field_type = field_type
self.scope = scope
class DtlEnvironmentDetails(Model):
"""
This is a temporary class to provide the details for the test run environment.
:param csm_content:
:type csm_content: str
:param csm_parameters:
:type csm_parameters: str
:param subscription_name:
:type subscription_name: str
"""
_attribute_map = {
'csm_content': {'key': 'csmContent', 'type': 'str'},
'csm_parameters': {'key': 'csmParameters', 'type': 'str'},
'subscription_name': {'key': 'subscriptionName', 'type': 'str'}
}
def __init__(self, csm_content=None, csm_parameters=None, subscription_name=None):
super(DtlEnvironmentDetails, self).__init__()
self.csm_content = csm_content
self.csm_parameters = csm_parameters
self.subscription_name = subscription_name
class FailingSince(Model):
"""
Failing since information of a test result.
:param build: Build reference since failing.
:type build: :class:`BuildReference <azure.devops.v7_1.test.models.BuildReference>`
:param date: Time since failing (UTC).
:type date: datetime
:param release: Release reference since failing.
:type release: :class:`ReleaseReference <azure.devops.v7_1.test.models.ReleaseReference>`
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'BuildReference'},
'date': {'key': 'date', 'type': 'iso-8601'},
'release': {'key': 'release', 'type': 'ReleaseReference'}
}
def __init__(self, build=None, date=None, release=None):
super(FailingSince, self).__init__()
self.build = build
self.date = date
self.release = release
class FieldDetailsForTestResults(Model):
"""
:param field_name: Group by field name
:type field_name: str
:param groups_for_field: Group by field values
:type groups_for_field: list of object
"""
_attribute_map = {
'field_name': {'key': 'fieldName', 'type': 'str'},
'groups_for_field': {'key': 'groupsForField', 'type': '[object]'}
}
def __init__(self, field_name=None, groups_for_field=None):
super(FieldDetailsForTestResults, self).__init__()
self.field_name = field_name
self.groups_for_field = groups_for_field
class FunctionCoverage(Model):
"""
:param class_:
:type class_: str
:param name:
:type name: str
:param namespace:
:type namespace: str
:param source_file:
:type source_file: str
:param statistics:
:type statistics: :class:`CoverageStatistics <azure.devops.v7_1.test.models.CoverageStatistics>`
"""
_attribute_map = {
'class_': {'key': 'class', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'source_file': {'key': 'sourceFile', 'type': 'str'},
'statistics': {'key': 'statistics', 'type': 'CoverageStatistics'}
}
def __init__(self, class_=None, name=None, namespace=None, source_file=None, statistics=None):
super(FunctionCoverage, self).__init__()
self.class_ = class_
self.name = name
self.namespace = namespace
self.source_file = source_file
self.statistics = statistics
class GraphSubjectBase(Model):
"""
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None):
super(GraphSubjectBase, self).__init__()
self._links = _links
self.descriptor = descriptor
self.display_name = display_name
self.url = url
class IdentityRef(GraphSubjectBase):
"""
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param directory_alias: Deprecated - Can be retrieved by querying the Graph user referenced in the "self" entry of the IdentityRef "_links" dictionary
:type directory_alias: str
:param id:
:type id: str
:param image_url: Deprecated - Available in the "avatar" entry of the IdentityRef "_links" dictionary
:type image_url: str
:param inactive: Deprecated - Can be retrieved by querying the Graph membership state referenced in the "membershipState" entry of the GraphUser "_links" dictionary
:type inactive: bool
:param is_aad_identity: Deprecated - Can be inferred from the subject type of the descriptor (Descriptor.IsAadUserType/Descriptor.IsAadGroupType)
:type is_aad_identity: bool
:param is_container: Deprecated - Can be inferred from the subject type of the descriptor (Descriptor.IsGroupType)
:type is_container: bool
:param is_deleted_in_origin:
:type is_deleted_in_origin: bool
:param profile_url: Deprecated - not in use in most preexisting implementations of ToIdentityRef
:type profile_url: str
:param unique_name: Deprecated - use Domain+PrincipalName instead
:type unique_name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'directory_alias': {'key': 'directoryAlias', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'image_url': {'key': 'imageUrl', 'type': 'str'},
'inactive': {'key': 'inactive', 'type': 'bool'},
'is_aad_identity': {'key': 'isAadIdentity', 'type': 'bool'},
'is_container': {'key': 'isContainer', 'type': 'bool'},
'is_deleted_in_origin': {'key': 'isDeletedInOrigin', 'type': 'bool'},
'profile_url': {'key': 'profileUrl', 'type': 'str'},
'unique_name': {'key': 'uniqueName', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, directory_alias=None, id=None, image_url=None, inactive=None, is_aad_identity=None, is_container=None, is_deleted_in_origin=None, profile_url=None, unique_name=None):
super(IdentityRef, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url)
self.directory_alias = directory_alias
self.id = id
self.image_url = image_url
self.inactive = inactive
self.is_aad_identity = is_aad_identity
self.is_container = is_container
self.is_deleted_in_origin = is_deleted_in_origin
self.profile_url = profile_url
self.unique_name = unique_name
class JobReference(Model):
"""
Job in pipeline. This is related to matrixing in YAML.
:param attempt: Attempt number of the job
:type attempt: int
:param job_name: Matrixing in YAML generates copies of a job with different inputs from the matrix. JobName is the name of one such input. The maximum supported length for the name is 256 characters.
:type job_name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'job_name': {'key': 'jobName', 'type': 'str'}
}
def __init__(self, attempt=None, job_name=None):
super(JobReference, self).__init__()
self.attempt = attempt
self.job_name = job_name
class LastResultDetails(Model):
"""
Last result details of test point.
:param date_completed: Completed date of last result.
:type date_completed: datetime
:param duration: Duration of the last result in milliseconds.
:type duration: long
:param run_by: The user who executed the last result.
:type run_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
"""
_attribute_map = {
'date_completed': {'key': 'dateCompleted', 'type': 'iso-8601'},
'duration': {'key': 'duration', 'type': 'long'},
'run_by': {'key': 'runBy', 'type': 'IdentityRef'}
}
def __init__(self, date_completed=None, duration=None, run_by=None):
super(LastResultDetails, self).__init__()
self.date_completed = date_completed
self.duration = duration
self.run_by = run_by
class LinkedWorkItemsQuery(Model):
"""
:param automated_test_names:
:type automated_test_names: list of str
:param plan_id:
:type plan_id: int
:param point_ids:
:type point_ids: list of int
:param suite_ids:
:type suite_ids: list of int
:param test_case_ids:
:type test_case_ids: list of int
:param work_item_category:
:type work_item_category: str
"""
_attribute_map = {
'automated_test_names': {'key': 'automatedTestNames', 'type': '[str]'},
'plan_id': {'key': 'planId', 'type': 'int'},
'point_ids': {'key': 'pointIds', 'type': '[int]'},
'suite_ids': {'key': 'suiteIds', 'type': '[int]'},
'test_case_ids': {'key': 'testCaseIds', 'type': '[int]'},
'work_item_category': {'key': 'workItemCategory', 'type': 'str'}
}
def __init__(self, automated_test_names=None, plan_id=None, point_ids=None, suite_ids=None, test_case_ids=None, work_item_category=None):
super(LinkedWorkItemsQuery, self).__init__()
self.automated_test_names = automated_test_names
self.plan_id = plan_id
self.point_ids = point_ids
self.suite_ids = suite_ids
self.test_case_ids = test_case_ids
self.work_item_category = work_item_category
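# Illustrative sketch (not part of the generated models): querying bugs linked
# to two test cases of one suite; the ids and category name are hypothetical.
def _example_linked_work_items_query():
    return LinkedWorkItemsQuery(plan_id=42,
                                suite_ids=[7],
                                test_case_ids=[101, 102],
                                work_item_category='Microsoft.BugCategory')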
class LinkedWorkItemsQueryResult(Model):
"""
:param automated_test_name:
:type automated_test_name: str
:param plan_id:
:type plan_id: int
:param point_id:
:type point_id: int
:param suite_id:
:type suite_id: int
:param test_case_id:
:type test_case_id: int
:param work_items:
:type work_items: list of :class:`WorkItemReference <azure.devops.v7_1.test.models.WorkItemReference>`
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'int'},
'point_id': {'key': 'pointId', 'type': 'int'},
'suite_id': {'key': 'suiteId', 'type': 'int'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'},
'work_items': {'key': 'workItems', 'type': '[WorkItemReference]'}
}
def __init__(self, automated_test_name=None, plan_id=None, point_id=None, suite_id=None, test_case_id=None, work_items=None):
super(LinkedWorkItemsQueryResult, self).__init__()
self.automated_test_name = automated_test_name
self.plan_id = plan_id
self.point_id = point_id
self.suite_id = suite_id
self.test_case_id = test_case_id
self.work_items = work_items
class ModuleCoverage(Model):
"""
:param block_count:
:type block_count: int
:param block_data:
:type block_data: str
:param file_url: Code Coverage File Url
:type file_url: str
:param functions:
:type functions: list of :class:`FunctionCoverage <azure.devops.v7_1.test.models.FunctionCoverage>`
:param name:
:type name: str
:param signature:
:type signature: str
:param signature_age:
:type signature_age: int
:param statistics:
:type statistics: :class:`CoverageStatistics <azure.devops.v7_1.test.models.CoverageStatistics>`
"""
_attribute_map = {
'block_count': {'key': 'blockCount', 'type': 'int'},
'block_data': {'key': 'blockData', 'type': 'str'},
'file_url': {'key': 'fileUrl', 'type': 'str'},
'functions': {'key': 'functions', 'type': '[FunctionCoverage]'},
'name': {'key': 'name', 'type': 'str'},
'signature': {'key': 'signature', 'type': 'str'},
'signature_age': {'key': 'signatureAge', 'type': 'int'},
'statistics': {'key': 'statistics', 'type': 'CoverageStatistics'}
}
def __init__(self, block_count=None, block_data=None, file_url=None, functions=None, name=None, signature=None, signature_age=None, statistics=None):
super(ModuleCoverage, self).__init__()
self.block_count = block_count
self.block_data = block_data
self.file_url = file_url
self.functions = functions
self.name = name
self.signature = signature
self.signature_age = signature_age
self.statistics = statistics
class NameValuePair(Model):
"""
Name value pair
:param name: Name
:type name: str
:param value: Value
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, name=None, value=None):
super(NameValuePair, self).__init__()
self.name = name
self.value = value
class PhaseReference(Model):
"""
Phase in pipeline
:param attempt: Attempt number of the phase
:type attempt: int
:param phase_name: Name of the phase. Maximum supported length for name is 256 character.
:type phase_name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'phase_name': {'key': 'phaseName', 'type': 'str'}
}
def __init__(self, attempt=None, phase_name=None):
super(PhaseReference, self).__init__()
self.attempt = attempt
self.phase_name = phase_name
class PipelineReference(Model):
"""
Pipeline reference
:param job_reference: Reference of the job
:type job_reference: :class:`JobReference <azure.devops.v7_1.test.models.JobReference>`
:param phase_reference: Reference of the phase.
:type phase_reference: :class:`PhaseReference <azure.devops.v7_1.test.models.PhaseReference>`
:param pipeline_id: Reference of the pipeline to which this pipeline instance is related.
:type pipeline_id: int
:param stage_reference: Reference of the stage.
:type stage_reference: :class:`StageReference <azure.devops.v7_1.test.models.StageReference>`
"""
_attribute_map = {
'job_reference': {'key': 'jobReference', 'type': 'JobReference'},
'phase_reference': {'key': 'phaseReference', 'type': 'PhaseReference'},
'pipeline_id': {'key': 'pipelineId', 'type': 'int'},
'stage_reference': {'key': 'stageReference', 'type': 'StageReference'}
}
def __init__(self, job_reference=None, phase_reference=None, pipeline_id=None, stage_reference=None):
super(PipelineReference, self).__init__()
self.job_reference = job_reference
self.phase_reference = phase_reference
self.pipeline_id = pipeline_id
self.stage_reference = stage_reference
class PlanUpdateModel(Model):
"""
A model class used for creating and updating test plans.
:param area: Area path to which the test plan belongs. This should be set to area path of the team that works on this test plan.
:type area: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param automated_test_environment:
:type automated_test_environment: :class:`TestEnvironment <azure.devops.v7_1.test.models.TestEnvironment>`
:param automated_test_settings:
:type automated_test_settings: :class:`TestSettings <azure.devops.v7_1.test.models.TestSettings>`
:param build: Build ID of the build whose quality is tested by the tests in this test plan. For automated testing, this build ID is used to find the test binaries that contain automated test methods.
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param build_definition: The Build Definition that generates a build associated with this test plan.
:type build_definition: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param configuration_ids: IDs of configurations to be applied when new test suites and test cases are added to the test plan.
:type configuration_ids: list of int
:param description: Description of the test plan.
:type description: str
:param end_date: End date for the test plan.
:type end_date: str
    :param iteration: Iteration path assigned to the test plan. This indicates the target iteration by which the testing in this plan is supposed to be complete and the product ready to be released.
:type iteration: str
:param manual_test_environment:
:type manual_test_environment: :class:`TestEnvironment <azure.devops.v7_1.test.models.TestEnvironment>`
:param manual_test_settings:
:type manual_test_settings: :class:`TestSettings <azure.devops.v7_1.test.models.TestSettings>`
:param name: Name of the test plan.
:type name: str
:param owner: Owner of the test plan.
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param release_environment_definition: Release Environment to be used to deploy the build and run automated tests from this test plan.
:type release_environment_definition: :class:`ReleaseEnvironmentDefinitionReference <azure.devops.v7_1.test.models.ReleaseEnvironmentDefinitionReference>`
:param start_date: Start date for the test plan.
:type start_date: str
:param state: State of the test plan.
:type state: str
:param status:
:type status: str
:param test_outcome_settings: Test Outcome settings
:type test_outcome_settings: :class:`TestOutcomeSettings <azure.devops.v7_1.test.models.TestOutcomeSettings>`
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'ShallowReference'},
'automated_test_environment': {'key': 'automatedTestEnvironment', 'type': 'TestEnvironment'},
'automated_test_settings': {'key': 'automatedTestSettings', 'type': 'TestSettings'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_definition': {'key': 'buildDefinition', 'type': 'ShallowReference'},
'configuration_ids': {'key': 'configurationIds', 'type': '[int]'},
'description': {'key': 'description', 'type': 'str'},
'end_date': {'key': 'endDate', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'manual_test_environment': {'key': 'manualTestEnvironment', 'type': 'TestEnvironment'},
'manual_test_settings': {'key': 'manualTestSettings', 'type': 'TestSettings'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'release_environment_definition': {'key': 'releaseEnvironmentDefinition', 'type': 'ReleaseEnvironmentDefinitionReference'},
'start_date': {'key': 'startDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'test_outcome_settings': {'key': 'testOutcomeSettings', 'type': 'TestOutcomeSettings'}
}
def __init__(self, area=None, automated_test_environment=None, automated_test_settings=None, build=None, build_definition=None, configuration_ids=None, description=None, end_date=None, iteration=None, manual_test_environment=None, manual_test_settings=None, name=None, owner=None, release_environment_definition=None, start_date=None, state=None, status=None, test_outcome_settings=None):
super(PlanUpdateModel, self).__init__()
self.area = area
self.automated_test_environment = automated_test_environment
self.automated_test_settings = automated_test_settings
self.build = build
self.build_definition = build_definition
self.configuration_ids = configuration_ids
self.description = description
self.end_date = end_date
self.iteration = iteration
self.manual_test_environment = manual_test_environment
self.manual_test_settings = manual_test_settings
self.name = name
self.owner = owner
self.release_environment_definition = release_environment_definition
self.start_date = start_date
self.state = state
self.status = status
self.test_outcome_settings = test_outcome_settings
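# Illustrative usage sketch (not part of the generated API): a minimal
# PlanUpdateModel built from scalar fields only, as might be sent to a
# create/update test plan call. All values are hypothetical.
_example_plan_update = PlanUpdateModel(
    name='Sprint 42 regression plan',
    description='Covers the payment workflow',
    state='Active',
    start_date='2024-01-01T00:00:00Z',
    end_date='2024-01-14T00:00:00Z'
)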
class PointAssignment(Model):
"""
    Adding test cases to a suite creates one or more test points based on the default configurations and testers assigned to the test suite. PointAssignment is the list of test points that were created for each of the test cases that were added to the test suite.
:param configuration: Configuration that was assigned to the test case.
:type configuration: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param tester: Tester that was assigned to the test case
:type tester: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
"""
_attribute_map = {
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'tester': {'key': 'tester', 'type': 'IdentityRef'}
}
def __init__(self, configuration=None, tester=None):
super(PointAssignment, self).__init__()
self.configuration = configuration
self.tester = tester
class PointsFilter(Model):
"""
Filter class for test point.
:param configuration_names: List of Configurations for filtering.
:type configuration_names: list of str
:param testcase_ids: List of test case id for filtering.
:type testcase_ids: list of int
:param testers: List of tester for filtering.
:type testers: list of :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
"""
_attribute_map = {
'configuration_names': {'key': 'configurationNames', 'type': '[str]'},
'testcase_ids': {'key': 'testcaseIds', 'type': '[int]'},
'testers': {'key': 'testers', 'type': '[IdentityRef]'}
}
def __init__(self, configuration_names=None, testcase_ids=None, testers=None):
super(PointsFilter, self).__init__()
self.configuration_names = configuration_names
self.testcase_ids = testcase_ids
self.testers = testers
class PointUpdateModel(Model):
"""
Model to update test point.
:param outcome: Outcome to update.
:type outcome: str
:param reset_to_active: Reset test point to active.
:type reset_to_active: bool
:param tester: Tester to update. Type IdentityRef.
:type tester: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
"""
_attribute_map = {
'outcome': {'key': 'outcome', 'type': 'str'},
'reset_to_active': {'key': 'resetToActive', 'type': 'bool'},
'tester': {'key': 'tester', 'type': 'IdentityRef'}
}
def __init__(self, outcome=None, reset_to_active=None, tester=None):
super(PointUpdateModel, self).__init__()
self.outcome = outcome
self.reset_to_active = reset_to_active
self.tester = tester
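# Illustrative usage sketch: marking a test point as Passed without
# reassigning the tester. Values are hypothetical.
_example_point_update = PointUpdateModel(outcome='Passed', reset_to_active=False)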
class PropertyBag(Model):
"""
The class to represent a Generic store for test session data.
:param bag: Generic store for test session data
:type bag: dict
"""
_attribute_map = {
'bag': {'key': 'bag', 'type': '{str}'}
}
def __init__(self, bag=None):
super(PropertyBag, self).__init__()
self.bag = bag
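# Illustrative usage sketch: PropertyBag is a plain string-keyed store, so
# arbitrary session metadata can be attached. Keys shown are hypothetical.
_example_property_bag = PropertyBag(bag={'browser': 'edge', 'locale': 'en-US'})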
class QueryModel(Model):
"""
:param query:
:type query: str
"""
_attribute_map = {
'query': {'key': 'query', 'type': 'str'}
}
def __init__(self, query=None):
super(QueryModel, self).__init__()
self.query = query
class ReferenceLinks(Model):
"""
The class to represent a collection of REST reference links.
:param links: The readonly view of the links. Because Reference links are readonly, we only want to expose them as read only.
:type links: dict
"""
_attribute_map = {
'links': {'key': 'links', 'type': '{object}'}
}
def __init__(self, links=None):
super(ReferenceLinks, self).__init__()
self.links = links
class ReleaseEnvironmentDefinitionReference(Model):
"""
Reference to release environment resource.
:param definition_id: ID of the release definition that contains the release environment definition.
:type definition_id: int
:param environment_definition_id: ID of the release environment definition.
:type environment_definition_id: int
"""
_attribute_map = {
'definition_id': {'key': 'definitionId', 'type': 'int'},
'environment_definition_id': {'key': 'environmentDefinitionId', 'type': 'int'}
}
def __init__(self, definition_id=None, environment_definition_id=None):
super(ReleaseEnvironmentDefinitionReference, self).__init__()
self.definition_id = definition_id
self.environment_definition_id = environment_definition_id
class ReleaseReference(Model):
"""
Reference to a release.
:param attempt: Number of Release Attempt.
:type attempt: int
:param creation_date: Release Creation Date(UTC).
:type creation_date: datetime
:param definition_id: Release definition ID.
:type definition_id: int
:param environment_creation_date: Environment creation Date(UTC).
:type environment_creation_date: datetime
:param environment_definition_id: Release environment definition ID.
:type environment_definition_id: int
:param environment_definition_name: Release environment definition name.
:type environment_definition_name: str
:param environment_id: Release environment ID.
:type environment_id: int
:param environment_name: Release environment name.
:type environment_name: str
:param id: Release ID.
:type id: int
:param name: Release name.
:type name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'creation_date': {'key': 'creationDate', 'type': 'iso-8601'},
'definition_id': {'key': 'definitionId', 'type': 'int'},
'environment_creation_date': {'key': 'environmentCreationDate', 'type': 'iso-8601'},
'environment_definition_id': {'key': 'environmentDefinitionId', 'type': 'int'},
'environment_definition_name': {'key': 'environmentDefinitionName', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'int'},
'environment_name': {'key': 'environmentName', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, attempt=None, creation_date=None, definition_id=None, environment_creation_date=None, environment_definition_id=None, environment_definition_name=None, environment_id=None, environment_name=None, id=None, name=None):
super(ReleaseReference, self).__init__()
self.attempt = attempt
self.creation_date = creation_date
self.definition_id = definition_id
self.environment_creation_date = environment_creation_date
self.environment_definition_id = environment_definition_id
self.environment_definition_name = environment_definition_name
self.environment_id = environment_id
self.environment_name = environment_name
self.id = id
self.name = name
class ResultRetentionSettings(Model):
"""
Test result retention settings
:param automated_results_retention_duration: Automated test result retention duration in days
:type automated_results_retention_duration: int
:param last_updated_by: Last Updated by identity
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last updated date
:type last_updated_date: datetime
:param manual_results_retention_duration: Manual test result retention duration in days
:type manual_results_retention_duration: int
"""
_attribute_map = {
'automated_results_retention_duration': {'key': 'automatedResultsRetentionDuration', 'type': 'int'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'manual_results_retention_duration': {'key': 'manualResultsRetentionDuration', 'type': 'int'}
}
def __init__(self, automated_results_retention_duration=None, last_updated_by=None, last_updated_date=None, manual_results_retention_duration=None):
super(ResultRetentionSettings, self).__init__()
self.automated_results_retention_duration = automated_results_retention_duration
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.manual_results_retention_duration = manual_results_retention_duration
class ResultsFilter(Model):
"""
:param automated_test_name:
:type automated_test_name: str
:param branch:
:type branch: str
:param executed_in:
:type executed_in: object
:param group_by:
:type group_by: str
:param max_complete_date:
:type max_complete_date: datetime
:param results_count:
:type results_count: int
:param test_case_id:
:type test_case_id: int
:param test_case_reference_ids:
:type test_case_reference_ids: list of int
:param test_plan_id:
:type test_plan_id: int
:param test_point_ids:
:type test_point_ids: list of int
:param test_results_context:
:type test_results_context: :class:`TestResultsContext <azure.devops.v7_1.test.models.TestResultsContext>`
:param trend_days:
:type trend_days: int
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'branch': {'key': 'branch', 'type': 'str'},
'executed_in': {'key': 'executedIn', 'type': 'object'},
'group_by': {'key': 'groupBy', 'type': 'str'},
'max_complete_date': {'key': 'maxCompleteDate', 'type': 'iso-8601'},
'results_count': {'key': 'resultsCount', 'type': 'int'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'},
'test_case_reference_ids': {'key': 'testCaseReferenceIds', 'type': '[int]'},
'test_plan_id': {'key': 'testPlanId', 'type': 'int'},
'test_point_ids': {'key': 'testPointIds', 'type': '[int]'},
'test_results_context': {'key': 'testResultsContext', 'type': 'TestResultsContext'},
'trend_days': {'key': 'trendDays', 'type': 'int'}
}
def __init__(self, automated_test_name=None, branch=None, executed_in=None, group_by=None, max_complete_date=None, results_count=None, test_case_id=None, test_case_reference_ids=None, test_plan_id=None, test_point_ids=None, test_results_context=None, trend_days=None):
super(ResultsFilter, self).__init__()
self.automated_test_name = automated_test_name
self.branch = branch
self.executed_in = executed_in
self.group_by = group_by
self.max_complete_date = max_complete_date
self.results_count = results_count
self.test_case_id = test_case_id
self.test_case_reference_ids = test_case_reference_ids
self.test_plan_id = test_plan_id
self.test_point_ids = test_point_ids
self.test_results_context = test_results_context
self.trend_days = trend_days
class RunCreateModel(Model):
"""
Test run create details.
:param automated: true if test run is automated, false otherwise. By default it will be false.
:type automated: bool
:param build: An abstracted reference to the build that it belongs.
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param build_drop_location: Drop location of the build used for test run.
:type build_drop_location: str
    :param build_flavor: Flavor of the build used for test run (e.g., Release, Debug).
    :type build_flavor: str
    :param build_platform: Platform of the build used for test run (e.g., x86, amd64).
    :type build_platform: str
:param build_reference: BuildReference of the test run.
:type build_reference: :class:`BuildConfiguration <azure.devops.v7_1.test.models.BuildConfiguration>`
:param comment: Comments entered by those analyzing the run.
:type comment: str
:param complete_date: Completed date time of the run.
:type complete_date: str
:param configuration_ids: IDs of the test configurations associated with the run.
:type configuration_ids: list of int
:param controller: Name of the test controller used for automated run.
:type controller: str
:param custom_test_fields: Additional properties of test Run.
:type custom_test_fields: list of :class:`CustomTestField <azure.devops.v7_1.test.models.CustomTestField>`
:param dtl_aut_environment: An abstracted reference to DtlAutEnvironment.
:type dtl_aut_environment: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param dtl_test_environment: An abstracted reference to DtlTestEnvironment.
:type dtl_test_environment: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param due_date: Due date and time for test run.
:type due_date: str
:param environment_details:
:type environment_details: :class:`DtlEnvironmentDetails <azure.devops.v7_1.test.models.DtlEnvironmentDetails>`
:param error_message: Error message associated with the run.
:type error_message: str
:param filter: Filter used for discovering the Run.
:type filter: :class:`RunFilter <azure.devops.v7_1.test.models.RunFilter>`
    :param iteration: The iteration in which to create the run. The root iteration of the team project is the default.
:type iteration: str
:param name: Name of the test run.
:type name: str
:param owner: Display name of the owner of the run.
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param pipeline_reference: Reference of the pipeline to which this test run belongs. PipelineReference.PipelineId should be equal to RunCreateModel.Build.Id
:type pipeline_reference: :class:`PipelineReference <azure.devops.v7_1.test.models.PipelineReference>`
:param plan: An abstracted reference to the plan that it belongs.
:type plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param point_ids: IDs of the test points to use in the run.
:type point_ids: list of int
:param release_environment_uri: URI of release environment associated with the run.
:type release_environment_uri: str
:param release_reference: Reference to release associated with test run.
:type release_reference: :class:`ReleaseReference <azure.devops.v7_1.test.models.ReleaseReference>`
:param release_uri: URI of release associated with the run.
:type release_uri: str
:param run_summary: Run summary for run Type = NoConfigRun.
:type run_summary: list of :class:`RunSummaryModel <azure.devops.v7_1.test.models.RunSummaryModel>`
:param run_timeout: Timespan till the run times out.
:type run_timeout: object
:param source_workflow: SourceWorkFlow(CI/CD) of the test run.
:type source_workflow: str
:param start_date: Start date time of the run.
:type start_date: str
    :param state: The state of the run. Type TestRunState. Valid states: NotStarted, InProgress, Waiting.
:type state: str
:param tags: Tags to attach with the test run, maximum of 5 tags can be added to run.
:type tags: list of :class:`TestTag <azure.devops.v7_1.test.models.TestTag>`
:param test_configurations_mapping: TestConfigurationMapping of the test run.
:type test_configurations_mapping: str
:param test_environment_id: ID of the test environment associated with the run.
:type test_environment_id: str
:param test_settings: An abstracted reference to the test settings resource.
:type test_settings: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
    :param type: Type of the run (RunType). Valid values: Unspecified, Normal, Blocking, Web, MtrRunInitiatedFromWeb, RunWithDtlEnv, NoConfigRun.
:type type: str
"""
_attribute_map = {
'automated': {'key': 'automated', 'type': 'bool'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_drop_location': {'key': 'buildDropLocation', 'type': 'str'},
'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
'build_platform': {'key': 'buildPlatform', 'type': 'str'},
'build_reference': {'key': 'buildReference', 'type': 'BuildConfiguration'},
'comment': {'key': 'comment', 'type': 'str'},
'complete_date': {'key': 'completeDate', 'type': 'str'},
'configuration_ids': {'key': 'configurationIds', 'type': '[int]'},
'controller': {'key': 'controller', 'type': 'str'},
'custom_test_fields': {'key': 'customTestFields', 'type': '[CustomTestField]'},
'dtl_aut_environment': {'key': 'dtlAutEnvironment', 'type': 'ShallowReference'},
'dtl_test_environment': {'key': 'dtlTestEnvironment', 'type': 'ShallowReference'},
'due_date': {'key': 'dueDate', 'type': 'str'},
'environment_details': {'key': 'environmentDetails', 'type': 'DtlEnvironmentDetails'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'RunFilter'},
'iteration': {'key': 'iteration', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'plan': {'key': 'plan', 'type': 'ShallowReference'},
'point_ids': {'key': 'pointIds', 'type': '[int]'},
'release_environment_uri': {'key': 'releaseEnvironmentUri', 'type': 'str'},
'release_reference': {'key': 'releaseReference', 'type': 'ReleaseReference'},
'release_uri': {'key': 'releaseUri', 'type': 'str'},
'run_summary': {'key': 'runSummary', 'type': '[RunSummaryModel]'},
'run_timeout': {'key': 'runTimeout', 'type': 'object'},
'source_workflow': {'key': 'sourceWorkflow', 'type': 'str'},
'start_date': {'key': 'startDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[TestTag]'},
'test_configurations_mapping': {'key': 'testConfigurationsMapping', 'type': 'str'},
'test_environment_id': {'key': 'testEnvironmentId', 'type': 'str'},
'test_settings': {'key': 'testSettings', 'type': 'ShallowReference'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, automated=None, build=None, build_drop_location=None, build_flavor=None, build_platform=None, build_reference=None, comment=None, complete_date=None, configuration_ids=None, controller=None, custom_test_fields=None, dtl_aut_environment=None, dtl_test_environment=None, due_date=None, environment_details=None, error_message=None, filter=None, iteration=None, name=None, owner=None, pipeline_reference=None, plan=None, point_ids=None, release_environment_uri=None, release_reference=None, release_uri=None, run_summary=None, run_timeout=None, source_workflow=None, start_date=None, state=None, tags=None, test_configurations_mapping=None, test_environment_id=None, test_settings=None, type=None):
super(RunCreateModel, self).__init__()
self.automated = automated
self.build = build
self.build_drop_location = build_drop_location
self.build_flavor = build_flavor
self.build_platform = build_platform
self.build_reference = build_reference
self.comment = comment
self.complete_date = complete_date
self.configuration_ids = configuration_ids
self.controller = controller
self.custom_test_fields = custom_test_fields
self.dtl_aut_environment = dtl_aut_environment
self.dtl_test_environment = dtl_test_environment
self.due_date = due_date
self.environment_details = environment_details
self.error_message = error_message
self.filter = filter
self.iteration = iteration
self.name = name
self.owner = owner
self.pipeline_reference = pipeline_reference
self.plan = plan
self.point_ids = point_ids
self.release_environment_uri = release_environment_uri
self.release_reference = release_reference
self.release_uri = release_uri
self.run_summary = run_summary
self.run_timeout = run_timeout
self.source_workflow = source_workflow
self.start_date = start_date
self.state = state
self.tags = tags
self.test_configurations_mapping = test_configurations_mapping
self.test_environment_id = test_environment_id
self.test_settings = test_settings
self.type = type
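# Illustrative usage sketch: a minimal automated RunCreateModel. Only scalar
# and list-of-int fields are used so the example has no forward references to
# classes defined later in this module. All values are hypothetical.
_example_run_create = RunCreateModel(
    name='Nightly regression run',
    automated=True,
    state='NotStarted',
    configuration_ids=[7],
    point_ids=[101, 102, 103]
)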
class RunFilter(Model):
"""
This class is used to provide the filters used for discovery
:param source_filter: filter for the test case sources (test containers)
:type source_filter: str
:param test_case_filter: filter for the test cases
:type test_case_filter: str
"""
_attribute_map = {
'source_filter': {'key': 'sourceFilter', 'type': 'str'},
'test_case_filter': {'key': 'testCaseFilter', 'type': 'str'}
}
def __init__(self, source_filter=None, test_case_filter=None):
super(RunFilter, self).__init__()
self.source_filter = source_filter
self.test_case_filter = test_case_filter
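# Illustrative usage sketch: discovering automated tests from build or release
# binaries, narrowed to a test category. The filter expressions below are
# hypothetical.
_example_run_filter = RunFilter(
    source_filter='**\\*test*.dll',
    test_case_filter='TestCategory=Smoke'
)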
class RunStatistic(Model):
"""
Test run statistics per outcome.
    :param count: Test result count for the given outcome.
:type count: int
:param outcome: Test result outcome
:type outcome: str
:param resolution_state: Test run Resolution State.
:type resolution_state: :class:`TestResolutionState <azure.devops.v7_1.test.models.TestResolutionState>`
:param result_metadata: ResultMetadata for the given outcome/count.
:type result_metadata: object
:param state: State of the test run
:type state: str
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'outcome': {'key': 'outcome', 'type': 'str'},
'resolution_state': {'key': 'resolutionState', 'type': 'TestResolutionState'},
'result_metadata': {'key': 'resultMetadata', 'type': 'object'},
'state': {'key': 'state', 'type': 'str'}
}
def __init__(self, count=None, outcome=None, resolution_state=None, result_metadata=None, state=None):
super(RunStatistic, self).__init__()
self.count = count
self.outcome = outcome
self.resolution_state = resolution_state
self.result_metadata = result_metadata
self.state = state
class RunSummaryModel(Model):
"""
Run summary for each output type of test.
:param duration: Total time taken in milliseconds.
:type duration: long
    :param result_count: Number of results for the given test outcome
    :type result_count: int
    :param test_outcome: Outcome that the summary is based on
    :type test_outcome: object
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'long'},
'result_count': {'key': 'resultCount', 'type': 'int'},
'test_outcome': {'key': 'testOutcome', 'type': 'object'}
}
def __init__(self, duration=None, result_count=None, test_outcome=None):
super(RunSummaryModel, self).__init__()
self.duration = duration
self.result_count = result_count
self.test_outcome = test_outcome
class RunUpdateModel(Model):
"""
:param build: An abstracted reference to the build that it belongs.
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param build_drop_location: Drop location of the build used for test run.
:type build_drop_location: str
    :param build_flavor: Flavor of the build used for test run (e.g., Release, Debug).
    :type build_flavor: str
    :param build_platform: Platform of the build used for test run (e.g., x86, amd64).
    :type build_platform: str
:param comment: Comments entered by those analyzing the run.
:type comment: str
:param completed_date: Completed date time of the run.
:type completed_date: str
:param controller: Name of the test controller used for automated run.
:type controller: str
    :param delete_in_progress_results: true to delete in-progress results, false otherwise.
:type delete_in_progress_results: bool
:param dtl_aut_environment: An abstracted reference to DtlAutEnvironment.
:type dtl_aut_environment: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param dtl_environment: An abstracted reference to DtlEnvironment.
:type dtl_environment: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param dtl_environment_details:
:type dtl_environment_details: :class:`DtlEnvironmentDetails <azure.devops.v7_1.test.models.DtlEnvironmentDetails>`
:param due_date: Due date and time for test run.
:type due_date: str
:param error_message: Error message associated with the run.
:type error_message: str
:param iteration: The iteration in which to create the run.
:type iteration: str
:param log_entries: Log entries associated with the run. Use a comma-separated list of multiple log entry objects. { logEntry }, { logEntry }, ...
:type log_entries: list of :class:`TestMessageLogDetails <azure.devops.v7_1.test.models.TestMessageLogDetails>`
:param name: Name of the test run.
:type name: str
:param release_environment_uri: URI of release environment associated with the run.
:type release_environment_uri: str
:param release_uri: URI of release associated with the run.
:type release_uri: str
:param run_summary: Run summary for run Type = NoConfigRun.
:type run_summary: list of :class:`RunSummaryModel <azure.devops.v7_1.test.models.RunSummaryModel>`
:param source_workflow: SourceWorkFlow(CI/CD) of the test run.
:type source_workflow: str
:param started_date: Start date time of the run.
:type started_date: str
    :param state: The state of the test run. Valid values: NotStarted, InProgress, Completed, Aborted, Waiting.
:type state: str
:param substate: The types of sub states for test run.
:type substate: object
:param tags: Tags to attach with the test run.
:type tags: list of :class:`TestTag <azure.devops.v7_1.test.models.TestTag>`
:param test_environment_id: ID of the test environment associated with the run.
:type test_environment_id: str
:param test_settings: An abstracted reference to test setting resource.
:type test_settings: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_drop_location': {'key': 'buildDropLocation', 'type': 'str'},
'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
'build_platform': {'key': 'buildPlatform', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'str'},
'controller': {'key': 'controller', 'type': 'str'},
'delete_in_progress_results': {'key': 'deleteInProgressResults', 'type': 'bool'},
'dtl_aut_environment': {'key': 'dtlAutEnvironment', 'type': 'ShallowReference'},
'dtl_environment': {'key': 'dtlEnvironment', 'type': 'ShallowReference'},
'dtl_environment_details': {'key': 'dtlEnvironmentDetails', 'type': 'DtlEnvironmentDetails'},
'due_date': {'key': 'dueDate', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'log_entries': {'key': 'logEntries', 'type': '[TestMessageLogDetails]'},
'name': {'key': 'name', 'type': 'str'},
'release_environment_uri': {'key': 'releaseEnvironmentUri', 'type': 'str'},
'release_uri': {'key': 'releaseUri', 'type': 'str'},
'run_summary': {'key': 'runSummary', 'type': '[RunSummaryModel]'},
'source_workflow': {'key': 'sourceWorkflow', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'substate': {'key': 'substate', 'type': 'object'},
'tags': {'key': 'tags', 'type': '[TestTag]'},
'test_environment_id': {'key': 'testEnvironmentId', 'type': 'str'},
'test_settings': {'key': 'testSettings', 'type': 'ShallowReference'}
}
def __init__(self, build=None, build_drop_location=None, build_flavor=None, build_platform=None, comment=None, completed_date=None, controller=None, delete_in_progress_results=None, dtl_aut_environment=None, dtl_environment=None, dtl_environment_details=None, due_date=None, error_message=None, iteration=None, log_entries=None, name=None, release_environment_uri=None, release_uri=None, run_summary=None, source_workflow=None, started_date=None, state=None, substate=None, tags=None, test_environment_id=None, test_settings=None):
super(RunUpdateModel, self).__init__()
self.build = build
self.build_drop_location = build_drop_location
self.build_flavor = build_flavor
self.build_platform = build_platform
self.comment = comment
self.completed_date = completed_date
self.controller = controller
self.delete_in_progress_results = delete_in_progress_results
self.dtl_aut_environment = dtl_aut_environment
self.dtl_environment = dtl_environment
self.dtl_environment_details = dtl_environment_details
self.due_date = due_date
self.error_message = error_message
self.iteration = iteration
self.log_entries = log_entries
self.name = name
self.release_environment_uri = release_environment_uri
self.release_uri = release_uri
self.run_summary = run_summary
self.source_workflow = source_workflow
self.started_date = started_date
self.state = state
self.substate = substate
self.tags = tags
self.test_environment_id = test_environment_id
self.test_settings = test_settings
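# Illustrative usage sketch: closing out a run by moving it to the Completed
# state; date fields are plain strings per the _attribute_map above. Values
# are hypothetical.
_example_run_update = RunUpdateModel(
    state='Completed',
    completed_date='2024-01-02T03:04:05Z',
    comment='All smoke tests passed'
)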
class ShallowReference(Model):
"""
An abstracted reference to some other resource. This class is used to provide the build data contracts with a uniform way to reference other resources in a way that provides easy traversal through links.
:param id: ID of the resource
:type id: str
:param name: Name of the linked resource (definition name, controller name, etc.)
:type name: str
:param url: Full http link to the resource
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, name=None, url=None):
super(ShallowReference, self).__init__()
self.id = id
self.name = name
self.url = url
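# Illustrative usage sketch: ShallowReference is the generic handle used
# throughout these models (plans, builds, configurations, ...); note that id
# is a string even when the underlying resource id is numeric. Values are
# hypothetical.
_example_plan_ref = ShallowReference(id='1', name='Sprint 42 regression plan')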
class ShallowTestCaseResult(Model):
"""
:param automated_test_name:
:type automated_test_name: str
:param automated_test_storage:
:type automated_test_storage: str
:param duration_in_ms:
:type duration_in_ms: float
:param id:
:type id: int
:param is_re_run:
:type is_re_run: bool
:param outcome:
:type outcome: str
:param owner:
:type owner: str
:param priority:
:type priority: int
:param ref_id:
:type ref_id: int
:param run_id:
:type run_id: int
:param tags:
:type tags: list of str
:param test_case_title:
:type test_case_title: str
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'id': {'key': 'id', 'type': 'int'},
'is_re_run': {'key': 'isReRun', 'type': 'bool'},
'outcome': {'key': 'outcome', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'ref_id': {'key': 'refId', 'type': 'int'},
'run_id': {'key': 'runId', 'type': 'int'},
'tags': {'key': 'tags', 'type': '[str]'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'}
}
def __init__(self, automated_test_name=None, automated_test_storage=None, duration_in_ms=None, id=None, is_re_run=None, outcome=None, owner=None, priority=None, ref_id=None, run_id=None, tags=None, test_case_title=None):
super(ShallowTestCaseResult, self).__init__()
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.duration_in_ms = duration_in_ms
self.id = id
self.is_re_run = is_re_run
self.outcome = outcome
self.owner = owner
self.priority = priority
self.ref_id = ref_id
self.run_id = run_id
self.tags = tags
self.test_case_title = test_case_title
class SharedStepModel(Model):
"""
Reference to shared step workitem.
:param id: WorkItem shared step ID.
:type id: int
:param revision: Shared step workitem revision.
:type revision: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'revision': {'key': 'revision', 'type': 'int'}
}
def __init__(self, id=None, revision=None):
super(SharedStepModel, self).__init__()
self.id = id
self.revision = revision
class StageReference(Model):
"""
Stage in pipeline
:param attempt: Attempt number of stage
:type attempt: int
    :param stage_name: Name of the stage. Maximum supported length for the name is 256 characters.
:type stage_name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'stage_name': {'key': 'stageName', 'type': 'str'}
}
def __init__(self, attempt=None, stage_name=None):
super(StageReference, self).__init__()
self.attempt = attempt
self.stage_name = stage_name
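# Illustrative usage sketch: composing a PipelineReference from the stage and
# phase reference types defined above, e.g. for RunCreateModel.pipeline_reference.
# IDs and names are hypothetical.
_example_pipeline_ref = PipelineReference(
    pipeline_id=1234,
    stage_reference=StageReference(attempt=1, stage_name='Test'),
    phase_reference=PhaseReference(attempt=1, phase_name='Run integration tests')
)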
class SuiteCreateModel(Model):
"""
Suite create model
:param name: Name of test suite.
:type name: str
:param query_string: For query based suites, query string that defines the suite.
:type query_string: str
:param requirement_ids: For requirements test suites, the IDs of the requirements.
:type requirement_ids: list of int
    :param suite_type: Type of test suite to create. It can be one of DynamicTestSuite, StaticTestSuite, or RequirementTestSuite.
:type suite_type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'query_string': {'key': 'queryString', 'type': 'str'},
'requirement_ids': {'key': 'requirementIds', 'type': '[int]'},
'suite_type': {'key': 'suiteType', 'type': 'str'}
}
def __init__(self, name=None, query_string=None, requirement_ids=None, suite_type=None):
super(SuiteCreateModel, self).__init__()
self.name = name
self.query_string = query_string
self.requirement_ids = requirement_ids
self.suite_type = suite_type
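# Illustrative usage sketch: one create model per suite type. For query-based
# suites the query string is a work item query (WIQL); the query shown is
# hypothetical.
_example_static_suite = SuiteCreateModel(name='Checkout', suite_type='StaticTestSuite')
_example_dynamic_suite = SuiteCreateModel(
    name='High priority bugs',
    suite_type='DynamicTestSuite',
    query_string='SELECT [System.Id] FROM WorkItems WHERE [Microsoft.VSTS.Common.Priority] = 1'
)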
class SuiteEntry(Model):
"""
A suite entry defines properties for a test suite.
:param child_suite_id: Id of child suite in the test suite.
:type child_suite_id: int
:param sequence_number: Sequence number for the test case or child test suite in the test suite.
:type sequence_number: int
:param suite_id: Id for the test suite.
:type suite_id: int
:param test_case_id: Id of a test case in the test suite.
:type test_case_id: int
"""
_attribute_map = {
'child_suite_id': {'key': 'childSuiteId', 'type': 'int'},
'sequence_number': {'key': 'sequenceNumber', 'type': 'int'},
'suite_id': {'key': 'suiteId', 'type': 'int'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'}
}
def __init__(self, child_suite_id=None, sequence_number=None, suite_id=None, test_case_id=None):
super(SuiteEntry, self).__init__()
self.child_suite_id = child_suite_id
self.sequence_number = sequence_number
self.suite_id = suite_id
self.test_case_id = test_case_id
class SuiteEntryUpdateModel(Model):
"""
A model to define sequence of test suite entries in a test suite.
:param child_suite_id: Id of the child suite in the test suite.
:type child_suite_id: int
:param sequence_number: Updated sequence number for the test case or child test suite in the test suite.
:type sequence_number: int
:param test_case_id: Id of the test case in the test suite.
:type test_case_id: int
"""
_attribute_map = {
'child_suite_id': {'key': 'childSuiteId', 'type': 'int'},
'sequence_number': {'key': 'sequenceNumber', 'type': 'int'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'}
}
def __init__(self, child_suite_id=None, sequence_number=None, test_case_id=None):
super(SuiteEntryUpdateModel, self).__init__()
self.child_suite_id = child_suite_id
self.sequence_number = sequence_number
self.test_case_id = test_case_id
class SuiteTestCase(Model):
"""
Test case for the suite.
:param point_assignments: Point Assignment for test suite's test case.
:type point_assignments: list of :class:`PointAssignment <azure.devops.v7_1.test.models.PointAssignment>`
:param test_case: Test case workItem reference.
:type test_case: :class:`WorkItemReference <azure.devops.v7_1.test.models.WorkItemReference>`
"""
_attribute_map = {
'point_assignments': {'key': 'pointAssignments', 'type': '[PointAssignment]'},
'test_case': {'key': 'testCase', 'type': 'WorkItemReference'}
}
def __init__(self, point_assignments=None, test_case=None):
super(SuiteTestCase, self).__init__()
self.point_assignments = point_assignments
self.test_case = test_case
class SuiteTestCaseUpdateModel(Model):
"""
Test suite update model.
:param configurations: Shallow reference of configurations for the test cases in the suite.
:type configurations: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
"""
_attribute_map = {
'configurations': {'key': 'configurations', 'type': '[ShallowReference]'}
}
def __init__(self, configurations=None):
super(SuiteTestCaseUpdateModel, self).__init__()
self.configurations = configurations
class SuiteUpdateModel(Model):
"""
Test suite update model.
:param default_configurations: Shallow reference of default configurations for the suite.
:type default_configurations: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
    :param default_testers: Shallow references of the default testers for the suite.
:type default_testers: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param inherit_default_configurations: Specifies if the default configurations have to be inherited from the parent test suite in which the test suite is created.
:type inherit_default_configurations: bool
:param name: Test suite name
:type name: str
:param parent: Shallow reference of the parent.
:type parent: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param query_string: For query based suites, the new query string.
:type query_string: str
"""
_attribute_map = {
'default_configurations': {'key': 'defaultConfigurations', 'type': '[ShallowReference]'},
'default_testers': {'key': 'defaultTesters', 'type': '[ShallowReference]'},
'inherit_default_configurations': {'key': 'inheritDefaultConfigurations', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'parent': {'key': 'parent', 'type': 'ShallowReference'},
'query_string': {'key': 'queryString', 'type': 'str'}
}
def __init__(self, default_configurations=None, default_testers=None, inherit_default_configurations=None, name=None, parent=None, query_string=None):
super(SuiteUpdateModel, self).__init__()
self.default_configurations = default_configurations
self.default_testers = default_testers
self.inherit_default_configurations = inherit_default_configurations
self.name = name
self.parent = parent
self.query_string = query_string
class TeamContext(Model):
"""
The Team Context for an operation.
:param project: The team project Id or name. Ignored if ProjectId is set.
:type project: str
:param project_id: The Team Project ID. Required if Project is not set.
:type project_id: str
:param team: The Team Id or name. Ignored if TeamId is set.
:type team: str
:param team_id: The Team Id
:type team_id: str
"""
_attribute_map = {
'project': {'key': 'project', 'type': 'str'},
'project_id': {'key': 'projectId', 'type': 'str'},
'team': {'key': 'team', 'type': 'str'},
'team_id': {'key': 'teamId', 'type': 'str'}
}
def __init__(self, project=None, project_id=None, team=None, team_id=None):
super(TeamContext, self).__init__()
self.project = project
self.project_id = project_id
self.team = team
self.team_id = team_id
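# Illustrative usage sketch: either a name or an id can identify the project
# and team; per the docstring above, the id fields take precedence when both
# are set. Values are hypothetical.
_example_team_context = TeamContext(project='Fabrikam', team='Fabrikam Team')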
class TeamProjectReference(Model):
"""
Represents a shallow reference to a TeamProject.
:param abbreviation: Project abbreviation.
:type abbreviation: str
:param default_team_image_url: Url to default team identity image.
:type default_team_image_url: str
:param description: The project's description (if any).
:type description: str
:param id: Project identifier.
:type id: str
:param last_update_time: Project last update time.
:type last_update_time: datetime
:param name: Project name.
:type name: str
:param revision: Project revision.
:type revision: long
:param state: Project state.
:type state: object
:param url: Url to the full version of the object.
:type url: str
:param visibility: Project visibility.
:type visibility: object
"""
_attribute_map = {
'abbreviation': {'key': 'abbreviation', 'type': 'str'},
'default_team_image_url': {'key': 'defaultTeamImageUrl', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'long'},
'state': {'key': 'state', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'object'}
}
def __init__(self, abbreviation=None, default_team_image_url=None, description=None, id=None, last_update_time=None, name=None, revision=None, state=None, url=None, visibility=None):
super(TeamProjectReference, self).__init__()
self.abbreviation = abbreviation
self.default_team_image_url = default_team_image_url
self.description = description
self.id = id
self.last_update_time = last_update_time
self.name = name
self.revision = revision
self.state = state
self.url = url
self.visibility = visibility
class TestAttachment(Model):
"""
:param attachment_type: Attachment type.
:type attachment_type: object
:param comment: Comment associated with attachment.
:type comment: str
:param created_date: Attachment created date.
:type created_date: datetime
:param file_name: Attachment file name
:type file_name: str
:param id: ID of the attachment.
:type id: int
:param size: Attachment size.
:type size: long
:param url: Attachment Url.
:type url: str
"""
_attribute_map = {
'attachment_type': {'key': 'attachmentType', 'type': 'object'},
'comment': {'key': 'comment', 'type': 'str'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'file_name': {'key': 'fileName', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'size': {'key': 'size', 'type': 'long'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, attachment_type=None, comment=None, created_date=None, file_name=None, id=None, size=None, url=None):
super(TestAttachment, self).__init__()
self.attachment_type = attachment_type
self.comment = comment
self.created_date = created_date
self.file_name = file_name
self.id = id
self.size = size
self.url = url
class TestAttachmentReference(Model):
"""
Reference to test attachment.
:param id: ID of the attachment.
:type id: int
:param url: Url to download the attachment.
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, url=None):
super(TestAttachmentReference, self).__init__()
self.id = id
self.url = url
class TestAttachmentRequestModel(Model):
"""
Test attachment request model
    :param attachment_type: Attachment type. By default it is GeneralAttachment. It can be one of the following types: GeneralAttachment, AfnStrip, BugFilingData, CodeCoverage, IntermediateCollectorData, RunConfig, TestImpactDetails, TmiTestRunDeploymentFiles, TmiTestRunReverseDeploymentFiles, TmiTestResultDetail, TmiTestRunSummary.
:type attachment_type: str
:param comment: Comment associated with attachment
:type comment: str
:param file_name: Attachment filename
:type file_name: str
:param stream: Base64 encoded file stream
:type stream: str
"""
_attribute_map = {
'attachment_type': {'key': 'attachmentType', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'file_name': {'key': 'fileName', 'type': 'str'},
'stream': {'key': 'stream', 'type': 'str'}
}
def __init__(self, attachment_type=None, comment=None, file_name=None, stream=None):
super(TestAttachmentRequestModel, self).__init__()
self.attachment_type = attachment_type
self.comment = comment
self.file_name = file_name
self.stream = stream
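# Illustrative usage sketch: the stream field carries the file content
# base64-encoded, per the docstring above. File name and content are
# hypothetical.
import base64
_example_attachment = TestAttachmentRequestModel(
    attachment_type='GeneralAttachment',
    comment='Console log from the failing step',
    file_name='console.log',
    stream=base64.b64encode(b'step 3 failed: timeout').decode('ascii')
)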
class TestCaseResult(Model):
"""
Represents a test result.
:param afn_strip_id: Test attachment ID of action recording.
:type afn_strip_id: int
:param area: Reference to area path of test.
:type area: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param associated_bugs: Reference to bugs linked to test result.
:type associated_bugs: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param automated_test_id: ID representing test method in a dll.
:type automated_test_id: str
:param automated_test_name: Fully qualified name of test executed.
:type automated_test_name: str
:param automated_test_storage: Container to which test belongs.
:type automated_test_storage: str
:param automated_test_type: Type of automated test.
:type automated_test_type: str
:param automated_test_type_id: TypeId of automated test.
:type automated_test_type_id: str
:param build: Shallow reference to build associated with test result.
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param build_reference: Reference to build associated with test result.
:type build_reference: :class:`BuildReference <azure.devops.v7_1.test.models.BuildReference>`
    :param comment: Comment in a test result with a maximum size of 1000 characters.
:type comment: str
:param completed_date: Time when test execution completed(UTC). Completed date should be greater than StartedDate.
:type completed_date: datetime
:param computer_name: Machine name where test executed.
:type computer_name: str
:param configuration: Reference to test configuration. Type ShallowReference.
:type configuration: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param created_date: Timestamp when test result created(UTC).
:type created_date: datetime
:param custom_fields: Additional properties of test result.
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.test.models.CustomTestField>`
    :param duration_in_ms: Duration of test execution in milliseconds. If not provided, the value is set to CompletedDate - StartedDate.
:type duration_in_ms: float
:param error_message: Error message in test execution.
:type error_message: str
:param failing_since: Information when test results started failing.
:type failing_since: :class:`FailingSince <azure.devops.v7_1.test.models.FailingSince>`
:param failure_type: Failure type of test result. Valid Value= (Known Issue, New Issue, Regression, Unknown, None)
:type failure_type: str
:param id: ID of a test result.
:type id: int
:param iteration_details: Test result details of test iterations used only for Manual Testing.
:type iteration_details: list of :class:`TestIterationDetailsModel <azure.devops.v7_1.test.models.TestIterationDetailsModel>`
    :param last_updated_by: Reference to the identity that last updated the test result.
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last updated datetime of test result(UTC).
:type last_updated_date: datetime
:param outcome: Test outcome of test result. Valid values = (Unspecified, None, Passed, Failed, Inconclusive, Timeout, Aborted, Blocked, NotExecuted, Warning, Error, NotApplicable, Paused, InProgress, NotImpacted)
:type outcome: str
:param owner: Reference to test owner.
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param priority: Priority of test executed.
:type priority: int
:param project: Reference to team project.
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param release: Shallow reference to release associated with test result.
:type release: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param release_reference: Reference to release associated with test result.
:type release_reference: :class:`ReleaseReference <azure.devops.v7_1.test.models.ReleaseReference>`
:param reset_count: ResetCount.
:type reset_count: int
:param resolution_state: Resolution state of test result.
:type resolution_state: str
:param resolution_state_id: ID of resolution state.
:type resolution_state_id: int
    :param result_group_type: Hierarchy type of the result; the default value of None means it is a leaf node.
:type result_group_type: object
:param revision: Revision number of test result.
:type revision: int
    :param run_by: Reference to the identity that executed the test.
:type run_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
    :param stack_trace: Stack trace with a maximum size of 1000 characters.
:type stack_trace: str
:param started_date: Time when test execution started(UTC).
:type started_date: datetime
:param state: State of test result. Type TestRunState.
:type state: str
:param sub_results: List of sub results inside a test result, if ResultGroupType is not None, it holds corresponding type sub results.
:type sub_results: list of :class:`TestSubResult <azure.devops.v7_1.test.models.TestSubResult>`
:param test_case: Reference to the test executed.
:type test_case: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param test_case_reference_id: Reference ID of test used by test result. Type TestResultMetaData
:type test_case_reference_id: int
:param test_case_revision: TestCaseRevision Number.
:type test_case_revision: int
:param test_case_title: Name of test.
:type test_case_title: str
    :param test_plan: Reference to the test plan that the test case workitem is part of.
:type test_plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param test_point: Reference to the test point executed.
:type test_point: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param test_run: Reference to test run.
:type test_run: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
    :param test_suite: Reference to the test suite that the test case workitem is part of.
:type test_suite: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param url: Url of test result.
:type url: str
"""
_attribute_map = {
'afn_strip_id': {'key': 'afnStripId', 'type': 'int'},
'area': {'key': 'area', 'type': 'ShallowReference'},
'associated_bugs': {'key': 'associatedBugs', 'type': '[ShallowReference]'},
'automated_test_id': {'key': 'automatedTestId', 'type': 'str'},
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'automated_test_type': {'key': 'automatedTestType', 'type': 'str'},
'automated_test_type_id': {'key': 'automatedTestTypeId', 'type': 'str'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_reference': {'key': 'buildReference', 'type': 'BuildReference'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'failing_since': {'key': 'failingSince', 'type': 'FailingSince'},
'failure_type': {'key': 'failureType', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'iteration_details': {'key': 'iterationDetails', 'type': '[TestIterationDetailsModel]'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'outcome': {'key': 'outcome', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'priority': {'key': 'priority', 'type': 'int'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'release': {'key': 'release', 'type': 'ShallowReference'},
'release_reference': {'key': 'releaseReference', 'type': 'ReleaseReference'},
'reset_count': {'key': 'resetCount', 'type': 'int'},
'resolution_state': {'key': 'resolutionState', 'type': 'str'},
'resolution_state_id': {'key': 'resolutionStateId', 'type': 'int'},
'result_group_type': {'key': 'resultGroupType', 'type': 'object'},
'revision': {'key': 'revision', 'type': 'int'},
'run_by': {'key': 'runBy', 'type': 'IdentityRef'},
'stack_trace': {'key': 'stackTrace', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'str'},
'sub_results': {'key': 'subResults', 'type': '[TestSubResult]'},
'test_case': {'key': 'testCase', 'type': 'ShallowReference'},
'test_case_reference_id': {'key': 'testCaseReferenceId', 'type': 'int'},
'test_case_revision': {'key': 'testCaseRevision', 'type': 'int'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'},
'test_plan': {'key': 'testPlan', 'type': 'ShallowReference'},
'test_point': {'key': 'testPoint', 'type': 'ShallowReference'},
'test_run': {'key': 'testRun', 'type': 'ShallowReference'},
'test_suite': {'key': 'testSuite', 'type': 'ShallowReference'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, afn_strip_id=None, area=None, associated_bugs=None, automated_test_id=None, automated_test_name=None, automated_test_storage=None, automated_test_type=None, automated_test_type_id=None, build=None, build_reference=None, comment=None, completed_date=None, computer_name=None, configuration=None, created_date=None, custom_fields=None, duration_in_ms=None, error_message=None, failing_since=None, failure_type=None, id=None, iteration_details=None, last_updated_by=None, last_updated_date=None, outcome=None, owner=None, priority=None, project=None, release=None, release_reference=None, reset_count=None, resolution_state=None, resolution_state_id=None, result_group_type=None, revision=None, run_by=None, stack_trace=None, started_date=None, state=None, sub_results=None, test_case=None, test_case_reference_id=None, test_case_revision=None, test_case_title=None, test_plan=None, test_point=None, test_run=None, test_suite=None, url=None):
super(TestCaseResult, self).__init__()
self.afn_strip_id = afn_strip_id
self.area = area
self.associated_bugs = associated_bugs
self.automated_test_id = automated_test_id
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.automated_test_type = automated_test_type
self.automated_test_type_id = automated_test_type_id
self.build = build
self.build_reference = build_reference
self.comment = comment
self.completed_date = completed_date
self.computer_name = computer_name
self.configuration = configuration
self.created_date = created_date
self.custom_fields = custom_fields
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.failing_since = failing_since
self.failure_type = failure_type
self.id = id
self.iteration_details = iteration_details
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.outcome = outcome
self.owner = owner
self.priority = priority
self.project = project
self.release = release
self.release_reference = release_reference
self.reset_count = reset_count
self.resolution_state = resolution_state
self.resolution_state_id = resolution_state_id
self.result_group_type = result_group_type
self.revision = revision
self.run_by = run_by
self.stack_trace = stack_trace
self.started_date = started_date
self.state = state
self.sub_results = sub_results
self.test_case = test_case
self.test_case_reference_id = test_case_reference_id
self.test_case_revision = test_case_revision
self.test_case_title = test_case_title
self.test_plan = test_plan
self.test_point = test_point
self.test_run = test_run
self.test_suite = test_suite
self.url = url
class TestCaseResultAttachmentModel(Model):
"""
Test attachment information in a test iteration.
:param action_path: Path identifier of the test step in the test case work item.
:type action_path: str
:param id: Attachment ID.
:type id: int
:param iteration_id: Iteration ID.
:type iteration_id: int
:param name: Name of attachment.
:type name: str
:param size: Attachment size.
:type size: long
:param url: Url to attachment.
:type url: str
"""
_attribute_map = {
'action_path': {'key': 'actionPath', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'iteration_id': {'key': 'iterationId', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'size': {'key': 'size', 'type': 'long'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, action_path=None, id=None, iteration_id=None, name=None, size=None, url=None):
super(TestCaseResultAttachmentModel, self).__init__()
self.action_path = action_path
self.id = id
self.iteration_id = iteration_id
self.name = name
self.size = size
self.url = url
class TestCaseResultIdentifier(Model):
"""
Reference to a test result.
:param test_result_id: Test result ID.
:type test_result_id: int
:param test_run_id: Test run ID.
:type test_run_id: int
"""
_attribute_map = {
'test_result_id': {'key': 'testResultId', 'type': 'int'},
'test_run_id': {'key': 'testRunId', 'type': 'int'}
}
def __init__(self, test_result_id=None, test_run_id=None):
super(TestCaseResultIdentifier, self).__init__()
self.test_result_id = test_result_id
self.test_run_id = test_run_id
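
# Illustrative sketch (not part of the generated module): constructing a
# reference to one result inside a run; the IDs below are hypothetical.
#
#   identifier = TestCaseResultIdentifier(test_result_id=100000, test_run_id=42)
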
class TestCaseResultUpdateModel(Model):
"""
:param associated_work_items:
:type associated_work_items: list of int
:param automated_test_type_id:
:type automated_test_type_id: str
:param comment:
:type comment: str
:param completed_date:
:type completed_date: str
:param computer_name:
:type computer_name: str
:param custom_fields:
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.test.models.CustomTestField>`
:param duration_in_ms:
:type duration_in_ms: str
:param error_message:
:type error_message: str
:param failure_type:
:type failure_type: str
:param outcome:
:type outcome: str
:param owner:
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param resolution_state:
:type resolution_state: str
:param run_by:
:type run_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param stack_trace:
:type stack_trace: str
:param started_date:
:type started_date: str
:param state:
:type state: str
:param test_case_priority:
:type test_case_priority: str
:param test_result:
:type test_result: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
"""
_attribute_map = {
'associated_work_items': {'key': 'associatedWorkItems', 'type': '[int]'},
'automated_test_type_id': {'key': 'automatedTestTypeId', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'str'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'failure_type': {'key': 'failureType', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'resolution_state': {'key': 'resolutionState', 'type': 'str'},
'run_by': {'key': 'runBy', 'type': 'IdentityRef'},
'stack_trace': {'key': 'stackTrace', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'test_case_priority': {'key': 'testCasePriority', 'type': 'str'},
'test_result': {'key': 'testResult', 'type': 'ShallowReference'}
}
def __init__(self, associated_work_items=None, automated_test_type_id=None, comment=None, completed_date=None, computer_name=None, custom_fields=None, duration_in_ms=None, error_message=None, failure_type=None, outcome=None, owner=None, resolution_state=None, run_by=None, stack_trace=None, started_date=None, state=None, test_case_priority=None, test_result=None):
super(TestCaseResultUpdateModel, self).__init__()
self.associated_work_items = associated_work_items
self.automated_test_type_id = automated_test_type_id
self.comment = comment
self.completed_date = completed_date
self.computer_name = computer_name
self.custom_fields = custom_fields
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.failure_type = failure_type
self.outcome = outcome
self.owner = owner
self.resolution_state = resolution_state
self.run_by = run_by
self.stack_trace = stack_trace
self.started_date = started_date
self.state = state
self.test_case_priority = test_case_priority
self.test_result = test_result
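
# Illustrative sketch (not part of the generated module): a minimal update
# payload marking a result as passed. Note that completedDate, startedDate and
# durationInMs are serialized as strings for this model (see _attribute_map
# above); all values are hypothetical.
#
#   update = TestCaseResultUpdateModel(
#       outcome='Passed',
#       state='Completed',
#       comment='Verified manually',
#       duration_in_ms='1500',
#   )
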
class TestConfiguration(Model):
"""
Test configuration
:param area: Area of the configuration
:type area: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param description: Description of the configuration
:type description: str
:param id: Id of the configuration
:type id: int
:param is_default: Is the configuration a default for the test plans
:type is_default: bool
:param last_updated_by: Last Updated By Reference
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last Updated Date
:type last_updated_date: datetime
:param name: Name of the configuration
:type name: str
:param project: Project to which the configuration belongs
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param revision: Revision of the configuration
:type revision: int
:param state: State of the configuration
:type state: object
:param url: Url of Configuration Resource
:type url: str
:param values: List of test variable / selected value pairs
:type values: list of :class:`NameValuePair <azure.devops.v7_1.test.models.NameValuePair>`
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'ShallowReference'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'revision': {'key': 'revision', 'type': 'int'},
'state': {'key': 'state', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'values': {'key': 'values', 'type': '[NameValuePair]'}
}
def __init__(self, area=None, description=None, id=None, is_default=None, last_updated_by=None, last_updated_date=None, name=None, project=None, revision=None, state=None, url=None, values=None):
super(TestConfiguration, self).__init__()
self.area = area
self.description = description
self.id = id
self.is_default = is_default
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.name = name
self.project = project
self.revision = revision
self.state = state
self.url = url
self.values = values
class TestEnvironment(Model):
"""
Test environment Detail.
:param environment_id: Test Environment Id.
:type environment_id: str
:param environment_name: Test Environment Name.
:type environment_name: str
"""
_attribute_map = {
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_name': {'key': 'environmentName', 'type': 'str'}
}
def __init__(self, environment_id=None, environment_name=None):
super(TestEnvironment, self).__init__()
self.environment_id = environment_id
self.environment_name = environment_name
class TestFailureDetails(Model):
"""
:param count:
:type count: int
:param test_results:
:type test_results: list of :class:`TestCaseResultIdentifier <azure.devops.v7_1.test.models.TestCaseResultIdentifier>`
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'test_results': {'key': 'testResults', 'type': '[TestCaseResultIdentifier]'}
}
def __init__(self, count=None, test_results=None):
super(TestFailureDetails, self).__init__()
self.count = count
self.test_results = test_results
class TestFailuresAnalysis(Model):
"""
:param existing_failures:
:type existing_failures: :class:`TestFailureDetails <azure.devops.v7_1.test.models.TestFailureDetails>`
:param fixed_tests:
:type fixed_tests: :class:`TestFailureDetails <azure.devops.v7_1.test.models.TestFailureDetails>`
:param new_failures:
:type new_failures: :class:`TestFailureDetails <azure.devops.v7_1.test.models.TestFailureDetails>`
:param previous_context:
:type previous_context: :class:`TestResultsContext <azure.devops.v7_1.test.models.TestResultsContext>`
"""
_attribute_map = {
'existing_failures': {'key': 'existingFailures', 'type': 'TestFailureDetails'},
'fixed_tests': {'key': 'fixedTests', 'type': 'TestFailureDetails'},
'new_failures': {'key': 'newFailures', 'type': 'TestFailureDetails'},
'previous_context': {'key': 'previousContext', 'type': 'TestResultsContext'}
}
def __init__(self, existing_failures=None, fixed_tests=None, new_failures=None, previous_context=None):
super(TestFailuresAnalysis, self).__init__()
self.existing_failures = existing_failures
self.fixed_tests = fixed_tests
self.new_failures = new_failures
self.previous_context = previous_context
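
# Illustrative sketch (not part of the generated module): assembling a
# failures analysis from TestFailureDetails buckets; the IDs are hypothetical.
#
#   analysis = TestFailuresAnalysis(
#       new_failures=TestFailureDetails(
#           count=1,
#           test_results=[TestCaseResultIdentifier(test_result_id=100000, test_run_id=7)],
#       ),
#       existing_failures=TestFailureDetails(count=0, test_results=[]),
#       fixed_tests=TestFailureDetails(count=0, test_results=[]),
#   )
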
class TestFlakyIdentifier(Model):
"""
Test Flaky Identifier
:param branch_name: Branch Name where Flakiness has to be Marked/Unmarked
:type branch_name: str
:param is_flaky: State for Flakiness
:type is_flaky: bool
"""
_attribute_map = {
'branch_name': {'key': 'branchName', 'type': 'str'},
'is_flaky': {'key': 'isFlaky', 'type': 'bool'}
}
def __init__(self, branch_name=None, is_flaky=None):
super(TestFlakyIdentifier, self).__init__()
self.branch_name = branch_name
self.is_flaky = is_flaky
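
# Illustrative sketch (not part of the generated module): marking a test as
# flaky on one branch; the branch name is hypothetical.
#
#   flaky = TestFlakyIdentifier(branch_name='refs/heads/main', is_flaky=True)
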
class TestHistoryQuery(Model):
"""
Filter to get TestCase result history.
:param automated_test_name: Automated test name of the TestCase.
:type automated_test_name: str
:param branch: Results to be fetched for a particular branch.
:type branch: str
:param build_definition_id: Get the results history only for this BuildDefinitionId. To use this in a query, GroupBy should be Branch. If this is provided, Branch is ignored.
:type build_definition_id: int
:param continuation_token: Filled in by the server. If not null, more results remain to be fetched and this REST API should be called again with this ContinuationToken. It is not supposed to be created by the user, or altered if received from the server in the last batch.
:type continuation_token: str
:param group_by: Group the results on the basis of TestResultGroupBy. This can be Branch, Environment, or null (if results are fetched by BuildDefinitionId).
:type group_by: object
:param max_complete_date: History is fetched for the interval between MaxCompleteDate and (MaxCompleteDate - TrendDays). Default is the current date and time.
:type max_complete_date: datetime
:param release_env_definition_id: Get the results history only for this ReleaseEnvDefinitionId. To use this in a query, GroupBy should be Environment.
:type release_env_definition_id: int
:param results_for_group: List of TestResultHistoryForGroup which are grouped by GroupBy
:type results_for_group: list of :class:`TestResultHistoryForGroup <azure.devops.v7_1.test.models.TestResultHistoryForGroup>`
:param test_case_id: Get the results history only for this TestCaseId. Use this in a query to filter the results along with AutomatedTestName.
:type test_case_id: int
:param trend_days: Number of days for which to collect history. The maximum supported value is 7 days, which is also the default.
:type trend_days: int
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'branch': {'key': 'branch', 'type': 'str'},
'build_definition_id': {'key': 'buildDefinitionId', 'type': 'int'},
'continuation_token': {'key': 'continuationToken', 'type': 'str'},
'group_by': {'key': 'groupBy', 'type': 'object'},
'max_complete_date': {'key': 'maxCompleteDate', 'type': 'iso-8601'},
'release_env_definition_id': {'key': 'releaseEnvDefinitionId', 'type': 'int'},
'results_for_group': {'key': 'resultsForGroup', 'type': '[TestResultHistoryForGroup]'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'},
'trend_days': {'key': 'trendDays', 'type': 'int'}
}
def __init__(self, automated_test_name=None, branch=None, build_definition_id=None, continuation_token=None, group_by=None, max_complete_date=None, release_env_definition_id=None, results_for_group=None, test_case_id=None, trend_days=None):
super(TestHistoryQuery, self).__init__()
self.automated_test_name = automated_test_name
self.branch = branch
self.build_definition_id = build_definition_id
self.continuation_token = continuation_token
self.group_by = group_by
self.max_complete_date = max_complete_date
self.release_env_definition_id = release_env_definition_id
self.results_for_group = results_for_group
self.test_case_id = test_case_id
self.trend_days = trend_days
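
# Illustrative sketch (not part of the generated module): paging through test
# result history for a build definition. The query_test_history client call is
# an assumption about how a caller might drive this model, not a guaranteed
# API; the IDs are hypothetical.
#
#   query = TestHistoryQuery(
#       automated_test_name='MyNamespace.MyTests.MyTest',
#       build_definition_id=12,  # with this filter, GroupBy should be Branch
#       trend_days=7,            # 7 days is both the default and the maximum
#   )
#   # The server fills continuation_token; resend the same query until the
#   # token comes back empty:
#   #   result = client.query_test_history(query, project='MyProject')
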
class TestIterationDetailsModel(Model):
"""
Represents a test iteration result.
:param action_results: Test step results in an iteration.
:type action_results: list of :class:`TestActionResultModel <azure.devops.v7_1.test.models.TestActionResultModel>`
:param attachments: Reference to attachments in test iteration result.
:type attachments: list of :class:`TestCaseResultAttachmentModel <azure.devops.v7_1.test.models.TestCaseResultAttachmentModel>`
:param comment: Comment in test iteration result.
:type comment: str
:param completed_date: Time when execution completed (UTC).
:type completed_date: datetime
:param duration_in_ms: Duration of execution.
:type duration_in_ms: float
:param error_message: Error message in test iteration result execution.
:type error_message: str
:param id: ID of test iteration result.
:type id: int
:param outcome: Test outcome of test iteration result.
:type outcome: str
:param parameters: Test parameters in an iteration.
:type parameters: list of :class:`TestResultParameterModel <azure.devops.v7_1.test.models.TestResultParameterModel>`
:param started_date: Time when execution started (UTC).
:type started_date: datetime
:param url: Url to test iteration result.
:type url: str
"""
_attribute_map = {
'action_results': {'key': 'actionResults', 'type': '[TestActionResultModel]'},
'attachments': {'key': 'attachments', 'type': '[TestCaseResultAttachmentModel]'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'outcome': {'key': 'outcome', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[TestResultParameterModel]'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, action_results=None, attachments=None, comment=None, completed_date=None, duration_in_ms=None, error_message=None, id=None, outcome=None, parameters=None, started_date=None, url=None):
super(TestIterationDetailsModel, self).__init__()
self.action_results = action_results
self.attachments = attachments
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.id = id
self.outcome = outcome
self.parameters = parameters
self.started_date = started_date
self.url = url
class TestMessageLogDetails(Model):
"""
A single entry in a test message log.
:param date_created: Date when the resource is created
:type date_created: datetime
:param entry_id: Id of the resource
:type entry_id: int
:param message: Message of the resource
:type message: str
"""
_attribute_map = {
'date_created': {'key': 'dateCreated', 'type': 'iso-8601'},
'entry_id': {'key': 'entryId', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'}
}
def __init__(self, date_created=None, entry_id=None, message=None):
super(TestMessageLogDetails, self).__init__()
self.date_created = date_created
self.entry_id = entry_id
self.message = message
class TestMethod(Model):
"""
:param container:
:type container: str
:param name:
:type name: str
"""
_attribute_map = {
'container': {'key': 'container', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, container=None, name=None):
super(TestMethod, self).__init__()
self.container = container
self.name = name
class TestOperationReference(Model):
"""
Class representing a reference to an operation.
:param id:
:type id: str
:param status:
:type status: str
:param url:
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, status=None, url=None):
super(TestOperationReference, self).__init__()
self.id = id
self.status = status
self.url = url
class TestOutcomeSettings(Model):
"""
Test outcome settings
:param sync_outcome_across_suites: Value to configure how test outcomes for the same tests across suites are shown
:type sync_outcome_across_suites: bool
"""
_attribute_map = {
'sync_outcome_across_suites': {'key': 'syncOutcomeAcrossSuites', 'type': 'bool'}
}
def __init__(self, sync_outcome_across_suites=None):
super(TestOutcomeSettings, self).__init__()
self.sync_outcome_across_suites = sync_outcome_across_suites
class TestPlan(Model):
"""
The test plan resource.
:param area: Area of the test plan.
:type area: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param automated_test_environment:
:type automated_test_environment: :class:`TestEnvironment <azure.devops.v7_1.test.models.TestEnvironment>`
:param automated_test_settings:
:type automated_test_settings: :class:`TestSettings <azure.devops.v7_1.test.models.TestSettings>`
:param build: Build to be tested.
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param build_definition: The Build Definition that generates a build associated with this test plan.
:type build_definition: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param client_url:
:type client_url: str
:param description: Description of the test plan.
:type description: str
:param end_date: End date for the test plan.
:type end_date: datetime
:param id: ID of the test plan.
:type id: int
:param iteration: Iteration path of the test plan.
:type iteration: str
:param manual_test_environment:
:type manual_test_environment: :class:`TestEnvironment <azure.devops.v7_1.test.models.TestEnvironment>`
:param manual_test_settings:
:type manual_test_settings: :class:`TestSettings <azure.devops.v7_1.test.models.TestSettings>`
:param name: Name of the test plan.
:type name: str
:param owner: Owner of the test plan.
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param previous_build:
:type previous_build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param project: Project which contains the test plan.
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param release_environment_definition: Release Environment to be used to deploy the build and run automated tests from this test plan.
:type release_environment_definition: :class:`ReleaseEnvironmentDefinitionReference <azure.devops.v7_1.test.models.ReleaseEnvironmentDefinitionReference>`
:param revision: Revision of the test plan.
:type revision: int
:param root_suite: Root test suite of the test plan.
:type root_suite: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param start_date: Start date for the test plan.
:type start_date: datetime
:param state: State of the test plan.
:type state: str
:param test_outcome_settings: Value to configure how the same tests across test suites under a test plan should behave
:type test_outcome_settings: :class:`TestOutcomeSettings <azure.devops.v7_1.test.models.TestOutcomeSettings>`
:param updated_by:
:type updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param updated_date:
:type updated_date: datetime
:param url: URL of the test plan resource.
:type url: str
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'ShallowReference'},
'automated_test_environment': {'key': 'automatedTestEnvironment', 'type': 'TestEnvironment'},
'automated_test_settings': {'key': 'automatedTestSettings', 'type': 'TestSettings'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_definition': {'key': 'buildDefinition', 'type': 'ShallowReference'},
'client_url': {'key': 'clientUrl', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'end_date': {'key': 'endDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'iteration': {'key': 'iteration', 'type': 'str'},
'manual_test_environment': {'key': 'manualTestEnvironment', 'type': 'TestEnvironment'},
'manual_test_settings': {'key': 'manualTestSettings', 'type': 'TestSettings'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'previous_build': {'key': 'previousBuild', 'type': 'ShallowReference'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'release_environment_definition': {'key': 'releaseEnvironmentDefinition', 'type': 'ReleaseEnvironmentDefinitionReference'},
'revision': {'key': 'revision', 'type': 'int'},
'root_suite': {'key': 'rootSuite', 'type': 'ShallowReference'},
'start_date': {'key': 'startDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'str'},
'test_outcome_settings': {'key': 'testOutcomeSettings', 'type': 'TestOutcomeSettings'},
'updated_by': {'key': 'updatedBy', 'type': 'IdentityRef'},
'updated_date': {'key': 'updatedDate', 'type': 'iso-8601'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, area=None, automated_test_environment=None, automated_test_settings=None, build=None, build_definition=None, client_url=None, description=None, end_date=None, id=None, iteration=None, manual_test_environment=None, manual_test_settings=None, name=None, owner=None, previous_build=None, project=None, release_environment_definition=None, revision=None, root_suite=None, start_date=None, state=None, test_outcome_settings=None, updated_by=None, updated_date=None, url=None):
super(TestPlan, self).__init__()
self.area = area
self.automated_test_environment = automated_test_environment
self.automated_test_settings = automated_test_settings
self.build = build
self.build_definition = build_definition
self.client_url = client_url
self.description = description
self.end_date = end_date
self.id = id
self.iteration = iteration
self.manual_test_environment = manual_test_environment
self.manual_test_settings = manual_test_settings
self.name = name
self.owner = owner
self.previous_build = previous_build
self.project = project
self.release_environment_definition = release_environment_definition
self.revision = revision
self.root_suite = root_suite
self.start_date = start_date
self.state = state
self.test_outcome_settings = test_outcome_settings
self.updated_by = updated_by
self.updated_date = updated_date
self.url = url
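
# Illustrative sketch (not part of the generated module): a plan configured so
# that outcomes of the same test sync across its suites; names and paths are
# hypothetical.
#
#   plan = TestPlan(
#       name='Sprint 42 regression',
#       iteration='MyProject\\Sprint 42',
#       state='Active',
#       test_outcome_settings=TestOutcomeSettings(sync_outcome_across_suites=True),
#   )
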
class TestPlanCloneRequest(Model):
"""
:param destination_test_plan:
:type destination_test_plan: :class:`TestPlan <azure.devops.v7_1.test.models.TestPlan>`
:param options:
:type options: :class:`CloneOptions <azure.devops.v7_1.test.models.CloneOptions>`
:param suite_ids:
:type suite_ids: list of int
"""
_attribute_map = {
'destination_test_plan': {'key': 'destinationTestPlan', 'type': 'TestPlan'},
'options': {'key': 'options', 'type': 'CloneOptions'},
'suite_ids': {'key': 'suiteIds', 'type': '[int]'}
}
def __init__(self, destination_test_plan=None, options=None, suite_ids=None):
super(TestPlanCloneRequest, self).__init__()
self.destination_test_plan = destination_test_plan
self.options = options
self.suite_ids = suite_ids
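
# Illustrative sketch (not part of the generated module): cloning two suites
# into a destination plan. CloneOptions is defined earlier in this module; the
# option shown and all IDs are hypothetical.
#
#   request = TestPlanCloneRequest(
#       destination_test_plan=TestPlan(name='Cloned plan'),
#       options=CloneOptions(copy_all_suites=True),
#       suite_ids=[101, 102],
#   )
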
class TestPoint(Model):
"""
Test point.
:param assigned_to: AssignedTo. Type IdentityRef.
:type assigned_to: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param automated: Automated.
:type automated: bool
:param comment: Comment associated with test point.
:type comment: str
:param configuration: Configuration. Type ShallowReference.
:type configuration: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param failure_type: Failure type of test point.
:type failure_type: str
:param id: ID of the test point.
:type id: int
:param last_reset_to_active: Last date when test point was reset to Active.
:type last_reset_to_active: datetime
:param last_resolution_state_id: Last resolution state id of test point.
:type last_resolution_state_id: int
:param last_result: Last result of test point. Type ShallowReference.
:type last_result: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param last_result_details: Last result details of test point. Type LastResultDetails.
:type last_result_details: :class:`LastResultDetails <azure.devops.v7_1.test.models.LastResultDetails>`
:param last_result_state: Last result state of test point.
:type last_result_state: str
:param last_run_build_number: LastRun build number of test point.
:type last_run_build_number: str
:param last_test_run: Last testRun of test point. Type ShallowReference.
:type last_test_run: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param last_updated_by: Test point last updated by. Type IdentityRef.
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last updated date of test point.
:type last_updated_date: datetime
:param outcome: Outcome of test point.
:type outcome: str
:param revision: Revision number.
:type revision: int
:param state: State of test point.
:type state: str
:param suite: Suite of test point. Type ShallowReference.
:type suite: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param test_case: TestCase associated to test point. Type WorkItemReference.
:type test_case: :class:`WorkItemReference <azure.devops.v7_1.test.models.WorkItemReference>`
:param test_plan: TestPlan of test point. Type ShallowReference.
:type test_plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param url: Test point Url.
:type url: str
:param work_item_properties: Work item properties of test point.
:type work_item_properties: list of object
"""
_attribute_map = {
'assigned_to': {'key': 'assignedTo', 'type': 'IdentityRef'},
'automated': {'key': 'automated', 'type': 'bool'},
'comment': {'key': 'comment', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'failure_type': {'key': 'failureType', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'last_reset_to_active': {'key': 'lastResetToActive', 'type': 'iso-8601'},
'last_resolution_state_id': {'key': 'lastResolutionStateId', 'type': 'int'},
'last_result': {'key': 'lastResult', 'type': 'ShallowReference'},
'last_result_details': {'key': 'lastResultDetails', 'type': 'LastResultDetails'},
'last_result_state': {'key': 'lastResultState', 'type': 'str'},
'last_run_build_number': {'key': 'lastRunBuildNumber', 'type': 'str'},
'last_test_run': {'key': 'lastTestRun', 'type': 'ShallowReference'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'outcome': {'key': 'outcome', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'state': {'key': 'state', 'type': 'str'},
'suite': {'key': 'suite', 'type': 'ShallowReference'},
'test_case': {'key': 'testCase', 'type': 'WorkItemReference'},
'test_plan': {'key': 'testPlan', 'type': 'ShallowReference'},
'url': {'key': 'url', 'type': 'str'},
'work_item_properties': {'key': 'workItemProperties', 'type': '[object]'}
}
def __init__(self, assigned_to=None, automated=None, comment=None, configuration=None, failure_type=None, id=None, last_reset_to_active=None, last_resolution_state_id=None, last_result=None, last_result_details=None, last_result_state=None, last_run_build_number=None, last_test_run=None, last_updated_by=None, last_updated_date=None, outcome=None, revision=None, state=None, suite=None, test_case=None, test_plan=None, url=None, work_item_properties=None):
super(TestPoint, self).__init__()
self.assigned_to = assigned_to
self.automated = automated
self.comment = comment
self.configuration = configuration
self.failure_type = failure_type
self.id = id
self.last_reset_to_active = last_reset_to_active
self.last_resolution_state_id = last_resolution_state_id
self.last_result = last_result
self.last_result_details = last_result_details
self.last_result_state = last_result_state
self.last_run_build_number = last_run_build_number
self.last_test_run = last_test_run
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.outcome = outcome
self.revision = revision
self.state = state
self.suite = suite
self.test_case = test_case
self.test_plan = test_plan
self.url = url
self.work_item_properties = work_item_properties
class TestPointsQuery(Model):
"""
Test point query class.
:param order_by: Order by results.
:type order_by: str
:param points: List of test points
:type points: list of :class:`TestPoint <azure.devops.v7_1.test.models.TestPoint>`
:param points_filter: Filter
:type points_filter: :class:`PointsFilter <azure.devops.v7_1.test.models.PointsFilter>`
:param wit_fields: List of workitem fields to get.
:type wit_fields: list of str
"""
_attribute_map = {
'order_by': {'key': 'orderBy', 'type': 'str'},
'points': {'key': 'points', 'type': '[TestPoint]'},
'points_filter': {'key': 'pointsFilter', 'type': 'PointsFilter'},
'wit_fields': {'key': 'witFields', 'type': '[str]'}
}
def __init__(self, order_by=None, points=None, points_filter=None, wit_fields=None):
super(TestPointsQuery, self).__init__()
self.order_by = order_by
self.points = points
self.points_filter = points_filter
self.wit_fields = wit_fields
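
# Illustrative sketch (not part of the generated module): querying points for
# two test cases and requesting extra work item fields. PointsFilter is
# defined earlier in this module; the field names and IDs are hypothetical.
#
#   points_query = TestPointsQuery(
#       points_filter=PointsFilter(testcase_ids=[2001, 2002]),
#       wit_fields=['System.Title', 'Microsoft.VSTS.Common.Priority'],
#   )
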
class TestResolutionState(Model):
"""
Test Resolution State Details.
:param id: Test Resolution state Id.
:type id: int
:param name: Test Resolution State Name.
:type name: str
:param project:
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'}
}
def __init__(self, id=None, name=None, project=None):
super(TestResolutionState, self).__init__()
self.id = id
self.name = name
self.project = project
class TestResultCreateModel(Model):
"""
:param area:
:type area: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param associated_work_items:
:type associated_work_items: list of int
:param automated_test_id:
:type automated_test_id: str
:param automated_test_name:
:type automated_test_name: str
:param automated_test_storage:
:type automated_test_storage: str
:param automated_test_type:
:type automated_test_type: str
:param automated_test_type_id:
:type automated_test_type_id: str
:param comment:
:type comment: str
:param completed_date:
:type completed_date: str
:param computer_name:
:type computer_name: str
:param configuration:
:type configuration: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param custom_fields:
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.test.models.CustomTestField>`
:param duration_in_ms:
:type duration_in_ms: str
:param error_message:
:type error_message: str
:param failure_type:
:type failure_type: str
:param outcome:
:type outcome: str
:param owner:
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param resolution_state:
:type resolution_state: str
:param run_by:
:type run_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param stack_trace:
:type stack_trace: str
:param started_date:
:type started_date: str
:param state:
:type state: str
:param test_case:
:type test_case: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param test_case_priority:
:type test_case_priority: str
:param test_case_title:
:type test_case_title: str
:param test_point:
:type test_point: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'ShallowReference'},
'associated_work_items': {'key': 'associatedWorkItems', 'type': '[int]'},
'automated_test_id': {'key': 'automatedTestId', 'type': 'str'},
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'automated_test_type': {'key': 'automatedTestType', 'type': 'str'},
'automated_test_type_id': {'key': 'automatedTestTypeId', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'str'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'failure_type': {'key': 'failureType', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'resolution_state': {'key': 'resolutionState', 'type': 'str'},
'run_by': {'key': 'runBy', 'type': 'IdentityRef'},
'stack_trace': {'key': 'stackTrace', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'test_case': {'key': 'testCase', 'type': 'ShallowReference'},
'test_case_priority': {'key': 'testCasePriority', 'type': 'str'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'},
'test_point': {'key': 'testPoint', 'type': 'ShallowReference'}
}
def __init__(self, area=None, associated_work_items=None, automated_test_id=None, automated_test_name=None, automated_test_storage=None, automated_test_type=None, automated_test_type_id=None, comment=None, completed_date=None, computer_name=None, configuration=None, custom_fields=None, duration_in_ms=None, error_message=None, failure_type=None, outcome=None, owner=None, resolution_state=None, run_by=None, stack_trace=None, started_date=None, state=None, test_case=None, test_case_priority=None, test_case_title=None, test_point=None):
super(TestResultCreateModel, self).__init__()
self.area = area
self.associated_work_items = associated_work_items
self.automated_test_id = automated_test_id
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.automated_test_type = automated_test_type
self.automated_test_type_id = automated_test_type_id
self.comment = comment
self.completed_date = completed_date
self.computer_name = computer_name
self.configuration = configuration
self.custom_fields = custom_fields
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.failure_type = failure_type
self.outcome = outcome
self.owner = owner
self.resolution_state = resolution_state
self.run_by = run_by
self.stack_trace = stack_trace
self.started_date = started_date
self.state = state
self.test_case = test_case
self.test_case_priority = test_case_priority
self.test_case_title = test_case_title
self.test_point = test_point
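
# Illustrative sketch (not part of the generated module): a minimal payload
# reporting one automated result. As with the update model, the date fields
# and durationInMs are serialized as strings here; all values are hypothetical.
#
#   new_result = TestResultCreateModel(
#       automated_test_name='MyNamespace.MyTests.MyTest',
#       outcome='Failed',
#       error_message='Expected 200, got 500',
#       started_date='2024-01-01T10:00:00Z',
#       completed_date='2024-01-01T10:00:05Z',
#       state='Completed',
#   )
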
class TestResultDocument(Model):
"""
:param operation_reference:
:type operation_reference: :class:`TestOperationReference <azure.devops.v7_1.test.models.TestOperationReference>`
:param payload:
:type payload: :class:`TestResultPayload <azure.devops.v7_1.test.models.TestResultPayload>`
"""
_attribute_map = {
'operation_reference': {'key': 'operationReference', 'type': 'TestOperationReference'},
'payload': {'key': 'payload', 'type': 'TestResultPayload'}
}
def __init__(self, operation_reference=None, payload=None):
super(TestResultDocument, self).__init__()
self.operation_reference = operation_reference
self.payload = payload
class TestResultHistory(Model):
"""
:param group_by_field:
:type group_by_field: str
:param results_for_group:
:type results_for_group: list of :class:`TestResultHistoryDetailsForGroup <azure.devops.v7_1.test.models.TestResultHistoryDetailsForGroup>`
"""
_attribute_map = {
'group_by_field': {'key': 'groupByField', 'type': 'str'},
'results_for_group': {'key': 'resultsForGroup', 'type': '[TestResultHistoryDetailsForGroup]'}
}
def __init__(self, group_by_field=None, results_for_group=None):
super(TestResultHistory, self).__init__()
self.group_by_field = group_by_field
self.results_for_group = results_for_group
class TestResultHistoryDetailsForGroup(Model):
"""
:param group_by_value:
:type group_by_value: object
:param latest_result:
:type latest_result: :class:`TestCaseResult <azure.devops.v7_1.test.models.TestCaseResult>`
"""
_attribute_map = {
'group_by_value': {'key': 'groupByValue', 'type': 'object'},
'latest_result': {'key': 'latestResult', 'type': 'TestCaseResult'}
}
def __init__(self, group_by_value=None, latest_result=None):
super(TestResultHistoryDetailsForGroup, self).__init__()
self.group_by_value = group_by_value
self.latest_result = latest_result
class TestResultHistoryForGroup(Model):
"""
List of test results filtered on the basis of GroupByValue
:param display_name: Display name of the group.
:type display_name: str
:param group_by_value: Name or Id of the group identifier by which results are grouped together.
:type group_by_value: str
:param results: List of results for GroupByValue
:type results: list of :class:`TestCaseResult <azure.devops.v7_1.test.models.TestCaseResult>`
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'group_by_value': {'key': 'groupByValue', 'type': 'str'},
'results': {'key': 'results', 'type': '[TestCaseResult]'}
}
def __init__(self, display_name=None, group_by_value=None, results=None):
super(TestResultHistoryForGroup, self).__init__()
self.display_name = display_name
self.group_by_value = group_by_value
self.results = results
class TestResultMetaData(Model):
"""
Represents a Meta Data of a test result.
:param automated_test_name: AutomatedTestName of test result.
:type automated_test_name: str
:param automated_test_storage: AutomatedTestStorage of test result.
:type automated_test_storage: str
:param flaky_identifiers: List of Flaky Identifiers for TestCaseReferenceId
:type flaky_identifiers: list of :class:`TestFlakyIdentifier <azure.devops.v7_1.test.models.TestFlakyIdentifier>`
:param owner: Owner of test result.
:type owner: str
:param priority: Priority of test result.
:type priority: int
:param test_case_reference_id: ID of TestCaseReference.
:type test_case_reference_id: int
:param test_case_title: TestCaseTitle of test result.
:type test_case_title: str
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'flaky_identifiers': {'key': 'flakyIdentifiers', 'type': '[TestFlakyIdentifier]'},
'owner': {'key': 'owner', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'test_case_reference_id': {'key': 'testCaseReferenceId', 'type': 'int'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'}
}
def __init__(self, automated_test_name=None, automated_test_storage=None, flaky_identifiers=None, owner=None, priority=None, test_case_reference_id=None, test_case_title=None):
super(TestResultMetaData, self).__init__()
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.flaky_identifiers = flaky_identifiers
self.owner = owner
self.priority = priority
self.test_case_reference_id = test_case_reference_id
self.test_case_title = test_case_title
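
# Illustrative sketch (not part of the generated module): metadata carrying
# per-branch flakiness for a test case reference; values are hypothetical.
#
#   meta = TestResultMetaData(
#       test_case_reference_id=55,
#       test_case_title='MyTest',
#       flaky_identifiers=[
#           TestFlakyIdentifier(branch_name='refs/heads/main', is_flaky=True),
#       ],
#   )
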
class TestResultModelBase(Model):
"""
:param comment: Comment in result.
:type comment: str
:param completed_date: Time when execution completed (UTC).
:type completed_date: datetime
:param duration_in_ms: Duration of execution.
:type duration_in_ms: float
:param error_message: Error message in result.
:type error_message: str
:param outcome: Test outcome of result.
:type outcome: str
:param started_date: Time when execution started (UTC).
:type started_date: datetime
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'}
}
def __init__(self, comment=None, completed_date=None, duration_in_ms=None, error_message=None, outcome=None, started_date=None):
super(TestResultModelBase, self).__init__()
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.outcome = outcome
self.started_date = started_date
class TestResultParameterModel(Model):
"""
Test parameter information in a test iteration.
:param action_path: Test step path where parameter is referenced.
:type action_path: str
:param iteration_id: Iteration ID.
:type iteration_id: int
:param parameter_name: Name of parameter.
:type parameter_name: str
:param step_identifier: Step ID of the test case. For a shared step, it is the step ID of the shared step in the test case work item followed by the step ID within the shared step, separated by ";". Example: a TestCase work item has two steps: 1) a normal step with Id = 1 and 2) a shared step with Id = 2, which contains a) a normal step with Id = 1. The value of StepIdentifier for the first step is "1" and for the second step is "2;1".
:type step_identifier: str
:param url: Url of test parameter. Deprecated in hosted environment.
:type url: str
:param value: Value of parameter.
:type value: str
"""
_attribute_map = {
'action_path': {'key': 'actionPath', 'type': 'str'},
'iteration_id': {'key': 'iterationId', 'type': 'int'},
'parameter_name': {'key': 'parameterName', 'type': 'str'},
'step_identifier': {'key': 'stepIdentifier', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_path=None, iteration_id=None, parameter_name=None, step_identifier=None, url=None, value=None):
super(TestResultParameterModel, self).__init__()
self.action_path = action_path
self.iteration_id = iteration_id
self.parameter_name = parameter_name
self.step_identifier = step_identifier
self.url = url
self.value = value
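
# Illustrative sketch (not part of the generated module): the step_identifier
# convention from the docstring above. A parameter referenced inside a shared
# step (Id = 2) at its first inner step (Id = 1) uses "2;1"; all values are
# hypothetical.
#
#   parameter = TestResultParameterModel(
#       iteration_id=1,
#       parameter_name='username',
#       value='alice',
#       step_identifier='2;1',
#   )
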
class TestResultPayload(Model):
"""
:param comment:
:type comment: str
:param name:
:type name: str
:param stream:
:type stream: str
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'stream': {'key': 'stream', 'type': 'str'}
}
def __init__(self, comment=None, name=None, stream=None):
super(TestResultPayload, self).__init__()
self.comment = comment
self.name = name
self.stream = stream
class TestResultsContext(Model):
"""
:param build:
:type build: :class:`BuildReference <azure.devops.v7_1.test.models.BuildReference>`
:param context_type:
:type context_type: object
:param pipeline_reference:
:type pipeline_reference: :class:`PipelineReference <azure.devops.v7_1.test.models.PipelineReference>`
:param release:
:type release: :class:`ReleaseReference <azure.devops.v7_1.test.models.ReleaseReference>`
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'BuildReference'},
'context_type': {'key': 'contextType', 'type': 'object'},
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'release': {'key': 'release', 'type': 'ReleaseReference'}
}
def __init__(self, build=None, context_type=None, pipeline_reference=None, release=None):
super(TestResultsContext, self).__init__()
self.build = build
self.context_type = context_type
self.pipeline_reference = pipeline_reference
self.release = release
class TestResultsDetails(Model):
"""
:param group_by_field:
:type group_by_field: str
:param results_for_group:
:type results_for_group: list of :class:`TestResultsDetailsForGroup <azure.devops.v7_1.test.models.TestResultsDetailsForGroup>`
"""
_attribute_map = {
'group_by_field': {'key': 'groupByField', 'type': 'str'},
'results_for_group': {'key': 'resultsForGroup', 'type': '[TestResultsDetailsForGroup]'}
}
def __init__(self, group_by_field=None, results_for_group=None):
super(TestResultsDetails, self).__init__()
self.group_by_field = group_by_field
self.results_for_group = results_for_group
class TestResultsDetailsForGroup(Model):
"""
:param group_by_value:
:type group_by_value: object
:param results:
:type results: list of :class:`TestCaseResult <azure.devops.v7_1.test.models.TestCaseResult>`
:param results_count_by_outcome:
:type results_count_by_outcome: dict
:param tags:
:type tags: list of str
"""
_attribute_map = {
'group_by_value': {'key': 'groupByValue', 'type': 'object'},
'results': {'key': 'results', 'type': '[TestCaseResult]'},
'results_count_by_outcome': {'key': 'resultsCountByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'tags': {'key': 'tags', 'type': '[str]'}
}
def __init__(self, group_by_value=None, results=None, results_count_by_outcome=None, tags=None):
super(TestResultsDetailsForGroup, self).__init__()
self.group_by_value = group_by_value
self.results = results
self.results_count_by_outcome = results_count_by_outcome
self.tags = tags
class TestResultsGroupsForBuild(Model):
"""
:param build_id: BuildId for which groupby result is fetched.
:type build_id: int
:param fields: The group by results
:type fields: list of :class:`FieldDetailsForTestResults <azure.devops.v7_1.test.models.FieldDetailsForTestResults>`
"""
_attribute_map = {
'build_id': {'key': 'buildId', 'type': 'int'},
'fields': {'key': 'fields', 'type': '[FieldDetailsForTestResults]'}
}
def __init__(self, build_id=None, fields=None):
super(TestResultsGroupsForBuild, self).__init__()
self.build_id = build_id
self.fields = fields
class TestResultsGroupsForRelease(Model):
"""
:param fields: The group by results
:type fields: list of :class:`FieldDetailsForTestResults <azure.devops.v7_1.test.models.FieldDetailsForTestResults>`
:param release_env_id: Release Environment Id for which groupby result is fetched.
:type release_env_id: int
:param release_id: ReleaseId for which groupby result is fetched.
:type release_id: int
"""
_attribute_map = {
'fields': {'key': 'fields', 'type': '[FieldDetailsForTestResults]'},
'release_env_id': {'key': 'releaseEnvId', 'type': 'int'},
'release_id': {'key': 'releaseId', 'type': 'int'}
}
def __init__(self, fields=None, release_env_id=None, release_id=None):
super(TestResultsGroupsForRelease, self).__init__()
self.fields = fields
self.release_env_id = release_env_id
self.release_id = release_id
class TestResultsQuery(Model):
"""
:param fields:
:type fields: list of str
:param results:
:type results: list of :class:`TestCaseResult <azure.devops.v7_1.test.models.TestCaseResult>`
:param results_filter:
:type results_filter: :class:`ResultsFilter <azure.devops.v7_1.test.models.ResultsFilter>`
"""
_attribute_map = {
'fields': {'key': 'fields', 'type': '[str]'},
'results': {'key': 'results', 'type': '[TestCaseResult]'},
'results_filter': {'key': 'resultsFilter', 'type': 'ResultsFilter'}
}
def __init__(self, fields=None, results=None, results_filter=None):
super(TestResultsQuery, self).__init__()
self.fields = fields
self.results = results
self.results_filter = results_filter
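
# Illustrative sketch (not part of the generated module): requesting selected
# fields for results matching a filter. ResultsFilter is defined earlier in
# this module; the field names and branch are hypothetical.
#
#   results_query = TestResultsQuery(
#       fields=['outcome', 'durationInMs'],
#       results_filter=ResultsFilter(branch='refs/heads/main'),
#   )
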
class TestResultSummary(Model):
"""
:param aggregated_results_analysis:
:type aggregated_results_analysis: :class:`AggregatedResultsAnalysis <azure.devops.v7_1.test.models.AggregatedResultsAnalysis>`
:param no_config_runs_count:
:type no_config_runs_count: int
:param team_project:
:type team_project: :class:`TeamProjectReference <azure.devops.v7_1.test.models.TeamProjectReference>`
:param test_failures:
:type test_failures: :class:`TestFailuresAnalysis <azure.devops.v7_1.test.models.TestFailuresAnalysis>`
:param test_results_context:
:type test_results_context: :class:`TestResultsContext <azure.devops.v7_1.test.models.TestResultsContext>`
:param total_runs_count:
:type total_runs_count: int
"""
_attribute_map = {
'aggregated_results_analysis': {'key': 'aggregatedResultsAnalysis', 'type': 'AggregatedResultsAnalysis'},
'no_config_runs_count': {'key': 'noConfigRunsCount', 'type': 'int'},
'team_project': {'key': 'teamProject', 'type': 'TeamProjectReference'},
'test_failures': {'key': 'testFailures', 'type': 'TestFailuresAnalysis'},
'test_results_context': {'key': 'testResultsContext', 'type': 'TestResultsContext'},
'total_runs_count': {'key': 'totalRunsCount', 'type': 'int'}
}
def __init__(self, aggregated_results_analysis=None, no_config_runs_count=None, team_project=None, test_failures=None, test_results_context=None, total_runs_count=None):
super(TestResultSummary, self).__init__()
self.aggregated_results_analysis = aggregated_results_analysis
self.no_config_runs_count = no_config_runs_count
self.team_project = team_project
self.test_failures = test_failures
self.test_results_context = test_results_context
self.total_runs_count = total_runs_count
class TestResultTrendFilter(Model):
"""
:param branch_names:
:type branch_names: list of str
:param build_count:
:type build_count: int
:param definition_ids:
:type definition_ids: list of int
:param env_definition_ids:
:type env_definition_ids: list of int
:param max_complete_date:
:type max_complete_date: datetime
:param publish_context:
:type publish_context: str
:param test_run_titles:
:type test_run_titles: list of str
:param trend_days:
:type trend_days: int
"""
_attribute_map = {
'branch_names': {'key': 'branchNames', 'type': '[str]'},
'build_count': {'key': 'buildCount', 'type': 'int'},
'definition_ids': {'key': 'definitionIds', 'type': '[int]'},
'env_definition_ids': {'key': 'envDefinitionIds', 'type': '[int]'},
'max_complete_date': {'key': 'maxCompleteDate', 'type': 'iso-8601'},
'publish_context': {'key': 'publishContext', 'type': 'str'},
'test_run_titles': {'key': 'testRunTitles', 'type': '[str]'},
'trend_days': {'key': 'trendDays', 'type': 'int'}
}
def __init__(self, branch_names=None, build_count=None, definition_ids=None, env_definition_ids=None, max_complete_date=None, publish_context=None, test_run_titles=None, trend_days=None):
super(TestResultTrendFilter, self).__init__()
self.branch_names = branch_names
self.build_count = build_count
self.definition_ids = definition_ids
self.env_definition_ids = env_definition_ids
self.max_complete_date = max_complete_date
self.publish_context = publish_context
self.test_run_titles = test_run_titles
self.trend_days = trend_days
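
# Illustrative sketch (not part of the generated module): a trend filter over
# the last seven days of builds for one definition on one branch; IDs and
# names are hypothetical.
#
#   trend_filter = TestResultTrendFilter(
#       branch_names=['refs/heads/main'],
#       definition_ids=[12],
#       publish_context='Build',
#       trend_days=7,
#   )
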
class TestRun(Model):
"""
Test run details.
:param build: Build associated with this test run.
:type build: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param build_configuration: Build configuration details associated with this test run.
:type build_configuration: :class:`BuildConfiguration <azure.devops.v7_1.test.models.BuildConfiguration>`
:param comment: Comments entered by those analyzing the run.
:type comment: str
:param completed_date: Completed date time of the run.
:type completed_date: datetime
:param controller: Test Run Controller.
:type controller: str
:param created_date: Test Run CreatedDate.
:type created_date: datetime
:param custom_fields: List of Custom Fields for TestRun.
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.test.models.CustomTestField>`
:param drop_location: Drop Location for the test Run.
:type drop_location: str
:param dtl_aut_environment:
:type dtl_aut_environment: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param dtl_environment:
:type dtl_environment: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param dtl_environment_creation_details:
:type dtl_environment_creation_details: :class:`DtlEnvironmentDetails <azure.devops.v7_1.test.models.DtlEnvironmentDetails>`
:param due_date: Due date and time for test run.
:type due_date: datetime
:param error_message: Error message associated with the run.
:type error_message: str
:param filter:
:type filter: :class:`RunFilter <azure.devops.v7_1.test.models.RunFilter>`
:param id: ID of the test run.
:type id: int
:param incomplete_tests: Number of Incomplete Tests.
:type incomplete_tests: int
:param is_automated: true if test run is automated, false otherwise.
:type is_automated: bool
:param iteration: The iteration to which the run belongs.
:type iteration: str
:param last_updated_by: Team Foundation ID of the identity that last updated the test run.
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last updated date and time
:type last_updated_date: datetime
:param name: Name of the test run.
:type name: str
:param not_applicable_tests: Number of Not Applicable Tests.
:type not_applicable_tests: int
:param owner: Team Foundation ID of the owner of the runs.
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param passed_tests: Number of passed tests in the run
:type passed_tests: int
:param phase: Phase/State for the testRun.
:type phase: str
:param pipeline_reference: Reference of the pipeline to which this test run belongs.
:type pipeline_reference: :class:`PipelineReference <azure.devops.v7_1.test.models.PipelineReference>`
:param plan: Test plan associated with this test run.
:type plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param post_process_state: Post Process State.
:type post_process_state: str
:param project: Project associated with this run.
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param release: Release Reference for the Test Run.
:type release: :class:`ReleaseReference <azure.devops.v7_1.test.models.ReleaseReference>`
:param release_environment_uri: Release Environment Uri for TestRun.
:type release_environment_uri: str
:param release_uri: Release Uri for TestRun.
:type release_uri: str
:param revision:
:type revision: int
:param run_statistics: RunSummary by outcome.
:type run_statistics: list of :class:`RunStatistic <azure.devops.v7_1.test.models.RunStatistic>`
:param started_date: Start date time of the run.
:type started_date: datetime
:param state: The state of the run. Type TestRunState. Valid states: Unspecified, NotStarted, InProgress, Completed, Waiting, Aborted, NeedsInvestigation.
:type state: str
:param substate: TestRun Substate.
:type substate: object
:param tags: Tags attached with this test run.
:type tags: list of :class:`TestTag <azure.devops.v7_1.test.models.TestTag>`
:param test_environment: Test environment associated with the run.
:type test_environment: :class:`TestEnvironment <azure.devops.v7_1.test.models.TestEnvironment>`
:param test_message_log_id:
:type test_message_log_id: int
:param test_settings:
:type test_settings: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param total_tests: Total tests in the run
:type total_tests: int
:param unanalyzed_tests: Number of failed tests in the run.
:type unanalyzed_tests: int
:param url: Url of the test run
:type url: str
:param web_access_url: Web Access Url for TestRun.
:type web_access_url: str
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_configuration': {'key': 'buildConfiguration', 'type': 'BuildConfiguration'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'controller': {'key': 'controller', 'type': 'str'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'drop_location': {'key': 'dropLocation', 'type': 'str'},
'dtl_aut_environment': {'key': 'dtlAutEnvironment', 'type': 'ShallowReference'},
'dtl_environment': {'key': 'dtlEnvironment', 'type': 'ShallowReference'},
'dtl_environment_creation_details': {'key': 'dtlEnvironmentCreationDetails', 'type': 'DtlEnvironmentDetails'},
'due_date': {'key': 'dueDate', 'type': 'iso-8601'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'RunFilter'},
'id': {'key': 'id', 'type': 'int'},
'incomplete_tests': {'key': 'incompleteTests', 'type': 'int'},
'is_automated': {'key': 'isAutomated', 'type': 'bool'},
'iteration': {'key': 'iteration', 'type': 'str'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'not_applicable_tests': {'key': 'notApplicableTests', 'type': 'int'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'passed_tests': {'key': 'passedTests', 'type': 'int'},
'phase': {'key': 'phase', 'type': 'str'},
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'plan': {'key': 'plan', 'type': 'ShallowReference'},
'post_process_state': {'key': 'postProcessState', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'release': {'key': 'release', 'type': 'ReleaseReference'},
'release_environment_uri': {'key': 'releaseEnvironmentUri', 'type': 'str'},
'release_uri': {'key': 'releaseUri', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'run_statistics': {'key': 'runStatistics', 'type': '[RunStatistic]'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'str'},
'substate': {'key': 'substate', 'type': 'object'},
'tags': {'key': 'tags', 'type': '[TestTag]'},
'test_environment': {'key': 'testEnvironment', 'type': 'TestEnvironment'},
'test_message_log_id': {'key': 'testMessageLogId', 'type': 'int'},
'test_settings': {'key': 'testSettings', 'type': 'ShallowReference'},
'total_tests': {'key': 'totalTests', 'type': 'int'},
'unanalyzed_tests': {'key': 'unanalyzedTests', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'},
'web_access_url': {'key': 'webAccessUrl', 'type': 'str'}
}
def __init__(self, build=None, build_configuration=None, comment=None, completed_date=None, controller=None, created_date=None, custom_fields=None, drop_location=None, dtl_aut_environment=None, dtl_environment=None, dtl_environment_creation_details=None, due_date=None, error_message=None, filter=None, id=None, incomplete_tests=None, is_automated=None, iteration=None, last_updated_by=None, last_updated_date=None, name=None, not_applicable_tests=None, owner=None, passed_tests=None, phase=None, pipeline_reference=None, plan=None, post_process_state=None, project=None, release=None, release_environment_uri=None, release_uri=None, revision=None, run_statistics=None, started_date=None, state=None, substate=None, tags=None, test_environment=None, test_message_log_id=None, test_settings=None, total_tests=None, unanalyzed_tests=None, url=None, web_access_url=None):
super(TestRun, self).__init__()
self.build = build
self.build_configuration = build_configuration
self.comment = comment
self.completed_date = completed_date
self.controller = controller
self.created_date = created_date
self.custom_fields = custom_fields
self.drop_location = drop_location
self.dtl_aut_environment = dtl_aut_environment
self.dtl_environment = dtl_environment
self.dtl_environment_creation_details = dtl_environment_creation_details
self.due_date = due_date
self.error_message = error_message
self.filter = filter
self.id = id
self.incomplete_tests = incomplete_tests
self.is_automated = is_automated
self.iteration = iteration
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.name = name
self.not_applicable_tests = not_applicable_tests
self.owner = owner
self.passed_tests = passed_tests
self.phase = phase
self.pipeline_reference = pipeline_reference
self.plan = plan
self.post_process_state = post_process_state
self.project = project
self.release = release
self.release_environment_uri = release_environment_uri
self.release_uri = release_uri
self.revision = revision
self.run_statistics = run_statistics
self.started_date = started_date
self.state = state
self.substate = substate
self.tags = tags
self.test_environment = test_environment
self.test_message_log_id = test_message_log_id
self.test_settings = test_settings
self.total_tests = total_tests
self.unanalyzed_tests = unanalyzed_tests
self.url = url
self.web_access_url = web_access_url
class TestRunCoverage(Model):
"""
Test Run Code Coverage Details
:param last_error: Last Error
:type last_error: str
:param modules: List of Modules Coverage
:type modules: list of :class:`ModuleCoverage <azure.devops.v7_1.test.models.ModuleCoverage>`
:param state: State
:type state: str
:param test_run: Reference of test Run.
:type test_run: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
"""
_attribute_map = {
'last_error': {'key': 'lastError', 'type': 'str'},
'modules': {'key': 'modules', 'type': '[ModuleCoverage]'},
'state': {'key': 'state', 'type': 'str'},
'test_run': {'key': 'testRun', 'type': 'ShallowReference'}
}
def __init__(self, last_error=None, modules=None, state=None, test_run=None):
super(TestRunCoverage, self).__init__()
self.last_error = last_error
self.modules = modules
self.state = state
self.test_run = test_run
class TestRunStatistic(Model):
"""
Test run statistics.
:param run:
:type run: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param run_statistics:
:type run_statistics: list of :class:`RunStatistic <azure.devops.v7_1.test.models.RunStatistic>`
"""
_attribute_map = {
'run': {'key': 'run', 'type': 'ShallowReference'},
'run_statistics': {'key': 'runStatistics', 'type': '[RunStatistic]'}
}
def __init__(self, run=None, run_statistics=None):
super(TestRunStatistic, self).__init__()
self.run = run
self.run_statistics = run_statistics
class TestSession(Model):
"""
Test Session
:param area: Area path of the test session
:type area: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param comment: Comments in the test session
:type comment: str
    :param end_date: End date of the test session
:type end_date: datetime
:param id: Id of the test session
:type id: int
:param last_updated_by: Last Updated By Reference
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last updated date
:type last_updated_date: datetime
:param owner: Owner of the test session
:type owner: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param project: Project to which the test session belongs
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param property_bag: Generic store for test session data
:type property_bag: :class:`PropertyBag <azure.devops.v7_1.test.models.PropertyBag>`
:param revision: Revision of the test session
:type revision: int
:param source: Source of the test session
:type source: object
:param start_date: Start date
:type start_date: datetime
:param state: State of the test session
:type state: object
:param title: Title of the test session
:type title: str
:param url: Url of Test Session Resource
:type url: str
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'ShallowReference'},
'comment': {'key': 'comment', 'type': 'str'},
'end_date': {'key': 'endDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'property_bag': {'key': 'propertyBag', 'type': 'PropertyBag'},
'revision': {'key': 'revision', 'type': 'int'},
'source': {'key': 'source', 'type': 'object'},
'start_date': {'key': 'startDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'object'},
'title': {'key': 'title', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, area=None, comment=None, end_date=None, id=None, last_updated_by=None, last_updated_date=None, owner=None, project=None, property_bag=None, revision=None, source=None, start_date=None, state=None, title=None, url=None):
super(TestSession, self).__init__()
self.area = area
self.comment = comment
self.end_date = end_date
self.id = id
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.owner = owner
self.project = project
self.property_bag = property_bag
self.revision = revision
self.source = source
self.start_date = start_date
self.state = state
self.title = title
self.url = url
class TestSettings(Model):
"""
Represents the test settings of the run. Used to create test settings and fetch test settings
:param area_path: Area path required to create test settings
:type area_path: str
:param description: Description of the test settings. Used in create test settings.
:type description: str
    :param is_public: Indicates if the test settings are public or private. Used in create test settings.
:type is_public: bool
:param machine_roles: Xml string of machine roles. Used in create test settings.
:type machine_roles: str
:param test_settings_content: Test settings content.
:type test_settings_content: str
:param test_settings_id: Test settings id.
:type test_settings_id: int
:param test_settings_name: Test settings name.
:type test_settings_name: str
"""
_attribute_map = {
'area_path': {'key': 'areaPath', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'is_public': {'key': 'isPublic', 'type': 'bool'},
'machine_roles': {'key': 'machineRoles', 'type': 'str'},
'test_settings_content': {'key': 'testSettingsContent', 'type': 'str'},
'test_settings_id': {'key': 'testSettingsId', 'type': 'int'},
'test_settings_name': {'key': 'testSettingsName', 'type': 'str'}
}
def __init__(self, area_path=None, description=None, is_public=None, machine_roles=None, test_settings_content=None, test_settings_id=None, test_settings_name=None):
super(TestSettings, self).__init__()
self.area_path = area_path
self.description = description
self.is_public = is_public
self.machine_roles = machine_roles
self.test_settings_content = test_settings_content
self.test_settings_id = test_settings_id
self.test_settings_name = test_settings_name
class TestSubResult(Model):
"""
Represents a sub result of a test result.
:param comment: Comment in sub result.
:type comment: str
    :param completed_date: Time when test execution completed (UTC).
:type completed_date: datetime
:param computer_name: Machine where test executed.
:type computer_name: str
:param configuration: Reference to test configuration.
:type configuration: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param custom_fields: Additional properties of sub result.
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.test.models.CustomTestField>`
:param display_name: Name of sub result.
:type display_name: str
:param duration_in_ms: Duration of test execution.
:type duration_in_ms: long
:param error_message: Error message in sub result.
:type error_message: str
:param id: ID of sub result.
:type id: int
    :param last_updated_date: Time when result was last updated (UTC).
:type last_updated_date: datetime
:param outcome: Outcome of sub result.
:type outcome: str
:param parent_id: Immediate parent ID of sub result.
:type parent_id: int
    :param result_group_type: Hierarchy type of the result; the default value None means it is a leaf node.
:type result_group_type: object
:param sequence_id: Index number of sub result.
:type sequence_id: int
:param stack_trace: Stacktrace.
:type stack_trace: str
    :param started_date: Time when test execution started (UTC).
:type started_date: datetime
    :param sub_results: List of sub results inside a sub result; if ResultGroupType is not None, it holds sub results of the corresponding type.
:type sub_results: list of :class:`TestSubResult <azure.devops.v7_1.test.models.TestSubResult>`
:param test_result: Reference to test result.
:type test_result: :class:`TestCaseResultIdentifier <azure.devops.v7_1.test.models.TestCaseResultIdentifier>`
:param url: Url of sub result.
:type url: str
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'display_name': {'key': 'displayName', 'type': 'str'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'long'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'outcome': {'key': 'outcome', 'type': 'str'},
'parent_id': {'key': 'parentId', 'type': 'int'},
'result_group_type': {'key': 'resultGroupType', 'type': 'object'},
'sequence_id': {'key': 'sequenceId', 'type': 'int'},
'stack_trace': {'key': 'stackTrace', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'sub_results': {'key': 'subResults', 'type': '[TestSubResult]'},
'test_result': {'key': 'testResult', 'type': 'TestCaseResultIdentifier'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, comment=None, completed_date=None, computer_name=None, configuration=None, custom_fields=None, display_name=None, duration_in_ms=None, error_message=None, id=None, last_updated_date=None, outcome=None, parent_id=None, result_group_type=None, sequence_id=None, stack_trace=None, started_date=None, sub_results=None, test_result=None, url=None):
super(TestSubResult, self).__init__()
self.comment = comment
self.completed_date = completed_date
self.computer_name = computer_name
self.configuration = configuration
self.custom_fields = custom_fields
self.display_name = display_name
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.id = id
self.last_updated_date = last_updated_date
self.outcome = outcome
self.parent_id = parent_id
self.result_group_type = result_group_type
self.sequence_id = sequence_id
self.stack_trace = stack_trace
self.started_date = started_date
self.sub_results = sub_results
self.test_result = test_result
self.url = url
class TestSuite(Model):
"""
Test suite
:param area_uri: Area uri of the test suite.
:type area_uri: str
:param default_configurations: Test suite default configuration.
:type default_configurations: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param default_testers: Test suite default testers.
:type default_testers: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param children: Child test suites of current test suite.
:type children: list of :class:`TestSuite <azure.devops.v7_1.test.models.TestSuite>`
:param id: Id of test suite.
:type id: int
    :param inherit_default_configurations: Whether the default configuration was inherited.
:type inherit_default_configurations: bool
:param last_error: Last error for test suite.
:type last_error: str
:param last_populated_date: Last populated date.
:type last_populated_date: datetime
    :param last_updated_by: IdentityRef of the user who last updated the test suite.
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.test.models.IdentityRef>`
:param last_updated_date: Last update date.
:type last_updated_date: datetime
:param name: Name of test suite.
:type name: str
:param parent: Test suite parent shallow reference.
:type parent: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param plan: Test plan to which the test suite belongs.
:type plan: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param project: Test suite project shallow reference.
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param query_string: Test suite query string, for dynamic suites.
:type query_string: str
:param requirement_id: Test suite requirement id.
:type requirement_id: int
:param revision: Test suite revision.
:type revision: int
:param state: State of test suite.
:type state: str
:param suites: List of shallow reference of suites.
:type suites: list of :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param suite_type: Test suite type.
:type suite_type: str
:param test_case_count: Test cases count.
:type test_case_count: int
:param test_cases_url: Test case url.
:type test_cases_url: str
    :param text: Used in tree view. If the test suite is the root suite, this is the name of the plan; otherwise it is the title of the suite.
:type text: str
:param url: Url of test suite.
:type url: str
"""
_attribute_map = {
'area_uri': {'key': 'areaUri', 'type': 'str'},
'default_configurations': {'key': 'defaultConfigurations', 'type': '[ShallowReference]'},
'default_testers': {'key': 'defaultTesters', 'type': '[ShallowReference]'},
'children': {'key': 'children', 'type': '[TestSuite]'},
'id': {'key': 'id', 'type': 'int'},
'inherit_default_configurations': {'key': 'inheritDefaultConfigurations', 'type': 'bool'},
'last_error': {'key': 'lastError', 'type': 'str'},
'last_populated_date': {'key': 'lastPopulatedDate', 'type': 'iso-8601'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'parent': {'key': 'parent', 'type': 'ShallowReference'},
'plan': {'key': 'plan', 'type': 'ShallowReference'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'query_string': {'key': 'queryString', 'type': 'str'},
'requirement_id': {'key': 'requirementId', 'type': 'int'},
'revision': {'key': 'revision', 'type': 'int'},
'state': {'key': 'state', 'type': 'str'},
'suites': {'key': 'suites', 'type': '[ShallowReference]'},
'suite_type': {'key': 'suiteType', 'type': 'str'},
'test_case_count': {'key': 'testCaseCount', 'type': 'int'},
'test_cases_url': {'key': 'testCasesUrl', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, area_uri=None, default_configurations=None, default_testers=None, children=None, id=None, inherit_default_configurations=None, last_error=None, last_populated_date=None, last_updated_by=None, last_updated_date=None, name=None, parent=None, plan=None, project=None, query_string=None, requirement_id=None, revision=None, state=None, suites=None, suite_type=None, test_case_count=None, test_cases_url=None, text=None, url=None):
super(TestSuite, self).__init__()
self.area_uri = area_uri
self.default_configurations = default_configurations
self.default_testers = default_testers
self.children = children
self.id = id
self.inherit_default_configurations = inherit_default_configurations
self.last_error = last_error
self.last_populated_date = last_populated_date
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.name = name
self.parent = parent
self.plan = plan
self.project = project
self.query_string = query_string
self.requirement_id = requirement_id
self.revision = revision
self.state = state
self.suites = suites
self.suite_type = suite_type
self.test_case_count = test_case_count
self.test_cases_url = test_cases_url
self.text = text
self.url = url
class TestSuiteCloneRequest(Model):
"""
Test suite clone request
:param clone_options: Clone options for cloning the test suite.
:type clone_options: :class:`CloneOptions <azure.devops.v7_1.test.models.CloneOptions>`
    :param destination_suite_id: Suite ID under which the suite will be cloned.
:type destination_suite_id: int
:param destination_suite_project_name: Destination suite project name.
:type destination_suite_project_name: str
"""
_attribute_map = {
'clone_options': {'key': 'cloneOptions', 'type': 'CloneOptions'},
'destination_suite_id': {'key': 'destinationSuiteId', 'type': 'int'},
'destination_suite_project_name': {'key': 'destinationSuiteProjectName', 'type': 'str'}
}
def __init__(self, clone_options=None, destination_suite_id=None, destination_suite_project_name=None):
super(TestSuiteCloneRequest, self).__init__()
self.clone_options = clone_options
self.destination_suite_id = destination_suite_id
self.destination_suite_project_name = destination_suite_project_name
class TestSummaryForWorkItem(Model):
"""
:param summary:
:type summary: :class:`AggregatedDataForResultTrend <azure.devops.v7_1.test.models.AggregatedDataForResultTrend>`
:param work_item:
:type work_item: :class:`WorkItemReference <azure.devops.v7_1.test.models.WorkItemReference>`
"""
_attribute_map = {
'summary': {'key': 'summary', 'type': 'AggregatedDataForResultTrend'},
'work_item': {'key': 'workItem', 'type': 'WorkItemReference'}
}
def __init__(self, summary=None, work_item=None):
super(TestSummaryForWorkItem, self).__init__()
self.summary = summary
self.work_item = work_item
class TestTag(Model):
"""
Tag attached to a run or result.
:param name: Name of the tag, alphanumeric value less than 30 chars
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, name=None):
super(TestTag, self).__init__()
self.name = name
class TestToWorkItemLinks(Model):
"""
:param test:
:type test: :class:`TestMethod <azure.devops.v7_1.test.models.TestMethod>`
:param work_items:
:type work_items: list of :class:`WorkItemReference <azure.devops.v7_1.test.models.WorkItemReference>`
"""
_attribute_map = {
'test': {'key': 'test', 'type': 'TestMethod'},
'work_items': {'key': 'workItems', 'type': '[WorkItemReference]'}
}
def __init__(self, test=None, work_items=None):
super(TestToWorkItemLinks, self).__init__()
self.test = test
self.work_items = work_items
class TestVariable(Model):
"""
:param description: Description of the test variable
:type description: str
:param id: Id of the test variable
:type id: int
:param name: Name of the test variable
:type name: str
:param project: Project to which the test variable belongs
:type project: :class:`ShallowReference <azure.devops.v7_1.test.models.ShallowReference>`
:param revision: Revision
:type revision: int
:param url: Url of the test variable
:type url: str
:param values: List of allowed values
:type values: list of str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'revision': {'key': 'revision', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'}
}
def __init__(self, description=None, id=None, name=None, project=None, revision=None, url=None, values=None):
super(TestVariable, self).__init__()
self.description = description
self.id = id
self.name = name
self.project = project
self.revision = revision
self.url = url
self.values = values
class WorkItemReference(Model):
"""
WorkItem reference Details.
:param id: WorkItem Id.
:type id: str
:param name: WorkItem Name.
:type name: str
    :param type: WorkItem Type. Valid values: Bug, Task, User Story, Test Case.
    :type type: str
    :param url: WorkItem Url.
    :type url: str
:param web_url: WorkItem WebUrl.
:type web_url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'web_url': {'key': 'webUrl', 'type': 'str'}
}
def __init__(self, id=None, name=None, type=None, url=None, web_url=None):
super(WorkItemReference, self).__init__()
self.id = id
self.name = name
self.type = type
self.url = url
self.web_url = web_url
class WorkItemToTestLinks(Model):
"""
:param executed_in:
:type executed_in: object
:param tests:
:type tests: list of :class:`TestMethod <azure.devops.v7_1.test.models.TestMethod>`
:param work_item:
:type work_item: :class:`WorkItemReference <azure.devops.v7_1.test.models.WorkItemReference>`
"""
_attribute_map = {
'executed_in': {'key': 'executedIn', 'type': 'object'},
'tests': {'key': 'tests', 'type': '[TestMethod]'},
'work_item': {'key': 'workItem', 'type': 'WorkItemReference'}
}
def __init__(self, executed_in=None, tests=None, work_item=None):
super(WorkItemToTestLinks, self).__init__()
self.executed_in = executed_in
self.tests = tests
self.work_item = work_item
class TestActionResultModel(TestResultModelBase):
"""
Represents a test step result.
:param comment: Comment in result.
:type comment: str
    :param completed_date: Time when execution completed (UTC).
:type completed_date: datetime
:param duration_in_ms: Duration of execution.
:type duration_in_ms: float
:param error_message: Error message in result.
:type error_message: str
:param outcome: Test outcome of result.
:type outcome: str
    :param started_date: Time when execution started (UTC).
:type started_date: datetime
    :param action_path: Path identifier for the test step in the test case workitem. Note: 1) It is represented in hexadecimal format with 8 digits per step. 2) Internally, the step ID for the first step starts at 2, so actionPath = 00000002; step 9 will have ID = 10 and actionPath = 0000000a; step 15 will have ID = 16 and actionPath = 00000010. 3) The actionPath of a shared step is concatenated with that of the parent step of the test case. Example: 0000000300000001, where 00000003 is the action path of the test step and 00000001 is the action path of the shared step.
:type action_path: str
:param iteration_id: Iteration ID of test action result.
:type iteration_id: int
:param shared_step_model: Reference to shared step workitem.
:type shared_step_model: :class:`SharedStepModel <azure.devops.v7_1.test.models.SharedStepModel>`
    :param step_identifier: The step Id of the test case. For a shared step, it is the step Id of the shared step in the test case workitem followed by the step Id within the shared step. Example: a TestCase workitem has two steps: 1) a normal step with Id = 1, and 2) a shared step with Id = 2 that contains a normal step with Id = 1. The StepIdentifier is "1" for the first step and "2;1" for the second.
:type step_identifier: str
:param url: Url of test action result. Deprecated in hosted environment.
:type url: str
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'action_path': {'key': 'actionPath', 'type': 'str'},
'iteration_id': {'key': 'iterationId', 'type': 'int'},
'shared_step_model': {'key': 'sharedStepModel', 'type': 'SharedStepModel'},
'step_identifier': {'key': 'stepIdentifier', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, comment=None, completed_date=None, duration_in_ms=None, error_message=None, outcome=None, started_date=None, action_path=None, iteration_id=None, shared_step_model=None, step_identifier=None, url=None):
super(TestActionResultModel, self).__init__(comment=comment, completed_date=completed_date, duration_in_ms=duration_in_ms, error_message=error_message, outcome=outcome, started_date=started_date)
self.action_path = action_path
self.iteration_id = iteration_id
self.shared_step_model = shared_step_model
self.step_identifier = step_identifier
self.url = url
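# Illustrative sketch (not part of the generated model code): constructing a
# step result by hand, using the actionPath/stepIdentifier encoding described
# in the docstring above. All values below are hypothetical.
#
# step_result = TestActionResultModel(
#     outcome='Passed',
#     iteration_id=1,
#     action_path='0000000300000001',  # shared-step path appended to parent step path
#     step_identifier='2;1',           # step 1 inside the shared step with Id 2
# )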
__all__ = [
'AggregatedDataForResultTrend',
'AggregatedResultsAnalysis',
'AggregatedResultsByOutcome',
'AggregatedResultsDifference',
'AggregatedRunsByOutcome',
'AggregatedRunsByState',
'BuildConfiguration',
'BuildCoverage',
'BuildReference',
'CloneOperationInformation',
'CloneOptions',
'CloneStatistics',
'CodeCoverageData',
'CodeCoverageStatistics',
'CodeCoverageSummary',
'CoverageStatistics',
'CustomTestField',
'CustomTestFieldDefinition',
'DtlEnvironmentDetails',
'FailingSince',
'FieldDetailsForTestResults',
'FunctionCoverage',
'GraphSubjectBase',
'IdentityRef',
'JobReference',
'LastResultDetails',
'LinkedWorkItemsQuery',
'LinkedWorkItemsQueryResult',
'ModuleCoverage',
'NameValuePair',
'PhaseReference',
'PipelineReference',
'PlanUpdateModel',
'PointAssignment',
'PointsFilter',
'PointUpdateModel',
'PropertyBag',
'QueryModel',
'ReferenceLinks',
'ReleaseEnvironmentDefinitionReference',
'ReleaseReference',
'ResultRetentionSettings',
'ResultsFilter',
'RunCreateModel',
'RunFilter',
'RunStatistic',
'RunSummaryModel',
'RunUpdateModel',
'ShallowReference',
'ShallowTestCaseResult',
'SharedStepModel',
'StageReference',
'SuiteCreateModel',
'SuiteEntry',
'SuiteEntryUpdateModel',
'SuiteTestCase',
'SuiteTestCaseUpdateModel',
'SuiteUpdateModel',
'TeamContext',
'TeamProjectReference',
'TestAttachment',
'TestAttachmentReference',
'TestAttachmentRequestModel',
'TestCaseResult',
'TestCaseResultAttachmentModel',
'TestCaseResultIdentifier',
'TestCaseResultUpdateModel',
'TestConfiguration',
'TestEnvironment',
'TestFailureDetails',
'TestFailuresAnalysis',
'TestFlakyIdentifier',
'TestHistoryQuery',
'TestIterationDetailsModel',
'TestMessageLogDetails',
'TestMethod',
'TestOperationReference',
'TestOutcomeSettings',
'TestPlan',
'TestPlanCloneRequest',
'TestPoint',
'TestPointsQuery',
'TestResolutionState',
'TestResultCreateModel',
'TestResultDocument',
'TestResultHistory',
'TestResultHistoryDetailsForGroup',
'TestResultHistoryForGroup',
'TestResultMetaData',
'TestResultModelBase',
'TestResultParameterModel',
'TestResultPayload',
'TestResultsContext',
'TestResultsDetails',
'TestResultsDetailsForGroup',
'TestResultsGroupsForBuild',
'TestResultsGroupsForRelease',
'TestResultsQuery',
'TestResultSummary',
'TestResultTrendFilter',
'TestRun',
'TestRunCoverage',
'TestRunStatistic',
'TestSession',
'TestSettings',
'TestSubResult',
'TestSuite',
'TestSuiteCloneRequest',
'TestSummaryForWorkItem',
'TestTag',
'TestToWorkItemLinks',
'TestVariable',
'WorkItemReference',
'WorkItemToTestLinks',
'TestActionResultModel',
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/test/models.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/test/models.py",
"repo_id": "azure-devops-python-api",
"token_count": 78958
}
| 402 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class UPackApiClient(Client):
"""UPackApi
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(UPackApiClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'd397749b-f115-4027-b6dd-77a65dd10d21'
def update_package_versions(self, batch_request, feed_id, project=None):
"""UpdatePackageVersions.
[Preview API] Update several packages from a single feed in a single request. The updates to the packages do not happen atomically.
:param :class:`<UPackPackagesBatchRequest> <azure.devops.v7_1.upack_api.models.UPackPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed_id: Name or ID of the feed.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'UPackPackagesBatchRequest')
self._send(http_method='POST',
location_id='c17e81ae-4caa-4d8b-a431-6b329e890281',
version='7.1-preview.1',
route_values=route_values,
content=content)
def update_recycle_bin_package_versions(self, batch_request, feed_id, project=None):
"""UpdateRecycleBinPackageVersions.
[Preview API] Delete or restore several package versions from the recycle bin.
        :param :class:`<UPackPackagesBatchRequest> <azure.devops.v7_1.upack_api.models.UPackPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data. Operation must be PermanentDelete or RestoreToFeed.
:param str feed_id: Feed which contains the packages to update.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'UPackPackagesBatchRequest')
self._send(http_method='POST',
location_id='12f73313-0937-4114-bb9f-4e9e720fdc78',
version='7.1-preview.1',
route_values=route_values,
content=content)
def delete_package_version_from_recycle_bin(self, feed_id, package_name, package_version, project=None):
"""DeletePackageVersionFromRecycleBin.
[Preview API] Delete a package version from the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
self._send(http_method='DELETE',
location_id='3ba455ae-31e6-409e-849f-56c66888d004',
version='7.1-preview.1',
route_values=route_values)
def get_package_version_metadata_from_recycle_bin(self, feed_id, package_name, package_version, project=None):
"""GetPackageVersionMetadataFromRecycleBin.
[Preview API] Get information about a package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<UPackPackageVersionDeletionState> <azure.devops.v7_1.upack_api.models.UPackPackageVersionDeletionState>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='3ba455ae-31e6-409e-849f-56c66888d004',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('UPackPackageVersionDeletionState', response)
def restore_package_version_from_recycle_bin(self, package_version_details, feed_id, package_name, package_version, project=None):
"""RestorePackageVersionFromRecycleBin.
[Preview API] Restore a package version from the recycle bin to its associated feed.
:param :class:`<UPackRecycleBinPackageVersionDetails> <azure.devops.v7_1.upack_api.models.UPackRecycleBinPackageVersionDetails>` package_version_details: Set the 'Deleted' property to 'false' to restore the package.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'UPackRecycleBinPackageVersionDetails')
self._send(http_method='PATCH',
location_id='3ba455ae-31e6-409e-849f-56c66888d004',
version='7.1-preview.1',
route_values=route_values,
content=content)
def delete_package_version(self, feed_id, package_name, package_version, project=None):
"""DeletePackageVersion.
        [Preview API] Delete a package version from a feed and move it to the feed's recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<Package> <azure.devops.v7_1.upack_api.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='DELETE',
location_id='72f61ca4-e07c-4eca-be75-6c0b2f3f4051',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('Package', response)
def get_package_version(self, feed_id, package_name, package_version, project=None, show_deleted=None):
"""GetPackageVersion.
[Preview API] Show information about a package version.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
:param bool show_deleted: True to show information for deleted versions
:rtype: :class:`<Package> <azure.devops.v7_1.upack_api.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
query_parameters = {}
if show_deleted is not None:
query_parameters['showDeleted'] = self._serialize.query('show_deleted', show_deleted, 'bool')
response = self._send(http_method='GET',
location_id='72f61ca4-e07c-4eca-be75-6c0b2f3f4051',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Package', response)
def update_package_version(self, package_version_details, feed_id, package_name, package_version, project=None):
"""UpdatePackageVersion.
[Preview API] Update information for a package version.
:param :class:`<PackageVersionDetails> <azure.devops.v7_1.upack_api.models.PackageVersionDetails>` package_version_details:
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
self._send(http_method='PATCH',
location_id='72f61ca4-e07c-4eca-be75-6c0b2f3f4051',
version='7.1-preview.1',
route_values=route_values,
content=content)
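# Illustrative usage sketch (assumption, not generated code). The client is
# normally obtained through azure.devops.connection.Connection; it is built
# directly here for brevity. The organization URL, feed, package, and PAT are
# hypothetical.
#
# from msrest.authentication import BasicAuthentication
#
# creds = BasicAuthentication('', personal_access_token)
# client = UPackApiClient(base_url='https://dev.azure.com/myorg', creds=creds)
# package = client.get_package_version(
#     feed_id='my-feed',
#     package_name='my-package',
#     package_version='1.0.0',
#     project='MyProject',
# )
# print(package)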
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/upack_api/upack_api_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/upack_api/upack_api_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 5042
}
| 403 |
@REM init section. Set _echo=1 to echo everything
@IF NOT DEFINED _echo ECHO OFF
IF EXIST "%BUILD_BINARIESDIRECTORY%\python.3.6.2\tools\python.exe" (
REM Build step installs Python here.
SET PYTHONEXE=%BUILD_BINARIESDIRECTORY%\python.3.6.2\tools\python.exe
) ELSE (
SET PYTHONEXE=python.exe
)
"%PYTHONEXE%" %~dp0\..\sdist.py
IF ERRORLEVEL 1 GOTO FAIL
SET PYTHONEXE=
GOTO :EOF
:FAIL
ECHO sdist failed.
EXIT /B 1
|
azure-devops-python-api/scripts/windows/sdist.cmd/0
|
{
"file_path": "azure-devops-python-api/scripts/windows/sdist.cmd",
"repo_id": "azure-devops-python-api",
"token_count": 197
}
| 404 |
# coding=utf-8
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
from ._chained import *
from ._default import _DefaultAzureCredential
from ._token import _TokenFileCredential
|
azure-quantum-python/azure-quantum/azure/quantum/_authentication/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/_authentication/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 63
}
| 405 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Customize generated code here.
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
from typing import List
__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.
`patch_sdk` is a last resort escape hatch that allows you to do customizations
you can't accomplish using the techniques described in
https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
|
azure-quantum-python/azure-quantum/azure/quantum/_client/operations/_patch.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/_client/operations/_patch.py",
"repo_id": "azure-quantum-python",
"token_count": 181
}
| 406 |
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
import re
import abc
from typing import Optional
from datetime import date, datetime, timezone
from azure.quantum._client.models import JobStatus
class FilteredJob(abc.ABC):
"""
Mixin for adding methods to filter jobs
"""
def matches_filter(
self,
name_match: str = None,
status: Optional[JobStatus] = None,
created_after: Optional[datetime] = None
) -> bool:
"""Checks if job (self) matches the given properties if any.
:param name_match: regex expression for job name matching
:type name_match: str
:param status: filter by job status
:type status: Optional[JobStatus]
        :param created_after: filter jobs created after this date/time
        :type created_after: Optional[datetime]
:return: Is filter match
:rtype: bool
"""
if name_match is not None and re.search(name_match, self.details.name) is None:
return False
if status is not None and self.details.status != status.value:
return False
if created_after is not None:
# if supplied date is date we must convert to datetime first
if isinstance(created_after, date):
created_after = datetime(created_after.year, created_after.month, created_after.day)
# if supplied date is naive, assume local and convert to timezone aware object
if created_after.tzinfo is None:
created_after = created_after.astimezone()
if self.details.creation_time.replace(tzinfo=timezone.utc) < created_after:
return False
return True
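# Illustrative sketch (assumption, not part of this module): applying the mixin's
# filter to a collection of jobs. `workspace.list_jobs()` and the job objects are
# hypothetical stand-ins for objects that mix in FilteredJob.
#
# from datetime import datetime
# from azure.quantum._client.models import JobStatus
#
# failed_recent = [
#     job for job in workspace.list_jobs()
#     if job.matches_filter(
#         name_match=r'^experiment-',          # regex matched against job name
#         status=JobStatus.FAILED,
#         created_after=datetime(2023, 1, 1),  # naive datetimes are treated as local
#     )
# ]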
|
azure-quantum-python/azure-quantum/azure/quantum/job/filtered_job.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/job/filtered_job.py",
"repo_id": "azure-quantum-python",
"token_count": 753
}
| 407 |
"""Defines targets and helper functions for the Pasqal provider"""
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
__all__ = [
"InputParams",
"Pasqal",
"PasqalTarget",
]
from dataclasses import dataclass
from enum import Enum
from typing import Union, Any, Dict, List, Optional
from ..target import Target
from ... import Job
from ...workspace import Workspace
class PasqalTarget(str, Enum):
"""The known targets for the Pasqal provider
"""
SIM_EMU_TN = "pasqal.sim.emu-tn"
"""pasqal.sim.emu-tn target"""
QPU_FRESNEL = "pasqal.qpu.fresnel"
"""A simulator target for Quil. See https://github.com/quil-lang/qvm for more info."""
def simulators() -> List[str]:
"""Returns a list of simulator targets"""
return [
PasqalTarget.SIM_EMU_TN.value
]
def qpus() -> List[str]:
"""Returns a list of QPU targets"""
return [
PasqalTarget.QPU_FRESNEL.value
]
def num_qubits(target_name) -> int:
"""Returns the number of qubits supported by the given target"""
if target_name == PasqalTarget.SIM_EMU_TN.value:
return 100
elif target_name == PasqalTarget.QPU_FRESNEL.value:
return 20
else:
raise ValueError(f"Unknown target {target_name}")
@dataclass
class InputParams:
"""Input parameters
Args:
runs (int): The number of times to run the experiment.
"""
runs: int = 1
"""The number of times to run the experiment."""
class Pasqal(Target):
"""Pasqal target, defaults to the simulator PasqalTarget.SIM_EMU_TN
In order to process the results of a Quil input to this target, we recommend using the included Result class.
"""
target_names = tuple(target.value for target in PasqalTarget)
_SHOTS_PARAM_NAME = "count"
def __init__(
self,
workspace: Workspace,
name: Union[PasqalTarget, str] = PasqalTarget.SIM_EMU_TN,
input_data_format: str = "pasqal.pulser.v1",
output_data_format: str = "pasqal.pulser-results.v1",
capability: str = "BasicExecution",
provider_id: str = "pasqal",
encoding: str = "",
**kwargs,
):
"""
Initializes a new target.
:param workspace: Associated workspace
:type workspace: Workspace
:param name: Target name
:type name: str
:param input_data_format: Format of input data (ex. "pasqal.pulser.v1")
:type input_data_format: str
:param output_data_format: Format of output data (ex. "pasqal.pulser-results.v1")
:type output_data_format: str
:param capability: QIR capability
:type capability: str
:param provider_id: Id of provider (ex. "pasqal")
:type provider_id: str
:param encoding: "Content-Encoding" attribute value to set on input blob (ex. "gzip")
:type encoding: str
"""
super().__init__(
workspace=workspace,
name=name,
input_data_format=input_data_format,
output_data_format=output_data_format,
capability=capability,
provider_id=provider_id,
content_type="application/json",
encoding=encoding,
**kwargs,
)
def submit(
self,
input_data: Any,
name: str = "azure-quantum-job",
shots: int = None,
input_params: Union[InputParams, None, Dict[str, Any]] = None,
**kwargs,
) -> Job:
"""Submit input data and return Job.
Provide input_data_format, output_data_format and content_type
keyword arguments to override default values.
:param input_data: Input data
:type input_data: Any
:param name: Job name
:type name: str
:param shots: Number of shots, defaults to None
:type shots: int
:param input_params: Input parameters, see :class:`azure.quantum.target.pasqal.InputParams` for details.
:type input_params: Union[InputParams, None, Dict[str, Any]]
:return: Azure Quantum job
:rtype: Job
"""
if isinstance(input_params, InputParams):
typed_input_params = input_params
input_params = {
self.__class__._SHOTS_PARAM_NAME: typed_input_params.runs,
}
input_params = input_params or {}
return super().submit(
input_data=input_data,
name=name,
shots=shots,
input_params=input_params,
**kwargs
)
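# Illustrative sketch (assumption, not part of this module): submitting a
# serialized Pulser sequence to the Fresnel QPU. `workspace` and
# `serialized_sequence` are hypothetical; sequences are typically built with
# the `pulser` package and serialized to the pasqal.pulser.v1 JSON format.
#
# target = Pasqal(workspace=workspace, name=PasqalTarget.QPU_FRESNEL)
# job = target.submit(
#     input_data=serialized_sequence,
#     name='fresnel-demo',
#     input_params=InputParams(runs=100),  # forwarded as the "count" parameter
# )
# results = job.get_results()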
|
azure-quantum-python/azure-quantum/azure/quantum/target/pasqal/target.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/pasqal/target.py",
"repo_id": "azure-quantum-python",
"token_count": 2049
}
| 408 |
{
"arguments": [
{
"name": "bitwidth",
"value": 32,
"type": "Int"
}
]
}
|
azure-quantum-python/azure-quantum/examples/resource_estimation/cli_test_files/multiplier.json/0
|
{
"file_path": "azure-quantum-python/azure-quantum/examples/resource_estimation/cli_test_files/multiplier.json",
"repo_id": "azure-quantum-python",
"token_count": 89
}
| 409 |
namespace QSharpBellState {
open Microsoft.Quantum.Intrinsic;
operation BellState_File() : (Result,Result) {
use q0 = Qubit();
use q1 = Qubit();
H(q0);
CNOT(q0, q1);
return (M(q0), M(q1));
}
}
|
azure-quantum-python/azure-quantum/tests/unit/QSharpBellState.qs/0
|
{
"file_path": "azure-quantum-python/azure-quantum/tests/unit/QSharpBellState.qs",
"repo_id": "azure-quantum-python",
"token_count": 129
}
| 410 |