| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| stringlengths 5–22M | stringlengths 12–177 | dict | int64 0–1.37k |
from typing import Optional
import torch
from torch import nn
from transformers.pytorch_utils import Conv1D
from transformers import PretrainedConfig
from archai.discrete_search.search_spaces.config import ArchConfig
from archai.discrete_search.search_spaces.nlp.tfpp.ops import OPS
class MixedAttentionBlock(nn.Module):
def __init__(self, arch_config: ArchConfig, hf_config: PretrainedConfig,
hidden_size: int, layer_idx: Optional[int] = None) -> None:
super().__init__()
self.total_heads = arch_config.pick('total_heads')
self.op_allocation = {
op_name: round(self.total_heads * op_prop)
for op_name, op_prop in arch_config.pick('op_allocation')
}
self.hf_config = hf_config
self.hidden_size = hidden_size
self.layer_idx = layer_idx
self.head_size = hidden_size // self.total_heads
assert hidden_size % self.total_heads == 0
assert sum(self.op_allocation.values()) == self.total_heads, \
'Invalid allocation: op head counts must sum to total_heads'
op_kwargs = {
'hidden_size': self.hidden_size,
'total_heads': self.total_heads,
'hf_config': self.hf_config,
'layer_idx': self.layer_idx
}
self.ops = nn.ModuleList([
OPS[op_name].cls(
arch_config=arch_config.pick(op_name) if OPS[op_name].requires_extra_config else None,
op_heads=self.op_allocation[op_name],
**op_kwargs
) for op_name, op_heads in self.op_allocation.items()
if op_heads > 0
])
self.resid_dropout = nn.Dropout(self.hf_config.resid_pdrop)
self.out_proj = Conv1D(self.hidden_size, self.hidden_size)
def forward(self, hidden_states: torch.Tensor, **kwargs):
# Concatenate outputs from each op along the embedding dim
output = [op(hidden_states, **kwargs)[0] for op in self.ops]
output = torch.cat(output, dim=-1)
# TODO: return present values
return self.resid_dropout(self.out_proj(output)), None
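# --- Hedged illustration (added; not part of the original module) ---
# The constructor's head-allocation arithmetic in isolation: every op gets
# round(total_heads * proportion) heads and the rounded counts must add back
# up to total_heads, mirroring the assert above. The op names are hypothetical.
def _illustrate_head_allocation(total_heads: int = 8,
                                op_allocation=(('attn', 0.75), ('sgconv', 0.25))):
    allocation = {name: round(total_heads * prop) for name, prop in op_allocation}
    assert sum(allocation.values()) == total_heads, 'Invalid allocation'
    return allocation  # e.g. {'attn': 6, 'sgconv': 2}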
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/mixed_attention.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/mixed_attention.py",
"repo_id": "archai",
"token_count": 968
}
| 328 |
# Copied from https://github.com/HazyResearch/state-spaces/blob/06dbbdfd0876501a7f12bf3262121badbc7658af/src/models/sequence/ss/dplr.py
"""Initializations of structured state space models"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from . import hippo
def dplr(scaling='linear', N=64, rank=1, H=1, dtype=torch.float, real_scale=1.0, imag_scale=1.0, random_real=False, random_imag=False, normalize=False, diagonal=True, random_B=False):
assert dtype == torch.float or dtype == torch.double
dtype = torch.cfloat if dtype == torch.float else torch.cdouble
pi = torch.tensor(math.pi)
if random_real:
real_part = torch.rand(H, N//2)
else:
real_part = .5 * torch.ones(H, N//2)
if random_imag:
imag_part = N//2 * torch.rand(H, N//2)
else:
imag_part = repeat(torch.arange(N//2), 'n -> h n', h=H)
real_part = real_scale * real_part
if scaling == 'random':
imag_part = torch.randn(H, N//2)
elif scaling == 'real':
imag_part = 0 * imag_part
real_part = 1 + repeat(torch.arange(N//2), 'n -> h n', h=H)
elif scaling in ['linear', 'lin']:
imag_part = pi * imag_part
elif scaling in ['inverse', 'inv']: # Based on asymptotics of the default HiPPO matrix
imag_part = 1/pi * N * (N/(1+2*imag_part)-1)
elif scaling in ['inverse2', 'inv2']:
imag_part = 1/pi * N * (N/(1+imag_part)-1)
elif scaling in ['quadratic', 'quad']:
imag_part = 1/pi * (1+2*imag_part)**2
elif scaling in ['legs', 'hippo']:
w, _, _, _ = hippo.nplr('legsd', N)
imag_part = w.imag
else: raise NotImplementedError
imag_part = imag_scale * imag_part
w = -real_part + 1j * imag_part
# Initialize B
if random_B:
B = torch.randn(H, N//2, dtype=dtype)
else:
B = torch.ones(H, N//2, dtype=dtype)
if normalize:
norm = -B/w # (H, N) # Result if you integrate the kernel with constant 1 function
zeta = 2*torch.sum(torch.abs(norm)**2, dim=-1, keepdim=True) # Variance with a random C vector
B = B / zeta**.5
P = torch.randn(rank, H, N//2, dtype=dtype)
if diagonal: P = P * 0.0
V = torch.eye(N, dtype=dtype)[:, :N//2] # Only used in testing
V = repeat(V, 'n m -> h n m', h=H)
return w, P, B, V
def ssm(measure, N, R, H, **ssm_args):
"""Dispatcher to create single SSM initialization
N: state size
R: rank (for DPLR parameterization)
H: number of independent SSM copies
"""
if measure == "dplr":
w, P, B, V = dplr(N=N, rank=R, H=H, **ssm_args)
elif measure.startswith("diag"):
args = measure.split("-")
assert args[0] == "diag" and len(args) > 1
scaling = args[1]
w, P, B, V = dplr(scaling=scaling, N=N, rank=R, H=H, diagonal=True, **ssm_args)
else:
w, P, B, V = hippo.nplr(measure, N, R, **ssm_args)
w = repeat(w, 'n -> s n', s=H)
P = repeat(P, 'r n -> r s n', s=H)
B = repeat(B, 'n -> s n', s=H)
V = repeat(V, 'n m -> s n m', s=H)
return w, P, B, V
combinations = {
'hippo': ['legs', 'fourier'],
'diag': ['diag-inv', 'diag-lin'],
'all': ['legs', 'fourier', 'diag-inv', 'diag-lin'],
}
def combination(measures, N, R, S, **ssm_args):
if isinstance(measures, str):
measures = combinations[measures] if measures in combinations else [measures]
assert S % len(measures) == 0, f"{S} independent trainable SSM copies must be multiple of {len(measures)} different measures"
w, P, B, V = zip(
*[ssm(measure, N, R, S // len(measures), **ssm_args) for measure in measures]
)
w = torch.cat(w, dim=0) # (S N)
P = torch.cat(P, dim=1) # (R S N)
B = torch.cat(B, dim=0) # (S N)
V = torch.cat(V, dim=0) # (S N N)
return w, P, B, V
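# Hedged sanity check (added for illustration; not part of the original file):
# shapes produced by the default linear DPLR initialization. With N=64 state
# dims and H=2 copies, w and B are (H, N/2) complex tensors, P is
# (rank, H, N/2), and V is the (H, N, N/2) test basis; real parts of the
# eigenvalues are negative, i.e. the SSM poles are stable.
if __name__ == '__main__':
    w, P, B, V = dplr(scaling='linear', N=64, rank=1, H=2)
    print(w.shape, P.shape, B.shape, V.shape)
    # torch.Size([2, 32]) torch.Size([1, 2, 32]) torch.Size([2, 32]) torch.Size([2, 64, 32])
    assert (w.real < 0).all()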
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/dplr.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/dplr.py",
"repo_id": "archai",
"token_count": 1776
}
| 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from collections import OrderedDict
from typing import Any, Mapping, Optional, Tuple
import torch
from overrides import overrides
from overrides.enforce import EnforceOverrides
from transformers.configuration_utils import PretrainedConfig
class OnnxConfig(EnforceOverrides):
"""Base ONNX configuration.
This class defines a base ONNX configuration for a specific task, which includes the
input and output structure required for ONNX models, as well as additional properties
and methods for handling ONNX Runtime graph optimization.
"""
DEFAULT_TASK_OUTPUTS = {"causal-lm": OrderedDict({"probs": {0: "batch_size"}})}
def __init__(
self,
config: PretrainedConfig,
task: Optional[str] = "causal-lm",
) -> None:
"""Initialize the ONNX configuration by verifying whether the
specified `task` is supported.
Args:
config: Configuration of the model being exported.
task: Type of task that the exported model will be used for.
"""
assert task in self.DEFAULT_TASK_OUTPUTS.keys(), f"`task`: {task} is not supported yet."
self.config = config
self.task = task
@property
def is_ort_graph_optimizable(self) -> bool:
"""Return whether configuration supports additional graph optimization."""
return False
@property
def ort_graph_optimizer_args(self) -> Optional[Tuple[Any, ...]]:
"""Return additional arguments used by the ORT graph optimizer."""
return None
def get_inputs(self) -> Mapping[str, Mapping[int, str]]:
"""Get the ONNX-based inputs structure.
Returns:
ONNX-based inputs.
"""
return OrderedDict({"input_ids": {0: "batch_size", 1: "seq_len"}})
def get_outputs(self) -> Mapping[str, Mapping[int, str]]:
"""Get the ONNX-based outputs structure.
Returns:
ONNX-based outputs.
"""
return copy.deepcopy(self.DEFAULT_TASK_OUTPUTS[self.task])
def generate_dummy_inputs(
self, batch_size: Optional[int] = 2, seq_len: Optional[int] = 8
) -> Mapping[str, torch.Tensor]:
"""Generate dummy inputs for the ONNX exporter.
Args:
batch_size: Batch size.
seq_len: Sequence length.
Returns:
Keyword arguments for the model's `forward()` function.
"""
assert seq_len <= self.config.max_position_embeddings, (
f"seq_len ({seq_len}) must be smaller than max_position_embeddings"
f" ({self.config.max_position_embeddings})"
)
return {"input_ids": torch.zeros((batch_size, seq_len), dtype=torch.long)}
class OnnxConfigWithPast(OnnxConfig):
"""ONNX configuration with support for past key/values.
This class is a subclass of `OnnxConfig` that adds the ability to use past key/values
(also known as 'use_cache') in the model's ONNX export.
"""
def __init__(
self,
config: PretrainedConfig,
task: Optional[str] = "causal-lm",
use_past: Optional[bool] = False,
past_key_values: Optional[int] = 2,
) -> None:
"""Initialize the ONNX configuration with past key/values.
Args:
config: Model's configuration.
task: Type of task that the exported model will be used for.
use_past: Whether past key/values should be used.
past_key_values: Number of past-related information (2 for key and values).
"""
super().__init__(config, task=task)
if use_past:
self.config.use_cache = True
self.config.past_key_values = past_key_values
else:
self.config.use_cache = False
self.use_past = use_past
@property
def hidden_size(self) -> int:
"""Return the dimensionality of hidden units."""
if not hasattr(self.config, "hidden_size"):
raise AttributeError("Please override `hidden_size` with correct attribute.")
return self.config.hidden_size
@property
def num_layers(self) -> int:
"""Return the number of layers."""
if not hasattr(self.config, "num_layers"):
raise AttributeError("Please override `num_layers` with correct attribute.")
return self.config.num_layers
@property
def num_attention_heads(self) -> int:
"""Return the number of attention heads."""
if not hasattr(self.config, "num_attention_heads"):
raise AttributeError("Please override `num_attention_heads` with correct attribute.")
return self.config.num_attention_heads
@overrides
def get_inputs(self) -> Mapping[str, Mapping[int, str]]:
inputs = super().get_inputs()
if self.use_past:
for i in range(self.num_layers):
# [past_key_values, batch_size, n_head, past_seq_len, d_head]
inputs[f"past_{i}"] = {1: "batch_size", 3: "past_seq_len"}
return inputs
@overrides
def get_outputs(self) -> Mapping[str, Mapping[int, str]]:
outputs = super().get_outputs()
if self.use_past:
for i in range(self.num_layers):
# [past_key_values, batch_size, n_head, total_seq_len, d_head]
# Note that total_seq_len is seq_len + past_seq_len
outputs[f"present_{i}"] = {1: "batch_size", 3: "total_seq_len"}
return outputs
@overrides
def generate_dummy_inputs(
self, batch_size: Optional[int] = 2, seq_len: Optional[int] = 8, past_seq_len: Optional[int] = 8
) -> Mapping[str, torch.Tensor]:
"""Generate dummy inputs for the ONNX exporter.
Args:
batch_size: Batch size.
seq_len: Sequence length.
past_seq_len: Past key/values sequence length.
Returns:
Keyword arguments for the model's `forward()` function.
"""
assert seq_len + past_seq_len <= self.config.max_position_embeddings, (
f"Dummy input generated size ({seq_len + past_seq_len}) must be smaller"
f" than max_position_embeddings ({self.config.max_position_embeddings})."
)
dummy_inputs = super().generate_dummy_inputs(batch_size, seq_len)
if self.use_past:
# [past_key_values, batch_size, n_head, past_seq_len, d_head]
dummy_inputs["past_key_values"] = tuple(
[
torch.zeros(
self.config.past_key_values,
batch_size,
self.num_attention_heads,
past_seq_len,
self.hidden_size // self.num_attention_heads,
)
for _ in range(self.num_layers)
]
)
return dummy_inputs
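# Hedged usage sketch (added for illustration; not part of the original file).
# `PretrainedConfig` stores unknown kwargs as plain attributes, which is all
# the properties above read, so a bare config is enough to exercise the class:
if __name__ == '__main__':
    cfg = PretrainedConfig(max_position_embeddings=1024, hidden_size=64,
                           num_layers=2, num_attention_heads=4)
    onnx_config = OnnxConfigWithPast(cfg, use_past=True)
    dummy = onnx_config.generate_dummy_inputs(batch_size=2, seq_len=8, past_seq_len=8)
    print(dummy['input_ids'].shape)           # torch.Size([2, 8])
    print(dummy['past_key_values'][0].shape)  # torch.Size([2, 2, 4, 8, 16])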
|
archai/archai/onnx/config_utils/onnx_config_base.py/0
|
{
"file_path": "archai/archai/onnx/config_utils/onnx_config_base.py",
"repo_id": "archai",
"token_count": 3036
}
| 330 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import Any, Dict, Optional
import torch
from archai.quantization.modules import (
FakeDynamicQuantConv1d,
FakeDynamicQuantConv1dForOnnx,
FakeDynamicQuantLinear,
FakeDynamicQuantLinearForOnnx,
FakeQuantEmbedding,
FakeQuantEmbeddingForOnnx,
)
DYNAMIC_QAT_MODULE_MAP = {
torch.nn.Conv1d: FakeDynamicQuantConv1d,
torch.nn.Linear: FakeDynamicQuantLinear,
torch.nn.Embedding: FakeQuantEmbedding,
}
ONNX_DYNAMIC_QAT_MODULE_MAP = {
torch.nn.Conv1d: FakeDynamicQuantConv1dForOnnx,
torch.nn.Linear: FakeDynamicQuantLinearForOnnx,
torch.nn.Embedding: FakeQuantEmbeddingForOnnx,
}
try:
import transformers
from archai.quantization.nlp.modules import (
FakeDynamicQuantHFConv1D,
FakeDynamicQuantHFConv1DForOnnx,
)
DYNAMIC_QAT_MODULE_MAP[transformers.modeling_utils.Conv1D] = FakeDynamicQuantHFConv1D
ONNX_DYNAMIC_QAT_MODULE_MAP[transformers.modeling_utils.Conv1D] = FakeDynamicQuantHFConv1DForOnnx
except ModuleNotFoundError:
print("`archai.quantization.nlp` is not available. If needed, install with: pip install archai[nlp].")
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
def qat_to_float_modules(model: torch.nn.Module) -> None:
"""Convert QAT-ready modules to float-based modules.
This function converts all QAT-ready modules in the input model to float-based modules.
It does this recursively, so all sub-modules within the input model will also be
converted if applicable.
Args:
model: QAT-ready module to be converted.
"""
for name in list(model._modules):
module = model._modules[name]
if hasattr(module, "to_float"):
model._modules[name] = module.to_float()
else:
qat_to_float_modules(module)
def float_to_qat_modules(
model: torch.nn.Module,
module_mapping: Optional[Dict[torch.nn.Module, torch.nn.Module]] = DYNAMIC_QAT_MODULE_MAP,
qconfig: Optional[Dict[torch.nn.Module, Any]] = None,
**kwargs
) -> None:
"""Convert float-based modules to QAT-ready modules.
This function converts all float-based modules in the input model to QAT-ready
modules using the provided module mapping. It does this recursively, so all sub-modules
within the input model will also be converted if applicable.
A quantization configuration can also be supplied.
Args:
model: Float-based module to be converted.
module_mapping: Maps between float and QAT-ready modules.
qconfig: Quantization configuration to be used for the conversion.
"""
for name in list(model._modules):
module = model._modules[name]
if type(module) in module_mapping:
if not hasattr(module, "qconfig"):
module.qconfig = qconfig
model._modules[name] = module_mapping[type(module)].from_float(module, qconfig, **kwargs)
else:
float_to_qat_modules(module, module_mapping=module_mapping, qconfig=qconfig, **kwargs)
def prepare_with_qat(
model: torch.nn.Module,
inplace: Optional[bool] = True,
onnx_compatible: Optional[bool] = False,
backend: Optional[str] = "qnnpack",
**kwargs
) -> torch.nn.Module:
"""Prepare a float-based PyTorch model for quantization-aware training (QAT).
This function modifies the input model in place by inserting
QAT-based modules and configurations.
Args:
model: Float-based PyTorch module to be prepared for QAT.
inplace: Whether the prepared QAT model should replace the original model.
onnx_compatible: Whether the prepared QAT model should be compatible with ONNX.
backend: Quantization backend to be used.
Returns:
The input model, modified in place (or not) to be ready for QAT.
"""
logger.info("Preparing model with QAT ...")
prepared_model = model
if not inplace:
prepared_model = copy.deepcopy(model)
qconfig = torch.quantization.get_default_qat_qconfig(backend)
module_mapping = ONNX_DYNAMIC_QAT_MODULE_MAP if onnx_compatible else DYNAMIC_QAT_MODULE_MAP
float_to_qat_modules(prepared_model, module_mapping=module_mapping, qconfig=qconfig, **kwargs)
return prepared_model
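# Hedged usage sketch (added for illustration; not part of the original file),
# assuming the fake-quant modules implement `from_float`/`to_float` as the
# mappings above imply:
if __name__ == '__main__':
    float_model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
    qat_model = prepare_with_qat(float_model, inplace=False, onnx_compatible=True)
    print(type(qat_model[0]).__name__)  # FakeDynamicQuantLinearForOnnx
    # ... QAT training loop would go here ...
    qat_to_float_modules(qat_model)     # convert back to float-based modules
    print(type(qat_model[0]).__name__)  # Linear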
|
archai/archai/quantization/qat.py/0
|
{
"file_path": "archai/archai/quantization/qat.py",
"repo_id": "archai",
"token_count": 1654
}
| 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math as ma
import os
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from typing import Any, Callable, Dict, List, Set, Tuple
import h5py
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from archai.supergraph.algos.divnas.seqopt import SeqOpt
from archai.supergraph.utils.heatmap import heatmap
def create_submod_f(covariance:np.ndarray)->Callable:
def compute_marginal_gain_func(item:int, sub_sel:List[int], S:Set[int]):
assert covariance.shape[0] == covariance.shape[1]
assert len(covariance.shape) == 2
assert len(S) == covariance.shape[0]
sel_set = set(sub_sel)
marg_gain = compute_marginal_gain(item, sel_set, S, covariance)
return marg_gain
return compute_marginal_gain_func
def get_batch(feature_list, batch_size, i):
start_row = batch_size * i
end_row = start_row + batch_size
feats = [feat[start_row:end_row, :] for feat in feature_list]
return feats
def rbf(x:np.ndarray, y:np.ndarray, sigma=0.1)->np.ndarray:
""" Computes the RBF kernel response between two input vectors """
# make sure that inputs are vectors
assert len(x.shape) == 1
assert len(y.shape) == 1
sq_euclidean = np.sum(np.square(x-y))
k = np.exp(-sq_euclidean/(2*sigma*sigma))
return k
def _compute_mi(cov_kernel:np.ndarray, A:Set, V_minus_A:Set):
sigma_A = cov_kernel[np.ix_(list(A), list(A))]
sigma_V_minus_A = cov_kernel[np.ix_(list(V_minus_A), list(V_minus_A))]
I = 0.5 * np.log(np.linalg.det(sigma_A) * np.linalg.det(sigma_V_minus_A) / np.linalg.det(cov_kernel))
return I
def compute_brute_force_sol(cov_kernel:np.ndarray, budget:int)->Tuple[Tuple[Any], float]:
assert cov_kernel.shape[0] == cov_kernel.shape[1]
assert len(cov_kernel.shape) == 2
assert budget > 0 and budget <= cov_kernel.shape[0]
V = set(range(cov_kernel.shape[0]))
# for each combination of budgeted items compute its mutual
# information with the complement set
mis = []
for subset in combinations(range(cov_kernel.shape[0]), budget):
A = set(subset)
V_minus_A = V - A
I = _compute_mi(cov_kernel, A, V_minus_A)
mis.append((subset, I))
# find the maximum subset
max_subset, mi = max(mis, key = lambda x: x[1])
return max_subset, mi
def compute_correlation(covariance:np.ndarray)->np.ndarray:
variance = np.diag(covariance).reshape(-1, 1)
stds = np.sqrt(np.matmul(variance, variance.T))
correlation = covariance / (stds + 1e-16)
return correlation
def compute_covariance_offline(feature_list:List[np.ndarray])->np.ndarray:
"""Compute covariance matrix for high-dimensional features.
feature_shape: (num_samples, feature_dim)
"""
num_features = len(feature_list)
num_samples = feature_list[0].shape[0]
flatten_features = [
feas.reshape(num_samples, -1) for feas in feature_list]
unbiased_features = [
feas - np.mean(feas, 0) for feas in flatten_features]
# (num_samples, feature_dim, num_features)
features = np.stack(unbiased_features, -1)
covariance = np.zeros((num_features, num_features), np.float32)
for i in range(num_samples):
covariance += np.matmul(features[i].T, features[i])
return covariance
def compute_rbf_kernel_covariance(feature_list:List[np.ndarray], sigma=0.1)->np.ndarray:
""" Compute RBF kernel covariance for high-dimensional features.
feature_list: list of features, each of shape (num_samples, feature_dim)
sigma: bandwidth of the RBF kernel """
num_features = len(feature_list)
covariance = np.zeros((num_features, num_features), np.float32)
for i in range(num_features):
for j in range(num_features):
if i == j:
covariance[i][j] = covariance[j][i] = 1.0
continue
# NOTE: one could try to take all pairs rbf responses
# but that is too much computation and probably does
# not add much information
feats_i = feature_list[i]
feats_j = feature_list[j]
assert feats_i.shape == feats_j.shape
rbfs = np.exp(-np.sum(np.square(feats_i - feats_j), axis=1) / (2*sigma*sigma))
avg_cov = np.sum(rbfs)/feats_i.shape[0]
covariance[i][j] = covariance[j][i] = avg_cov
return covariance
def compute_euclidean_dist_quantiles(feature_list:List[np.ndarray], subsamplefactor=1)->List[Tuple[float, float]]:
""" Compute quantiles of the Euclidean distances between feature pairs
feature_list: list of features, each of shape (num_samples, feature_dim)
"""
num_features = len(feature_list)
num_samples = feature_list[0].shape[0]
# (num_samples, feature_dim, num_features)
features = np.stack(feature_list, -1)
# compute all pairwise feature distances
# TODO: too slow, needs to be vectorized
distances = []
for i in range(num_features):
for j in range(num_features):
if i == j:
continue
for k in range(0, num_samples, subsamplefactor):
feat_i = features[k, :][:, i]
feat_j = features[k, :][:, j]
dist = np.sqrt(np.sum(np.square(feat_i-feat_j)))
distances.append(dist)
quantiles = [i*0.1 for i in range(1, 10)]
quant_vals = np.quantile(distances, quantiles)
quants = []
for quant, val in zip(quantiles, quant_vals.tolist()):
quants.append((quant, val))
return quants
def greedy_op_selection(covariance:np.ndarray, k:int)->List[int]:
assert covariance.shape[0] == covariance.shape[1]
assert len(covariance.shape) == 2
assert k <= covariance.shape[0]
A = set()
# to keep order information
A_list = []
S = set()
for i in range(covariance.shape[0]):
S.add(i)
for i in tqdm(range(k)):
marginal_gains = []
marginal_gain_ids = []
for y in S - A:
delta_y = compute_marginal_gain(y, A, S, covariance)
marginal_gains.append(delta_y)
marginal_gain_ids.append(y)
val = -ma.inf
argmax = -1
for marg_gain, marg_gain_id in zip(marginal_gains, marginal_gain_ids):
if marg_gain > val:
val = marg_gain
argmax = marg_gain_id
A.add(argmax)
A_list.append(argmax)
return A_list
def compute_marginal_gain(y:int, A:Set[int], S:Set[int], covariance:np.ndarray)->float:
if A:
A_copy = deepcopy(A)
A_copy.add(y)
else:
A_copy = set()
A_copy.add(y)
A_bar = S - A_copy
sigma_y_sqr = covariance[y, y]
if A:
sigma_AA = covariance[np.ix_(list(A), list(A))]
sigma_yA = covariance[np.ix_([y], list(A))]
numerator = sigma_y_sqr - np.matmul(sigma_yA, np.matmul(np.linalg.inv(sigma_AA), sigma_yA.T))
else:
numerator = sigma_y_sqr
if A_bar:
sigma_AA_bar = covariance[np.ix_(list(A_bar), list(A_bar))]
sigma_yA_bar = covariance[np.ix_([y], list(A_bar))]
denominator = sigma_y_sqr - np.matmul(sigma_yA_bar, np.matmul(np.linalg.inv(sigma_AA_bar), sigma_yA_bar.T))
else:
denominator = sigma_y_sqr
gain = numerator/denominator
return float(gain)
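def _demo_mi_identity():
    """Hedged sanity check (added for illustration; not part of the original
    file): with an identity kernel the features are independent, so any subset
    has zero mutual information with its complement and every marginal gain in
    `greedy_op_selection` equals 1."""
    cov = np.eye(4)
    assert abs(_compute_mi(cov, {0, 1}, {2, 3})) < 1e-12
    assert abs(compute_marginal_gain(0, set(), {0, 1, 2, 3}, cov) - 1.0) < 1e-12
    assert len(greedy_op_selection(cov, 2)) == 2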
def collect_features(rootfolder:str, subsampling_factor:int = 1)->Dict[str, List[np.ndarray]]:
""" Walks the rootfolder for h5 files and loads them into the format
required for analysis.
Inputs:
rootfolder: full path to the folder containing h5 files which hold activations
subsampling_factor: every nth minibatch is loaded to keep memory manageable
Outputs:
dictionary mapping edge name strings to lists of np.ndarray of shape (num_samples, feature_dim)
"""
assert subsampling_factor > 0
# gather all h5 files
h5files = [os.path.join(rootfolder, f) for f in os.listdir(rootfolder) if os.path.isfile(os.path.join(rootfolder, f)) and '.h5' in f]
assert h5files
# storage for holding activations for all edges
all_edges_activs = defaultdict(list)
for h5file in h5files:
with h5py.File(h5file, 'r') as hf:
edge_name = h5file.split('/')[-1].split('.')[-2]
edge_activ_list = []
# load all batches
keys_list = list(hf.keys())
print(f'processing {h5file}, num batches {len(keys_list)}')
for i in range(0, len(keys_list), subsampling_factor):
key = keys_list[i]
payload = np.array(hf.get(key))
edge_activ_list.append(payload)
obsv_dict = defaultdict(list)
# separate activations by ops
for batch in edge_activ_list:
# assumption (num_ops, batch_size, x, y, z)
for op in range(batch.shape[0]):
for b in range(batch.shape[1]):
feat = batch[op][b]
feat = feat.flatten()
obsv_dict[op].append(feat)
num_ops = edge_activ_list[0].shape[0]
feature_list = [np.zeros(1) for _ in range(num_ops)]
for key in obsv_dict.keys():
feat = np.array(obsv_dict[key])
feature_list[key] = feat
# removing none and skip_connect
del feature_list[-1]
del feature_list[2]
all_edges_activs[edge_name] = feature_list
return all_edges_activs
def plot_all_covs(covs_kernel, corr, primitives, axs):
assert axs.shape[0] * axs.shape[1] == len(covs_kernel) + 1
flat_axs = axs.flatten()
for i, quantile in enumerate(covs_kernel.keys()):
cov = covs_kernel[quantile]
heatmap(cov, ax=flat_axs[i], xtick_labels=primitives, ytick_labels=primitives, fmt=".1g", cmap="coolwarm")
flat_axs[i].set_title(f'Kernel covariance sigma={quantile} quantile')
heatmap(corr, ax=flat_axs[-1], xtick_labels=primitives, ytick_labels=primitives, fmt=".1g", cmap="coolwarm")
flat_axs[-1].set_title('Correlation')
def main():
rootfile = '/media/dedey/DATADRIVE1/activations'
all_edges_activs = collect_features(rootfile, subsampling_factor=5)
PRIMITIVES = [
'max_pool_3x3',
'avg_pool_3x3',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5',
]
# # Use all edges
# all_edges_list = []
# all_names_list = []
# for i in all_edges_activs.keys():
# all_edges_list.extend(all_edges_activs[i])
# for prim in PRIMITIVES:
# all_names_list.append(i + '_' + prim)
# Use specific edges
all_edges_list = []
all_names_list = []
# edge_list = ['activations_node_0_edge_0']
edge_list = ['activations_node_0_edge_0', 'activations_node_0_edge_1']
# edge_list = ['activations_node_1_edge_0', 'activations_node_1_edge_1', 'activations_node_1_edge_2']
# edge_list = ['activations_node_2_edge_0', 'activations_node_2_edge_1', 'activations_node_2_edge_2', 'activations_node_2_edge_3']
# edge_list = ['activations_node_3_edge_0', 'activations_node_3_edge_1', 'activations_node_3_edge_2', 'activations_node_3_edge_3', 'activations_node_3_edge_4']
for name in edge_list:
all_edges_list.extend(all_edges_activs[name])
for prim in PRIMITIVES:
all_names_list.append(name + '_' + prim)
# compute covariance like usual
# cov = compute_covariance_offline(all_edges_list)
# corr = compute_correlation(cov)
# heatmap(corr, xtick_labels=all_names_list, ytick_labels=all_names_list, cmap='coolwarm')
# plt.axis('equal')
# plt.show()
# compute kernel covariance
# quants = compute_euclidean_dist_quantiles(all_edges_list, subsamplefactor=20)
cov_kernel_orig = compute_rbf_kernel_covariance(all_edges_list, sigma=168)
cov_kernel = cov_kernel_orig + 1.0*np.eye(cov_kernel_orig.shape[0])
print(f'Det before diag addition {np.linalg.det(cov_kernel_orig)}')
print(f'Det after diag addition {np.linalg.det(cov_kernel)}')
print(f'Condition number is {np.linalg.cond(cov_kernel)}')
heatmap(cov_kernel, xtick_labels=all_names_list, ytick_labels=all_names_list, cmap="coolwarm")
plt.axis('equal')
plt.show()
# brute force solution
budget = 4
bf_sensors, bf_val = compute_brute_force_sol(cov_kernel_orig, budget)
print(f'Brute force max subset {bf_sensors}, max mi {bf_val}')
# greedy
print('Greedy selection')
greedy_ops = greedy_op_selection(cov_kernel, cov_kernel.shape[0])
for i, op_index in enumerate(greedy_ops):
print(f'Greedy op {i} is {all_names_list[op_index]}')
greedy_budget = greedy_ops[:budget]
# find MI of the greedy solution
V = set(range(cov_kernel.shape[0]))
A_greedy = set(greedy_budget)
V_minus_A_greedy = V - A_greedy
I_greedy = _compute_mi(cov_kernel_orig, A_greedy, V_minus_A_greedy)
print(f'Greedy solution is {greedy_budget}, mi is {I_greedy}')
# seqopt
# simulated batch size
batch_size = 64
num_batches = int(all_edges_list[0].shape[0] / batch_size)
# seqopt object that will get updated in an online manner
num_items = cov_kernel.shape[0]
eps = 0.1
seqopt = SeqOpt(num_items, eps)
for i in tqdm(range(num_batches)):
# simulate getting a new batch of activations
sample = get_batch(all_edges_list, batch_size, i)
# sample a list of activations from seqopt
sel_list = seqopt.sample_sequence(with_replacement=False)
# Using 50th percentile distance
sigma = 168.0
cov = compute_rbf_kernel_covariance(sample, sigma=sigma)
# update seqopt
compute_marginal_gain_func = create_submod_f(cov)
seqopt.update(sel_list, compute_marginal_gain_func)
# now sample a list of ops and hope it is diverse
sel_list = seqopt.sample_sequence(with_replacement=False)
# sel_primitives = [all_names_list for i in sel_list]
# print(f'SeqOpt selected primitives are {sel_primitives}')
# check that it is close to greedy and or bruteforce
budget = 4
sel_list = sel_list[:budget]
# find MI of the greedy solution
V = set(range(num_items))
A_seqopt = set(sel_list)
V_minus_A_seqopt = V - A_seqopt
I_seqopt = _compute_mi(cov_kernel_orig, A_seqopt, V_minus_A_seqopt)
print(f'SeqOpt solution is {sel_list}, mi is {I_seqopt}')
# # For enumerating through many choices of rbf sigmas
# covs_kernel = {}
# for quantile, val in quants:
# print(f'Computing kernel covariance for quantile {quantile}')
# cov_kernel = compute_rbf_kernel_covariance(all_edges_list, sigma=val)
# covs_kernel[quantile] = cov_kernel
# # compute greedy sequence of ops on one of the kernels
# print('Greedy selection')
# greedy_ops = greedy_op_selection(covs_kernel[0.5], 3)
# for i, op_index in enumerate(greedy_ops):
# print(f'Greedy op {i} is {all_names_list[op_index]}')
# fig, axs = plt.subplots(5, 2)
# plot_all_covs(covs_kernel, corr, all_names_list, axs)
# plt.show()
if __name__ == '__main__':
main()
|
archai/archai/supergraph/algos/divnas/analyse_activations.py/0
|
{
"file_path": "archai/archai/supergraph/algos/divnas/analyse_activations.py",
"repo_id": "archai",
"token_count": 6820
}
| 332 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import importlib
import sys
from overrides import overrides
from torch import nn
from archai.common import ml_utils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.nas.evaluater import Evaluater
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
logger = get_global_logger()
class ManualEvaluater(Evaluater):
@overrides
def create_model(self, conf_eval:Config, model_desc_builder:ModelDescBuilder,
final_desc_filename=None, full_desc_filename=None)->nn.Module:
# region conf vars
dataset_name = conf_eval['loader']['dataset']['name']
# if explicitly passed in then don't get from conf
if not final_desc_filename:
final_desc_filename = conf_eval['final_desc_filename']
model_factory_spec = conf_eval['model_factory_spec']
# endregion
assert model_factory_spec
return self._model_from_factory(model_factory_spec, dataset_name)
def _model_from_factory(self, model_factory_spec:str, dataset_name:str)->Model:
splitted = model_factory_spec.rsplit('.', 1)
function_name = splitted[-1]
if len(splitted) > 1:
module_name = splitted[0]
else:
module_name = self._default_module_name(dataset_name, function_name)
module = importlib.import_module(module_name) if module_name else sys.modules[__name__]
function = getattr(module, function_name)
model = function()
logger.info({'model_factory':True,
'module_name': module_name,
'function_name': function_name,
'params': ml_utils.param_size(model)})
return model
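# Hedged sketch (added for illustration; not part of the original file) of a
# model factory as consumed above. With `model_factory_spec: "my_models.my_cnn"`
# in the eval config, `_model_from_factory` imports `my_models` and calls
# `my_cnn()` with no arguments; the module and function names are hypothetical.
#
#   # my_models.py
#   from torch import nn
#
#   def my_cnn() -> nn.Module:
#       return nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
#                            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
#                            nn.Linear(16, 10))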
|
archai/archai/supergraph/algos/manual/manual_evaluater.py/0
|
{
"file_path": "archai/archai/supergraph/algos/manual/manual_evaluater.py",
"repo_id": "archai",
"token_count": 764
}
| 333 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List, Tuple
from overrides import overrides
from archai.common.config import Config
from archai.supergraph.algos.nasbench101 import model_matrix
from archai.supergraph.algos.nasbench101.nasbench101_op import NasBench101Op
from archai.supergraph.nas.model_desc import (
CellDesc,
CellType,
ConvMacroParams,
EdgeDesc,
NodeDesc,
OpDesc,
TensorShape,
TensorShapes,
TensorShapesList,
)
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.operations import Op
class NasBench101CellBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('nasbench101_op',
lambda op_desc, arch_params, affine:
NasBench101Op(op_desc, arch_params, affine))
# extract model specs from params in config
params = conf_model_desc['params'].to_dict()
cell_matrix = params['cell_matrix']
vertex_ops = params['vertex_ops']
self.num_stacks = params['num_stacks']
self._cell_matrix, self._vertex_ops = model_matrix.prune(cell_matrix, vertex_ops)
@overrides
def build_cell(self, in_shapes:TensorShapesList, conf_cell:Config,
cell_index:int) ->CellDesc:
stem_shapes, stems = self.build_cell_stems(in_shapes, conf_cell, cell_index)
cell_type = self.get_cell_type(cell_index)
if self.template is None:
node_count = self.get_node_count(cell_index)
in_shape = stem_shapes[0] # input shape to nodes is the same as the cell stem's
out_shape = stem_shapes[0] # we ask nodes to keep the output shape same
node_shapes, nodes = self.build_nodes(stem_shapes, conf_cell,
cell_index, cell_type, node_count, in_shape, out_shape)
else:
node_shapes, nodes = self.build_nodes_from_template(stem_shapes, conf_cell, cell_index)
post_op_shape, post_op_desc = self.build_cell_post_op(stem_shapes,
node_shapes, conf_cell, cell_index)
cell_desc = CellDesc(
id=cell_index, cell_type=self.get_cell_type(cell_index),
conf_cell=conf_cell,
stems=stems, stem_shapes=stem_shapes,
nodes=nodes, node_shapes=node_shapes,
post_op=post_op_desc, out_shape=post_op_shape,
trainables_from=self.get_trainables_from(cell_index)
)
# output same shape twice to indicate s0 and s1 inputs for next cell
in_shapes.append([post_op_shape])
return cell_desc
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
for i in range(node_count):
edges = []
input_ids = []
first_proj = False # if input node is connected then it needs projection
if self._cell_matrix[0, i+1]: # nasbench internal nodes start at index 1
input_ids.append(0) # connect to s0
first_proj = True
for j in range(i): # look at all internal vertex before us
if self._cell_matrix[j+1, i+1]: # if there is connection
input_ids.append(j+2) # offset because of s0, s1
op_desc = OpDesc('nasbench101_op',
params={
'conv': conv_params,
'stride': 1,
'vertex_op': self._vertex_ops[i+1], # offset because of input node
'first_proj': first_proj
}, in_len=len(input_ids), trainables=None, children=None) # TODO: should we pass children here?
edge = EdgeDesc(op_desc, input_ids=input_ids)
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
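# Hedged sketch (added for illustration; not part of the original file) of the
# spec this builder consumes via `conf_model_desc['params']`. The matrix below
# is the canonical example from the NASBench-101 repository: row/column 0 is
# the input, row/column 6 the output, and entry [i][j] == 1 wires vertex i
# into vertex j; op names follow the NASBench-101 convention.
#
#   cell_matrix:
#     [[0, 1, 1, 1, 0, 1, 0],   # input
#      [0, 0, 0, 0, 0, 0, 1],   # 1x1 conv
#      [0, 0, 0, 0, 0, 0, 1],   # 3x3 conv
#      [0, 0, 0, 0, 1, 0, 0],   # 3x3 conv
#      [0, 0, 0, 0, 0, 0, 1],   # 3x3 conv
#      [0, 0, 0, 0, 0, 0, 1],   # 3x3 max-pool
#      [0, 0, 0, 0, 0, 0, 0]]   # output
#   vertex_ops: ['input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu',
#                'conv3x3-bn-relu', 'maxpool3x3', 'output']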
|
archai/archai/supergraph/algos/nasbench101/nasbench101_model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/nasbench101_model_desc_builder.py",
"repo_id": "archai",
"token_count": 2124
}
| 334 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import math as ma
import os
from typing import Optional, Tuple
import torch
from overrides import overrides
from torch import nn
from archai.common.common import get_conf, get_expdir
from archai.common.utils import zip_eq
from archai.supergraph.nas.arch_params import ArchParams
from archai.supergraph.nas.model_desc import OpDesc
from archai.supergraph.nas.operations import Op
# TODO: the reduction cell's output might be reduced by 2^1 = 2X due to
# stride 2 through the input nodes; however, FactorizedReduce does only
# 4X reduction. Is this correct?
class XnasOp(Op):
"""The output of XnasOp is weighted output of all allowed primitives.
"""
PRIMITIVES = [
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect', # identity
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5',
'none' # this must be at the end so top1 doesn't choose it
]
def __init__(self, op_desc:OpDesc, arch_params:Optional[ArchParams],
affine:bool):
super().__init__()
# assume last PRIMITIVE is 'none'
assert XnasOp.PRIMITIVES[-1] == 'none'
self._ops = nn.ModuleList()
for primitive in XnasOp.PRIMITIVES:
op = Op.create(
OpDesc(primitive, op_desc.params, in_len=1, trainables=None),
affine=affine, arch_params=None)
self._ops.append(op)
# for getting gradients to non-leaf node
self._grad = None
# we do this at the end so that we can capture all arch params registered by
# any previous child modules
self._setup_arch_params(arch_params)
def update_alphas(self, eta:float, current_t:int, total_t:int, grad_clip:float):
grad_flat = torch.flatten(self._grad)
rewards = torch.tensor([-torch.dot(grad_flat, torch.flatten(activ)) for activ in self._activs])
exprewards = torch.exp(eta * rewards).cuda()
# NOTE: Will this remain registered?
self._alphas[0] = torch.mul(self._alphas[0], exprewards)
# weak learner eviction
conf = get_conf()
to_evict = conf['nas']['search']['xnas']['to_evict']
if to_evict:
theta = max(self._alphas[0]) * ma.exp(-2 * eta * grad_clip * (total_t - current_t))
assert len(self._ops) == self._alphas[0].shape[0]
to_keep_mask = self._alphas[0] >= theta
num_ops_kept = torch.sum(to_keep_mask).item()
assert num_ops_kept > 0
# zero out the weights which are evicted
self._alphas[0] = torch.mul(self._alphas[0], to_keep_mask)
# save some debugging info
expdir = get_expdir()
filename = os.path.join(expdir, str(id(self)) + '.txt')
# save debug info to file
alphas = [str(self._alphas[0][i].item()) for i in range(self._alphas[0].shape[0])]
with open(filename, 'a') as f:
f.write(str(alphas))
f.write('\n')
def _save_grad(self):
def hook(grad):
self._grad = copy.deepcopy(grad)
return hook
@overrides
def forward(self, x):
self._activs = [op(x) for op in self._ops]
numer = sum(w * activ for w, activ in zip_eq(self._alphas[0], self._activs))
denom = sum(self._alphas[0])
self.pt = torch.div(numer, denom)
# register hook to save gradients
# NOTE: it has to be done every forward call
# otherwise the hook doesn't remain registered
# for subsequent loss.backward calls
if self.training:
self.pt.register_hook(self._save_grad())
return self.pt
@overrides
def finalize(self) -> Tuple[OpDesc, Optional[float]]:
with torch.no_grad():
# select except 'none' op
val, i = torch.topk(self._alphas[0][:-1], 1)
desc, _ = self._ops[i].finalize()
return desc, float(val.item())
@overrides
def can_drop_path(self) -> bool:
return False
def _setup_arch_params(self, arch_params:Optional[ArchParams])->None:
# do we have shared arch params?
if arch_params is None:
# create our own arch params
# the alphas are updated by exponentiated gradient descent
# and not by gradients from backprop. so we don't require grad.
new_p = nn.Parameter(torch.ones(len(XnasOp.PRIMITIVES)), requires_grad=False)
self.create_arch_params([('alphas', new_p)])
else:
assert arch_params.has_kind('alphas')
self.set_arch_params(arch_params)
# we store alphas in a list so PyTorch doesn't register them
self._alphas = list(self.arch_params().param_by_kind('alphas'))
assert len(self._alphas)==1
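# Hedged numeric sketch (added for illustration; not part of the original
# file) of the exponentiated-gradient step inside `update_alphas`: each alpha
# is multiplied by exp(eta * reward), where an op's reward is the negative
# inner product of the incoming gradient with that op's activation, so ops
# whose outputs align against the loss gradient are boosted.
if __name__ == '__main__':
    alphas = torch.ones(3)
    rewards = torch.tensor([0.5, 0.0, -0.5])  # op 0 aligns against the gradient
    alphas = alphas * torch.exp(0.1 * rewards)
    print(alphas / alphas.sum())  # ~tensor([0.3501, 0.3331, 0.3168])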
|
archai/archai/supergraph/algos/xnas/xnas_op.py/0
|
{
"file_path": "archai/archai/supergraph/algos/xnas/xnas_op.py",
"repo_id": "archai",
"token_count": 2153
}
| 335 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import shutil
import torch
import torchvision
from torchvision.datasets.utils import check_integrity
from archai.common.ordered_dict_logger import get_global_logger
from archai.common.utils import download_and_extract_tar, extract_tar
logger = get_global_logger()
_ARCHIVE_DICT = {
'train': {
'url': 'https://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',
'md5': '1d675b47d978889d74fa0da5fadfb00e',
},
'val': {
'url': 'https://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',
'md5': '29b22e2961454d5413ddabcf34fc5622',
},
'devkit': {
'url': 'https://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',
'md5': 'fa75699e90414af021442c21a62c3abf',
}
}
# To skip the slow os.walk, copy ILSVRC/ImageSets/CLS-LOC/train_cls.txt
# into the dataset root directory.
class ImageNetFolder(torchvision.datasets.ImageFolder):
"""`ImageNetFolder <https://image-net.org/>`_ 2012 Classification Dataset.
Args:
root (string): Root directory of the ImageNet Dataset.
split (string, optional): The dataset split, supports ``train``, or ``val``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
wnids (list): List of the WordNet IDs.
wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
imgs (list): List of (image path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(self, root, split='train', download=False, **kwargs):
root = self.root = os.path.expanduser(root)
self.split = self._verify_split(split)
if download:
self.download()
wnid_to_classes = self._load_meta_file()[0]
# to skip os walk (it's too slow) using ILSVRC/ImageSets/CLS-LOC/train_cls.txt file
listfile = os.path.join(root, 'train_cls.txt')
if split == 'train' and os.path.exists(listfile):
torchvision.datasets.VisionDataset.__init__(self, root, **kwargs)
with open(listfile, 'r') as f:
datalist = [
line.strip().split(' ')[0]
for line in f.readlines()
if line.strip()
]
classes = list(set([line.split('/')[0] for line in datalist]))
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
samples = [
(os.path.join(self.split_folder, line + '.JPEG'), class_to_idx[line.split('/')[0]])
for line in datalist
]
self.loader = torchvision.datasets.folder.default_loader
self.extensions = torchvision.datasets.folder.IMG_EXTENSIONS
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
self.imgs = self.samples
else:
super(ImageNetFolder, self).__init__(self.split_folder, **kwargs)
self.root = root
idcs = [idx for _, idx in self.imgs]
self.wnids = self.classes
self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}
self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
self.class_to_idx = {cls: idx
for clss, idx in zip(self.classes, idcs)
for cls in clss}
def download(self):
if not check_integrity(self.meta_file):
tmpdir = os.path.join(self.root, 'tmp')
archive_dict = _ARCHIVE_DICT['devkit']
download_and_extract_tar(archive_dict['url'], self.root,
extract_root=tmpdir,
md5=archive_dict['md5'])
devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]
meta = _parse_devkit(os.path.join(tmpdir, devkit_folder))
self._save_meta_file(*meta)
shutil.rmtree(tmpdir)
if not os.path.isdir(self.split_folder):
archive_dict = _ARCHIVE_DICT[self.split]
download_and_extract_tar(archive_dict['url'], self.root,
extract_root=self.split_folder,
md5=archive_dict['md5'])
if self.split == 'train':
_prepare_train_folder(self.split_folder)
elif self.split == 'val':
val_wnids = self._load_meta_file()[1]
_prepare_val_folder(self.split_folder, val_wnids)
else:
logger.warn({'imagenet_download':
f'dir "{self.split_folder}" already exist'})
@property
def meta_file(self):
return os.path.join(self.root, 'meta.bin')
def _load_meta_file(self):
if check_integrity(self.meta_file):
return torch.load(self.meta_file)
raise RuntimeError("Meta file not found or corrupted.",
"You can use download=True to create it.")
def _save_meta_file(self, wnid_to_class, val_wnids):
torch.save((wnid_to_class, val_wnids), self.meta_file)
def _verify_split(self, split):
if split not in self.valid_splits:
msg = "Unknown split {} .".format(split)
msg += "Valid splits are {{}}.".format(", ".join(self.valid_splits))
raise ValueError(msg)
return split
@property
def valid_splits(self):
return 'train', 'val'
@property
def split_folder(self):
return os.path.join(self.root, self.split)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
def _parse_devkit(root):
idx_to_wnid, wnid_to_classes = _parse_meta(root)
val_idcs = _parse_val_groundtruth(root)
val_wnids = [idx_to_wnid[idx] for idx in val_idcs]
return wnid_to_classes, val_wnids
def _parse_meta(devkit_root, path='data', filename='meta.mat'):
import scipy.io as sio
metafile = os.path.join(devkit_root, path, filename)
meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
nums_children = list(zip(*meta))[4]
meta = [meta[idx] for idx, num_children in enumerate(nums_children)
if num_children == 0]
idcs, wnids, classes = list(zip(*meta))[:3]
classes = [tuple(clss.split(', ')) for clss in classes]
idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
return idx_to_wnid, wnid_to_classes
def _parse_val_groundtruth(devkit_root, path='data',
filename='ILSVRC2012_validation_ground_truth.txt'):
with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:
val_idcs = txtfh.readlines()
return [int(val_idx) for val_idx in val_idcs]
def _prepare_train_folder(folder):
for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:
extract_tar(archive, os.path.splitext(archive)[0], delete=True)
def _prepare_val_folder(folder, wnids):
img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])
for wnid in set(wnids):
os.mkdir(os.path.join(folder, wnid))
for wnid, img_file in zip(wnids, img_files):
shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))
def _splitexts(root):
exts = []
ext = '.'
while ext:
root, ext = os.path.splitext(root)
exts.append(ext)
return root, ''.join(reversed(exts))
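# Hedged usage sketch (added for illustration; not part of the original file),
# assuming the ILSVRC2012 archives (or an already-extracted tree plus meta.bin)
# live under the hypothetical root below:
#
#   from torchvision import transforms
#   dataset = ImageNetFolder('/data/imagenet', split='train', download=False,
#                            transform=transforms.ToTensor())
#   img, label = dataset[0]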
|
archai/archai/supergraph/datasets/providers/imagenet_folder.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/imagenet_folder.py",
"repo_id": "archai",
"token_count": 3929
}
| 336 |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
# Credit to https://github.com/akamaster/pytorch_resnet_cifar10
__all__ = ['resnet_orig']
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR-10, the ResNet paper uses option A (zero-padded identity shortcut).
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet_orig(pretrained=False, device='cpu'):
net = ResNet(BasicBlock, [3, 3, 3])
if pretrained:
script_dir = os.path.dirname(__file__)
state_dict = torch.load(script_dir + '/state_dicts/resnet_orig.pt', map_location=device)
net.load_state_dict(state_dict)
return net
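# Hedged usage sketch (added for illustration; not part of the original file):
# ResNet(BasicBlock, [3, 3, 3]) is the 20-layer CIFAR-10 variant (3 stages of
# 3 blocks at 16/32/64 channels) and expects 32x32 inputs.
if __name__ == '__main__':
    net = resnet_orig(pretrained=False)
    logits = net(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])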
|
archai/archai/supergraph/models/resnet_orig.py/0
|
{
"file_path": "archai/archai/supergraph/models/resnet_orig.py",
"repo_id": "archai",
"token_count": 1533
}
| 337 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from abc import ABC, abstractmethod
from typing import Optional, Tuple
from overrides import EnforceOverrides
from archai.common import common, utils
from archai.common.config import Config
from archai.supergraph.nas.arch_trainer import TArchTrainer
from archai.supergraph.nas.evaluater import EvalResult, Evaluater
from archai.supergraph.nas.finalizers import Finalizers
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.random_finalizers import RandomFinalizers
from archai.supergraph.nas.searcher import Searcher, SearchResult
class ExperimentRunner(ABC, EnforceOverrides):
def __init__(self, config_filename:str, base_name:str, clean_expdir=False) -> None:
self.config_filename = config_filename
self.base_name = base_name
self.clean_expdir = clean_expdir
def run_search(self, conf_search:Config)->SearchResult:
model_desc_builder = self.model_desc_builder()
trainer_class = self.trainer_class()
finalizers = self.finalizers()
search = self.searcher()
return search.search(conf_search, model_desc_builder, trainer_class, finalizers)
def run_eval(self, conf_eval:Config)->EvalResult:
evaler = self.evaluater()
return evaler.evaluate(conf_eval,
model_desc_builder=self.model_desc_builder())
def run(self, search=True, eval=True) \
->Tuple[Optional[SearchResult], Optional[EvalResult]]:
search_result, eval_result = None, None
if search: # run search
conf = self._init_conf(True, clean_expdir=self.clean_expdir)
search_result = self.run_search(conf['nas']['search'])
if eval:
conf = self.get_conf(False)
common.clean_ensure_expdir(conf, clean_dir=self.clean_expdir, ensure_dir=True)
if search:
# first copy search result to eval, otherwise we expect eval config to point to results
self.copy_search_to_eval()
conf = self._init_conf(False, clean_expdir=False)
eval_result = self.run_eval(conf['nas']['eval'])
return search_result, eval_result
def copy_search_to_eval(self)->None:
# do not cache conf_search or conf_eval as it may have values that
# needs env var expansion.
# get desc file path that search has produced
conf_search = self.get_conf(True)['nas']['search']
search_desc_filename = conf_search['final_desc_filename']
search_desc_filepath = utils.full_path(search_desc_filename)
assert search_desc_filepath and os.path.exists(search_desc_filepath)
# get file path that eval would need
conf_eval = self.get_conf(False)['nas']['eval']
eval_desc_filename = conf_eval['final_desc_filename']
eval_desc_filepath = utils.full_path(eval_desc_filename)
assert eval_desc_filepath
utils.copy_file(search_desc_filepath, eval_desc_filepath)
def model_desc_builder(self)->Optional[ModelDescBuilder]:
return ModelDescBuilder() # default model desc builder puts nodes with no edges
def searcher(self)->Searcher:
return Searcher()
def evaluater(self)->Evaluater:
return Evaluater()
@abstractmethod
def trainer_class(self)->TArchTrainer:
pass
def finalizers(self)->Finalizers:
conf = common.get_conf()
finalizer = conf['nas']['search']['finalizer']
if not finalizer or finalizer == 'default':
return Finalizers()
elif finalizer == 'random':
return RandomFinalizers()
else:
raise NotImplementedError
def get_expname(self, is_search_or_eval:bool)->str:
return self.base_name + ('_search' if is_search_or_eval else '_eval')
def get_conf(self, is_search_or_eval:bool)->Config:
conf = common.create_conf(config_filepath=self.config_filename,
param_args=['--common.experiment_name', self.get_expname(is_search_or_eval)])
common.update_envvars(conf) # config paths might include env vars
return conf
def _init_conf(self, is_search_or_eval:bool, clean_expdir:bool)->Config:
config_filename = self.config_filename
conf = common.common_init(config_filepath=config_filename,
param_args=['--common.experiment_name', self.get_expname(is_search_or_eval),
], clean_expdir=clean_expdir)
return conf
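# Hedged sketch (added for illustration; not part of the original file): a
# minimal concrete runner only has to choose the architecture trainer. The
# config path and experiment name below are hypothetical.
#
#   from overrides import overrides
#   from archai.supergraph.nas.arch_trainer import ArchTrainer
#
#   class MyExperimentRunner(ExperimentRunner):
#       @overrides
#       def trainer_class(self) -> TArchTrainer:
#           return ArchTrainer
#
#   runner = MyExperimentRunner('confs/algos/my_algo.yaml', base_name='my_algo')
#   search_result, eval_result = runner.run(search=True, eval=True)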
|
archai/archai/supergraph/nas/exp_runner.py/0
|
{
"file_path": "archai/archai/supergraph/nas/exp_runner.py",
"repo_id": "archai",
"token_count": 1824
}
| 338 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import math
import pathlib
import statistics
import time
from collections import defaultdict
from typing import List, Mapping, Optional, Tuple
import yaml
from torch import Tensor
from archai.common import ml_utils, utils
from archai.common.apex_utils import ApexUtils
from archai.common.common import get_tb_writer
from archai.common.ordered_dict_logger import get_global_logger
logger = get_global_logger()
class Metrics:
"""Record top1, top5, loss metrics, track best so far.
There are 3 levels of metrics:
1. Run level - these for the one call of 'fit', example, best top1
2. Epoch level - these are the averages maintained top1, top5, loss
3. Step level - these are for every step in epoch
The pre_run must be called before fit call which will reset all metrics. Similarly
pre_epoch will reset running averages and pre_step will reset step level metrics like average step time.
The post_step will simply update the running averages while post_epoch updates
best we have seen for each epoch.
"""
def __init__(self, title:str, apex:Optional[ApexUtils], logger_freq:int=50) -> None:
"""Create the metrics object to maintain epoch stats
Arguments:
title {str} -- descriptive name of the stage for which metrics are collected
Keyword Arguments:
logger_freq {int} -- must be > 0 to enable logging; step-level logs are emitted every logger_freq steps (default: {50})
"""
self.logger_freq = logger_freq
self.title = title
self._apex = apex
self._reset_run()
def _reset_run(self)->None:
self.run_metrics = RunMetrics()
self.global_step = -1
self._tb_path = logger.current_path
def pre_run(self)->None:
self._reset_run()
self.run_metrics.pre_run()
def post_run(self, test_metrics:Optional['Metrics']=None)->None:
self.run_metrics.post_run(test_metrics)
# logging
if self.logger_freq > 0:
with logger.pushd('timings'):
logger.info({'epoch':self.run_metrics.epoch_time_avg(),
'step': self.run_metrics.step_time_avg(),
'run': self.run_metrics.duration()})
if self.is_dist():
logger.info({'dist_epoch_sum': self.reduce_sum(self.run_metrics.epoch_time_avg()),
'dist_step': self.reduce_mean(self.run_metrics.step_time_avg()),
'dist_run_sum': self.reduce_sum(self.run_metrics.duration())})
best_train, best_val, best_test = self.run_metrics.best_epoch()
with logger.pushd('best_train'):
logger.info({'epoch': best_train.index,
'top1': best_train.top1.avg})
if self.is_dist():
logger.info({'dist_epoch': self.reduce_mean(best_train.index),
'dist_top1': self.reduce_mean(best_train.top1.avg)})
if best_val:
with logger.pushd('best_val'):
logger.info({'epoch': best_val.index,
'top1': best_val.top1.avg})
if self.is_dist():
logger.info({'dist_epoch': self.reduce_mean(best_val.index),
'dist_top1': self.reduce_mean(best_val.top1.avg)})
if best_test:
with logger.pushd('best_test'):
logger.info({'epoch': best_test.index,
'top1': best_test.top1.avg})
if self.is_dist():
logger.info({'dist_epoch': self.reduce_mean(best_test.index),
'dist_top1': self.reduce_mean(best_test.top1.avg)})
def pre_step(self, x: Tensor, y: Tensor):
self.run_metrics.cur_epoch().pre_step()
self.global_step += 1
def post_step(self, x: Tensor, y: Tensor, logits: Tensor,
loss: Tensor, steps: int) -> None:
assert len(x)==len(y) and len(y)==len(logits) and len(loss.shape)==0
# update metrics after optimizer step
batch_size = x.size(0)
# TODO: code for more than 5 classes?
top1, top5 = ml_utils.accuracy(logits, y, topk=(1, 5))
epoch = self.run_metrics.cur_epoch()
epoch.post_step(top1.item(), top5.item(),
loss.item(), batch_size)
if self.logger_freq > 0 and \
((epoch.step+1) % self.logger_freq == 0):
logger.info({'top1': epoch.top1.avg,
'top5': epoch.top5.avg,
'loss': epoch.loss.avg,
'step_time': epoch.step_time.last})
if self.is_dist():
logger.info({'dist_top1': self.reduce_mean(epoch.top1.avg),
'dist_top5': self.reduce_mean(epoch.top5.avg),
'dist_loss': self.reduce_mean(epoch.loss.avg),
'dist_step_time': self.reduce_mean(epoch.step_time.last)})
# NOTE: Tensorboard step-level logging is removed as it becomes exponentially expensive on Azure blobs
# writer = get_tb_writer()
# writer.add_scalar(f'{self._tb_path}/train_steps/loss',
# epoch.loss.avg, self.global_step)
# writer.add_scalar(f'{self._tb_path}/train_steps/top1',
# epoch.top1.avg, self.global_step)
# writer.add_scalar(f'{self._tb_path}/train_steps/top5',
# epoch.top5.avg, self.global_step)
def pre_epoch(self, lr:float=math.nan)->None:
epoch = self.run_metrics.add_epoch()
epoch.pre_epoch(lr)
        if lr is not None and not math.isnan(lr):
            writer = get_tb_writer()
            if writer is not None:
                if self.logger_freq > 0:
                    logger.debug({'start_lr': lr})
                writer.add_scalar(f'{self._tb_path}/train_steps/lr',
                                  lr, self.global_step)
def post_epoch(self, lr:float=math.nan, val_metrics:Optional['Metrics']=None):
epoch = self.run_metrics.cur_epoch()
epoch.post_epoch(lr, val_metrics)
val_epoch_metrics = None
if val_metrics:
val_epoch_metrics = val_metrics.run_metrics.epochs_metrics[-1]
if self.logger_freq > 0:
with logger.pushd('train'):
logger.info({'top1': epoch.top1.avg,
'top5': epoch.top5.avg,
'loss': epoch.loss.avg,
'duration': epoch.duration(),
'step_time': epoch.step_time.avg,
'end_lr': lr})
if self.is_dist():
logger.info({'dist_top1': self.reduce_mean(epoch.top1.avg),
'dist_top5': self.reduce_mean(epoch.top5.avg),
'dist_loss': self.reduce_mean(epoch.loss.avg),
'dist_duration': self.reduce_mean(epoch.duration()),
'dist_step_time': self.reduce_mean(epoch.step_time.avg),
'dist_end_lr': self.reduce_mean(lr)})
if val_epoch_metrics:
with logger.pushd('val'):
logger.info({'top1': val_epoch_metrics.top1.avg,
'top5': val_epoch_metrics.top5.avg,
'loss': val_epoch_metrics.loss.avg,
'duration': val_epoch_metrics.duration()})
if self.is_dist():
logger.info({'dist_top1': self.reduce_mean(val_epoch_metrics.top1.avg),
'dist_top5': self.reduce_mean(val_epoch_metrics.top5.avg),
'dist_loss': self.reduce_mean(val_epoch_metrics.loss.avg),
'dist_duration': self.reduce_mean(val_epoch_metrics.duration())})
# writer = get_tb_writer()
# writer.add_scalar(f'{self._tb_path}/train_epochs/loss',
# epoch.loss.avg, epoch.index)
# writer.add_scalar(f'{self._tb_path}/train_epochs/top1',
# epoch.top1.avg, epoch.index)
# writer.add_scalar(f'{self._tb_path}/train_epochs/top5',
# epoch.top5.avg, epoch.index)
# if test_epoch:
# writer.add_scalar(f'{self._tb_path}/val_epochs/loss',
# test_epoch.loss.avg, epoch.index)
# writer.add_scalar(f'{self._tb_path}/val_epochs/top1',
# test_epoch.top1.avg, epoch.index)
# writer.add_scalar(f'{self._tb_path}/val_epochs/top5',
# test_epoch.top5.avg, epoch.index)
def state_dict(self)->Mapping:
return utils.state_dict(self)
def load_state_dict(self, state_dict:dict)->None:
utils.load_state_dict(self, state_dict)
def __getstate__(self):
state = self.__dict__.copy()
if '_apex' in state:
del state['_apex'] # cannot serialize this
return state
# no need to define __setstate__ because _apex should be set from constructor
def save(self, filepath:str)->Optional[str]:
if filepath:
filepath = utils.full_path(filepath)
pathlib.Path(filepath).write_text(yaml.dump(self))
return filepath
def epochs(self)->int:
"""Returns epochs recorded so far"""
return len(self.run_metrics.epochs_metrics)
def cur_epoch(self)->'EpochMetrics':
return self.run_metrics.cur_epoch()
def reduce_min(self, val):
if not self._apex:
return val
return self._apex.reduce(val, op='min')
def reduce_max(self, val):
if not self._apex:
return val
return self._apex.reduce(val, op='max')
def reduce_sum(self, val):
if not self._apex:
return val
return self._apex.reduce(val, op='sum')
def reduce_mean(self, val):
if not self._apex:
return val
return self._apex.reduce(val, op='mean')
def is_dist(self)->bool:
if not self._apex:
return False
return self._apex.is_dist()
def best_train_top1(self)->float:
return self.run_metrics.best_epoch()[0].top1.avg
def best_val_top1(self)->float:
val_epoch_metrics = self.run_metrics.best_epoch()[1]
return val_epoch_metrics.top1.avg if val_epoch_metrics is not None else math.nan
def best_test_top1(self)->float:
test_epoch_metrics = self.run_metrics.best_epoch()[2]
return test_epoch_metrics.top1.avg if test_epoch_metrics is not None else math.nan
class Accumulator:
# TODO: replace this with Metrics class
def __init__(self):
self.metrics = defaultdict(lambda: 0.)
def add(self, key, value):
self.metrics[key] += value
def add_dict(self, dict):
for key, value in dict.items():
self.add(key, value)
def __getitem__(self, item):
return self.metrics[item]
def __setitem__(self, key, value):
self.metrics[key] = value
def get_dict(self):
return copy.deepcopy(dict(self.metrics))
def items(self):
return self.metrics.items()
def __str__(self):
return str(dict(self.metrics))
def __truediv__(self, other):
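        # Dividing by a number scales every entry; dividing by a key name (str)
        # normalizes every *other* entry by the value stored under that key.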
newone = Accumulator()
for key, value in self.items():
if isinstance(other, str):
if other != key:
newone[key] = value / self[other]
else:
newone[key] = value
else:
newone[key] = value / other
return newone
class EpochMetrics:
"""Stores the metrics for each epoch. Training metrics is in top1, top5 etc
while validation metrics is in val_metrics"""
def __init__(self, index:int) -> None:
self.index = index
self.top1 = utils.AverageMeter()
self.top5 = utils.AverageMeter()
self.loss = utils.AverageMeter()
self.step_time = utils.AverageMeter()
self.start_time = math.nan
self.end_time = math.nan
self.step = -1
self.start_lr = math.nan
self.end_lr = math.nan
self.val_metrics:Optional[EpochMetrics] = None
def pre_step(self):
self._step_start_time = time.time()
self.step += 1
def post_step(self, top1:float, top5:float, loss:float, batch:int):
self.step_time.update(time.time() - self._step_start_time)
self.top1.update(top1, batch)
self.top5.update(top5, batch)
self.loss.update(loss, batch)
def pre_epoch(self, lr:float):
self.start_time = time.time()
self.start_lr = lr
def post_epoch(self, lr:float, val_metrics:Optional[Metrics]):
self.end_time = time.time()
self.end_lr = lr
if val_metrics is not None:
assert len(val_metrics.run_metrics.epochs_metrics)==1, 'Number of epochs in val metrics should be 1'
self.val_metrics = val_metrics.run_metrics.epochs_metrics[-1]
def duration(self):
return self.end_time-self.start_time
class RunMetrics:
"""Metrics for the entire run. It mainly consist of metrics for each epoch"""
def __init__(self) -> None:
self.epochs_metrics:List[EpochMetrics] = []
self.start_time = math.nan
self.end_time = math.nan
self.epoch = -1
self.test_metrics:Optional['Metrics'] = None
def pre_run(self):
self.start_time = time.time()
def post_run(self, test_metrics:Optional['Metrics']=None):
self.end_time = time.time()
self.test_metrics = test_metrics
# test should have only one epoch
assert test_metrics is None or len(test_metrics.run_metrics.epochs_metrics)==1
def add_epoch(self)->EpochMetrics:
self.epoch = len(self.epochs_metrics)
epoch_metrics = EpochMetrics(self.epoch)
self.epochs_metrics.append(epoch_metrics)
return epoch_metrics
def cur_epoch(self)->EpochMetrics:
return self.epochs_metrics[self.epoch]
def best_epoch(self)->Tuple[EpochMetrics, Optional[EpochMetrics],
Optional[EpochMetrics]]: # [train, val, test]
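        # Epochs without validation results rank lowest (key -1), so best_val
        # ends up None when no epoch recorded validation metrics.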
best_train = max(self.epochs_metrics, key=lambda e:e.top1.avg)
best_val = max(self.epochs_metrics,
key=lambda e:e.val_metrics.top1.avg if e.val_metrics else -1)
best_val = best_val.val_metrics if best_val.val_metrics else None
best_test = self.test_metrics.run_metrics.epochs_metrics[-1] \
if self.test_metrics else None
return best_train, best_val, best_test
def epoch_time_avg(self):
return statistics.mean((e.duration() for e in self.epochs_metrics))
def step_time_avg(self):
return statistics.mean((e.step_time.avg for e in self.epochs_metrics))
def duration(self):
return self.end_time-self.start_time
|
archai/archai/supergraph/utils/metrics.py/0
|
{
"file_path": "archai/archai/supergraph/utils/metrics.py",
"repo_id": "archai",
"token_count": 7791
}
| 339 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import shutil
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from transformers.trainer import Trainer
from archai.api.trainer_base import TrainerBase
from archai.trainers.nlp.hf_training_args import DistillerTrainingArguments
class HfTrainer(Trainer, TrainerBase):
"""Hugging Face trainer."""
@overrides
def _rotate_checkpoints(self, use_mtime: Optional[bool] = False, output_dir: Optional[str] = None) -> None:
"""Rotate checkpoints and cache them to Azure Storage.
The `use_mtime` argument is always set to `False` to avoid having
multiple checkpoints with the same timestamp when retrieving them
from Azure Storage. This is because Azure Storage does not support
sub-second precision for file timestamps.
Args:
use_mtime: Whether to use mtime to sort the checkpoints.
output_dir: Folder to output the checkpoints.
"""
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Enforces use_mtime=False to avoid identical timestamps
# when retrieving files from Azure Storage
use_mtime = False
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True,
# we could end up deleting the last checkpoint, which
# we don't do to allow resuming
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
try:
shutil.rmtree(checkpoint)
except FileNotFoundError:
pass
class HfDistillerTrainer(HfTrainer):
"""Hugging Face distillation-based trainer."""
def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None:
"""Initialize Hugging Face distillation-based trainer.
Args:
teacher_model: Pre-trained teacher model.
"""
self.teacher_model = teacher_model
if "args" in kwargs:
assert isinstance(
kwargs["args"], DistillerTrainingArguments
), "`args` should be an instance of `DistillerTrainingArguments`."
else:
kwargs["args"] = DistillerTrainingArguments("tmp")
super().__init__(**kwargs)
@overrides
def compute_loss(
self,
model: torch.nn.Module,
inputs: Dict[str, torch.Tensor],
return_outputs: Optional[bool] = False,
) -> Tuple[torch.Tensor, ...]:
"""Override the computation of the loss function.
The loss is a weighted sum of the student's loss, as computed by
the original `HfTrainer`, and the KL divergence between the student and
teacher models.
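        In symbols (matching the code below, with temperature T and
        student/teacher logits s and t):

            loss = alpha * student_loss
                   + (1 - alpha) * T^2 * KL(log_softmax(s / T) || softmax(t / T))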
Args:
model: Student model.
inputs: Input tensors.
return_outputs: Whether outputs should be returned.
Returns:
(loss, outputs) or the loss tensor.
"""
student_outputs = model(**inputs)
student_loss = student_outputs["loss"]
student_logits = student_outputs["logits"]
with torch.no_grad():
teacher_outputs = self.teacher_model(**inputs)
teacher_logits = teacher_outputs["logits"]
# Compute the KL divergence and KD losses
kl_loss = nn.KLDivLoss(reduction="batchmean")
kl_divergence = kl_loss(
F.log_softmax(student_logits / self.args.temperature, dim=-1),
F.softmax(teacher_logits / self.args.temperature, dim=-1),
)
kd_loss = self.args.temperature**2 * kl_divergence
# Weigh the final loss
loss = self.args.alpha * student_loss + (1 - self.args.alpha) * kd_loss
return (loss, student_outputs) if return_outputs else loss
|
archai/archai/trainers/nlp/hf_trainer.py/0
|
{
"file_path": "archai/archai/trainers/nlp/hf_trainer.py",
"repo_id": "archai",
"token_count": 1861
}
| 340 |
# in toy mode, load the config for algo and then override with common settings for toy mode
# any additional algo specific toy mode settings will go in this file
__include__: ['petridish.yaml', 'toy_common.yaml']
nas:
search:
petridish:
sampling_max_try: 1
max_hull_points: 2
|
archai/confs/algos/petridish_toy.yaml/0
|
{
"file_path": "archai/confs/algos/petridish_toy.yaml",
"repo_id": "archai",
"token_count": 100
}
| 341 |
#!/bin/bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
docker build . --file Dockerfile --tag nvidia22.10-archai:latest
|
archai/docker/build_image.sh/0
|
{
"file_path": "archai/docker/build_image.sh",
"repo_id": "archai",
"token_count": 44
}
| 342 |
display_name: Generates new tokens with a pre-trained model
type: command
compute: nas-gpu-cluster-NC6
inputs:
pre_trained_model_path:
type: uri_folder
path: azureml://full/path/to/pretrained/model
hub_tokenizer_path: gpt2
prompt: Machine Learning
outputs:
output_path:
type: uri_file
code: .
environment:
azureml:aml-archai:0.0.1
command: >-
python generate_text.py
${{inputs.pre_trained_model_path}}
${{inputs.hub_tokenizer_path}}
"${{inputs.prompt}}"
--output_path ${{outputs.output_path}}
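# One way to submit this job spec (illustrative; assumes the Azure ML CLI v2 and
# a configured workspace -- adjust the resource group and workspace names):
#   az ml job create --file generate_text.yaml --resource-group <rg> --workspace-name <ws>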
|
archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/generate_text.yaml/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/generate_text.yaml",
"repo_id": "archai",
"token_count": 204
}
| 343 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import sys
from datetime import date
# Adds path to local extension
sys.path.insert(0, os.path.abspath(".."))
# Project information
project = "Archai"
author = "Microsoft"
copyright = f"{date.today().year}"
# General configuration
extensions = [
"nbsphinx",
"sphinxcontrib.programoutput",
"sphinxcontrib.mermaid",
"sphinx.ext.autodoc",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_git",
"sphinx_inline_tabs",
"sphinx_sitemap",
]
exclude_patterns = [
"confs/**",
"docker/**",
"research/**",
"scripts/**",
"tasks/**",
"tests/**",
]
extlinks = {"github": ("https://github.com/microsoft/archai/tree/main/%s", "%s")}
source_suffix = [".rst", ".md"]
master_doc = "index"
language = "en"
html_baseurl = "https://microsoft.github.io/archai/"
# Options for HTML output
html_title = project
html_theme = "sphinx_book_theme"
html_logo = "assets/img/logo.png"
html_favicon = "assets/img/favicon.ico"
html_last_updated_fmt = ""
html_static_path = ["assets"]
html_css_files = ["css/custom.css"]
html_theme_options = {
"repository_url": "https://github.com/microsoft/archai",
"use_issues_button": True,
"use_edit_page_button": False,
"use_download_button": False,
"use_fullscreen_button": False,
"use_repository_button": True,
"show_navbar_depth": 1,
"toc_title": "Sections",
}
# Autodoc
autodoc_default_options = {"exclude-members": "__weakref__"}
autodoc_member_order = "bysource"
autodoc_mock_imports = ["lmdb", "nats_bench", "ray.tune", "xautodl"]
# Disables `nbsphinx` require.js to avoid
# conflicts with `sphinxcontrib.mermaid`
nbsphinx_execute = "never"
nbsphinx_requirejs_path = ""
|
archai/docs/conf.py/0
|
{
"file_path": "archai/docs/conf.py",
"repo_id": "archai",
"token_count": 757
}
| 344 |
<jupyter_start><jupyter_text>Search Algorithms<jupyter_code>import os
from overrides import overrides
from typing import List
import torch
from torch import nn
import numpy as np
import json
from random import Random
from archai.discrete_search.api import ArchaiModel, EvolutionarySearchSpace, BayesOptSearchSpace<jupyter_output><empty_output><jupyter_text>We will re-use the CNN search space created in the [search space example](./search_space.ipynb).<jupyter_code>from model import MyModel
from cnn_search_space import CNNSearchSpaceExt as CNNSearchSpace
ss = CNNSearchSpace(max_layers=10, kernel_list=[3, 5, 7], hidden_list=[16, 32, 64])
m = ss.random_sample()
m<jupyter_output><empty_output><jupyter_text>Dataset Provider Datasets are represented in Archai through the [`DatasetProvider`](../../reference/api/archai.discrete_search.api.rst) class. For this example, we will use the built-in dataset provider of the MNIST dataset.<jupyter_code>from archai.datasets.cv.mnist_dataset_provider import MnistDatasetProvider
dataset_provider = MnistDatasetProvider()<jupyter_output><empty_output><jupyter_text>We can get train/test PyTorch datasets from a DatasetProvider by calling methods such as `dataset_provider.get_train_dataset()` and `dataset_provider.get_val_dataset()`<jupyter_code># Loads only the training set
tr_d = dataset_provider.get_train_dataset()<jupyter_output><empty_output><jupyter_text>Wrapping custom evaluation code We will evaluate the models using partial training validation accuracy as a proxy for final task performance. This notebook will take about an hour to complete if you run it on a Intel Core i9 CPU. If you have CUDA enabled version of pytorch installed, it will be about 30 minutes if you pass the `device` argument `cuda` in the following `PartialTrainingValAccuracy` constructor.<jupyter_code>from archai.api.dataset_provider import DatasetProvider
from archai.discrete_search.api import ModelEvaluator
from archai.discrete_search.evaluators import RayParallelEvaluator
from tqdm import tqdm
import math
class PartialTrainingValAccuracy(ModelEvaluator):
def __init__(self, dataset: DatasetProvider, training_epochs: float = 1.0, lr: float = 1e-4, device: str = 'cpu',
progress_bar: bool = False):
self.training_epochs = training_epochs
self.dataset_provider = dataset
self.device = device
self.lr = lr
self.progress_bar = progress_bar
@overrides
def evaluate(self, model, budget = None) -> float:
# Loads the dataset
tr_data = self.dataset_provider.get_train_dataset()
val_data = self.dataset_provider.get_val_dataset()
tr_dl = torch.utils.data.DataLoader(tr_data, batch_size=16, shuffle=True, num_workers=4)
val_dl = torch.utils.data.DataLoader(val_data, batch_size=32, shuffle=False, num_workers=4)
# Training settings
optimizer = torch.optim.Adam(model.arch.parameters(), lr=self.lr)
criterion = nn.CrossEntropyLoss()
model.arch.train()
model.arch.to(self.device)
# Partial training
epoch_iter = range(math.ceil(self.training_epochs))
if self.progress_bar:
epoch_iter = tqdm(epoch_iter, desc=f'Training model {model.archid}')
for epoch_nb in epoch_iter:
            # Early stops for fractional values of training epochs (e.g., 0.2)
early_stop = len(tr_dl) + 1
if 0 < (self.training_epochs - epoch_nb) < 1:
early_stop = int((self.training_epochs - epoch_nb) * len(tr_dl))
for i, (x, y) in enumerate(tr_dl):
if i >= early_stop:
break
optimizer.zero_grad()
pred = model.arch(x.to(self.device))
loss = criterion(pred, y.to(self.device))
loss.backward()
optimizer.step()
# Evaluates final model
model.arch.eval()
with torch.no_grad():
val_pred, val_target = [], []
for x, y in val_dl:
val_pred.append(model.arch(x.to(self.device)).argmax(axis=1).to('cpu'))
val_target.append(y.to('cpu'))
val_pred, val_target = torch.cat(val_pred, axis=0), torch.cat(val_target, axis=0)
val_acc = (val_pred.squeeze() == val_target.squeeze()).numpy().mean()
# Returns model to cpu
model.arch.cpu()
return val_acc<jupyter_output><empty_output><jupyter_text>Let's test our evaluator:<jupyter_code>partial_tr = PartialTrainingValAccuracy(
dataset_provider,
training_epochs=0.001, # Trains for 1/1000 of an epoch
progress_bar=True
)<jupyter_output><empty_output><jupyter_text>This evaluation is pretty quick, even on a CPU, at around 10 seconds on an Intel Core i9 processor.<jupyter_code>partial_tr.evaluate(ss.random_sample())<jupyter_output>Training model L=5, K=3, H=32: 100%|██████████| 1/1 [00:02<00:00, 2.04s/it]<jupyter_text>We can make this objective more efficient by evaluating multiple architectures in parallel. To do that, we can use the `RayParallelEvaluator` wrapper mentioned in the [previous example](./evaluators.ipynb):<jupyter_code># NBVAL_SKIP
parallel_partial_tr = RayParallelEvaluator(partial_tr)<jupyter_output><empty_output><jupyter_text>Let's test our partial training objective by sending two random architectures<jupyter_code># NBVAL_SKIP
parallel_partial_tr.send(ss.random_sample())
parallel_partial_tr.send(ss.random_sample())
# NBVAL_SKIP
parallel_partial_tr.fetch_all()<jupyter_output>Training model L=7, K=3, H=16: 0%| | 0/1 [00:00<?, ?it/s]
Training model L=8, K=5, H=64: 0%| | 0/1 [00:00<?, ?it/s]
Training model L=7, K=3, H=16: 100%|██████████| 1/1 [00:03<00:00, 3.55s/it]
Training model L=8, K=5, H=64: 100%|██████████| 1/1 [00:05<00:00, 5.11s/it]<jupyter_text>To run the same objective distributing jobs across multiple GPUs, just set the `num_gpus` parameter (see [ray.init](https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-init)) and set `device='cuda'` (this assumes you have installed the NVidia CUDA SDK and PyTorch for CUDA as per the setup instructions at https://pytorch.org/get-started/locally/):

```python
RayParallelEvaluator(
    PartialTrainingValAccuracy(training_epochs=1, device='cuda'),
    num_gpus=0.5,  # 2 jobs per gpu available
    max_calls=1
)
```

Defining Search Objectives Search optimization objectives are specified using the `archai.discrete_search.SearchObjectives` class<jupyter_code>from archai.discrete_search.api import SearchObjectives
objectives = SearchObjectives()<jupyter_output><empty_output><jupyter_text>Adding objectives To add search objectives, we can use the `SearchObjectives.add_objective` method<jupyter_code>from archai.discrete_search.evaluators import AvgOnnxLatency, TorchFlops
objectives.add_objective(
# Objective function name (will be used in plots and reports)
name='ONNX Latency (ms)',
# ModelEvaluator object that will be used to evaluate the model
model_evaluator=AvgOnnxLatency(input_shape=(1, 1, 28, 28), num_trials=3),
# Optimization direction, `True` for maximization or `False` for minimization
higher_is_better=False,
# Whether this objective should be considered 'compute intensive' or not.
compute_intensive=False
)<jupyter_output><empty_output><jupyter_text>The `compute_intensive` flag is used in some search algorithms to help increase search efficiency. For instance, search algorithms that use surrogate models may try to estimate the value of expensive objective functions of unseen architectures in certain situations, while cheap objectives (`compute_intensive=False`) will just be computed directly.<jupyter_code>objectives.add_objective(
'FLOPs', TorchFlops(torch.randn(1, 1, 28, 28)),
higher_is_better=False,
compute_intensive=False,
# We may optionally add a constraint.
# Architectures outside this range will be ignored by the search algorithm
constraint=(0.0, 1e9)
)<jupyter_output><empty_output><jupyter_text>Additionally, objectives that are cheap to evaluate (`compute_intensive=False`) may receive an optional `constraint` argument. Model candidates outside this range will be ignored by the search algorithm. We can evaluate cheap objectives by calling `SearchObjectives.eval_cheap_objs(model_list)`<jupyter_code>samples = [ss.random_sample() for _ in range(2)]
objectives.eval_cheap_objs(samples,
progress_bar=True)<jupyter_output>Calculating "ONNX Latency (ms)"...: 100%|██████████| 2/2 [00:00<00:00, 9.76it/s]
Calculating "FLOPs"...: 100%|██████████| 2/2 [00:00<00:00, 23.84it/s]
Gathering results from async objectives...: 0it [00:00, ?it/s]<jupyter_text>We can check if a model satisfies the constraints we added for the FLOPs objective by calling `SearchObjectives.validate_constraints(model_list)` or `SearchObjectives.is_model_valid(ss.random_sample())`<jupyter_code>m = ss.random_sample()
objectives.validate_constraints([m])
objectives.is_model_valid(m)<jupyter_output><empty_output><jupyter_text>By default, all objective and constraint evaluations are cached to prevent spending resources on the same architecture twice.<jupyter_code># The evaluation cache is built using the
# tuple (obj_name, archid, budget)
objectives.lookup_cache('FLOPs', samples[0].archid, None)<jupyter_output><empty_output><jupyter_text>Caching can be disabled by setting `SearchObjectives(cache_objective_evaluation=False)`. Now, let's try adding the partial training objective we created before. The code below requests a GPU compatible with CUDA. If running on CPU, that's ok; it will just fall back to CPU training.<jupyter_code>objectives.add_objective(
'Partial training Validation Accuracy (1 epoch)',
RayParallelEvaluator(
PartialTrainingValAccuracy(dataset_provider, training_epochs=1, device='cuda'),
num_gpus=0.5, # 2 jobs per gpu available
max_calls=1
),
higher_is_better=True,
compute_intensive=True # This is a compute intensive evaluator
)<jupyter_output><empty_output><jupyter_text>Expensive objectives can be evaluated using `SearchObjectives.eval_expensive_objs(model_list)` Alternatively, all objectives (expensive and cheap) can also be evaluated using `SearchObjectives.eval_all_objs`. Adding extra constraints Besides the constraint parameter from cheap objectives, it is also possible to add extra constraints that are not search objectives (and thus should not be optimized by NAS algorithms).<jupyter_code>from archai.discrete_search.evaluators import TorchNumParameters
objectives.add_constraint(
'Number of parameters',
TorchNumParameters(),
constraint=(0.0, 1e6)
)
objectives.validate_constraints([m])
objectives.is_model_valid(m)<jupyter_output><empty_output><jupyter_text>Using a search algorithm Now that we know how to create and use search objectives, we can finally use a search algorithm to do Neural Architecture Search! Example: `EvolutionParetoSearch` Let's start with an evolutionary-based search algorithm<jupyter_code>from archai.discrete_search.algos import EvolutionParetoSearch
algo = EvolutionParetoSearch(
ss, objectives,
output_dir='./out_evo',
num_iters=5, num_crossovers=5,
mutations_per_parent=2,
max_unseen_population=10,
save_pareto_model_weights=False,
seed=42
)
# NBVAL_SKIP
search_results = algo.search()<jupyter_output>2023-03-29 19:16:04,354 - archai.discrete_search.algos.evolution_pareto — INFO — Using 10 random architectures as the initial population ...
2023-03-29 19:16:04,536 - archai.discrete_search.algos.evolution_pareto — INFO — Iteration 1/5
2023-03-29 19:16:04,537 - archai.discrete_search.algos.evolution_pareto — INFO — Calculating search objectives ['ONNX Latency (ms)', 'FLOPs', 'Partial training Validation Accuracy (1 epoch)'] for 10 models ...
2023-03-29 19:19:42,179 - archai.discrete_search.algos.evolution_pareto — INFO — Updating Pareto frontier ...
2023-03-29 19:19:42,180 - archai.discrete_search.algos.evolution_pareto — INFO — Found 7 members.
2023-03-29 19:19:42,651 - archai.discrete_search.algos.evolution_pareto — INFO — Optimzing memory usage ...
2023-03-29 19:19:42,652 - archai.discrete_search.algos.evolution_pareto — INFO — Choosing 7 parents ...<jupyter_text>By default all algorithms will save the final pareto architectures `{output_dir}/pareto_models_iter_*/`, pareto evolution plots `pareto_*.png` and search state tables with all the results `{output_dir}/search_state_*.csv`<jupyter_code># NBVAL_SKIP
os.listdir('./out_evo')<jupyter_output><empty_output><jupyter_text>It is also possible to get information from the `search_results` object directly:<jupyter_code># NBVAL_SKIP
search_results.plot_2d_pareto_evolution(('ONNX Latency (ms)', 'Partial training Validation Accuracy (1 epoch)'))<jupyter_output><empty_output><jupyter_text>We can get `pandas.DataFrame` object with the search results calling<jupyter_code># NBVAL_SKIP
results_df = search_results.get_search_state_df()
# NBVAL_SKIP
results_df.query('is_pareto').drop(columns=['is_pareto']).sort_values('Partial training Validation Accuracy (1 epoch)')<jupyter_output><empty_output><jupyter_text>Since our search space is also compatible with Bayesian Optimization algorithms, let's try more sophisticated algorithm like MO-BANANAS. MO-BANANAS will progressively train a surrogate model based on the data gathered during search. This surrogate model will be used to predict the result of expensive objective function evaluations and will try to determine what are the best possible architectures according to the surrogate model.<jupyter_code>from archai.discrete_search.algos import MoBananasSearch
algo2 = MoBananasSearch(
ss, objectives,
output_dir='./out_bananas',
num_iters=5, mutations_per_parent=5,
num_candidates=20,
seed=43
)
# NBVAL_SKIP
search_results2 = algo2.search()
# NBVAL_SKIP
os.listdir('./out_bananas')
# NBVAL_SKIP
search_results2.plot_2d_pareto_evolution(('ONNX Latency (ms)', 'Partial training Validation Accuracy (1 epoch)'))<jupyter_output><empty_output><jupyter_text>MO-BANANAS will also save the predictive mean and variance of the expensive objectives during each iteration.<jupyter_code># NBVAL_SKIP
results_df2 = search_results2.get_search_state_df()
results_df2.query('is_pareto').sort_values('Partial training Validation Accuracy (1 epoch)').drop(columns=['is_pareto'])<jupyter_output><empty_output><jupyter_text>Let's use [plotly](https://plotly.com/) to compare the final pareto frontiers of both algorithms:<jupyter_code># NBVAL_SKIP
%pip install plotly
import pandas as pd
import plotly.express as px
merged_results_df = pd.concat([
results_df.assign(algo='Evolution Pareto'),
results_df2.assign(algo='Mo-BANANAS')
], axis=0)
fig = px.scatter(
merged_results_df.query('is_pareto'),
'ONNX Latency (ms)',
'Partial training Validation Accuracy (1 epoch)',
hover_name='archid',
color='algo',
facet_col='algo'
)
fig.layout = fig.layout.update(showlegend=False)
fig<jupyter_output>Requirement already satisfied: plotly in d:\anaconda3\envs\archai\lib\site-packages (5.13.0)
Requirement already satisfied: tenacity>=6.2.0 in d:\anaconda3\envs\archai\lib\site-packages (from plotly) (8.2.1)
Note: you may need to restart the kernel to use updated packages.
|
archai/docs/getting_started/notebooks/discrete_search/algos.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/discrete_search/algos.ipynb",
"repo_id": "archai",
"token_count": 5625
}
| 345 |
Benchmark
=========
NATS-Bench
----------
.. automodule:: archai.discrete_search.evaluators.benchmark.natsbench_tss
:members:
:undoc-members:
|
archai/docs/reference/api/archai.discrete_search.evaluators.benchmark.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.evaluators.benchmark.rst",
"repo_id": "archai",
"token_count": 57
}
| 346 |
Optimization Utilities
======================
Fusion Options
--------------
.. automodule:: archai.onnx.optimization_utils.fusion_options
:members:
:undoc-members:
Transformer-XL ONNX Model
-------------------------
.. automodule:: archai.onnx.optimization_utils.transfo_xl_onnx_model
:members:
:undoc-members:
|
archai/docs/reference/api/archai.onnx.optimization_utils.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.onnx.optimization_utils.rst",
"repo_id": "archai",
"token_count": 111
}
| 347 |
Models
======
.. toctree::
:maxdepth: 2
archai.supergraph.models.shakeshake
AlexNet
-------
.. automodule:: archai.supergraph.models.alexnet
:members:
:undoc-members:
DenseNet
--------
.. automodule:: archai.supergraph.models.densenet
:members:
:undoc-members:
GoogleNet
---------
.. automodule:: archai.supergraph.models.googlenet
:members:
:undoc-members:
Inception
---------
.. automodule:: archai.supergraph.models.inception
:members:
:undoc-members:
MobileNetV2
-----------
.. automodule:: archai.supergraph.models.mobilenetv2
:members:
:undoc-members:
PyramidNet
----------
.. automodule:: archai.supergraph.models.pyramidnet
:members:
:undoc-members:
ResNet
------
.. automodule:: archai.supergraph.models.resnet
:members:
:undoc-members:
ResNet (Original)
-----------------
.. automodule:: archai.supergraph.models.resnet_orig
:members:
:undoc-members:
ResNet (Paper)
--------------
.. automodule:: archai.supergraph.models.resnet_paper
:members:
:undoc-members:
ShakeDrop
---------
.. automodule:: archai.supergraph.models.shakedrop
:members:
:undoc-members:
VGG
---
.. automodule:: archai.supergraph.models.vgg
:members:
:undoc-members:
Wide ResNet
-----------
.. automodule:: archai.supergraph.models.wideresnet
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.models.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.models.rst",
"repo_id": "archai",
"token_count": 518
}
| 348 |
# LM-Eval-Harness
## Installation
To install `lm_eval_harness`, run the following commands in your command line:
```shell
conda create -n lm_eval_harness python=3.8
conda activate lm_eval_harness
pip install -e .
```
## Evaluating with `lm_eval_harness`
To evaluate your model with `lm_eval_harness`, run the following command:
```shell
python evaluate_with_lm_eval.py --help
```
This will give you a list of options and arguments that can be passed to the script to evaluate your model. For example:
```shell
python evaluate_with_lm_eval.py gpt2 gpt2 --tasks cb,copa
```
This will evaluate a pre-trained GPT-2 from Hugging Face's Hub, using the `gpt2` pre-trained tokenizer on two SuperGLUE tasks: CommitmentBank and Choice of Plausible Alternatives.
|
archai/research/lm_eval_harness/README.md/0
|
{
"file_path": "archai/research/lm_eval_harness/README.md",
"repo_id": "archai",
"token_count": 249
}
| 349 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import json
import os
import re
from typing import List
import natsort
import torch
from lm_eval.evaluator import evaluate
from lm_eval_harness.lm_eval_hf_model import HFEvalModel
from lm_eval_harness.tasks.human_eval import HumanEval
from transformers import AutoTokenizer
from archai.discrete_search.search_spaces.nlp.tfpp.modeling_codegen_flash import (
CodeGenFlashConfig,
CodeGenFlashSequential,
)
CHECKPOINT_REGEX = re.compile(r"^(\d+)$")
def find_checkpoints(folder_name: str) -> List[str]:
folder_content = os.listdir(folder_name)
checkpoints = [
os.path.join(folder_name, path)
for path in folder_content
if CHECKPOINT_REGEX.search(path) is not None and os.path.isdir(os.path.join(folder_name, path))
]
checkpoints = natsort.natsorted(checkpoints)
return checkpoints
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Evaluate DeepSpeed checkpoints on HumanEval.")
parser.add_argument(
"checkpoint_dir",
type=str,
help="Directory containing the checkpoints to evaluate.",
)
parser.add_argument(
"-htn",
"--hub_tokenizer_name",
type=str,
default="Salesforce/codegen-350M-mono",
help="Name of the tokenizer to use (via the Hugging Face Hub).",
)
parser.add_argument(
"-ns",
"--n_samples",
type=int,
default=1,
help="Number of code samples to generate.",
)
parser.add_argument(
"-t",
"--temperature",
type=float,
default=0.01,
help="Temperature for the code generation.",
)
parser.add_argument(
"-pk",
"--pass_at_k",
type=int,
nargs="+",
default=1,
help="Pass at k for the code generation.",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
if not isinstance(args.pass_at_k, list):
args.pass_at_k = [args.pass_at_k]
tokenizer = AutoTokenizer.from_pretrained(args.hub_tokenizer_name)
for checkpoint in find_checkpoints(args.checkpoint_dir):
print(f"Loading checkpoint: {checkpoint}")
# Note: match the model configuration to the one used for training
config = CodeGenFlashConfig(
vocab_size=50304,
n_positions=2048,
n_embd=1024,
n_layer=20,
n_head=16,
rotary_dim=32,
pad_vocab_size_multiple=64,
attn_type="flash",
use_fused_mlp=True,
)
model = CodeGenFlashSequential(config)
state_dict = torch.load(os.path.join(checkpoint, "mp_rank_00_model_states.pt"))
if state_dict["module"] is not None:
model.load_state_dict(state_dict["module"])
else:
for i, layer in enumerate(model.layers):
state_dict = torch.load(os.path.join(checkpoint, f"layer_{i:02d}-model_states.pt"))
layer.load_state_dict(state_dict)
hf_model = HFEvalModel(model.half(), tokenizer)
print("Evaluating on HumanEval ...")
results = evaluate(
lm=hf_model,
task_dict={
"human_eval": HumanEval(
n_samples=args.n_samples,
temperature=args.temperature,
pass_at_k=args.pass_at_k,
)
},
)
output_json = json.dumps(results, indent=2)
output_json_path = os.path.join(checkpoint, "human_eval.json")
with open(output_json_path, "w") as f:
print(f"Dumping evaluation results: {output_json_path}")
f.write(output_json)
|
archai/scripts/eval/deepspeed/evaluate_human_eval.py/0
|
{
"file_path": "archai/scripts/eval/deepspeed/evaluate_human_eval.py",
"repo_id": "archai",
"token_count": 1751
}
| 350 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
""" Script to prepare mit67 dataset for pytorch dataloader.
"""
import argparse
import os
import tempfile
from collections import defaultdict
from typing import Dict, List
from torchvision.datasets.utils import download_and_extract_archive, download_url
from archai.common import utils
def check_mit67(dataroot: str) -> bool:
mit67 = os.path.join(dataroot, "mit67")
train = os.path.join(mit67, "train")
test = os.path.join(mit67, "test")
meta = os.path.join(mit67, "meta")
if not os.path.isdir(mit67) or not os.path.isdir(train) or not os.path.isdir(test) or not os.path.isdir(meta):
return False
num_train_files = 0
for base, dirs, files in os.walk(train):
for file in files:
num_train_files += 1
if num_train_files != 12466:
return False
num_test_files = 0
for base, dirs, files in os.walk(test):
for file in files:
num_test_files += 1
if num_test_files != 3153:
return False
# all checks passed
return True
def download(dataroot: str):
DOWNLOAD_URL = "https://groups.csail.mit.edu/vision/LabelMe/NewImages/indoorCVPR_09.tar"
with tempfile.TemporaryDirectory() as tempdir:
download_and_extract_archive(DOWNLOAD_URL, tempdir, extract_root=dataroot, remove_finished=True)
def load_test_csv_data(filename: str) -> Dict[str, List[str]]:
"""Loads the data in csv files into a dictionary with
class names as keys and list of image names as values. Works only for test data csv"""
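    # Assumed test-csv row layout after the header line (illustrative):
    #   <class_name>,<image_1.jpg>,<image_2.jpg>,...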
data_dict = defaultdict(list)
with open(filename, "r") as f:
lines = f.readlines()
assert len(lines) > 0
for line in lines[1:]:
words = line.rstrip().split(",")
assert len(words) > 0
data_dict[words[0]] = words[1:]
return data_dict
def load_train_csv_data(filename: str) -> Dict[str, List[str]]:
"""Loads the data in csv files into a dictionary with
class names as keys and list of image names as values. Works only for train data csv"""
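    # Assumed train-csv row layout after the header line (illustrative; the
    # first column is skipped by the parser below):
    #   <row_id>,<class_name>,<image_1.jpg>,<image_2.jpg>,...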
data_dict = defaultdict(list)
with open(filename, "r") as f:
lines = f.readlines()
assert len(lines) > 0
for line in lines[1:]:
words = line.rstrip().split(",")
assert len(words) > 0
data_dict[words[1]] = words[2:]
return data_dict
def copy_data_helper(data: Dict[str, List[str]], imagesroot: str, foldername: str) -> None:
for key in data.keys():
images = data[key]
for im in images:
if not im:
continue
source = os.path.join(imagesroot, key, im)
target = os.path.join(foldername, key, im)
if not os.path.isfile(target):
utils.copy_file(source, target)
def prepare_data(mit67_root: str):
test_file = os.path.join(mit67_root, "meta", "MIT67_test.csv")
test_data = load_test_csv_data(test_file)
# train data is split into 4 files for some reason
train1_file = os.path.join(mit67_root, "meta", "MIT67_train1.csv")
train2_file = os.path.join(mit67_root, "meta", "MIT67_train2.csv")
train3_file = os.path.join(mit67_root, "meta", "MIT67_train3.csv")
train4_file = os.path.join(mit67_root, "meta", "MIT67_train4.csv")
train_files = [train1_file, train2_file, train3_file, train4_file]
train_data = defaultdict(list)
for tf in train_files:
this_data = load_train_csv_data(tf)
train_data.update(this_data)
# make classname directories for train and test
for key in test_data.keys():
os.makedirs(os.path.join(mit67_root, "test", key), exist_ok=True)
os.makedirs(os.path.join(mit67_root, "train", key), exist_ok=True)
# copy images to the right locations
imagesroot = os.path.join(mit67_root, "Images")
testfoldername = os.path.join(mit67_root, "test")
copy_data_helper(test_data, imagesroot, testfoldername)
trainfoldername = os.path.join(mit67_root, "train")
copy_data_helper(train_data, imagesroot, trainfoldername)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataroot",
type=str,
default="C:\\Users\\dedey\\dataroot",
help="root directory where mit67 folder is intended to exist. If mit67 already exists in the format required this script will skip downloading",
)
args = parser.parse_args()
# check that dataset is in format required
# else download and prepare dataset
if not check_mit67(args.dataroot):
# make mit67 directory
mit67 = os.path.join(args.dataroot, "mit67")
train = os.path.join(mit67, "train")
test = os.path.join(mit67, "test")
meta = os.path.join(mit67, "meta")
os.makedirs(mit67, exist_ok=True)
os.makedirs(train, exist_ok=True)
os.makedirs(test, exist_ok=True)
os.makedirs(meta, exist_ok=True)
# this step will create folder mit67/Images
# which has all the images for each class in its own subfolder
download(mit67)
# download the csv files for the train and test split
# from 'NAS Evaluation is Frustrating' repo
# note that download_url doesn't work in vscode debug mode
test_file_url = "https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_test.csv"
train_file_urls = [
"https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train1.csv",
"https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train2.csv",
"https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train3.csv",
"https://raw.githubusercontent.com/antoyang/NAS-Benchmark/master/data/MIT67_train4.csv",
]
download_url(test_file_url, meta, filename=None, md5=None)
for tu in train_file_urls:
download_url(tu, meta, filename=None, md5=None)
prepare_data(mit67)
|
archai/scripts/supergraph/download_datasets/mit67_install.py/0
|
{
"file_path": "archai/scripts/supergraph/download_datasets/mit67_install.py",
"repo_id": "archai",
"token_count": 2513
}
| 351 |
import pickle
from archai.common import utils
def main():
in_dataset_file = utils.full_path("~/dataroot/nasbench_ds/nasbench_full.tfrecord.pkl")
out_dataset_file = utils.full_path("~/dataroot/nasbench_ds/nasbench101_sample.tfrecord.pkl")
with open(in_dataset_file, "rb") as f:
records = pickle.load(f)
sampled_indices = set()
adj_samples = 1000
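    # Take `adj_samples` consecutive records starting at a few anchor points
    # (the head, two interior offsets, and the tail of the dataset).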
for i in [0, 4000, 40000, len(records) - 1 - adj_samples + 1]:
sampled_indices = sampled_indices.union([i + k for k in range(adj_samples)])
sampled_hashes = set(records[i][0] for i in sorted(list(sampled_indices)))
sampled = [r for r in records if r[0] in sampled_hashes]
with open(out_dataset_file, "wb") as f:
pickle.dump(sampled, f)
if __name__ == "__main__":
main()
|
archai/scripts/supergraph/nasbench101/sample_pkl.py/0
|
{
"file_path": "archai/scripts/supergraph/nasbench101/sample_pkl.py",
"repo_id": "archai",
"token_count": 336
}
| 352 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import torch
from transformers import AutoTokenizer
from archai.datasets.nlp.fast_hf_dataset_provider import (
FastDataCollatorForLanguageModeling,
FastHfDatasetProvider,
)
from archai.discrete_search.search_spaces.nlp.tfpp.modeling_codegen_flash import (
CodeGenFlashConfig,
CodeGenFlashSequential,
)
from archai.trainers.nlp.ds_trainer import DsTrainer
from archai.trainers.nlp.ds_training_args import DsTrainingArguments
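# Typical launch command (illustrative; assumes the DeepSpeed launcher is
# installed and the flags are adjusted to your setup):
#   deepspeed --num_gpus 8 train_codegen.py --ds_config_path ds_config.json --pipe_parallel_size 2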
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Trains/Fine-tunes a CodeGen model with DeepSpeed.")
parser.add_argument(
"-dn",
"--dataset_name",
type=str,
default="wikitext",
help="Name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"-dcn",
"--dataset_config_name",
type=str,
default="wikitext-103-raw-v1",
help="Configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"-ptm",
"--pre_trained_model_path",
type=str,
default=None,
help="Path to the pre-trained model.",
)
parser.add_argument(
"-ds",
"--ds_config_path",
type=str,
default=None,
help="Path to the DeepSpeed configuration file.",
)
parser.add_argument(
"-pps",
"--pipe_parallel_size",
type=int,
default=1,
help="Size of pipeline parallelism.",
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
default="",
help="Defines an output folder for the saved outputs.",
)
parser.add_argument(
"-l",
"--local_rank",
type=int,
default=-1,
help="Rank of process passed by the DeepSpeed launcher.",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
collator = FastDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
dataset_provider = FastHfDatasetProvider.from_hub(
args.dataset_name,
dataset_config_name=args.dataset_config_name,
tokenizer=tokenizer,
)
train_dataset = dataset_provider.get_train_dataset(seq_len=2048)
eval_dataset = dataset_provider.get_val_dataset(seq_len=2048)
config = CodeGenFlashConfig(
vocab_size=50304,
n_positions=2048,
n_embd=1024,
n_layer=20,
n_head=16,
rotary_dim=32,
pad_vocab_size_multiple=64,
attn_type="flash",
use_fused_mlp=True,
)
model = CodeGenFlashSequential(config)
print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")
if args.pre_trained_model_path is not None:
state_dict = torch.load(os.path.join(args.pre_trained_model_path, "mp_rank_00_model_states.pt"))
if state_dict["module"] is not None:
model.load_state_dict(state_dict["module"])
else:
for i, layer in enumerate(model.layers):
state_dict = torch.load(os.path.join(args.pre_trained_model_path, f"layer_{i:02d}-model_states.pt"))
layer.load_state_dict(state_dict)
training_args = DsTrainingArguments(
"ds-codegen",
ds_config=args.ds_config_path,
local_rank=args.local_rank,
max_steps=1000,
logging_steps=10,
save_steps=1000,
eval_steps=250,
eval_max_steps=25,
pipe_parallel_size=args.pipe_parallel_size,
pipe_parallel_loss_fn=model.loss,
)
trainer = DsTrainer(
model=model.layers if args.pipe_parallel_size > 0 else model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
trainer.train()
|
archai/scripts/trainers/deepspeed/train_codegen.py/0
|
{
"file_path": "archai/scripts/trainers/deepspeed/train_codegen.py",
"repo_id": "archai",
"token_count": 1800
}
| 353 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
from archai.common.store import ArchaiStore
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
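# Example usage (illustrative; requires the connection string of your Azure
# storage account in the environment):
#   export MODEL_STORAGE_CONNECTION_STRING="DefaultEndpointsProtocol=https;AccountName=...;AccountKey=..."
#   python download.py --name <friendly_name> --file model.onnx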
def download(con_str, experiment_name):
parser = argparse.ArgumentParser(
description="Download assets from azure blob store using friendly name.")
    parser.add_argument('--name', help='Friendly name of model to download (if not provided it downloads them all).')
parser.add_argument('--file', help='The optional name of the files to download instead of getting them all.')
args = parser.parse_args()
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
friendly_name = args.name
if not friendly_name:
friendly_names = [e['name'] for e in store.get_all_status_entities()]
else:
friendly_names = [friendly_name]
specific_file = args.file
for friendly_name in friendly_names:
downloaded = store.download(friendly_name, friendly_name, specific_file)
if len(downloaded) == 0 and specific_file:
print(f"file {specific_file} not found")
if __name__ == '__main__':
experiment_name = os.getenv("EXPERIMENT_NAME", "facesynthetics")
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
download(con_str, experiment_name)
|
archai/tasks/face_segmentation/aml/azure/download.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/download.py",
"repo_id": "archai",
"token_count": 534
}
| 354 |
# Readme
See Demo Video:
[](https://youtu.be/a8MfuyYpRQA)
This folder contains code that automates the search, partial training and inference latency testing in [Azure
ML](https://azure.microsoft.com/en-us/products/machine-learning/). The inference testing of ONNX models can be performed
across one or more machines that are connected via USB to Qualcomm 888 boards.
The code is organized into:
1. [Training](training/readme.md) code that plugs into the Archai Search to perform partial training
of selected models on a GPU cluster in Azure ML.
1. [SNPE Device](snpe/readme.md) code that uses [Microsoft
Olive](https://github.com/microsoft/olive) to drive the
[Qualcomm Neural Processing SDK](https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk) to talk
to the device, convert ONNX models to .dlc, quantize them, and test them on one or more
[Qualcomm 888 dev kits](https://developer.qualcomm.com/hardware/snapdragon-888-hdk).
1. [Azure Code](azure/readme.md) that talks to a configured Azure storage account for uploading
models to test, downloading them, uploading test results, and keeping an Azure status table that
summarizes results of all the work in progress.
1. [Docker](docker/quantizer/readme.md) contains scripts for setting up your Azure account and optionally
creating a docker image for running in an Azure Kubernetes cluster to do model quantization using
the Qualcomm Neural Processing SDK. Quantization is time consuming so having an elastic scale speeds
things up a lot.
1. [Notebook](notebooks/results.ipynb) a simple Jupyter notebook for visualizing the
results found in your Azure table.
## Results
The jupyter notebook can be used to visualize the results of the search iterations as they are
happening. The following is an animation of the complete 20 search iterations, where the darker
colors are the early iterations and the brighter colors are the most recent iterations. The pareto
frontier models are highlighted in yellow. This clearly shows the general trend of model improvement
over time on each new iteration.

The following animation shows only the pareto models from each search iteration. These are the
models that get mutated during the evolutionary pareto search; all the other models have lower
validation scores and are discarded:

When the search completes you can run [train_pareto.py](../../train_pareto.py) to fully train the
pareto models. When training is finished you can visualize the results of full training in the notebook and you should see something like this:

Then you can run [snp_test.py](../../snp_test.py) to compute the F1 scores for these fully trained
models on your Qualcomm hardware, the following is a plot you can get from the notebook showing the
final results comparing F1 accuracy with inference latency. Notice that the Qualcomm hardware F1
score mostly matches our earlier `val_iou` pareto curve, but not exactly. The dots shown in gray
have fallen off the pareto frontier. This is why it is always good to test your models on the target
hardware. Even better if that testing can be done in the search loop so that the search finds
models that work well on the target hardware, as we have done in this face segmentation example:

## Workflow
The overall workflow begins with the top level [aml.py](../../aml.py) script which starts with an
Archai Search that contains an `AmlPartialTrainingEvaluator` and a `RemoteAzureBenchmarkEvaluator`.
The remote benchmark evaluator performs inference latency testing on Qualcomm hardware. The
`AmlPartialTrainingEvaluator` then kicks off one new Azure ML training pipeline for each batch of
new model architectures that need to be partially trained, it stores the validation IOU results in
an Azure blob store and an Azure table so the search can get those results and use them to figure
out the next iteration of the search algorithm:

See [AML Training Readme](training/readme.md) for more information.
## Remote Inference Testing
The remote inference testing workflow looks like this, the `RemoteAzureBenchmarkEvaluator` uploads
models to the same Azure blob store, and adds a row to the status table. This triggers remote
instances of the [runner.py](azure/runner.py) script to process these new models on an attached
Qualcomm device. Optionally some of the work can be done in the cloud using a Kubernetes cluster,
this includes model quantization and accuracy testing using the ONNX runtime. The workflow looks
like this:

Each instance of `runner.py` looks for work, and executes it in priority order where the
prioritization is defined by the `find_work_prioritized` function in the runner. This script is
completely restartable, and can distribute the work across multiple instances of the runner script.
Each instance will pick up where a previous one left off based on what it finds in your Azure status
table. The prioritization maps to the columns of the status table as follows:
1. **macs:** convert to .dlc and post Macs score and `snpe-dlc-viewer` output and do model quantization (runs on Linux) - priority 20
1. **total_inference_avg** run `snpe_bench.py` with quantized model on Qualcomm device DSP - priority 30
1. **f1_onnx** compute f1 from onnxruntime on .onnx model on a 10k test set on Linux - priority 60
1. **f1_1k** compute f1 on quantized .dlc model on Qualcomm device DSP with a 1k test set - priority
is the mean f1 score so that quicker models are prioritized.
1. **f1_1k_f** compute f1 on floating point .dlc model on Qualcomm device CPU with a 1k test set
- priority 10 * the mean f1 score so that quicker models are prioritized.
1. **f1_10k** compute f1 on quantized model on a 10k test set - priority = 100 * the mean f1 score
so that quicker models are prioritized.
Lower number means higher priority job and each machine will run the highest priority work first.
You can override the priority of a specific job by passing a `--priority` parameter to the `upload.py` script or by
editing the Azure status table and adding a `priority` field to the JSON stored there. You can set any priority number
you want; if you specify priority 0 it will run before anything else, which can be handy if you have a cool new model
that you want to bump to the top of the list.
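For example, a hypothetical invocation that bumps a model to the top of the queue (the exact
flags of `upload.py` may differ; check `python azure/upload.py --help`):

```shell
python azure/upload.py my_new_model.onnx --priority 0
```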
Notice that some of the above jobs can run on Linux and do not require a Qualcomm device. So in order to maximize throughput on
machines that do have Qualcomm devices you can allocate other Linux machines with no Qualcomm devices to do the other
work, namely, converting models, quantizing them, and running the `f1_onnx` test.
Folks across your team can use `azure/upload.py` to submit jobs and let them run, or they can automate that as
shown in the `RemoteAzureBenchmarkEvaluator` in the `search.py` script.
You can use `status.py` to monitor progress or look at the Azure status table. Various status messages are posted
there so you can see which machine is doing what and what stage of the job it is in.
Next you can go to the `notebook` page and get some pretty pictures of your Pareto Curves.
## Azure Portal
When everything is running you will see progress happening in your Azure status table. Here you see the snpe-quantizer
kubernetes cluster is quantizing a bunch of models while other machines are running the benchmark tests on the Qualcomm
hardware:

|
archai/tasks/face_segmentation/aml/readme.md/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/readme.md",
"repo_id": "archai",
"token_count": 1973
}
| 355 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import os
from pathlib import Path
from typing import List
from archai.common.store import ArchaiStore
from azure.ai.ml import MLClient
from archai.discrete_search.api import ArchaiModel
from archai.discrete_search.search_spaces.config import ArchConfig
from azure.ai.ml import command, Input, Output, dsl
from azure.ai.ml.entities import UserIdentityConfiguration
from archai.common.config import Config
from aml.util.setup import copy_code_folder, get_valid_arch_id
from shutil import copyfile
def training_component(output_path: str, code_dir: Path, config, training_epochs: int, config_filename: str, model_id: str, arch: str):
# we need a folder containing all the specific code we need here, which is not everything in this repo.
training = config['training']
learning_rate = training['learning_rate']
batch_size = training['batch_size']
aml_config = config['aml']
environment_name = aml_config['environment_name']
con_str = aml_config['connection_str']
fixed_args = f'--lr {learning_rate} --batch_size {batch_size} ' +\
f'--epochs {int(training_epochs)} --model_id {model_id} --config {config_filename} ' +\
f'{arch}'
return command(
name="train",
display_name="Archai training job",
description="Trains a face segmentation model.",
inputs={
"data": Input(type="uri_folder", mode="download")
},
is_deterministic=False,
outputs={
"results": Output(type="uri_folder", path=output_path, mode="rw_mount")
},
environment_variables={'MODEL_STORAGE_CONNECTION_STRING': con_str},
identity=UserIdentityConfiguration(),
# The source folder of the component
code=str(code_dir),
command="""python3 train.py \
--dataset_dir ${{inputs.data}} \
--output_dir ${{outputs.results}} \
""" + fixed_args,
environment=environment_name,
)
def start_training_pipeline(description: str, ml_client: MLClient, store: ArchaiStore,
model_architectures: List[ArchaiModel],
config: Config, training_epochs: int, output_folder: Path):
""" Creates a new Azure ML Pipeline for training a set of models, updating the status of
these jobs in a given Azure Storage Table. This command does not wait for those jobs to
finish. For that use the monitor.py script which monitors the same Azure Storage Table
to find out when the jobs have all finished. The train.py script will update the table
when each training job completes. """
aml_config = config['aml']
training_cluster = aml_config['training_cluster']
compute_cluster_name = training_cluster['name']
datastore_path = aml_config['datastore_path']
root_uri = aml_config['results_path']
environment_name = aml_config['environment_name']
experiment_name = aml_config['experiment_name']
metric_key = config['training'].get('metric_key', 'val_iou')
print(f"Cluster: {compute_cluster_name}")
print(f"Dataset: {datastore_path}")
print(f"Output: {root_uri}")
print(f"Environment: {environment_name}")
print(f"Experiment: {experiment_name}")
print(f"Epochs: {training_epochs}")
code_dir = Path('temp_code')
os.makedirs(code_dir, exist_ok=True)
config_dir = code_dir / 'confs'
os.makedirs(config_dir, exist_ok=True)
archs_dir = code_dir / 'archs'
os.makedirs(archs_dir, exist_ok=True)
copyfile('train.py', str(code_dir / 'train.py'))
copy_code_folder('training', str(code_dir / 'training'))
copy_code_folder('search_space', str(code_dir / 'search_space'))
copy_code_folder(os.path.join('aml', 'training'), str(code_dir / 'aml' / 'training'))
copy_code_folder(os.path.join('aml', 'util'), str(code_dir / 'aml' / 'util'))
config.save(str(config_dir / 'aml_search.yaml'))
models = []
model_names = []
for arch in model_architectures:
model_id = get_valid_arch_id(arch)
model_names += [model_id]
print(f'Launching training job for model {model_id}')
# upload the model architecture to our blob store so we can find it later.
metadata: ArchConfig = arch.metadata['config']
filename = str(archs_dir / f'{model_id}.json')
metadata.to_file(filename)
store.upload_blob(f'{experiment_name}/{model_id}', filename, blob_name=f'{model_id}.json')
# create status entry in azure table
e = store.get_status(model_id)
e['experiment'] = experiment_name
e['epochs'] = training_epochs
e['status'] = 'preparing'
store.merge_status_entity(e)
models += [{
'id': model_id,
'status': 'training',
'epochs': training_epochs,
metric_key: e[metric_key] if metric_key in e else 0.0
}]
results = {
'models': models
}
@dsl.pipeline(
compute=compute_cluster_name,
description=description,
)
def parallel_training_pipeline(
data_input
):
outputs = {}
for arch in model_architectures:
model_id = get_valid_arch_id(arch)
output_path = f'{root_uri}/{model_id}'
filename = f'archs/{model_id}.json'
train_job = training_component(
output_path, code_dir, config, training_epochs, 'confs/aml_search.yaml', model_id, filename)(
data=data_input
)
outputs[model_id] = train_job.outputs.results
return outputs
training_pipeline = parallel_training_pipeline(
data_input=Input(type="uri_folder", path=datastore_path)
)
# submit the pipeline job
pipeline_job = ml_client.jobs.create_or_update(
training_pipeline,
experiment_name=experiment_name,
)
# Write the new list of pending models so that the make_monitor_command
# knows what to wait for.
print("Writing pending.json: ")
print(json.dumps(results, indent=2))
results_path = output_folder / 'pending.json'
with open(results_path, 'w') as f:
f.write(json.dumps(results, indent=2))
return (pipeline_job, model_names)
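# Hypothetical usage sketch (names and paths are illustrative; assumes a valid
# search config plus an MLClient and ArchaiStore constructed elsewhere):
#   config = Config('confs/aml_search.yaml')
#   pipeline_job, model_names = start_training_pipeline(
#       'full training', ml_client, store, top_models, config,
#       training_epochs=30, output_folder=Path('output'))
#   print(f'Submitted {pipeline_job.name} for models: {model_names}')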
|
archai/tasks/face_segmentation/aml/training/training_pipeline.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/training/training_pipeline.py",
"repo_id": "archai",
"token_count": 2578
}
| 356 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from pathlib import Path
import torch
import os
from argparse import ArgumentParser
from archai.discrete_search.search_spaces.config import ArchConfig
from search_space.hgnet import StackedHourglass
def export(checkpoint, model, onnx_file):
state_dict = checkpoint['state_dict']
# strip 'model.' prefix off the keys!
    state_dict = {k[len('model.'):]: v for k, v in state_dict.items()}
model.load_state_dict(state_dict)
input_shapes = [(1, 3, 256, 256)]
rand_range = (0.0, 1.0)
export_kwargs = {'opset_version': 11}
rand_min, rand_max = rand_range
sample_inputs = tuple(
[
((rand_max - rand_min) * torch.rand(*input_shape) + rand_min).type("torch.FloatTensor")
for input_shape in input_shapes
]
)
torch.onnx.export(
model,
sample_inputs,
onnx_file,
input_names=[f"input_{i}" for i in range(len(sample_inputs))],
**export_kwargs,
)
print(f'Exported {onnx_file}')
def main():
parser = ArgumentParser(
"Converts the final_model.ckpt to final_model.onnx, writing the onnx model to the same folder."
)
parser.add_argument('arch', type=Path, help="Path to config.json file describing the model architecture")
parser.add_argument('--checkpoint', help="Path of the checkpoint to export")
args = parser.parse_args()
checkpoint = torch.load(args.checkpoint)
# get the directory name from args.checkpoint
output_path = os.path.dirname(os.path.realpath(args.checkpoint))
base_name = os.path.splitext(os.path.basename(args.checkpoint))[0]
onnx_file = os.path.join(output_path, f'{base_name}.onnx')
arch_config = ArchConfig.from_file(args.arch)
model = StackedHourglass(arch_config, num_classes=18)
export(checkpoint, model, onnx_file)
if __name__ == '__main__':
main()
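# Expected invocation (paths are hypothetical):
#   python export.py output/model_1/config.json --checkpoint output/model_1/final_model.ckpt
# which writes output/model_1/final_model.onnx next to the checkpoint.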
|
archai/tasks/face_segmentation/export.py/0
|
{
"file_path": "archai/tasks/face_segmentation/export.py",
"repo_id": "archai",
"token_count": 756
}
| 357 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from archai.common.utils import download_and_extract_zip
import transforms
class FaceLandmarkDataset(Dataset):
"""Dataset class for Microsoft Face Synthetics dataset.
Args:
directory (str): Path to the directory containing the PNG images and landmarks files.
limit (int, optional): Maximum number of samples to load from the dataset. Defaults to None.
crop_size (int, optional): Size of the square crop to apply to the images. Defaults to 128.
Attributes:
png_files (list): List of paths to the PNG image files in the dataset.
transform (FaceLandmarkTransform): Transform to apply to the samples.
_num_landmarks (int): Number of landmarks in each sample.
Methods:
__len__(): Returns the number of samples in the dataset.
__getitem__(index): Returns the image and landmarks of the sample at the given index.
num_landmarks(): Returns the number of landmarks in each sample.
"""
def __init__(self, directory, limit=None, crop_size=128):
pattern = os.path.join(directory, "[0-9][0-9][0-9][0-9][0-9][0-9].png") # don't load *_seg.png files
self.png_files = glob.glob(pattern)
        if len(self.png_files) < 100000:
            # Dataset is missing or incomplete; fetch the full 100,000-image archive.
            zip_url = "https://facesyntheticspubwedata.blob.core.windows.net/iccv-2021/dataset_100000.zip"
            download_and_extract_zip(zip_url, directory)
self.png_files = glob.glob(pattern)
assert len(self.png_files) > 0, f"Can't find any PNG files in {directory}"
if limit is not None:
self.png_files = self.png_files[:limit]
self.transform = transforms.FaceLandmarkTransform(crop_size=crop_size)
self._num_landmarks = None
def __len__(self):
return len(self.png_files)
def __getitem__(self, index):
"""
Returns the image and landmarks of the sample at the given index.
Args:
index (int): Index of the sample to retrieve.
Returns:
tuple: A tuple containing the transformed image and landmarks of the sample.
"""
png_file = self.png_files[index]
image = Image.open(png_file)
label_file = png_file.replace(".png", "_ldmks.txt")
label = np.loadtxt(label_file, dtype=np.single)
        assert label.size > 0, f"Can't find data in landmarks file: {label_file}"
sample = transforms.Sample(image=image, landmarks=label)
assert sample is not None
sample_transformed = self.transform(sample)
assert sample_transformed is not None
return sample_transformed.image, sample_transformed.landmarks
@property
def num_landmarks(self):
"""
Returns the number of landmarks in each sample.
Returns:
int: The number of landmarks in each sample.
"""
if self._num_landmarks is None:
_, label = self.__getitem__(0)
self._num_landmarks = torch.numel(label)
return self._num_landmarks
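# Minimal usage sketch (directory path is hypothetical; the dataset downloads
# itself if fewer than 100,000 PNG files are found):
#   from torch.utils.data import DataLoader
#   dataset = FaceLandmarkDataset('/data/face_synthetics', limit=1000, crop_size=128)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   images, landmarks = next(iter(loader))
#   print(images.shape, dataset.num_landmarks)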
|
archai/tasks/facial_landmark_detection/dataset.py/0
|
{
"file_path": "archai/tasks/facial_landmark_detection/dataset.py",
"repo_id": "archai",
"token_count": 1232
}
| 358 |
pytest_plugins = [
"discrete_search.algos.fixtures.objectives",
"discrete_search.algos.fixtures.search_space",
"discrete_search.algos.fixtures.surrogate_model",
]
|
archai/tests/conftest.py/0
|
{
"file_path": "archai/tests/conftest.py",
"repo_id": "archai",
"token_count": 70
}
| 359 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pytest
from archai.discrete_search.algos.local_search import LocalSearch
@pytest.fixture(scope="session")
def output_dir(tmp_path_factory):
return tmp_path_factory.mktemp("out")
def test_local_search(output_dir, search_space, search_objectives):
algo = LocalSearch(
search_space,
search_objectives,
output_dir,
num_iters=2,
init_num_models=5,
mutations_per_parent=2,
seed=1,
)
search_results = algo.search()
assert len(os.listdir(output_dir)) > 0
df = search_results.get_search_state_df()
assert all(0 <= x <= 0.4 for x in df["Random1"].tolist())
all_models = [m for iter_r in search_results.results for m in iter_r["models"]]
# Checks if all registered models satisfy constraints
_, valid_models = search_objectives.validate_constraints(all_models)
assert len(valid_models) == len(all_models)
|
archai/tests/discrete_search/algos/test_local_search.py/0
|
{
"file_path": "archai/tests/discrete_search/algos/test_local_search.py",
"repo_id": "archai",
"token_count": 384
}
| 360 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from archai.discrete_search.predictors.dnn_ensemble import PredictiveDNNEnsemble
def test_dnn_ensemble():
X_train = np.random.rand(100, 5)
y_train = np.random.rand(100, 2)
X_test = np.random.rand(50, 5)
predictor = PredictiveDNNEnsemble(device="cpu")
predictor.fit(X_train, y_train)
y_pred = predictor.predict(X_test)
assert y_pred.mean.shape == (50, 2)
assert y_pred.var.shape == (50, 2)
|
archai/tests/discrete_search/predictors/test_dnn_ensemble.py/0
|
{
"file_path": "archai/tests/discrete_search/predictors/test_dnn_ensemble.py",
"repo_id": "archai",
"token_count": 202
}
| 361 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from transformers import GPT2Config, GPT2LMHeadModel
from archai.onnx.onnx_forward import gpt2_onnx_forward
def test_gpt2_onnx_forward():
# Assert that the forward method returns the expected keys
model = GPT2LMHeadModel(config=GPT2Config(vocab_size=128, n_layer=3))
input_ids = torch.zeros((1, 4), dtype=torch.long)
outputs_dict = gpt2_onnx_forward(model, input_ids)
assert "logits" in outputs_dict.keys()
assert "past_key_values" in outputs_dict.keys()
# Assert that the forward method returns the expected keys
model = GPT2LMHeadModel(config=GPT2Config(vocab_size=128, n_layer=3, use_cache=False))
input_ids = torch.zeros((1, 4), dtype=torch.long)
outputs_dict = gpt2_onnx_forward(model, input_ids)
assert "logits" in outputs_dict.keys()
assert "past_key_values" not in outputs_dict.keys()
|
archai/tests/onnx/test_onnx_forward.py/0
|
{
"file_path": "archai/tests/onnx/test_onnx_forward.py",
"repo_id": "archai",
"token_count": 348
}
| 362 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pytest
if os.name == "nt":
pytest.skip(allow_module_level=True)
from torch.utils.data import Dataset
from archai.trainers.nlp.ds_trainer import StatefulDistributedSampler
class DummyDataset(Dataset):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
def __getitem__(self, idx):
return idx
def test_stateful_distributed_sampler():
dataset = DummyDataset(100)
# Assert that the correct subset of indices is returned
sampler = StatefulDistributedSampler(dataset, num_replicas=1, rank=0, shuffle=False, total_consumed_samples=50)
expected_indices = [i for i in range(50, 100)]
assert list(iter(sampler)) == expected_indices
# Assert that the correct subset of indices is returned with more than one replica
sampler = StatefulDistributedSampler(dataset, num_replicas=2, rank=0, shuffle=False, total_consumed_samples=80)
expected_indices = [i for i in range(80, 100, 2)]
assert list(iter(sampler)) == expected_indices
|
archai/tests/trainers/nlp/test_ds_trainer.py/0
|
{
"file_path": "archai/tests/trainers/nlp/test_ds_trainer.py",
"repo_id": "archai",
"token_count": 398
}
| 363 |
# coding=utf-8
# --------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApiResourceLocation(Model):
"""ApiResourceLocation.
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'area': {'key': 'area', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
'route_template': {'key': 'routeTemplate', 'type': 'str'},
'resource_version': {'key': 'resourceVersion', 'type': 'int'},
'min_version': {'key': 'minVersion', 'type': 'float'},
'max_version': {'key': 'maxVersion', 'type': 'float'},
'released_version': {'key': 'releasedVersion', 'type': 'str'},
}
def __init__(self, id=None, area=None, resource_name=None,
route_template=None, resource_version=None,
min_version=None, max_version=None,
released_version=None):
super(ApiResourceLocation, self).__init__()
self.id = id
self.area = area
self.resource_name = resource_name
self.route_template = route_template
self.resource_version = resource_version
self.min_version = min_version
self.max_version = max_version
self.released_version = released_version
class CustomerIntelligenceEvent(Model):
"""CustomerIntelligenceEvent.
:param area:
:type area: str
:param feature:
:type feature: str
:param properties:
:type properties: dict
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'str'},
'feature': {'key': 'feature', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{object}'}
}
def __init__(self, area=None, feature=None, properties=None):
super(CustomerIntelligenceEvent, self).__init__()
self.area = area
self.feature = feature
self.properties = properties
class ImproperException(Model):
"""ImproperException.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str'}
}
def __init__(self, message=None):
super(ImproperException, self).__init__()
self.message = message
class ResourceAreaInfo(Model):
"""ResourceAreaInfo.
:param id:
:type id: str
:param location_url:
:type location_url: str
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'location_url': {'key': 'locationUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, id=None, location_url=None, name=None):
super(ResourceAreaInfo, self).__init__()
self.id = id
self.location_url = location_url
self.name = name
class SystemException(Model):
"""SystemException.
:param class_name:
:type class_name: str
:param inner_exception:
:type inner_exception: :class:`SystemException`
:param message:
:type message: str
"""
_attribute_map = {
'class_name': {'key': 'ClassName', 'type': 'str'},
'message': {'key': 'Message', 'type': 'str'},
'inner_exception': {'key': 'InnerException', 'type': 'SystemException'}
}
def __init__(self, class_name=None, message=None, inner_exception=None):
super(SystemException, self).__init__()
self.class_name = class_name
self.message = message
self.inner_exception = inner_exception
class VssJsonCollectionWrapperBase(Model):
"""VssJsonCollectionWrapperBase.
:param count:
:type count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'}
}
def __init__(self, count=None):
super(VssJsonCollectionWrapperBase, self).__init__()
self.count = count
class VssJsonCollectionWrapper(VssJsonCollectionWrapperBase):
"""VssJsonCollectionWrapper.
:param count:
:type count: int
:param value:
:type value: object
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, count=None, value=None):
super(VssJsonCollectionWrapper, self).__init__(count=count)
self.value = value
class WrappedException(Model):
"""WrappedException.
:param exception_id:
:type exception_id: str
:param inner_exception:
:type inner_exception: :class:`WrappedException`
:param message:
:type message: str
:param type_name:
:type type_name: str
:param type_key:
:type type_key: str
:param error_code:
:type error_code: int
:param event_id:
:type event_id: int
:param custom_properties:
:type custom_properties: dict
"""
_attribute_map = {
'exception_id': {'key': '$id', 'type': 'str'},
'inner_exception': {'key': 'innerException', 'type': 'WrappedException'},
'message': {'key': 'message', 'type': 'str'},
'type_name': {'key': 'typeName', 'type': 'str'},
'type_key': {'key': 'typeKey', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'int'},
'event_id': {'key': 'eventId', 'type': 'int'},
'custom_properties': {'key': 'customProperties', 'type': '{object}'}
}
def __init__(self, exception_id=None, inner_exception=None, message=None,
type_name=None, type_key=None, error_code=None, event_id=None, custom_properties=None):
super(WrappedException, self).__init__()
self.exception_id = exception_id
self.inner_exception = inner_exception
self.message = message
self.type_name = type_name
self.type_key = type_key
self.error_code = error_code
self.event_id = event_id
self.custom_properties = custom_properties
__all__ = [
'ApiResourceLocation',
'CustomerIntelligenceEvent',
'ImproperException',
'ResourceAreaInfo',
'SystemException',
'VssJsonCollectionWrapperBase',
'VssJsonCollectionWrapper',
'WrappedException'
]
|
azure-devops-python-api/azure-devops/azure/devops/_models.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/_models.py",
"repo_id": "azure-devops-python-api",
"token_count": 2588
}
| 364 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from ...v7_0.core.models import *
from .core_client import CoreClient
__all__ = [
'GraphSubjectBase',
'Identity',
'IdentityBase',
'IdentityData',
'IdentityRef',
'JsonPatchOperation',
'OperationReference',
'Process',
'ProcessReference',
'ProjectAvatar',
'ProjectInfo',
'ProjectProperties',
'ProjectProperty',
'Proxy',
'ProxyAuthorization',
'PublicKey',
'ReferenceLinks',
'TeamMember',
'TeamProject',
'TeamProjectCollection',
'TeamProjectCollectionReference',
'TeamProjectReference',
'WebApiConnectedService',
'WebApiConnectedServiceDetails',
'WebApiConnectedServiceRef',
'WebApiTeam',
'WebApiTeamRef',
'CoreClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/released/core/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/core/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 364
}
| 365 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.release import models
class ReleaseClient(Client):
"""Release
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(ReleaseClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'efc2f575-36ef-48e9-b672-0c6fb4a48ac5'
def get_approvals(self, project, assigned_to_filter=None, status_filter=None, release_ids_filter=None, type_filter=None, top=None, continuation_token=None, query_order=None, include_my_group_approvals=None):
"""GetApprovals.
Get a list of approvals
:param str project: Project ID or project name
:param str assigned_to_filter: Approvals assigned to this user.
:param str status_filter: Approvals with this status. Default is 'pending'.
:param [int] release_ids_filter: Approvals for release id(s) mentioned in the filter. Multiple releases can be mentioned by separating them with ',' e.g. releaseIdsFilter=1,2,3,4.
:param str type_filter: Approval with this type.
:param int top: Number of approvals to get. Default is 50.
:param int continuation_token: Gets the approvals after the continuation token provided.
:param str query_order: Gets the results in the defined order of created approvals. Default is 'descending'.
:param bool include_my_group_approvals: 'true' to include my group approvals. Default is 'false'.
:rtype: :class:`<[ReleaseApproval]> <azure.devops.v7_0.release.models.[ReleaseApproval]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if assigned_to_filter is not None:
query_parameters['assignedToFilter'] = self._serialize.query('assigned_to_filter', assigned_to_filter, 'str')
if status_filter is not None:
query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str')
if release_ids_filter is not None:
release_ids_filter = ",".join(map(str, release_ids_filter))
query_parameters['releaseIdsFilter'] = self._serialize.query('release_ids_filter', release_ids_filter, 'str')
if type_filter is not None:
query_parameters['typeFilter'] = self._serialize.query('type_filter', type_filter, 'str')
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if include_my_group_approvals is not None:
query_parameters['includeMyGroupApprovals'] = self._serialize.query('include_my_group_approvals', include_my_group_approvals, 'bool')
response = self._send(http_method='GET',
location_id='b47c6458-e73b-47cb-a770-4df1e8813a91',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ReleaseApproval]', self._unwrap_collection(response))
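    # Hypothetical usage sketch (assumes an authenticated azure.devops
    # Connection; the project name is illustrative):
    #   client = connection.clients.get_release_client()
    #   for approval in client.get_approvals(project='MyProject', status_filter='pending'):
    #       print(approval.id, approval.status)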
def update_release_approval(self, approval, project, approval_id):
"""UpdateReleaseApproval.
Update status of an approval
:param :class:`<ReleaseApproval> <azure.devops.v7_0.release.models.ReleaseApproval>` approval: ReleaseApproval object having status, approver and comments.
:param str project: Project ID or project name
:param int approval_id: Id of the approval.
:rtype: :class:`<ReleaseApproval> <azure.devops.v7_0.release.models.ReleaseApproval>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if approval_id is not None:
route_values['approvalId'] = self._serialize.url('approval_id', approval_id, 'int')
content = self._serialize.body(approval, 'ReleaseApproval')
response = self._send(http_method='PATCH',
location_id='9328e074-59fb-465a-89d9-b09c82ee5109',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('ReleaseApproval', response)
def get_release_task_attachment_content(self, project, release_id, environment_id, attempt_id, plan_id, timeline_id, record_id, type, name, **kwargs):
"""GetReleaseTaskAttachmentContent.
Get a release task attachment.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int environment_id: Id of the release environment.
:param int attempt_id: Attempt number of deployment.
:param str plan_id: Plan Id of the deploy phase.
:param str timeline_id: Timeline Id of the task.
:param str record_id: Record Id of attachment.
:param str type: Type of the attachment.
:param str name: Name of the attachment.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if attempt_id is not None:
route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int')
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
if timeline_id is not None:
route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str')
if record_id is not None:
route_values['recordId'] = self._serialize.url('record_id', record_id, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
if name is not None:
route_values['name'] = self._serialize.url('name', name, 'str')
response = self._send(http_method='GET',
location_id='60b86efb-7b8c-4853-8f9f-aa142b77b479',
version='7.0',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_release_task_attachments(self, project, release_id, environment_id, attempt_id, plan_id, type):
"""GetReleaseTaskAttachments.
Get the release task attachments.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int environment_id: Id of the release environment.
:param int attempt_id: Attempt number of deployment.
:param str plan_id: Plan Id of the deploy phase.
:param str type: Type of the attachment.
:rtype: [ReleaseTaskAttachment]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if attempt_id is not None:
route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int')
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='a4d06688-0dfa-4895-82a5-f43ec9452306',
version='7.0',
route_values=route_values)
return self._deserialize('[ReleaseTaskAttachment]', self._unwrap_collection(response))
def create_release_definition(self, release_definition, project):
"""CreateReleaseDefinition.
Create a release definition
:param :class:`<ReleaseDefinition> <azure.devops.v7_0.release.models.ReleaseDefinition>` release_definition: release definition object to create.
:param str project: Project ID or project name
:rtype: :class:`<ReleaseDefinition> <azure.devops.v7_0.release.models.ReleaseDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(release_definition, 'ReleaseDefinition')
response = self._send(http_method='POST',
location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('ReleaseDefinition', response)
def delete_release_definition(self, project, definition_id, comment=None, force_delete=None):
"""DeleteReleaseDefinition.
Delete a release definition.
:param str project: Project ID or project name
:param int definition_id: Id of the release definition.
:param str comment: Comment for deleting a release definition.
        :param bool force_delete: 'true' to automatically cancel any in-progress release deployments and proceed with release definition deletion. Default is 'false'.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
if force_delete is not None:
query_parameters['forceDelete'] = self._serialize.query('force_delete', force_delete, 'bool')
self._send(http_method='DELETE',
location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
def get_release_definition(self, project, definition_id, property_filters=None):
"""GetReleaseDefinition.
Get a release definition.
:param str project: Project ID or project name
:param int definition_id: Id of the release definition.
:param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release Definition will contain values for the specified property Ids (if they exist). If not set, properties will not be included.
:rtype: :class:`<ReleaseDefinition> <azure.devops.v7_0.release.models.ReleaseDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
response = self._send(http_method='GET',
location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReleaseDefinition', response)
def get_release_definitions(self, project, search_text=None, expand=None, artifact_type=None, artifact_source_id=None, top=None, continuation_token=None, query_order=None, path=None, is_exact_name_match=None, tag_filter=None, property_filters=None, definition_id_filter=None, is_deleted=None, search_text_contains_folder_name=None):
"""GetReleaseDefinitions.
Get a list of release definitions.
:param str project: Project ID or project name
:param str search_text: Get release definitions with names containing searchText.
:param str expand: The properties that should be expanded in the list of Release definitions.
:param str artifact_type: Release definitions with given artifactType will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild.
:param str artifact_source_id: Release definitions with given artifactSourceId will be returned. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json at https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions.
:param int top: Number of release definitions to get.
:param str continuation_token: Gets the release definitions after the continuation token provided.
:param str query_order: Gets the results in the defined order. Default is 'IdAscending'.
:param str path: Gets the release definitions under the specified path.
        :param bool is_exact_name_match: 'true' to get the release definitions with exact match as specified in searchText. Default is 'false'.
:param [str] tag_filter: A comma-delimited list of tags. Only release definitions with these tags will be returned.
:param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release Definitions will contain values for the specified property Ids (if they exist). If not set, properties will not be included. Note that this will not filter out any Release Definition from results irrespective of whether it has property set or not.
:param [str] definition_id_filter: A comma-delimited list of release definitions to retrieve.
:param bool is_deleted: 'true' to get release definitions that has been deleted. Default is 'false'
:param bool search_text_contains_folder_name: 'true' to get the release definitions under the folder with name as specified in searchText. Default is 'false'.
:rtype: :class:`<[ReleaseDefinition]> <azure.devops.v7_0.release.models.[ReleaseDefinition]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if search_text is not None:
query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if artifact_type is not None:
query_parameters['artifactType'] = self._serialize.query('artifact_type', artifact_type, 'str')
if artifact_source_id is not None:
query_parameters['artifactSourceId'] = self._serialize.query('artifact_source_id', artifact_source_id, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if is_exact_name_match is not None:
query_parameters['isExactNameMatch'] = self._serialize.query('is_exact_name_match', is_exact_name_match, 'bool')
if tag_filter is not None:
tag_filter = ",".join(tag_filter)
query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if definition_id_filter is not None:
definition_id_filter = ",".join(definition_id_filter)
query_parameters['definitionIdFilter'] = self._serialize.query('definition_id_filter', definition_id_filter, 'str')
if is_deleted is not None:
query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool')
if search_text_contains_folder_name is not None:
query_parameters['searchTextContainsFolderName'] = self._serialize.query('search_text_contains_folder_name', search_text_contains_folder_name, 'bool')
response = self._send(http_method='GET',
location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ReleaseDefinition]', self._unwrap_collection(response))
def update_release_definition(self, release_definition, project):
"""UpdateReleaseDefinition.
Update a release definition.
:param :class:`<ReleaseDefinition> <azure.devops.v7_0.release.models.ReleaseDefinition>` release_definition: Release definition object to update.
:param str project: Project ID or project name
:rtype: :class:`<ReleaseDefinition> <azure.devops.v7_0.release.models.ReleaseDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(release_definition, 'ReleaseDefinition')
response = self._send(http_method='PUT',
location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('ReleaseDefinition', response)
def get_deployments(self, project, definition_id=None, definition_environment_id=None, created_by=None, min_modified_time=None, max_modified_time=None, deployment_status=None, operation_status=None, latest_attempts_only=None, query_order=None, top=None, continuation_token=None, created_for=None, min_started_time=None, max_started_time=None, source_branch=None):
"""GetDeployments.
:param str project: Project ID or project name
:param int definition_id:
:param int definition_environment_id:
:param str created_by:
:param datetime min_modified_time:
:param datetime max_modified_time:
:param str deployment_status:
:param str operation_status:
:param bool latest_attempts_only:
:param str query_order:
:param int top:
:param int continuation_token:
:param str created_for:
:param datetime min_started_time:
:param datetime max_started_time:
:param str source_branch:
:rtype: :class:`<[Deployment]> <azure.devops.v7_0.release.models.[Deployment]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if definition_id is not None:
query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int')
if definition_environment_id is not None:
query_parameters['definitionEnvironmentId'] = self._serialize.query('definition_environment_id', definition_environment_id, 'int')
if created_by is not None:
query_parameters['createdBy'] = self._serialize.query('created_by', created_by, 'str')
if min_modified_time is not None:
query_parameters['minModifiedTime'] = self._serialize.query('min_modified_time', min_modified_time, 'iso-8601')
if max_modified_time is not None:
query_parameters['maxModifiedTime'] = self._serialize.query('max_modified_time', max_modified_time, 'iso-8601')
if deployment_status is not None:
query_parameters['deploymentStatus'] = self._serialize.query('deployment_status', deployment_status, 'str')
if operation_status is not None:
query_parameters['operationStatus'] = self._serialize.query('operation_status', operation_status, 'str')
if latest_attempts_only is not None:
query_parameters['latestAttemptsOnly'] = self._serialize.query('latest_attempts_only', latest_attempts_only, 'bool')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
if created_for is not None:
query_parameters['createdFor'] = self._serialize.query('created_for', created_for, 'str')
if min_started_time is not None:
query_parameters['minStartedTime'] = self._serialize.query('min_started_time', min_started_time, 'iso-8601')
if max_started_time is not None:
query_parameters['maxStartedTime'] = self._serialize.query('max_started_time', max_started_time, 'iso-8601')
if source_branch is not None:
query_parameters['sourceBranch'] = self._serialize.query('source_branch', source_branch, 'str')
response = self._send(http_method='GET',
location_id='b005ef73-cddc-448e-9ba2-5193bf36b19f',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Deployment]', self._unwrap_collection(response))
def get_release_environment(self, project, release_id, environment_id, expand=None):
"""GetReleaseEnvironment.
Get a release environment.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int environment_id: Id of the release environment.
:param str expand: A property that should be expanded in the environment.
:rtype: :class:`<ReleaseEnvironment> <azure.devops.v7_0.release.models.ReleaseEnvironment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReleaseEnvironment', response)
def update_release_environment(self, environment_update_data, project, release_id, environment_id):
"""UpdateReleaseEnvironment.
Update the status of a release environment
:param :class:`<ReleaseEnvironmentUpdateMetadata> <azure.devops.v7_0.release.models.ReleaseEnvironmentUpdateMetadata>` environment_update_data: Environment update meta data.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int environment_id: Id of release environment.
:rtype: :class:`<ReleaseEnvironment> <azure.devops.v7_0.release.models.ReleaseEnvironment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
content = self._serialize.body(environment_update_data, 'ReleaseEnvironmentUpdateMetadata')
response = self._send(http_method='PATCH',
location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('ReleaseEnvironment', response)
def delete_folder(self, project, path):
"""DeleteFolder.
        Deletes a definition folder for the given folder name and path, and all its existing definitions.
:param str project: Project ID or project name
:param str path: Path of the folder to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
self._send(http_method='DELETE',
location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
version='7.0',
route_values=route_values)
def get_folders(self, project, path=None, query_order=None):
"""GetFolders.
Gets folders.
:param str project: Project ID or project name
:param str path: Path of the folder.
:param str query_order: Gets the results in the defined order. Default is 'None'.
:rtype: [Folder]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
response = self._send(http_method='GET',
location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Folder]', self._unwrap_collection(response))
def update_folder(self, folder, project, path):
"""UpdateFolder.
Updates an existing folder at given existing path.
:param :class:`<Folder> <azure.devops.v7_0.release.models.Folder>` folder: folder.
:param str project: Project ID or project name
:param str path: Path of the folder to update.
:rtype: :class:`<Folder> <azure.devops.v7_0.release.models.Folder>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(folder, 'Folder')
response = self._send(http_method='PATCH',
location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Folder', response)
def update_gates(self, gate_update_metadata, project, gate_step_id):
"""UpdateGates.
Updates the gate for a deployment.
:param :class:`<GateUpdateMetadata> <azure.devops.v7_0.release.models.GateUpdateMetadata>` gate_update_metadata: Metadata to patch the Release Gates.
:param str project: Project ID or project name
:param int gate_step_id: Gate step Id.
:rtype: :class:`<ReleaseGates> <azure.devops.v7_0.release.models.ReleaseGates>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if gate_step_id is not None:
route_values['gateStepId'] = self._serialize.url('gate_step_id', gate_step_id, 'int')
content = self._serialize.body(gate_update_metadata, 'GateUpdateMetadata')
response = self._send(http_method='PATCH',
location_id='2666a539-2001-4f80-bcc7-0379956749d4',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('ReleaseGates', response)
def get_logs(self, project, release_id, **kwargs):
"""GetLogs.
Get logs for a release Id.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
response = self._send(http_method='GET',
location_id='c37fbab5-214b-48e4-a55b-cb6b4f6e4038',
version='7.0',
route_values=route_values,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_task_log(self, project, release_id, environment_id, release_deploy_phase_id, task_id, start_line=None, end_line=None, **kwargs):
"""GetTaskLog.
Gets the task log of a release as a plain text file.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int environment_id: Id of release environment.
:param int release_deploy_phase_id: Release deploy phase Id.
:param int task_id: ReleaseTask Id for the log.
:param long start_line: Starting line number for logs
:param long end_line: Ending line number for logs
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if release_deploy_phase_id is not None:
route_values['releaseDeployPhaseId'] = self._serialize.url('release_deploy_phase_id', release_deploy_phase_id, 'int')
if task_id is not None:
route_values['taskId'] = self._serialize.url('task_id', task_id, 'int')
query_parameters = {}
if start_line is not None:
query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long')
if end_line is not None:
query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long')
response = self._send(http_method='GET',
location_id='17c91af7-09fd-4256-bff1-c24ee4f73bc0',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_manual_intervention(self, project, release_id, manual_intervention_id):
"""GetManualIntervention.
Get manual intervention for a given release and manual intervention id.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int manual_intervention_id: Id of the manual intervention.
:rtype: :class:`<ManualIntervention> <azure.devops.v7_0.release.models.ManualIntervention>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if manual_intervention_id is not None:
route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int')
response = self._send(http_method='GET',
location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
version='7.0',
route_values=route_values)
return self._deserialize('ManualIntervention', response)
def get_manual_interventions(self, project, release_id):
"""GetManualInterventions.
List all manual interventions for a given release.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:rtype: [ManualIntervention]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
response = self._send(http_method='GET',
location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
version='7.0',
route_values=route_values)
return self._deserialize('[ManualIntervention]', self._unwrap_collection(response))
def update_manual_intervention(self, manual_intervention_update_metadata, project, release_id, manual_intervention_id):
"""UpdateManualIntervention.
Update manual intervention.
:param :class:`<ManualInterventionUpdateMetadata> <azure.devops.v7_0.release.models.ManualInterventionUpdateMetadata>` manual_intervention_update_metadata: Meta data to update manual intervention.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int manual_intervention_id: Id of the manual intervention.
:rtype: :class:`<ManualIntervention> <azure.devops.v7_0.release.models.ManualIntervention>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if manual_intervention_id is not None:
route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int')
content = self._serialize.body(manual_intervention_update_metadata, 'ManualInterventionUpdateMetadata')
response = self._send(http_method='PATCH',
location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('ManualIntervention', response)
def get_releases(self, project=None, definition_id=None, definition_environment_id=None, search_text=None, created_by=None, status_filter=None, environment_status_filter=None, min_created_time=None, max_created_time=None, query_order=None, top=None, continuation_token=None, expand=None, artifact_type_id=None, source_id=None, artifact_version_id=None, source_branch_filter=None, is_deleted=None, tag_filter=None, property_filters=None, release_id_filter=None, path=None):
"""GetReleases.
Get a list of releases
:param str project: Project ID or project name
:param int definition_id: Releases from this release definition Id.
:param int definition_environment_id:
:param str search_text: Releases with names containing searchText.
:param str created_by: Releases created by this user.
:param str status_filter: Releases that have this status.
:param int environment_status_filter:
:param datetime min_created_time: Releases that were created after this time.
:param datetime max_created_time: Releases that were created before this time.
:param str query_order: Gets the results in the defined order of created date for releases. Default is descending.
:param int top: Number of releases to get. Default is 50.
:param int continuation_token: Gets the releases after the continuation token provided.
:param str expand: The property that should be expanded in the list of releases.
:param str artifact_type_id: Releases with given artifactTypeId will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild.
:param str source_id: Unique identifier of the artifact used. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions.
:param str artifact_version_id: Releases with given artifactVersionId will be returned. E.g. in case of Build artifactType, it is buildId.
:param str source_branch_filter: Releases with given sourceBranchFilter will be returned.
:param bool is_deleted: Gets the soft deleted releases, if true.
:param [str] tag_filter: A comma-delimited list of tags. Only releases with these tags will be returned.
:param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Releases will contain values for the specified property Ids (if they exist). If not set, properties will not be included. Note that this will not filter out any Release from results irrespective of whether it has property set or not.
:param [int] release_id_filter: A comma-delimited list of releases Ids. Only releases with these Ids will be returned.
:param str path: Releases under this folder path will be returned
:rtype: :class:`<[Release]> <azure.devops.v7_0.release.models.[Release]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if definition_id is not None:
query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int')
if definition_environment_id is not None:
query_parameters['definitionEnvironmentId'] = self._serialize.query('definition_environment_id', definition_environment_id, 'int')
if search_text is not None:
query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str')
if created_by is not None:
query_parameters['createdBy'] = self._serialize.query('created_by', created_by, 'str')
if status_filter is not None:
query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str')
if environment_status_filter is not None:
query_parameters['environmentStatusFilter'] = self._serialize.query('environment_status_filter', environment_status_filter, 'int')
if min_created_time is not None:
query_parameters['minCreatedTime'] = self._serialize.query('min_created_time', min_created_time, 'iso-8601')
if max_created_time is not None:
query_parameters['maxCreatedTime'] = self._serialize.query('max_created_time', max_created_time, 'iso-8601')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if artifact_type_id is not None:
query_parameters['artifactTypeId'] = self._serialize.query('artifact_type_id', artifact_type_id, 'str')
if source_id is not None:
query_parameters['sourceId'] = self._serialize.query('source_id', source_id, 'str')
if artifact_version_id is not None:
query_parameters['artifactVersionId'] = self._serialize.query('artifact_version_id', artifact_version_id, 'str')
if source_branch_filter is not None:
query_parameters['sourceBranchFilter'] = self._serialize.query('source_branch_filter', source_branch_filter, 'str')
if is_deleted is not None:
query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool')
if tag_filter is not None:
tag_filter = ",".join(tag_filter)
query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if release_id_filter is not None:
release_id_filter = ",".join(map(str, release_id_filter))
query_parameters['releaseIdFilter'] = self._serialize.query('release_id_filter', release_id_filter, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
response = self._send(http_method='GET',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Release]', self._unwrap_collection(response))
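# Illustrative only (not part of the generated client): a minimal sketch of
# listing releases with the method above. 'release_client' is assumed to be an
# instance of this class obtained from an authenticated connection; the
# project name and filter values are placeholders.
#
#   releases = release_client.get_releases(project='MyProject',
#                                          status_filter='active',
#                                          top=50)
#   for release in releases:
#       print(release.id, release.name)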
def create_release(self, release_start_metadata, project):
"""CreateRelease.
Create a release.
:param :class:`<ReleaseStartMetadata> <azure.devops.v7_0.release.models.ReleaseStartMetadata>` release_start_metadata: Metadata to create a release.
:param str project: Project ID or project name
:rtype: :class:`<Release> <azure.devops.v7_0.release.models.Release>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(release_start_metadata, 'ReleaseStartMetadata')
response = self._send(http_method='POST',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Release', response)
def get_release(self, project, release_id, approval_filters=None, property_filters=None, expand=None, top_gate_records=None):
"""GetRelease.
Get a Release
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param str approval_filters: A filter that allows fetching approval steps selectively based on whether they are automated or manual. This also determines whether pre- and post-approval snapshots are fetched. Defaults to All.
:param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release will contain values for the specified property Ids (if they exist). If not set, properties will not be included.
:param str expand: A property that should be expanded in the release.
:param int top_gate_records: Number of release gate records to get. Default is 5.
:rtype: :class:`<Release> <azure.devops.v7_0.release.models.Release>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
query_parameters = {}
if approval_filters is not None:
query_parameters['approvalFilters'] = self._serialize.query('approval_filters', approval_filters, 'str')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if top_gate_records is not None:
query_parameters['$topGateRecords'] = self._serialize.query('top_gate_records', top_gate_records, 'int')
response = self._send(http_method='GET',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Release', response)
def get_release_revision(self, project, release_id, definition_snapshot_revision, **kwargs):
"""GetReleaseRevision.
Get release for a given revision number.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int definition_snapshot_revision: Definition snapshot revision number.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
query_parameters = {}
if definition_snapshot_revision is not None:
query_parameters['definitionSnapshotRevision'] = self._serialize.query('definition_snapshot_revision', definition_snapshot_revision, 'int')
response = self._send(http_method='GET',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='7.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
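# Illustrative only: the method above returns a stream. A sketch of saving it
# to disk, assuming stream_download yields byte chunks as in the other
# azure-devops clients (the IDs and file name are placeholders):
#
#   chunks = release_client.get_release_revision('MyProject', 42, 3)
#   with open('release_revision.txt', 'wb') as f:
#       for chunk in chunks:
#           f.write(chunk)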
def update_release(self, release, project, release_id):
"""UpdateRelease.
Update a complete release object.
:param :class:`<Release> <azure.devops.v7_0.release.models.Release>` release: Release object for update.
:param str project: Project ID or project name
:param int release_id: Id of the release to update.
:rtype: :class:`<Release> <azure.devops.v7_0.release.models.Release>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
content = self._serialize.body(release, 'Release')
response = self._send(http_method='PUT',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Release', response)
def update_release_resource(self, release_update_metadata, project, release_id):
"""UpdateReleaseResource.
Update few properties of a release.
:param :class:`<ReleaseUpdateMetadata> <azure.devops.v7_0.release.models.ReleaseUpdateMetadata>` release_update_metadata: Properties of release to update.
:param str project: Project ID or project name
:param int release_id: Id of the release to update.
:rtype: :class:`<Release> <azure.devops.v7_0.release.models.Release>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
content = self._serialize.body(release_update_metadata, 'ReleaseUpdateMetadata')
response = self._send(http_method='PATCH',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Release', response)
def get_definition_revision(self, project, definition_id, revision, **kwargs):
"""GetDefinitionRevision.
Get release definition for a given definitionId and revision
:param str project: Project ID or project name
:param int definition_id: Id of the definition.
:param int revision: Id of the revision.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
if revision is not None:
route_values['revision'] = self._serialize.url('revision', revision, 'int')
response = self._send(http_method='GET',
location_id='258b82e0-9d41-43f3-86d6-fef14ddd44bc',
version='7.0',
route_values=route_values,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_release_definition_history(self, project, definition_id):
"""GetReleaseDefinitionHistory.
Get revision history for a release definition
:param str project: Project ID or project name
:param int definition_id: Id of the definition.
:rtype: [ReleaseDefinitionRevision]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
response = self._send(http_method='GET',
location_id='258b82e0-9d41-43f3-86d6-fef14ddd44bc',
version='7.0',
route_values=route_values)
return self._deserialize('[ReleaseDefinitionRevision]', self._unwrap_collection(response))
|
azure-devops-python-api/azure-devops/azure/devops/released/release/release_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/released/release/release_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 22588
}
| 366 |
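A brief usage sketch for the ReleaseClient above. It follows the standard azure-devops connection flow; the organization URL, personal access token, and project name are placeholders, not values from this file.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholders: substitute your organization URL and a personal access token.
connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-pat'))

# The released client factory returns the ReleaseClient defined above.
release_client = connection.clients.get_release_client()
release = release_client.get_release(project='MyProject', release_id=1)
print(release.name, release.status)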
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class ServiceHooksClient(Client):
"""ServiceHooks
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(ServiceHooksClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def get_consumer_action(self, consumer_id, consumer_action_id, publisher_id=None):
"""GetConsumerAction.
Get details about a specific consumer action.
:param str consumer_id: ID for a consumer.
:param str consumer_action_id: ID for a consumer action.
:param str publisher_id:
:rtype: :class:`<ConsumerAction> <azure.devops.v7_0.service_hooks.models.ConsumerAction>`
"""
route_values = {}
if consumer_id is not None:
route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str')
if consumer_action_id is not None:
route_values['consumerActionId'] = self._serialize.url('consumer_action_id', consumer_action_id, 'str')
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='c3428e90-7a69-4194-8ed8-0f153185ee0d',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ConsumerAction', response)
def list_consumer_actions(self, consumer_id, publisher_id=None):
"""ListConsumerActions.
Get a list of consumer actions for a specific consumer.
:param str consumer_id: ID for a consumer.
:param str publisher_id:
:rtype: [ConsumerAction]
"""
route_values = {}
if consumer_id is not None:
route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str')
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='c3428e90-7a69-4194-8ed8-0f153185ee0d',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ConsumerAction]', self._unwrap_collection(response))
def get_consumer(self, consumer_id, publisher_id=None):
"""GetConsumer.
Get a specific consumer service. Optionally filter out consumer actions that do not support any event types for the specified publisher.
:param str consumer_id: ID for a consumer.
:param str publisher_id:
:rtype: :class:`<Consumer> <azure.devops.v7_0.service_hooks.models.Consumer>`
"""
route_values = {}
if consumer_id is not None:
route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str')
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='4301c514-5f34-4f5d-a145-f0ea7b5b7d19',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Consumer', response)
def list_consumers(self, publisher_id=None):
"""ListConsumers.
Get a list of available service hook consumer services. Optionally filter by consumers that support at least one event type from the specified publisher.
:param str publisher_id:
:rtype: [Consumer]
"""
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='4301c514-5f34-4f5d-a145-f0ea7b5b7d19',
version='7.0',
query_parameters=query_parameters)
return self._deserialize('[Consumer]', self._unwrap_collection(response))
def get_subscription_diagnostics(self, subscription_id):
"""GetSubscriptionDiagnostics.
:param str subscription_id:
:rtype: :class:`<SubscriptionDiagnostics> <azure.devops.v7_0.service_hooks.models.SubscriptionDiagnostics>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
response = self._send(http_method='GET',
location_id='3b36bcb5-02ad-43c6-bbfa-6dfc6f8e9d68',
version='7.0',
route_values=route_values)
return self._deserialize('SubscriptionDiagnostics', response)
def update_subscription_diagnostics(self, update_parameters, subscription_id):
"""UpdateSubscriptionDiagnostics.
:param :class:`<UpdateSubscripitonDiagnosticsParameters> <azure.devops.v7_0.service_hooks.models.UpdateSubscripitonDiagnosticsParameters>` update_parameters:
:param str subscription_id:
:rtype: :class:`<SubscriptionDiagnostics> <azure.devops.v7_0.service_hooks.models.SubscriptionDiagnostics>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(update_parameters, 'UpdateSubscripitonDiagnosticsParameters')
response = self._send(http_method='PUT',
location_id='3b36bcb5-02ad-43c6-bbfa-6dfc6f8e9d68',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('SubscriptionDiagnostics', response)
def get_event_type(self, publisher_id, event_type_id):
"""GetEventType.
Get a specific event type.
:param str publisher_id: ID for a publisher.
:param str event_type_id:
:rtype: :class:`<EventTypeDescriptor> <azure.devops.v7_0.service_hooks.models.EventTypeDescriptor>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
if event_type_id is not None:
route_values['eventTypeId'] = self._serialize.url('event_type_id', event_type_id, 'str')
response = self._send(http_method='GET',
location_id='db4777cd-8e08-4a84-8ba3-c974ea033718',
version='7.0',
route_values=route_values)
return self._deserialize('EventTypeDescriptor', response)
def list_event_types(self, publisher_id):
"""ListEventTypes.
Get the event types for a specific publisher.
:param str publisher_id: ID for a publisher.
:rtype: [EventTypeDescriptor]
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='db4777cd-8e08-4a84-8ba3-c974ea033718',
version='7.0',
route_values=route_values)
return self._deserialize('[EventTypeDescriptor]', self._unwrap_collection(response))
def get_notification(self, subscription_id, notification_id):
"""GetNotification.
Get a specific notification for a subscription.
:param str subscription_id: ID for a subscription.
:param int notification_id:
:rtype: :class:`<Notification> <azure.devops.v7_0.service_hooks.models.Notification>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
if notification_id is not None:
route_values['notificationId'] = self._serialize.url('notification_id', notification_id, 'int')
response = self._send(http_method='GET',
location_id='0c62d343-21b0-4732-997b-017fde84dc28',
version='7.0',
route_values=route_values)
return self._deserialize('Notification', response)
def get_notifications(self, subscription_id, max_results=None, status=None, result=None):
"""GetNotifications.
Get a list of notifications for a specific subscription. A notification includes details about the event, the request to and the response from the consumer service.
:param str subscription_id: ID for a subscription.
:param int max_results: Maximum number of notifications to return. Default is **100**.
:param str status: Get only notifications with this status.
:param str result: Get only notifications with this result type.
:rtype: [Notification]
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
query_parameters = {}
if max_results is not None:
query_parameters['maxResults'] = self._serialize.query('max_results', max_results, 'int')
if status is not None:
query_parameters['status'] = self._serialize.query('status', status, 'str')
if result is not None:
query_parameters['result'] = self._serialize.query('result', result, 'str')
response = self._send(http_method='GET',
location_id='0c62d343-21b0-4732-997b-017fde84dc28',
version='7.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Notification]', self._unwrap_collection(response))
def query_notifications(self, query):
"""QueryNotifications.
Query for notifications. A notification includes details about the event, the request to and the response from the consumer service.
:param :class:`<NotificationsQuery> <azure.devops.v7_0.service_hooks.models.NotificationsQuery>` query:
:rtype: :class:`<NotificationsQuery> <azure.devops.v7_0.service_hooks.models.NotificationsQuery>`
"""
content = self._serialize.body(query, 'NotificationsQuery')
response = self._send(http_method='POST',
location_id='1a57562f-160a-4b5c-9185-905e95b39d36',
version='7.0',
content=content)
return self._deserialize('NotificationsQuery', response)
def query_input_values(self, input_values_query, publisher_id):
"""QueryInputValues.
:param :class:`<InputValuesQuery> <azure.devops.v7_0.service_hooks.models.InputValuesQuery>` input_values_query:
:param str publisher_id:
:rtype: :class:`<InputValuesQuery> <azure.devops.v7_0.service_hooks.models.InputValuesQuery>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
content = self._serialize.body(input_values_query, 'InputValuesQuery')
response = self._send(http_method='POST',
location_id='d815d352-a566-4dc1-a3e3-fd245acf688c',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('InputValuesQuery', response)
def get_publisher(self, publisher_id):
"""GetPublisher.
Get a specific service hooks publisher.
:param str publisher_id: ID for a publisher.
:rtype: :class:`<Publisher> <azure.devops.v7_0.service_hooks.models.Publisher>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731',
version='7.0',
route_values=route_values)
return self._deserialize('Publisher', response)
def list_publishers(self):
"""ListPublishers.
Get a list of publishers.
:rtype: [Publisher]
"""
response = self._send(http_method='GET',
location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731',
version='7.0')
return self._deserialize('[Publisher]', self._unwrap_collection(response))
def query_publishers(self, query):
"""QueryPublishers.
Query for service hook publishers.
:param :class:`<PublishersQuery> <azure.devops.v7_0.service_hooks.models.PublishersQuery>` query:
:rtype: :class:`<PublishersQuery> <azure.devops.v7_0.service_hooks.models.PublishersQuery>`
"""
content = self._serialize.body(query, 'PublishersQuery')
response = self._send(http_method='POST',
location_id='99b44a8a-65a8-4670-8f3e-e7f7842cce64',
version='7.0',
content=content)
return self._deserialize('PublishersQuery', response)
def create_subscription(self, subscription):
"""CreateSubscription.
Create a subscription.
:param :class:`<Subscription> <azure.devops.v7_0.service_hooks.models.Subscription>` subscription: Subscription to be created.
:rtype: :class:`<Subscription> <azure.devops.v7_0.service_hooks.models.Subscription>`
"""
content = self._serialize.body(subscription, 'Subscription')
response = self._send(http_method='POST',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='7.0',
content=content)
return self._deserialize('Subscription', response)
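# Illustrative only: a hypothetical Subscription body for the method above.
# Field names follow azure.devops.v7_0.service_hooks.models.Subscription; the
# project GUID and target URL are placeholders.
#
#   from azure.devops.v7_0.service_hooks.models import Subscription
#   subscription = Subscription(
#       publisher_id='tfs',
#       event_type='git.push',
#       consumer_id='webHooks',
#       consumer_action_id='httpRequest',
#       publisher_inputs={'projectId': '<project-guid>'},
#       consumer_inputs={'url': 'https://example.com/hook'})
#   created = service_hooks_client.create_subscription(subscription)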
def delete_subscription(self, subscription_id):
"""DeleteSubscription.
Delete a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
self._send(http_method='DELETE',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='7.0',
route_values=route_values)
def get_subscription(self, subscription_id):
"""GetSubscription.
Get a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
:rtype: :class:`<Subscription> <azure.devops.v7_0.service_hooks.models.Subscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
response = self._send(http_method='GET',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='7.0',
route_values=route_values)
return self._deserialize('Subscription', response)
def list_subscriptions(self, publisher_id=None, event_type=None, consumer_id=None, consumer_action_id=None):
"""ListSubscriptions.
Get a list of subscriptions.
:param str publisher_id: ID for a publisher.
:param str event_type: The event type to filter on (if any).
:param str consumer_id: ID for a consumer.
:param str consumer_action_id: ID for a consumer action.
:rtype: [Subscription]
"""
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
if event_type is not None:
query_parameters['eventType'] = self._serialize.query('event_type', event_type, 'str')
if consumer_id is not None:
query_parameters['consumerId'] = self._serialize.query('consumer_id', consumer_id, 'str')
if consumer_action_id is not None:
query_parameters['consumerActionId'] = self._serialize.query('consumer_action_id', consumer_action_id, 'str')
response = self._send(http_method='GET',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='7.0',
query_parameters=query_parameters)
return self._deserialize('[Subscription]', self._unwrap_collection(response))
def replace_subscription(self, subscription, subscription_id=None):
"""ReplaceSubscription.
Update a subscription.
:param :class:`<Subscription> <azure.devops.v7_0.service_hooks.models.Subscription>` subscription:
:param str subscription_id: ID for a subscription that you wish to update.
:rtype: :class:`<Subscription> <azure.devops.v7_0.service_hooks.models.Subscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(subscription, 'Subscription')
response = self._send(http_method='PUT',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='7.0',
route_values=route_values,
content=content)
return self._deserialize('Subscription', response)
def create_subscriptions_query(self, query):
"""CreateSubscriptionsQuery.
Query for service hook subscriptions.
:param :class:`<SubscriptionsQuery> <azure.devops.v7_0.service_hooks.models.SubscriptionsQuery>` query:
:rtype: :class:`<SubscriptionsQuery> <azure.devops.v7_0.service_hooks.models.SubscriptionsQuery>`
"""
content = self._serialize.body(query, 'SubscriptionsQuery')
response = self._send(http_method='POST',
location_id='c7c3c1cf-9e05-4c0d-a425-a0f922c2c6ed',
version='7.0',
content=content)
return self._deserialize('SubscriptionsQuery', response)
def create_test_notification(self, test_notification, use_real_data=None):
"""CreateTestNotification.
Sends a test notification. This is useful for verifying the configuration of an updated or new service hooks subscription.
:param :class:`<Notification> <azure.devops.v7_0.service_hooks.models.Notification>` test_notification:
:param bool use_real_data: Only allow testing with real data in existing subscriptions.
:rtype: :class:`<Notification> <azure.devops.v7_0.service_hooks.models.Notification>`
"""
query_parameters = {}
if use_real_data is not None:
query_parameters['useRealData'] = self._serialize.query('use_real_data', use_real_data, 'bool')
content = self._serialize.body(test_notification, 'Notification')
response = self._send(http_method='POST',
location_id='1139462c-7e27-4524-a997-31b9b73551fe',
version='7.0',
query_parameters=query_parameters,
content=content)
return self._deserialize('Notification', response)
|
azure-devops-python-api/azure-devops/azure/devops/v7_0/service_hooks/service_hooks_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/service_hooks/service_hooks_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 9540
}
| 367 |
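A short, hedged sketch of driving the ServiceHooksClient above: enumerate publishers and the event types each one raises. It assumes the released client factory exposes get_service_hooks_client(); the organization URL and token are placeholders.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-pat'))
hooks_client = connection.clients.get_service_hooks_client()

# Walk the first few publishers and print the event types each one raises.
for publisher in hooks_client.list_publishers()[:3]:
    print(publisher.id)
    for event_type in hooks_client.list_event_types(publisher.id):
        print(' ', event_type.id)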
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ClientTraceEvent(Model):
"""
:param area:
:type area: str
:param component:
:type component: str
:param exception_type:
:type exception_type: str
:param feature:
:type feature: str
:param level:
:type level: object
:param message:
:type message: str
:param method:
:type method: str
:param properties:
:type properties: dict
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'str'},
'component': {'key': 'component', 'type': 'str'},
'exception_type': {'key': 'exceptionType', 'type': 'str'},
'feature': {'key': 'feature', 'type': 'str'},
'level': {'key': 'level', 'type': 'object'},
'message': {'key': 'message', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{object}'}
}
def __init__(self, area=None, component=None, exception_type=None, feature=None, level=None, message=None, method=None, properties=None):
super(ClientTraceEvent, self).__init__()
self.area = area
self.component = component
self.exception_type = exception_type
self.feature = feature
self.level = level
self.message = message
self.method = method
self.properties = properties
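# Illustrative only: constructing the model above with placeholder values.
#
#   event = ClientTraceEvent(area='Build', component='Pipeline',
#                            feature='Run', level=2,
#                            message='step completed', method='run_step',
#                            properties={'durationMs': 1200})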
__all__ = [
'ClientTraceEvent',
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/client_trace/models.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/client_trace/models.py",
"repo_id": "azure-devops-python-api",
"token_count": 664
}
| 368 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .extension_management_client import ExtensionManagementClient
__all__ = [
'AcquisitionOperation',
'AcquisitionOperationDisallowReason',
'AcquisitionOptions',
'Contribution',
'ContributionBase',
'ContributionConstraint',
'ContributionPropertyDescription',
'ContributionType',
'ExtensionAcquisitionRequest',
'ExtensionAuditLog',
'ExtensionAuditLogEntry',
'ExtensionAuthorization',
'ExtensionBadge',
'ExtensionDataCollection',
'ExtensionDataCollectionQuery',
'ExtensionEventCallback',
'ExtensionEventCallbackCollection',
'ExtensionFile',
'ExtensionIdentifier',
'ExtensionLicensing',
'ExtensionManifest',
'ExtensionPolicy',
'ExtensionRequest',
'ExtensionShare',
'ExtensionState',
'ExtensionStatistic',
'ExtensionVersion',
'GraphSubjectBase',
'IdentityRef',
'InstallationTarget',
'InstalledExtension',
'InstalledExtensionQuery',
'InstalledExtensionState',
'InstalledExtensionStateIssue',
'LicensingOverride',
'PublishedExtension',
'PublisherFacts',
'ReferenceLinks',
'RequestedExtension',
'UserExtensionPolicy',
'ExtensionManagementClient'
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/extension_management/__init__.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/extension_management/__init__.py",
"repo_id": "azure-devops-python-api",
"token_count": 513
}
| 369 |
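A hedged usage sketch for the ExtensionManagementClient exported above. It assumes the v7.1 client factory exposes get_extension_management_client() and that the client offers get_installed_extensions(), mirroring the other azure-devops clients; the organization URL and token are placeholders.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'your-pat'))

# Assumption: the v7.1 factory exposes this accessor.
ext_client = connection.clients_v7_1.get_extension_management_client()
for extension in ext_client.get_installed_extensions():
    print(extension.publisher_id, extension.extension_id, extension.version)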
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class GalleryClient(Client):
"""Gallery
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(GalleryClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '69d21c00-f135-441b-b5ce-3626378e0819'
def share_extension_by_id(self, extension_id, account_name):
"""ShareExtensionById.
[Preview API]
:param str extension_id:
:param str account_name:
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='POST',
location_id='1f19631b-a0b4-4a03-89c2-d79785d24360',
version='7.1-preview.1',
route_values=route_values)
def unshare_extension_by_id(self, extension_id, account_name):
"""UnshareExtensionById.
[Preview API]
:param str extension_id:
:param str account_name:
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='DELETE',
location_id='1f19631b-a0b4-4a03-89c2-d79785d24360',
version='7.1-preview.1',
route_values=route_values)
def share_extension(self, publisher_name, extension_name, account_name):
"""ShareExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str account_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='POST',
location_id='a1e66d8f-f5de-4d16-8309-91a4e015ee46',
version='7.1-preview.1',
route_values=route_values)
def unshare_extension(self, publisher_name, extension_name, account_name):
"""UnshareExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str account_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='DELETE',
location_id='a1e66d8f-f5de-4d16-8309-91a4e015ee46',
version='7.1-preview.1',
route_values=route_values)
def get_acquisition_options(self, item_id, installation_target, test_commerce=None, is_free_or_trial_install=None):
"""GetAcquisitionOptions.
[Preview API]
:param str item_id:
:param str installation_target:
:param bool test_commerce:
:param bool is_free_or_trial_install:
:rtype: :class:`<AcquisitionOptions> <azure.devops.v7_1.gallery.models.AcquisitionOptions>`
"""
route_values = {}
if item_id is not None:
route_values['itemId'] = self._serialize.url('item_id', item_id, 'str')
query_parameters = {}
if installation_target is not None:
query_parameters['installationTarget'] = self._serialize.query('installation_target', installation_target, 'str')
if test_commerce is not None:
query_parameters['testCommerce'] = self._serialize.query('test_commerce', test_commerce, 'bool')
if is_free_or_trial_install is not None:
query_parameters['isFreeOrTrialInstall'] = self._serialize.query('is_free_or_trial_install', is_free_or_trial_install, 'bool')
response = self._send(http_method='GET',
location_id='9d0a0105-075e-4760-aa15-8bcf54d1bd7d',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('AcquisitionOptions', response)
def request_acquisition(self, acquisition_request):
"""RequestAcquisition.
[Preview API]
:param :class:`<ExtensionAcquisitionRequest> <azure.devops.v7_1.gallery.models.ExtensionAcquisitionRequest>` acquisition_request:
:rtype: :class:`<ExtensionAcquisitionRequest> <azure.devops.v7_1.gallery.models.ExtensionAcquisitionRequest>`
"""
content = self._serialize.body(acquisition_request, 'ExtensionAcquisitionRequest')
response = self._send(http_method='POST',
location_id='3adb1f2d-e328-446e-be73-9f6d98071c45',
version='7.1-preview.1',
content=content)
return self._deserialize('ExtensionAcquisitionRequest', response)
def get_asset_by_name(self, publisher_name, extension_name, version, asset_type, account_token=None, accept_default=None, account_token_header=None, **kwargs):
"""GetAssetByName.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str asset_type:
:param str account_token:
:param bool accept_default:
:param String account_token_header: Header to pass the account token
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
response = self._send(http_method='GET',
location_id='7529171f-a002-4180-93ba-685f358a0482',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
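# Illustrative only: downloading an asset with a progress callback via the
# method above. Assumption: the callback receives (chunk, response) as in
# msrest's stream download; the publisher, extension, version, and asset type
# are placeholders.
#
#   def on_chunk(chunk, response):
#       print('received', len(chunk or b''), 'bytes')
#
#   chunks = gallery_client.get_asset_by_name(
#       'ms-vscode', 'cpptools', '1.0.0',
#       'Microsoft.VisualStudio.Services.VSIXPackage',
#       callback=on_chunk)
#   with open('extension.vsix', 'wb') as f:
#       for chunk in chunks:
#           f.write(chunk)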
def get_asset(self, extension_id, version, asset_type, account_token=None, accept_default=None, account_token_header=None, **kwargs):
"""GetAsset.
[Preview API]
:param str extension_id:
:param str version:
:param str asset_type:
:param str account_token:
:param bool accept_default:
:param String account_token_header: Header to pass the account token
:rtype: object
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
response = self._send(http_method='GET',
location_id='5d545f3d-ef47-488b-8be3-f5ee1517856c',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_asset_authenticated(self, publisher_name, extension_name, version, asset_type, account_token=None, account_token_header=None, **kwargs):
"""GetAssetAuthenticated.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str asset_type:
:param str account_token:
:param String account_token_header: Header to pass the account token
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
response = self._send(http_method='GET',
location_id='506aff36-2622-4f70-8063-77cce6366d20',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def associate_azure_publisher(self, publisher_name, azure_publisher_id):
"""AssociateAzurePublisher.
[Preview API]
:param str publisher_name:
:param str azure_publisher_id:
:rtype: :class:`<AzurePublisher> <azure.devops.v7_1.gallery.models.AzurePublisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if azure_publisher_id is not None:
query_parameters['azurePublisherId'] = self._serialize.query('azure_publisher_id', azure_publisher_id, 'str')
response = self._send(http_method='PUT',
location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('AzurePublisher', response)
def query_associated_azure_publisher(self, publisher_name):
"""QueryAssociatedAzurePublisher.
[Preview API]
:param str publisher_name:
:rtype: :class:`<AzurePublisher> <azure.devops.v7_1.gallery.models.AzurePublisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
response = self._send(http_method='GET',
location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('AzurePublisher', response)
def get_categories(self, languages=None):
"""GetCategories.
[Preview API]
:param str languages:
:rtype: [str]
"""
query_parameters = {}
if languages is not None:
query_parameters['languages'] = self._serialize.query('languages', languages, 'str')
response = self._send(http_method='GET',
location_id='e0a5a71e-3ac3-43a0-ae7d-0bb5c3046a2a',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
def get_category_details(self, category_name, languages=None, product=None):
"""GetCategoryDetails.
[Preview API]
:param str category_name:
:param str languages:
:param str product:
:rtype: :class:`<CategoriesResult> <azure.devops.v7_1.gallery.models.CategoriesResult>`
"""
route_values = {}
if category_name is not None:
route_values['categoryName'] = self._serialize.url('category_name', category_name, 'str')
query_parameters = {}
if languages is not None:
query_parameters['languages'] = self._serialize.query('languages', languages, 'str')
if product is not None:
query_parameters['product'] = self._serialize.query('product', product, 'str')
response = self._send(http_method='GET',
location_id='75d3c04d-84d2-4973-acd2-22627587dabc',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('CategoriesResult', response)
def get_category_tree(self, product, category_id, lcid=None, source=None, product_version=None, skus=None, sub_skus=None, product_architecture=None):
"""GetCategoryTree.
[Preview API]
:param str product:
:param str category_id:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:param str product_architecture:
:rtype: :class:`<ProductCategory> <azure.devops.v7_1.gallery.models.ProductCategory>`
"""
route_values = {}
if product is not None:
route_values['product'] = self._serialize.url('product', product, 'str')
if category_id is not None:
route_values['categoryId'] = self._serialize.url('category_id', category_id, 'str')
query_parameters = {}
if lcid is not None:
query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int')
if source is not None:
query_parameters['source'] = self._serialize.query('source', source, 'str')
if product_version is not None:
query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str')
if skus is not None:
query_parameters['skus'] = self._serialize.query('skus', skus, 'str')
if sub_skus is not None:
query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str')
if product_architecture is not None:
query_parameters['productArchitecture'] = self._serialize.query('product_architecture', product_architecture, 'str')
response = self._send(http_method='GET',
location_id='1102bb42-82b0-4955-8d8a-435d6b4cedd3',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ProductCategory', response)
def get_root_categories(self, product, lcid=None, source=None, product_version=None, skus=None, sub_skus=None):
"""GetRootCategories.
[Preview API]
:param str product:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategoriesResult> <azure.devops.v7_1.gallery.models.ProductCategoriesResult>`
"""
route_values = {}
if product is not None:
route_values['product'] = self._serialize.url('product', product, 'str')
query_parameters = {}
if lcid is not None:
query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int')
if source is not None:
query_parameters['source'] = self._serialize.query('source', source, 'str')
if product_version is not None:
query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str')
if skus is not None:
query_parameters['skus'] = self._serialize.query('skus', skus, 'str')
if sub_skus is not None:
query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str')
response = self._send(http_method='GET',
location_id='31fba831-35b2-46f6-a641-d05de5a877d8',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ProductCategoriesResult', response)
def get_certificate(self, publisher_name, extension_name, version=None, **kwargs):
"""GetCertificate.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
response = self._send(http_method='GET',
location_id='e905ad6a-3f1f-4d08-9f6d-7d357ff8b7d0',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_content_verification_log(self, publisher_name, extension_name, **kwargs):
"""GetContentVerificationLog.
[Preview API]
:param str publisher_name:
:param str extension_name:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
response = self._send(http_method='GET',
location_id='c0f1c7c4-3557-4ffb-b774-1e48c4865e99',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def create_support_request(self, customer_support_request):
"""CreateSupportRequest.
[Preview API]
:param :class:`<CustomerSupportRequest> <azure.devops.v7_1.gallery.models.CustomerSupportRequest>` customer_support_request:
"""
content = self._serialize.body(customer_support_request, 'CustomerSupportRequest')
self._send(http_method='POST',
location_id='8eded385-026a-4c15-b810-b8eb402771f1',
version='7.1-preview.1',
content=content)
def create_draft_for_edit_extension(self, publisher_name, extension_name):
"""CreateDraftForEditExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:rtype: :class:`<ExtensionDraft> <azure.devops.v7_1.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
response = self._send(http_method='POST',
location_id='02b33873-4e61-496e-83a2-59d1df46b7d8',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('ExtensionDraft', response)
def perform_edit_extension_draft_operation(self, draft_patch, publisher_name, extension_name, draft_id):
"""PerformEditExtensionDraftOperation.
[Preview API]
:param :class:`<ExtensionDraftPatch> <azure.devops.v7_1.gallery.models.ExtensionDraftPatch>` draft_patch:
:param str publisher_name:
:param str extension_name:
:param str draft_id:
:rtype: :class:`<ExtensionDraft> <azure.devops.v7_1.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
content = self._serialize.body(draft_patch, 'ExtensionDraftPatch')
response = self._send(http_method='PATCH',
location_id='02b33873-4e61-496e-83a2-59d1df46b7d8',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ExtensionDraft', response)
def update_payload_in_draft_for_edit_extension(self, upload_stream, publisher_name, extension_name, draft_id, file_name=None, **kwargs):
"""UpdatePayloadInDraftForEditExtension.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str extension_name:
:param str draft_id:
:param String file_name: Header to pass the filename of the uploaded data
:rtype: :class:`<ExtensionDraft> <azure.devops.v7_1.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
additional_headers = {}
if file_name is not None:
additional_headers['X-Market-UploadFileName'] = file_name
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='02b33873-4e61-496e-83a2-59d1df46b7d8',
version='7.1-preview.1',
route_values=route_values,
additional_headers=additional_headers,
content=content,
media_type='application/octet-stream')
return self._deserialize('ExtensionDraft', response)
def add_asset_for_edit_extension_draft(self, upload_stream, publisher_name, extension_name, draft_id, asset_type, **kwargs):
"""AddAssetForEditExtensionDraft.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str extension_name:
:param str draft_id:
:param str asset_type:
:rtype: :class:`<ExtensionDraftAsset> <azure.devops.v7_1.gallery.models.ExtensionDraftAsset>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='f1db9c47-6619-4998-a7e5-d7f9f41a4617',
version='7.1-preview.1',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('ExtensionDraftAsset', response)
def create_draft_for_new_extension(self, upload_stream, publisher_name, product, file_name=None, **kwargs):
"""CreateDraftForNewExtension.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param String product: Header to pass the product type of the payload file
:param String file_name: Header to pass the filename of the uploaded data
:rtype: :class:`<ExtensionDraft> <azure.devops.v7_1.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
additional_headers = {}
if product is not None:
additional_headers['X-Market-UploadFileProduct'] = product
if file_name is not None:
additional_headers['X-Market-UploadFileName'] = file_name
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='b3ab127d-ebb9-4d22-b611-4e09593c8d79',
version='7.1-preview.1',
route_values=route_values,
additional_headers=additional_headers,
content=content,
media_type='application/octet-stream')
return self._deserialize('ExtensionDraft', response)
def perform_new_extension_draft_operation(self, draft_patch, publisher_name, draft_id):
"""PerformNewExtensionDraftOperation.
[Preview API]
:param :class:`<ExtensionDraftPatch> <azure.devops.v7_1.gallery.models.ExtensionDraftPatch>` draft_patch:
:param str publisher_name:
:param str draft_id:
:rtype: :class:`<ExtensionDraft> <azure.devops.v7_1.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
content = self._serialize.body(draft_patch, 'ExtensionDraftPatch')
response = self._send(http_method='PATCH',
location_id='b3ab127d-ebb9-4d22-b611-4e09593c8d79',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ExtensionDraft', response)
def update_payload_in_draft_for_new_extension(self, upload_stream, publisher_name, draft_id, file_name=None, **kwargs):
"""UpdatePayloadInDraftForNewExtension.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str draft_id:
:param String file_name: Header to pass the filename of the uploaded data
:rtype: :class:`<ExtensionDraft> <azure.devops.v7_1.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
additional_headers = {}
if file_name is not None:
additional_headers['X-Market-UploadFileName'] = file_name
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='b3ab127d-ebb9-4d22-b611-4e09593c8d79',
version='7.1-preview.1',
route_values=route_values,
additional_headers=additional_headers,
content=content,
media_type='application/octet-stream')
return self._deserialize('ExtensionDraft', response)
def add_asset_for_new_extension_draft(self, upload_stream, publisher_name, draft_id, asset_type, **kwargs):
"""AddAssetForNewExtensionDraft.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:rtype: :class:`<ExtensionDraftAsset> <azure.devops.v7_1.gallery.models.ExtensionDraftAsset>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7',
version='7.1-preview.1',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('ExtensionDraftAsset', response)
def get_asset_from_edit_extension_draft(self, publisher_name, draft_id, asset_type, extension_name, **kwargs):
"""GetAssetFromEditExtensionDraft.
[Preview API]
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:param str extension_name:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if extension_name is not None:
query_parameters['extensionName'] = self._serialize.query('extension_name', extension_name, 'str')
response = self._send(http_method='GET',
location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_asset_from_new_extension_draft(self, publisher_name, draft_id, asset_type, **kwargs):
"""GetAssetFromNewExtensionDraft.
[Preview API]
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
response = self._send(http_method='GET',
location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_extension_events(self, publisher_name, extension_name, count=None, after_date=None, include=None, include_property=None):
"""GetExtensionEvents.
[Preview API] Get install/uninstall events of an extension. If both count and afterDate parameters are specified, count takes precedence.
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param int count: Count of events to fetch, applies to each event type.
:param datetime after_date: Fetch events that occurred on or after this date
:param str include: Filter options. Supported values: install, uninstall, review, acquisition, sales. Default is to fetch all types of events
:param str include_property: Event properties to include. Currently only 'lastContactDetails' is supported for uninstall events
:rtype: :class:`<ExtensionEvents> <azure.devops.v7_1.gallery.models.ExtensionEvents>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
if include is not None:
query_parameters['include'] = self._serialize.query('include', include, 'str')
if include_property is not None:
query_parameters['includeProperty'] = self._serialize.query('include_property', include_property, 'str')
response = self._send(http_method='GET',
location_id='3d13c499-2168-4d06-bef4-14aba185dcd5',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ExtensionEvents', response)
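    # Editorial sketch: fetching the last week of install/uninstall events,
    # assuming `gallery` is a GalleryClient obtained as in the sketch above.
    # The comma-separated form of the `include` filter is an assumption.
    #
    #   from datetime import datetime, timedelta
    #
    #   events = gallery.get_extension_events(
    #       'my-publisher', 'my-extension', count=100,
    #       after_date=datetime.utcnow() - timedelta(days=7),
    #       include='install,uninstall')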
def publish_extension_events(self, extension_events):
"""PublishExtensionEvents.
[Preview API] API endpoint to publish extension install/uninstall events. This is meant to be invoked by EMS only, for sending data related to the install/uninstall of an extension.
:param [ExtensionEvents] extension_events:
"""
content = self._serialize.body(extension_events, '[ExtensionEvents]')
self._send(http_method='POST',
location_id='0bf2bd3a-70e0-4d5d-8bf7-bd4a9c2ab6e7',
version='7.1-preview.1',
content=content)
def query_extensions(self, extension_query, account_token=None, account_token_header=None):
"""QueryExtensions.
[Preview API]
:param :class:`<ExtensionQuery> <azure.devops.v7_1.gallery.models.ExtensionQuery>` extension_query:
:param str account_token:
:param String account_token_header: Header to pass the account token
:rtype: :class:`<ExtensionQueryResult> <azure.devops.v7_1.gallery.models.ExtensionQueryResult>`
"""
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
content = self._serialize.body(extension_query, 'ExtensionQuery')
response = self._send(http_method='POST',
location_id='eb9d5ee1-6d43-456b-b80e-8a96fbc014b6',
version='7.1-preview.1',
query_parameters=query_parameters,
additional_headers=additional_headers,
content=content)
return self._deserialize('ExtensionQueryResult', response)
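    # Editorial sketch: building an ExtensionQuery with the models from
    # azure.devops.v7_1.gallery.models. The filter type value 10 (search text)
    # is an assumption based on the public Marketplace ExtensionQueryFilterType
    # enumeration.
    #
    #   from azure.devops.v7_1.gallery.models import (
    #       ExtensionQuery, QueryFilter, FilterCriteria)
    #
    #   query = ExtensionQuery(filters=[QueryFilter(
    #       criteria=[FilterCriteria(filter_type=10, value='python')],
    #       page_number=1, page_size=25)])
    #   result = gallery.query_extensions(query)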
def create_extension(self, upload_stream, extension_type=None, re_captcha_token=None, **kwargs):
"""CreateExtension.
[Preview API]
:param object upload_stream: Stream to upload
:param str extension_type:
:param str re_captcha_token:
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
query_parameters = {}
if extension_type is not None:
query_parameters['extensionType'] = self._serialize.query('extension_type', extension_type, 'str')
if re_captcha_token is not None:
query_parameters['reCaptchaToken'] = self._serialize.query('re_captcha_token', re_captcha_token, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='7.1-preview.2',
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('PublishedExtension', response)
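    # Editorial sketch: publishing a new extension from a local VSIX. The
    # attribute names on the returned PublishedExtension are assumed to follow
    # the gallery models module.
    #
    #   with open('my-extension.vsix', 'rb') as stream:
    #       published = gallery.create_extension(stream)
    #   print(published.publisher.publisher_name, published.extension_name)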
def delete_extension_by_id(self, extension_id, version=None):
"""DeleteExtensionById.
[Preview API]
:param str extension_id:
:param str version:
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
self._send(http_method='DELETE',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
def get_extension_by_id(self, extension_id, version=None, flags=None):
"""GetExtensionById.
[Preview API]
:param str extension_id:
:param str version:
:param str flags:
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'str')
response = self._send(http_method='GET',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PublishedExtension', response)
def update_extension_by_id(self, extension_id, re_captcha_token=None):
"""UpdateExtensionById.
[Preview API]
:param str extension_id:
:param str re_captcha_token:
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
query_parameters = {}
if re_captcha_token is not None:
query_parameters['reCaptchaToken'] = self._serialize.query('re_captcha_token', re_captcha_token, 'str')
response = self._send(http_method='PUT',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PublishedExtension', response)
def create_extension_with_publisher(self, upload_stream, publisher_name, extension_type=None, re_captcha_token=None, **kwargs):
"""CreateExtensionWithPublisher.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str extension_type:
:param str re_captcha_token:
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if extension_type is not None:
query_parameters['extensionType'] = self._serialize.query('extension_type', extension_type, 'str')
if re_captcha_token is not None:
query_parameters['reCaptchaToken'] = self._serialize.query('re_captcha_token', re_captcha_token, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('PublishedExtension', response)
def delete_extension(self, publisher_name, extension_name, version=None):
"""DeleteExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
self._send(http_method='DELETE',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
def get_extension(self, publisher_name, extension_name, version=None, flags=None, account_token=None, account_token_header=None):
"""GetExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str flags:
:param str account_token:
:param String account_token_header: Header to pass the account token
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'str')
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
response = self._send(http_method='GET',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers)
return self._deserialize('PublishedExtension', response)
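    # Editorial sketch: fetching extension metadata. Passing a flag name in the
    # string-typed `flags` parameter (for example 'includeVersions') is an
    # assumption based on the REST contract.
    #
    #   ext = gallery.get_extension('my-publisher', 'my-extension',
    #                               flags='includeVersions')
    #   print(ext.display_name, ext.last_updated)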
def update_extension(self, upload_stream, publisher_name, extension_name, extension_type=None, re_captcha_token=None, bypass_scope_check=None, **kwargs):
"""UpdateExtension.
[Preview API] REST endpoint to update an extension.
:param object upload_stream: Stream to upload
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param str extension_type:
:param str re_captcha_token:
:param bool bypass_scope_check: This parameter decides if the scope change check needs to be invoked or not
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if extension_type is not None:
query_parameters['extensionType'] = self._serialize.query('extension_type', extension_type, 'str')
if re_captcha_token is not None:
query_parameters['reCaptchaToken'] = self._serialize.query('re_captcha_token', re_captcha_token, 'str')
if bypass_scope_check is not None:
query_parameters['bypassScopeCheck'] = self._serialize.query('bypass_scope_check', bypass_scope_check, 'bool')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('PublishedExtension', response)
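    # Editorial sketch: uploading a new version of an existing extension and
    # skipping the scope-change check; all identifiers are placeholders.
    #
    #   with open('my-extension-1.0.1.vsix', 'rb') as stream:
    #       published = gallery.update_extension(
    #           stream, 'my-publisher', 'my-extension', bypass_scope_check=True)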
def update_extension_properties(self, publisher_name, extension_name, flags):
"""UpdateExtensionProperties.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str flags:
:rtype: :class:`<PublishedExtension> <azure.devops.v7_1.gallery.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'str')
response = self._send(http_method='PATCH',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PublishedExtension', response)
def share_extension_with_host(self, publisher_name, extension_name, host_type, host_name):
"""ShareExtensionWithHost.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str host_type:
:param str host_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if host_type is not None:
route_values['hostType'] = self._serialize.url('host_type', host_type, 'str')
if host_name is not None:
route_values['hostName'] = self._serialize.url('host_name', host_name, 'str')
self._send(http_method='POST',
location_id='328a3af8-d124-46e9-9483-01690cd415b9',
version='7.1-preview.1',
route_values=route_values)
def unshare_extension_with_host(self, publisher_name, extension_name, host_type, host_name):
"""UnshareExtensionWithHost.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str host_type:
:param str host_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if host_type is not None:
route_values['hostType'] = self._serialize.url('host_type', host_type, 'str')
if host_name is not None:
route_values['hostName'] = self._serialize.url('host_name', host_name, 'str')
self._send(http_method='DELETE',
location_id='328a3af8-d124-46e9-9483-01690cd415b9',
version='7.1-preview.1',
route_values=route_values)
def extension_validator(self, azure_rest_api_request_model):
"""ExtensionValidator.
[Preview API]
:param :class:`<AzureRestApiRequestModel> <azure.devops.v7_1.gallery.models.AzureRestApiRequestModel>` azure_rest_api_request_model:
"""
content = self._serialize.body(azure_rest_api_request_model, 'AzureRestApiRequestModel')
self._send(http_method='POST',
location_id='05e8a5e1-8c59-4c2c-8856-0ff087d1a844',
version='7.1-preview.1',
content=content)
def send_notifications(self, notification_data):
"""SendNotifications.
[Preview API] Send Notification
:param :class:`<NotificationsData> <azure.devops.v7_1.gallery.models.NotificationsData>` notification_data: Denoting the data needed to send notification
"""
content = self._serialize.body(notification_data, 'NotificationsData')
self._send(http_method='POST',
location_id='eab39817-413c-4602-a49f-07ad00844980',
version='7.1-preview.1',
content=content)
def get_package(self, publisher_name, extension_name, version, account_token=None, accept_default=None, account_token_header=None, **kwargs):
"""GetPackage.
[Preview API] This endpoint is invoked when a VSTS extension is downloaded from the Web UI.
:param str publisher_name:
:param str extension_name:
:param str version:
:param str account_token:
:param bool accept_default:
:param String account_token_header: Header to pass the account token
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
response = self._send(http_method='GET',
location_id='7cb576f8-1cae-4c4b-b7b1-e4af5759e965',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
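    # Editorial sketch: saving the downloaded VSIX to disk. That the object
    # returned by stream_download can be iterated in chunks is an assumption
    # about the underlying msrest transport.
    #
    #   data = gallery.get_package('my-publisher', 'my-extension', '1.0.0')
    #   with open('my-extension-1.0.0.vsix', 'wb') as f:
    #       for chunk in data:
    #           f.write(chunk)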
def get_asset_with_token(self, publisher_name, extension_name, version, asset_type, asset_token=None, account_token=None, accept_default=None, account_token_header=None, **kwargs):
"""GetAssetWithToken.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str asset_type:
:param str asset_token:
:param str account_token:
:param bool accept_default:
:param String account_token_header: Header to pass the account token
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
if asset_token is not None:
route_values['assetToken'] = self._serialize.url('asset_token', asset_token, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
additional_headers = {}
if account_token_header is not None:
additional_headers['X-Market-AccountToken'] = account_token_header
response = self._send(http_method='GET',
location_id='364415a1-0077-4a41-a7a0-06edd4497492',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def delete_publisher_asset(self, publisher_name, asset_type=None):
"""DeletePublisherAsset.
[Preview API] Delete a publisher asset, such as the logo
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
self._send(http_method='DELETE',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
def get_publisher_asset(self, publisher_name, asset_type=None, **kwargs):
"""GetPublisherAsset.
[Preview API] Get a publisher asset, such as the logo, as a stream
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
response = self._send(http_method='GET',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_publisher_asset(self, upload_stream, publisher_name, asset_type=None, file_name=None, **kwargs):
"""UpdatePublisherAsset.
[Preview API] Update a publisher asset, such as the logo. It accepts the asset file as an octet stream; the file name is passed in header values.
:param object upload_stream: Stream to upload
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
:param String file_name: Header to pass the filename of the uploaded data
:rtype: {str}
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
additional_headers = {}
if file_name is not None:
additional_headers['X-Market-UploadFileName'] = file_name
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
additional_headers=additional_headers,
content=content,
media_type='application/octet-stream')
return self._deserialize('{str}', self._unwrap_collection(response))
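    # Editorial sketch: uploading a publisher logo. Per the docstring the asset
    # type defaults to 'logo' when omitted; the file path is a placeholder.
    #
    #   with open('logo.png', 'rb') as stream:
    #       gallery.update_publisher_asset(stream, 'my-publisher',
    #                                      file_name='logo.png')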
def fetch_domain_token(self, publisher_name):
"""FetchDomainToken.
[Preview API]
:param str publisher_name:
:rtype: str
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
response = self._send(http_method='GET',
location_id='67a609ef-fa74-4b52-8664-78d76f7b3634',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('str', response)
def verify_domain_token(self, publisher_name):
"""VerifyDomainToken.
[Preview API]
:param str publisher_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
self._send(http_method='PUT',
location_id='67a609ef-fa74-4b52-8664-78d76f7b3634',
version='7.1-preview.1',
route_values=route_values)
def query_publishers(self, publisher_query):
"""QueryPublishers.
[Preview API]
:param :class:`<PublisherQuery> <azure.devops.v7_1.gallery.models.PublisherQuery>` publisher_query:
:rtype: :class:`<PublisherQueryResult> <azure.devops.v7_1.gallery.models.PublisherQueryResult>`
"""
content = self._serialize.body(publisher_query, 'PublisherQuery')
response = self._send(http_method='POST',
location_id='2ad6ee0a-b53f-4034-9d1d-d009fda1212e',
version='7.1-preview.1',
content=content)
return self._deserialize('PublisherQueryResult', response)
def create_publisher(self, publisher):
"""CreatePublisher.
[Preview API]
:param :class:`<Publisher> <azure.devops.v7_1.gallery.models.Publisher>` publisher:
:rtype: :class:`<Publisher> <azure.devops.v7_1.gallery.models.Publisher>`
"""
content = self._serialize.body(publisher, 'Publisher')
response = self._send(http_method='POST',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='7.1-preview.1',
content=content)
return self._deserialize('Publisher', response)
def delete_publisher(self, publisher_name):
"""DeletePublisher.
[Preview API]
:param str publisher_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
self._send(http_method='DELETE',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='7.1-preview.1',
route_values=route_values)
def get_publisher(self, publisher_name, flags=None):
"""GetPublisher.
[Preview API]
:param str publisher_name:
:param int flags:
:rtype: :class:`<Publisher> <azure.devops.v7_1.gallery.models.Publisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'int')
response = self._send(http_method='GET',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Publisher', response)
def update_publisher(self, publisher, publisher_name):
"""UpdatePublisher.
[Preview API]
:param :class:`<Publisher> <azure.devops.v7_1.gallery.models.Publisher>` publisher:
:param str publisher_name:
:rtype: :class:`<Publisher> <azure.devops.v7_1.gallery.models.Publisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
content = self._serialize.body(publisher, 'Publisher')
response = self._send(http_method='PUT',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Publisher', response)
def update_publisher_members(self, role_assignments, publisher_name, limit_to_caller_identity_domain=None):
"""UpdatePublisherMembers.
[Preview API] Endpoint to add/modify publisher membership. Currently supports only addition/modification of one user at a time, and works only for adding members of the same tenant.
:param [PublisherUserRoleAssignmentRef] role_assignments: List of user identifiers (email addresses) and roles to be added. Currently only one entry is supported.
:param str publisher_name: The name/id of the publisher to which users have to be added
:param bool limit_to_caller_identity_domain: Whether cross-tenant additions should be allowed.
:rtype: [PublisherRoleAssignment]
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if limit_to_caller_identity_domain is not None:
query_parameters['limitToCallerIdentityDomain'] = self._serialize.query('limit_to_caller_identity_domain', limit_to_caller_identity_domain, 'bool')
content = self._serialize.body(role_assignments, '[PublisherUserRoleAssignmentRef]')
response = self._send(http_method='POST',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('[PublisherRoleAssignment]', self._unwrap_collection(response))
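    # Editorial sketch: adding a single member to a publisher. The field names
    # on PublisherUserRoleAssignmentRef and the role value 'contributor' are
    # assumptions about the gallery models.
    #
    #   from azure.devops.v7_1.gallery.models import PublisherUserRoleAssignmentRef
    #
    #   assignment = PublisherUserRoleAssignmentRef(role='contributor',
    #                                               identifier='user@example.com')
    #   roles = gallery.update_publisher_members([assignment], 'my-publisher')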
def get_publisher_without_token(self, publisher_name):
"""GetPublisherWithoutToken.
[Preview API]
:param str publisher_name:
:rtype: :class:`<Publisher> <azure.devops.v7_1.gallery.models.Publisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
response = self._send(http_method='GET',
location_id='215a2ed8-458a-4850-ad5a-45f1dabc3461',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('Publisher', response)
def get_questions(self, publisher_name, extension_name, count=None, page=None, after_date=None):
"""GetQuestions.
[Preview API] Returns a list of questions with their responses associated with an extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param int count: Number of questions to retrieve (defaults to 10).
:param int page: Page number from which set of questions are to be retrieved.
:param datetime after_date: If provided, only questions posted after this date are returned
:rtype: :class:`<QuestionsResult> <azure.devops.v7_1.gallery.models.QuestionsResult>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if page is not None:
query_parameters['page'] = self._serialize.query('page', page, 'int')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='c010d03d-812c-4ade-ae07-c1862475eda5',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QuestionsResult', response)
def report_question(self, concern, pub_name, ext_name, question_id):
"""ReportQuestion.
[Preview API] Flags a concern with an existing question for an extension.
:param :class:`<Concern> <azure.devops.v7_1.gallery.models.Concern>` concern: User reported concern with a question for the extension.
:param str pub_name: Name of the publisher who published the extension.
:param str ext_name: Name of the extension.
:param long question_id: Identifier of the question to be updated for the extension.
:rtype: :class:`<Concern> <azure.devops.v7_1.gallery.models.Concern>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(concern, 'Concern')
response = self._send(http_method='POST',
location_id='784910cd-254a-494d-898b-0728549b2f10',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Concern', response)
def create_question(self, question, publisher_name, extension_name):
"""CreateQuestion.
[Preview API] Creates a new question for an extension.
:param :class:`<Question> <azure.devops.v7_1.gallery.models.Question>` question: Question to be created for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:rtype: :class:`<Question> <azure.devops.v7_1.gallery.models.Question>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(question, 'Question')
response = self._send(http_method='POST',
location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Question', response)
def delete_question(self, publisher_name, extension_name, question_id):
"""DeleteQuestion.
[Preview API] Deletes an existing question and all its associated responses for an extension. (soft delete)
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question to be deleted for the extension.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
self._send(http_method='DELETE',
location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4',
version='7.1-preview.1',
route_values=route_values)
def update_question(self, question, publisher_name, extension_name, question_id):
"""UpdateQuestion.
[Preview API] Updates an existing question for an extension.
:param :class:`<Question> <azure.devops.v7_1.gallery.models.Question>` question: Updated question to be set for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question to be updated for the extension.
:rtype: :class:`<Question> <azure.devops.v7_1.gallery.models.Question>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(question, 'Question')
response = self._send(http_method='PATCH',
location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Question', response)
def create_response(self, response, publisher_name, extension_name, question_id):
"""CreateResponse.
[Preview API] Creates a new response for a given question for an extension.
:param :class:`<Response> <azure.devops.v7_1.gallery.models.Response>` response: Response to be created for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question for which response is to be created for the extension.
:rtype: :class:`<Response> <azure.devops.v7_1.gallery.models.Response>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(response, 'Response')
response = self._send(http_method='POST',
location_id='7f8ae5e0-46b0-438f-b2e8-13e8513517bd',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Response', response)
def delete_response(self, publisher_name, extension_name, question_id, response_id):
"""DeleteResponse.
[Preview API] Deletes a response for an extension. (soft delete)
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifies the question whose response is to be deleted.
:param long response_id: Identifies the response to be deleted.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
if response_id is not None:
route_values['responseId'] = self._serialize.url('response_id', response_id, 'long')
self._send(http_method='DELETE',
location_id='7f8ae5e0-46b0-438f-b2e8-13e8513517bd',
version='7.1-preview.1',
route_values=route_values)
def update_response(self, response, publisher_name, extension_name, question_id, response_id):
"""UpdateResponse.
[Preview API] Updates an existing response for a given question for an extension.
:param :class:`<Response> <azure.devops.v7_1.gallery.models.Response>` response: Updated response to be set for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question for which response is to be updated for the extension.
:param long response_id: Identifier of the response which has to be updated.
:rtype: :class:`<Response> <azure.devops.v7_1.gallery.models.Response>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
if response_id is not None:
route_values['responseId'] = self._serialize.url('response_id', response_id, 'long')
content = self._serialize.body(response, 'Response')
response = self._send(http_method='PATCH',
location_id='7f8ae5e0-46b0-438f-b2e8-13e8513517bd',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Response', response)
def get_extension_reports(self, publisher_name, extension_name, days=None, count=None, after_date=None):
"""GetExtensionReports.
[Preview API] Returns extension reports
:param str publisher_name: Name of the publisher who published the extension
:param str extension_name: Name of the extension
:param int days: Last n days report. If afterDate and days are specified, days will take priority
:param int count: Number of events to be returned
:param datetime after_date: Use if you want to fetch events newer than the specified date
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if days is not None:
query_parameters['days'] = self._serialize.query('days', days, 'int')
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='79e0c74f-157f-437e-845f-74fbb4121d4c',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('object', response)
def get_reviews(self, publisher_name, extension_name, count=None, filter_options=None, before_date=None, after_date=None):
"""GetReviews.
[Preview API] Returns a list of reviews associated with an extension
:param str publisher_name: Name of the publisher who published the extension
:param str extension_name: Name of the extension
:param int count: Number of reviews to retrieve (defaults to 5)
:param str filter_options: FilterOptions to filter out empty reviews, etc.; defaults to none
:param datetime before_date: Use if you want to fetch reviews older than the specified date, defaults to null
:param datetime after_date: Use if you want to fetch reviews newer than the specified date, defaults to null
:rtype: :class:`<ReviewsResult> <azure.devops.v7_1.gallery.models.ReviewsResult>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if filter_options is not None:
query_parameters['filterOptions'] = self._serialize.query('filter_options', filter_options, 'str')
if before_date is not None:
query_parameters['beforeDate'] = self._serialize.query('before_date', before_date, 'iso-8601')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='5b3f819f-f247-42ad-8c00-dd9ab9ab246d',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReviewsResult', response)
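    # Editorial sketch: paging through reviews by walking backwards with
    # before_date. That ReviewsResult exposes the batch as `reviews` with an
    # `updated_date` per review is an assumption about the models.
    #
    #   batch = gallery.get_reviews('my-publisher', 'my-extension', count=25)
    #   while batch.reviews:
    #       oldest = min(r.updated_date for r in batch.reviews)
    #       batch = gallery.get_reviews('my-publisher', 'my-extension',
    #                                   count=25, before_date=oldest)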
def get_reviews_summary(self, pub_name, ext_name, before_date=None, after_date=None):
"""GetReviewsSummary.
[Preview API] Returns a summary of the reviews
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:param datetime before_date: Use if you want to fetch summary of reviews older than the specified date, defaults to null
:param datetime after_date: Use if you want to fetch summary of reviews newer than the specified date, defaults to null
:rtype: :class:`<ReviewSummary> <azure.devops.v7_1.gallery.models.ReviewSummary>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
query_parameters = {}
if before_date is not None:
query_parameters['beforeDate'] = self._serialize.query('before_date', before_date, 'iso-8601')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='b7b44e21-209e-48f0-ae78-04727fc37d77',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReviewSummary', response)
def create_review(self, review, pub_name, ext_name):
"""CreateReview.
[Preview API] Creates a new review for an extension
:param :class:`<Review> <azure.devops.v7_1.gallery.models.Review>` review: Review to be created for the extension
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:rtype: :class:`<Review> <azure.devops.v7_1.gallery.models.Review>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
content = self._serialize.body(review, 'Review')
response = self._send(http_method='POST',
location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Review', response)
def delete_review(self, pub_name, ext_name, review_id):
"""DeleteReview.
[Preview API] Deletes a review
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:param long review_id: Id of the review which needs to be updated
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if review_id is not None:
route_values['reviewId'] = self._serialize.url('review_id', review_id, 'long')
self._send(http_method='DELETE',
location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3',
version='7.1-preview.1',
route_values=route_values)
def update_review(self, review_patch, pub_name, ext_name, review_id):
"""UpdateReview.
[Preview API] Updates or Flags a review
:param :class:`<ReviewPatch> <azure.devops.v7_1.gallery.models.ReviewPatch>` review_patch: ReviewPatch object which contains the changes to be applied to the review
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:param long review_id: Id of the review which needs to be updated
:rtype: :class:`<ReviewPatch> <azure.devops.v7_1.gallery.models.ReviewPatch>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if review_id is not None:
route_values['reviewId'] = self._serialize.url('review_id', review_id, 'long')
content = self._serialize.body(review_patch, 'ReviewPatch')
response = self._send(http_method='PATCH',
location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ReviewPatch', response)
def create_category(self, category):
"""CreateCategory.
[Preview API]
:param :class:`<ExtensionCategory> <azure.devops.v7_1.gallery.models.ExtensionCategory>` category:
:rtype: :class:`<ExtensionCategory> <azure.devops.v7_1.gallery.models.ExtensionCategory>`
"""
content = self._serialize.body(category, 'ExtensionCategory')
response = self._send(http_method='POST',
location_id='476531a3-7024-4516-a76a-ed64d3008ad6',
version='7.1-preview.1',
content=content)
return self._deserialize('ExtensionCategory', response)
def get_gallery_user_settings(self, user_scope, key=None):
"""GetGalleryUserSettings.
[Preview API] Get all setting entries for the given user/all-users scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str key: Optional key under which to filter all the entries
:rtype: {object}
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if key is not None:
route_values['key'] = self._serialize.url('key', key, 'str')
response = self._send(http_method='GET',
location_id='9b75ece3-7960-401c-848b-148ac01ca350',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('{object}', self._unwrap_collection(response))
def set_gallery_user_settings(self, entries, user_scope):
"""SetGalleryUserSettings.
[Preview API] Set all setting entries for the given user/all-users scope
:param {object} entries: A key-value pair of all settings that need to be set
:param str user_scope: User-Scope at which to set the value. Should be "me" for the current user or "host" for all users.
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
content = self._serialize.body(entries, '{object}')
self._send(http_method='PATCH',
location_id='9b75ece3-7960-401c-848b-148ac01ca350',
version='7.1-preview.1',
route_values=route_values,
content=content)
def generate_key(self, key_type, expire_current_seconds=None):
"""GenerateKey.
[Preview API]
:param str key_type:
:param int expire_current_seconds:
"""
route_values = {}
if key_type is not None:
route_values['keyType'] = self._serialize.url('key_type', key_type, 'str')
query_parameters = {}
if expire_current_seconds is not None:
query_parameters['expireCurrentSeconds'] = self._serialize.query('expire_current_seconds', expire_current_seconds, 'int')
self._send(http_method='POST',
location_id='92ed5cf4-c38b-465a-9059-2f2fb7c624b5',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
def get_signing_key(self, key_type):
"""GetSigningKey.
[Preview API]
:param str key_type:
:rtype: str
"""
route_values = {}
if key_type is not None:
route_values['keyType'] = self._serialize.url('key_type', key_type, 'str')
response = self._send(http_method='GET',
location_id='92ed5cf4-c38b-465a-9059-2f2fb7c624b5',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('str', response)
def update_extension_statistics(self, extension_statistics_update, publisher_name, extension_name):
"""UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <azure.devops.v7_1.gallery.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(extension_statistics_update, 'ExtensionStatisticUpdate')
self._send(http_method='PATCH',
location_id='a0ea3204-11e9-422d-a9ca-45851cc41400',
version='7.1-preview.1',
route_values=route_values,
content=content)
def get_extension_daily_stats(self, publisher_name, extension_name, days=None, aggregate=None, after_date=None):
"""GetExtensionDailyStats.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param int days:
:param str aggregate:
:param datetime after_date:
:rtype: :class:`<ExtensionDailyStats> <azure.devops.v7_1.gallery.models.ExtensionDailyStats>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if days is not None:
query_parameters['days'] = self._serialize.query('days', days, 'int')
if aggregate is not None:
query_parameters['aggregate'] = self._serialize.query('aggregate', aggregate, 'str')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='ae06047e-51c5-4fb4-ab65-7be488544416',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ExtensionDailyStats', response)
def get_extension_daily_stats_anonymous(self, publisher_name, extension_name, version):
"""GetExtensionDailyStatsAnonymous.
[Preview API] This route/location id only supports HTTP POST anonymously, so that the page view daily stat can be incremented from Marketplace client. Trying to call GET on this route should result in an exception. Without this explicit implementation, calling GET on this public route invokes the above GET implementation GetExtensionDailyStats.
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param str version: Version of the extension
:rtype: :class:`<ExtensionDailyStats> <azure.devops.v7_1.gallery.models.ExtensionDailyStats>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
response = self._send(http_method='GET',
location_id='4fa7adb6-ca65-4075-a232-5f28323288ea',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('ExtensionDailyStats', response)
def increment_extension_daily_stat(self, publisher_name, extension_name, version, stat_type, target_platform=None):
"""IncrementExtensionDailyStat.
[Preview API] Increments a daily statistic associated with the extension
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param str version: Version of the extension
:param str stat_type: Type of stat to increment
:param str target_platform:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
query_parameters = {}
if stat_type is not None:
query_parameters['statType'] = self._serialize.query('stat_type', stat_type, 'str')
if target_platform is not None:
query_parameters['targetPlatform'] = self._serialize.query('target_platform', target_platform, 'str')
self._send(http_method='POST',
location_id='4fa7adb6-ca65-4075-a232-5f28323288ea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
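    # Editorial sketch: recording an uninstall against a specific version. The
    # stat type string 'uninstall' is an assumption based on the event names
    # used elsewhere in this client.
    #
    #   gallery.increment_extension_daily_stat('my-publisher', 'my-extension',
    #                                          '1.0.0', 'uninstall')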
def get_verification_log(self, publisher_name, extension_name, version, target_platform=None, **kwargs):
"""GetVerificationLog.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str target_platform:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
query_parameters = {}
if target_platform is not None:
query_parameters['targetPlatform'] = self._serialize.query('target_platform', target_platform, 'str')
response = self._send(http_method='GET',
location_id='c5523abe-b843-437f-875b-5833064efe4d',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_vSCode_web_extension_statistics(self, item_name, version, stat_type):
"""UpdateVSCodeWebExtensionStatistics.
[Preview API]
:param str item_name:
:param str version:
:param VSCodeWebExtensionStatisicsType stat_type:
"""
route_values = {}
if item_name is not None:
route_values['itemName'] = self._serialize.url('item_name', item_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if stat_type is not None:
route_values['statType'] = self._serialize.url('stat_type', stat_type, 'VSCodeWebExtensionStatisicsType')
self._send(http_method='POST',
location_id='205c91a8-7841-4fd3-ae4f-5a745d5a8df5',
version='7.1-preview.1',
route_values=route_values)
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/gallery/gallery_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/gallery/gallery_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 47506
}
| 370 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class MavenClient(Client):
"""Maven
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(MavenClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '6f7f8c07-ff36-473c-bcf3-bd6cc9b6c066'
def download_package(self, feed_id, group_id, artifact_id, version, file_name, project=None, **kwargs):
"""DownloadPackage.
[Preview API] Fulfills Maven package file download requests by either returning the URL of the requested package file or, in the case of Azure DevOps Server (OnPrem), returning the content as a stream.
:param str feed_id: Name or ID of the feed.
:param str group_id: GroupId of the maven package
:param str artifact_id: ArtifactId of the maven package
:param str version: Version of the package
:param str file_name: File name to download
:param str project: Project ID or project name
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if file_name is not None:
route_values['fileName'] = self._serialize.url('file_name', file_name, 'str')
response = self._send(http_method='GET',
location_id='c338d4b5-d30a-47e2-95b7-f157ef558833',
version='7.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def update_package_versions(self, batch_request, feed_id, project=None):
"""UpdatePackageVersions.
[Preview API] Update several packages from a single feed in a single request. The updates to the packages do not happen atomically.
:param :class:`<MavenPackagesBatchRequest> <azure.devops.v7_1.maven.models.MavenPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed_id: Feed which contains the packages to update.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
content = self._serialize.body(batch_request, 'MavenPackagesBatchRequest')
self._send(http_method='POST',
location_id='b7c586b0-d947-4d35-811a-f1161de80e6c',
version='7.1-preview.1',
route_values=route_values,
content=content)
def update_recycle_bin_packages(self, batch_request, feed, project=None):
"""UpdateRecycleBinPackages.
[Preview API] Delete or restore several package versions from the recycle bin.
:param :class:`<MavenPackagesBatchRequest> <azure.devops.v7_1.maven.models.MavenPackagesBatchRequest>` batch_request: Information about the packages to update, the operation to perform, and its associated data.
:param str feed:
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
content = self._serialize.body(batch_request, 'MavenPackagesBatchRequest')
self._send(http_method='POST',
location_id='5dd6f547-c76f-4d9d-b2ec-4720feda641f',
version='7.1-preview.1',
route_values=route_values,
content=content)
def delete_package_version_from_recycle_bin(self, feed, group_id, artifact_id, version, project=None):
"""DeletePackageVersionFromRecycleBin.
[Preview API] Permanently delete a package from a feed's recycle bin.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
self._send(http_method='DELETE',
location_id='f67e10eb-1254-4953-add7-d49b83a16c9f',
version='7.1-preview.1',
route_values=route_values)
def get_package_version_metadata_from_recycle_bin(self, feed, group_id, artifact_id, version, project=None):
"""GetPackageVersionMetadataFromRecycleBin.
[Preview API] Get information about a package version in the recycle bin.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param str project: Project ID or project name
:rtype: :class:`<MavenPackageVersionDeletionState> <azure.devops.v7_1.maven.models.MavenPackageVersionDeletionState>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
response = self._send(http_method='GET',
location_id='f67e10eb-1254-4953-add7-d49b83a16c9f',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('MavenPackageVersionDeletionState', response)
def restore_package_version_from_recycle_bin(self, package_version_details, feed, group_id, artifact_id, version, project=None):
"""RestorePackageVersionFromRecycleBin.
[Preview API] Restore a package version from the recycle bin to its associated feed.
:param :class:`<MavenRecycleBinPackageVersionDetails> <azure.devops.v7_1.maven.models.MavenRecycleBinPackageVersionDetails>` package_version_details: Set the 'Deleted' property to false to restore the package.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
content = self._serialize.body(package_version_details, 'MavenRecycleBinPackageVersionDetails')
self._send(http_method='PATCH',
location_id='f67e10eb-1254-4953-add7-d49b83a16c9f',
version='7.1-preview.1',
route_values=route_values,
content=content)
def get_upstreaming_behavior(self, feed, group_id, artifact_id, project=None):
"""GetUpstreamingBehavior.
[Preview API] Get the upstreaming behavior of a package within the context of a feed
:param str feed: The name or id of the feed
:param str group_id: The group id of the package
:param str artifact_id: The artifact id of the package
:param str project: Project ID or project name
:rtype: :class:`<UpstreamingBehavior> <azure.devops.v7_1.maven.models.UpstreamingBehavior>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
response = self._send(http_method='GET',
location_id='fba7ba8c-d1f5-4aeb-8f5d-f017a7d5e719',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('UpstreamingBehavior', response)
def set_upstreaming_behavior(self, feed, group_id, artifact_id, behavior, project=None):
"""SetUpstreamingBehavior.
[Preview API] Set the upstreaming behavior of a package within the context of a feed
:param str feed: The name or id of the feed
:param str group_id:
:param str artifact_id:
:param :class:`<UpstreamingBehavior> <azure.devops.v7_1.maven.models.UpstreamingBehavior>` behavior: The behavior to apply to the package within the scope of the feed
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
content = self._serialize.body(behavior, 'UpstreamingBehavior')
self._send(http_method='PATCH',
location_id='fba7ba8c-d1f5-4aeb-8f5d-f017a7d5e719',
version='7.1-preview.1',
route_values=route_values,
content=content)
def get_package_version(self, feed, group_id, artifact_id, version, project=None, show_deleted=None):
"""GetPackageVersion.
[Preview API] Get information about a package version.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param str project: Project ID or project name
:param bool show_deleted: True to show information for deleted packages.
:rtype: :class:`<Package> <azure.devops.v7_1.maven.models.Package>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
query_parameters = {}
if show_deleted is not None:
query_parameters['showDeleted'] = self._serialize.query('show_deleted', show_deleted, 'bool')
response = self._send(http_method='GET',
location_id='180ed967-377a-4112-986b-607adb14ded4',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Package', response)
def package_delete(self, feed, group_id, artifact_id, version, project=None):
"""PackageDelete.
[Preview API] Delete a package version from the feed and move it to the feed's recycle bin.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
self._send(http_method='DELETE',
location_id='180ed967-377a-4112-986b-607adb14ded4',
version='7.1-preview.1',
route_values=route_values)
def update_package_version(self, package_version_details, feed, group_id, artifact_id, version, project=None):
"""UpdatePackageVersion.
[Preview API] Update state for a package version.
:param :class:`<PackageVersionDetails> <azure.devops.v7_1.maven.models.PackageVersionDetails>` package_version_details: Details to be updated.
:param str feed: Name or ID of the feed.
:param str group_id: Group ID of the package.
:param str artifact_id: Artifact ID of the package.
:param str version: Version of the package.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed is not None:
route_values['feed'] = self._serialize.url('feed', feed, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if artifact_id is not None:
route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
self._send(http_method='PATCH',
location_id='180ed967-377a-4112-986b-607adb14ded4',
version='7.1-preview.1',
route_values=route_values,
content=content)
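def _example_download_maven_package():
    # Illustrative usage sketch (editorial addition; this file is generated and
    # should not normally be edited). The organization URL, feed, coordinates,
    # and token are placeholders; the callback signature follows the assumed
    # msrest convention of (chunk, response).
    from msrest.authentication import BasicAuthentication
    creds = BasicAuthentication('', 'personal-access-token')  # hypothetical PAT
    maven = MavenClient(base_url='https://pkgs.dev.azure.com/my-org', creds=creds)
    def on_chunk(chunk, response=None):
        # Called once per downloaded chunk; useful for progress reporting.
        print('received %d bytes' % len(chunk))
    stream = maven.download_package(feed_id='my-feed', group_id='com.example',
                                    artifact_id='demo', version='1.0.0',
                                    file_name='demo-1.0.0.jar', callback=on_chunk)
    # stream_download returns an iterator of byte chunks; write them to disk.
    with open('demo-1.0.0.jar', 'wb') as f:
        for chunk in stream:
            f.write(chunk)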
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/maven/maven_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/maven/maven_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 7320
}
| 371 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class OperationsClient(Client):
"""Operations
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(OperationsClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def get_operation(self, operation_id, plugin_id=None):
"""GetOperation.
[Preview API] Gets an operation from the operationId using the given pluginId.
:param str operation_id: The ID for the operation.
:param str plugin_id: The ID for the plugin.
:rtype: :class:`<Operation> <azure.devops.v7_1.operations.models.Operation>`
"""
route_values = {}
if operation_id is not None:
route_values['operationId'] = self._serialize.url('operation_id', operation_id, 'str')
query_parameters = {}
if plugin_id is not None:
query_parameters['pluginId'] = self._serialize.query('plugin_id', plugin_id, 'str')
response = self._send(http_method='GET',
location_id='9a1b74b4-2ca8-4a9f-8470-c2f2e6fdc949',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Operation', response)
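def _example_wait_for_operation(operations_client, operation_id):
    # Illustrative polling sketch (editorial addition): fetch the operation
    # until it leaves the in-progress states. The status literals are an
    # assumption based on the REST API's documented operation states.
    import time
    while True:
        operation = operations_client.get_operation(operation_id)
        if operation.status not in ('notSet', 'queued', 'inProgress'):
            return operation
        time.sleep(2)  # brief back-off between polls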
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/operations/operations_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/operations/operations_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 794
}
| 372 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Configuration(Model):
"""
This holds the configuration for the ManifestTool. The values in this file are populated from the command line, the config file, and defaults.
:param additional_component_detector_args: Additional set of command-line arguments for Component Detector.
:type additional_component_detector_args: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param build_component_path: The folder containing the build components and packages.
:type build_component_path: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param build_drop_path: The root folder of the drop directory to validate or generate.
:type build_drop_path: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param build_list_file: Full file name of a list file that contains all files to be validated.
:type build_list_file: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param catalog_file_path: The path of the signed catalog file used to validate the manifest.json.
:type catalog_file_path: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param config_file_path: The json file that contains the configuration for the DropValidator.
:type config_file_path: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param docker_images_to_scan: Comma separated list of docker image names or hashes to be scanned for packages, ex: ubuntu:16.04, 56bab49eef2ef07505f6a1b0d5bd3a601dfc3c76ad4460f24c91d6fa298369ab.
:type docker_images_to_scan: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param external_document_reference_list_file: Full file path to a file that contains list of external SBOMs to be included as External document reference.
:type external_document_reference_list_file: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param hash_algorithm: The Hash algorithm to use while verifying the hash value of a file.
:type hash_algorithm: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param ignore_missing: If set, will not fail validation on the files presented in Manifest but missing on the disk.
:type ignore_missing: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param manifest_dir_path: The root folder where the generated manifest (and other files like bsi.json) will be placed. By default this folder is generated at the same level as the build drop, with the name '_manifest'
:type manifest_dir_path: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param manifest_info: A list of name and version of the manifest that we are generating.
:type manifest_info: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param manifest_tool_action: The action currently being performed by the manifest tool.
:type manifest_tool_action: object
:param package_name: The name of the package this SBOM represents.
:type package_name: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param package_version: The version of the package this SBOM represents.
:type package_version: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param parallelism: The number of parallel threads to use for the workflows.
:type parallelism: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param root_path_filter: If you're downloading only a part of the drop using the '-r' or 'root' parameter in the drop client, specify the same string value here in order to skip validating paths that are not downloaded.
:type root_path_filter: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param telemetry_file_path: If specified, we will store the generated telemetry for the execution of the SBOM tool at this path.
:type telemetry_file_path: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
:param validate_signature: If set, will validate the manifest using the signed catalog file.
:type validate_signature: :class:`ConfigurationSetting <azure.devops.v7_1.sbom.models.ConfigurationSetting>`
"""
_attribute_map = {
'additional_component_detector_args': {'key': 'additionalComponentDetectorArgs', 'type': 'ConfigurationSetting'},
'build_component_path': {'key': 'buildComponentPath', 'type': 'ConfigurationSetting'},
'build_drop_path': {'key': 'buildDropPath', 'type': 'ConfigurationSetting'},
'build_list_file': {'key': 'buildListFile', 'type': 'ConfigurationSetting'},
'catalog_file_path': {'key': 'catalogFilePath', 'type': 'ConfigurationSetting'},
'config_file_path': {'key': 'configFilePath', 'type': 'ConfigurationSetting'},
'docker_images_to_scan': {'key': 'dockerImagesToScan', 'type': 'ConfigurationSetting'},
'external_document_reference_list_file': {'key': 'externalDocumentReferenceListFile', 'type': 'ConfigurationSetting'},
'hash_algorithm': {'key': 'hashAlgorithm', 'type': 'ConfigurationSetting'},
'ignore_missing': {'key': 'ignoreMissing', 'type': 'ConfigurationSetting'},
'manifest_dir_path': {'key': 'manifestDirPath', 'type': 'ConfigurationSetting'},
'manifest_info': {'key': 'manifestInfo', 'type': 'ConfigurationSetting'},
'manifest_tool_action': {'key': 'manifestToolAction', 'type': 'object'},
'package_name': {'key': 'packageName', 'type': 'ConfigurationSetting'},
'package_version': {'key': 'packageVersion', 'type': 'ConfigurationSetting'},
'parallelism': {'key': 'parallelism', 'type': 'ConfigurationSetting'},
'root_path_filter': {'key': 'rootPathFilter', 'type': 'ConfigurationSetting'},
'telemetry_file_path': {'key': 'telemetryFilePath', 'type': 'ConfigurationSetting'},
'validate_signature': {'key': 'validateSignature', 'type': 'ConfigurationSetting'}
}
def __init__(self, additional_component_detector_args=None, build_component_path=None, build_drop_path=None, build_list_file=None, catalog_file_path=None, config_file_path=None, docker_images_to_scan=None, external_document_reference_list_file=None, hash_algorithm=None, ignore_missing=None, manifest_dir_path=None, manifest_info=None, manifest_tool_action=None, package_name=None, package_version=None, parallelism=None, root_path_filter=None, telemetry_file_path=None, validate_signature=None):
super(Configuration, self).__init__()
self.additional_component_detector_args = additional_component_detector_args
self.build_component_path = build_component_path
self.build_drop_path = build_drop_path
self.build_list_file = build_list_file
self.catalog_file_path = catalog_file_path
self.config_file_path = config_file_path
self.docker_images_to_scan = docker_images_to_scan
self.external_document_reference_list_file = external_document_reference_list_file
self.hash_algorithm = hash_algorithm
self.ignore_missing = ignore_missing
self.manifest_dir_path = manifest_dir_path
self.manifest_info = manifest_info
self.manifest_tool_action = manifest_tool_action
self.package_name = package_name
self.package_version = package_version
self.parallelism = parallelism
self.root_path_filter = root_path_filter
self.telemetry_file_path = telemetry_file_path
self.validate_signature = validate_signature
class ConfigurationSetting(Model):
"""
Encapsulates a configuration setting to provide metadata about the setting source and type.
:param source: The source where this setting came from.
:type source: str
:param value: The actual value of the setting.
:type value: object
"""
_attribute_map = {
'source': {'key': 'source', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, source=None, value=None):
super(ConfigurationSetting, self).__init__()
self.source = source
self.value = value
class FileHash(Model):
"""
Used to provide the filename and hash of the SBOM file to be added to the catalog file.
:param file_name: The filename of the SBOM.
:type file_name: str
:param hash: The string hash of the SBOM file.
:type hash: str
:param hash_algorithm_name: The HashAlgorithmName used to generate the hash of the file.
:type hash_algorithm_name: HashAlgorithmName
"""
_attribute_map = {
'file_name': {'key': 'fileName', 'type': 'str'},
'hash': {'key': 'hash', 'type': 'str'},
'hash_algorithm_name': {'key': 'hashAlgorithmName', 'type': 'HashAlgorithmName'}
}
def __init__(self, file_name=None, hash=None, hash_algorithm_name=None):
super(FileHash, self).__init__()
self.file_name = file_name
self.hash = hash
self.hash_algorithm_name = hash_algorithm_name
class ManifestInfo(Model):
"""
Defines a manifest name and version.
:param name: The name of the manifest.
:type name: str
:param version: The version of the manifest.
:type version: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, name=None, version=None):
super(ManifestInfo, self).__init__()
self.name = name
self.version = version
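def _example_serialize_manifest_info():
    # Illustrative sketch (editorial addition): the _attribute_map entries above
    # drive msrest (de)serialization, mapping Python attribute names to the
    # camelCase wire keys. The 'SPDX'/'2.2' values are invented.
    from msrest import Serializer
    serializer = Serializer({'ManifestInfo': ManifestInfo})
    info = ManifestInfo(name='SPDX', version='2.2')
    # Produces a plain dict such as {'name': 'SPDX', 'version': '2.2'}.
    return serializer.body(info, 'ManifestInfo')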
class SBOMFile(Model):
"""
Represents a SBOM file object and contains additional properties related to the file.
:param file_size_in_bytes: The size of the SBOM file in bytes.
:type file_size_in_bytes: long
:param sbom_file_path: The path where the final generated SBOM is placed.
:type sbom_file_path: str
:param sbom_format_name: The name and version of the format of the generated SBOM.
:type sbom_format_name: :class:`ManifestInfo <azure.devops.v7_1.sbom.models.ManifestInfo>`
"""
_attribute_map = {
'file_size_in_bytes': {'key': 'fileSizeInBytes', 'type': 'long'},
'sbom_file_path': {'key': 'sbomFilePath', 'type': 'str'},
'sbom_format_name': {'key': 'sbomFormatName', 'type': 'ManifestInfo'}
}
def __init__(self, file_size_in_bytes=None, sbom_file_path=None, sbom_format_name=None):
super(SBOMFile, self).__init__()
self.file_size_in_bytes = file_size_in_bytes
self.sbom_file_path = sbom_file_path
self.sbom_format_name = sbom_format_name
class SBOMTelemetry(Model):
"""
The telemetry that is logged to a file/console for the given SBOM execution.
:param bsi_data: All available bsi data from the task build execution which includes build and system environment variables like repository and build information.
:type bsi_data: dict
:param bsi_source: The source of the bsi data.
:type bsi_source: str
:param e2_eTask_result: The end to end results of the extension task.
:type e2_eTask_result: str
:param parameters: A list of ConfigurationSetting values representing each input parameter used in the validation.
:type parameters: :class:`Configuration <azure.devops.v7_1.sbom.models.Configuration>`
:param result: The result of the execution
:type result: str
:param sbom_formats_used: A list of the SBOM formats and related file properties that was used in the generation/validation of the SBOM.
:type sbom_formats_used: list of :class:`SBOMFile <azure.devops.v7_1.sbom.models.SBOMFile>`
:param switches: Any internal switches and their values that were used during the execution. A switch can be something that was provided through a configuration or an environment variable.
:type switches: dict
:param task_error_message: Error messages that came from the extension task.
:type task_error_message: str
:param telemetry_id: The unique id for this telemetry
:type telemetry_id: str
:param tool_execution_result: The result of the tool as a numeric value.
:type tool_execution_result: int
"""
_attribute_map = {
'bsi_data': {'key': 'bsiData', 'type': '{str}'},
'bsi_source': {'key': 'bsiSource', 'type': 'str'},
'e2_eTask_result': {'key': 'e2ETaskResult', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'Configuration'},
'result': {'key': 'result', 'type': 'str'},
'sbom_formats_used': {'key': 'sbomFormatsUsed', 'type': '[SBOMFile]'},
'switches': {'key': 'switches', 'type': '{object}'},
'task_error_message': {'key': 'taskErrorMessage', 'type': 'str'},
'telemetry_id': {'key': 'telemetryId', 'type': 'str'},
'tool_execution_result': {'key': 'toolExecutionResult', 'type': 'int'}
}
def __init__(self, bsi_data=None, bsi_source=None, e2_eTask_result=None, parameters=None, result=None, sbom_formats_used=None, switches=None, task_error_message=None, telemetry_id=None, tool_execution_result=None):
super(SBOMTelemetry, self).__init__()
self.bsi_data = bsi_data
self.bsi_source = bsi_source
self.e2_eTask_result = e2_eTask_result
self.parameters = parameters
self.result = result
self.sbom_formats_used = sbom_formats_used
self.switches = switches
self.task_error_message = task_error_message
self.telemetry_id = telemetry_id
self.tool_execution_result = tool_execution_result
class SignResponseBase(Model):
"""
The base response object for all responses from the signing api.
:param customer_correlation_id: The customer correlation id that is sent to ESRP for correlating the current request to ESRP.
:type customer_correlation_id: str
:param error_info: If this is an error response, it will have more information about the error.
:type error_info: str
:param result: The result of the response.
:type result: object
"""
_attribute_map = {
'customer_correlation_id': {'key': 'customerCorrelationId', 'type': 'str'},
'error_info': {'key': 'errorInfo', 'type': 'str'},
'result': {'key': 'result', 'type': 'object'}
}
def __init__(self, customer_correlation_id=None, error_info=None, result=None):
super(SignResponseBase, self).__init__()
self.customer_correlation_id = customer_correlation_id
self.error_info = error_info
self.result = result
class SignStatusResponse(SignResponseBase):
"""
The response returned by the sign status api.
:param customer_correlation_id: The customer correlation id that is sent to ESRP for correlating the current request to ESRP.
:type customer_correlation_id: str
:param error_info: If this is an error response, it will have more information about the error.
:type error_info: str
:param result: The result of the response.
:type result: object
:param download_url: The pre-signed download url used to download the signed catalog file.
:type download_url: str
"""
_attribute_map = {
'customer_correlation_id': {'key': 'customerCorrelationId', 'type': 'str'},
'error_info': {'key': 'errorInfo', 'type': 'str'},
'result': {'key': 'result', 'type': 'object'},
'download_url': {'key': 'downloadUrl', 'type': 'str'}
}
def __init__(self, customer_correlation_id=None, error_info=None, result=None, download_url=None):
super(SignStatusResponse, self).__init__(customer_correlation_id=customer_correlation_id, error_info=error_info, result=result)
self.download_url = download_url
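def _example_deserialize_sign_status():
    # Illustrative sketch (editorial addition): turning a raw signing-api
    # payload into the typed response above. The payload values are invented,
    # and calling the deserializer as (type_name, data) is the msrest pattern.
    from msrest import Deserializer
    deserializer = Deserializer({'SignResponseBase': SignResponseBase,
                                 'SignStatusResponse': SignStatusResponse})
    payload = {'customerCorrelationId': 'abc-123', 'result': 'Success',
               'downloadUrl': 'https://example.test/signed.cat'}
    response = deserializer('SignStatusResponse', payload)
    return response.download_url  # 'https://example.test/signed.cat'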
__all__ = [
'Configuration',
'ConfigurationSetting',
'FileHash',
'ManifestInfo',
'SBOMFile',
'SBOMTelemetry',
'SignResponseBase',
'SignStatusResponse',
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/sbom/models.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/sbom/models.py",
"repo_id": "azure-devops-python-api",
"token_count": 5801
}
| 373 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AggregatedDataForResultTrend(Model):
"""
:param duration: This is tests execution duration.
:type duration: object
:param results_by_outcome:
:type results_by_outcome: dict
:param run_summary_by_state:
:type run_summary_by_state: dict
:param test_results_context:
:type test_results_context: :class:`TestResultsContext <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultsContext>`
:param total_tests:
:type total_tests: int
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'object'},
'results_by_outcome': {'key': 'resultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'run_summary_by_state': {'key': 'runSummaryByState', 'type': '{AggregatedRunsByState}'},
'test_results_context': {'key': 'testResultsContext', 'type': 'TestResultsContext'},
'total_tests': {'key': 'totalTests', 'type': 'int'}
}
def __init__(self, duration=None, results_by_outcome=None, run_summary_by_state=None, test_results_context=None, total_tests=None):
super(AggregatedDataForResultTrend, self).__init__()
self.duration = duration
self.results_by_outcome = results_by_outcome
self.run_summary_by_state = run_summary_by_state
self.test_results_context = test_results_context
self.total_tests = total_tests
class AggregatedResultDetailsByOutcome(Model):
"""
Result details for a particular test result outcome.
:param count: Number of results for current outcome.
:type count: int
:param duration: Time taken by results.
:type duration: object
:param outcome: Test result outcome
:type outcome: object
:param rerun_result_count: Number of results on rerun
:type rerun_result_count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'duration': {'key': 'duration', 'type': 'object'},
'outcome': {'key': 'outcome', 'type': 'object'},
'rerun_result_count': {'key': 'rerunResultCount', 'type': 'int'}
}
def __init__(self, count=None, duration=None, outcome=None, rerun_result_count=None):
super(AggregatedResultDetailsByOutcome, self).__init__()
self.count = count
self.duration = duration
self.outcome = outcome
self.rerun_result_count = rerun_result_count
class AggregatedResultsAnalysis(Model):
"""
:param duration:
:type duration: object
:param not_reported_results_by_outcome:
:type not_reported_results_by_outcome: dict
:param previous_context:
:type previous_context: :class:`TestResultsContext <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultsContext>`
:param results_by_outcome:
:type results_by_outcome: dict
:param results_difference:
:type results_difference: :class:`AggregatedResultsDifference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.AggregatedResultsDifference>`
:param run_summary_by_outcome:
:type run_summary_by_outcome: dict
:param run_summary_by_state:
:type run_summary_by_state: dict
:param total_tests:
:type total_tests: int
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'object'},
'not_reported_results_by_outcome': {'key': 'notReportedResultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'previous_context': {'key': 'previousContext', 'type': 'TestResultsContext'},
'results_by_outcome': {'key': 'resultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'results_difference': {'key': 'resultsDifference', 'type': 'AggregatedResultsDifference'},
'run_summary_by_outcome': {'key': 'runSummaryByOutcome', 'type': '{AggregatedRunsByOutcome}'},
'run_summary_by_state': {'key': 'runSummaryByState', 'type': '{AggregatedRunsByState}'},
'total_tests': {'key': 'totalTests', 'type': 'int'}
}
def __init__(self, duration=None, not_reported_results_by_outcome=None, previous_context=None, results_by_outcome=None, results_difference=None, run_summary_by_outcome=None, run_summary_by_state=None, total_tests=None):
super(AggregatedResultsAnalysis, self).__init__()
self.duration = duration
self.not_reported_results_by_outcome = not_reported_results_by_outcome
self.previous_context = previous_context
self.results_by_outcome = results_by_outcome
self.results_difference = results_difference
self.run_summary_by_outcome = run_summary_by_outcome
self.run_summary_by_state = run_summary_by_state
self.total_tests = total_tests
class AggregatedResultsByOutcome(Model):
"""
:param count:
:type count: int
:param duration:
:type duration: object
:param group_by_field:
:type group_by_field: str
:param group_by_value:
:type group_by_value: object
:param outcome:
:type outcome: object
:param rerun_result_count:
:type rerun_result_count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'duration': {'key': 'duration', 'type': 'object'},
'group_by_field': {'key': 'groupByField', 'type': 'str'},
'group_by_value': {'key': 'groupByValue', 'type': 'object'},
'outcome': {'key': 'outcome', 'type': 'object'},
'rerun_result_count': {'key': 'rerunResultCount', 'type': 'int'}
}
def __init__(self, count=None, duration=None, group_by_field=None, group_by_value=None, outcome=None, rerun_result_count=None):
super(AggregatedResultsByOutcome, self).__init__()
self.count = count
self.duration = duration
self.group_by_field = group_by_field
self.group_by_value = group_by_value
self.outcome = outcome
self.rerun_result_count = rerun_result_count
class AggregatedResultsDifference(Model):
"""
:param increase_in_duration:
:type increase_in_duration: object
:param increase_in_failures:
:type increase_in_failures: int
:param increase_in_non_impacted_tests:
:type increase_in_non_impacted_tests: int
:param increase_in_other_tests:
:type increase_in_other_tests: int
:param increase_in_passed_tests:
:type increase_in_passed_tests: int
:param increase_in_total_tests:
:type increase_in_total_tests: int
"""
_attribute_map = {
'increase_in_duration': {'key': 'increaseInDuration', 'type': 'object'},
'increase_in_failures': {'key': 'increaseInFailures', 'type': 'int'},
'increase_in_non_impacted_tests': {'key': 'increaseInNonImpactedTests', 'type': 'int'},
'increase_in_other_tests': {'key': 'increaseInOtherTests', 'type': 'int'},
'increase_in_passed_tests': {'key': 'increaseInPassedTests', 'type': 'int'},
'increase_in_total_tests': {'key': 'increaseInTotalTests', 'type': 'int'}
}
def __init__(self, increase_in_duration=None, increase_in_failures=None, increase_in_non_impacted_tests=None, increase_in_other_tests=None, increase_in_passed_tests=None, increase_in_total_tests=None):
super(AggregatedResultsDifference, self).__init__()
self.increase_in_duration = increase_in_duration
self.increase_in_failures = increase_in_failures
self.increase_in_non_impacted_tests = increase_in_non_impacted_tests
self.increase_in_other_tests = increase_in_other_tests
self.increase_in_passed_tests = increase_in_passed_tests
self.increase_in_total_tests = increase_in_total_tests
class AggregatedRunsByOutcome(Model):
"""
:param outcome:
:type outcome: object
:param runs_count:
:type runs_count: int
"""
_attribute_map = {
'outcome': {'key': 'outcome', 'type': 'object'},
'runs_count': {'key': 'runsCount', 'type': 'int'}
}
def __init__(self, outcome=None, runs_count=None):
super(AggregatedRunsByOutcome, self).__init__()
self.outcome = outcome
self.runs_count = runs_count
class AggregatedRunsByState(Model):
"""
:param results_by_outcome:
:type results_by_outcome: dict
:param runs_count:
:type runs_count: int
:param state:
:type state: object
"""
_attribute_map = {
'results_by_outcome': {'key': 'resultsByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'runs_count': {'key': 'runsCount', 'type': 'int'},
'state': {'key': 'state', 'type': 'object'}
}
def __init__(self, results_by_outcome=None, runs_count=None, state=None):
super(AggregatedRunsByState, self).__init__()
self.results_by_outcome = results_by_outcome
self.runs_count = runs_count
self.state = state
class BuildConfiguration(Model):
"""
BuildConfiguration Details.
:param branch_name: Branch name for which build is generated.
:type branch_name: str
:param build_definition_id: BuildDefinitionId for build.
:type build_definition_id: int
:param build_system: Build system.
:type build_system: str
:param creation_date: Build Creation Date.
:type creation_date: datetime
:param flavor: Build flavor (e.g. Build/Release).
:type flavor: str
:param id: BuildConfiguration Id.
:type id: int
:param number: Build Number.
:type number: str
:param platform: BuildConfiguration Platform.
:type platform: str
:param project: Project associated with this BuildConfiguration.
:type project: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param repository_guid: Repository Guid for the Build.
:type repository_guid: str
:param repository_id: Repository Id.
:type repository_id: int
:param repository_type: Repository type (e.g. TFSGit).
:type repository_type: str
:param source_version: Source version (or first commit) for which the build was triggered.
:type source_version: str
:param target_branch_name: Target BranchName.
:type target_branch_name: str
:param uri: Build Uri.
:type uri: str
"""
_attribute_map = {
'branch_name': {'key': 'branchName', 'type': 'str'},
'build_definition_id': {'key': 'buildDefinitionId', 'type': 'int'},
'build_system': {'key': 'buildSystem', 'type': 'str'},
'creation_date': {'key': 'creationDate', 'type': 'iso-8601'},
'flavor': {'key': 'flavor', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'number': {'key': 'number', 'type': 'str'},
'platform': {'key': 'platform', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'repository_guid': {'key': 'repositoryGuid', 'type': 'str'},
'repository_id': {'key': 'repositoryId', 'type': 'int'},
'repository_type': {'key': 'repositoryType', 'type': 'str'},
'source_version': {'key': 'sourceVersion', 'type': 'str'},
'target_branch_name': {'key': 'targetBranchName', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'}
}
def __init__(self, branch_name=None, build_definition_id=None, build_system=None, creation_date=None, flavor=None, id=None, number=None, platform=None, project=None, repository_guid=None, repository_id=None, repository_type=None, source_version=None, target_branch_name=None, uri=None):
super(BuildConfiguration, self).__init__()
self.branch_name = branch_name
self.build_definition_id = build_definition_id
self.build_system = build_system
self.creation_date = creation_date
self.flavor = flavor
self.id = id
self.number = number
self.platform = platform
self.project = project
self.repository_guid = repository_guid
self.repository_id = repository_id
self.repository_type = repository_type
self.source_version = source_version
self.target_branch_name = target_branch_name
self.uri = uri
class BuildCoverage(Model):
"""
Build Coverage Detail
:param code_coverage_file_url: Code Coverage File Url
:type code_coverage_file_url: str
:param configuration: Build Configuration
:type configuration: :class:`BuildConfiguration <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.BuildConfiguration>`
:param last_error: Last Error
:type last_error: str
:param modules: List of Modules
:type modules: list of :class:`ModuleCoverage <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ModuleCoverage>`
:param state: State
:type state: str
"""
_attribute_map = {
'code_coverage_file_url': {'key': 'codeCoverageFileUrl', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'BuildConfiguration'},
'last_error': {'key': 'lastError', 'type': 'str'},
'modules': {'key': 'modules', 'type': '[ModuleCoverage]'},
'state': {'key': 'state', 'type': 'str'}
}
def __init__(self, code_coverage_file_url=None, configuration=None, last_error=None, modules=None, state=None):
super(BuildCoverage, self).__init__()
self.code_coverage_file_url = code_coverage_file_url
self.configuration = configuration
self.last_error = last_error
self.modules = modules
self.state = state
class BuildReference(Model):
"""
Reference to a build.
:param branch_name: Branch name.
:type branch_name: str
:param build_system: Build system.
:type build_system: str
:param definition_id: Build Definition ID.
:type definition_id: int
:param id: Build ID.
:type id: int
:param number: Build Number.
:type number: str
:param repository_id: Repository ID.
:type repository_id: str
:param uri: Build URI.
:type uri: str
"""
_attribute_map = {
'branch_name': {'key': 'branchName', 'type': 'str'},
'build_system': {'key': 'buildSystem', 'type': 'str'},
'definition_id': {'key': 'definitionId', 'type': 'int'},
'id': {'key': 'id', 'type': 'int'},
'number': {'key': 'number', 'type': 'str'},
'repository_id': {'key': 'repositoryId', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'}
}
def __init__(self, branch_name=None, build_system=None, definition_id=None, id=None, number=None, repository_id=None, uri=None):
super(BuildReference, self).__init__()
self.branch_name = branch_name
self.build_system = build_system
self.definition_id = definition_id
self.id = id
self.number = number
self.repository_id = repository_id
self.uri = uri
class CodeCoverageData(Model):
"""
Represents the build configuration (platform, flavor) and coverage data for the build
:param build_flavor: Flavor of build for which data is retrieved/published
:type build_flavor: str
:param build_platform: Platform of build for which data is retrieved/published
:type build_platform: str
:param coverage_stats: List of coverage data for the build
:type coverage_stats: list of :class:`CodeCoverageStatistics <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CodeCoverageStatistics>`
"""
_attribute_map = {
'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
'build_platform': {'key': 'buildPlatform', 'type': 'str'},
'coverage_stats': {'key': 'coverageStats', 'type': '[CodeCoverageStatistics]'}
}
def __init__(self, build_flavor=None, build_platform=None, coverage_stats=None):
super(CodeCoverageData, self).__init__()
self.build_flavor = build_flavor
self.build_platform = build_platform
self.coverage_stats = coverage_stats
class CodeCoverageStatistics(Model):
"""
Represents the code coverage statistics for a particular coverage label (modules, statements, blocks, etc.)
:param covered: Covered units
:type covered: int
:param delta: Delta of coverage
:type delta: float
:param is_delta_available: Is delta valid
:type is_delta_available: bool
:param label: Label of coverage data ("Blocks", "Statements", "Modules", etc.)
:type label: str
:param position: Position of label
:type position: int
:param total: Total units
:type total: int
"""
_attribute_map = {
'covered': {'key': 'covered', 'type': 'int'},
'delta': {'key': 'delta', 'type': 'float'},
'is_delta_available': {'key': 'isDeltaAvailable', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'position': {'key': 'position', 'type': 'int'},
'total': {'key': 'total', 'type': 'int'}
}
def __init__(self, covered=None, delta=None, is_delta_available=None, label=None, position=None, total=None):
super(CodeCoverageStatistics, self).__init__()
self.covered = covered
self.delta = delta
self.is_delta_available = is_delta_available
self.label = label
self.position = position
self.total = total
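def _example_coverage_percent(stats):
    # Illustrative sketch (editorial addition): derive a coverage percentage
    # from the covered/total counters above; returns None when total is unset.
    if not stats.total:
        return None
    return 100.0 * stats.covered / stats.total
# e.g. _example_coverage_percent(CodeCoverageStatistics(covered=75, total=100,
#      label='Lines', position=1)) == 75.0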
class CodeCoverageSummary(Model):
"""
Represents the code coverage summary results. Used to publish or retrieve a code coverage summary against a build.
:param build: Uri of build for which data is retrieved/published
:type build: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param coverage_data: List of coverage data and details for the build
:type coverage_data: list of :class:`CodeCoverageData <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CodeCoverageData>`
:param delta_build: Uri of build against which difference in coverage is computed
:type delta_build: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param status: Status of the code coverage summary.
:type status: object
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'ShallowReference'},
'coverage_data': {'key': 'coverageData', 'type': '[CodeCoverageData]'},
'delta_build': {'key': 'deltaBuild', 'type': 'ShallowReference'},
'status': {'key': 'status', 'type': 'object'}
}
def __init__(self, build=None, coverage_data=None, delta_build=None, status=None):
super(CodeCoverageSummary, self).__init__()
self.build = build
self.coverage_data = coverage_data
self.delta_build = delta_build
self.status = status
class CoverageStatistics(Model):
"""
:param blocks_covered:
:type blocks_covered: int
:param blocks_not_covered:
:type blocks_not_covered: int
:param lines_covered:
:type lines_covered: int
:param lines_not_covered:
:type lines_not_covered: int
:param lines_partially_covered:
:type lines_partially_covered: int
"""
_attribute_map = {
'blocks_covered': {'key': 'blocksCovered', 'type': 'int'},
'blocks_not_covered': {'key': 'blocksNotCovered', 'type': 'int'},
'lines_covered': {'key': 'linesCovered', 'type': 'int'},
'lines_not_covered': {'key': 'linesNotCovered', 'type': 'int'},
'lines_partially_covered': {'key': 'linesPartiallyCovered', 'type': 'int'}
}
def __init__(self, blocks_covered=None, blocks_not_covered=None, lines_covered=None, lines_not_covered=None, lines_partially_covered=None):
super(CoverageStatistics, self).__init__()
self.blocks_covered = blocks_covered
self.blocks_not_covered = blocks_not_covered
self.lines_covered = lines_covered
self.lines_not_covered = lines_not_covered
self.lines_partially_covered = lines_partially_covered
class CustomTestField(Model):
"""
Custom field information. Allowed key/value pairs: (AttemptId: int value, IsTestResultFlaky: bool).
:param field_name: Field Name.
:type field_name: str
:param value: Field value.
:type value: object
"""
_attribute_map = {
'field_name': {'key': 'fieldName', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, field_name=None, value=None):
super(CustomTestField, self).__init__()
self.field_name = field_name
self.value = value
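def _example_custom_test_fields():
    # Illustrative sketch (editorial addition): the two key/value pairs the
    # docstring above allows.
    return [CustomTestField(field_name='AttemptId', value=1),
            CustomTestField(field_name='IsTestResultFlaky', value=False)]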
class DtlEnvironmentDetails(Model):
"""
This is a temporary class to provide the details for the test run environment.
:param csm_content:
:type csm_content: str
:param csm_parameters:
:type csm_parameters: str
:param subscription_name:
:type subscription_name: str
"""
_attribute_map = {
'csm_content': {'key': 'csmContent', 'type': 'str'},
'csm_parameters': {'key': 'csmParameters', 'type': 'str'},
'subscription_name': {'key': 'subscriptionName', 'type': 'str'}
}
def __init__(self, csm_content=None, csm_parameters=None, subscription_name=None):
super(DtlEnvironmentDetails, self).__init__()
self.csm_content = csm_content
self.csm_parameters = csm_parameters
self.subscription_name = subscription_name
class FailingSince(Model):
"""
Failing since information of a test result.
:param build: Build reference since failing.
:type build: :class:`BuildReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.BuildReference>`
:param date: Time since failing(UTC).
:type date: datetime
:param release: Release reference since failing.
:type release: :class:`ReleaseReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReleaseReference>`
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'BuildReference'},
'date': {'key': 'date', 'type': 'iso-8601'},
'release': {'key': 'release', 'type': 'ReleaseReference'}
}
def __init__(self, build=None, date=None, release=None):
super(FailingSince, self).__init__()
self.build = build
self.date = date
self.release = release
class FieldDetailsForTestResults(Model):
"""
:param field_name: Group by field name
:type field_name: str
:param groups_for_field: Group by field values
:type groups_for_field: list of object
"""
_attribute_map = {
'field_name': {'key': 'fieldName', 'type': 'str'},
'groups_for_field': {'key': 'groupsForField', 'type': '[object]'}
}
def __init__(self, field_name=None, groups_for_field=None):
super(FieldDetailsForTestResults, self).__init__()
self.field_name = field_name
self.groups_for_field = groups_for_field
class FileCoverageRequest(Model):
"""
:param file_path:
:type file_path: str
:param pull_request_base_iteration_id:
:type pull_request_base_iteration_id: int
:param pull_request_id:
:type pull_request_id: int
:param pull_request_iteration_id:
:type pull_request_iteration_id: int
:param repo_id:
:type repo_id: str
"""
_attribute_map = {
'file_path': {'key': 'filePath', 'type': 'str'},
'pull_request_base_iteration_id': {'key': 'pullRequestBaseIterationId', 'type': 'int'},
'pull_request_id': {'key': 'pullRequestId', 'type': 'int'},
'pull_request_iteration_id': {'key': 'pullRequestIterationId', 'type': 'int'},
'repo_id': {'key': 'repoId', 'type': 'str'}
}
def __init__(self, file_path=None, pull_request_base_iteration_id=None, pull_request_id=None, pull_request_iteration_id=None, repo_id=None):
super(FileCoverageRequest, self).__init__()
self.file_path = file_path
self.pull_request_base_iteration_id = pull_request_base_iteration_id
self.pull_request_id = pull_request_id
self.pull_request_iteration_id = pull_request_iteration_id
self.repo_id = repo_id
class FlakyDetection(Model):
"""
:param flaky_detection_pipelines: FlakyDetectionPipelines defines the pipelines used for detection.
:type flaky_detection_pipelines: :class:`FlakyDetectionPipelines <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.FlakyDetectionPipelines>`
:param flaky_detection_type: FlakyDetectionType defines the detection type, i.e. 1. System or 2. Manual.
:type flaky_detection_type: object
"""
_attribute_map = {
'flaky_detection_pipelines': {'key': 'flakyDetectionPipelines', 'type': 'FlakyDetectionPipelines'},
'flaky_detection_type': {'key': 'flakyDetectionType', 'type': 'object'}
}
def __init__(self, flaky_detection_pipelines=None, flaky_detection_type=None):
super(FlakyDetection, self).__init__()
self.flaky_detection_pipelines = flaky_detection_pipelines
self.flaky_detection_type = flaky_detection_type
class FlakyDetectionPipelines(Model):
"""
:param allowed_pipelines: AllowedPipelines - list of all pipelines allowed for detection.
:type allowed_pipelines: list of int
:param is_all_pipelines_allowed: IsAllPipelinesAllowed is set if users configure all of the system's pipelines.
:type is_all_pipelines_allowed: bool
"""
_attribute_map = {
'allowed_pipelines': {'key': 'allowedPipelines', 'type': '[int]'},
'is_all_pipelines_allowed': {'key': 'isAllPipelinesAllowed', 'type': 'bool'}
}
def __init__(self, allowed_pipelines=None, is_all_pipelines_allowed=None):
super(FlakyDetectionPipelines, self).__init__()
self.allowed_pipelines = allowed_pipelines
self.is_all_pipelines_allowed = is_all_pipelines_allowed
class FlakySettings(Model):
"""
:param flaky_detection: FlakyDetection defines types of detection.
:type flaky_detection: :class:`FlakyDetection <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.FlakyDetection>`
:param flaky_in_summary_report: FlakyInSummaryReport defines whether flaky data should show in the summary report.
:type flaky_in_summary_report: bool
:param is_flaky_bug_created: IsFlakyBugCreated defines whether any bug has been created with a flaky test result.
:type is_flaky_bug_created: bool
:param manual_mark_unmark_flaky: ManualMarkUnmarkFlaky defines manual marking/unmarking of flaky test cases.
:type manual_mark_unmark_flaky: bool
"""
_attribute_map = {
'flaky_detection': {'key': 'flakyDetection', 'type': 'FlakyDetection'},
'flaky_in_summary_report': {'key': 'flakyInSummaryReport', 'type': 'bool'},
'is_flaky_bug_created': {'key': 'isFlakyBugCreated', 'type': 'bool'},
'manual_mark_unmark_flaky': {'key': 'manualMarkUnmarkFlaky', 'type': 'bool'}
}
def __init__(self, flaky_detection=None, flaky_in_summary_report=None, is_flaky_bug_created=None, manual_mark_unmark_flaky=None):
super(FlakySettings, self).__init__()
self.flaky_detection = flaky_detection
self.flaky_in_summary_report = flaky_in_summary_report
self.is_flaky_bug_created = is_flaky_bug_created
self.manual_mark_unmark_flaky = manual_mark_unmark_flaky
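def _example_flaky_settings():
    # Illustrative sketch (editorial addition): compose a FlakySettings payload
    # that enables detection on two specific pipelines. The 'system' detection
    # type literal is an assumption about the service's accepted values.
    pipelines = FlakyDetectionPipelines(allowed_pipelines=[101, 202],
                                        is_all_pipelines_allowed=False)
    detection = FlakyDetection(flaky_detection_pipelines=pipelines,
                               flaky_detection_type='system')
    return FlakySettings(flaky_detection=detection,
                         flaky_in_summary_report=True,
                         manual_mark_unmark_flaky=True)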
class FunctionCoverage(Model):
"""
:param class_:
:type class_: str
:param name:
:type name: str
:param namespace:
:type namespace: str
:param source_file:
:type source_file: str
:param statistics:
:type statistics: :class:`CoverageStatistics <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CoverageStatistics>`
"""
_attribute_map = {
'class_': {'key': 'class', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'source_file': {'key': 'sourceFile', 'type': 'str'},
'statistics': {'key': 'statistics', 'type': 'CoverageStatistics'}
}
def __init__(self, class_=None, name=None, namespace=None, source_file=None, statistics=None):
super(FunctionCoverage, self).__init__()
self.class_ = class_
self.name = name
self.namespace = namespace
self.source_file = source_file
self.statistics = statistics
class GraphSubjectBase(Model):
"""
:param _links:
:type _links: :class:`ReferenceLinks <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReferenceLinks>`
:param descriptor:
:type descriptor: str
:param display_name:
:type display_name: str
:param url:
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None):
super(GraphSubjectBase, self).__init__()
self._links = _links
self.descriptor = descriptor
self.display_name = display_name
self.url = url
class IdentityRef(GraphSubjectBase):
"""
:param _links:
:type _links: :class:`ReferenceLinks <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReferenceLinks>`
:param descriptor:
:type descriptor: str
:param display_name:
:type display_name: str
:param url:
:type url: str
:param directory_alias:
:type directory_alias: str
:param id:
:type id: str
:param image_url:
:type image_url: str
:param inactive:
:type inactive: bool
:param is_aad_identity:
:type is_aad_identity: bool
:param is_container:
:type is_container: bool
:param is_deleted_in_origin:
:type is_deleted_in_origin: bool
:param profile_url:
:type profile_url: str
:param unique_name:
:type unique_name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'directory_alias': {'key': 'directoryAlias', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'image_url': {'key': 'imageUrl', 'type': 'str'},
'inactive': {'key': 'inactive', 'type': 'bool'},
'is_aad_identity': {'key': 'isAadIdentity', 'type': 'bool'},
'is_container': {'key': 'isContainer', 'type': 'bool'},
'is_deleted_in_origin': {'key': 'isDeletedInOrigin', 'type': 'bool'},
'profile_url': {'key': 'profileUrl', 'type': 'str'},
'unique_name': {'key': 'uniqueName', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, directory_alias=None, id=None, image_url=None, inactive=None, is_aad_identity=None, is_container=None, is_deleted_in_origin=None, profile_url=None, unique_name=None):
super(IdentityRef, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url)
self.directory_alias = directory_alias
self.id = id
self.image_url = image_url
self.inactive = inactive
self.is_aad_identity = is_aad_identity
self.is_container = is_container
self.is_deleted_in_origin = is_deleted_in_origin
self.profile_url = profile_url
self.unique_name = unique_name
class JobReference(Model):
"""
Job in pipeline. This is related to matrixing in YAML.
:param attempt: Attempt number of the job
:type attempt: int
:param job_name: Matrixing in YAML generates copies of a job with different inputs in the matrix. JobName is the name of one such input. Maximum supported length for the name is 256 characters.
:type job_name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'job_name': {'key': 'jobName', 'type': 'str'}
}
def __init__(self, attempt=None, job_name=None):
super(JobReference, self).__init__()
self.attempt = attempt
self.job_name = job_name
class ModuleCoverage(Model):
"""
:param block_count:
:type block_count: int
:param block_data:
:type block_data: str
:param file_url: Code Coverage File Url
:type file_url: str
:param functions:
:type functions: list of :class:`FunctionCoverage <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.FunctionCoverage>`
:param name:
:type name: str
:param signature:
:type signature: str
:param signature_age:
:type signature_age: int
:param statistics:
:type statistics: :class:`CoverageStatistics <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CoverageStatistics>`
"""
_attribute_map = {
'block_count': {'key': 'blockCount', 'type': 'int'},
'block_data': {'key': 'blockData', 'type': 'str'},
'file_url': {'key': 'fileUrl', 'type': 'str'},
'functions': {'key': 'functions', 'type': '[FunctionCoverage]'},
'name': {'key': 'name', 'type': 'str'},
'signature': {'key': 'signature', 'type': 'str'},
'signature_age': {'key': 'signatureAge', 'type': 'int'},
'statistics': {'key': 'statistics', 'type': 'CoverageStatistics'}
}
def __init__(self, block_count=None, block_data=None, file_url=None, functions=None, name=None, signature=None, signature_age=None, statistics=None):
super(ModuleCoverage, self).__init__()
self.block_count = block_count
self.block_data = block_data
self.file_url = file_url
self.functions = functions
self.name = name
self.signature = signature
self.signature_age = signature_age
self.statistics = statistics
class NewTestResultLoggingSettings(Model):
"""
:param log_new_tests: LogNewTests defines whether or not new test cases coming into the system will be recorded.
:type log_new_tests: bool
"""
_attribute_map = {
'log_new_tests': {'key': 'logNewTests', 'type': 'bool'}
}
def __init__(self, log_new_tests=None):
super(NewTestResultLoggingSettings, self).__init__()
self.log_new_tests = log_new_tests
class PhaseReference(Model):
"""
Phase in pipeline
:param attempt: Attempt number of the phase
:type attempt: int
:param phase_name: Name of the phase. Maximum supported length for the name is 256 characters.
:type phase_name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'phase_name': {'key': 'phaseName', 'type': 'str'}
}
def __init__(self, attempt=None, phase_name=None):
super(PhaseReference, self).__init__()
self.attempt = attempt
self.phase_name = phase_name
class PipelineReference(Model):
"""
Pipeline reference
:param job_reference: Reference of the job
:type job_reference: :class:`JobReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.JobReference>`
:param phase_reference: Reference of the phase.
:type phase_reference: :class:`PhaseReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PhaseReference>`
:param pipeline_id: Reference of the pipeline with which this pipeline instance is related.
:type pipeline_id: int
:param stage_reference: Reference of the stage.
:type stage_reference: :class:`StageReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.StageReference>`
"""
_attribute_map = {
'job_reference': {'key': 'jobReference', 'type': 'JobReference'},
'phase_reference': {'key': 'phaseReference', 'type': 'PhaseReference'},
'pipeline_id': {'key': 'pipelineId', 'type': 'int'},
'stage_reference': {'key': 'stageReference', 'type': 'StageReference'}
}
def __init__(self, job_reference=None, phase_reference=None, pipeline_id=None, stage_reference=None):
super(PipelineReference, self).__init__()
self.job_reference = job_reference
self.phase_reference = phase_reference
self.pipeline_id = pipeline_id
self.stage_reference = stage_reference
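# Illustrative sketch: composing a PipelineReference from its stage/phase/job
# parts (StageReference, PhaseReference, and JobReference are defined in this
# module). The names and attempt numbers below are example assumptions.
#
#   pipeline_ref = PipelineReference(
#       pipeline_id=42,
#       stage_reference=StageReference(stage_name='Build', attempt=1),
#       phase_reference=PhaseReference(phase_name='AgentPhase', attempt=1),
#       job_reference=JobReference(job_name='windows_x64', attempt=1),
#   )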
class PipelineTestMetrics(Model):
"""
Test summary of a pipeline instance.
:param current_context: Reference of Pipeline instance for which test summary is calculated.
:type current_context: :class:`PipelineReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PipelineReference>`
:param results_analysis: This is the return value for the ResultsAnalysis metric: results insights, including failure analysis and increase/decrease in results count analysis.
:type results_analysis: :class:`ResultsAnalysis <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ResultsAnalysis>`
:param result_summary: This is the return value for the ResultSummary metric: a results summary based on results outcome.
:type result_summary: :class:`ResultSummary <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ResultSummary>`
:param run_summary: This is the return value for the RunSummary metric: a run summary.
:type run_summary: :class:`RunSummary <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunSummary>`
:param summary_at_child: Summary at child node.
:type summary_at_child: list of :class:`PipelineTestMetrics <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PipelineTestMetrics>`
"""
_attribute_map = {
'current_context': {'key': 'currentContext', 'type': 'PipelineReference'},
'results_analysis': {'key': 'resultsAnalysis', 'type': 'ResultsAnalysis'},
'result_summary': {'key': 'resultSummary', 'type': 'ResultSummary'},
'run_summary': {'key': 'runSummary', 'type': 'RunSummary'},
'summary_at_child': {'key': 'summaryAtChild', 'type': '[PipelineTestMetrics]'}
}
def __init__(self, current_context=None, results_analysis=None, result_summary=None, run_summary=None, summary_at_child=None):
super(PipelineTestMetrics, self).__init__()
self.current_context = current_context
self.results_analysis = results_analysis
self.result_summary = result_summary
self.run_summary = run_summary
self.summary_at_child = summary_at_child
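# Illustrative sketch: walking a PipelineTestMetrics tree returned by the
# service (the client call that produces `metrics` is assumed, not shown).
#
#   def print_run_counts(metrics, depth=0):
#       if metrics.run_summary is not None:
#           print(' ' * depth, metrics.run_summary.total_runs_count)
#       for child in (metrics.summary_at_child or []):
#           print_run_counts(child, depth + 2)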
class QueryModel(Model):
"""
:param query:
:type query: str
"""
_attribute_map = {
'query': {'key': 'query', 'type': 'str'}
}
def __init__(self, query=None):
super(QueryModel, self).__init__()
self.query = query
class ReferenceLinks(Model):
"""
:param links:
:type links: dict
"""
_attribute_map = {
'links': {'key': 'links', 'type': '{object}'}
}
def __init__(self, links=None):
super(ReferenceLinks, self).__init__()
self.links = links
class ReleaseReference(Model):
"""
Reference to a release.
:param attempt: Number of Release Attempt.
:type attempt: int
:param creation_date: Release Creation Date(UTC).
:type creation_date: datetime
:param definition_id: Release definition ID.
:type definition_id: int
:param environment_creation_date: Environment creation Date(UTC).
:type environment_creation_date: datetime
:param environment_definition_id: Release environment definition ID.
:type environment_definition_id: int
:param environment_definition_name: Release environment definition name.
:type environment_definition_name: str
:param environment_id: Release environment ID.
:type environment_id: int
:param environment_name: Release environment name.
:type environment_name: str
:param id: Release ID.
:type id: int
:param name: Release name.
:type name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'creation_date': {'key': 'creationDate', 'type': 'iso-8601'},
'definition_id': {'key': 'definitionId', 'type': 'int'},
'environment_creation_date': {'key': 'environmentCreationDate', 'type': 'iso-8601'},
'environment_definition_id': {'key': 'environmentDefinitionId', 'type': 'int'},
'environment_definition_name': {'key': 'environmentDefinitionName', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'int'},
'environment_name': {'key': 'environmentName', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, attempt=None, creation_date=None, definition_id=None, environment_creation_date=None, environment_definition_id=None, environment_definition_name=None, environment_id=None, environment_name=None, id=None, name=None):
super(ReleaseReference, self).__init__()
self.attempt = attempt
self.creation_date = creation_date
self.definition_id = definition_id
self.environment_creation_date = environment_creation_date
self.environment_definition_id = environment_definition_id
self.environment_definition_name = environment_definition_name
self.environment_id = environment_id
self.environment_name = environment_name
self.id = id
self.name = name
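# Illustrative sketch: a ReleaseReference pointing a test run at a specific
# release environment. All IDs and names here are example assumptions.
#
#   release_ref = ReleaseReference(
#       id=1001,
#       name='Release-42',
#       attempt=1,
#       environment_id=7,
#       environment_name='Staging',
#   )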
class ResultsAnalysis(Model):
"""
Results insights for runs with state completed and NeedInvestigation.
:param previous_context: Reference of pipeline instance from which to compare the results.
:type previous_context: :class:`PipelineReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PipelineReference>`
:param results_difference: Increase/Decrease in counts of results for a different outcome with respect to PreviousContext.
:type results_difference: :class:`AggregatedResultsDifference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.AggregatedResultsDifference>`
:param test_failures_analysis: Failure analysis of results with respect to PreviousContext
:type test_failures_analysis: :class:`TestResultFailuresAnalysis <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultFailuresAnalysis>`
"""
_attribute_map = {
'previous_context': {'key': 'previousContext', 'type': 'PipelineReference'},
'results_difference': {'key': 'resultsDifference', 'type': 'AggregatedResultsDifference'},
'test_failures_analysis': {'key': 'testFailuresAnalysis', 'type': 'TestResultFailuresAnalysis'}
}
def __init__(self, previous_context=None, results_difference=None, test_failures_analysis=None):
super(ResultsAnalysis, self).__init__()
self.previous_context = previous_context
self.results_difference = results_difference
self.test_failures_analysis = test_failures_analysis
class ResultsFilter(Model):
"""
:param automated_test_name:
:type automated_test_name: str
:param branch:
:type branch: str
:param executed_in:
:type executed_in: object
:param group_by:
:type group_by: str
:param max_complete_date:
:type max_complete_date: datetime
:param results_count:
:type results_count: int
:param test_case_id:
:type test_case_id: int
:param test_case_reference_ids:
:type test_case_reference_ids: list of int
:param test_plan_id:
:type test_plan_id: int
:param test_point_ids:
:type test_point_ids: list of int
:param test_results_context:
:type test_results_context: :class:`TestResultsContext <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultsContext>`
:param trend_days:
:type trend_days: int
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'branch': {'key': 'branch', 'type': 'str'},
'executed_in': {'key': 'executedIn', 'type': 'object'},
'group_by': {'key': 'groupBy', 'type': 'str'},
'max_complete_date': {'key': 'maxCompleteDate', 'type': 'iso-8601'},
'results_count': {'key': 'resultsCount', 'type': 'int'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'},
'test_case_reference_ids': {'key': 'testCaseReferenceIds', 'type': '[int]'},
'test_plan_id': {'key': 'testPlanId', 'type': 'int'},
'test_point_ids': {'key': 'testPointIds', 'type': '[int]'},
'test_results_context': {'key': 'testResultsContext', 'type': 'TestResultsContext'},
'trend_days': {'key': 'trendDays', 'type': 'int'}
}
def __init__(self, automated_test_name=None, branch=None, executed_in=None, group_by=None, max_complete_date=None, results_count=None, test_case_id=None, test_case_reference_ids=None, test_plan_id=None, test_point_ids=None, test_results_context=None, trend_days=None):
super(ResultsFilter, self).__init__()
self.automated_test_name = automated_test_name
self.branch = branch
self.executed_in = executed_in
self.group_by = group_by
self.max_complete_date = max_complete_date
self.results_count = results_count
self.test_case_id = test_case_id
self.test_case_reference_ids = test_case_reference_ids
self.test_plan_id = test_plan_id
self.test_point_ids = test_point_ids
self.test_results_context = test_results_context
self.trend_days = trend_days
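# Illustrative sketch: a ResultsFilter asking for results on a branch over a
# trend window. Field values below are example assumptions.
#
#   results_filter = ResultsFilter(
#       automated_test_name='MyNamespace.MyTests.LoginTest',
#       branch='refs/heads/main',
#       group_by='branch',
#       trend_days=7,
#       results_count=10,
#   )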
class ResultsSummaryByOutcome(Model):
"""
Result summary by the outcome of test results.
:param aggregated_result_details_by_outcome: Aggregated result details for each test result outcome.
:type aggregated_result_details_by_outcome: dict
:param duration: Time taken by results.
:type duration: object
:param not_reported_test_count: Total number of not reported test results.
:type not_reported_test_count: int
:param total_test_count: Total number of test results. (This includes NotImpacted test results, which must be excluded when calculating the pass/fail test result percentage.)
:type total_test_count: int
"""
_attribute_map = {
'aggregated_result_details_by_outcome': {'key': 'aggregatedResultDetailsByOutcome', 'type': '{AggregatedResultDetailsByOutcome}'},
'duration': {'key': 'duration', 'type': 'object'},
'not_reported_test_count': {'key': 'notReportedTestCount', 'type': 'int'},
'total_test_count': {'key': 'totalTestCount', 'type': 'int'}
}
def __init__(self, aggregated_result_details_by_outcome=None, duration=None, not_reported_test_count=None, total_test_count=None):
super(ResultsSummaryByOutcome, self).__init__()
self.aggregated_result_details_by_outcome = aggregated_result_details_by_outcome
self.duration = duration
self.not_reported_test_count = not_reported_test_count
self.total_test_count = total_test_count
class ResultSummary(Model):
"""
Summary of results for a pipeline instance.
:param result_summary_by_run_state: Result summary of the pipeline, grouped by TestRun state.
:type result_summary_by_run_state: dict
"""
_attribute_map = {
'result_summary_by_run_state': {'key': 'resultSummaryByRunState', 'type': '{ResultsSummaryByOutcome}'}
}
def __init__(self, result_summary_by_run_state=None):
super(ResultSummary, self).__init__()
self.result_summary_by_run_state = result_summary_by_run_state
class RunCreateModel(Model):
"""
Test run create details.
:param automated: true if test run is automated, false otherwise. By default it will be false.
:type automated: bool
:param build: An abstracted reference to the build that the run belongs to.
:type build: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param build_drop_location: Drop location of the build used for test run.
:type build_drop_location: str
:param build_flavor: Flavor of the build used for test run. (E.g.: Release, Debug)
:type build_flavor: str
:param build_platform: Platform of the build used for test run. (E.g.: x86, amd64)
:type build_platform: str
:param build_reference: BuildReference of the test run.
:type build_reference: :class:`BuildConfiguration <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.BuildConfiguration>`
:param comment: Comments entered by those analyzing the run.
:type comment: str
:param complete_date: Completed date time of the run.
:type complete_date: str
:param configuration_ids: IDs of the test configurations associated with the run.
:type configuration_ids: list of int
:param controller: Name of the test controller used for automated run.
:type controller: str
:param custom_test_fields: Additional properties of test Run.
:type custom_test_fields: list of :class:`CustomTestField <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CustomTestField>`
:param dtl_aut_environment: An abstracted reference to DtlAutEnvironment.
:type dtl_aut_environment: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param dtl_test_environment: An abstracted reference to DtlTestEnvironment.
:type dtl_test_environment: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param due_date: Due date and time for test run.
:type due_date: str
:param environment_details:
:type environment_details: :class:`DtlEnvironmentDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.DtlEnvironmentDetails>`
:param error_message: Error message associated with the run.
:type error_message: str
:param filter: Filter used for discovering the Run.
:type filter: :class:`RunFilter <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunFilter>`
:param iteration: The iteration in which to create the run. The root iteration of the team project is the default.
:type iteration: str
:param name: Name of the test run.
:type name: str
:param owner: Display name of the owner of the run.
:type owner: :class:`IdentityRef <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.IdentityRef>`
:param pipeline_reference: Reference of the pipeline to which this test run belongs. PipelineReference.PipelineId should be equal to RunCreateModel.Build.Id
:type pipeline_reference: :class:`PipelineReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PipelineReference>`
:param plan: An abstracted reference to the plan that it belongs.
:type plan: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param point_ids: IDs of the test points to use in the run.
:type point_ids: list of int
:param release_environment_uri: URI of release environment associated with the run.
:type release_environment_uri: str
:param release_reference: Reference to release associated with test run.
:type release_reference: :class:`ReleaseReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReleaseReference>`
:param release_uri: URI of release associated with the run.
:type release_uri: str
:param run_summary: Run summary for run Type = NoConfigRun.
:type run_summary: list of :class:`RunSummaryModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunSummaryModel>`
:param run_timeout: Timespan until the run times out.
:type run_timeout: object
:param source_workflow: SourceWorkFlow(CI/CD) of the test run.
:type source_workflow: str
:param start_date: Start date time of the run.
:type start_date: str
:param state: The state of the run. Type TestRunState. Valid states - NotStarted, InProgress, Waiting
:type state: str
:param tags: Tags to attach to the test run; a maximum of 5 tags can be added to a run.
:type tags: list of :class:`TestTag <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestTag>`
:param test_configurations_mapping: TestConfigurationMapping of the test run.
:type test_configurations_mapping: str
:param test_environment_id: ID of the test environment associated with the run.
:type test_environment_id: str
:param test_settings: An abstracted reference to the test settings resource.
:type test_settings: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param type: Type of the run (RunType). Valid values: (Unspecified, Normal, Blocking, Web, MtrRunInitiatedFromWeb, RunWithDtlEnv, NoConfigRun)
:type type: str
"""
_attribute_map = {
'automated': {'key': 'automated', 'type': 'bool'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_drop_location': {'key': 'buildDropLocation', 'type': 'str'},
'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
'build_platform': {'key': 'buildPlatform', 'type': 'str'},
'build_reference': {'key': 'buildReference', 'type': 'BuildConfiguration'},
'comment': {'key': 'comment', 'type': 'str'},
'complete_date': {'key': 'completeDate', 'type': 'str'},
'configuration_ids': {'key': 'configurationIds', 'type': '[int]'},
'controller': {'key': 'controller', 'type': 'str'},
'custom_test_fields': {'key': 'customTestFields', 'type': '[CustomTestField]'},
'dtl_aut_environment': {'key': 'dtlAutEnvironment', 'type': 'ShallowReference'},
'dtl_test_environment': {'key': 'dtlTestEnvironment', 'type': 'ShallowReference'},
'due_date': {'key': 'dueDate', 'type': 'str'},
'environment_details': {'key': 'environmentDetails', 'type': 'DtlEnvironmentDetails'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'RunFilter'},
'iteration': {'key': 'iteration', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'plan': {'key': 'plan', 'type': 'ShallowReference'},
'point_ids': {'key': 'pointIds', 'type': '[int]'},
'release_environment_uri': {'key': 'releaseEnvironmentUri', 'type': 'str'},
'release_reference': {'key': 'releaseReference', 'type': 'ReleaseReference'},
'release_uri': {'key': 'releaseUri', 'type': 'str'},
'run_summary': {'key': 'runSummary', 'type': '[RunSummaryModel]'},
'run_timeout': {'key': 'runTimeout', 'type': 'object'},
'source_workflow': {'key': 'sourceWorkflow', 'type': 'str'},
'start_date': {'key': 'startDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[TestTag]'},
'test_configurations_mapping': {'key': 'testConfigurationsMapping', 'type': 'str'},
'test_environment_id': {'key': 'testEnvironmentId', 'type': 'str'},
'test_settings': {'key': 'testSettings', 'type': 'ShallowReference'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, automated=None, build=None, build_drop_location=None, build_flavor=None, build_platform=None, build_reference=None, comment=None, complete_date=None, configuration_ids=None, controller=None, custom_test_fields=None, dtl_aut_environment=None, dtl_test_environment=None, due_date=None, environment_details=None, error_message=None, filter=None, iteration=None, name=None, owner=None, pipeline_reference=None, plan=None, point_ids=None, release_environment_uri=None, release_reference=None, release_uri=None, run_summary=None, run_timeout=None, source_workflow=None, start_date=None, state=None, tags=None, test_configurations_mapping=None, test_environment_id=None, test_settings=None, type=None):
super(RunCreateModel, self).__init__()
self.automated = automated
self.build = build
self.build_drop_location = build_drop_location
self.build_flavor = build_flavor
self.build_platform = build_platform
self.build_reference = build_reference
self.comment = comment
self.complete_date = complete_date
self.configuration_ids = configuration_ids
self.controller = controller
self.custom_test_fields = custom_test_fields
self.dtl_aut_environment = dtl_aut_environment
self.dtl_test_environment = dtl_test_environment
self.due_date = due_date
self.environment_details = environment_details
self.error_message = error_message
self.filter = filter
self.iteration = iteration
self.name = name
self.owner = owner
self.pipeline_reference = pipeline_reference
self.plan = plan
self.point_ids = point_ids
self.release_environment_uri = release_environment_uri
self.release_reference = release_reference
self.release_uri = release_uri
self.run_summary = run_summary
self.run_timeout = run_timeout
self.source_workflow = source_workflow
self.start_date = start_date
self.state = state
self.tags = tags
self.test_configurations_mapping = test_configurations_mapping
self.test_environment_id = test_environment_id
self.test_settings = test_settings
self.type = type
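# Illustrative sketch: creating an automated test run. The client call at the
# end mirrors typical v7_1 TestClient usage but should be treated as an
# assumption here; the IDs and names are examples.
#
#   run = RunCreateModel(
#       name='Nightly regression',
#       automated=True,
#       state='InProgress',
#       build=ShallowReference(id='1234'),
#   )
#   # created_run = test_client.create_test_run(run, project='MyProject')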
class RunFilter(Model):
"""
This class is used to provide the filters used for discovery.
:param source_filter: filter for the test case sources (test containers)
:type source_filter: str
:param test_case_filter: filter for the test cases
:type test_case_filter: str
"""
_attribute_map = {
'source_filter': {'key': 'sourceFilter', 'type': 'str'},
'test_case_filter': {'key': 'testCaseFilter', 'type': 'str'}
}
def __init__(self, source_filter=None, test_case_filter=None):
super(RunFilter, self).__init__()
self.source_filter = source_filter
self.test_case_filter = test_case_filter
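# Illustrative sketch: a RunFilter narrowing discovery to specific test
# containers and a test-case filter expression (both values are examples).
#
#   run_filter = RunFilter(
#       source_filter='**\\*.Tests.dll',
#       test_case_filter='TestCategory=Smoke',
#   )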
class RunStatistic(Model):
"""
Test run statistics per outcome.
:param count: Test result count for the given outcome.
:type count: int
:param outcome: Test result outcome
:type outcome: str
:param resolution_state: Test run Resolution State.
:type resolution_state: :class:`TestResolutionState <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResolutionState>`
:param result_metadata: ResultMetadata for the given outcome/count.
:type result_metadata: object
:param state: State of the test run
:type state: str
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'outcome': {'key': 'outcome', 'type': 'str'},
'resolution_state': {'key': 'resolutionState', 'type': 'TestResolutionState'},
'result_metadata': {'key': 'resultMetadata', 'type': 'object'},
'state': {'key': 'state', 'type': 'str'}
}
def __init__(self, count=None, outcome=None, resolution_state=None, result_metadata=None, state=None):
super(RunStatistic, self).__init__()
self.count = count
self.outcome = outcome
self.resolution_state = resolution_state
self.result_metadata = result_metadata
self.state = state
class RunSummary(Model):
"""
Summary of runs for a pipeline instance.
:param duration: Total time taken by runs with state completed and NeedInvestigation.
:type duration: object
:param no_config_runs_count: NoConfig runs count.
:type no_config_runs_count: int
:param run_summary_by_outcome: Count of runs by outcome, for runs with state Completed and NeedInvestigation.
:type run_summary_by_outcome: dict
:param run_summary_by_state: Runs count by state.
:type run_summary_by_state: dict
:param total_runs_count: Total runs count.
:type total_runs_count: int
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'object'},
'no_config_runs_count': {'key': 'noConfigRunsCount', 'type': 'int'},
'run_summary_by_outcome': {'key': 'runSummaryByOutcome', 'type': '{int}'},
'run_summary_by_state': {'key': 'runSummaryByState', 'type': '{int}'},
'total_runs_count': {'key': 'totalRunsCount', 'type': 'int'}
}
def __init__(self, duration=None, no_config_runs_count=None, run_summary_by_outcome=None, run_summary_by_state=None, total_runs_count=None):
super(RunSummary, self).__init__()
self.duration = duration
self.no_config_runs_count = no_config_runs_count
self.run_summary_by_outcome = run_summary_by_outcome
self.run_summary_by_state = run_summary_by_state
self.total_runs_count = total_runs_count
class RunSummaryModel(Model):
"""
Run summary for each output type of test.
:param duration: Total time taken in milliseconds.
:type duration: long
:param result_count: Number of results for the outcome given in TestOutcome.
:type result_count: int
:param test_outcome: The test outcome on which this summary is based.
:type test_outcome: object
"""
_attribute_map = {
'duration': {'key': 'duration', 'type': 'long'},
'result_count': {'key': 'resultCount', 'type': 'int'},
'test_outcome': {'key': 'testOutcome', 'type': 'object'}
}
def __init__(self, duration=None, result_count=None, test_outcome=None):
super(RunSummaryModel, self).__init__()
self.duration = duration
self.result_count = result_count
self.test_outcome = test_outcome
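# Illustrative sketch: per-outcome summaries attached to a NoConfig run.
# Durations are in milliseconds; the outcome strings are example assumptions.
#
#   summaries = [
#       RunSummaryModel(test_outcome='Passed', result_count=95, duration=120000),
#       RunSummaryModel(test_outcome='Failed', result_count=5, duration=8000),
#   ]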
class RunUpdateModel(Model):
"""
:param build: An abstracted reference to the build that the run belongs to.
:type build: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param build_drop_location: Drop location of the build used for test run.
:type build_drop_location: str
:param build_flavor: Flavor of the build used for test run. (E.g.: Release, Debug)
:type build_flavor: str
:param build_platform: Platform of the build used for test run. (E.g.: x86, amd64)
:type build_platform: str
:param comment: Comments entered by those analyzing the run.
:type comment: str
:param completed_date: Completed date time of the run.
:type completed_date: str
:param controller: Name of the test controller used for automated run.
:type controller: str
:param delete_in_progress_results: true to delete in-progress results, false otherwise.
:type delete_in_progress_results: bool
:param dtl_aut_environment: An abstracted reference to DtlAutEnvironment.
:type dtl_aut_environment: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param dtl_environment: An abstracted reference to DtlEnvironment.
:type dtl_environment: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param dtl_environment_details:
:type dtl_environment_details: :class:`DtlEnvironmentDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.DtlEnvironmentDetails>`
:param due_date: Due date and time for test run.
:type due_date: str
:param error_message: Error message associated with the run.
:type error_message: str
:param iteration: The iteration in which to create the run.
:type iteration: str
:param log_entries: Log entries associated with the run. Use a comma-separated list of multiple log entry objects. { logEntry }, { logEntry }, ...
:type log_entries: list of :class:`TestMessageLogDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestMessageLogDetails>`
:param name: Name of the test run.
:type name: str
:param release_environment_uri: URI of release environment associated with the run.
:type release_environment_uri: str
:param release_uri: URI of release associated with the run.
:type release_uri: str
:param run_summary: Run summary for run Type = NoConfigRun.
:type run_summary: list of :class:`RunSummaryModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunSummaryModel>`
:param source_workflow: SourceWorkFlow(CI/CD) of the test run.
:type source_workflow: str
:param started_date: Start date time of the run.
:type started_date: str
:param state: The state of the test run. Valid values - NotStarted, InProgress, Completed, Aborted, Waiting
:type state: str
:param substate: The types of sub states for test run.
:type substate: object
:param tags: Tags to attach with the test run.
:type tags: list of :class:`TestTag <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestTag>`
:param test_environment_id: ID of the test environment associated with the run.
:type test_environment_id: str
:param test_settings: An abstracted reference to the test settings resource.
:type test_settings: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_drop_location': {'key': 'buildDropLocation', 'type': 'str'},
'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
'build_platform': {'key': 'buildPlatform', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'str'},
'controller': {'key': 'controller', 'type': 'str'},
'delete_in_progress_results': {'key': 'deleteInProgressResults', 'type': 'bool'},
'dtl_aut_environment': {'key': 'dtlAutEnvironment', 'type': 'ShallowReference'},
'dtl_environment': {'key': 'dtlEnvironment', 'type': 'ShallowReference'},
'dtl_environment_details': {'key': 'dtlEnvironmentDetails', 'type': 'DtlEnvironmentDetails'},
'due_date': {'key': 'dueDate', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'log_entries': {'key': 'logEntries', 'type': '[TestMessageLogDetails]'},
'name': {'key': 'name', 'type': 'str'},
'release_environment_uri': {'key': 'releaseEnvironmentUri', 'type': 'str'},
'release_uri': {'key': 'releaseUri', 'type': 'str'},
'run_summary': {'key': 'runSummary', 'type': '[RunSummaryModel]'},
'source_workflow': {'key': 'sourceWorkflow', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'substate': {'key': 'substate', 'type': 'object'},
'tags': {'key': 'tags', 'type': '[TestTag]'},
'test_environment_id': {'key': 'testEnvironmentId', 'type': 'str'},
'test_settings': {'key': 'testSettings', 'type': 'ShallowReference'}
}
def __init__(self, build=None, build_drop_location=None, build_flavor=None, build_platform=None, comment=None, completed_date=None, controller=None, delete_in_progress_results=None, dtl_aut_environment=None, dtl_environment=None, dtl_environment_details=None, due_date=None, error_message=None, iteration=None, log_entries=None, name=None, release_environment_uri=None, release_uri=None, run_summary=None, source_workflow=None, started_date=None, state=None, substate=None, tags=None, test_environment_id=None, test_settings=None):
super(RunUpdateModel, self).__init__()
self.build = build
self.build_drop_location = build_drop_location
self.build_flavor = build_flavor
self.build_platform = build_platform
self.comment = comment
self.completed_date = completed_date
self.controller = controller
self.delete_in_progress_results = delete_in_progress_results
self.dtl_aut_environment = dtl_aut_environment
self.dtl_environment = dtl_environment
self.dtl_environment_details = dtl_environment_details
self.due_date = due_date
self.error_message = error_message
self.iteration = iteration
self.log_entries = log_entries
self.name = name
self.release_environment_uri = release_environment_uri
self.release_uri = release_uri
self.run_summary = run_summary
self.source_workflow = source_workflow
self.started_date = started_date
self.state = state
self.substate = substate
self.tags = tags
self.test_environment_id = test_environment_id
self.test_settings = test_settings
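# Illustrative sketch: marking a run completed. The update call mirrors
# typical v7_1 TestClient usage but should be treated as an assumption here.
#
#   update = RunUpdateModel(state='Completed', comment='All suites finished')
#   # test_client.update_test_run(update, project='MyProject', run_id=123)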
class ShallowReference(Model):
"""
An abstracted reference to some other resource. This class is used to provide the build data contracts with a uniform way to reference other resources in a way that provides easy traversal through links.
:param id: ID of the resource
:type id: str
:param name: Name of the linked resource (definition name, controller name, etc.)
:type name: str
:param url: Full http link to the resource
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, name=None, url=None):
super(ShallowReference, self).__init__()
self.id = id
self.name = name
self.url = url
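# Illustrative sketch: ShallowReference is the generic link type used across
# these models; usually only `id` is required when sending data to the
# service (an assumption based on the fields above).
#
#   plan_ref = ShallowReference(id='55', name='Sprint 12 plan')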
class ShallowTestCaseResult(Model):
"""
:param automated_test_name:
:type automated_test_name: str
:param automated_test_storage:
:type automated_test_storage: str
:param duration_in_ms:
:type duration_in_ms: float
:param id:
:type id: int
:param is_re_run:
:type is_re_run: bool
:param outcome:
:type outcome: str
:param owner:
:type owner: str
:param priority:
:type priority: int
:param ref_id:
:type ref_id: int
:param run_id:
:type run_id: int
:param tags:
:type tags: list of str
:param test_case_title:
:type test_case_title: str
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'id': {'key': 'id', 'type': 'int'},
'is_re_run': {'key': 'isReRun', 'type': 'bool'},
'outcome': {'key': 'outcome', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'ref_id': {'key': 'refId', 'type': 'int'},
'run_id': {'key': 'runId', 'type': 'int'},
'tags': {'key': 'tags', 'type': '[str]'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'}
}
def __init__(self, automated_test_name=None, automated_test_storage=None, duration_in_ms=None, id=None, is_re_run=None, outcome=None, owner=None, priority=None, ref_id=None, run_id=None, tags=None, test_case_title=None):
super(ShallowTestCaseResult, self).__init__()
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.duration_in_ms = duration_in_ms
self.id = id
self.is_re_run = is_re_run
self.outcome = outcome
self.owner = owner
self.priority = priority
self.ref_id = ref_id
self.run_id = run_id
self.tags = tags
self.test_case_title = test_case_title
class SharedStepModel(Model):
"""
Reference to shared step workitem.
:param id: WorkItem shared step ID.
:type id: int
:param revision: Shared step workitem revision.
:type revision: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'revision': {'key': 'revision', 'type': 'int'}
}
def __init__(self, id=None, revision=None):
super(SharedStepModel, self).__init__()
self.id = id
self.revision = revision
class StageReference(Model):
"""
Stage in pipeline
:param attempt: Attempt number of stage
:type attempt: int
:param stage_name: Name of the stage. Maximum supported length for the name is 256 characters.
:type stage_name: str
"""
_attribute_map = {
'attempt': {'key': 'attempt', 'type': 'int'},
'stage_name': {'key': 'stageName', 'type': 'str'}
}
def __init__(self, attempt=None, stage_name=None):
super(StageReference, self).__init__()
self.attempt = attempt
self.stage_name = stage_name
class TeamProjectReference(Model):
"""
:param abbreviation:
:type abbreviation: str
:param default_team_image_url:
:type default_team_image_url: str
:param description:
:type description: str
:param id:
:type id: str
:param last_update_time:
:type last_update_time: datetime
:param name:
:type name: str
:param revision:
:type revision: long
:param state:
:type state: object
:param url:
:type url: str
:param visibility:
:type visibility: object
"""
_attribute_map = {
'abbreviation': {'key': 'abbreviation', 'type': 'str'},
'default_team_image_url': {'key': 'defaultTeamImageUrl', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'long'},
'state': {'key': 'state', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'object'}
}
def __init__(self, abbreviation=None, default_team_image_url=None, description=None, id=None, last_update_time=None, name=None, revision=None, state=None, url=None, visibility=None):
super(TeamProjectReference, self).__init__()
self.abbreviation = abbreviation
self.default_team_image_url = default_team_image_url
self.description = description
self.id = id
self.last_update_time = last_update_time
self.name = name
self.revision = revision
self.state = state
self.url = url
self.visibility = visibility
class TestAttachment(Model):
"""
:param attachment_type: Attachment type.
:type attachment_type: object
:param comment: Comment associated with attachment.
:type comment: str
:param created_date: Attachment created date.
:type created_date: datetime
:param file_name: Attachment file name
:type file_name: str
:param id: ID of the attachment.
:type id: int
:param size: Attachment size.
:type size: long
:param url: Attachment Url.
:type url: str
"""
_attribute_map = {
'attachment_type': {'key': 'attachmentType', 'type': 'object'},
'comment': {'key': 'comment', 'type': 'str'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'file_name': {'key': 'fileName', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'size': {'key': 'size', 'type': 'long'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, attachment_type=None, comment=None, created_date=None, file_name=None, id=None, size=None, url=None):
super(TestAttachment, self).__init__()
self.attachment_type = attachment_type
self.comment = comment
self.created_date = created_date
self.file_name = file_name
self.id = id
self.size = size
self.url = url
class TestAttachmentReference(Model):
"""
Reference to test attachment.
:param id: ID of the attachment.
:type id: int
:param url: Url to download the attachment.
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, url=None):
super(TestAttachmentReference, self).__init__()
self.id = id
self.url = url
class TestAttachmentRequestModel(Model):
"""
Test attachment request model
:param attachment_type: Attachment type By Default it will be GeneralAttachment. It can be one of the following type. { GeneralAttachment, AfnStrip, BugFilingData, CodeCoverage, IntermediateCollectorData, RunConfig, TestImpactDetails, TmiTestRunDeploymentFiles, TmiTestRunReverseDeploymentFiles, TmiTestResultDetail, TmiTestRunSummary }
:type attachment_type: str
:param comment: Comment associated with attachment
:type comment: str
:param file_name: Attachment filename
:type file_name: str
:param stream: Base64 encoded file stream
:type stream: str
"""
_attribute_map = {
'attachment_type': {'key': 'attachmentType', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'file_name': {'key': 'fileName', 'type': 'str'},
'stream': {'key': 'stream', 'type': 'str'}
}
def __init__(self, attachment_type=None, comment=None, file_name=None, stream=None):
super(TestAttachmentRequestModel, self).__init__()
self.attachment_type = attachment_type
self.comment = comment
self.file_name = file_name
self.stream = stream
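# Illustrative sketch: the `stream` field carries the file contents as a
# base64 string, so an attachment payload can be built like this (the file
# name and attachment type are examples).
#
#   import base64
#   with open('results.log', 'rb') as f:
#       encoded = base64.b64encode(f.read()).decode('utf-8')
#   attachment = TestAttachmentRequestModel(
#       attachment_type='GeneralAttachment',
#       file_name='results.log',
#       stream=encoded,
#   )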
class TestCaseResult(Model):
"""
Represents a test result.
:param afn_strip_id: Test attachment ID of action recording.
:type afn_strip_id: int
:param area: Reference to area path of test.
:type area: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param associated_bugs: Reference to bugs linked to test result.
:type associated_bugs: list of :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param automated_test_id: ID representing test method in a dll.
:type automated_test_id: str
:param automated_test_name: Fully qualified name of test executed.
:type automated_test_name: str
:param automated_test_storage: Container to which test belongs.
:type automated_test_storage: str
:param automated_test_type: Type of automated test.
:type automated_test_type: str
:param automated_test_type_id: TypeId of automated test.
:type automated_test_type_id: str
:param build: Shallow reference to build associated with test result.
:type build: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param build_reference: Reference to build associated with test result.
:type build_reference: :class:`BuildReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.BuildReference>`
:param comment: Comment in a test result with maxSize = 1000 chars.
:type comment: str
:param completed_date: Time when test execution completed (UTC). The completed date should be greater than StartedDate.
:type completed_date: datetime
:param computer_name: Machine name where test executed.
:type computer_name: str
:param configuration: Reference to test configuration. Type ShallowReference.
:type configuration: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param created_date: Timestamp when the test result was created (UTC).
:type created_date: datetime
:param custom_fields: Additional properties of test result.
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CustomTestField>`
:param duration_in_ms: Duration of test execution in milliseconds. If not provided, the value is set to CompletedDate - StartedDate.
:type duration_in_ms: float
:param error_message: Error message in test execution.
:type error_message: str
:param failing_since: Information about when the test results started failing.
:type failing_since: :class:`FailingSince <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.FailingSince>`
:param failure_type: Failure type of test result. Valid values = (Known Issue, New Issue, Regression, Unknown, None)
:type failure_type: str
:param id: ID of a test result.
:type id: int
:param iteration_details: Test result details of test iterations used only for Manual Testing.
:type iteration_details: list of :class:`TestIterationDetailsModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestIterationDetailsModel>`
:param last_updated_by: Reference to the identity that last updated the test result.
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.IdentityRef>`
:param last_updated_date: Last updated datetime of the test result (UTC).
:type last_updated_date: datetime
:param outcome: Test outcome of test result. Valid values = (Unspecified, None, Passed, Failed, Inconclusive, Timeout, Aborted, Blocked, NotExecuted, Warning, Error, NotApplicable, Paused, InProgress, NotImpacted)
:type outcome: str
:param owner: Reference to test owner.
:type owner: :class:`IdentityRef <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.IdentityRef>`
:param priority: Priority of test executed.
:type priority: int
:param project: Reference to team project.
:type project: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param release: Shallow reference to release associated with test result.
:type release: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param release_reference: Reference to release associated with test result.
:type release_reference: :class:`ReleaseReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReleaseReference>`
:param reset_count: ResetCount.
:type reset_count: int
:param resolution_state: Resolution state of test result.
:type resolution_state: str
:param resolution_state_id: ID of resolution state.
:type resolution_state_id: int
:param result_group_type: Hierarchy type of the result; the default value of None means it is a leaf node.
:type result_group_type: object
:param revision: Revision number of test result.
:type revision: int
:param run_by: Reference to the identity that executed the test.
:type run_by: :class:`IdentityRef <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.IdentityRef>`
:param stack_trace: Stacktrace with maxSize = 1000 chars.
:type stack_trace: str
:param started_date: Time when test execution started (UTC).
:type started_date: datetime
:param state: State of test result. Type TestRunState.
:type state: str
:param sub_results: List of sub-results inside a test result; if ResultGroupType is not None, it holds sub-results of the corresponding type.
:type sub_results: list of :class:`TestSubResult <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestSubResult>`
:param test_case: Reference to the test executed.
:type test_case: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param test_case_reference_id: Reference ID of the test used by the test result. Type TestResultMetaData.
:type test_case_reference_id: int
:param test_case_revision: TestCaseRevision Number.
:type test_case_revision: int
:param test_case_title: Name of test.
:type test_case_title: str
:param test_plan: Reference to the test plan that the test case work item is part of.
:type test_plan: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param test_point: Reference to the test point executed.
:type test_point: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param test_run: Reference to test run.
:type test_run: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param test_suite: Reference to the test suite that the test case work item is part of.
:type test_suite: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param url: Url of test result.
:type url: str
"""
_attribute_map = {
'afn_strip_id': {'key': 'afnStripId', 'type': 'int'},
'area': {'key': 'area', 'type': 'ShallowReference'},
'associated_bugs': {'key': 'associatedBugs', 'type': '[ShallowReference]'},
'automated_test_id': {'key': 'automatedTestId', 'type': 'str'},
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'automated_test_type': {'key': 'automatedTestType', 'type': 'str'},
'automated_test_type_id': {'key': 'automatedTestTypeId', 'type': 'str'},
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_reference': {'key': 'buildReference', 'type': 'BuildReference'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'failing_since': {'key': 'failingSince', 'type': 'FailingSince'},
'failure_type': {'key': 'failureType', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'iteration_details': {'key': 'iterationDetails', 'type': '[TestIterationDetailsModel]'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'outcome': {'key': 'outcome', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'priority': {'key': 'priority', 'type': 'int'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'release': {'key': 'release', 'type': 'ShallowReference'},
'release_reference': {'key': 'releaseReference', 'type': 'ReleaseReference'},
'reset_count': {'key': 'resetCount', 'type': 'int'},
'resolution_state': {'key': 'resolutionState', 'type': 'str'},
'resolution_state_id': {'key': 'resolutionStateId', 'type': 'int'},
'result_group_type': {'key': 'resultGroupType', 'type': 'object'},
'revision': {'key': 'revision', 'type': 'int'},
'run_by': {'key': 'runBy', 'type': 'IdentityRef'},
'stack_trace': {'key': 'stackTrace', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'str'},
'sub_results': {'key': 'subResults', 'type': '[TestSubResult]'},
'test_case': {'key': 'testCase', 'type': 'ShallowReference'},
'test_case_reference_id': {'key': 'testCaseReferenceId', 'type': 'int'},
'test_case_revision': {'key': 'testCaseRevision', 'type': 'int'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'},
'test_plan': {'key': 'testPlan', 'type': 'ShallowReference'},
'test_point': {'key': 'testPoint', 'type': 'ShallowReference'},
'test_run': {'key': 'testRun', 'type': 'ShallowReference'},
'test_suite': {'key': 'testSuite', 'type': 'ShallowReference'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, afn_strip_id=None, area=None, associated_bugs=None, automated_test_id=None, automated_test_name=None, automated_test_storage=None, automated_test_type=None, automated_test_type_id=None, build=None, build_reference=None, comment=None, completed_date=None, computer_name=None, configuration=None, created_date=None, custom_fields=None, duration_in_ms=None, error_message=None, failing_since=None, failure_type=None, id=None, iteration_details=None, last_updated_by=None, last_updated_date=None, outcome=None, owner=None, priority=None, project=None, release=None, release_reference=None, reset_count=None, resolution_state=None, resolution_state_id=None, result_group_type=None, revision=None, run_by=None, stack_trace=None, started_date=None, state=None, sub_results=None, test_case=None, test_case_reference_id=None, test_case_revision=None, test_case_title=None, test_plan=None, test_point=None, test_run=None, test_suite=None, url=None):
super(TestCaseResult, self).__init__()
self.afn_strip_id = afn_strip_id
self.area = area
self.associated_bugs = associated_bugs
self.automated_test_id = automated_test_id
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.automated_test_type = automated_test_type
self.automated_test_type_id = automated_test_type_id
self.build = build
self.build_reference = build_reference
self.comment = comment
self.completed_date = completed_date
self.computer_name = computer_name
self.configuration = configuration
self.created_date = created_date
self.custom_fields = custom_fields
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.failing_since = failing_since
self.failure_type = failure_type
self.id = id
self.iteration_details = iteration_details
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.outcome = outcome
self.owner = owner
self.priority = priority
self.project = project
self.release = release
self.release_reference = release_reference
self.reset_count = reset_count
self.resolution_state = resolution_state
self.resolution_state_id = resolution_state_id
self.result_group_type = result_group_type
self.revision = revision
self.run_by = run_by
self.stack_trace = stack_trace
self.started_date = started_date
self.state = state
self.sub_results = sub_results
self.test_case = test_case
self.test_case_reference_id = test_case_reference_id
self.test_case_revision = test_case_revision
self.test_case_title = test_case_title
self.test_plan = test_plan
self.test_point = test_point
self.test_run = test_run
self.test_suite = test_suite
self.url = url
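# Illustrative sketch: reporting one automated result into an existing run.
# The add call mirrors typical v7_1 TestClient usage but should be treated as
# an assumption here; the names, duration, and run ID are examples.
#
#   result = TestCaseResult(
#       automated_test_name='MyNamespace.MyTests.LoginTest',
#       test_case_title='Login succeeds with valid credentials',
#       outcome='Passed',
#       state='Completed',
#       duration_in_ms=1520.0,
#   )
#   # test_client.add_test_results_to_test_run([result], 'MyProject', run_id=123)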
class TestCaseResultAttachmentModel(Model):
"""
Test attachment information in a test iteration.
:param action_path: Path identifier of the test step in the test case work item.
:type action_path: str
:param id: Attachment ID.
:type id: int
:param iteration_id: Iteration ID.
:type iteration_id: int
:param name: Name of attachment.
:type name: str
:param size: Attachment size.
:type size: long
:param url: Url to attachment.
:type url: str
"""
_attribute_map = {
'action_path': {'key': 'actionPath', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'iteration_id': {'key': 'iterationId', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'size': {'key': 'size', 'type': 'long'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, action_path=None, id=None, iteration_id=None, name=None, size=None, url=None):
super(TestCaseResultAttachmentModel, self).__init__()
self.action_path = action_path
self.id = id
self.iteration_id = iteration_id
self.name = name
self.size = size
self.url = url
class TestCaseResultIdentifier(Model):
"""
Reference to a test result.
:param test_result_id: Test result ID.
:type test_result_id: int
:param test_run_id: Test run ID.
:type test_run_id: int
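Example (a minimal sketch; the IDs are hypothetical):
>>> ref = TestCaseResultIdentifier(test_result_id=100000, test_run_id=42)
>>> (ref.test_run_id, ref.test_result_id)
(42, 100000)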
"""
_attribute_map = {
'test_result_id': {'key': 'testResultId', 'type': 'int'},
'test_run_id': {'key': 'testRunId', 'type': 'int'}
}
def __init__(self, test_result_id=None, test_run_id=None):
super(TestCaseResultIdentifier, self).__init__()
self.test_result_id = test_result_id
self.test_run_id = test_run_id
class TestEnvironment(Model):
"""
Test environment Detail.
:param environment_id: Test Environment Id.
:type environment_id: str
:param environment_name: Test Environment Name.
:type environment_name: str
"""
_attribute_map = {
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_name': {'key': 'environmentName', 'type': 'str'}
}
def __init__(self, environment_id=None, environment_name=None):
super(TestEnvironment, self).__init__()
self.environment_id = environment_id
self.environment_name = environment_name
class TestFailureDetails(Model):
"""
:param count:
:type count: int
:param test_results:
:type test_results: list of :class:`TestCaseResultIdentifier <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResultIdentifier>`
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'test_results': {'key': 'testResults', 'type': '[TestCaseResultIdentifier]'}
}
def __init__(self, count=None, test_results=None):
super(TestFailureDetails, self).__init__()
self.count = count
self.test_results = test_results
class TestFailuresAnalysis(Model):
"""
:param existing_failures:
:type existing_failures: :class:`TestFailureDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailureDetails>`
:param fixed_tests:
:type fixed_tests: :class:`TestFailureDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailureDetails>`
:param new_failures:
:type new_failures: :class:`TestFailureDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailureDetails>`
:param previous_context:
:type previous_context: :class:`TestResultsContext <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultsContext>`
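Example (a sketch composing TestFailureDetails values; the counts are hypothetical):
>>> analysis = TestFailuresAnalysis(
...     new_failures=TestFailureDetails(count=2),
...     fixed_tests=TestFailureDetails(count=5))
>>> analysis.new_failures.count
2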
"""
_attribute_map = {
'existing_failures': {'key': 'existingFailures', 'type': 'TestFailureDetails'},
'fixed_tests': {'key': 'fixedTests', 'type': 'TestFailureDetails'},
'new_failures': {'key': 'newFailures', 'type': 'TestFailureDetails'},
'previous_context': {'key': 'previousContext', 'type': 'TestResultsContext'}
}
def __init__(self, existing_failures=None, fixed_tests=None, new_failures=None, previous_context=None):
super(TestFailuresAnalysis, self).__init__()
self.existing_failures = existing_failures
self.fixed_tests = fixed_tests
self.new_failures = new_failures
self.previous_context = previous_context
class TestFailureType(Model):
"""
:param id:
:type id: int
:param name:
:type name: str
:param project:
:type project: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'}
}
def __init__(self, id=None, name=None, project=None):
super(TestFailureType, self).__init__()
self.id = id
self.name = name
self.project = project
class TestFlakyIdentifier(Model):
"""
Test Flaky Identifier
:param branch_name: Branch name on which flakiness is to be marked/unmarked
:type branch_name: str
:param is_flaky: State for Flakiness
:type is_flaky: bool
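Example (a sketch marking a test flaky; the branch name is hypothetical):
>>> flaky = TestFlakyIdentifier(branch_name='refs/heads/main', is_flaky=True)
>>> flaky.is_flaky
True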
"""
_attribute_map = {
'branch_name': {'key': 'branchName', 'type': 'str'},
'is_flaky': {'key': 'isFlaky', 'type': 'bool'}
}
def __init__(self, branch_name=None, is_flaky=None):
super(TestFlakyIdentifier, self).__init__()
self.branch_name = branch_name
self.is_flaky = is_flaky
class TestHistoryQuery(Model):
"""
Filter to get TestCase result history.
:param automated_test_name: Automated test name of the TestCase.
:type automated_test_name: str
:param branch: Results to be fetched for a particular branch.
:type branch: str
:param build_definition_id: Get the results history only for this BuildDefinitionId. To use this in a query, GroupBy should be Branch. If this is provided, Branch has no effect.
:type build_definition_id: int
:param continuation_token: Filled in by the server. If not null, some results remain to be fetched, and this REST API must be called again with this ContinuationToken. It is not supposed to be created (or altered, if received from the server in the last batch) by the user.
:type continuation_token: str
:param group_by: Group the results on the basis of TestResultGroupBy. This can be Branch, Environment or null (if results are fetched by BuildDefinitionId).
:type group_by: object
:param max_complete_date: History is fetched for the interval between MaxCompleteDate and (MaxCompleteDate - TrendDays). Default is the current date and time.
:type max_complete_date: datetime
:param release_env_definition_id: Get the results history only for this ReleaseEnvDefinitionId. To use this in a query, GroupBy should be Environment.
:type release_env_definition_id: int
:param results_for_group: List of TestResultHistoryForGroup entries, grouped by GroupBy.
:type results_for_group: list of :class:`TestResultHistoryForGroup <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultHistoryForGroup>`
:param test_case_id: Get the results history only for this TestCaseId. Used in a query to filter results along with AutomatedTestName.
:type test_case_id: int
:param trend_days: Number of days of history to collect. Maximum supported value is 7 days. Default is 7 days.
:type trend_days: int
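Example (a sketch of a history query; the test name, definition ID and day count are hypothetical):
>>> query = TestHistoryQuery(automated_test_name='MyNamespace.MyTests.TestLogin',
...     build_definition_id=12, trend_days=7)
>>> query.trend_days
7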
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'branch': {'key': 'branch', 'type': 'str'},
'build_definition_id': {'key': 'buildDefinitionId', 'type': 'int'},
'continuation_token': {'key': 'continuationToken', 'type': 'str'},
'group_by': {'key': 'groupBy', 'type': 'object'},
'max_complete_date': {'key': 'maxCompleteDate', 'type': 'iso-8601'},
'release_env_definition_id': {'key': 'releaseEnvDefinitionId', 'type': 'int'},
'results_for_group': {'key': 'resultsForGroup', 'type': '[TestResultHistoryForGroup]'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'},
'trend_days': {'key': 'trendDays', 'type': 'int'}
}
def __init__(self, automated_test_name=None, branch=None, build_definition_id=None, continuation_token=None, group_by=None, max_complete_date=None, release_env_definition_id=None, results_for_group=None, test_case_id=None, trend_days=None):
super(TestHistoryQuery, self).__init__()
self.automated_test_name = automated_test_name
self.branch = branch
self.build_definition_id = build_definition_id
self.continuation_token = continuation_token
self.group_by = group_by
self.max_complete_date = max_complete_date
self.release_env_definition_id = release_env_definition_id
self.results_for_group = results_for_group
self.test_case_id = test_case_id
self.trend_days = trend_days
class TestIterationDetailsModel(Model):
"""
Represents a test iteration result.
:param action_results: Test step results in an iteration.
:type action_results: list of :class:`TestActionResultModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestActionResultModel>`
:param attachments: Reference to attachments in test iteration result.
:type attachments: list of :class:`TestCaseResultAttachmentModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResultAttachmentModel>`
:param comment: Comment in test iteration result.
:type comment: str
:param completed_date: Time when execution completed (UTC).
:type completed_date: datetime
:param duration_in_ms: Duration of execution.
:type duration_in_ms: float
:param error_message: Error message in test iteration result execution.
:type error_message: str
:param id: ID of test iteration result.
:type id: int
:param outcome: Test outcome of test iteration result.
:type outcome: str
:param parameters: Test parameters in an iteration.
:type parameters: list of :class:`TestResultParameterModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultParameterModel>`
:param started_date: Time when execution started (UTC).
:type started_date: datetime
:param url: Url to test iteration result.
:type url: str
"""
_attribute_map = {
'action_results': {'key': 'actionResults', 'type': '[TestActionResultModel]'},
'attachments': {'key': 'attachments', 'type': '[TestCaseResultAttachmentModel]'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'outcome': {'key': 'outcome', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[TestResultParameterModel]'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, action_results=None, attachments=None, comment=None, completed_date=None, duration_in_ms=None, error_message=None, id=None, outcome=None, parameters=None, started_date=None, url=None):
super(TestIterationDetailsModel, self).__init__()
self.action_results = action_results
self.attachments = attachments
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.id = id
self.outcome = outcome
self.parameters = parameters
self.started_date = started_date
self.url = url
class TestLog(Model):
"""
Represents Test Log Result object.
:param log_reference: Test log context (run or build).
:type log_reference: :class:`TestLogReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestLogReference>`
:param meta_data: Meta data for Log file
:type meta_data: dict
:param modified_on: LastUpdatedDate for Log file
:type modified_on: datetime
:param size: Size in Bytes for Log file
:type size: long
"""
_attribute_map = {
'log_reference': {'key': 'logReference', 'type': 'TestLogReference'},
'meta_data': {'key': 'metaData', 'type': '{str}'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'size': {'key': 'size', 'type': 'long'}
}
def __init__(self, log_reference=None, meta_data=None, modified_on=None, size=None):
super(TestLog, self).__init__()
self.log_reference = log_reference
self.meta_data = meta_data
self.modified_on = modified_on
self.size = size
class TestLogReference(Model):
"""
Test Log Reference object
:param build_id: BuildId for test log, if context is build
:type build_id: int
:param file_path: FileName for log file
:type file_path: str
:param release_env_id: ReleaseEnvId for test log, if context is Release
:type release_env_id: int
:param release_id: ReleaseId for test log, if context is Release
:type release_id: int
:param result_id: ResultId for test log, if context is run and log is related to a result
:type result_id: int
:param run_id: RunId for test log, if context is run
:type run_id: int
:param scope: Test Log Scope
:type scope: object
:param sub_result_id: SubResultId for test log, if context is run and log is related to a sub result
:type sub_result_id: int
:param type: Log Type
:type type: object
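Example (a run-scoped log reference sketch; the IDs and file name are hypothetical):
>>> log_ref = TestLogReference(run_id=42, result_id=100000,
...     file_path='console.log')
>>> log_ref.file_path
'console.log'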
"""
_attribute_map = {
'build_id': {'key': 'buildId', 'type': 'int'},
'file_path': {'key': 'filePath', 'type': 'str'},
'release_env_id': {'key': 'releaseEnvId', 'type': 'int'},
'release_id': {'key': 'releaseId', 'type': 'int'},
'result_id': {'key': 'resultId', 'type': 'int'},
'run_id': {'key': 'runId', 'type': 'int'},
'scope': {'key': 'scope', 'type': 'object'},
'sub_result_id': {'key': 'subResultId', 'type': 'int'},
'type': {'key': 'type', 'type': 'object'}
}
def __init__(self, build_id=None, file_path=None, release_env_id=None, release_id=None, result_id=None, run_id=None, scope=None, sub_result_id=None, type=None):
super(TestLogReference, self).__init__()
self.build_id = build_id
self.file_path = file_path
self.release_env_id = release_env_id
self.release_id = release_id
self.result_id = result_id
self.run_id = run_id
self.scope = scope
self.sub_result_id = sub_result_id
self.type = type
class TestLogStoreAttachment(Model):
"""
Attachment metadata for test attachments from LogStore.
:param attachment_type: Attachment type.
:type attachment_type: object
:param comment: Comment associated with attachment.
:type comment: str
:param created_date: Attachment created date.
:type created_date: datetime
:param file_name: Attachment file name.
:type file_name: str
:param size: Attachment size.
:type size: long
:param url: Attachment Url.
:type url: str
"""
_attribute_map = {
'attachment_type': {'key': 'attachmentType', 'type': 'object'},
'comment': {'key': 'comment', 'type': 'str'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'file_name': {'key': 'fileName', 'type': 'str'},
'size': {'key': 'size', 'type': 'long'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, attachment_type=None, comment=None, created_date=None, file_name=None, size=None, url=None):
super(TestLogStoreAttachment, self).__init__()
self.attachment_type = attachment_type
self.comment = comment
self.created_date = created_date
self.file_name = file_name
self.size = size
self.url = url
class TestLogStoreAttachmentReference(Model):
"""
Reference to test attachment.
:param url: Url to download the attachment.
:type url: str
"""
_attribute_map = {
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, url=None):
super(TestLogStoreAttachmentReference, self).__init__()
self.url = url
class TestLogStoreEndpointDetails(Model):
"""
Represents Test Log store endpoint details.
:param endpoint_sASUri: Test log store connection Uri.
:type endpoint_sASUri: str
:param endpoint_type: Test log store endpoint type.
:type endpoint_type: object
:param status: Test log store status code
:type status: object
"""
_attribute_map = {
'endpoint_sASUri': {'key': 'endpointSASUri', 'type': 'str'},
'endpoint_type': {'key': 'endpointType', 'type': 'object'},
'status': {'key': 'status', 'type': 'object'}
}
def __init__(self, endpoint_sASUri=None, endpoint_type=None, status=None):
super(TestLogStoreEndpointDetails, self).__init__()
self.endpoint_sASUri = endpoint_sASUri
self.endpoint_type = endpoint_type
self.status = status
class TestMessageLogDetails(Model):
"""
Represents an entry in a test message log.
:param date_created: Date when the entry was created
:type date_created: datetime
:param entry_id: ID of the entry
:type entry_id: int
:param message: Message of the entry
:type message: str
"""
_attribute_map = {
'date_created': {'key': 'dateCreated', 'type': 'iso-8601'},
'entry_id': {'key': 'entryId', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'}
}
def __init__(self, date_created=None, entry_id=None, message=None):
super(TestMessageLogDetails, self).__init__()
self.date_created = date_created
self.entry_id = entry_id
self.message = message
class TestMethod(Model):
"""
:param container:
:type container: str
:param name:
:type name: str
"""
_attribute_map = {
'container': {'key': 'container', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, container=None, name=None):
super(TestMethod, self).__init__()
self.container = container
self.name = name
class TestOperationReference(Model):
"""
Class representing a reference to an operation.
:param id:
:type id: str
:param status:
:type status: str
:param url:
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, status=None, url=None):
super(TestOperationReference, self).__init__()
self.id = id
self.status = status
self.url = url
class TestResolutionState(Model):
"""
Test Resolution State Details.
:param id: Test Resolution state Id.
:type id: int
:param name: Test Resolution State Name.
:type name: str
:param project:
:type project: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'}
}
def __init__(self, id=None, name=None, project=None):
super(TestResolutionState, self).__init__()
self.id = id
self.name = name
self.project = project
class TestResultDocument(Model):
"""
:param operation_reference:
:type operation_reference: :class:`TestOperationReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestOperationReference>`
:param payload:
:type payload: :class:`TestResultPayload <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultPayload>`
"""
_attribute_map = {
'operation_reference': {'key': 'operationReference', 'type': 'TestOperationReference'},
'payload': {'key': 'payload', 'type': 'TestResultPayload'}
}
def __init__(self, operation_reference=None, payload=None):
super(TestResultDocument, self).__init__()
self.operation_reference = operation_reference
self.payload = payload
class TestResultFailuresAnalysis(Model):
"""
:param existing_failures:
:type existing_failures: :class:`TestFailureDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailureDetails>`
:param fixed_tests:
:type fixed_tests: :class:`TestFailureDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailureDetails>`
:param new_failures:
:type new_failures: :class:`TestFailureDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailureDetails>`
"""
_attribute_map = {
'existing_failures': {'key': 'existingFailures', 'type': 'TestFailureDetails'},
'fixed_tests': {'key': 'fixedTests', 'type': 'TestFailureDetails'},
'new_failures': {'key': 'newFailures', 'type': 'TestFailureDetails'}
}
def __init__(self, existing_failures=None, fixed_tests=None, new_failures=None):
super(TestResultFailuresAnalysis, self).__init__()
self.existing_failures = existing_failures
self.fixed_tests = fixed_tests
self.new_failures = new_failures
class TestResultHistory(Model):
"""
:param group_by_field:
:type group_by_field: str
:param results_for_group:
:type results_for_group: list of :class:`TestResultHistoryDetailsForGroup <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultHistoryDetailsForGroup>`
"""
_attribute_map = {
'group_by_field': {'key': 'groupByField', 'type': 'str'},
'results_for_group': {'key': 'resultsForGroup', 'type': '[TestResultHistoryDetailsForGroup]'}
}
def __init__(self, group_by_field=None, results_for_group=None):
super(TestResultHistory, self).__init__()
self.group_by_field = group_by_field
self.results_for_group = results_for_group
class TestResultHistoryDetailsForGroup(Model):
"""
:param group_by_value:
:type group_by_value: object
:param latest_result:
:type latest_result: :class:`TestCaseResult <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResult>`
"""
_attribute_map = {
'group_by_value': {'key': 'groupByValue', 'type': 'object'},
'latest_result': {'key': 'latestResult', 'type': 'TestCaseResult'}
}
def __init__(self, group_by_value=None, latest_result=None):
super(TestResultHistoryDetailsForGroup, self).__init__()
self.group_by_value = group_by_value
self.latest_result = latest_result
class TestResultHistoryForGroup(Model):
"""
List of test results filtered on the basis of GroupByValue
:param display_name: Display name of the group.
:type display_name: str
:param group_by_value: Name or Id of the group identifier by which results are grouped together.
:type group_by_value: str
:param results: List of results for GroupByValue
:type results: list of :class:`TestCaseResult <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResult>`
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'group_by_value': {'key': 'groupByValue', 'type': 'str'},
'results': {'key': 'results', 'type': '[TestCaseResult]'}
}
def __init__(self, display_name=None, group_by_value=None, results=None):
super(TestResultHistoryForGroup, self).__init__()
self.display_name = display_name
self.group_by_value = group_by_value
self.results = results
class TestResultMetaData(Model):
"""
Represents the metadata of a test result.
:param automated_test_name: AutomatedTestName of test result.
:type automated_test_name: str
:param automated_test_storage: AutomatedTestStorage of test result.
:type automated_test_storage: str
:param flaky_identifiers: List of Flaky Identifier for TestCaseReferenceId
:type flaky_identifiers: list of :class:`TestFlakyIdentifier <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFlakyIdentifier>`
:param owner: Owner of test result.
:type owner: str
:param priority: Priority of test result.
:type priority: int
:param test_case_reference_id: ID of TestCaseReference.
:type test_case_reference_id: int
:param test_case_title: TestCaseTitle of test result.
:type test_case_title: str
"""
_attribute_map = {
'automated_test_name': {'key': 'automatedTestName', 'type': 'str'},
'automated_test_storage': {'key': 'automatedTestStorage', 'type': 'str'},
'flaky_identifiers': {'key': 'flakyIdentifiers', 'type': '[TestFlakyIdentifier]'},
'owner': {'key': 'owner', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'test_case_reference_id': {'key': 'testCaseReferenceId', 'type': 'int'},
'test_case_title': {'key': 'testCaseTitle', 'type': 'str'}
}
def __init__(self, automated_test_name=None, automated_test_storage=None, flaky_identifiers=None, owner=None, priority=None, test_case_reference_id=None, test_case_title=None):
super(TestResultMetaData, self).__init__()
self.automated_test_name = automated_test_name
self.automated_test_storage = automated_test_storage
self.flaky_identifiers = flaky_identifiers
self.owner = owner
self.priority = priority
self.test_case_reference_id = test_case_reference_id
self.test_case_title = test_case_title
class TestResultMetaDataUpdateInput(Model):
"""
Represents a TestResultMetaData Input
:param flaky_identifiers: List of Flaky Identifiers
:type flaky_identifiers: list of :class:`TestFlakyIdentifier <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFlakyIdentifier>`
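Example (a sketch marking one test flaky; the branch name is hypothetical):
>>> update = TestResultMetaDataUpdateInput(flaky_identifiers=[
...     TestFlakyIdentifier(branch_name='refs/heads/main', is_flaky=True)])
>>> update.flaky_identifiers[0].is_flaky
True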
"""
_attribute_map = {
'flaky_identifiers': {'key': 'flakyIdentifiers', 'type': '[TestFlakyIdentifier]'}
}
def __init__(self, flaky_identifiers=None):
super(TestResultMetaDataUpdateInput, self).__init__()
self.flaky_identifiers = flaky_identifiers
class TestResultModelBase(Model):
"""
:param comment: Comment in result.
:type comment: str
:param completed_date: Time when execution completed (UTC).
:type completed_date: datetime
:param duration_in_ms: Duration of execution.
:type duration_in_ms: float
:param error_message: Error message in result.
:type error_message: str
:param outcome: Test outcome of result.
:type outcome: str
:param started_date: Time when execution started (UTC).
:type started_date: datetime
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'}
}
def __init__(self, comment=None, completed_date=None, duration_in_ms=None, error_message=None, outcome=None, started_date=None):
super(TestResultModelBase, self).__init__()
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.outcome = outcome
self.started_date = started_date
class TestResultParameterModel(Model):
"""
Test parameter information in a test iteration.
:param action_path: Test step path where parameter is referenced.
:type action_path: str
:param iteration_id: Iteration ID.
:type iteration_id: int
:param parameter_name: Name of parameter.
:type parameter_name: str
:param step_identifier: Step ID of the test case step. For a shared step, it is the step ID of the shared step in the test case work item, followed by the step ID within the shared step, separated by ";". Example: a test case work item has two steps: 1) a normal step with ID = 1 and 2) a shared step with ID = 2, which contains a) a normal step with ID = 1. The StepIdentifier value is "1" for the first step and "2;1" for the second.
:type step_identifier: str
:param url: Url of test parameter. Deprecated in hosted environment.
:type url: str
:param value: Value of parameter.
:type value: str
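Example (a parameter recorded inside a shared step, using the "2;1" StepIdentifier form described above; all values are hypothetical):
>>> param = TestResultParameterModel(iteration_id=1, parameter_name='username',
...     step_identifier='2;1', value='alice')
>>> param.step_identifier
'2;1'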
"""
_attribute_map = {
'action_path': {'key': 'actionPath', 'type': 'str'},
'iteration_id': {'key': 'iterationId', 'type': 'int'},
'parameter_name': {'key': 'parameterName', 'type': 'str'},
'step_identifier': {'key': 'stepIdentifier', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_path=None, iteration_id=None, parameter_name=None, step_identifier=None, url=None, value=None):
super(TestResultParameterModel, self).__init__()
self.action_path = action_path
self.iteration_id = iteration_id
self.parameter_name = parameter_name
self.step_identifier = step_identifier
self.url = url
self.value = value
class TestResultPayload(Model):
"""
:param comment:
:type comment: str
:param name:
:type name: str
:param stream:
:type stream: str
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'stream': {'key': 'stream', 'type': 'str'}
}
def __init__(self, comment=None, name=None, stream=None):
super(TestResultPayload, self).__init__()
self.comment = comment
self.name = name
self.stream = stream
class TestResultsContext(Model):
"""
:param build:
:type build: :class:`BuildReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.BuildReference>`
:param context_type:
:type context_type: object
:param pipeline_reference:
:type pipeline_reference: :class:`PipelineReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PipelineReference>`
:param release:
:type release: :class:`ReleaseReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReleaseReference>`
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'BuildReference'},
'context_type': {'key': 'contextType', 'type': 'object'},
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'release': {'key': 'release', 'type': 'ReleaseReference'}
}
def __init__(self, build=None, context_type=None, pipeline_reference=None, release=None):
super(TestResultsContext, self).__init__()
self.build = build
self.context_type = context_type
self.pipeline_reference = pipeline_reference
self.release = release
class TestResultsDetails(Model):
"""
:param group_by_field:
:type group_by_field: str
:param results_for_group:
:type results_for_group: list of :class:`TestResultsDetailsForGroup <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultsDetailsForGroup>`
"""
_attribute_map = {
'group_by_field': {'key': 'groupByField', 'type': 'str'},
'results_for_group': {'key': 'resultsForGroup', 'type': '[TestResultsDetailsForGroup]'}
}
def __init__(self, group_by_field=None, results_for_group=None):
super(TestResultsDetails, self).__init__()
self.group_by_field = group_by_field
self.results_for_group = results_for_group
class TestResultsDetailsForGroup(Model):
"""
:param group_by_value:
:type group_by_value: object
:param results:
:type results: list of :class:`TestCaseResult <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResult>`
:param results_count_by_outcome:
:type results_count_by_outcome: dict
:param tags:
:type tags: list of str
"""
_attribute_map = {
'group_by_value': {'key': 'groupByValue', 'type': 'object'},
'results': {'key': 'results', 'type': '[TestCaseResult]'},
'results_count_by_outcome': {'key': 'resultsCountByOutcome', 'type': '{AggregatedResultsByOutcome}'},
'tags': {'key': 'tags', 'type': '[str]'}
}
def __init__(self, group_by_value=None, results=None, results_count_by_outcome=None, tags=None):
super(TestResultsDetailsForGroup, self).__init__()
self.group_by_value = group_by_value
self.results = results
self.results_count_by_outcome = results_count_by_outcome
self.tags = tags
class TestResultsQuery(Model):
"""
:param fields:
:type fields: list of str
:param results:
:type results: list of :class:`TestCaseResult <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResult>`
:param results_filter:
:type results_filter: :class:`ResultsFilter <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ResultsFilter>`
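Example (a field-projection sketch; the field names are hypothetical and the ResultsFilter is omitted):
>>> query = TestResultsQuery(fields=['outcome', 'durationInMs'])
>>> query.fields
['outcome', 'durationInMs']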
"""
_attribute_map = {
'fields': {'key': 'fields', 'type': '[str]'},
'results': {'key': 'results', 'type': '[TestCaseResult]'},
'results_filter': {'key': 'resultsFilter', 'type': 'ResultsFilter'}
}
def __init__(self, fields=None, results=None, results_filter=None):
super(TestResultsQuery, self).__init__()
self.fields = fields
self.results = results
self.results_filter = results_filter
class TestResultsSettings(Model):
"""
:param flaky_settings: IsRequired and EmitDefaultValue are set to false so that, if the user does not pass a value, it does not take part in serialization and deserialization.
:type flaky_settings: :class:`FlakySettings <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.FlakySettings>`
:param new_test_result_logging_settings:
:type new_test_result_logging_settings: :class:`NewTestResultLoggingSettings <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.NewTestResultLoggingSettings>`
"""
_attribute_map = {
'flaky_settings': {'key': 'flakySettings', 'type': 'FlakySettings'},
'new_test_result_logging_settings': {'key': 'newTestResultLoggingSettings', 'type': 'NewTestResultLoggingSettings'}
}
def __init__(self, flaky_settings=None, new_test_result_logging_settings=None):
super(TestResultsSettings, self).__init__()
self.flaky_settings = flaky_settings
self.new_test_result_logging_settings = new_test_result_logging_settings
class TestResultSummary(Model):
"""
:param aggregated_results_analysis:
:type aggregated_results_analysis: :class:`AggregatedResultsAnalysis <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.AggregatedResultsAnalysis>`
:param no_config_runs_count:
:type no_config_runs_count: int
:param team_project:
:type team_project: :class:`TeamProjectReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TeamProjectReference>`
:param test_failures:
:type test_failures: :class:`TestFailuresAnalysis <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestFailuresAnalysis>`
:param test_results_context:
:type test_results_context: :class:`TestResultsContext <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestResultsContext>`
:param total_runs_count:
:type total_runs_count: int
"""
_attribute_map = {
'aggregated_results_analysis': {'key': 'aggregatedResultsAnalysis', 'type': 'AggregatedResultsAnalysis'},
'no_config_runs_count': {'key': 'noConfigRunsCount', 'type': 'int'},
'team_project': {'key': 'teamProject', 'type': 'TeamProjectReference'},
'test_failures': {'key': 'testFailures', 'type': 'TestFailuresAnalysis'},
'test_results_context': {'key': 'testResultsContext', 'type': 'TestResultsContext'},
'total_runs_count': {'key': 'totalRunsCount', 'type': 'int'}
}
def __init__(self, aggregated_results_analysis=None, no_config_runs_count=None, team_project=None, test_failures=None, test_results_context=None, total_runs_count=None):
super(TestResultSummary, self).__init__()
self.aggregated_results_analysis = aggregated_results_analysis
self.no_config_runs_count = no_config_runs_count
self.team_project = team_project
self.test_failures = test_failures
self.test_results_context = test_results_context
self.total_runs_count = total_runs_count
class TestResultsUpdateSettings(Model):
"""
:param flaky_settings: FlakySettings defines Flaky Settings Data.
:type flaky_settings: :class:`FlakySettings <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.FlakySettings>`
:param new_test_result_logging_settings: NewTestResultLoggingSettings defines the setting for logging new test results
:type new_test_result_logging_settings: :class:`NewTestResultLoggingSettings <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.NewTestResultLoggingSettings>`
"""
_attribute_map = {
'flaky_settings': {'key': 'flakySettings', 'type': 'FlakySettings'},
'new_test_result_logging_settings': {'key': 'newTestResultLoggingSettings', 'type': 'NewTestResultLoggingSettings'}
}
def __init__(self, flaky_settings=None, new_test_result_logging_settings=None):
super(TestResultsUpdateSettings, self).__init__()
self.flaky_settings = flaky_settings
self.new_test_result_logging_settings = new_test_result_logging_settings
class TestResultTrendFilter(Model):
"""
:param branch_names:
:type branch_names: list of str
:param build_count:
:type build_count: int
:param definition_ids:
:type definition_ids: list of int
:param env_definition_ids:
:type env_definition_ids: list of int
:param max_complete_date:
:type max_complete_date: datetime
:param publish_context:
:type publish_context: str
:param test_run_titles:
:type test_run_titles: list of str
:param trend_days:
:type trend_days: int
"""
_attribute_map = {
'branch_names': {'key': 'branchNames', 'type': '[str]'},
'build_count': {'key': 'buildCount', 'type': 'int'},
'definition_ids': {'key': 'definitionIds', 'type': '[int]'},
'env_definition_ids': {'key': 'envDefinitionIds', 'type': '[int]'},
'max_complete_date': {'key': 'maxCompleteDate', 'type': 'iso-8601'},
'publish_context': {'key': 'publishContext', 'type': 'str'},
'test_run_titles': {'key': 'testRunTitles', 'type': '[str]'},
'trend_days': {'key': 'trendDays', 'type': 'int'}
}
def __init__(self, branch_names=None, build_count=None, definition_ids=None, env_definition_ids=None, max_complete_date=None, publish_context=None, test_run_titles=None, trend_days=None):
super(TestResultTrendFilter, self).__init__()
self.branch_names = branch_names
self.build_count = build_count
self.definition_ids = definition_ids
self.env_definition_ids = env_definition_ids
self.max_complete_date = max_complete_date
self.publish_context = publish_context
self.test_run_titles = test_run_titles
self.trend_days = trend_days
class TestRun(Model):
"""
Test run details.
:param build: Build associated with this test run.
:type build: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param build_configuration: Build configuration details associated with this test run.
:type build_configuration: :class:`BuildConfiguration <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.BuildConfiguration>`
:param comment: Comments entered by those analyzing the run.
:type comment: str
:param completed_date: Completed date time of the run.
:type completed_date: datetime
:param controller: Test Run Controller.
:type controller: str
:param created_date: Test Run CreatedDate.
:type created_date: datetime
:param custom_fields: List of Custom Fields for TestRun.
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CustomTestField>`
:param drop_location: Drop Location for the test Run.
:type drop_location: str
:param dtl_aut_environment:
:type dtl_aut_environment: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param dtl_environment:
:type dtl_environment: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param dtl_environment_creation_details:
:type dtl_environment_creation_details: :class:`DtlEnvironmentDetails <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.DtlEnvironmentDetails>`
:param due_date: Due date and time for test run.
:type due_date: datetime
:param error_message: Error message associated with the run.
:type error_message: str
:param filter:
:type filter: :class:`RunFilter <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunFilter>`
:param id: ID of the test run.
:type id: int
:param incomplete_tests: Number of Incomplete Tests.
:type incomplete_tests: int
:param is_automated: true if test run is automated, false otherwise.
:type is_automated: bool
:param iteration: The iteration to which the run belongs.
:type iteration: str
:param last_updated_by: Team Foundation ID of the identity that last updated the test run.
:type last_updated_by: :class:`IdentityRef <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.IdentityRef>`
:param last_updated_date: Last updated date and time
:type last_updated_date: datetime
:param name: Name of the test run.
:type name: str
:param not_applicable_tests: Number of Not Applicable Tests.
:type not_applicable_tests: int
:param owner: Team Foundation ID of the owner of the runs.
:type owner: :class:`IdentityRef <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.IdentityRef>`
:param passed_tests: Number of passed tests in the run
:type passed_tests: int
:param phase: Phase/State for the testRun.
:type phase: str
:param pipeline_reference: Reference of the pipeline to which this test run belongs.
:type pipeline_reference: :class:`PipelineReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.PipelineReference>`
:param plan: Test plan associated with this test run.
:type plan: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param post_process_state: Post Process State.
:type post_process_state: str
:param project: Project associated with this run.
:type project: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param release: Release Reference for the Test Run.
:type release: :class:`ReleaseReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ReleaseReference>`
:param release_environment_uri: Release Environment Uri for TestRun.
:type release_environment_uri: str
:param release_uri: Release Uri for TestRun.
:type release_uri: str
:param revision:
:type revision: int
:param run_statistics: RunSummary by outcome.
:type run_statistics: list of :class:`RunStatistic <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunStatistic>`
:param started_date: Start date time of the run.
:type started_date: datetime
:param state: The state of the run. Type: TestRunState. Valid states: Unspecified, NotStarted, InProgress, Completed, Waiting, Aborted, NeedsInvestigation.
:type state: str
:param substate: TestRun Substate.
:type substate: object
:param tags: Tags attached with this test run.
:type tags: list of :class:`TestTag <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestTag>`
:param test_environment: Test environment associated with the run.
:type test_environment: :class:`TestEnvironment <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestEnvironment>`
:param test_message_log_id:
:type test_message_log_id: int
:param test_settings:
:type test_settings: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param total_tests: Total tests in the run
:type total_tests: int
:param unanalyzed_tests: Number of failed tests in the run.
:type unanalyzed_tests: int
:param url: Url of the test run
:type url: str
:param web_access_url: Web Access Url for TestRun.
:type web_access_url: str
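Example (a minimal automated-run sketch; the name, state and counts are hypothetical):
>>> run = TestRun(name='Nightly regression', is_automated=True,
...     state='InProgress', total_tests=120, passed_tests=118)
>>> (run.state, run.passed_tests)
('InProgress', 118)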
"""
_attribute_map = {
'build': {'key': 'build', 'type': 'ShallowReference'},
'build_configuration': {'key': 'buildConfiguration', 'type': 'BuildConfiguration'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'controller': {'key': 'controller', 'type': 'str'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'drop_location': {'key': 'dropLocation', 'type': 'str'},
'dtl_aut_environment': {'key': 'dtlAutEnvironment', 'type': 'ShallowReference'},
'dtl_environment': {'key': 'dtlEnvironment', 'type': 'ShallowReference'},
'dtl_environment_creation_details': {'key': 'dtlEnvironmentCreationDetails', 'type': 'DtlEnvironmentDetails'},
'due_date': {'key': 'dueDate', 'type': 'iso-8601'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'RunFilter'},
'id': {'key': 'id', 'type': 'int'},
'incomplete_tests': {'key': 'incompleteTests', 'type': 'int'},
'is_automated': {'key': 'isAutomated', 'type': 'bool'},
'iteration': {'key': 'iteration', 'type': 'str'},
'last_updated_by': {'key': 'lastUpdatedBy', 'type': 'IdentityRef'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'not_applicable_tests': {'key': 'notApplicableTests', 'type': 'int'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'passed_tests': {'key': 'passedTests', 'type': 'int'},
'phase': {'key': 'phase', 'type': 'str'},
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'plan': {'key': 'plan', 'type': 'ShallowReference'},
'post_process_state': {'key': 'postProcessState', 'type': 'str'},
'project': {'key': 'project', 'type': 'ShallowReference'},
'release': {'key': 'release', 'type': 'ReleaseReference'},
'release_environment_uri': {'key': 'releaseEnvironmentUri', 'type': 'str'},
'release_uri': {'key': 'releaseUri', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'run_statistics': {'key': 'runStatistics', 'type': '[RunStatistic]'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'str'},
'substate': {'key': 'substate', 'type': 'object'},
'tags': {'key': 'tags', 'type': '[TestTag]'},
'test_environment': {'key': 'testEnvironment', 'type': 'TestEnvironment'},
'test_message_log_id': {'key': 'testMessageLogId', 'type': 'int'},
'test_settings': {'key': 'testSettings', 'type': 'ShallowReference'},
'total_tests': {'key': 'totalTests', 'type': 'int'},
'unanalyzed_tests': {'key': 'unanalyzedTests', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'},
'web_access_url': {'key': 'webAccessUrl', 'type': 'str'}
}
def __init__(self, build=None, build_configuration=None, comment=None, completed_date=None, controller=None, created_date=None, custom_fields=None, drop_location=None, dtl_aut_environment=None, dtl_environment=None, dtl_environment_creation_details=None, due_date=None, error_message=None, filter=None, id=None, incomplete_tests=None, is_automated=None, iteration=None, last_updated_by=None, last_updated_date=None, name=None, not_applicable_tests=None, owner=None, passed_tests=None, phase=None, pipeline_reference=None, plan=None, post_process_state=None, project=None, release=None, release_environment_uri=None, release_uri=None, revision=None, run_statistics=None, started_date=None, state=None, substate=None, tags=None, test_environment=None, test_message_log_id=None, test_settings=None, total_tests=None, unanalyzed_tests=None, url=None, web_access_url=None):
super(TestRun, self).__init__()
self.build = build
self.build_configuration = build_configuration
self.comment = comment
self.completed_date = completed_date
self.controller = controller
self.created_date = created_date
self.custom_fields = custom_fields
self.drop_location = drop_location
self.dtl_aut_environment = dtl_aut_environment
self.dtl_environment = dtl_environment
self.dtl_environment_creation_details = dtl_environment_creation_details
self.due_date = due_date
self.error_message = error_message
self.filter = filter
self.id = id
self.incomplete_tests = incomplete_tests
self.is_automated = is_automated
self.iteration = iteration
self.last_updated_by = last_updated_by
self.last_updated_date = last_updated_date
self.name = name
self.not_applicable_tests = not_applicable_tests
self.owner = owner
self.passed_tests = passed_tests
self.phase = phase
self.pipeline_reference = pipeline_reference
self.plan = plan
self.post_process_state = post_process_state
self.project = project
self.release = release
self.release_environment_uri = release_environment_uri
self.release_uri = release_uri
self.revision = revision
self.run_statistics = run_statistics
self.started_date = started_date
self.state = state
self.substate = substate
self.tags = tags
self.test_environment = test_environment
self.test_message_log_id = test_message_log_id
self.test_settings = test_settings
self.total_tests = total_tests
self.unanalyzed_tests = unanalyzed_tests
self.url = url
self.web_access_url = web_access_url
class TestRunCoverage(Model):
"""
Test Run Code Coverage Details
:param last_error: Last Error
:type last_error: str
:param modules: List of Modules Coverage
:type modules: list of :class:`ModuleCoverage <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ModuleCoverage>`
:param state: State
:type state: str
:param test_run: Reference of test Run.
:type test_run: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
"""
_attribute_map = {
'last_error': {'key': 'lastError', 'type': 'str'},
'modules': {'key': 'modules', 'type': '[ModuleCoverage]'},
'state': {'key': 'state', 'type': 'str'},
'test_run': {'key': 'testRun', 'type': 'ShallowReference'}
}
def __init__(self, last_error=None, modules=None, state=None, test_run=None):
super(TestRunCoverage, self).__init__()
self.last_error = last_error
self.modules = modules
self.state = state
self.test_run = test_run
class TestRunStatistic(Model):
"""
Test run statistics.
:param run:
:type run: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param run_statistics:
:type run_statistics: list of :class:`RunStatistic <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.RunStatistic>`
"""
_attribute_map = {
'run': {'key': 'run', 'type': 'ShallowReference'},
'run_statistics': {'key': 'runStatistics', 'type': '[RunStatistic]'}
}
def __init__(self, run=None, run_statistics=None):
super(TestRunStatistic, self).__init__()
self.run = run
self.run_statistics = run_statistics
class TestSettings(Model):
"""
Represents the test settings of the run. Used to create test settings and fetch test settings
:param area_path: Area path required to create test settings
:type area_path: str
:param description: Description of the test settings. Used in create test settings.
:type description: str
:param is_public: Indicates if the test settings are public or private. Used in create test settings.
:type is_public: bool
:param machine_roles: Xml string of machine roles. Used in create test settings.
:type machine_roles: str
:param test_settings_content: Test settings content.
:type test_settings_content: str
:param test_settings_id: Test settings id.
:type test_settings_id: int
:param test_settings_name: Test settings name.
:type test_settings_name: str
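Example (a creation sketch; the names are hypothetical):
>>> settings = TestSettings(area_path='MyProject', description='Default run settings',
...     is_public=True, test_settings_name='default.testsettings')
>>> settings.is_public
True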
"""
_attribute_map = {
'area_path': {'key': 'areaPath', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'is_public': {'key': 'isPublic', 'type': 'bool'},
'machine_roles': {'key': 'machineRoles', 'type': 'str'},
'test_settings_content': {'key': 'testSettingsContent', 'type': 'str'},
'test_settings_id': {'key': 'testSettingsId', 'type': 'int'},
'test_settings_name': {'key': 'testSettingsName', 'type': 'str'}
}
def __init__(self, area_path=None, description=None, is_public=None, machine_roles=None, test_settings_content=None, test_settings_id=None, test_settings_name=None):
super(TestSettings, self).__init__()
self.area_path = area_path
self.description = description
self.is_public = is_public
self.machine_roles = machine_roles
self.test_settings_content = test_settings_content
self.test_settings_id = test_settings_id
self.test_settings_name = test_settings_name
class TestSubResult(Model):
"""
Represents a sub result of a test result.
:param comment: Comment in sub result.
:type comment: str
:param completed_date: Time when test execution completed (UTC).
:type completed_date: datetime
:param computer_name: Machine where test executed.
:type computer_name: str
:param configuration: Reference to test configuration.
:type configuration: :class:`ShallowReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.ShallowReference>`
:param custom_fields: Additional properties of sub result.
:type custom_fields: list of :class:`CustomTestField <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.CustomTestField>`
:param display_name: Name of sub result.
:type display_name: str
:param duration_in_ms: Duration of test execution.
:type duration_in_ms: long
:param error_message: Error message in sub result.
:type error_message: str
:param id: ID of sub result.
:type id: int
:param last_updated_date: Time when the result was last updated (UTC).
:type last_updated_date: datetime
:param outcome: Outcome of sub result.
:type outcome: str
:param parent_id: Immediate parent ID of sub result.
:type parent_id: int
:param result_group_type: Hierarchy type of the result; the default value None means it is a leaf node.
:type result_group_type: object
:param sequence_id: Index number of sub result.
:type sequence_id: int
:param stack_trace: Stacktrace.
:type stack_trace: str
:param started_date: Time when test execution started (UTC).
:type started_date: datetime
:param sub_results: List of sub results inside a sub result; if ResultGroupType is not None, it holds sub results of the corresponding type.
:type sub_results: list of :class:`TestSubResult <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestSubResult>`
:param test_result: Reference to test result.
:type test_result: :class:`TestCaseResultIdentifier <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestCaseResultIdentifier>`
:param url: Url of sub result.
:type url: str
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ShallowReference'},
'custom_fields': {'key': 'customFields', 'type': '[CustomTestField]'},
'display_name': {'key': 'displayName', 'type': 'str'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'long'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'last_updated_date': {'key': 'lastUpdatedDate', 'type': 'iso-8601'},
'outcome': {'key': 'outcome', 'type': 'str'},
'parent_id': {'key': 'parentId', 'type': 'int'},
'result_group_type': {'key': 'resultGroupType', 'type': 'object'},
'sequence_id': {'key': 'sequenceId', 'type': 'int'},
'stack_trace': {'key': 'stackTrace', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'sub_results': {'key': 'subResults', 'type': '[TestSubResult]'},
'test_result': {'key': 'testResult', 'type': 'TestCaseResultIdentifier'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, comment=None, completed_date=None, computer_name=None, configuration=None, custom_fields=None, display_name=None, duration_in_ms=None, error_message=None, id=None, last_updated_date=None, outcome=None, parent_id=None, result_group_type=None, sequence_id=None, stack_trace=None, started_date=None, sub_results=None, test_result=None, url=None):
super(TestSubResult, self).__init__()
self.comment = comment
self.completed_date = completed_date
self.computer_name = computer_name
self.configuration = configuration
self.custom_fields = custom_fields
self.display_name = display_name
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.id = id
self.last_updated_date = last_updated_date
self.outcome = outcome
self.parent_id = parent_id
self.result_group_type = result_group_type
self.sequence_id = sequence_id
self.stack_trace = stack_trace
self.started_date = started_date
self.sub_results = sub_results
self.test_result = test_result
self.url = url
class TestSummaryForWorkItem(Model):
"""
:param summary:
:type summary: :class:`AggregatedDataForResultTrend <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.AggregatedDataForResultTrend>`
:param work_item:
:type work_item: :class:`WorkItemReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.WorkItemReference>`
"""
_attribute_map = {
'summary': {'key': 'summary', 'type': 'AggregatedDataForResultTrend'},
'work_item': {'key': 'workItem', 'type': 'WorkItemReference'}
}
def __init__(self, summary=None, work_item=None):
super(TestSummaryForWorkItem, self).__init__()
self.summary = summary
self.work_item = work_item
class TestTag(Model):
"""
Tag attached to a run or result.
:param name: Name of the tag; an alphanumeric value of fewer than 30 characters
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, name=None):
super(TestTag, self).__init__()
self.name = name
class TestTagSummary(Model):
"""
Test tag summary for build or release grouped by test run.
:param tags_group_by_test_artifact: Dictionary which contains tags associated with a test run.
:type tags_group_by_test_artifact: dict
"""
_attribute_map = {
'tags_group_by_test_artifact': {'key': 'tagsGroupByTestArtifact', 'type': '{[TestTag]}'}
}
def __init__(self, tags_group_by_test_artifact=None):
super(TestTagSummary, self).__init__()
self.tags_group_by_test_artifact = tags_group_by_test_artifact
class TestTagsUpdateModel(Model):
"""
Tags to update on a run or result.
:param tags:
:type tags: list of { key: OperationType; value: [TestTag] }
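Example (a sketch of the expected entry shape; 'Add' is an assumed OperationType value and a plain dict stands in for the key/value pair):
>>> update = TestTagsUpdateModel(tags=[
...     {'key': 'Add', 'value': [TestTag(name='smoke')]}])
>>> update.tags[0]['value'][0].name
'smoke'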
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '[{ key: OperationType; value: [TestTag] }]'}
}
def __init__(self, tags=None):
super(TestTagsUpdateModel, self).__init__()
self.tags = tags
class TestToWorkItemLinks(Model):
"""
:param test:
:type test: :class:`TestMethod <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestMethod>`
:param work_items:
:type work_items: list of :class:`WorkItemReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.WorkItemReference>`
"""
_attribute_map = {
'test': {'key': 'test', 'type': 'TestMethod'},
'work_items': {'key': 'workItems', 'type': '[WorkItemReference]'}
}
def __init__(self, test=None, work_items=None):
super(TestToWorkItemLinks, self).__init__()
self.test = test
self.work_items = work_items
class WorkItemReference(Model):
"""
WorkItem reference Details.
:param id: WorkItem Id.
:type id: str
:param name: WorkItem Name.
:type name: str
:param type: WorkItem Type. Valid values: Bug, Task, User Story, Test Case.
:type type: str
:param url: WorkItem Url.
:type url: str
:param web_url: WorkItem WebUrl.
:type web_url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'web_url': {'key': 'webUrl', 'type': 'str'}
}
def __init__(self, id=None, name=None, type=None, url=None, web_url=None):
super(WorkItemReference, self).__init__()
self.id = id
self.name = name
self.type = type
self.url = url
self.web_url = web_url
class WorkItemToTestLinks(Model):
"""
:param executed_in:
:type executed_in: object
:param tests:
:type tests: list of :class:`TestMethod <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.TestMethod>`
:param work_item:
:type work_item: :class:`WorkItemReference <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.WorkItemReference>`
"""
_attribute_map = {
'executed_in': {'key': 'executedIn', 'type': 'object'},
'tests': {'key': 'tests', 'type': '[TestMethod]'},
'work_item': {'key': 'workItem', 'type': 'WorkItemReference'}
}
def __init__(self, executed_in=None, tests=None, work_item=None):
super(WorkItemToTestLinks, self).__init__()
self.executed_in = executed_in
self.tests = tests
self.work_item = work_item
class TestActionResultModel(TestResultModelBase):
"""
Represents a test step result.
:param comment: Comment in result.
:type comment: str
:param completed_date: Time when execution completed(UTC).
:type completed_date: datetime
:param duration_in_ms: Duration of execution.
:type duration_in_ms: float
:param error_message: Error message in result.
:type error_message: str
:param outcome: Test outcome of result.
:type outcome: str
:param started_date: Time when execution started(UTC).
:type started_date: datetime
    :param action_path: Path identifier for test step in test case workitem. Note: 1) It is represented in hexadecimal format with 8 digits for a step. 2) Internally, the step ID value for the first step starts with 2, so actionPath = 00000002; step 9 will have an ID = 10 and actionPath = 0000000a; step 15 will have an ID = 16 and actionPath = 00000010. 3) The actionPath of a shared step is concatenated with the parent step of the test case. Example: it would be something of type 0000000300000001, where 00000003 denotes the action path of the test step and 00000001 denotes the action path for the shared step.
:type action_path: str
:param iteration_id: Iteration ID of test action result.
:type iteration_id: int
:param shared_step_model: Reference to shared step workitem.
:type shared_step_model: :class:`SharedStepModel <azure.devops.v7_1.microsoft._team_foundation._test_management._web_api.models.SharedStepModel>`
    :param step_identifier: This is the step ID of the test case. For a shared step, it is the step ID of the shared step in the test case workitem, followed by the step ID within the shared step. Example: a TestCase workitem has two steps: 1) a normal step with ID = 1, and 2) a shared step with ID = 2 containing a) a normal step with ID = 1. The StepIdentifier value for the first step is "1" and for the second step is "2;1".
:type step_identifier: str
:param url: Url of test action result. Deprecated in hosted environment.
:type url: str
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'action_path': {'key': 'actionPath', 'type': 'str'},
'iteration_id': {'key': 'iterationId', 'type': 'int'},
'shared_step_model': {'key': 'sharedStepModel', 'type': 'SharedStepModel'},
'step_identifier': {'key': 'stepIdentifier', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, comment=None, completed_date=None, duration_in_ms=None, error_message=None, outcome=None, started_date=None, action_path=None, iteration_id=None, shared_step_model=None, step_identifier=None, url=None):
super(TestActionResultModel, self).__init__(comment=comment, completed_date=completed_date, duration_in_ms=duration_in_ms, error_message=error_message, outcome=outcome, started_date=started_date)
self.action_path = action_path
self.iteration_id = iteration_id
self.shared_step_model = shared_step_model
self.step_identifier = step_identifier
self.url = url
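# Illustrative sketch of the actionPath encoding described above (an
# inference from the docstring, not an API shipped by this module): the
# 8-digit hexadecimal path of step N encodes the internal step ID N + 1, and
# a shared step's path is concatenated onto its parent test-case step's path.
def _example_action_path(step_number, parent_path=''):
    return parent_path + format(step_number + 1, '08x')
assert _example_action_path(1) == '00000002'
assert _example_action_path(9) == '0000000a'
assert _example_action_path(15) == '00000010'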
__all__ = [
'AggregatedDataForResultTrend',
'AggregatedResultDetailsByOutcome',
'AggregatedResultsAnalysis',
'AggregatedResultsByOutcome',
'AggregatedResultsDifference',
'AggregatedRunsByOutcome',
'AggregatedRunsByState',
'BuildConfiguration',
'BuildCoverage',
'BuildReference',
'CodeCoverageData',
'CodeCoverageStatistics',
'CodeCoverageSummary',
'CoverageStatistics',
'CustomTestField',
'DtlEnvironmentDetails',
'FailingSince',
'FieldDetailsForTestResults',
'FileCoverageRequest',
'FlakyDetection',
'FlakyDetectionPipelines',
'FlakySettings',
'FunctionCoverage',
'GraphSubjectBase',
'IdentityRef',
'JobReference',
'ModuleCoverage',
'NewTestResultLoggingSettings',
'PhaseReference',
'PipelineReference',
'PipelineTestMetrics',
'QueryModel',
'ReferenceLinks',
'ReleaseReference',
'ResultsAnalysis',
'ResultsFilter',
'ResultsSummaryByOutcome',
'ResultSummary',
'RunCreateModel',
'RunFilter',
'RunStatistic',
'RunSummary',
'RunSummaryModel',
'RunUpdateModel',
'ShallowReference',
'ShallowTestCaseResult',
'SharedStepModel',
'StageReference',
'TeamProjectReference',
'TestAttachment',
'TestAttachmentReference',
'TestAttachmentRequestModel',
'TestCaseResult',
'TestCaseResultAttachmentModel',
'TestCaseResultIdentifier',
'TestEnvironment',
'TestFailureDetails',
'TestFailuresAnalysis',
'TestFailureType',
'TestFlakyIdentifier',
'TestHistoryQuery',
'TestIterationDetailsModel',
'TestLog',
'TestLogReference',
'TestLogStoreAttachment',
'TestLogStoreAttachmentReference',
'TestLogStoreEndpointDetails',
'TestMessageLogDetails',
'TestMethod',
'TestOperationReference',
'TestResolutionState',
'TestResultDocument',
'TestResultFailuresAnalysis',
'TestResultHistory',
'TestResultHistoryDetailsForGroup',
'TestResultHistoryForGroup',
'TestResultMetaData',
'TestResultMetaDataUpdateInput',
'TestResultModelBase',
'TestResultParameterModel',
'TestResultPayload',
'TestResultsContext',
'TestResultsDetails',
'TestResultsDetailsForGroup',
'TestResultsQuery',
'TestResultsSettings',
'TestResultSummary',
'TestResultsUpdateSettings',
'TestResultTrendFilter',
'TestRun',
'TestRunCoverage',
'TestRunStatistic',
'TestSettings',
'TestSubResult',
'TestSummaryForWorkItem',
'TestTag',
'TestTagSummary',
'TestTagsUpdateModel',
'TestToWorkItemLinks',
'WorkItemReference',
'WorkItemToTestLinks',
'TestActionResultModel',
]
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/test_results/models.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/test_results/models.py",
"repo_id": "azure-devops-python-api",
"token_count": 62253
}
| 374 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import sys
import glob
import os
from subprocess import check_call, CalledProcessError
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..'))
os.chdir(root_dir)
def exec_command(command, cwd):
try:
print('CWD: ' + cwd)
print('Executing: ' + command)
check_call(command.split(), cwd=cwd)
print()
except CalledProcessError as err:
print(err, file=sys.stderr)
sys.exit(1)
setup_files = [setup_file for root, dirs, files in os.walk(root_dir)
for setup_file in glob.glob(os.path.join(root, 'setup.py'))]
# Build a universal wheel for each discovered setup.py
for file in setup_files:
exec_command('python setup.py bdist_wheel --universal', os.path.dirname(file))
|
azure-devops-python-api/scripts/create_wheels.py/0
|
{
"file_path": "azure-devops-python-api/scripts/create_wheels.py",
"repo_id": "azure-devops-python-api",
"token_count": 362
}
| 375 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class QuantumClientConfiguration: # pylint: disable=too-many-instance-attributes
"""Configuration for QuantumClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param azure_region: Supported Azure regions for Azure Quantum Services. For example, "eastus".
Required.
:type azure_region: str
:param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g.
00000000-0000-0000-0000-000000000000). Required.
:type subscription_id: str
:param resource_group_name: Name of an Azure resource group. Required.
:type resource_group_name: str
:param workspace_name: Name of the workspace. Required.
:type workspace_name: str
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
    :keyword api_version: Api Version. Default value is "2022-09-12-preview". Note that overriding
     this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
azure_region: str,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
credential: "TokenCredential",
**kwargs: Any
) -> None:
api_version: str = kwargs.pop("api_version", "2022-09-12-preview")
if azure_region is None:
raise ValueError("Parameter 'azure_region' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if resource_group_name is None:
raise ValueError("Parameter 'resource_group_name' must not be None.")
if workspace_name is None:
raise ValueError("Parameter 'workspace_name' must not be None.")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.azure_region = azure_region
self.subscription_id = subscription_id
self.resource_group_name = resource_group_name
self.workspace_name = workspace_name
self.credential = credential
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://quantum.microsoft.com/.default"])
kwargs.setdefault("sdk_moniker", "quantum/{}".format(VERSION))
self.polling_interval = kwargs.get("polling_interval", 30)
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(
self.credential, *self.credential_scopes, **kwargs
)
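# Minimal usage sketch, assuming the azure-identity package is installed; the
# workspace values below are placeholders, not real resources.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    config = QuantumClientConfiguration(
        azure_region="eastus",
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="myresourcegroup",
        workspace_name="myworkspace",
        credential=DefaultAzureCredential(),
    )
    # The default credential scope targets the Azure Quantum service.
    assert config.credential_scopes == ["https://quantum.microsoft.com/.default"]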
|
azure-quantum-python/azure-quantum/azure/quantum/_client/_configuration.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/_client/_configuration.py",
"repo_id": "azure-quantum-python",
"token_count": 1534
}
| 376 |
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
from urllib.request import urlopen
def df_chemistry() -> bytes:
"""
Returns bitcode of a QIR program for the double-factorized chemistry
quantum algorithm.
"""
return urlopen("https://aka.ms/RE/df_chemistry").read()
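# Usage sketch: calling df_chemistry() performs a network request to the
# aka.ms redirect above; the output file name is an arbitrary illustration.
if __name__ == "__main__":
    bitcode = df_chemistry()
    with open("df_chemistry.bc", "wb") as f:
        f.write(bitcode)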
|
azure-quantum-python/azure-quantum/azure/quantum/chemistry/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/chemistry/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 106
}
| 377 |
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
"""Azure Quantum Qiskit Provider"""
from .provider import AzureQuantumProvider, AzureQuantumJob
from azure.quantum import __version__
__all__ = [
"AzureQuantumProvider",
"AzureQuantumJob",
"__version__"
]
|
azure-quantum-python/azure-quantum/azure/quantum/qiskit/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/qiskit/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 99
}
| 378 |
"""Defines classes for interacting with Microsoft Elements services"""
|
azure-quantum-python/azure-quantum/azure/quantum/target/microsoft/elements/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/microsoft/elements/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 11
}
| 379 |
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
import warnings
from typing import Any, Dict, List, TYPE_CHECKING, Union, Type
from azure.quantum.target import *
if TYPE_CHECKING:
from azure.quantum import Workspace
from azure.quantum._client.models import TargetStatus
class TargetFactory:
"""Factory class for generating a Target based on a
provider and target name
"""
__instances = {}
def __new__(cls, *args, **kwargs):
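        # Cache one factory instance per base class, so repeated
        # construction with the same base_cls returns the same object.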
base_cls = kwargs.get("base_cls")
if cls.__instances.get(base_cls) is None:
cls.__instances[base_cls] = super().__new__(cls)
return cls.__instances[base_cls]
def __init__(
self,
base_cls: Type[Target],
workspace: "Workspace",
default_targets: Dict[str, Any] = DEFAULT_TARGETS,
all_targets: Dict[str, Any] = None
):
"""Target factory class for creating targets
based on a name and/or provider ID.
        :param base_cls: Base class for finding first and second
generation child classes.
:type base_cls: Target
:param workspace: Azure Quantum Workspace
:type workspace: Workspace
:param default_targets: Dictionary of default target classes keyed
by provider ID, defaults to DEFAULT_TARGETS
:type default_targets: Dict[str, Any], optional
:param all_targets: Dictionary of all target classes by name,
optional. Defaults to finding all first and second degree
subclasses of base_cls by name via cls.target_names.
:type all_targets: Dict[str, Any]
"""
self._workspace = workspace
self._base_cls = base_cls
# case insensitive lookup
self._default_targets = {k.lower(): v for k, v in default_targets.items()}
self._all_targets = all_targets or self._get_all_target_cls()
def _get_all_target_cls(self) -> Dict[str, Target]:
"""Get all target classes by target name"""
return {
name.lower(): _t for t in self._base_cls.__subclasses__()
for _t in t.__subclasses__() + [t]
if hasattr(_t, "target_names")
for name in _t.target_names
}
def _target_cls(self, provider_id: str, name: str):
        if name and name.lower() in self._all_targets:  # keys are lowercased in __init__
return self._all_targets[name.lower()]
if provider_id.lower() in self._default_targets:
return self._default_targets[provider_id.lower()]
        warnings.warn(
            f"No default target specified for provider {provider_id}. "
            "Please check the provider name and try again or create an issue here: "
            "https://github.com/microsoft/qdk-python/issues.")
return Target
def create_target(
self, provider_id: str, name: str, **kwargs
) -> Target:
"""Create target from provider ID and target name.
:param workspace: Workspace
:type workspace: Workspace
:param provider_id: Provider name
:type provider_id: str
:param name: Target name
:type name: str
:return: Target instance
:rtype: Target
"""
cls = self._target_cls(provider_id, name)
if cls is not None:
return cls(
workspace=self._workspace,
name=name,
provider_id=provider_id,
**kwargs
)
def from_target_status(
self,
provider_id: str,
status: "TargetStatus",
**kwargs
):
cls = self._target_cls(provider_id, status.id)
if hasattr(cls, "from_target_status"):
return cls.from_target_status(self._workspace, status, **kwargs)
elif cls is not None:
return cls(name=status.id, **kwargs)
def get_targets(
self,
name: str = None,
provider_id: str = None,
**kwargs
) -> Union[Target, List[Target]]:
"""Create targets that are available to this workspace
filtered by name and provider ID.
:param name: Target name
:type name: str
:param workspace: Workspace
:type workspace: Workspace
:param provider_id: Provider name
:type provider_id: str
:return: One or more Target objects
:rtype: Union[Target, List[Target]]
"""
target_statuses = self._workspace._get_target_status(name, provider_id)
# TODO: Make this function always return a list in the next major release.
if len(target_statuses) == 1:
result = self.from_target_status(*target_statuses[0], **kwargs)
if name is None:
return [result]
else:
return result
else:
# Don't return redundant targets
return [
self.from_target_status(_provider_id, status, **kwargs)
for _provider_id, status in target_statuses
if _provider_id.lower() in self._default_targets
or status.id in self._all_targets
]
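# Illustrative sketch of the lookup built by _get_all_target_cls. The classes
# below are hypothetical stand-ins, not targets shipped with the SDK; they
# only show how `target_names` entries become lowercase keys for first- and
# second-degree subclasses of the base class.
def _example_target_lookup():
    class _Base:
        pass
    class _ProviderTarget(_Base):
        target_names = ("Echo-Target",)
    class _SpecialTarget(_ProviderTarget):
        target_names = ("Echo-Target.Special",)
    lookup = {
        name.lower(): _t
        for t in _Base.__subclasses__()
        for _t in t.__subclasses__() + [t]
        if hasattr(_t, "target_names")
        for name in _t.target_names
    }
    assert lookup["echo-target"] is _ProviderTarget
    assert lookup["echo-target.special"] is _SpecialTarget
    return lookup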
|
azure-quantum-python/azure-quantum/azure/quantum/target/target_factory.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/target_factory.py",
"repo_id": "azure-quantum-python",
"token_count": 2276
}
| 380 |
interactions:
- request:
body: client_id=PLACEHOLDER&grant_type=client_credentials&client_info=1&client_secret=PLACEHOLDER&scope=https%3A%2F%2Fquantum.microsoft.com%2F.default
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '144'
Content-Type:
- application/x-www-form-urlencoded
User-Agent:
- azsdk-python-identity/1.16.0 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-client-current-telemetry:
- 4|730,2|
x-client-os:
- win32
x-client-sku:
- MSAL.Python
x-client-ver:
- 1.28.0
method: POST
uri: https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/v2.0/token
response:
body:
string: '{"token_type": "Bearer", "expires_in": 1745073510, "ext_expires_in":
1745073510, "refresh_in": 31536000, "access_token": "PLACEHOLDER"}'
headers:
content-length:
- '135'
content-type:
- application/json; charset=utf-8
status:
code: 200
message: OK
- request:
body: 'b''{"id": "00000000-0000-0000-0000-000000000001", "name": "session-00000000-0000-0000-0000-000000000001",
"providerId": "microsoft.test", "target": "echo-quantinuum", "itemType": "Session"}'''
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '189'
Content-Type:
- application/json
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: PUT
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/sessions/00000000-0000-0000-0000-000000000001?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"status": "Waiting", "jobFailurePolicy": "Abort", "name": "session-00000000-0000-0000-0000-000000000001",
"id": "00000000-0000-0000-0000-000000000001", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:32.2140188Z",
"endExecutionTime": null, "costEstimate": null, "itemType": "Session"}'
headers:
connection:
- keep-alive
content-length:
- '332'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: 'b''{"containerName": "job-00000000-0000-0000-0000-000000000002"}'''
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '64'
Content-Type:
- application/json
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/storage/sasUri?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"sasUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl"}'
headers:
connection:
- keep-alive
content-length:
- '174'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 19 Apr 2024 14:38:32 GMT
x-ms-version:
- '2023-11-03'
method: GET
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?restype=container&sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ContainerNotFound</Code><Message>The
specified container does not exist.\nRequestId:91aac66c-e01e-0076-1967-92a688000000\nTime:2024-04-19T14:38:33.5214230Z</Message></Error>"
headers:
content-length:
- '223'
content-type:
- application/xml
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified container does not exist.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 19 Apr 2024 14:38:33 GMT
x-ms-version:
- '2023-11-03'
method: PUT
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?restype=container&sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: ''
headers:
content-length:
- '0'
x-ms-version:
- '2023-11-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 19 Apr 2024 14:38:33 GMT
x-ms-version:
- '2023-11-03'
method: GET
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?restype=container&sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: ''
headers:
content-length:
- '0'
x-ms-lease-state:
- available
x-ms-lease-status:
- unlocked
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: 'b''BC\xc0\xde5\x14\x00\x00\x05\x00\x00\x00b\x0c0$JY\xbef\x8d\xfb\xb4\xaf\x0bQ\x80L\x01\x00\x00\x00!\x0c\x00\x00\xb3\x01\x00\x00\x0b\x02!\x00\x02\x00\x00\x00\x16\x00\x00\x00\x07\x81#\x91A\xc8\x04I\x06\x1029\x92\x01\x84\x0c%\x05\x08\x19\x1e\x04\x8bb\x80\x14E\x02B\x92\x0bB\xa4\x102\x148\x08\x18K\n2R\x88Hp\xc4!#D\x12\x87\x8c\x10A\x92\x02d\xc8\x08\xb1\x14
CF\x88 \xc9\x012R\x84\x18*(*\x901|\xb0\\\x91 \xc5\xc8\x00\x00\x00\x89 \x00\x00\x19\x00\x00\x002"H\t
bF\x00!+$\x98\x14!%$\x98\x14\x19\''\x0c\x85\xa4\x90`Rd\\ $e\x82 \x1b\x010\x01\xa00G\x80\xd00\x02
T\xb0P\xa9\x00\x99\xc6\x08\x00:F\x00\x88\x92\x19\x00B\xb4\xd0X\t\x89Z\xa61\x02\x80\x9a\x19\x00r\xf4\xe6\x08@\xc1\x0c
\x12\x92f\x00\x8e\x10\x1d\x08\x98#\x00\x839\x82\x00\x00\x00Q\x18\x00\x000\x00\x00\x00\x1b\xd6!\xf8\xff\xff\xff\xffa(\x07w\xa0\x07y\xc8\x87_\x80\x87wH\x07w\xa0\x07`x\x87z\xa0\x07x\xa8\x07z\xf8\x05v\x08\x07q(\x07vH\x07w8\x87_\x98\x87q@\x87rh\x87p\x00\x88xH\x07y\xf8\x05x\x90\x87w0\x87t`\x87r\x98\x07`\x1c\xeaa\x1e\xe8\xe1\x1d\xda\x01
\xe4\xa1\x1c\xe2\xa1\x1e\xd2A\x1e\xca\x81\x1c~\xc1\x1d\xea\xa1\x1d~!\x1e\xeaA\x1c\xd2\x81\x1e\xe6\x01\x90\x03\x80\x90\x87r\x88\x87zH\x07y(\x07r\xf8\x05w\xa8\x87v\xf8\x05y(\x87y\xa8\x07v\xa0\x87y\x00\xe4\x00\xd8\x00\t\xff\xff\xff\xff?\x0c\xe9
\x0f\xf2P\x0e\xf6P\x0e\xf20\x0f\xe9 \x0e\xecP\x0e\xc0\x06b\x10\x00:\x00\x00\x00I\x18\x00\x00\x02\x00\x00\x00\x13\x82`\x82
\x0c\x00\x00\x1a!\x0cY\x04\xc0$\x1cC* \t\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12`H\x95d\x05\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x02\x0c\xa9\xc4\xe0J\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12`H\x95\x07\xda\x02\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x00C\xaaY\xc0\x1e
\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x80\x04\x18R\xad\x83\x18H@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t0\xa4j\x891\xa0\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12@b\x83@\xd1\xdb\x00\x00\x80,\x10\x00\x07\x00\x00\x002\x1e\x98\x10\x19\x11L\x90\x8c\t&G\xc6\x04C\xba%P\x0e#\x00\x84G\x00\x00\x00\x00\x00\xb1\x18\x00\x00\x97\x00\x00\x003\x08\x80\x1c\xc4\xe1\x1cf\x14\x01=\x88C8\x84\xc3\x8cB\x80\x07yx\x07s\x98q\x0c\xe6\x00\x0f\xed\x10\x0e\xf4\x80\x0e3\x0cB\x1e\xc2\xc1\x1d\xce\xa1\x1cf0\x05=\x88C8\x84\x83\x1b\xcc\x03=\xc8C=\x8c\x03=\xccx\x8ctp\x07{\x08\x07yH\x87pp\x07zp\x03vx\x87p
\x87\x19\xcc\x11\x0e\xec\x90\x0e\xe10\x0fn0\x0f\xe3\xf0\x0e\xf0P\x0e3\x10\xc4\x1d\xde!\x1c\xd8!\x1d\xc2a\x1ef0\x89;\xbc\x83;\xd0C9\xb4\x03<\xbc\x83<\x84\x03;\xcc\xf0\x14v`\x07{h\x077h\x87rh\x077\x80\x87p\x90\x87p`\x07v(\x07v\xf8\x05vx\x87w\x80\x87_\x08\x87q\x18\x87r\x98\x87y\x98\x81,\xee\xf0\x0e\xee\xe0\x0e\xf5\xc0\x0e\xec0\x03b\xc8\xa1\x1c\xe4\xa1\x1c\xcc\xa1\x1c\xe4\xa1\x1c\xdca\x1c\xca!\x1c\xc4\x81\x1d\xcaa\x06\xd6\x90C9\xc8C9\x98C9\xc8C9\xb8\xc38\x94C8\x88\x03;\x94\xc3/\xbc\x83<\xfc\x82;\xd4\x03;\xb0\xc3\x0c\xc7i\x87pX\x87rp\x83th\x07x`\x87t\x18\x87t\xa0\x87\x19\xceS\x0f\xee\x00\x0f\xf2P\x0e\xe4\x90\x0e\xe3@\x0f\xe1
\x0e\xecP\x0e3 (\x1d\xdc\xc1\x1e\xc2A\x1e\xd2!\x1c\xdc\x81\x1e\xdc\xe0\x1c\xe4\xe1\x1d\xea\x01\x1ef\x18Q8\xb0C:\x9c\x83;\xccP$v`\x07{h\x077`\x87wx\x07x\x98QL\xf4\x90\x0f\xf0P\x0e3\x1ej\x1e\xcaa\x1c\xe8!\x1d\xde\xc1\x1d~\x01\x1e\xe4\xa1\x1c\xcc!\x1d\xf0a\x06T\x85\x838\xcc\xc3;\xb0C=\xd0C9\xfc\xc2<\xe4C;\x88\xc3;\xb0\xc3\x8c\xc5\n\x87y\x98\x87w\x18\x87t\x08\x07z(\x07r\x98\x81\\\xe3\x10\x0e\xec\xc0\x0e\xe5P\x0e\xf30#\xc1\xd2A\x1e\xe4\xe1\x17\xd8\xe1\x1d\xde\x01\x1efH\x19;\xb0\x83=\xb4\x83\x1b\x84\xc38\x8cC9\xcc\xc3<\xb8\xc19\xc8\xc3;\xd4\x03<\xccH\xb4q\x08\x07v`\x07q\x08\x87qX\x87\x19\xdb\xc6\x0e\xec`\x0f\xed\xe0\x06\xf0
\x0f\xe50\x0f\xe5 \x0f\xf6P\x0en\x10\x0e\xe30\x0e\xe50\x0f\xf3\xe0\x06\xe9\xe0\x0e\xe4P\x0e\xf80#\xe2\xeca\x1c\xc2\x81\x1d\xd8\xe1\x17\xec!\x1d\xe6!\x1d\xc4!\x1d\xd8!\x1d\xe8!\x1ff
\x9d;\xbcC=\xb8\x039\x94\x839\xccX\xbcpp\x07wx\x07z\x08\x07zH\x87wp\x07\x00\x00y
\x00\x00.\x00\x00\x00r\x1eH C\x88\x0c\x19\tr2H #\x81\x8c\x91\x91\xd1D\xa0\x10(d<12B\x8e\x90!\xa3H\x10\xb7\x00Q\x84e\x00qir_major_versionqir_minor_versiondynamic_qubit_managementdynamic_result_management\x00#\x08\xd71\x82p!#\x08W2\x82\x80)3\x0cEP\xcc0\x18\xc21\xc3P\x0c\xc8\x0cCA
2\x12\x98\xa0\x8c\xd8\xd8\xec\xda\\\xda\xde\xc8\xea\xd8\xca\\\xcc\xd8\xc2\xce\xe6F!\x90DY\x00\x00\xa9\x18\x00\x00!\x00\x00\x00\x0b\nr(\x87w\x80\x07zXp\x98C=\xb8\xc38\xb0C9\xd0\xc3\x82\xe6\x1c\xc6\xa1\r\xe8A\x1e\xc2\xc1\x1d\xe6!\x1d\xe8!\x1d\xde\xc1\x1d\x164\xe3`\x0e\xe7P\x0f\xe1
\x0f\xe4@\x0f\xe1 \x0f\xe7P\x0e\xf4\xb0\x80\x81\x07y(\x87p`\x07vx\x87q\x08\x07z(\x07rXp\x9c\xc38\xb4\x01;\xa4\x83=\x94\xc3\x02k\x1c\xd8!\x1c\xdc\xe1\x1c\xdc
\x1c\xe4a\x1c\xdc \x1c\xe8\x81\x1e\xc2a\x1c\xd0\xa1\x1c\xc8a\x1c\xc2\x81\x1d\xd8\x01\xd1\x10\x00\x00\x06\x00\x00\x00\x07\xcc<\xa4\x83;\x9c\x03;\x94\x03=\xa0\x83<\x94C8\x90\xc3\x01\x00\x00\x00a
\x00\x00 \x00\x00\x00\x13\x04A,\x10\x00\x00\x00\x05\x00\x00\x00\x14K\xa0\x08\x88\x8c\x00\x10\x1a\x010\x8d\x0b\xe4F\x00L\xe3\x02\x00#\x06\x05\x00\x82`PD\xc5\x88A\x01\x80
\x18$\x101b`\x00 \x08\x06\xcbC\x0c#\x06\x06\x00\x82`\xf08\x840b`\x00 \x08\x06\x8f3\x04#\x06\x06\x00\x82`
5F1b`\x00 \x08\x06\x14\x13\x14#\x06\x06\x00\x82`@1B\xa1\xe1@\x00\x00\x00\x02\x00\x00\x00\x07P\x10\xcd\x14a\x00\x00\x00\x00\x00\x00q
\x00\x00\x03\x00\x00\x002\x0e\x10"\x84\x00\x95\x03\x00\x00\x00\x00\x00\x00\x00\x00]\x0c\x00\x00<\x00\x00\x00\x12\x03\x94\xe4\x01\x00\x00\x00BellState__quantum__rt__initialize__quantum__qis__h__body__quantum__qis__cnot__body__quantum__qis__mz__body__quantum__rt__array_record_output__quantum__rt__result_record_output14.0.6
f28c006a5895fc0e329fe15fead81e37457cb1d1batch\x00\x00\x00\x00'''
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '5477'
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-blob-type:
- BlockBlob
x-ms-date:
- Fri, 19 Apr 2024 14:38:34 GMT
x-ms-version:
- '2023-11-03'
method: PUT
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: ''
headers:
content-length:
- '0'
x-ms-version:
- '2023-11-03'
status:
code: 201
message: Created
- request:
body: 'b''{"id": "00000000-0000-0000-0000-000000000002", "name": "Job 1", "providerId":
"microsoft.test", "target": "echo-quantinuum", "itemType": "Job", "sessionId":
"00000000-0000-0000-0000-000000000001", "containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "outputDataFormat":
"honeywell.qir.v1"}'''
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '750'
Content-Type:
- application/json
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: PUT
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcw",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Waiting", "jobType": "QuantumComputing", "outputDataFormat": "honeywell.qir.v1",
"outputDataUri": "https://mystorage.blob.core.windows.net:443/job-00000000-0000-0000-0000-000000000002/outputData?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"beginExecutionTime": null, "cancellationTime": null, "quantumComputingData":
null, "errorData": null, "isCancelling": false, "tags": [], "name": "Job 1",
"id": "00000000-0000-0000-0000-000000000002", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:34.8503456+00:00",
"endExecutionTime": null, "costEstimate": null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1305'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: 'b''{"containerName": "job-00000000-0000-0000-0000-000000000003"}'''
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '64'
Content-Type:
- application/json
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/storage/sasUri?api-version=2022-09-12-preview&test-sequence-id=2
response:
body:
string: '{"sasUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl"}'
headers:
connection:
- keep-alive
content-length:
- '174'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 19 Apr 2024 14:38:35 GMT
x-ms-version:
- '2023-11-03'
method: GET
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?restype=container&sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ContainerNotFound</Code><Message>The
specified container does not exist.\nRequestId:1e990d8d-901e-0031-2067-92cdd3000000\nTime:2024-04-19T14:38:36.0064833Z</Message></Error>"
headers:
content-length:
- '223'
content-type:
- application/xml
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified container does not exist.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 19 Apr 2024 14:38:35 GMT
x-ms-version:
- '2023-11-03'
method: PUT
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?restype=container&sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: ''
headers:
content-length:
- '0'
x-ms-version:
- '2023-11-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 19 Apr 2024 14:38:36 GMT
x-ms-version:
- '2023-11-03'
method: GET
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?restype=container&sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: ''
headers:
content-length:
- '0'
x-ms-lease-state:
- available
x-ms-lease-status:
- unlocked
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: 'b''BC\xc0\xde5\x14\x00\x00\x05\x00\x00\x00b\x0c0$JY\xbef\x8d\xfb\xb4\xaf\x0bQ\x80L\x01\x00\x00\x00!\x0c\x00\x00\xb3\x01\x00\x00\x0b\x02!\x00\x02\x00\x00\x00\x16\x00\x00\x00\x07\x81#\x91A\xc8\x04I\x06\x1029\x92\x01\x84\x0c%\x05\x08\x19\x1e\x04\x8bb\x80\x14E\x02B\x92\x0bB\xa4\x102\x148\x08\x18K\n2R\x88Hp\xc4!#D\x12\x87\x8c\x10A\x92\x02d\xc8\x08\xb1\x14
CF\x88 \xc9\x012R\x84\x18*(*\x901|\xb0\\\x91 \xc5\xc8\x00\x00\x00\x89 \x00\x00\x19\x00\x00\x002"H\t
bF\x00!+$\x98\x14!%$\x98\x14\x19\''\x0c\x85\xa4\x90`Rd\\ $e\x82 \x1b\x010\x01\xa00G\x80\xd00\x02
T\xb0P\xa9\x00\x99\xc6\x08\x00:F\x00\x88\x92\x19\x00B\xb4\xd0X\t\x89Z\xa61\x02\x80\x9a\x19\x00r\xf4\xe6\x08@\xc1\x0c
\x12\x92f\x00\x8e\x10\x1d\x08\x98#\x00\x839\x82\x00\x00\x00Q\x18\x00\x000\x00\x00\x00\x1b\xd6!\xf8\xff\xff\xff\xffa(\x07w\xa0\x07y\xc8\x87_\x80\x87wH\x07w\xa0\x07`x\x87z\xa0\x07x\xa8\x07z\xf8\x05v\x08\x07q(\x07vH\x07w8\x87_\x98\x87q@\x87rh\x87p\x00\x88xH\x07y\xf8\x05x\x90\x87w0\x87t`\x87r\x98\x07`\x1c\xeaa\x1e\xe8\xe1\x1d\xda\x01
\xe4\xa1\x1c\xe2\xa1\x1e\xd2A\x1e\xca\x81\x1c~\xc1\x1d\xea\xa1\x1d~!\x1e\xeaA\x1c\xd2\x81\x1e\xe6\x01\x90\x03\x80\x90\x87r\x88\x87zH\x07y(\x07r\xf8\x05w\xa8\x87v\xf8\x05y(\x87y\xa8\x07v\xa0\x87y\x00\xe4\x00\xd8\x00\t\xff\xff\xff\xff?\x0c\xe9
\x0f\xf2P\x0e\xf6P\x0e\xf20\x0f\xe9 \x0e\xecP\x0e\xc0\x06b\x10\x00:\x00\x00\x00I\x18\x00\x00\x02\x00\x00\x00\x13\x82`\x82
\x0c\x00\x00\x1a!\x0cY\x04\xc0$\x1cC* \t\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12`H\x95d\x05\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x02\x0c\xa9\xc4\xe0J\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12`H\x95\x07\xda\x02\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x00C\xaaY\xc0\x1e
\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x80\x04\x18R\xad\x83\x18H@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t0\xa4j\x891\xa0\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12@b\x83@\xd1\xdb\x00\x00\x80,\x10\x00\x07\x00\x00\x002\x1e\x98\x10\x19\x11L\x90\x8c\t&G\xc6\x04C\xba%P\x0e#\x00\x84G\x00\x00\x00\x00\x00\xb1\x18\x00\x00\x97\x00\x00\x003\x08\x80\x1c\xc4\xe1\x1cf\x14\x01=\x88C8\x84\xc3\x8cB\x80\x07yx\x07s\x98q\x0c\xe6\x00\x0f\xed\x10\x0e\xf4\x80\x0e3\x0cB\x1e\xc2\xc1\x1d\xce\xa1\x1cf0\x05=\x88C8\x84\x83\x1b\xcc\x03=\xc8C=\x8c\x03=\xccx\x8ctp\x07{\x08\x07yH\x87pp\x07zp\x03vx\x87p
\x87\x19\xcc\x11\x0e\xec\x90\x0e\xe10\x0fn0\x0f\xe3\xf0\x0e\xf0P\x0e3\x10\xc4\x1d\xde!\x1c\xd8!\x1d\xc2a\x1ef0\x89;\xbc\x83;\xd0C9\xb4\x03<\xbc\x83<\x84\x03;\xcc\xf0\x14v`\x07{h\x077h\x87rh\x077\x80\x87p\x90\x87p`\x07v(\x07v\xf8\x05vx\x87w\x80\x87_\x08\x87q\x18\x87r\x98\x87y\x98\x81,\xee\xf0\x0e\xee\xe0\x0e\xf5\xc0\x0e\xec0\x03b\xc8\xa1\x1c\xe4\xa1\x1c\xcc\xa1\x1c\xe4\xa1\x1c\xdca\x1c\xca!\x1c\xc4\x81\x1d\xcaa\x06\xd6\x90C9\xc8C9\x98C9\xc8C9\xb8\xc38\x94C8\x88\x03;\x94\xc3/\xbc\x83<\xfc\x82;\xd4\x03;\xb0\xc3\x0c\xc7i\x87pX\x87rp\x83th\x07x`\x87t\x18\x87t\xa0\x87\x19\xceS\x0f\xee\x00\x0f\xf2P\x0e\xe4\x90\x0e\xe3@\x0f\xe1
\x0e\xecP\x0e3 (\x1d\xdc\xc1\x1e\xc2A\x1e\xd2!\x1c\xdc\x81\x1e\xdc\xe0\x1c\xe4\xe1\x1d\xea\x01\x1ef\x18Q8\xb0C:\x9c\x83;\xccP$v`\x07{h\x077`\x87wx\x07x\x98QL\xf4\x90\x0f\xf0P\x0e3\x1ej\x1e\xcaa\x1c\xe8!\x1d\xde\xc1\x1d~\x01\x1e\xe4\xa1\x1c\xcc!\x1d\xf0a\x06T\x85\x838\xcc\xc3;\xb0C=\xd0C9\xfc\xc2<\xe4C;\x88\xc3;\xb0\xc3\x8c\xc5\n\x87y\x98\x87w\x18\x87t\x08\x07z(\x07r\x98\x81\\\xe3\x10\x0e\xec\xc0\x0e\xe5P\x0e\xf30#\xc1\xd2A\x1e\xe4\xe1\x17\xd8\xe1\x1d\xde\x01\x1efH\x19;\xb0\x83=\xb4\x83\x1b\x84\xc38\x8cC9\xcc\xc3<\xb8\xc19\xc8\xc3;\xd4\x03<\xccH\xb4q\x08\x07v`\x07q\x08\x87qX\x87\x19\xdb\xc6\x0e\xec`\x0f\xed\xe0\x06\xf0
\x0f\xe50\x0f\xe5 \x0f\xf6P\x0en\x10\x0e\xe30\x0e\xe50\x0f\xf3\xe0\x06\xe9\xe0\x0e\xe4P\x0e\xf80#\xe2\xeca\x1c\xc2\x81\x1d\xd8\xe1\x17\xec!\x1d\xe6!\x1d\xc4!\x1d\xd8!\x1d\xe8!\x1ff
\x9d;\xbcC=\xb8\x039\x94\x839\xccX\xbcpp\x07wx\x07z\x08\x07zH\x87wp\x07\x00\x00y
\x00\x00.\x00\x00\x00r\x1eH C\x88\x0c\x19\tr2H #\x81\x8c\x91\x91\xd1D\xa0\x10(d<12B\x8e\x90!\xa3H\x10\xb7\x00Q\x84e\x00qir_major_versionqir_minor_versiondynamic_qubit_managementdynamic_result_management\x00#\x08\xd71\x82p!#\x08W2\x82\x80)3\x0cEP\xcc0\x18\xc21\xc3P\x0c\xc8\x0cCA
2\x12\x98\xa0\x8c\xd8\xd8\xec\xda\\\xda\xde\xc8\xea\xd8\xca\\\xcc\xd8\xc2\xce\xe6F!\x90DY\x00\x00\xa9\x18\x00\x00!\x00\x00\x00\x0b\nr(\x87w\x80\x07zXp\x98C=\xb8\xc38\xb0C9\xd0\xc3\x82\xe6\x1c\xc6\xa1\r\xe8A\x1e\xc2\xc1\x1d\xe6!\x1d\xe8!\x1d\xde\xc1\x1d\x164\xe3`\x0e\xe7P\x0f\xe1
\x0f\xe4@\x0f\xe1 \x0f\xe7P\x0e\xf4\xb0\x80\x81\x07y(\x87p`\x07vx\x87q\x08\x07z(\x07rXp\x9c\xc38\xb4\x01;\xa4\x83=\x94\xc3\x02k\x1c\xd8!\x1c\xdc\xe1\x1c\xdc
\x1c\xe4a\x1c\xdc \x1c\xe8\x81\x1e\xc2a\x1c\xd0\xa1\x1c\xc8a\x1c\xc2\x81\x1d\xd8\x01\xd1\x10\x00\x00\x06\x00\x00\x00\x07\xcc<\xa4\x83;\x9c\x03;\x94\x03=\xa0\x83<\x94C8\x90\xc3\x01\x00\x00\x00a
\x00\x00 \x00\x00\x00\x13\x04A,\x10\x00\x00\x00\x05\x00\x00\x00\x14K\xa0\x08\x88\x8c\x00\x10\x1a\x010\x8d\x0b\xe4F\x00L\xe3\x02\x00#\x06\x05\x00\x82`PD\xc5\x88A\x01\x80
\x18$\x101b`\x00 \x08\x06\xcbC\x0c#\x06\x06\x00\x82`\xf08\x840b`\x00 \x08\x06\x8f3\x04#\x06\x06\x00\x82`
5F1b`\x00 \x08\x06\x14\x13\x14#\x06\x06\x00\x82`@1B\xa1\xe1@\x00\x00\x00\x02\x00\x00\x00\x07P\x10\xcd\x14a\x00\x00\x00\x00\x00\x00q
\x00\x00\x03\x00\x00\x002\x0e\x10"\x84\x00\x95\x03\x00\x00\x00\x00\x00\x00\x00\x00]\x0c\x00\x00<\x00\x00\x00\x12\x03\x94\xe4\x01\x00\x00\x00BellState__quantum__rt__initialize__quantum__qis__h__body__quantum__qis__cnot__body__quantum__qis__mz__body__quantum__rt__array_record_output__quantum__rt__result_record_output14.0.6
f28c006a5895fc0e329fe15fead81e37457cb1d1batch\x00\x00\x00\x00'''
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '5477'
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-blob/12.19.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-ms-blob-type:
- BlockBlob
x-ms-date:
- Fri, 19 Apr 2024 14:38:36 GMT
x-ms-version:
- '2023-11-03'
method: PUT
uri: https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl
response:
body:
string: ''
headers:
content-length:
- '0'
x-ms-version:
- '2023-11-03'
status:
code: 201
message: Created
- request:
body: 'b''{"id": "00000000-0000-0000-0000-000000000003", "name": "Job 2", "providerId":
"microsoft.test", "target": "echo-quantinuum", "itemType": "Job", "sessionId":
"00000000-0000-0000-0000-000000000001", "containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "outputDataFormat":
"honeywell.qir.v1"}'''
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '750'
Content-Type:
- application/json
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: PUT
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Waiting", "jobType": "QuantumComputing", "outputDataFormat": "honeywell.qir.v1",
"outputDataUri": "https://mystorage.blob.core.windows.net:443/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"beginExecutionTime": null, "cancellationTime": null, "quantumComputingData":
null, "errorData": null, "isCancelling": false, "tags": [], "name": "Job 2",
"id": "00000000-0000-0000-0000-000000000003", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:37.3472477+00:00",
"endExecutionTime": null, "costEstimate": null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1234'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Waiting", "jobType": "QuantumComputing", "outputDataFormat": "honeywell.qir.v1",
"outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.output.json",
"beginExecutionTime": null, "cancellationTime": null, "quantumComputingData":
{"count": 1}, "errorData": null, "isCancelling": false, "tags": [], "name":
"Job 1", "id": "00000000-0000-0000-0000-000000000002", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:34.8503456+00:00",
"endExecutionTime": null, "costEstimate": null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1465'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=2
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.output.json",
"beginExecutionTime": "2024-04-19T14:38:37.829Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 1", "id": "00000000-0000-0000-0000-000000000002",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:34.8503456+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=3
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.output.json",
"beginExecutionTime": "2024-04-19T14:38:37.829Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 1", "id": "00000000-0000-0000-0000-000000000002",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:34.8503456+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=4
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.output.json",
"beginExecutionTime": "2024-04-19T14:38:37.829Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 1", "id": "00000000-0000-0000-0000-000000000002",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:34.8503456+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=5
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Succeeded", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/rawOutputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.output.json",
"beginExecutionTime": "2024-04-19T14:38:37.829Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 1", "id": "00000000-0000-0000-0000-000000000002",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:34.8503456+00:00", "endExecutionTime": "2024-04-19T14:38:53.635Z",
"costEstimate": null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1514'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/sessions/00000000-0000-0000-0000-000000000001?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"status": "Executing", "jobFailurePolicy": "Abort", "name": "session-00000000-0000-0000-0000-000000000001",
"id": "00000000-0000-0000-0000-000000000001", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:32.2140188Z",
"endExecutionTime": null, "costEstimate": null, "itemType": "Session"}'
headers:
connection:
- keep-alive
content-length:
- '334'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- testapp-azure-quantum-qiskit azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/sessions/00000000-0000-0000-0000-000000000001:close?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"status": "Executing", "jobFailurePolicy": "Abort", "name": "session-00000000-0000-0000-0000-000000000001",
"id": "00000000-0000-0000-0000-000000000001", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:32.2140188Z",
"endExecutionTime": null, "costEstimate": null, "itemType": "Session"}'
headers:
connection:
- keep-alive
content-length:
- '334'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: client_id=PLACEHOLDER&grant_type=client_credentials&client_info=1&client_secret=PLACEHOLDER&scope=https%3A%2F%2Fquantum.microsoft.com%2F.default
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '144'
Content-Type:
- application/x-www-form-urlencoded
User-Agent:
- azsdk-python-identity/1.16.0 Python/3.9.19 (Windows-10-10.0.22631-SP0)
x-client-current-telemetry:
- 4|730,2|
x-client-os:
- win32
x-client-sku:
- MSAL.Python
x-client-ver:
- 1.28.0
method: POST
uri: https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/v2.0/token
response:
body:
string: '{"token_type": "Bearer", "expires_in": 1745073539, "ext_expires_in":
1745073539, "refresh_in": 31536000, "access_token": "PLACEHOLDER"}'
headers:
content-length:
- '135'
content-type:
- application/json; charset=utf-8
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/sessions/00000000-0000-0000-0000-000000000001?api-version=2022-09-12-preview&test-sequence-id=2
response:
body:
string: '{"status": "Executing", "jobFailurePolicy": "Abort", "name": "session-00000000-0000-0000-0000-000000000001",
"id": "00000000-0000-0000-0000-000000000001", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:32.2140188Z",
"endExecutionTime": null, "costEstimate": null, "itemType": "Session"}'
headers:
connection:
- keep-alive
content-length:
- '334'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/sessions/00000000-0000-0000-0000-000000000001/jobs?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"value": [{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcw",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Succeeded", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net:443/job-00000000-0000-0000-0000-000000000002/rawOutputData?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"beginExecutionTime": "2024-04-19T14:38:37.829Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 1", "id": "00000000-0000-0000-0000-000000000002",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:34.8503456Z", "endExecutionTime": "2024-04-19T14:38:53.635Z",
"costEstimate": null, "itemType": "Job"}, {"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcw",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net:443/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&srt=co&ss=b&sp=racwl",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477Z", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}], "nextLink": null}'
headers:
connection:
- keep-alive
content-length:
- '2722'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000002?api-version=2022-09-12-preview&test-sequence-id=6
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Succeeded", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000002/rawOutputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B1-00000000-0000-0000-0000-000000000002.output.json",
"beginExecutionTime": "2024-04-19T14:38:37.829Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 1", "id": "00000000-0000-0000-0000-000000000002",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:34.8503456+00:00", "endExecutionTime": "2024-04-19T14:38:53.635Z",
"costEstimate": null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1514'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=1
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=2
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=3
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=4
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=5
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=6
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=7
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=8
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Executing", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/outputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": null, "costEstimate":
null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1489'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/jobs/00000000-0000-0000-0000-000000000003?api-version=2022-09-12-preview&test-sequence-id=9
response:
body:
string: '{"containerUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003?sv=PLACEHOLDER&sr=c&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=rcwl",
"inputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/inputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.input.json",
"inputDataFormat": "qir.v1", "inputParams": {"count": 100, "shots": 100, "items":
[{"entryPoint": "BellState", "arguments": []}]}, "metadata": {"qiskit": "True",
"name": "BellState", "num_qubits": "2", "metadata": "{}"}, "sessionId": "00000000-0000-0000-0000-000000000001",
"status": "Succeeded", "jobType": "QuantumComputing", "outputDataFormat":
"honeywell.qir.v1", "outputDataUri": "https://mystorage.blob.core.windows.net/job-00000000-0000-0000-0000-000000000003/rawOutputData?sv=PLACEHOLDER&sr=b&sig=PLACEHOLDER&se=2050-01-01T00%3A00%3A00Z&sp=r&rscd=attachment%3B%20filename%3DJob%2B2-00000000-0000-0000-0000-000000000003.output.json",
"beginExecutionTime": "2024-04-19T14:38:41.564Z", "cancellationTime": null,
"quantumComputingData": {"count": 1}, "errorData": null, "isCancelling": false,
"tags": [], "name": "Job 2", "id": "00000000-0000-0000-0000-000000000003",
"providerId": "microsoft.test", "target": "echo-quantinuum", "creationTime":
"2024-04-19T14:38:37.3472477+00:00", "endExecutionTime": "2024-04-19T14:39:10.788Z",
"costEstimate": null, "itemType": "Job"}'
headers:
connection:
- keep-alive
content-length:
- '1514'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- testapp azsdk-python-quantum/0.0.1 Python/3.9.19 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.quantum.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Quantum/workspaces/myworkspace/sessions/00000000-0000-0000-0000-000000000001?api-version=2022-09-12-preview&test-sequence-id=3
response:
body:
string: '{"status": "Succeeded", "jobFailurePolicy": "Abort", "name": "session-00000000-0000-0000-0000-000000000001",
"id": "00000000-0000-0000-0000-000000000001", "providerId": "microsoft.test",
"target": "echo-quantinuum", "creationTime": "2024-04-19T14:38:32.2140188Z",
"endExecutionTime": "2024-04-19T14:39:11.6887716Z", "costEstimate": null,
"itemType": "Session"}'
headers:
connection:
- keep-alive
content-length:
- '360'
content-type:
- application/json; charset=utf-8
transfer-encoding:
- chunked
status:
code: 200
message: OK
version: 1
|
azure-quantum-python/azure-quantum/tests/unit/recordings/test_session_job_qiskit_circuit_echo_quantinuum.yaml/0
|
{
"file_path": "azure-quantum-python/azure-quantum/tests/unit/recordings/test_session_job_qiskit_circuit_echo_quantinuum.yaml",
"repo_id": "azure-quantum-python",
"token_count": 37456
}
| 381 |
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
from typing import Optional
import pytest
from azure.quantum import JobStatus
from azure.quantum.target import Quantinuum
from common import QuantumTestBase, DEFAULT_TIMEOUT_SECS
class TestQuantinuum(QuantumTestBase):
def _teleport(self):
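        """OPENQASM 2.0 teleportation circuit used by the tests below.
        q[0] and q[1] share a Bell pair, q[2] is prepared in the |-> state,
        and a Bell measurement on q[2]/q[0] drives the classically controlled
        X and Z corrections on q[1]. The final H maps the teleported |-> to
        |1>, so on an ideal device c2 reads 1 on every shot while c0 and c1
        are uniformly random Bell-measurement outcomes.
        """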
return """OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
creg c0[1];
creg c1[1];
creg c2[1];
h q[0];
cx q[0], q[1];
x q[2];
h q[2];
cx q[2], q[0];
h q[2];
measure q[0] -> c0[0];
if (c0==1) x q[1];
measure q[2] -> c1[0];
if (c1==1) z q[1];
h q[1];
measure q[1] -> c2[0];
"""
@pytest.mark.quantinuum
def test_job_estimate_cost_quantinuum(self):
workspace = self.create_workspace()
circuit = self._teleport()
target = Quantinuum(workspace=workspace, name="quantinuum.sim.h1-1sc")
cost = target.estimate_cost(circuit, shots=100e3)
self.assertEqual(cost.estimated_total, 0.0)
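        # Rough cross-check of the QPU number below, assuming Quantinuum's
        # published HQC formula HQC = 5 + C*(N_1q + 10*N_2q + 5*N_meas)/5000:
        # the teleport circuit has 7 single-qubit gates (including the two
        # classically controlled corrections), 2 CX gates, and 3 measurements,
        # so C = 100,000 shots gives 5 + 100000*(7 + 20 + 15)/5000 = 845 HQC.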
target = Quantinuum(workspace=workspace, name="quantinuum.qpu.h1-1")
cost = target.estimate_cost(circuit, shots=100e3)
self.assertEqual(cost.estimated_total, 845.0)
target = Quantinuum(workspace=workspace, name="quantinuum.sim.h2-1sc")
cost = target.estimate_cost(circuit, shots=100e3)
self.assertEqual(cost.estimated_total, 0.0)
target = Quantinuum(workspace=workspace, name="quantinuum.qpu.h2-1")
cost = target.estimate_cost(circuit, shots=100e3)
self.assertEqual(cost.estimated_total, 845.0)
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum(self):
self._test_job_submit_quantinuum("quantinuum.sim.h1-1e")
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_with_shots(self):
self._test_job_submit_quantinuum("quantinuum.sim.h1-1e", shots=100)
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_with_none_shots(self):
self._test_job_submit_quantinuum("quantinuum.sim.h1-1e", shots=None)
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_with_deprecated_num_shots(self):
with pytest.warns(
DeprecationWarning,
match="The 'num_shots' parameter will be deprecated. Please, use 'shots' parameter instead."
):
self._test_job_submit_quantinuum("quantinuum.sim.h1-1e", shots=100, shots_as_deprecated_num_shots=True)
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_with_shots_and_deprecated_num_shots(self):
workspace = self.create_workspace()
circuit = self._teleport()
target = workspace.get_targets("quantinuum.sim.h1-1e")
shots = 100
with pytest.warns(
DeprecationWarning,
match="Both 'shots' and 'num_shots' parameters were specified. Defaulting to 'shots' parameter. "
"Please, use 'shots' since 'num_shots' will be deprecated."
):
job = target.submit(
circuit,
shots=shots,
num_shots=10,
)
job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
assert job.details.input_params["count"] == shots
@pytest.mark.quantinuum
@pytest.mark.live_test
    def test_job_submit_quantinuum_with_conflicting_shots_and_count_from_input_params(self):
workspace = self.create_workspace()
circuit = self._teleport()
target = workspace.get_targets("quantinuum.sim.h1-1e")
shots = 100
with pytest.warns(
match="Parameter 'shots' conflicts with the 'count' field of the 'input_params' parameter. "
"Please, provide only one option for setting shots. Defaulting to 'shots' parameter.",
):
job = target.submit(
circuit,
shots=shots,
input_params={"count": 20}
)
job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
assert job.details.input_params["count"] == shots
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_with_count_from_input_params(self):
workspace = self.create_workspace()
circuit = self._teleport()
target = workspace.get_targets("quantinuum.sim.h1-1e")
shots = 100
with pytest.warns(
match="Field 'count' from the 'input_params' parameter is subject to change in future versions. "
"Please, use 'shots' parameter instead."
):
job = target.submit(
circuit,
input_params={"count": shots}
)
job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
assert job.details.input_params["count"] == shots
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_h2_1e(self):
self._test_job_submit_quantinuum("quantinuum.sim.h2-1e")
@pytest.mark.quantinuum
@pytest.mark.live_test
def test_job_submit_quantinuum_h2_1sc(self):
self._test_job_submit_quantinuum("quantinuum.sim.h2-1sc")
@pytest.mark.quantinuum
@pytest.mark.skip("Target was unavailable at the moment of the recording")
def test_job_submit_quantinuum_h2_1qpu(self):
self._test_job_submit_quantinuum("quantinuum.qpu.h2-1")
def _test_job_submit_quantinuum(
self,
target_name,
        shots: Optional[int] = None,
shots_as_deprecated_num_shots: bool = False
):
workspace = self.create_workspace()
circuit = self._teleport()
target = workspace.get_targets(target_name)
additional_kwargs = {}
if shots:
if shots_as_deprecated_num_shots:
additional_kwargs["num_shots"] = shots
else:
additional_kwargs["shots"] = shots
job = target.submit(
circuit,
**additional_kwargs,
)
self.assertEqual(False, job.has_completed())
job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
self.assertEqual(True, job.has_completed())
self.assertEqual(job.details.status, JobStatus.SUCCEEDED)
job = workspace.get_job(job.id)
self.assertEqual(True, job.has_completed())
results = job.get_results(timeout_secs=DEFAULT_TIMEOUT_SECS)
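        # c0 and c1 are uniformly random Bell-measurement bits, so delta=1
        # merely checks that they are valid bit values; for the ideal circuit
        # c2 is deterministically 1, and delta=1 keeps that check tolerant of
        # occasional noise on real hardware.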
self.assertAlmostEqual(int(results["c0"][0]), 1, delta=1)
self.assertAlmostEqual(int(results["c1"][0]), 1, delta=1)
self.assertAlmostEqual(int(results["c2"][0]), 1, delta=1)
|
azure-quantum-python/azure-quantum/tests/unit/test_quantinuum.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/tests/unit/test_quantinuum.py",
"repo_id": "azure-quantum-python",
"token_count": 3183
}
| 382 |
<jupyter_start><jupyter_text>Quantum algorithms for fast convolutions and hidden shifts. Welcome, this notebook explores the execution of _hidden shift problems_ on a quantum computer.This notebook does assume some quantum knowledge. We invite you to have a look into our 👋🌎 _Hello world_ samples in the notebook gallery and the [Quantum Katas](https://learn.microsoft.com/azure/quantum/tutorial-qdk-intro-to-katas) for introductory content on quantum computing.One way to think about the space of hidden shift problems is that we are using the quantum computer to help us compute the convolution of two functions. It turns out that a quantum computer can in principle compute convolutions in a super-fast way, which in turn is based on the quantum computer's ability to compute Fourier transforms in a super-fast way. The way convolutions are used is to find unknown shifts ("offsets") between two functions. We call this the "hidden shift problem." Like Shor's algorithm for factoring, the hidden shift problem is a natural source of problems for which a quantum computer has an exponential advantage over the best known classical algorithms. This may eventually help in solving deconvolution problems and enable us to efficiently find patterns in complex data sets, for example finding the pattern on the right-hand side in the graph:You can have a look into [arXiv:0812.0380](https://arxiv.org/abs/0812.0380) for general background on hidden shifts and related problems and into [arXiv:0811.3208](https://arxiv.org/abs/0811.3208) for the case of hidden shifts over Boolean functions.We will investigate the latter case, in which one is given two Boolean bent functions $f$ and $g$ with the promise that they satisfy the relation$$ g(x) = f(x \oplus s) $$for all $x$, where $s$ is a hidden bitstring, called _shift_, that we would like to find.A Boolean function is _bent_ if it is as far from linear as possible. In particular, bent functions have flat Fourier spectra, such that each Fourier coefficient is equal in absolute value.In this case, the quantum algorithm described in [arXiv:0811.3208](https://arxiv.org/abs/0811.3208) uses black-box phase oracles $U_{f^\star}$ and $U_g$ for $f^\star$ and $g$, respectively, where $f^\star$ is the dual bent function to $f$ (defined in more detail below), and computes the hidden shift $s$ between $f$ and $g$.The quantum circuit for this type of hidden shift problem is as follows:Black-box phase oracles compute the value of a Boolean function $f(x)$ into the phase, i.e., $U_f = \sum_x (-1)^{f(x)}|x\rangle\langle x|$.More information on phase oracles can be found in the [Quantum Katas](https://github.com/microsoft/QuantumKatas/tree/main/tutorials/Oracles). 1. Connect to the Azure Quantum workspace. Let's start by connecting to the Azure Quantum service.We construct an instance of the `AzureQuantumProvider`. Note that it's imported from `azure.quantum.qiskit`.<jupyter_code>from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider
workspace = Workspace(
resource_id = "",
location = "",
)
provider = AzureQuantumProvider(workspace)<jupyter_output><empty_output><jupyter_text>Next we are going to import some packages to describe and visualize quantum circuits from the `qiskit` package.We'll also import packages from _NumPy_ and _Matplotlib_ to create plots from the experimental data.Finally, we'll list the targets included in our workspace.<jupyter_code>from qiskit import QuantumCircuit, QuantumRegister
from qiskit.compiler import transpile
from qiskit.visualization import plot_histogram
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
print("This workspace's targets:")
for backend in provider.backends():
print("- " + backend.name())<jupyter_output>This workspace's targets:
- quantinuum.qpu.h1-1
- quantinuum.sim.h1-1sc
- quantinuum.sim.h1-1e
- ionq.qpu
- ionq.simulator<jupyter_text>We will be using some of these targets in this notebook, but whenever you see such a target's name, note that you can easily exchange it for another target from this list. (Note, however, that some of these targets are API validators and do not simulate or execute quantum functionality.)You can learn more about the various targets in the 👋🌎 _Hello world_ samples. 2. Build a simple hidden shift problem. Let's first create a function that creates hidden shift circuits from the phase oracle circuits for $U_g$ and $U_{f^\star}$ that are passed to it.<jupyter_code>def hidden_shift(oracle_g, oracle_fstar):
"""Creates a hidden shift circuit based on phase oracle circuits for `oracle_g` and `oracle_fstar`."""
# Obtain the number of qubits from oracle_g
num_qubits = oracle_g.num_qubits
# Check that number of qubits are the same for both oracles
assert num_qubits == oracle_fstar.num_qubits, "Number of qubits for both oracles must be equal"
# A quantum register for all qubits we are using in this circuit.
qs = QuantumRegister(num_qubits, name="q")
# This constructs the quantum circuit
circ = QuantumCircuit(qs, name="Hidden shift")
# A layer of H gates
circ.h(qs)
# Apply $U_g$ (rename it for visualization)
oracle_g.name = "Ug"
circ.append(oracle_g, qs)
# Another layer of H gates
circ.h(qs)
# Apply $U_{f^\star}$
oracle_fstar.name = "Uf*"
circ.append(oracle_fstar, qs)
# And a final layer of H gates
circ.h(qs)
circ.measure_all()
    return circ<jupyter_output><empty_output><jupyter_text>We now have a powerful routine to describe various hidden shift problems based on Boolean bent functions.The first bent function we will investigate is the inner product$$ f(x_1, \dots, x_{2k}) = (x_1 \land x_2) \oplus (x_3 \land x_4) \oplus \cdots \oplus (x_{2k-1} \land x_{2k}) $$An appealing property of this function is that it is equal to its dual, i.e., $f^\star = f$.The following helper circuit implements this function as a phase oracle $U_f$ by using CZ gates for each AND pair in the inner product.Its effect on the computational basis state $|x\rangle$ is $(-1)^{f(x)}|x\rangle$.In the following Python code the argument `num_qubits` corresponds to $2k$.<jupyter_code>def inner_product(num_qubits):
"""Creates the inner product of two qubit registers, which are the alternating qubits over `num_qubits` bits"""
# Number of qubits must be even
assert num_qubits % 2 == 0, "Number of qubits must be even"
# Create a quantum circuit
circ = QuantumCircuit(num_qubits, name='inner_product')
# Apply CZ gates to pairs of even and odd qubit indices
for c, t in zip(circ.qubits[::2], circ.qubits[1::2]):
circ.cz(c, t)
return circ<jupyter_output><empty_output><jupyter_text>Next, we want to implement $g(x) = f(x \oplus s)$ for some shift $s$.For this purpose, we first construct a building block that computes the XOR operation with $s$ (passed as argument `shift`) in-place into a quantum register of length `num_bits`.More precisely, the action of this building block on the computational basis state $|x\rangle$ is $|x \oplus s\rangle$, where $\oplus$ is the bit-wise XOR operation.<jupyter_code>def xor(shift, num_bits):
"""Bitwise XOR of `num_bits`-bit value `shift`"""
# Shift must be smaller than 2^num_bits
assert shift >= 0 and shift < 2**num_bits, f"Shift must be between 0 and {2**num_bits - 1}"
# Create a quantum circuit with `num_bits` qubits
circ = QuantumCircuit(num_bits, name=f'xor({shift})')
# Returns all indexes which correspond to a `1` in the binary expansion of `shift`
ones = [idx for idx in range(num_bits) if (shift >> idx) & 1 == 1]
# Apply an X operation to all indexes in `ones` (this only works if `ones` is not empty)
if ones:
circ.x(ones)
return circ<jupyter_output><empty_output><jupyter_text>The next helper function takes as input a phase oracle $U_f$ and creates a phase oracle $U_g$ such that $g(x) = f(x \oplus s)$ by conjugating the phase oracle with the XOR operation we just defined.<jupyter_code>def shifted_phase_oracle(shift, phase_oracle):
"""Creates U_g from U_f (phase_oracle), where g(x) = f(x ⊕ s)"""
# Retrieve qubits from input phase oracle
qs = phase_oracle.qubits
# Create a quantum circuit using the qubits from the phase oracle
circ = QuantumCircuit(qs)
# Compute shift into the computational state using the XOR operation
circ.append(xor(shift, phase_oracle.num_qubits), qs)
# Apply the phase oracle onto the shifted state
circ.append(phase_oracle, qs)
# Uncompute the shift by applying the XOR operation another time
circ.append(xor(shift, phase_oracle.num_qubits), qs)
    return circ<jupyter_output><empty_output><jupyter_text>We are now equipped to build a first simple hidden shift problem using the inner product as the bent function.We configure the problem by its size `num_qubits`, which should be an even number and represents the overall number of qubits $n$ in the circuit, and by `shift`, which is a non-negative number smaller than $2^n$.<jupyter_code>num_qubits = 4
shift = 5
# Create phase oracle circuit for $f^\star$
oracle_fstar = inner_product(num_qubits)
# Create phase oracle circuit for $g$
oracle_g = shifted_phase_oracle(shift, oracle_fstar)
# Create hidden shift circuit
circ = hidden_shift(oracle_g, oracle_fstar)
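# On an ideal simulator, measuring this circuit should return the bitstring
# '0101' (= 5, our hidden shift) with certainty.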
# Print out the circuit (and ensure that the circuit is not paginated)
circ.draw(fold=-1)<jupyter_output><empty_output><jupyter_text>3. Submit the quantum program to IonQ's and Quantinuum's simulators. Next, we are executing this circuit on IonQ's full-state simulator `ionq.simulator` and on Quantinuum's simulator `quantinuum.sim.h1-1e`.Afterwards, we will write a loop in which we run several experiments using various shifts.Before sending the circuit to a backend, we must expand all building blocks.The easiest way to do this is by transpiling the quantum circuit into the target gate set of the backend's quantum computer.To this end, we construct a backend, and call the `transpile` function to obtain an expanded and possibly optimized quantum circuit (note that the `X` gates were merged with the `H` gates).Note that any target you have enabled in this workspace can be used here. Azure Quantum makes it extremely easy to submit the same quantum program to different providers.<jupyter_code>ionq_backend = provider.get_backend("ionq.simulator")
transpiled = transpile(circ, ionq_backend)
transpiled.draw()<jupyter_output><empty_output><jupyter_text>Now we are submitting the transpiled circuit to the selected backend for 100 shots.We'll get back a `job` instance and print its job ID.<jupyter_code># Using the IonQ simulator target, call "run" to submit the job. We'll
# use 100 shots (simulated runs).
job = ionq_backend.run(transpiled, shots=100)
print("Job id:", job.id())<jupyter_output>Job id: a9aceb1c-9ec4-11ec-93c4-acde48001122<jupyter_text>The job ID can be used to retrieve the results later using the [get_job method](https://learn.microsoft.com/python/azure-quantum/azure.quantum.workspace?azure-quantum-workspace-get-job) or by viewing it under the **Job management** section of the portal.This may take a minute or so ⏳. Your job will be packaged and sent to IonQ, where it will wait its turn to be run.Once the job has finished, we are retrieving the counts from the backend.With IonQ's full state simulator, we should see a 100% probability only for the selected shift.<jupyter_code>result = job.result()
plot_histogram(result.get_counts(transpiled), title="Result")<jupyter_output>Job Status: job has successfully run<jupyter_text>Now we'll create a backend for Quantinuum's noisy simulator. Before running the circuit, we are going to estimate the cost of simulating it (to learn more about job pricing, review [the Azure Quantum docs](https://learn.microsoft.com/azure/quantum/azure-quantum-job-costs)).<jupyter_code># This time we are creating a backend for Quantinuum's emulator, which also simulates noise.
quantinuum_backend = provider.get_backend("quantinuum.sim.h1-1e")
# We transpile the circuit into the backend's instruction set.
transpiled = transpile(circ, quantinuum_backend)
# And we estimate the cost of simulating the circuit 100 times.
cost = quantinuum_backend.estimate_cost(transpiled, shots=100)
print(f"Estimated cost: {cost.estimated_total} {cost.currency_code}")<jupyter_output>Estimated cost: 6.64 HQC<jupyter_text>The next code section is commented out to prevent accidental costs. Uncomment the code to run the code on the Quantinuum noisy simulator.<jupyter_code># job = quantinuum_backend.run(transpiled, shots=100)
# print("Job id:", job.id())
# result = job.result()
# plot_histogram(result.get_counts(transpiled), title="Result")<jupyter_output><empty_output><jupyter_text>4. Create a large experiment and a 3D histogram plot. Based on what we have learned, we are now running a more involved experiment.We want to explore the quantum circuit measurement results for multiple shifts, possibly all shifts.We are then plotting the histogram for each experiment in a 3D bar plot, where one axis shows all possible measurement outcomes, and the other axis lists all experiments for various shifts.We expect to see some skyscrapers reaching out whenever the shift corresponds to the measurement outcome.<jupyter_code># Number of qubits in the hidden shift experiment
num_qubits = 4
# List of shifts to perform (set to `range(1 << num_qubits)` to run all shifts)
shifts = range(1 << num_qubits)
# Store histogram data for all shifts in a matrix
histogram = np.zeros((len(shifts), 1 << num_qubits))
# Creates an experiment circuit based on shift
def experiment_circuit(shift):
oracle_f = inner_product(num_qubits)
oracle_g = shifted_phase_oracle(shift, oracle_f)
    circ = hidden_shift(oracle_g, oracle_f)  # the inner product is self-dual, so f* = f
transpiled = transpile(circ, ionq_backend)
return transpiled
# Submit jobs for each shift
jobs = [ionq_backend.run(experiment_circuit(shift), shots=100) for shift in shifts]
# After we have submitted all the jobs, we wait for each of them.
# It does not matter whether jobs finish in a different order than they were
# submitted, since we traverse the jobs in the same order as they were submitted.
for row, job in enumerate(jobs):
# Retrieve all counts from the job
counts = job.result().get_counts()
# Retrieve the counts for all input assignments
histogram[row] = [counts.get(format(idx, f"0{num_qubits}b")) or 0 for idx in range(1 << num_qubits)]
# Compute probabilities from counts
histogram[row] /= norm(histogram[row], ord=1)
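# Optional sanity check (assumes an ideal, noiseless simulator such as
# ionq.simulator): the most likely outcome of each experiment should be
# exactly its hidden shift.
for shift in shifts:
    assert np.argmax(histogram[shift]) == shift, f"Unexpected peak for shift {shift}"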
# Set up the figure and axes to print the 3D-bar plot
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1, projection='3d')
# Create x and y coordinates for the grid based on the dimensions of the matrix
x_grid, y_grid = np.meshgrid(np.arange(1 << num_qubits), np.arange(len(shifts)))
# Flatten the coordinate matrices into 1-D arrays
x, y = x_grid.ravel(), y_grid.ravel()
top = histogram.ravel()
bottom = np.zeros_like(top)
ax.bar3d(x, y, bottom, 1, 1, top, shade=True)
ax.set_title(f'3D-bar plot for hidden shift on {num_qubits} qubits')
plt.show()<jupyter_output><empty_output><jupyter_text>5. Investigating more complex bent functions. The inner product bent function can be generalized to the family of Maiorana-McFarland bent functions$$ f(x, y) = x\pi(y)^t \oplus h(y) $$where $x = x_1, \dots, x_k$, $y = y_1, \dots, y_k$, and $\pi \in S_{2^k}$ is a permutation of all possible $2^k$ bitstrings of length $k$.The inner product $x\pi(y)^t$ is as defined above, and $h$ can be any $k$-input function.The dual function of a Maiorana-McFarland function is$$ f^\star(x, y) = \pi^{-1}(x)y^t \oplus h(\pi^{-1}(x)) $$where $\pi^{-1}$ is the inverse permutation to $\pi$.The next function can create phase oracles for some of these Maiorana-McFarland functions $f$ and their dual $f^\star$, in which $h = 0$ and $\pi \in S_{2^k}$ is defined via an index permutation $\sigma \in S_k$ such that $\pi((b_{k-1}\dots b_0)_2) = (b_{\sigma(k-1)} \dots b_{\sigma(0)})_2$.Such permutations can easily be implemented using SWAP gates on the input qubits and we are using the `Permutation` class to find these SWAP gates automatically.<jupyter_code>from qiskit.circuit.library.generalized_gates.permutation import Permutation
def maiorana_mcfarland_bent_function(num_qubits, index_permutation):
"""Creates Maiorana-McFarland bent function based on index permutation"""
assert num_qubits % 2 == 0, "Number of qubits must be even"
# Create permutation circuit based on index_permutation.
# Note that the permutation is only applied on half the qubits
perm = Permutation(num_qubits // 2, index_permutation)
    # We consider the even qubits in this circuit as corresponding to x in the
    # definition above, and the odd qubits as corresponding to y.
# Create phase oracle for f
circ_f = QuantumCircuit(num_qubits, name="maiorana_mcfarland")
# Apply permutation to qubits corresponding to x
circ_f.append(perm, range(0, num_qubits, 2))
# Apply inner product to x and y
circ_f.append(inner_product(num_qubits), range(num_qubits))
# Invert permutation on x
circ_f.append(perm.reverse_ops(), range(0, num_qubits, 2))
# Create phase oracle for f_star
circ_fstar = QuantumCircuit(num_qubits, name="maiorana_mcfarland")
# Apply inverse permutation to qubits corresponding to y
circ_fstar.append(perm.reverse_ops(), range(1, num_qubits, 2))
# Apply inner product to x and y
circ_fstar.append(inner_product(num_qubits), range(num_qubits))
# Invert permutation on y
circ_fstar.append(perm, range(1, num_qubits, 2))
# Return both circuits
    return circ_f, circ_fstar<jupyter_output><empty_output><jupyter_text>Let's try an instance of this new family of bent functions. We are creating a hidden shift problem with 6 qubits and applying the index permutation $(1, 2, 0)$. Do you know which answer is hidden in the shift (👍)? Let's see whether the quantum computer can discover it, too.<jupyter_code>num_qubits = 6
qubit_permutation = [1, 2, 0]
shift = 0b101010 # The answer ;)
# Create phase oracles for $f$ and $f^star$
oracle_f, oracle_fstar = maiorana_mcfarland_bent_function(num_qubits, qubit_permutation)
# Create phase oracle for $g = f(x ⊕ s)$
oracle_g = shifted_phase_oracle(shift, oracle_f)
# Create hidden shift problem based on oracles
circ = hidden_shift(oracle_g, oracle_fstar)
# Transpile hidden shift circuit for the backend
transpiled = transpile(circ, ionq_backend)
# Submit a job
job = ionq_backend.run(transpiled, shots=100)
# Print the job's result
print(job.result().get_counts())<jupyter_output>Job Status: job has successfully run
{'101010': 100}<jupyter_text>6. Investigating the impact of noise on the quantum algorithm. In this section we run a simple experiment that adds noise to the quantum simulation and observe what happens when we crank up the number of quantum gates in the circuit, in particular, if we increase the number of entangling gates such as CZ or CNOT significantly. We expect to see a less clear peak at the correct shift and want to see this in the histogram data coming back from emulating the circuit.<jupyter_code>def toffoli_oracles(num_qubits):
"""Create two oracles for the hidden shift problem that use Toffoli gates for a state space permutation."""
assert num_qubits >= 6, "Number of bits must be at least 6"
circ_f = QuantumCircuit(num_qubits, name='IP with Toffoli')
circ_f.ccx(4, 2, 0)
circ_f.ccx(0, 2, 4)
circ_f.append(inner_product(num_qubits), range(num_qubits))
circ_f.ccx(0, 2, 4)
circ_f.ccx(4, 2, 0)
circ_fstar = QuantumCircuit(num_qubits, name='IP with Toffoli')
circ_fstar.ccx(1, 3, 5)
circ_fstar.ccx(5, 3, 1)
circ_fstar.append(inner_product(num_qubits), range(num_qubits))
circ_fstar.ccx(5, 3, 1)
circ_fstar.ccx(1, 3, 5)
return circ_f, circ_fstar<jupyter_output><empty_output><jupyter_text>Let us next create a concrete instance of the problem with 6 qubits and a shift of 17. Then, let's draw the circuit.<jupyter_code>num_qubits = 6
shift = 17
# Create phase oracles for $f$ and $f^star$
oracle_f, oracle_fstar = toffoli_oracles(num_qubits)
# Create phase oracle for $g = f(x ⊕ s)$
oracle_g = shifted_phase_oracle(shift, oracle_f)
# Create hidden shift problem based on oracles
circ = hidden_shift(oracle_g, oracle_fstar)
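# On a noiseless device this circuit would return '010001' (= 17, the hidden
# shift) on every shot; on the noisy emulator we expect a washed-out peak.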
# Print out the circuit (and ensure that the circuit is not paginated)
circ.draw(fold=-1)<jupyter_output><empty_output><jupyter_text>Next, we are sending this circuit to Quantinuum's emulator `quantinuum.sim.h1-1e`. We can use the `quantinuum_backend` instance that we created earlier, and call the `transpile` function to obtain an expanded and possibly optimized quantum circuit (note that the `X` gates were merged with the `H` gates). Before submitting the circuit to the backend, we print the estimated cost for execution.<jupyter_code>transpiled = transpile(circ, quantinuum_backend)
# And we estimate the cost of simulating the circuit 100 times
cost = quantinuum_backend.estimate_cost(transpiled, shots=100)
print(f"Estimated cost: {cost.estimated_total} {cost.currency_code}")<jupyter_output>Estimated cost: 8.96 HQC<jupyter_text>Now, we submit the transpiled circuit to the selected backend for 100 shots.The next code section is commented out to prevent accidental costs. Uncomment the code to run the code on the Quantinuum noisy simulator.<jupyter_code># # Using the simulator target, call "run" to submit the job.
# job = quantinuum_backend.run(transpiled, shots=100)
# # Get the job results (this method also waits for the Job to complete):
# result = job.result()
# # The histogram returned by the results can be sparse, so here we add any of the missing bitstring labels.
# counts = {format(n, "03b"): 0 for n in range(8)}
# counts.update(result.get_counts(circ))
# plot_histogram(counts)<jupyter_output><empty_output>
|
azure-quantum-python/samples/hidden-shift/hidden-shift.ipynb/0
|
{
"file_path": "azure-quantum-python/samples/hidden-shift/hidden-shift.ipynb",
"repo_id": "azure-quantum-python",
"token_count": 7056
}
| 383 |
#!/bin/bash
# Use flag buildJsLibOnly to only build js-lib
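# Without the flag, react-lib is installed, built, and `npm link`ed first
# (together with its bundled copy of react) so that js-lib resolves a single
# shared React instance when it links `react` and `quantum-visualization`.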
if [ "$1" == "buildJsLibOnly" ]; then
cd ../js-lib
npm link react quantum-visualization
if [ $? -eq 0 ]; then
echo 'Successfully linked react and quantum-visualization'
fi
npm run lint
npm run sortpackagejson || true
npm run build
if [ $? -eq 0 ]; then
echo 'Successfully built js-lib'
exit 0
fi
else
cd ../react-lib
npm install
if [ $? -eq 0 ]; then
echo 'Successfully installed react-lib'
fi
npm run sortpackagejson || true
npm run build
if [ $? -eq 0 ]; then
echo 'Successfully built react-lib'
fi
npm link
if [ $? -eq 0 ]; then
echo 'Successfully created link" react-lib'
fi
cd node_modules/react
npm link
if [ $? -eq 0 ]; then
echo 'Successfully created link: node_modules/react'
fi
cd ../../../js-lib
npm link react quantum-visualization
if [ $? -eq 0 ]; then
echo 'Successfully linked react and quantum-visualization to js-lib.'
fi
npm run sortpackagejson || true
npm run build
if [ $? -eq 0 ]; then
echo 'Successfully built js-lib'
fi
echo 'Successfully built js-lib and dependencies.'
echo 'js-lib located in dist/index.js.'
exit 0
fi
|
azure-quantum-python/visualization/build/build-all-local.sh/0
|
{
"file_path": "azure-quantum-python/visualization/build/build-all-local.sh",
"repo_id": "azure-quantum-python",
"token_count": 431
}
| 384 |
/*------------------------------------
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
All rights reserved.
------------------------------------ */
import React from "react";
import { create } from "react-test-renderer";
import { LegendData } from "../D3HelperFunctions";
import DonutChart, { DonutChartProps } from "../DonutChart";
describe("Donut chart tests", () => {
it("Verify Donut Chart", () => {
const testData: LegendData[] = [
{
legendTitle: "Logical qubits",
title: "Logical qubits",
value: 50,
},
{
legendTitle: "Physical qubits",
title: "Physical qubits",
value: 200,
},
];
const donutProps: DonutChartProps = {
data: testData,
width: 1000,
height: 1000,
innerRadius: 100,
outerRadius: 200,
};
const component = create(<DonutChart {...donutProps}></DonutChart>);
expect(component.toJSON()).toMatchSnapshot("DonutChart");
});
});
|
azure-quantum-python/visualization/react-lib/src/components/d3-visualization-components/__tests__/DonutChart.test.tsx/0
|
{
"file_path": "azure-quantum-python/visualization/react-lib/src/components/d3-visualization-components/__tests__/DonutChart.test.tsx",
"repo_id": "azure-quantum-python",
"token_count": 377
}
| 385 |
module.exports = {};
|
azure-quantum-python/visualization/react-lib/test-config/mocks/styleMock.js/0
|
{
"file_path": "azure-quantum-python/visualization/react-lib/test-config/mocks/styleMock.js",
"repo_id": "azure-quantum-python",
"token_count": 8
}
| 386 |
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[dev-packages]
[packages]
regex = "*"
sphinx = "*"
sphinx-autodoc-typehints = "*"
sphinx-rtd-theme = "*"
sphinx-js = {git = "https://github.com/pyodide/sphinx-js", ref = "28105a224ec6c4c3141d10e182b9feaae7f10183"}
pydantic = "<2.0"
bistring = {editable = true, path = "./../python"}
|
bistring/docs/Pipfile/0
|
{
"file_path": "bistring/docs/Pipfile",
"repo_id": "bistring",
"token_count": 172
}
| 387 |
bistring
========
[](https://www.npmjs.com/package/bistring)
The bistring library provides non-destructive versions of common string processing operations like normalization, case folding, and find/replace.
Each bistring remembers the original string, and how its substrings map to substrings of the modified version.
For example:
```js
import BiString from "bistring";
let s = new BiString("𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐, 𝖇𝖗𝖔𝖜𝖓 🦊 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 🐶");
s = s.normalize("NFKD"); // Unicode normalization
s = s.toLowerCase(); // Case-insensitivity
s = s.replace("🦊", "fox"); // Replace emoji with text
s = s.replace("🐶", "dog");
s = s.replace(/[^\w\s]+/g, ""); // Strip everything but letters and spaces
s = s.substring(0, 19); // Extract a substring
console.log(s.modified); // The modified substring, after changes
// the quick brown fox
console.log(s.original); // The original substring, before changes
// 𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐, 𝖇𝖗𝖔𝖜𝖓 🦊
```
This allows you to perform very aggressive text processing completely invisibly.
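Because every substring keeps its mapping, you can also slice the processed text and read back exactly which original characters it came from. A minimal sketch, assuming the example above (indices refer to the final 19-character result):

```js
let fox = s.slice(16, 19); // the last word of the modified text
console.log(fox.modified); // fox
console.log(fox.original); // 🦊
```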
Demo
----
[Click here](https://microsoft.github.io/bistring/demo.html) for a live demo of the bistring library in your browser.
|
bistring/js/README.md/0
|
{
"file_path": "bistring/js/README.md",
"repo_id": "bistring",
"token_count": 539
}
| 388 |
/*!
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
import BiString, { Token, Tokenization, RegExpTokenizer, SplittingTokenizer } from "..";
test("Tokenization", () => {
let text = new BiString(" The quick, brown fox jumps over the lazy dog ");
text = text.replace(",", "");
text = text.replace(/^ +| +$/g, "");
let tokens = new Tokenization(text, [
Token.slice(text, 0, 3),
Token.slice(text, 4, 9),
Token.slice(text, 10, 15),
Token.slice(text, 16, 19),
Token.slice(text, 20, 25),
Token.slice(text, 26, 30),
Token.slice(text, 31, 34),
Token.slice(text, 35, 39),
Token.slice(text, 40, 43),
]);
expect(tokens.text.equals(text)).toBe(true);
expect(tokens.textBounds(1, 3)).toEqual([4, 15]);
expect(tokens.originalBounds(1, 3)).toEqual([6, 18]);
expect(tokens.boundsForText(0, 13)).toEqual([0, 3]);
expect(tokens.boundsForOriginal(0, 13)).toEqual([0, 2]);
expect(tokens.sliceByText(34, 43).substring().equals(new BiString("lazy dog"))).toBe(true);
expect(tokens.sliceByOriginal(36, 48).substring().equals(new BiString("the lazy dog"))).toBe(true);
expect(tokens.snapTextBounds(2, 13)).toEqual([0, 15]);
expect(tokens.snapOriginalBounds(36, 47)).toEqual([34, 46]);
});
test("Tokenization.infer", () => {
const text = "the quick, brown fox"
const tokens = Tokenization.infer(text, ["the", "quick", "brown", "fox"]);
expect(tokens.substring(1, 3).equals(new BiString("quick, brown")));
expect(() => Tokenization.infer(text, ["the", "quick", "red", "fox"])).toThrow();
});
test("RegExpTokenizer", () => {
const text = new BiString(" The quick, brown fox jumps over the lazy dog ");
const tokenizer = new RegExpTokenizer(/\w+/g);
const tokens = tokenizer.tokenize(text);
expect(tokens.text).toBe(text);
expect(tokens.length).toBe(9);
expect(tokens.textBounds(0, 2)).toEqual([1, 10]);
expect(tokens.slice(0, 2).substring().equals(text.slice(1, 10))).toBe(true);
expect(tokens.sliceByText(5, 10).length).toBe(1);
expect(tokens.sliceByText(5, 11).length).toBe(1);
expect(tokens.sliceByText(3, 13).length).toBe(3);
});
test("SplittingTokenizer", () => {
const text = new BiString(" The quick, brown fox jumps over the lazy dog ");
const tokenizer = new SplittingTokenizer(/\s+/g);
const tokens = tokenizer.tokenize(text);
expect(tokens.text).toBe(text);
expect(tokens.length).toBe(9);
expect(tokens.textBounds(0, 2)).toEqual([1, 11]);
expect(tokens.slice(0, 2).substring().equals(text.slice(1, 11))).toBe(true);
expect(tokens.sliceByText(5, 10).length).toBe(1);
expect(tokens.sliceByText(5, 11).length).toBe(1);
expect(tokens.sliceByText(3, 13).length).toBe(3);
});
|
bistring/js/tests/token.test.ts/0
|
{
"file_path": "bistring/js/tests/token.test.ts",
"repo_id": "bistring",
"token_count": 1170
}
| 389 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import sys
import traceback
from datetime import datetime
from botbuilder.core import (
ConversationState,
TurnContext,
)
from botbuilder.integration.aiohttp import CloudAdapter, ConfigurationBotFrameworkAuthentication
from botbuilder.schema import ActivityTypes, Activity
class AdapterWithErrorHandler(CloudAdapter):
def __init__(
self,
settings: ConfigurationBotFrameworkAuthentication,
conversation_state: ConversationState,
):
super().__init__(settings)
self._conversation_state = conversation_state
# Catch-all for errors.
async def on_error(context: TurnContext, error: Exception):
# This check writes out errors to console log
# NOTE: In production environment, you should consider logging this to Azure
# application insights.
print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr)
traceback.print_exc()
# Send a message to the user
await context.send_activity("The bot encountered an error or bug.")
await context.send_activity(
"To continue to run this bot, please fix the bot source code."
)
# Send a trace activity if we're talking to the Bot Framework Emulator
if context.activity.channel_id == "emulator":
# Create a trace activity that contains the error object
trace_activity = Activity(
label="TurnError",
name="on_turn_error Trace",
timestamp=datetime.utcnow(),
type=ActivityTypes.trace,
value=f"{error}",
value_type="https://www.botframework.com/schemas/error",
)
# Send a trace activity, which will be displayed in Bot Framework Emulator
await context.send_activity(trace_activity)
# Clear out state
nonlocal self
await self._conversation_state.delete(context)
self.on_turn_error = on_error
|
botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/adapter_with_error_handler.py/0
|
{
"file_path": "botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/adapter_with_error_handler.py",
"repo_id": "botbuilder-python",
"token_count": 898
}
| 390 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
REQUIRES = [
"botbuilder-schema==4.16.0",
"botframework-connector==4.16.0",
"botbuilder-core==4.16.0",
"pyslack",
"slackclient",
]
TEST_REQUIRES = ["aiounittest==1.3.0"]
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, "botbuilder", "adapters", "slack", "about.py")) as f:
package_info = {}
info = f.read()
exec(info, package_info)
with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name=package_info["__title__"],
version=package_info["__version__"],
url=package_info["__uri__"],
author=package_info["__author__"],
description=package_info["__description__"],
keywords=["BotBuilderAdapters", "bots", "ai", "botframework", "botbuilder"],
long_description=long_description,
long_description_content_type="text/x-rst",
license=package_info["__license__"],
packages=["botbuilder.adapters.slack"],
install_requires=REQUIRES + TEST_REQUIRES,
tests_require=TEST_REQUIRES,
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3.7",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|
botbuilder-python/libraries/botbuilder-adapters-slack/setup.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-adapters-slack/setup.py",
"repo_id": "botbuilder-python",
"token_count": 592
}
| 391 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from enum import Enum
class LuisTelemetryConstants(str, Enum):
"""
The IBotTelemetryClient event and property names that are logged by default.
"""
luis_result = "LuisResult"
"""Event name"""
application_id_property = "applicationId"
intent_property = "intent"
intent_score_property = "intentScore"
intent2_property = "intent2"
intent_score2_property = "intentScore2"
entities_property = "entities"
question_property = "question"
activity_id_property = "activityId"
sentiment_label_property = "sentimentLabel"
sentiment_score_property = "sentimentScore"
from_id_property = "fromId"
|
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/luis/luis_telemetry_constants.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/luis/luis_telemetry_constants.py",
"repo_id": "botbuilder-python",
"token_count": 243
}
| 392 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from msrest.serialization import Model
class QueryResult(Model):
"""Represents an individual result from a knowledge base query."""
_attribute_map = {
"questions": {"key": "questions", "type": "[str]"},
"answer": {"key": "answer", "type": "str"},
"score": {"key": "score", "type": "float"},
"metadata": {"key": "metadata", "type": "[Metadata]"},
"source": {"key": "source", "type": "str"},
"id": {"key": "id", "type": "int"},
"context": {"key": "context", "type": "QnAResponseContext"},
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.questions = kwargs.get("questions", None)
self.answer = kwargs.get("answer", None)
self.score = kwargs.get("score", None)
self.metadata = kwargs.get("metadata", None)
self.source = kwargs.get("source", None)
self.context = kwargs.get("context", None)
self.id = kwargs.get("id", None) # pylint: disable=invalid-name
|
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py",
"repo_id": "botbuilder-python",
"token_count": 443
}
| 393 |
{
"text": "buy hul and 2 items",
"intents": {
"Cancel": {
"score": 0.006906527
},
"Delivery": {
"score": 0.00567273
},
"EntityTests": {
"score": 0.128755629
},
"Greeting": {
"score": 0.00450348156
},
"Help": {
"score": 0.00583425
},
"None": {
"score": 0.0135525977
},
"Roles": {
"score": 0.04635598
},
"search": {
"score": 0.008885799
},
"SpecifyName": {
"score": 0.00721160974
},
"Travel": {
"score": 0.005146626
},
"Weather_GetForecast": {
"score": 0.00913477
}
},
"entities": {
"$instance": {
"number": [
{
"endIndex": 7,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"externalEntities"
],
"startIndex": 4,
"text": "hul",
"type": "builtin.number"
},
{
"endIndex": 13,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 12,
"text": "2",
"type": "builtin.number"
}
]
},
"number": [
8,
2
]
},
"sentiment": {
"label": "positive",
"score": 0.7149857
},
"v3": {
"response": {
"prediction": {
"entities": {
"$instance": {
"number": [
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"externalEntities"
],
"startIndex": 4,
"text": "hul",
"type": "builtin.number"
},
{
"length": 1,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 12,
"text": "2",
"type": "builtin.number"
}
]
},
"number": [
8,
2
]
},
"intents": {
"Cancel": {
"score": 0.006906527
},
"Delivery": {
"score": 0.00567273
},
"EntityTests": {
"score": 0.128755629
},
"Greeting": {
"score": 0.00450348156
},
"Help": {
"score": 0.00583425
},
"None": {
"score": 0.0135525977
},
"Roles": {
"score": 0.04635598
},
"search": {
"score": 0.008885799
},
"SpecifyName": {
"score": 0.00721160974
},
"Travel": {
"score": 0.005146626
},
"Weather.GetForecast": {
"score": 0.00913477
}
},
"normalizedQuery": "buy hul and 2 items",
"sentiment": {
"label": "positive",
"score": 0.7149857
},
"topIntent": "EntityTests"
},
"query": "buy hul and 2 items"
},
"options": {
"externalEntities": [
{
"entityLength": 3,
"entityName": "number",
"resolution": 8,
"startIndex": 4
}
],
"includeAllIntents": true,
"includeAPIResults": true,
"includeInstanceData": true,
"log": true,
"preferExternalEntities": true,
"slot": "production"
}
}
}
|
botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/ExternalEntitiesAndBuiltIn_v3.json/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/ExternalEntitiesAndBuiltIn_v3.json",
"repo_id": "botbuilder-python",
"token_count": 2218
}
| 394 |
{
"entities": {
"$instance": {
"extra": [
{
"endIndex": 76,
"modelType": "Pattern.Any Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 71,
"text": "kb435",
"type": "subject"
}
],
"Part": [
{
"endIndex": 76,
"modelType": "Regex Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 71,
"text": "kb435",
"type": "Part"
}
],
"person": [
{
"endIndex": 61,
"modelType": "Pattern.Any Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 49,
"text": "bart simpson",
"type": "person"
}
],
"personName": [
{
"endIndex": 61,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 49,
"text": "bart simpson",
"type": "builtin.personName"
}
],
"subject": [
{
"endIndex": 43,
"modelType": "Pattern.Any Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 12,
"text": "something wicked this way comes",
"type": "subject"
}
]
},
"extra": [
"kb435"
],
"Part": [
"kb435"
],
"person": [
"bart simpson"
],
"personName": [
"bart simpson"
],
"subject": [
"something wicked this way comes"
]
},
"intents": {
"Cancel": {
"score": 1.01764708E-09
},
"Delivery": {
"score": 1.8E-09
},
"EntityTests": {
"score": 1.044335E-05
},
"Greeting": {
"score": 1.0875E-09
},
"Help": {
"score": 1.01764708E-09
},
"None": {
"score": 2.38094663E-06
},
"Roles": {
"score": 5.98274755E-06
},
"search": {
"score": 0.9999993
},
"SpecifyName": {
"score": 3.0666667E-09
},
"Travel": {
"score": 3.09763345E-06
},
"Weather_GetForecast": {
"score": 1.02792524E-06
}
},
"sentiment": {
"label": "negative",
"score": 0.210341513
},
"text": "email about something wicked this way comes from bart simpson and also kb435",
"v3": {
"options": {
"includeAllIntents": true,
"includeAPIResults": true,
"includeInstanceData": true,
"log": true,
"preferExternalEntities": true,
"slot": "production"
},
"response": {
"prediction": {
"entities": {
"$instance": {
"extra": [
{
"length": 5,
"modelType": "Pattern.Any Entity Extractor",
"modelTypeId": 7,
"recognitionSources": [
"model"
],
"role": "extra",
"startIndex": 71,
"text": "kb435",
"type": "subject"
}
],
"Part": [
{
"length": 5,
"modelType": "Regex Entity Extractor",
"modelTypeId": 8,
"recognitionSources": [
"model"
],
"startIndex": 71,
"text": "kb435",
"type": "Part"
}
],
"person": [
{
"length": 12,
"modelType": "Pattern.Any Entity Extractor",
"modelTypeId": 7,
"recognitionSources": [
"model"
],
"startIndex": 49,
"text": "bart simpson",
"type": "person"
}
],
"personName": [
{
"length": 12,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 49,
"text": "bart simpson",
"type": "builtin.personName"
}
],
"subject": [
{
"length": 31,
"modelType": "Pattern.Any Entity Extractor",
"modelTypeId": 7,
"recognitionSources": [
"model"
],
"startIndex": 12,
"text": "something wicked this way comes",
"type": "subject"
}
]
},
"extra": [
"kb435"
],
"Part": [
"kb435"
],
"person": [
"bart simpson"
],
"personName": [
"bart simpson"
],
"subject": [
"something wicked this way comes"
]
},
"intents": {
"Cancel": {
"score": 1.01764708E-09
},
"Delivery": {
"score": 1.8E-09
},
"EntityTests": {
"score": 1.044335E-05
},
"Greeting": {
"score": 1.0875E-09
},
"Help": {
"score": 1.01764708E-09
},
"None": {
"score": 2.38094663E-06
},
"Roles": {
"score": 5.98274755E-06
},
"search": {
"score": 0.9999993
},
"SpecifyName": {
"score": 3.0666667E-09
},
"Travel": {
"score": 3.09763345E-06
},
"Weather.GetForecast": {
"score": 1.02792524E-06
}
},
"normalizedQuery": "email about something wicked this way comes from bart simpson and also kb435",
"sentiment": {
"label": "negative",
"score": 0.210341513
},
"topIntent": "search"
},
"query": "email about something wicked this way comes from bart simpson and also kb435"
}
}
}
|
botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/Patterns_v3.json/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/Patterns_v3.json",
"repo_id": "botbuilder-python",
"token_count": 4294
}
| 395 |
{
"answers": [
{
"questions": [
"Bald Eagle"
],
"answer": "Apparently these guys aren't actually bald!",
"score": 100.0,
"id": 38,
"source": "Editorial",
"isDocumentText": false,
"metadata": [],
"context": {
"isContextOnly": true,
"prompts": []
}
}
],
"activeLearningEnabled": true
}
|
botbuilder-python/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer2.json/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer2.json",
"repo_id": "botbuilder-python",
"token_count": 286
}
| 396 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Application Insights Telemetry Processor for Bots."""
from typing import List
from .django.django_telemetry_processor import DjangoTelemetryProcessor
from .flask.flask_telemetry_processor import FlaskTelemetryProcessor
from .processor.telemetry_processor import TelemetryProcessor
class BotTelemetryProcessor(TelemetryProcessor):
"""Application Insights Telemetry Processor for Bot"""
def __init__(self, processors: List[TelemetryProcessor] = None):
self._processors: List[TelemetryProcessor] = (
[
DjangoTelemetryProcessor(),
FlaskTelemetryProcessor(),
]
if processors is None
else processors
)
def can_process(self) -> bool:
for processor in self._processors:
if processor.can_process():
return True
return False
def get_request_body(self) -> str:
for inner in self._processors:
if inner.can_process():
return inner.get_request_body()
return super().get_request_body()
|
botbuilder-python/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/bot_telemetry_processor.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/bot_telemetry_processor.py",
"repo_id": "botbuilder-python",
"token_count": 443
}
| 397 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from unittest.mock import create_autospec, MagicMock
from typing import Dict
import aiounittest
from botbuilder.core.adapters import TestAdapter, TestFlow
from botbuilder.schema import Activity
from botbuilder.core import (
ConversationState,
MemoryStorage,
TurnContext,
NullTelemetryClient,
)
from botbuilder.dialogs import (
Dialog,
DialogInstance,
DialogReason,
DialogSet,
WaterfallDialog,
DialogTurnResult,
DialogTurnStatus,
)
BEGIN_MESSAGE = Activity()
BEGIN_MESSAGE.text = "begin"
BEGIN_MESSAGE.type = "message"
MOCK_TELEMETRY = "botbuilder.applicationinsights.ApplicationInsightsTelemetryClient"
class TelemetryWaterfallTests(aiounittest.AsyncTestCase):
def test_none_telemetry_client(self):
# arrange
dialog = WaterfallDialog("myId")
# act
dialog.telemetry_client = None
# assert
self.assertEqual(type(dialog.telemetry_client), NullTelemetryClient)
async def test_execute_sequence_waterfall_steps(self):
# arrange
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
telemetry = MagicMock(name=MOCK_TELEMETRY)
# Create a DialogState property, DialogSet and register the WaterfallDialog.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
async def step1(step) -> DialogTurnResult:
await step.context.send_activity("bot responding.")
return Dialog.end_of_turn
async def step2(step) -> DialogTurnResult:
await step.context.send_activity("ending WaterfallDialog.")
return Dialog.end_of_turn
# act
my_dialog = WaterfallDialog("test", [step1, step2])
my_dialog.telemetry_client = telemetry
dialogs.add(my_dialog)
# Initialize TestAdapter
async def exec_test(turn_context: TurnContext) -> None:
dialog_context = await dialogs.create_context(turn_context)
results = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
await dialog_context.begin_dialog("test")
else:
if results.status == DialogTurnStatus.Complete:
await turn_context.send_activity(results.result)
await convo_state.save_changes(turn_context)
adapt = TestAdapter(exec_test)
test_flow = TestFlow(None, adapt)
tf2 = await test_flow.send(BEGIN_MESSAGE)
tf3 = await tf2.assert_reply("bot responding.")
tf4 = await tf3.send("continue")
await tf4.assert_reply("ending WaterfallDialog.")
# assert
telemetry_calls = [
("WaterfallStart", {"DialogId": "test"}),
("WaterfallStep", {"DialogId": "test", "StepName": step1.__qualname__}),
("WaterfallStep", {"DialogId": "test", "StepName": step2.__qualname__}),
]
self.assert_telemetry_calls(telemetry, telemetry_calls)
async def test_ensure_end_dialog_called(self):
# arrange
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
telemetry = MagicMock(name=MOCK_TELEMETRY)
# Create a DialogState property, DialogSet and register the WaterfallDialog.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
async def step1(step) -> DialogTurnResult:
await step.context.send_activity("step1 response")
return Dialog.end_of_turn
async def step2(step) -> DialogTurnResult:
await step.context.send_activity("step2 response")
return Dialog.end_of_turn
# act
my_dialog = WaterfallDialog("test", [step1, step2])
my_dialog.telemetry_client = telemetry
dialogs.add(my_dialog)
# Initialize TestAdapter
async def exec_test(turn_context: TurnContext) -> None:
dialog_context = await dialogs.create_context(turn_context)
await dialog_context.continue_dialog()
if not turn_context.responded:
await dialog_context.begin_dialog("test", None)
await convo_state.save_changes(turn_context)
adapt = TestAdapter(exec_test)
test_flow = TestFlow(None, adapt)
tf2 = await test_flow.send(BEGIN_MESSAGE)
tf3 = await tf2.assert_reply("step1 response")
tf4 = await tf3.send("continue")
tf5 = await tf4.assert_reply("step2 response")
await tf5.send(
"Should hit end of steps - this will restart the dialog and trigger COMPLETE event"
)
# assert
telemetry_calls = [
("WaterfallStart", {"DialogId": "test"}),
("WaterfallStep", {"DialogId": "test", "StepName": step1.__qualname__}),
("WaterfallStep", {"DialogId": "test", "StepName": step2.__qualname__}),
("WaterfallComplete", {"DialogId": "test"}),
("WaterfallStart", {"DialogId": "test"}),
("WaterfallStep", {"DialogId": "test", "StepName": step1.__qualname__}),
]
self.assert_telemetry_calls(telemetry, telemetry_calls)
async def test_cancelling_waterfall_telemetry(self):
# Arrange
dialog_id = "waterfall"
index = 0
guid = "(guid)"
async def my_waterfall_step(step) -> DialogTurnResult:
await step.context.send_activity("step1 response")
return Dialog.end_of_turn
dialog = WaterfallDialog(dialog_id, [my_waterfall_step])
telemetry_client = create_autospec(NullTelemetryClient)
dialog.telemetry_client = telemetry_client
dialog_instance = DialogInstance()
dialog_instance.id = dialog_id
dialog_instance.state = {"instanceId": guid, "stepIndex": index}
# Act
await dialog.end_dialog(
TurnContext(TestAdapter(), Activity()),
dialog_instance,
DialogReason.CancelCalled,
)
# Assert
telemetry_props = telemetry_client.track_event.call_args_list[0][0][1]
self.assertEqual(3, len(telemetry_props))
self.assertEqual(dialog_id, telemetry_props["DialogId"])
self.assertEqual(my_waterfall_step.__qualname__, telemetry_props["StepName"])
self.assertEqual(guid, telemetry_props["InstanceId"])
telemetry_client.track_event.assert_called_once()
def assert_telemetry_call(
self, telemetry_mock, index: int, event_name: str, props: Dict[str, str]
) -> None:
# pylint: disable=unused-variable
args, kwargs = telemetry_mock.track_event.call_args_list[index]
self.assertEqual(args[0], event_name)
for key, val in props.items():
self.assertTrue(
key in args[1],
msg=f"Could not find value {key} in {args[1]} for index {index}",
)
self.assertTrue(isinstance(args[1], dict))
self.assertTrue(val == args[1][key])
def assert_telemetry_calls(self, telemetry_mock, calls) -> None:
index = 0
for event_name, props in calls:
self.assert_telemetry_call(telemetry_mock, index, event_name, props)
index += 1
if index != len(telemetry_mock.track_event.call_args_list):
self.assertTrue( # pylint: disable=redundant-unittest-assert
False,
f"Found {len(telemetry_mock.track_event.call_args_list)} calls, testing for {index + 1}",
)
|
botbuilder-python/libraries/botbuilder-applicationinsights/tests/test_telemetry_waterfall.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-applicationinsights/tests/test_telemetry_waterfall.py",
"repo_id": "botbuilder-python",
"token_count": 3330
}
| 398 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
from botbuilder.schema import (
Activity,
AttachmentData,
ChannelAccount,
ConversationParameters,
ConversationsResult,
ConversationResourceResponse,
PagedMembersResult,
ResourceResponse,
Transcript,
)
from botframework.connector.auth import (
AuthenticationConfiguration,
ChannelProvider,
ClaimsIdentity,
CredentialProvider,
JwtTokenValidation,
SkillValidation,
)
class BotActionNotImplementedError(Exception):
"""Raised when an action is not implemented"""
class ChannelServiceHandler:
"""
Initializes a new instance of the ChannelServiceHandler class,
using a credential provider.
"""
def __init__(
self,
credential_provider: CredentialProvider,
auth_config: AuthenticationConfiguration,
channel_provider: ChannelProvider = None,
):
if not credential_provider:
raise TypeError("credential_provider can't be None")
if not auth_config:
raise TypeError("auth_config can't be None")
self._credential_provider = credential_provider
self._auth_config = auth_config
self._channel_provider = channel_provider
async def handle_send_to_conversation(
self, auth_header, conversation_id, activity
) -> ResourceResponse:
claims_identity = await self._authenticate(auth_header)
return await self.on_send_to_conversation(
claims_identity, conversation_id, activity
)
async def handle_reply_to_activity(
self, auth_header, conversation_id, activity_id, activity
) -> ResourceResponse:
claims_identity = await self._authenticate(auth_header)
return await self.on_reply_to_activity(
claims_identity, conversation_id, activity_id, activity
)
async def handle_update_activity(
self, auth_header, conversation_id, activity_id, activity
) -> ResourceResponse:
claims_identity = await self._authenticate(auth_header)
return await self.on_update_activity(
claims_identity, conversation_id, activity_id, activity
)
async def handle_delete_activity(self, auth_header, conversation_id, activity_id):
claims_identity = await self._authenticate(auth_header)
await self.on_delete_activity(claims_identity, conversation_id, activity_id)
async def handle_get_activity_members(
self, auth_header, conversation_id, activity_id
) -> List[ChannelAccount]:
claims_identity = await self._authenticate(auth_header)
return await self.on_get_activity_members(
claims_identity, conversation_id, activity_id
)
async def handle_create_conversation(
self, auth_header, parameters: ConversationParameters
) -> ConversationResourceResponse:
claims_identity = await self._authenticate(auth_header)
return await self.on_create_conversation(claims_identity, parameters)
async def handle_get_conversations(
self, auth_header, continuation_token: str = ""
) -> ConversationsResult:
claims_identity = await self._authenticate(auth_header)
return await self.on_get_conversations(claims_identity, continuation_token)
async def handle_get_conversation_members(
self, auth_header, conversation_id
) -> List[ChannelAccount]:
claims_identity = await self._authenticate(auth_header)
return await self.on_get_conversation_members(claims_identity, conversation_id)
async def handle_get_conversation_member(
self, auth_header, conversation_id, member_id
) -> ChannelAccount:
claims_identity = await self._authenticate(auth_header)
return await self.on_get_conversation_member(
claims_identity, conversation_id, member_id
)
async def handle_get_conversation_paged_members(
self,
auth_header,
conversation_id,
page_size: int = 0,
continuation_token: str = "",
) -> PagedMembersResult:
claims_identity = await self._authenticate(auth_header)
return await self.on_get_conversation_paged_members(
claims_identity, conversation_id, page_size, continuation_token
)
async def handle_delete_conversation_member(
self, auth_header, conversation_id, member_id
):
claims_identity = await self._authenticate(auth_header)
await self.on_delete_conversation_member(
claims_identity, conversation_id, member_id
)
async def handle_send_conversation_history(
self, auth_header, conversation_id, transcript: Transcript
) -> ResourceResponse:
claims_identity = await self._authenticate(auth_header)
return await self.on_send_conversation_history(
claims_identity, conversation_id, transcript
)
async def handle_upload_attachment(
self, auth_header, conversation_id, attachment_upload: AttachmentData
) -> ResourceResponse:
claims_identity = await self._authenticate(auth_header)
return await self.on_upload_attachment(
claims_identity, conversation_id, attachment_upload
)
async def on_get_conversations(
self,
claims_identity: ClaimsIdentity,
continuation_token: str = "",
) -> ConversationsResult:
"""
get_conversations() API for Skill
List the Conversations in which this bot has participated.
Issue a GET to this method, optionally passing a skip token.
The return value is a ConversationsResult, which contains an array of
ConversationMembers and a skip token. If the skip token is not empty, then
there are further values to be returned. Call this method again with the
returned token to get more values.
Each ConversationMembers object contains the ID of the conversation and an
array of ChannelAccounts that describe the members of the conversation.
:param claims_identity:
:param continuation_token:
:return:
"""
raise BotActionNotImplementedError()
async def on_create_conversation(
self,
claims_identity: ClaimsIdentity,
parameters: ConversationParameters,
) -> ConversationResourceResponse:
"""
create_conversation() API for Skill
Create a new Conversation.
POST to this method with a
* Bot being the bot creating the conversation
* IsGroup set to true if this is not a direct message (default is false)
* Array containing the members to include in the conversation
The return value is a ResourceResponse which contains a conversation id
which is suitable for use
in the message payload and REST API URIs.
Most channels only support the semantics of bots initiating a direct
message conversation. An example of how to do that would be:
resource = await connector.conversations.create_conversation(
    ConversationParameters(bot=bot, members=[ChannelAccount(id="user1")])
)
await connector.conversations.send_to_conversation(resource.id, Activity(...))
:param claims_identity:
:param parameters:
:return:
"""
raise BotActionNotImplementedError()
async def on_send_to_conversation(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity: Activity,
) -> ResourceResponse:
"""
send_to_conversation() API for Skill
This method allows you to send an activity to the end of a conversation.
This is slightly different from ReplyToActivity().
* SendToConversation(conversationId) - will append the activity to the end
of the conversation according to the timestamp or semantics of the channel.
* ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply
to another activity, if the channel supports it. If the channel does not
support nested replies, ReplyToActivity falls back to SendToConversation.
Use ReplyToActivity when replying to a specific activity in the
conversation.
Use SendToConversation in all other cases.
:param claims_identity:
:param conversation_id:
:param activity:
:return:
"""
raise BotActionNotImplementedError()
async def on_send_conversation_history(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
transcript: Transcript,
) -> ResourceResponse:
"""
send_conversation_history() API for Skill.
This method allows you to upload the historic activities to the
conversation.
Sender must ensure that the historic activities have unique ids and
appropriate timestamps. The ids are used by the client to deal with
duplicate activities and the timestamps are used by the client to render
the activities in the right order.
:param claims_identity:
:param conversation_id:
:param transcript:
:return:
"""
raise BotActionNotImplementedError()
async def on_update_activity(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
activity: Activity,
) -> ResourceResponse:
"""
update_activity() API for Skill.
Edit an existing activity.
Some channels allow you to edit an existing activity to reflect the new
state of a bot conversation.
For example, you can remove buttons after someone has clicked "Approve"
button.
:param claims_identity:
:param conversation_id:
:param activity_id:
:param activity:
:return:
"""
raise BotActionNotImplementedError()
async def on_reply_to_activity(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
activity: Activity,
) -> ResourceResponse:
"""
reply_to_activity() API for Skill.
This method allows you to reply to an activity.
This is slightly different from SendToConversation().
* SendToConversation(conversationId) - will append the activity to the end
of the conversation according to the timestamp or semantics of the channel.
* ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply
to another activity, if the channel supports it. If the channel does not
support nested replies, ReplyToActivity falls back to SendToConversation.
Use ReplyToActivity when replying to a specific activity in the
conversation.
Use SendToConversation in all other cases.
:param claims_identity:
:param conversation_id:
:param activity_id:
:param activity:
:return:
"""
raise BotActionNotImplementedError()
async def on_delete_activity(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
):
"""
delete_activity() API for Skill.
Delete an existing activity.
Some channels allow you to delete an existing activity, and if successful
this method will remove the specified activity.
:param claims_identity:
:param conversation_id:
:param activity_id:
:return:
"""
raise BotActionNotImplementedError()
async def on_get_conversation_members(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
) -> List[ChannelAccount]:
"""
get_conversation_members() API for Skill.
Enumerate the members of a conversation.
This REST API takes a ConversationId and returns a list of ChannelAccount
objects representing the members of the conversation.
:param claims_identity:
:param conversation_id:
:return:
"""
raise BotActionNotImplementedError()
async def on_get_conversation_member(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
member_id: str,
) -> ChannelAccount:
"""
get_conversation_member() API for Skill.
Enumerate the members of a conversation.
This REST API takes a ConversationId and returns a list of ChannelAccount
objects representing the members of the conversation.
:param claims_identity:
:param conversation_id:
:param member_id:
:return:
"""
raise BotActionNotImplementedError()
async def on_get_conversation_paged_members(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
page_size: int = None,
continuation_token: str = "",
) -> PagedMembersResult:
"""
get_conversation_paged_members() API for Skill.
Enumerate the members of a conversation one page at a time.
This REST API takes a ConversationId. Optionally a page_size and/or
continuation_token can be provided. It returns a PagedMembersResult, which
contains an array
of ChannelAccounts representing the members of the conversation and a
continuation token that can be used to get more values.
One page of ChannelAccount records is returned with each call. The number
of records in a page may vary between channels and calls. The page_size
parameter can be used as
a suggestion. If there are no additional results the response will not
contain a continuation token. If there are no members in the conversation
the Members will be empty or not present in the response.
A response to a request that has a continuation token from a prior request
may rarely return members from a previous request.
:param claims_identity:
:param conversation_id:
:param page_size:
:param continuation_token:
:return:
"""
raise BotActionNotImplementedError()
async def on_delete_conversation_member(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
member_id: str,
):
"""
delete_conversation_member() API for Skill.
Deletes a member from a conversation.
This REST API takes a ConversationId and a memberId (of type string) and
removes that member from the conversation. If that member was the last
member
of the conversation, the conversation will also be deleted.
:param claims_identity:
:param conversation_id:
:param member_id:
:return:
"""
raise BotActionNotImplementedError()
async def on_get_activity_members(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
) -> List[ChannelAccount]:
"""
get_activity_members() API for Skill.
Enumerate the members of an activity.
This REST API takes a ConversationId and a ActivityId, returning an array
of ChannelAccount objects representing the members of the particular
activity in the conversation.
:param claims_identity:
:param conversation_id:
:param activity_id:
:return:
"""
raise BotActionNotImplementedError()
async def on_upload_attachment(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
attachment_upload: AttachmentData,
) -> ResourceResponse:
"""
upload_attachment() API for Skill.
Upload an attachment directly into a channel's blob storage.
This is useful because it allows you to store data in a compliant store
when dealing with enterprises.
The response is a ResourceResponse which contains an AttachmentId which is
suitable for using with the attachments API.
:param claims_identity:
:param conversation_id:
:param attachment_upload:
:return:
"""
raise BotActionNotImplementedError()
async def _authenticate(self, auth_header: str) -> ClaimsIdentity:
"""
Helper to authenticate the header.
This code is very similar to the code in JwtTokenValidation.authenticate_request,
we should move this code somewhere in that library when we refactor auth,
for now we keep it private to avoid adding more public static functions that we will need to deprecate later.
"""
if not auth_header:
is_auth_disabled = (
await self._credential_provider.is_authentication_disabled()
)
if not is_auth_disabled:
# No auth header. Auth is required. Request is not authorized.
raise PermissionError()
# In the scenario where Auth is disabled, we still want to have the
# IsAuthenticated flag set in the ClaimsIdentity. To do this requires
# adding in an empty claim.
# Since ChannelServiceHandler calls are always a skill callback call, we set the skill claim too.
return SkillValidation.create_anonymous_skill_claim()
# Validate the header and extract claims.
return await JwtTokenValidation.validate_auth_header(
auth_header,
self._credential_provider,
self._channel_provider,
"unknown",
auth_configuration=self._auth_config,
)
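if __name__ == "__main__":
    # Hedged sketch (not part of the original module): a minimal subclass that
    # implements one of the on_* hooks; the response id below is illustrative.
    class EchoChannelServiceHandler(ChannelServiceHandler):
        async def on_reply_to_activity(
            self,
            claims_identity: ClaimsIdentity,
            conversation_id: str,
            activity_id: str,
            activity: Activity,
        ) -> ResourceResponse:
            # A real handler would deliver the reply through the channel here.
            return ResourceResponse(id="reply-1")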
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/channel_service_handler.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/channel_service_handler.py",
"repo_id": "botbuilder-python",
"token_count": 6733
}
| 399 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict
from msrest.serialization import Model
class IntentScore(Model):
_attribute_map = {
"score": {"key": "score", "type": "float"},
"properties": {"key": "properties", "type": "{object}"},
}
def __init__(
self, score: float = None, properties: Dict[str, object] = None, **kwargs
):
super(IntentScore, self).__init__(**kwargs)
self.score = score
self.properties = properties or {}
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/intent_score.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/intent_score.py",
"repo_id": "botbuilder-python",
"token_count": 204
}
| 400 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict, NamedTuple
from msrest.serialization import Model
from botbuilder.core import IntentScore
class TopIntent(NamedTuple):
"""The top scoring intent and its score."""
intent: str
score: float
class RecognizerResult(Model):
"""Contains recognition results generated by a recognizer."""
_attribute_map = {
"text": {"key": "text", "type": "str"},
"altered_text": {"key": "alteredText", "type": "str"},
"intents": {"key": "intents", "type": "{IntentScore}"},
"entities": {"key": "entities", "type": "{object}"},
"properties": {"key": "properties", "type": "{object}"},
}
def __init__(
self,
*,
text: str = None,
altered_text: str = None,
intents: Dict[str, IntentScore] = None,
entities: Dict[str, object] = None,
properties: Dict[str, object] = None,
**kwargs
):
super(RecognizerResult, self).__init__(**kwargs)
self.text = text
self.altered_text = altered_text or kwargs.get("alteredText")
self.intents = intents
self.entities = entities
self.properties = properties or {}
def convert(self, result: object):
self.text = result.text
self.altered_text = result.altered_text
self.intents = result.intents
self.entities = result.entities
self.properties = result.properties
def get_top_scoring_intent(self) -> TopIntent:
"""Return the top scoring intent and its score.
:return: Intent and score.
:rtype: TopIntent
"""
if self.intents is None:
raise TypeError("result.intents can't be None")
top_intent = TopIntent(intent="", score=0.0)
for intent_name, intent_score in self.intents.items():
score = intent_score.score
if score > top_intent[1]:
top_intent = TopIntent(intent_name, score)
return top_intent
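if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the intent names
    # and scores below are illustrative.
    result = RecognizerResult(
        text="book a flight",
        intents={"BookFlight": IntentScore(score=0.92), "None": IntentScore(score=0.05)},
    )
    top = result.get_top_scoring_intent()
    assert top.intent == "BookFlight" and top.score == 0.92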
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/recognizer_result.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/recognizer_result.py",
"repo_id": "botbuilder-python",
"token_count": 855
}
| 401 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from copy import copy
from abc import ABC, abstractmethod
from typing import List
class Storage(ABC):
@abstractmethod
async def read(self, keys: List[str]):
"""
Loads store items from storage.
:param keys:
:return:
"""
raise NotImplementedError()
@abstractmethod
async def write(self, changes):
"""
Saves store items to storage.
:param changes:
:return:
"""
raise NotImplementedError()
@abstractmethod
async def delete(self, keys: List[str]):
"""
Removes store items from storage.
:param keys:
:return:
"""
raise NotImplementedError()
class StoreItem:
"""
Object which is stored in Storage with an optional eTag.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
non_magic_attributes = [attr for attr in dir(self) if not attr.startswith("_")]
output = (
"{"
+ ",".join(
[f' "{attr}": "{getattr(self, attr)}"' for attr in non_magic_attributes]
)
+ " }"
)
return output
def calculate_change_hash(item: StoreItem) -> str:
"""
Utility function to calculate a change hash for a `StoreItem`.
:param item:
:return:
"""
cpy = copy(item)
if cpy.e_tag is not None:
del cpy.e_tag
return str(cpy)
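if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the attribute names
    # are illustrative. The change hash ignores e_tag but tracks content edits.
    item = StoreItem(counter=1, e_tag="*")
    before = calculate_change_hash(item)
    item.counter += 1
    assert calculate_change_hash(item) != before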
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/storage.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/storage.py",
"repo_id": "botbuilder-python",
"token_count": 711
}
| 402 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Logs incoming and outgoing activities to a TranscriptStore.."""
import datetime
import copy
import random
import string
from queue import Queue
from abc import ABC, abstractmethod
from typing import Awaitable, Callable, List
from botbuilder.schema import (
Activity,
ActivityEventNames,
ActivityTypes,
ChannelAccount,
ConversationReference,
)
from .middleware_set import Middleware
from .turn_context import TurnContext
class TranscriptLogger(ABC):
"""Transcript logger stores activities for conversations for recall."""
@abstractmethod
async def log_activity(self, activity: Activity) -> None:
"""Log an activity to the transcript.
:param activity:Activity being logged.
"""
raise NotImplementedError
class TranscriptLoggerMiddleware(Middleware):
"""Logs incoming and outgoing activities to a TranscriptStore."""
def __init__(self, logger: TranscriptLogger):
if not logger:
raise TypeError(
"TranscriptLoggerMiddleware requires a TranscriptLogger instance."
)
self.logger = logger
async def on_turn(
self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
):
"""Initialization for middleware.
:param context: Context for the current turn of conversation with the user.
:param logic: Function to call at the end of the middleware chain.
"""
transcript = Queue()
activity = context.activity
# Log incoming activity at beginning of turn
if activity:
if not activity.from_property:
activity.from_property = ChannelAccount()
if not activity.from_property.role:
activity.from_property.role = "user"
# We should not log ContinueConversation events used by skills to initialize the middleware.
if not (
context.activity.type == ActivityTypes.event
and context.activity.name == ActivityEventNames.continue_conversation
):
await self.log_activity(transcript, copy.copy(activity))
# hook up onSend pipeline
# pylint: disable=unused-argument
async def send_activities_handler(
ctx: TurnContext,
activities: List[Activity],
next_send: Callable[[], Awaitable[None]],
):
# Run full pipeline
responses = await next_send()
for index, activity in enumerate(activities):
cloned_activity = copy.copy(activity)
if responses and index < len(responses):
cloned_activity.id = responses[index].id
# For certain channels, a ResourceResponse with an id is not always sent to the bot.
# This fix uses the timestamp on the activity to populate its id for logging the transcript
# If there is no outgoing timestamp, the current time for the bot is used for the activity.id
if not cloned_activity.id:
alphanumeric = string.ascii_lowercase + string.digits
prefix = "g_" + "".join(
random.choice(alphanumeric) for i in range(5)
)
epoch = datetime.datetime.utcfromtimestamp(0)
if cloned_activity.timestamp:
reference = cloned_activity.timestamp
else:
reference = datetime.datetime.today()
delta = (reference - epoch).total_seconds() * 1000
cloned_activity.id = f"{prefix}{delta}"
await self.log_activity(transcript, cloned_activity)
return responses
context.on_send_activities(send_activities_handler)
# hook up update activity pipeline
async def update_activity_handler(
ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]
):
# Run full pipeline
response = await next_update()
update_activity = copy.copy(activity)
update_activity.type = ActivityTypes.message_update
await self.log_activity(transcript, update_activity)
return response
context.on_update_activity(update_activity_handler)
# hook up delete activity pipeline
async def delete_activity_handler(
ctx: TurnContext,
reference: ConversationReference,
next_delete: Callable[[], Awaitable],
):
# Run full pipeline
await next_delete()
delete_msg = Activity(
type=ActivityTypes.message_delete, id=reference.activity_id
)
deleted_activity: Activity = TurnContext.apply_conversation_reference(
delete_msg, reference, False
)
await self.log_activity(transcript, deleted_activity)
context.on_delete_activity(delete_activity_handler)
if logic:
await logic()
# Flush transcript at end of turn
while not transcript.empty():
activity = transcript.get()
if activity is None:
break
await self.logger.log_activity(activity)
transcript.task_done()
async def log_activity(self, transcript: Queue, activity: Activity) -> None:
"""Logs the activity.
:param transcript: transcript.
:param activity: Activity to log.
"""
transcript.put(activity)
class TranscriptStore(TranscriptLogger):
"""Transcript storage for conversations."""
@abstractmethod
async def get_transcript_activities(
self,
channel_id: str,
conversation_id: str,
continuation_token: str,
start_date: datetime,
) -> "PagedResult":
"""Get activities for a conversation (Aka the transcript).
:param channel_id: Channel Id where conversation took place.
:param conversation_id: Conversation ID
:param continuation_token: Continuation token to page through results.
:param start_date: Earliest time to include
:result: Page of results of Activity objects
"""
raise NotImplementedError
@abstractmethod
async def list_transcripts(
self, channel_id: str, continuation_token: str
) -> "PagedResult":
"""List conversations in the channelId.
:param channel_id: Channel Id where conversation took place.
:param continuation_token : Continuation token to page through results.
:result: Page of results of TranscriptInfo objects
"""
raise NotImplementedError
@abstractmethod
async def delete_transcript(self, channel_id: str, conversation_id: str) -> None:
"""Delete a specific conversation and all of it's activities.
:param channel_id: Channel Id where conversation took place.
:param conversation_id: Id of the conversation to delete.
:result: None
"""
raise NotImplementedError
class ConsoleTranscriptLogger(TranscriptLogger):
"""ConsoleTranscriptLogger writes activities to Console output."""
async def log_activity(self, activity: Activity) -> None:
"""Log an activity to the transcript.
:param activity:Activity being logged.
"""
if activity:
print(f"Activity Log: {activity}")
else:
raise TypeError("Activity is required")
class TranscriptInfo:
"""Metadata for a stored transcript."""
# pylint: disable=invalid-name
def __init__(
self,
channel_id: str = None,
created: datetime = None,
conversation_id: str = None,
):
"""
:param channel_id: Channel ID the transcript was taken from
:param created: Timestamp when event created
:param conversation_id: Conversation ID
"""
self.channel_id = channel_id
self.created = created
self.id = conversation_id
class PagedResult:
"""Paged results for transcript data."""
# Page of Items
items: List[object] = None
# Token used to page through multiple pages.
continuation_token: str = None
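if __name__ == "__main__":
    # Hedged wiring sketch (not part of the original module): attach the
    # middleware to an adapter's pipeline; the adapter itself is host-specific.
    middleware = TranscriptLoggerMiddleware(ConsoleTranscriptLogger())
    # adapter.use(middleware)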
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/transcript_logger.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/transcript_logger.py",
"repo_id": "botbuilder-python",
"token_count": 3351
}
| 403 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.core.teams.teams_helper import deserializer_helper
from botbuilder.schema import Activity, ChannelAccount, Mention
from botbuilder.schema.teams import (
MessageActionsPayload,
MessagingExtensionAction,
TaskModuleRequestContext,
)
class TestTeamsActivityHandler(aiounittest.AsyncTestCase):
def test_teams_helper_teams_schema(self):
# Arrange
data = {
"data": {"key": "value"},
"context": {"theme": "dark"},
"commandId": "test_command",
"commandContext": "command_context_test",
"botMessagePreviewAction": "edit",
"botActivityPreview": [{"id": "activity123"}],
"messagePayload": {"id": "payloadid"},
}
# Act
result = deserializer_helper(MessagingExtensionAction, data)
# Assert
assert result.data == {"key": "value"}
assert result.context == TaskModuleRequestContext(theme="dark")
assert result.command_id == "test_command"
assert result.bot_message_preview_action == "edit"
assert len(result.bot_activity_preview) == 1
assert result.bot_activity_preview[0] == Activity(id="activity123")
assert result.message_payload == MessageActionsPayload(id="payloadid")
def test_teams_helper_schema(self):
# Arrange
data = {
"mentioned": {"id": "123", "name": "testName"},
"text": "Hello <at>testName</at>",
"type": "mention",
}
# Act
result = deserializer_helper(Mention, data)
# Assert
assert result.mentioned == ChannelAccount(id="123", name="testName")
assert result.text == "Hello <at>testName</at>"
assert result.type == "mention"
|
botbuilder-python/libraries/botbuilder-core/tests/teams/test_teams_helper.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/tests/teams/test_teams_helper.py",
"repo_id": "botbuilder-python",
"token_count": 776
}
| 404 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.core import MemoryStorage, TurnContext, PrivateConversationState
from botbuilder.core.adapters import TestAdapter
from botbuilder.schema import Activity, ChannelAccount, ConversationAccount
RECEIVED_MESSAGE = Activity(
text="received",
type="message",
channel_id="test",
conversation=ConversationAccount(id="convo"),
from_property=ChannelAccount(id="user"),
)
class TestPrivateConversationState(aiounittest.AsyncTestCase):
async def test_should_load_and_save_state_from_storage(self):
storage = MemoryStorage()
adapter = TestAdapter()
context = TurnContext(adapter, RECEIVED_MESSAGE)
private_conversation_state = PrivateConversationState(storage)
# Simulate a "Turn" in a conversation by loading the state,
# changing it and then saving the changes to state.
await private_conversation_state.load(context)
key = private_conversation_state.get_storage_key(context)
state = private_conversation_state.get(context)
assert state == {}, "State not loaded"
assert key, "Key not found"
state["test"] = "foo"
await private_conversation_state.save_changes(context)
# Check the storage to see if the changes to state were saved.
items = await storage.read([key])
assert key in items, "Saved state not found in storage."
assert items[key]["test"] == "foo", "Missing test value in stored state."
|
botbuilder-python/libraries/botbuilder-core/tests/test_private_conversation_state.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/tests/test_private_conversation_state.py",
"repo_id": "botbuilder-python",
"token_count": 540
}
| 405 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class ChoiceFactoryOptions:
def __init__(
self,
inline_separator: str = None,
inline_or: str = None,
inline_or_more: str = None,
include_numbers: bool = None,
) -> None:
"""Initializes a new instance.
Refer to the code in the ConfirmPrompt for an example of usage.
:param inline_separator: The inline separator value, defaults to None
:type inline_separator: str, optional
:param inline_or: The inline or value, defaults to None
:type inline_or: str, optional
:param inline_or_more: The inline or more value, defaults to None
:type inline_or_more: str, optional
:param include_numbers: Flag indicating whether to include numbers as a choice, defaults to None
:type include_numbers: bool, optional
:return:
:rtype: None
"""
self.inline_separator = inline_separator
self.inline_or = inline_or
self.inline_or_more = inline_or_more
self.include_numbers = include_numbers
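# A minimal usage sketch (values are illustrative assumptions, not library
# defaults). Prompts such as ConfirmPrompt consume these options through their
# choice_options attribute to control inline choice rendering:
#
#     options = ChoiceFactoryOptions(
#         inline_separator=", ",
#         inline_or=" or ",
#         inline_or_more=", or ",
#         include_numbers=True,
#     )
#
# With include_numbers=True a two-choice confirm prompt renders as
# "(1) Yes or (2) No"; with include_numbers=False it renders as "Yes or No".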
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/choice_factory_options.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/choice_factory_options.py",
"repo_id": "botbuilder-python",
"token_count": 463
}
| 406 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class DialogEvent:
def __init__(self, bubble: bool = False, name: str = "", value: object = None):
self.bubble = bubble
self.name = name
self.value: object = value
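# Usage sketch (the event name below is a hypothetical example):
#
#     event = DialogEvent(bubble=True, name="cancelDialog", value=None)
#
# bubble controls whether the event propagates to parent dialog contexts when
# unhandled; name identifies the event; value carries an optional payload.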
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_event.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_event.py",
"repo_id": "botbuilder-python",
"token_count": 99
}
| 407 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import builtins
from inspect import isawaitable
from traceback import print_tb
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Tuple,
Type,
TypeVar,
)
from botbuilder.core import ComponentRegistration
from botbuilder.dialogs.memory.scopes import MemoryScope
from .component_memory_scopes_base import ComponentMemoryScopesBase
from .component_path_resolvers_base import ComponentPathResolversBase
from .dialog_path import DialogPath
from .dialog_state_manager_configuration import DialogStateManagerConfiguration
# Declare type variable
T = TypeVar("T") # pylint: disable=invalid-name
BUILTIN_TYPES = list(filter(lambda x: not x.startswith("_"), dir(builtins)))
# The DialogStateManager manages memory scopes and path resolvers.
# MemoryScopes are named root-level objects, which can exist either in the dialog
# context or off of turn state. PathResolvers allow for shortcut behavior for
# mapping things like $foo -> dialog.foo.
class DialogStateManager:
SEPARATORS = [",", "["]
def __init__(
self,
dialog_context: "DialogContext",
configuration: DialogStateManagerConfiguration = None,
):
"""
Initializes a new instance of the DialogStateManager class.
:param dialog_context: The dialog context for the current turn of the conversation.
:param configuration: Configuration for the dialog state manager. Default is None.
"""
# pylint: disable=import-outside-toplevel
# These modules are imported at static level to avoid circular dependency problems
from botbuilder.dialogs import (
DialogsComponentRegistration,
ObjectPath,
)
self._object_path_cls = ObjectPath
self._dialog_component_registration_cls = DialogsComponentRegistration
# Information for tracking when path was last modified.
self.path_tracker = "dialog._tracker.paths"
self._dialog_context = dialog_context
self._version: int = 0
ComponentRegistration.add(self._dialog_component_registration_cls())
if not dialog_context:
raise TypeError(f"Expecting: DialogContext, but received None")
self._configuration = configuration or dialog_context.context.turn_state.get(
DialogStateManagerConfiguration.__name__, None
)
if not self._configuration:
self._configuration = DialogStateManagerConfiguration()
# get all of the component memory scopes
memory_component: ComponentMemoryScopesBase
for memory_component in filter(
lambda comp: isinstance(comp, ComponentMemoryScopesBase),
ComponentRegistration.get_components(),
):
for memory_scope in memory_component.get_memory_scopes():
self._configuration.memory_scopes.append(memory_scope)
# get all of the component path resolvers
path_component: ComponentPathResolversBase
for path_component in filter(
lambda comp: isinstance(comp, ComponentPathResolversBase),
ComponentRegistration.get_components(),
):
for path_resolver in path_component.get_path_resolvers():
self._configuration.path_resolvers.append(path_resolver)
# cache for any other new dialog_state_manager instances in this turn.
dialog_context.context.turn_state[
self._configuration.__class__.__name__
] = self._configuration
def __len__(self) -> int:
"""
Gets the number of memory scopes in the dialog state manager.
:return: Number of memory scopes in the configuration.
"""
return len(self._configuration.memory_scopes)
@property
def configuration(self) -> DialogStateManagerConfiguration:
"""
Gets or sets the configured path resolvers and memory scopes for the dialog state manager.
:return: The configuration object.
"""
return self._configuration
@property
def keys(self) -> Iterable[str]:
"""
Gets a Iterable containing the keys of the memory scopes
:return: Keys of the memory scopes.
"""
return [memory_scope.name for memory_scope in self.configuration.memory_scopes]
@property
def values(self) -> Iterable[object]:
"""
Gets a Iterable containing the values of the memory scopes.
:return: Values of the memory scopes.
"""
return [
memory_scope.get_memory(self._dialog_context)
for memory_scope in self.configuration.memory_scopes
]
@property
def is_read_only(self) -> bool:
"""
Gets a value indicating whether the dialog state manager is read-only.
:return: True.
"""
return True
    def __getitem__(self, key):
        """
        Gets the element with the specified key.
        :param key: Key of the element to get.
        :return: The value stored at the key's position.
        """
return self.get_value(object, key, default_value=lambda: None)
def __setitem__(self, key, value):
if self._index_of_any(key, self.SEPARATORS) == -1:
# Root is handled by SetMemory rather than SetValue
scope = self.get_memory_scope(key)
if not scope:
raise IndexError(self._get_bad_scope_message(key))
# TODO: C# transforms value to JToken
scope.set_memory(self._dialog_context, value)
else:
self.set_value(key, value)
def _get_bad_scope_message(self, path: str) -> str:
return (
f"'{path}' does not match memory scopes:["
f"{', '.join((memory_scope.name for memory_scope in self.configuration.memory_scopes))}]"
)
@staticmethod
def _index_of_any(string: str, elements_to_search_for) -> int:
for element in elements_to_search_for:
index = string.find(element)
if index != -1:
return index
return -1
def get_memory_scope(self, name: str) -> MemoryScope:
"""
Get MemoryScope by name.
:param name:
:return: A memory scope.
"""
if not name:
raise TypeError(f"Expecting: {str.__name__}, but received None")
return next(
(
memory_scope
for memory_scope in self.configuration.memory_scopes
if memory_scope.name.lower() == name.lower()
),
None,
)
def version(self) -> str:
"""
        The version helps the caller identify updates and decide whether to cache.
:return: Current version.
"""
return str(self._version)
def resolve_memory_scope(self, path: str) -> Tuple[MemoryScope, str]:
"""
        Finds the MemoryScope for the given path and returns the remaining sub-path within that scope.
:param path:
:return: The memory scope and remaining subpath in scope.
"""
scope = path
sep_index = -1
dot = path.find(".")
open_square_bracket = path.find("[")
if dot > 0 and open_square_bracket > 0:
sep_index = min(dot, open_square_bracket)
elif dot > 0:
sep_index = dot
elif open_square_bracket > 0:
sep_index = open_square_bracket
if sep_index > 0:
scope = path[0:sep_index]
memory_scope = self.get_memory_scope(scope)
if memory_scope:
remaining_path = path[sep_index + 1 :]
return memory_scope, remaining_path
        memory_scope = self.get_memory_scope(scope)
        if not memory_scope:
            raise IndexError(self._get_bad_scope_message(scope))
        return memory_scope, ""
def transform_path(self, path: str) -> str:
"""
Transform the path using the registered PathTransformers.
:param path: Path to transform.
:return: The transformed path.
"""
for path_resolver in self.configuration.path_resolvers:
path = path_resolver.transform_path(path)
return path
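    # Example (illustrative): with the default dialogs path resolvers
    # registered, shortcuts are expanded before lookup, e.g. "$foo" becomes
    # "dialog.foo"; the exact mappings depend on the PathResolvers present in
    # self.configuration.path_resolvers.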
@staticmethod
def _is_primitive(type_to_check: Type) -> bool:
return type_to_check.__name__ in BUILTIN_TYPES
def try_get_value(
self, path: str, class_type: Type = object
) -> Tuple[bool, object]:
"""
Get the value from memory using path expression (NOTE: This always returns clone of value).
:param class_type: The value type to return.
:param path: Path expression to use.
:return: True if found, false if not and the value.
"""
if not path:
raise TypeError(f"Expecting: {str.__name__}, but received None")
return_value = (
class_type() if DialogStateManager._is_primitive(class_type) else None
)
path = self.transform_path(path)
try:
memory_scope, remaining_path = self.resolve_memory_scope(path)
except Exception as error:
print_tb(error.__traceback__)
return False, return_value
if not memory_scope:
return False, return_value
if not remaining_path:
memory = memory_scope.get_memory(self._dialog_context)
if not memory:
return False, return_value
return True, memory
# TODO: HACK to support .First() retrieval on turn.recognized.entities.foo, replace with Expressions once
# expressions ship
first = ".FIRST()"
try:
i_first = path.upper().rindex(first)
except ValueError:
i_first = -1
if i_first >= 0:
remaining_path = path[i_first + len(first) :]
path = path[0:i_first]
success, first_value = self._try_get_first_nested_value(path, self)
if success:
if not remaining_path:
return True, first_value
path_value = self._object_path_cls.try_get_path_value(
first_value, remaining_path
)
return bool(path_value), path_value
return False, return_value
path_value = self._object_path_cls.try_get_path_value(self, path)
return bool(path_value), path_value
def get_value(
self,
class_type: Type,
path_expression: str,
default_value: Callable[[], T] = None,
) -> T:
"""
Get the value from memory using path expression (NOTE: This always returns clone of value).
:param class_type: The value type to return.
:param path_expression: Path expression to use.
:param default_value: Function to give default value if there is none (OPTIONAL).
:return: Result or null if the path is not valid.
"""
if not path_expression:
raise TypeError(f"Expecting: {str.__name__}, but received None")
success, value = self.try_get_value(path_expression, class_type)
if success:
return value
return default_value() if default_value else None
def get_int_value(self, path_expression: str, default_value: int = 0) -> int:
"""
Get an int value from memory using a path expression.
:param path_expression: Path expression to use.
:param default_value: Default value if there is none (OPTIONAL).
:return:
"""
if not path_expression:
raise TypeError(f"Expecting: {str.__name__}, but received None")
success, value = self.try_get_value(path_expression, int)
if success:
return value
return default_value
def get_bool_value(self, path_expression: str, default_value: bool = False) -> bool:
"""
Get a bool value from memory using a path expression.
:param path_expression: Path expression to use.
:param default_value: Default value if there is none (OPTIONAL).
:return:
"""
if not path_expression:
raise TypeError(f"Expecting: {str.__name__}, but received None")
success, value = self.try_get_value(path_expression, bool)
if success:
return value
return default_value
def get_string_value(self, path_expression: str, default_value: str = "") -> str:
"""
Get a string value from memory using a path expression.
:param path_expression: Path expression to use.
:param default_value: Default value if there is none (OPTIONAL).
:return:
"""
if not path_expression:
raise TypeError(f"Expecting: {str.__name__}, but received None")
success, value = self.try_get_value(path_expression, str)
if success:
return value
return default_value
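    # Usage sketch (the paths below are hypothetical): each typed getter falls
    # back to the supplied default when the path cannot be resolved.
    #
    #     count = state_manager.get_int_value("turn.count", 0)
    #     confirmed = state_manager.get_bool_value("dialog.confirmed", False)
    #     name = state_manager.get_string_value("user.name", "unknown")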
def set_value(self, path: str, value: object):
"""
Set memory to value.
:param path: Path to memory.
:param value: Object to set.
:return:
"""
if isawaitable(value):
            raise Exception(f"Cannot set path '{path}': set_value does not accept awaitables")
if not path:
raise TypeError(f"Expecting: {str.__name__}, but received None")
path = self.transform_path(path)
if self._track_change(path, value):
self._object_path_cls.set_path_value(self, path, value)
# Every set will increase version
self._version += 1
def remove_value(self, path: str):
"""
        Remove memory at the given path.
        :param path: Path to memory.
:return:
"""
if not path:
raise TypeError(f"Expecting: {str.__name__}, but received None")
path = self.transform_path(path)
if self._track_change(path, None):
self._object_path_cls.remove_path_value(self, path)
def get_memory_snapshot(self) -> Dict[str, object]:
"""
        Gets all memory scopes suitable for logging.
:return: object which represents all memory scopes.
"""
result = {}
for scope in [
ms for ms in self.configuration.memory_scopes if ms.include_in_snapshot
]:
memory = scope.get_memory(self._dialog_context)
if memory:
result[scope.name] = memory
return result
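    # The snapshot is a plain dict keyed by scope name, e.g. (illustrative)
    # {"user": {...}, "conversation": {...}}; scopes created with
    # include_in_snapshot=False (such as the dialog context scope) are omitted.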
async def load_all_scopes(self):
"""
Load all of the scopes.
:return:
"""
for scope in self.configuration.memory_scopes:
await scope.load(self._dialog_context)
async def save_all_changes(self):
"""
Save all changes for all scopes.
:return:
"""
for scope in self.configuration.memory_scopes:
await scope.save_changes(self._dialog_context)
async def delete_scopes_memory_async(self, name: str):
"""
Delete the memory for a scope.
:param name: name of the scope.
:return:
"""
name = name.upper()
scope_list = [
            ms for ms in self.configuration.memory_scopes if ms.name.upper() == name
]
if len(scope_list) > 1:
raise RuntimeError(f"More than 1 scopes found with the name '{name}'")
scope = scope_list[0] if scope_list else None
if scope:
await scope.delete(self._dialog_context)
def add(self, key: str, value: object):
"""
        Adds an element to the dialog state manager. Not supported: always raises RuntimeError.
:param key: Key of the element to add.
:param value: Value of the element to add.
:return:
"""
raise RuntimeError("Not supported")
def contains_key(self, key: str) -> bool:
"""
Determines whether the dialog state manager contains an element with the specified key.
:param key: The key to locate in the dialog state manager.
:return: True if the dialog state manager contains an element with the key otherwise, False.
"""
scopes_with_key = [
ms
for ms in self.configuration.memory_scopes
            if ms.name.upper() == key.upper()
]
return bool(scopes_with_key)
def remove(self, key: str):
"""
        Removes the element with the specified key from the dialog state manager. Not supported:
        always raises RuntimeError.
:param key: Key of the element to remove.
:return:
"""
raise RuntimeError("Not supported")
def clear(self, key: str):
"""
        Removes all items from the dialog state manager. Not supported: always raises RuntimeError.
        :param key: Ignored.
:return:
"""
raise RuntimeError("Not supported")
def contains(self, item: Tuple[str, object]) -> bool:
"""
Determines whether the dialog state manager contains a specific value (should use __contains__).
:param item: The tuple of the item to locate.
:return bool: True if item is found in the dialog state manager otherwise, False
"""
raise RuntimeError("Not supported")
def __contains__(self, item: Tuple[str, object]) -> bool:
"""
Determines whether the dialog state manager contains a specific value.
:param item: The tuple of the item to locate.
:return bool: True if item is found in the dialog state manager otherwise, False
"""
raise RuntimeError("Not supported")
def copy_to(self, array: List[Tuple[str, object]], array_index: int):
"""
Copies the elements of the dialog state manager to an array starting at a particular index.
:param array: The one-dimensional array that is the destination of the elements copied
from the dialog state manager. The array must have zero-based indexing.
:param array_index:
:return:
"""
for memory_scope in self.configuration.memory_scopes:
array[array_index] = (
memory_scope.name,
memory_scope.get_memory(self._dialog_context),
)
array_index += 1
def remove_item(self, item: Tuple[str, object]) -> bool:
"""
        Removes the first occurrence of a specific item from the dialog state manager. Not supported:
        always raises RuntimeError.
        :param item: The tuple of the item to remove.
        :return bool: True if the item was removed otherwise, False
"""
raise RuntimeError("Not supported")
def get_enumerator(self) -> Iterator[Tuple[str, object]]:
"""
Returns an enumerator that iterates through the collection.
:return: An enumerator that can be used to iterate through the collection.
"""
for memory_scope in self.configuration.memory_scopes:
yield (memory_scope.name, memory_scope.get_memory(self._dialog_context))
def track_paths(self, paths: Iterable[str]) -> List[str]:
"""
Track when specific paths are changed.
:param paths: Paths to track.
:return: Normalized paths to pass to any_path_changed.
"""
all_paths = []
for path in paths:
t_path = self.transform_path(path)
# Track any path that resolves to a constant path
segments = self._object_path_cls.try_resolve_path(self, t_path)
if segments:
n_path = "_".join(segments)
self.set_value(self.path_tracker + "." + n_path, 0)
all_paths.append(n_path)
return all_paths
def any_path_changed(self, counter: int, paths: Iterable[str]) -> bool:
"""
Check to see if any path has changed since watermark.
:param counter: Time counter to compare to.
:param paths: Paths from track_paths to check.
:return: True if any path has changed since counter.
"""
found = False
if paths:
for path in paths:
if self.get_value(int, self.path_tracker + "." + path) > counter:
found = True
break
return found
def __iter__(self):
for memory_scope in self.configuration.memory_scopes:
yield (memory_scope.name, memory_scope.get_memory(self._dialog_context))
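    # Iteration yields (scope_name, memory) pairs, so an instance can be
    # walked directly, e.g. (illustrative):
    #
    #     for scope_name, memory in state_manager:
    #         print(scope_name, memory)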
@staticmethod
def _try_get_first_nested_value(
remaining_path: str, memory: object
) -> Tuple[bool, object]:
# These modules are imported at static level to avoid circular dependency problems
# pylint: disable=import-outside-toplevel
from botbuilder.dialogs import ObjectPath
array = ObjectPath.try_get_path_value(memory, remaining_path)
if array:
if isinstance(array[0], list):
first = array[0]
if first:
second = first[0]
return True, second
return False, None
return True, array[0]
return False, None
def _track_change(self, path: str, value: object) -> bool:
has_path = False
segments = self._object_path_cls.try_resolve_path(self, path)
if segments:
root = segments[1] if len(segments) > 1 else ""
# Skip _* as first scope, i.e. _adaptive, _tracker, ...
if not root.startswith("_"):
# Convert to a simple path with _ between segments
path_name = "_".join(segments)
tracked_path = f"{self.path_tracker}.{path_name}"
counter = None
def update():
nonlocal counter
                    has_value, _ = self.try_get_value(tracked_path, int)
                    if has_value:
                        if counter is None:
                            counter = self.get_value(int, DialogPath.EVENT_COUNTER)
                        self.set_value(tracked_path, counter)
update()
if not self._is_primitive(type(value)):
                    # For an object we need to see if any child paths are being tracked
                    def check_children(prop: str, instance: object):
                        nonlocal tracked_path
                        # Add new child segment
                        tracked_path += "_" + prop.lower()
                        update()
                        if not self._is_primitive(type(instance)):
                            # Recurse into the child instance's properties
                            self._object_path_cls.for_each_property(
                                instance, check_children
                            )
                        # Remove the added child segment
                        tracked_path = tracked_path[: tracked_path.rindex("_")]
self._object_path_cls.for_each_property(value, check_children)
has_path = True
return has_path
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py",
"repo_id": "botbuilder-python",
"token_count": 10201
}
| 408 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.dialogs.memory import scope_path
from .memory_scope import MemoryScope
class DialogContextMemoryScope(MemoryScope):
def __init__(self):
# pylint: disable=invalid-name
        super().__init__(scope_path.DIALOG_CONTEXT, include_in_snapshot=False)
# Stack name.
self.STACK = "stack"
# Active dialog name.
self.ACTIVE_DIALOG = "activeDialog"
# Parent name.
self.PARENT = "parent"
def get_memory(self, dialog_context: "DialogContext") -> object:
"""
Gets the backing memory for this scope.
        :param dialog_context: The DialogContext object for this turn.
        :return: Memory for the scope.
"""
if not dialog_context:
raise TypeError(f"Expecting: DialogContext, but received None")
# TODO: make sure that every object in the dict is serializable
memory = {}
        stack = []
current_dc = dialog_context
# go to leaf node
while current_dc.child:
current_dc = current_dc.child
while current_dc:
# (PORTERS NOTE: javascript stack is reversed with top of stack on end)
for item in current_dc.stack:
# filter out ActionScope items because they are internal bookkeeping.
if not item.id.startswith("ActionScope["):
stack.append(item.id)
current_dc = current_dc.parent
# top of stack is stack[0].
memory[self.STACK] = stack
memory[self.ACTIVE_DIALOG] = (
dialog_context.active_dialog.id if dialog_context.active_dialog else None
)
memory[self.PARENT] = (
dialog_context.parent.active_dialog.id
if dialog_context.parent and dialog_context.parent.active_dialog
else None
)
return memory
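    # The returned memory is a plain dict shaped like (dialog IDs below are
    # hypothetical): {"stack": ["rootDialog", "childDialog"],
    # "activeDialog": "childDialog", "parent": None}.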
def set_memory(self, dialog_context: "DialogContext", memory: object):
raise Exception(
f"{self.__class__.__name__}.set_memory not supported (read only)"
)
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/dialog_context_memory_scope.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/dialog_context_memory_scope.py",
"repo_id": "botbuilder-python",
"token_count": 916
}
| 409 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class DateTimeResolution:
def __init__(
self, value: str = None, start: str = None, end: str = None, timex: str = None
):
self.value = value
self.start = start
self.end = end
self.timex = timex
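# Usage sketch (values are illustrative): a resolution for an exact date
# typically carries a TIMEX expression alongside the value, while ranges
# populate start/end instead:
#
#     resolution = DateTimeResolution(value="2019-06-01", timex="2019-06-01")
#     range_resolution = DateTimeResolution(
#         start="2019-06-01", end="2019-06-08", timex="2019-W23"
#     )
#
# These shapes follow common TIMEX usage and are not enforced by this class.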
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/datetime_resolution.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/datetime_resolution.py",
"repo_id": "botbuilder-python",
"token_count": 131
}
| 410 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# TODO: Remove this file once we get some tests to verify waterfall_step
# unnecessary in Python.
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_step.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_step.py",
"repo_id": "botbuilder-python",
"token_count": 46
}
| 411 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
import aiounittest
from botbuilder.core import (
ConversationState,
MemoryStorage,
TurnContext,
MessageFactory,
)
from botbuilder.core.adapters import TestAdapter
from botbuilder.dialogs import DialogSet, DialogTurnResult, DialogTurnStatus
from botbuilder.dialogs.choices import Choice, ChoiceFactoryOptions, ListStyle
from botbuilder.dialogs.prompts import (
ConfirmPrompt,
PromptCultureModel,
PromptOptions,
PromptValidatorContext,
)
from botbuilder.schema import Activity, ActivityTypes
class ConfirmPromptTest(aiounittest.AsyncTestCase):
def test_confirm_prompt_with_empty_id_should_fail(self):
empty_id = ""
with self.assertRaises(TypeError):
ConfirmPrompt(empty_id)
def test_confirm_prompt_with_none_id_should_fail(self):
none_id = None
with self.assertRaises(TypeError):
ConfirmPrompt(none_id)
async def test_confirm_prompt(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm.")
)
await dialog_context.prompt("ConfirmPrompt", options)
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="English")
dialogs.add(confirm_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Please confirm. (1) Yes or (2) No")
step3 = await step2.send("yes")
await step3.assert_reply("Confirmed")
async def test_confirm_prompt_retry(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm."),
retry_prompt=Activity(
type=ActivityTypes.message,
text="Please confirm, say 'yes' or 'no' or something like that.",
),
)
await dialog_context.prompt("ConfirmPrompt", options)
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="English")
dialogs.add(confirm_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Please confirm. (1) Yes or (2) No")
step3 = await step2.send("lala")
step4 = await step3.assert_reply(
"Please confirm, say 'yes' or 'no' or something like that. (1) Yes or (2) No"
)
step5 = await step4.send("no")
await step5.assert_reply("Not confirmed")
async def test_confirm_prompt_no_options(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
await dialog_context.prompt("ConfirmPrompt", PromptOptions())
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="English")
dialogs.add(confirm_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply(" (1) Yes or (2) No")
step3 = await step2.send("lala")
step4 = await step3.assert_reply(" (1) Yes or (2) No")
step5 = await step4.send("no")
await step5.assert_reply("Not confirmed")
async def test_confirm_prompt_choice_options_numbers(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm."),
retry_prompt=Activity(
type=ActivityTypes.message,
text="Please confirm, say 'yes' or 'no' or something like that.",
),
)
await dialog_context.prompt("ConfirmPrompt", options)
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="English")
confirm_prompt.choice_options = ChoiceFactoryOptions(include_numbers=True)
confirm_prompt.style = ListStyle.in_line
dialogs.add(confirm_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Please confirm. (1) Yes or (2) No")
step3 = await step2.send("lala")
step4 = await step3.assert_reply(
"Please confirm, say 'yes' or 'no' or something like that. (1) Yes or (2) No"
)
step5 = await step4.send("2")
await step5.assert_reply("Not confirmed")
async def test_confirm_prompt_choice_options_multiple_attempts(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm."),
retry_prompt=Activity(
type=ActivityTypes.message,
text="Please confirm, say 'yes' or 'no' or something like that.",
),
)
await dialog_context.prompt("ConfirmPrompt", options)
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="English")
confirm_prompt.choice_options = ChoiceFactoryOptions(include_numbers=True)
confirm_prompt.style = ListStyle.in_line
dialogs.add(confirm_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Please confirm. (1) Yes or (2) No")
step3 = await step2.send("lala")
step4 = await step3.assert_reply(
"Please confirm, say 'yes' or 'no' or something like that. (1) Yes or (2) No"
)
step5 = await step4.send("what")
step6 = await step5.assert_reply(
"Please confirm, say 'yes' or 'no' or something like that. (1) Yes or (2) No"
)
step7 = await step6.send("2")
await step7.assert_reply("Not confirmed")
async def test_confirm_prompt_options_no_numbers(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm."),
retry_prompt=Activity(
type=ActivityTypes.message,
text="Please confirm, say 'yes' or 'no' or something like that.",
),
)
await dialog_context.prompt("ConfirmPrompt", options)
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="English")
confirm_prompt.choice_options = ChoiceFactoryOptions(
include_numbers=False, inline_separator="~"
)
dialogs.add(confirm_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Please confirm. Yes or No")
step3 = await step2.send("2")
step4 = await step3.assert_reply(
"Please confirm, say 'yes' or 'no' or something like that. Yes or No"
)
step5 = await step4.send("no")
await step5.assert_reply("Not confirmed")
async def test_confirm_prompt_should_default_to_english_locale(self):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm."),
retry_prompt=Activity(
type=ActivityTypes.message,
text="Please confirm, say 'yes' or 'no' or something like that.",
),
)
await dialog_context.prompt("ConfirmPrompt", options)
elif results.status == DialogTurnStatus.Complete:
message_text = "Confirmed" if results.result else "Not confirmed"
await turn_context.send_activity(MessageFactory.text(message_text))
await convo_state.save_changes(turn_context)
locales = [None, "", "not-supported"]
for locale in locales:
# Initialize TestAdapter.
adapter = TestAdapter(exec_test)
# Create new ConversationState with MemoryStorage and register the state as middleware.
convo_state = ConversationState(MemoryStorage())
# Create a DialogState property, DialogSet, and ChoicePrompt.
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
confirm_prompt = ConfirmPrompt("ConfirmPrompt")
confirm_prompt.choice_options = ChoiceFactoryOptions(include_numbers=True)
dialogs.add(confirm_prompt)
step1 = await adapter.send(
Activity(type=ActivityTypes.message, text="Hello", locale=locale)
)
step2 = await step1.assert_reply("Please confirm. (1) Yes or (2) No")
step3 = await step2.send("lala")
step4 = await step3.assert_reply(
"Please confirm, say 'yes' or 'no' or something like that. (1) Yes or (2) No"
)
step5 = await step4.send("2")
await step5.assert_reply("Not confirmed")
async def test_should_recognize_locale_variations_of_correct_locales(self):
def cap_ending(locale: str) -> str:
return f"{locale.split('-')[0]}-{locale.split('-')[1].upper()}"
def title_ending(locale: str) -> str:
return locale[:3] + locale[3].upper() + locale[4:]
def cap_two_letter(locale: str) -> str:
return locale.split("-")[0].upper()
def lower_two_letter(locale: str) -> str:
            return locale.split("-")[0].lower()
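        # For "en-us" these helpers yield "en-US", "en-Us", "EN", and "en"
        # respectively; each is a casing variation the prompt should still
        # recognize as the same locale.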
async def exec_test_for_locale(valid_locale: str, locale_variations: List):
# Hold the correct answer from when a valid locale is used
expected_answer = None
def inspector(activity: Activity, description: str):
nonlocal expected_answer
assert not description
if valid_locale == test_locale:
expected_answer = activity.text
else:
# Ensure we're actually testing a variation.
assert activity.locale != valid_locale
assert activity.text == expected_answer
return True
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(
type=ActivityTypes.message, text="Please confirm."
)
)
await dialog_context.prompt("prompt", options)
elif results.status == DialogTurnStatus.Complete:
confirmed = results.result
if confirmed:
await turn_context.send_activity("true")
else:
await turn_context.send_activity("false")
await convo_state.save_changes(turn_context)
async def validator(prompt: PromptValidatorContext) -> bool:
assert prompt
if not prompt.recognized.succeeded:
await prompt.context.send_activity("Bad input.")
return prompt.recognized.succeeded
test_locale = None
for test_locale in locale_variations:
adapter = TestAdapter(exec_test)
convo_state = ConversationState(MemoryStorage())
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
choice_prompt = ConfirmPrompt("prompt", validator)
dialogs.add(choice_prompt)
step1 = await adapter.send(
Activity(
type=ActivityTypes.message, text="Hello", locale=test_locale
)
)
await step1.assert_reply(inspector)
locales = [
"zh-cn",
"nl-nl",
"en-us",
"fr-fr",
"de-de",
"it-it",
"ja-jp",
"ko-kr",
"pt-br",
"es-es",
"tr-tr",
"de-de",
]
locale_tests = []
for locale in locales:
locale_tests.append(
[
locale,
cap_ending(locale),
title_ending(locale),
cap_two_letter(locale),
lower_two_letter(locale),
]
)
# Test each valid locale
        for locale_test in locale_tests:
            await exec_test_for_locale(locale_test[0], locale_test)
async def test_should_recognize_and_use_custom_locale_dict(
self,
):
async def exec_test(turn_context: TurnContext):
dialog_context = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Please confirm.")
)
await dialog_context.prompt("prompt", options)
elif results.status == DialogTurnStatus.Complete:
selected_choice = results.result
await turn_context.send_activity(selected_choice.value)
await convo_state.save_changes(turn_context)
async def validator(prompt: PromptValidatorContext) -> bool:
assert prompt
if not prompt.recognized.succeeded:
await prompt.context.send_activity("Bad input.")
return prompt.recognized.succeeded
adapter = TestAdapter(exec_test)
convo_state = ConversationState(MemoryStorage())
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
culture = PromptCultureModel(
locale="custom-locale",
no_in_language="customNo",
yes_in_language="customYes",
separator="customSeparator",
inline_or="customInlineOr",
inline_or_more="customInlineOrMore",
)
custom_dict = {
culture.locale: (
Choice(culture.yes_in_language),
Choice(culture.no_in_language),
ChoiceFactoryOptions(
culture.separator, culture.inline_or, culture.inline_or_more, True
),
)
}
confirm_prompt = ConfirmPrompt("prompt", validator, choice_defaults=custom_dict)
dialogs.add(confirm_prompt)
step1 = await adapter.send(
Activity(type=ActivityTypes.message, text="Hello", locale=culture.locale)
)
await step1.assert_reply(
"Please confirm. (1) customYescustomInlineOr(2) customNo"
)
|
botbuilder-python/libraries/botbuilder-dialogs/tests/test_confirm_prompt.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/tests/test_confirm_prompt.py",
"repo_id": "botbuilder-python",
"token_count": 9261
}
| 412 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Optional
from aiohttp import ClientSession
from aiohttp.web import (
Request,
Response,
json_response,
WebSocketResponse,
HTTPBadRequest,
HTTPUnauthorized,
HTTPUnsupportedMediaType,
)
from botbuilder.core import Bot, BotFrameworkAdapterSettings
from botbuilder.core.streaming import (
BotFrameworkHttpAdapterBase,
StreamingRequestHandler,
)
from botbuilder.schema import Activity, ResourceResponse
from botbuilder.integration.aiohttp.streaming import AiohttpWebSocket
from botframework.connector.auth import AuthenticationConstants, JwtTokenValidation
class BotFrameworkHttpAdapter(BotFrameworkHttpAdapterBase):
def __init__(self, settings: BotFrameworkAdapterSettings):
# pylint: disable=invalid-name
super().__init__(settings)
self._AUTH_HEADER_NAME = "authorization"
self._CHANNEL_ID_HEADER_NAME = "channelid"
async def process(
self, request: Request, ws_response: WebSocketResponse, bot: Bot
) -> Optional[Response]:
# TODO: maybe it's not necessary to expose the ws_response
if not request:
raise TypeError("request can't be None")
# if ws_response is None:
# raise TypeError("ws_response can't be None")
if not bot:
raise TypeError("bot can't be None")
if request.method == "GET":
await self._connect_web_socket(bot, request, ws_response)
else:
# Deserialize the incoming Activity
if "application/json" in request.headers["Content-Type"]:
body = await request.json()
else:
raise HTTPUnsupportedMediaType()
activity = Activity().deserialize(body)
auth_header = (
request.headers["Authorization"]
if "Authorization" in request.headers
else ""
)
# Process the inbound activity with the bot
invoke_response = await self.process_activity(
activity, auth_header, bot.on_turn
)
if invoke_response:
return json_response(
data=invoke_response.body, status=invoke_response.status
)
return Response(status=201)
async def send_streaming_activity(self, activity: Activity) -> ResourceResponse:
# Check to see if any of this adapter's StreamingRequestHandlers is associated with this conversation.
possible_handlers = [
handler
for handler in self.request_handlers
if handler.service_url == activity.service_url
and handler.has_conversation(activity.conversation.id)
]
if possible_handlers:
if len(possible_handlers) > 1:
# The conversation has moved to a new connection and the former
# StreamingRequestHandler needs to be told to forget about it.
possible_handlers.sort(
key=lambda handler: handler.conversation_added_time(
activity.conversation.id
)
)
correct_handler = possible_handlers[-1]
for handler in possible_handlers:
if handler is not correct_handler:
handler.forget_conversation(activity.conversation.id)
return await correct_handler.send_activity(activity)
return await possible_handlers[0].send_activity(activity)
if self.connected_bot:
# This is a proactive message that will need a new streaming connection opened.
# The ServiceUrl of a streaming connection follows the pattern "url:[ChannelName]:[Protocol]:[Host]".
uri = activity.service_url.split(":")
            protocol = uri[-2]
            host = uri[-1]
# TODO: discuss if should abstract this from current package
# TODO: manage life cycle of sessions (when should we close them)
session = ClientSession()
aiohttp_ws = await session.ws_connect(protocol + host + "/api/messages")
web_socket = AiohttpWebSocket(aiohttp_ws, session)
handler = StreamingRequestHandler(self.connected_bot, self, web_socket)
if self.request_handlers is None:
self.request_handlers = []
self.request_handlers.append(handler)
return await handler.send_activity(activity)
return None
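    # Example (illustrative): a ServiceUrl following the documented pattern,
    # e.g. "url:directline:wss:host.example", splits into protocol "wss" and
    # host "host.example"; the channel name and host here are assumptions.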
async def _connect_web_socket(
self, bot: Bot, request: Request, ws_response: WebSocketResponse
):
if not request:
raise TypeError("request can't be None")
if ws_response is None:
raise TypeError("ws_response can't be None")
if not bot:
raise TypeError(f"'bot: {bot.__class__.__name__}' argument can't be None")
if not ws_response.can_prepare(request):
raise HTTPBadRequest(text="Upgrade to WebSocket is required.")
if not await self._http_authenticate_request(request):
raise HTTPUnauthorized(text="Request authentication failed.")
try:
await ws_response.prepare(request)
bf_web_socket = AiohttpWebSocket(ws_response)
request_handler = StreamingRequestHandler(bot, self, bf_web_socket)
if self.request_handlers is None:
self.request_handlers = []
self.request_handlers.append(request_handler)
await request_handler.listen()
except Exception as error:
import traceback # pylint: disable=import-outside-toplevel
traceback.print_exc()
raise Exception(f"Unable to create transport server. Error: {str(error)}")
async def _http_authenticate_request(self, request: Request) -> bool:
# pylint: disable=no-member
try:
if not await self._credential_provider.is_authentication_disabled():
auth_header = request.headers.get(self._AUTH_HEADER_NAME)
channel_id = request.headers.get(self._CHANNEL_ID_HEADER_NAME)
if not auth_header:
await self._write_unauthorized_response(self._AUTH_HEADER_NAME)
return False
if not channel_id:
await self._write_unauthorized_response(
self._CHANNEL_ID_HEADER_NAME
)
return False
claims_identity = await JwtTokenValidation.validate_auth_header(
auth_header,
self._credential_provider,
self._channel_provider,
channel_id,
)
if not claims_identity.is_authenticated:
raise HTTPUnauthorized()
self._credentials = (
self._credentials
or await self._BotFrameworkAdapter__get_app_credentials(
self.settings.app_id,
AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,
)
)
self.claims_identity = claims_identity
return True
except Exception as error:
raise error
async def _write_unauthorized_response(self, header_name: str):
raise HTTPUnauthorized(
text=f"Unable to authenticate. Missing header: {header_name}"
)
|
botbuilder-python/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_adapter.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_adapter.py",
"repo_id": "botbuilder-python",
"token_count": 3419
}
| 413 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from ._models_py3 import AppBasedLinkQuery
from ._models_py3 import ChannelInfo
from ._models_py3 import ConversationList
from ._models_py3 import FileConsentCard
from ._models_py3 import FileConsentCardResponse
from ._models_py3 import FileDownloadInfo
from ._models_py3 import FileInfoCard
from ._models_py3 import FileUploadInfo
from ._models_py3 import MeetingDetails
from ._models_py3 import MeetingInfo
from ._models_py3 import MeetingStartEventDetails
from ._models_py3 import MeetingEndEventDetails
from ._models_py3 import MessageActionsPayload
from ._models_py3 import MessageActionsPayloadApp
from ._models_py3 import MessageActionsPayloadAttachment
from ._models_py3 import MessageActionsPayloadBody
from ._models_py3 import MessageActionsPayloadConversation
from ._models_py3 import MessageActionsPayloadFrom
from ._models_py3 import MessageActionsPayloadMention
from ._models_py3 import MessageActionsPayloadReaction
from ._models_py3 import MessageActionsPayloadUser
from ._models_py3 import MessagingExtensionAction
from ._models_py3 import MessagingExtensionActionResponse
from ._models_py3 import MessagingExtensionAttachment
from ._models_py3 import MessagingExtensionParameter
from ._models_py3 import MessagingExtensionQuery
from ._models_py3 import MessagingExtensionQueryOptions
from ._models_py3 import MessagingExtensionResponse
from ._models_py3 import MessagingExtensionResult
from ._models_py3 import MessagingExtensionSuggestedAction
from ._models_py3 import NotificationInfo
from ._models_py3 import O365ConnectorCard
from ._models_py3 import O365ConnectorCardActionBase
from ._models_py3 import O365ConnectorCardActionCard
from ._models_py3 import O365ConnectorCardActionQuery
from ._models_py3 import O365ConnectorCardDateInput
from ._models_py3 import O365ConnectorCardFact
from ._models_py3 import O365ConnectorCardHttpPOST
from ._models_py3 import O365ConnectorCardImage
from ._models_py3 import O365ConnectorCardInputBase
from ._models_py3 import O365ConnectorCardMultichoiceInput
from ._models_py3 import O365ConnectorCardMultichoiceInputChoice
from ._models_py3 import O365ConnectorCardOpenUri
from ._models_py3 import O365ConnectorCardOpenUriTarget
from ._models_py3 import O365ConnectorCardSection
from ._models_py3 import O365ConnectorCardTextInput
from ._models_py3 import O365ConnectorCardViewAction
from ._models_py3 import SigninStateVerificationQuery
from ._models_py3 import TaskModuleContinueResponse
from ._models_py3 import TaskModuleMessageResponse
from ._models_py3 import TaskModuleRequest
from ._models_py3 import TaskModuleRequestContext
from ._models_py3 import TaskModuleResponse
from ._models_py3 import TaskModuleResponseBase
from ._models_py3 import TaskModuleTaskInfo
from ._models_py3 import TeamDetails
from ._models_py3 import TeamInfo
from ._models_py3 import TeamsChannelAccount
from ._models_py3 import TeamsChannelData
from ._models_py3 import TeamsPagedMembersResult
from ._models_py3 import TenantInfo
from ._models_py3 import TeamsMeetingInfo
from ._models_py3 import TeamsMeetingParticipant
from ._models_py3 import MeetingParticipantInfo
from ._models_py3 import CacheInfo
from ._models_py3 import TabContext
from ._models_py3 import TabEntityContext
from ._models_py3 import TabRequest
from ._models_py3 import TabResponseCard
from ._models_py3 import TabResponseCards
from ._models_py3 import TabResponsePayload
from ._models_py3 import TabResponse
from ._models_py3 import TabSubmit
from ._models_py3 import TabSubmitData
from ._models_py3 import TabSuggestedActions
from ._models_py3 import TaskModuleCardResponse
__all__ = [
"AppBasedLinkQuery",
"ChannelInfo",
"ConversationList",
"FileConsentCard",
"FileConsentCardResponse",
"FileDownloadInfo",
"FileInfoCard",
"FileUploadInfo",
"MeetingDetails",
"MeetingInfo",
"MeetingStartEventDetails",
"MeetingEndEventDetails",
"MessageActionsPayload",
"MessageActionsPayloadApp",
"MessageActionsPayloadAttachment",
"MessageActionsPayloadBody",
"MessageActionsPayloadConversation",
"MessageActionsPayloadFrom",
"MessageActionsPayloadMention",
"MessageActionsPayloadReaction",
"MessageActionsPayloadUser",
"MessagingExtensionAction",
"MessagingExtensionActionResponse",
"MessagingExtensionAttachment",
"MessagingExtensionParameter",
"MessagingExtensionQuery",
"MessagingExtensionQueryOptions",
"MessagingExtensionResponse",
"MessagingExtensionResult",
"MessagingExtensionSuggestedAction",
"NotificationInfo",
"O365ConnectorCard",
"O365ConnectorCardActionBase",
"O365ConnectorCardActionCard",
"O365ConnectorCardActionQuery",
"O365ConnectorCardDateInput",
"O365ConnectorCardFact",
"O365ConnectorCardHttpPOST",
"O365ConnectorCardImage",
"O365ConnectorCardInputBase",
"O365ConnectorCardMultichoiceInput",
"O365ConnectorCardMultichoiceInputChoice",
"O365ConnectorCardOpenUri",
"O365ConnectorCardOpenUriTarget",
"O365ConnectorCardSection",
"O365ConnectorCardTextInput",
"O365ConnectorCardViewAction",
"SigninStateVerificationQuery",
"TaskModuleContinueResponse",
"TaskModuleMessageResponse",
"TaskModuleRequest",
"TaskModuleRequestContext",
"TaskModuleResponse",
"TaskModuleResponseBase",
"TaskModuleTaskInfo",
"TeamDetails",
"TeamInfo",
"TeamsChannelAccount",
"TeamsChannelData",
"TeamsPagedMembersResult",
"TenantInfo",
"TeamsMeetingInfo",
"TeamsMeetingParticipant",
"MeetingParticipantInfo",
"CacheInfo",
"TabContext",
"TabEntityContext",
"TabRequest",
"TabResponseCard",
"TabResponseCards",
"TabResponsePayload",
"TabResponse",
"TabSubmit",
"TabSubmitData",
"TabSuggestedActions",
"TaskModuleCardResponse",
]
|
botbuilder-python/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py",
"repo_id": "botbuilder-python",
"token_count": 1800
}
| 414 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
from aiounittest import AsyncTestCase
from botbuilder.core import MessageFactory
from botbuilder.dialogs import (
ComponentDialog,
DialogContext,
DialogTurnResult,
DialogTurnStatus,
PromptOptions,
TextPrompt,
WaterfallDialog,
WaterfallStepContext,
)
from botbuilder.schema import Activity
from botbuilder.testing import DialogTestClient, DialogTestLogger
class DialogTestClientTest(AsyncTestCase):
"""Tests for dialog test client."""
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
logging.basicConfig(format="", level=logging.INFO)
def test_init(self):
client = DialogTestClient(channel_or_adapter="test", target_dialog=None)
self.assertIsInstance(client, DialogTestClient)
def test_init_with_custom_channel_id(self):
client = DialogTestClient(channel_or_adapter="custom", target_dialog=None)
self.assertEqual("custom", client.test_adapter.template.channel_id)
async def test_single_turn_waterfall_dialog(self):
async def step1(step: DialogContext) -> DialogTurnResult:
await step.context.send_activity("hello")
return await step.end_dialog()
dialog = WaterfallDialog("waterfall", [step1])
client = DialogTestClient("test", dialog)
reply = await client.send_activity("hello")
self.assertEqual("hello", reply.text)
self.assertEqual("test", reply.channel_id)
self.assertEqual(DialogTurnStatus.Complete, client.dialog_turn_result.status)
async def test_single_turn_waterfall_dialog_with_logger(self):
"""
Test for single turn waterfall dialog with logger with test client.
To view the console output:
* unittest
```bash
python -m unittest -v -k logger
```
* pytest
```bash
pytest --log-cli-level=INFO --log-format="%(message)s" -k logger
```
The results are similar to:
```
User: Text = hello
-> ts: 13:39:59
Bot: Text = hello
Speak = None
InputHint = acceptingInput
-> ts: 13:39:59 elapsed 8 ms
```
:return: None
:rtype: None
"""
async def step1(step: DialogContext) -> DialogTurnResult:
await step.context.send_activity("hello")
return await step.end_dialog()
dialog = WaterfallDialog("waterfall", [step1])
client = DialogTestClient(
"test",
dialog,
initial_dialog_options=None,
middlewares=[DialogTestLogger()],
)
reply = await client.send_activity("hello")
self.assertEqual("hello", reply.text)
self.assertEqual("test", reply.channel_id)
self.assertEqual(DialogTurnStatus.Complete, client.dialog_turn_result.status)
async def test_two_turn_waterfall_dialog(self):
async def step1(step: WaterfallStepContext) -> DialogTurnResult:
await step.context.send_activity("hello")
await step.context.send_activity(Activity(type="typing"))
return await step.next(result=None)
async def step2(step: WaterfallStepContext) -> DialogTurnResult:
await step.context.send_activity("hello 2")
return await step.end_dialog()
dialog = WaterfallDialog("waterfall", [step1, step2])
client = DialogTestClient(
"test",
dialog,
initial_dialog_options=None,
middlewares=[DialogTestLogger()],
)
reply = await client.send_activity("hello")
self.assertEqual("hello", reply.text)
reply = client.get_next_reply()
self.assertEqual("typing", reply.type)
reply = client.get_next_reply()
self.assertEqual("hello 2", reply.text)
self.assertEqual(DialogTurnStatus.Complete, client.dialog_turn_result.status)
async def test_component_dialog(self):
component = MainDialog("component")
client = DialogTestClient(
"test",
component,
initial_dialog_options=None,
middlewares=[DialogTestLogger()],
)
reply = await client.send_activity("hello")
self.assertEqual("Tell me something", reply.text)
reply = await client.send_activity("foo")
self.assertEqual("you said: foo", reply.text)
self.assertEqual(DialogTurnStatus.Complete, client.dialog_turn_result.status)
class MainDialog(ComponentDialog):
def __init__(self, dialog_id: str):
super().__init__(dialog_id)
dialog = WaterfallDialog("waterfall", [self.step1, self.step2])
self.add_dialog(TextPrompt(TextPrompt.__name__))
self.add_dialog(dialog)
self.initial_dialog_id = dialog.id
@staticmethod
async def step1(step: WaterfallStepContext) -> DialogTurnResult:
options = PromptOptions(prompt=MessageFactory.text("Tell me something"))
return await step.prompt(TextPrompt.__name__, options)
@staticmethod
async def step2(step: WaterfallStepContext) -> DialogTurnResult:
await step.context.send_activity(
MessageFactory.text(f"you said: {step.result}")
)
return await step.end_dialog()
|
botbuilder-python/libraries/botbuilder-testing/tests/test_dialog_test_client.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-testing/tests/test_dialog_test_client.py",
"repo_id": "botbuilder-python",
"token_count": 2268
}
| 415 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=missing-docstring
from .authentication_constants import *
from .authenticate_request_result import *
from .bot_framework_authentication import *
from .bot_framework_authentication_factory import *
from .government_constants import *
from .channel_provider import *
from .connector_factory import *
from .simple_channel_provider import *
from .app_credentials import *
from .microsoft_app_credentials import *
from .microsoft_government_app_credentials import *
from .certificate_app_credentials import *
from .claims_identity import *
from .jwt_token_validation import *
from .credential_provider import *
from .channel_validation import *
from .emulator_validation import *
from .jwt_token_extractor import *
from .password_service_client_credential_factory import *
from .service_client_credentials_factory import *
from .user_token_client import *
from .authentication_configuration import *
|
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/__init__.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/__init__.py",
"repo_id": "botbuilder-python",
"token_count": 312
}
| 416 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .authentication_configuration import AuthenticationConfiguration
from .verify_options import VerifyOptions
from .authentication_constants import AuthenticationConstants
from .jwt_token_extractor import JwtTokenExtractor
from .claims_identity import ClaimsIdentity
from .credential_provider import CredentialProvider
class ChannelValidation:
open_id_metadata_endpoint: str = None
# This claim is ONLY used in the Channel Validation, and not in the emulator validation
SERVICE_URL_CLAIM = "serviceurl"
#
# TO BOT FROM CHANNEL: Token validation parameters when connecting to a bot
#
TO_BOT_FROM_CHANNEL_TOKEN_VALIDATION_PARAMETERS = VerifyOptions(
issuer=[AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER],
# Audience validation takes place manually in code.
audience=None,
clock_tolerance=5 * 60,
ignore_expiration=False,
)
@staticmethod
async def authenticate_channel_token_with_service_url(
auth_header: str,
credentials: CredentialProvider,
service_url: str,
channel_id: str,
auth_configuration: AuthenticationConfiguration = None,
) -> ClaimsIdentity:
"""Validate the incoming Auth Header
Validate the incoming Auth Header as a token sent from the Bot Framework Service.
A token issued by the Bot Framework emulator will FAIL this check.
:param auth_header: The raw HTTP header in the format: 'Bearer [longString]'
:type auth_header: str
:param credentials: The user defined set of valid credentials, such as the AppId.
:type credentials: CredentialProvider
:param service_url: Claim value that must match in the identity.
:type service_url: str
:return: A valid ClaimsIdentity.
:raises Exception:
"""
identity = await ChannelValidation.authenticate_channel_token(
auth_header, credentials, channel_id, auth_configuration
)
service_url_claim = identity.get_claim_value(
ChannelValidation.SERVICE_URL_CLAIM
)
if service_url_claim != service_url:
# Claim must match. Not Authorized.
raise PermissionError("Unauthorized. service_url claim do not match.")
return identity
@staticmethod
async def authenticate_channel_token(
auth_header: str,
credentials: CredentialProvider,
channel_id: str,
auth_configuration: AuthenticationConfiguration = None,
) -> ClaimsIdentity:
"""Validate the incoming Auth Header
Validate the incoming Auth Header as a token sent from the Bot Framework Service.
A token issued by the Bot Framework emulator will FAIL this check.
:param auth_header: The raw HTTP header in the format: 'Bearer [longString]'
:type auth_header: str
:param credentials: The user defined set of valid credentials, such as the AppId.
:type credentials: CredentialProvider
:return: A valid ClaimsIdentity.
:raises Exception:
"""
auth_configuration = auth_configuration or AuthenticationConfiguration()
metadata_endpoint = (
ChannelValidation.open_id_metadata_endpoint
if ChannelValidation.open_id_metadata_endpoint
else AuthenticationConstants.TO_BOT_FROM_CHANNEL_OPENID_METADATA_URL
)
token_extractor = JwtTokenExtractor(
ChannelValidation.TO_BOT_FROM_CHANNEL_TOKEN_VALIDATION_PARAMETERS,
metadata_endpoint,
AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,
)
identity = await token_extractor.get_identity_from_auth_header(
auth_header, channel_id, auth_configuration.required_endorsements
)
return await ChannelValidation.validate_identity(identity, credentials)
@staticmethod
async def validate_identity(
identity: ClaimsIdentity, credentials: CredentialProvider
) -> ClaimsIdentity:
if not identity:
# No valid identity. Not Authorized.
raise PermissionError("Unauthorized. No valid identity.")
if not identity.is_authenticated:
# The token is in some way invalid. Not Authorized.
raise PermissionError("Unauthorized. Is not authenticated")
# Now check that the AppID in the claimset matches
# what we're looking for. Note that in a multi-tenant bot, this value
# comes from developer code that may be reaching out to a service, hence the
# Async validation.
# Look for the "aud" claim, but only if issued from the Bot Framework
if (
identity.get_claim_value(AuthenticationConstants.ISSUER_CLAIM)
!= AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER
):
# The relevant Audience Claim MUST be present. Not Authorized.
raise PermissionError("Unauthorized. Audience Claim MUST be present.")
# The AppId from the claim in the token must match the AppId specified by the developer.
# Note that the Bot Framework uses the Audience claim ("aud") to pass the AppID.
aud_claim = identity.get_claim_value(AuthenticationConstants.AUDIENCE_CLAIM)
        is_valid_app_id = await credentials.is_valid_appid(aud_claim or "")
if not is_valid_app_id:
# The AppId is not valid or not present. Not Authorized.
            raise PermissionError(
                f"Unauthorized. Invalid AppId passed on token: {aud_claim}"
            )
return identity
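# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A minimal illustration of validating an incoming Authorization header, assuming
# the SimpleCredentialProvider from this package; the service URL and channel id
# below are placeholder values, not requirements.
from .credential_provider import SimpleCredentialProvider


async def _example_validate_incoming(auth_header: str) -> ClaimsIdentity:
    # App id/password would normally come from configuration.
    credentials = SimpleCredentialProvider("<app-id>", "<app-password>")
    return await ChannelValidation.authenticate_channel_token_with_service_url(
        auth_header,
        credentials,
        service_url="https://smba.trafficmanager.net/amer/",  # must equal the token's serviceurl claim
        channel_id="msteams",
    )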
|
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/channel_validation.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/channel_validation.py",
"repo_id": "botbuilder-python",
"token_count": 2152
}
| 417 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from datetime import timedelta
from typing import Dict, Union
import jwt
from .authentication_configuration import AuthenticationConfiguration
from .authentication_constants import AuthenticationConstants
from .claims_identity import ClaimsIdentity
from .credential_provider import CredentialProvider
from .government_constants import GovernmentConstants
from .verify_options import VerifyOptions
from .jwt_token_extractor import JwtTokenExtractor
from .channel_provider import ChannelProvider
class SkillValidation:
    # TODO: Remove circular dependencies after C# refactor
# pylint: disable=import-outside-toplevel
"""
Validates JWT tokens sent to and from a Skill.
"""
@staticmethod
def is_skill_token(auth_header: str) -> bool:
"""
        Determines if a given Auth header is from a skill-to-bot or bot-to-skill request.
:param auth_header: Bearer Token, in the "Bearer [Long String]" Format.
:return bool:
"""
from .jwt_token_validation import JwtTokenValidation
if not JwtTokenValidation.is_valid_token_format(auth_header):
return False
bearer_token = auth_header.split(" ")[1]
# Parse the Big Long String into an actual token.
token = jwt.decode(bearer_token, options={"verify_signature": False})
return SkillValidation.is_skill_claim(token)
@staticmethod
def is_skill_claim(claims: Dict[str, object]) -> bool:
"""
Checks if the given list of claims represents a skill.
:param claims: A dict of claims.
:return bool:
"""
if (
claims.get(AuthenticationConstants.APP_ID_CLAIM, None)
== AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
):
return True
if AuthenticationConstants.VERSION_CLAIM not in claims:
return False
audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)
# The audience is https://api.botframework.com and not an appId.
if (
not audience
or audience == AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER
):
return False
from .jwt_token_validation import JwtTokenValidation
app_id = JwtTokenValidation.get_app_id_from_claims(claims)
if not app_id:
return False
        # Skill claims must contain an app ID, and the app ID must be different from the audience.
return app_id != audience
@staticmethod
async def authenticate_channel_token(
auth_header: str,
credentials: CredentialProvider,
channel_service_or_provider: Union[str, ChannelProvider],
channel_id: str,
auth_configuration: AuthenticationConfiguration,
) -> ClaimsIdentity:
if auth_configuration is None:
raise Exception(
"auth_configuration cannot be None in SkillValidation.authenticate_channel_token"
)
from .jwt_token_validation import JwtTokenValidation
if isinstance(channel_service_or_provider, ChannelProvider):
is_gov = channel_service_or_provider.is_government()
else:
is_gov = JwtTokenValidation.is_government(channel_service_or_provider)
open_id_metadata_url = (
GovernmentConstants.TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL
if is_gov
else AuthenticationConstants.TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL
)
token_validation_parameters = VerifyOptions(
issuer=[
"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/", # v3.1, 1.0 token
"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0", # v3.1, 2.0 token
"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/", # v3.2, 1.0 token
"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0", # v3.2, 2.0 token
"https://sts.windows.net/cab8a31a-1906-4287-a0d8-4eef66b95f6e/", # US Gov, 1.0 token
"https://login.microsoftonline.us/cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0", # US Gov, 2.0 token
"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/", # US Gov, 1.0 token
"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0", # US Gov, 2.0 token
],
audience=None,
clock_tolerance=timedelta(minutes=5),
ignore_expiration=False,
)
        if auth_configuration.valid_token_issuers:
            # valid_token_issuers is a list, so extend (not append) the issuer list.
            token_validation_parameters.issuer.extend(
                auth_configuration.valid_token_issuers
            )
token_extractor = JwtTokenExtractor(
token_validation_parameters,
open_id_metadata_url,
AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,
)
identity = await token_extractor.get_identity_from_auth_header(
auth_header, channel_id, auth_configuration.required_endorsements
)
await SkillValidation._validate_identity(identity, credentials)
return identity
@staticmethod
def create_anonymous_skill_claim():
"""
Creates a ClaimsIdentity for an anonymous (unauthenticated) skill.
:return ClaimsIdentity:
"""
return ClaimsIdentity(
{
AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
},
True,
AuthenticationConstants.ANONYMOUS_AUTH_TYPE,
)
@staticmethod
async def _validate_identity(
identity: ClaimsIdentity, credentials: CredentialProvider
):
if not identity:
# No valid identity. Not Authorized.
raise PermissionError("Invalid Identity")
if not identity.is_authenticated:
# The token is in some way invalid. Not Authorized.
raise PermissionError("Token Not Authenticated")
version_claim = identity.claims.get(AuthenticationConstants.VERSION_CLAIM)
if not version_claim:
# No version claim
raise PermissionError(
f"'{AuthenticationConstants.VERSION_CLAIM}' claim is required on skill Tokens."
)
# Look for the "aud" claim, but only if issued from the Bot Framework
audience_claim = identity.claims.get(AuthenticationConstants.AUDIENCE_CLAIM)
if not audience_claim:
# Claim is not present or doesn't have a value. Not Authorized.
raise PermissionError(
f"'{AuthenticationConstants.AUDIENCE_CLAIM}' claim is required on skill Tokens."
)
if not await credentials.is_valid_appid(audience_claim):
# The AppId is not valid. Not Authorized.
raise PermissionError("Invalid audience.")
from .jwt_token_validation import JwtTokenValidation
app_id = JwtTokenValidation.get_app_id_from_claims(identity.claims)
if not app_id:
# Invalid AppId
raise PermissionError("Invalid app_id.")
|
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/skill_validation.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/skill_validation.py",
"repo_id": "botbuilder-python",
"token_count": 3155
}
| 418 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
# TODO: add InvokeResponse to botbuilder-schema or rethink dependencies
from botbuilder.schema import Activity
class BotFrameworkClient(ABC):
@abstractmethod
async def post_activity(
self,
from_bot_id: str,
to_bot_id: str,
to_url: str,
service_url: str,
conversation_id: str,
activity: Activity,
) -> "botbuilder.core.InvokeResponse":
"""
        Forwards an activity to another bot.
:param from_bot_id: The MicrosoftAppId of the bot sending the activity.
:param to_bot_id: The MicrosoftAppId of the bot receiving the activity.
:param to_url: The URL of the bot receiving the activity.
:param service_url: The callback Url for the skill host.
:param conversation_id: A conversation ID to use for the conversation with the skill.
:param activity: Activity to forward.
"""
raise NotImplementedError()
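# --- Hedged implementation sketch (editor's addition, not part of the original module) ---
# A minimal concrete subclass, assuming aiohttp is available. Real implementations
# also mint a bearer token for to_bot_id and rewrite activity.service_url and the
# conversation id; this sketch only shows the shape of the POST.
import aiohttp


class _ExampleHttpBotFrameworkClient(BotFrameworkClient):
    async def post_activity(
        self,
        from_bot_id: str,
        to_bot_id: str,
        to_url: str,
        service_url: str,
        conversation_id: str,
        activity: Activity,
    ) -> "botbuilder.core.InvokeResponse":
        # Local import mirrors the TODO above: avoids a hard dependency at module load.
        from botbuilder.core import InvokeResponse

        async with aiohttp.ClientSession() as session:
            async with session.post(to_url, json=activity.serialize()) as resp:
                # Production code should also handle empty or non-JSON bodies.
                return InvokeResponse(status=resp.status, body=await resp.json())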
|
botbuilder-python/libraries/botframework-connector/botframework/connector/skills/bot_framework_client.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/skills/bot_framework_client.py",
"repo_id": "botbuilder-python",
"token_count": 391
}
| 419 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
# pylint: disable=invalid-name
class AadResourceUrls(Model):
"""AadResourceUrls.
:param resource_urls:
:type resource_urls: list[str]
"""
_attribute_map = {"resource_urls": {"key": "resourceUrls", "type": "[str]"}}
def __init__(self, *, resource_urls=None, **kwargs) -> None:
super(AadResourceUrls, self).__init__(**kwargs)
self.resource_urls = resource_urls
class Error(Model):
"""Error.
:param code:
:type code: str
:param message:
:type message: str
:param inner_http_error:
:type inner_http_error: ~botframework.tokenapi.models.InnerHttpError
"""
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"inner_http_error": {"key": "innerHttpError", "type": "InnerHttpError"},
}
def __init__(
self, *, code: str = None, message: str = None, inner_http_error=None, **kwargs
) -> None:
super(Error, self).__init__(**kwargs)
self.code = code
self.message = message
self.inner_http_error = inner_http_error
class ErrorResponse(Model):
"""ErrorResponse.
:param error:
:type error: ~botframework.tokenapi.models.Error
"""
_attribute_map = {"error": {"key": "error", "type": "Error"}}
def __init__(self, *, error=None, **kwargs) -> None:
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(
deserialize, response, "ErrorResponse", *args
)
class InnerHttpError(Model):
"""InnerHttpError.
:param status_code:
:type status_code: int
:param body:
:type body: object
"""
_attribute_map = {
"status_code": {"key": "statusCode", "type": "int"},
"body": {"key": "body", "type": "object"},
}
def __init__(self, *, status_code: int = None, body=None, **kwargs) -> None:
super(InnerHttpError, self).__init__(**kwargs)
self.status_code = status_code
self.body = body
class SignInUrlResponse(Model):
"""SignInUrlResponse.
:param sign_in_link:
:type sign_in_link: str
:param token_exchange_resource:
:type token_exchange_resource:
~botframework.tokenapi.models.TokenExchangeResource
"""
_attribute_map = {
"sign_in_link": {"key": "signInLink", "type": "str"},
"token_exchange_resource": {
"key": "tokenExchangeResource",
"type": "TokenExchangeResource",
},
}
def __init__(
self, *, sign_in_link: str = None, token_exchange_resource=None, **kwargs
) -> None:
super(SignInUrlResponse, self).__init__(**kwargs)
self.sign_in_link = sign_in_link
self.token_exchange_resource = token_exchange_resource
class TokenExchangeRequest(Model):
"""TokenExchangeRequest.
:param uri:
:type uri: str
:param token:
:type token: str
"""
_attribute_map = {
"uri": {"key": "uri", "type": "str"},
"token": {"key": "token", "type": "str"},
}
def __init__(self, *, uri: str = None, token: str = None, **kwargs) -> None:
super(TokenExchangeRequest, self).__init__(**kwargs)
self.uri = uri
self.token = token
class TokenExchangeResource(Model):
"""TokenExchangeResource.
:param id:
:type id: str
:param uri:
:type uri: str
:param provider_id:
:type provider_id: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"uri": {"key": "uri", "type": "str"},
"provider_id": {"key": "providerId", "type": "str"},
}
def __init__(
self, *, id: str = None, uri: str = None, provider_id: str = None, **kwargs
) -> None:
super(TokenExchangeResource, self).__init__(**kwargs)
self.id = id
self.uri = uri
self.provider_id = provider_id
class TokenResponse(Model):
"""TokenResponse.
:param channel_id:
:type channel_id: str
:param connection_name:
:type connection_name: str
:param token:
:type token: str
:param expiration:
:type expiration: str
"""
_attribute_map = {
"channel_id": {"key": "channelId", "type": "str"},
"connection_name": {"key": "connectionName", "type": "str"},
"token": {"key": "token", "type": "str"},
"expiration": {"key": "expiration", "type": "str"},
}
def __init__(
self,
*,
channel_id: str = None,
connection_name: str = None,
token: str = None,
expiration: str = None,
**kwargs
) -> None:
super(TokenResponse, self).__init__(**kwargs)
self.channel_id = channel_id
self.connection_name = connection_name
self.token = token
self.expiration = expiration
class TokenStatus(Model):
"""The status of a particular token.
    :param channel_id: The channelId the token status pertains to
:type channel_id: str
:param connection_name: The name of the connection the token status
pertains to
:type connection_name: str
:param has_token: True if a token is stored for this ConnectionName
:type has_token: bool
    :param service_provider_display_name: The display name of the service
     provider to which this token belongs
:type service_provider_display_name: str
"""
_attribute_map = {
"channel_id": {"key": "channelId", "type": "str"},
"connection_name": {"key": "connectionName", "type": "str"},
"has_token": {"key": "hasToken", "type": "bool"},
"service_provider_display_name": {
"key": "serviceProviderDisplayName",
"type": "str",
},
}
def __init__(
self,
*,
channel_id: str = None,
connection_name: str = None,
has_token: bool = None,
service_provider_display_name: str = None,
**kwargs
) -> None:
super(TokenStatus, self).__init__(**kwargs)
self.channel_id = channel_id
self.connection_name = connection_name
self.has_token = has_token
self.service_provider_display_name = service_provider_display_name
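# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# msrest models serialize to and deserialize from the wire-format keys declared
# in _attribute_map; a quick round-trip with TokenResponse, values illustrative.
def _example_token_response_roundtrip() -> TokenResponse:
    original = TokenResponse(
        channel_id="msteams",
        connection_name="GraphConnection",
        token="<opaque-token>",
        expiration="2017-12-26T13:51:32Z",
    )
    wire = original.serialize()  # {"channelId": ..., "connectionName": ..., ...}
    return TokenResponse.deserialize(wire)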
|
botbuilder-python/libraries/botframework-connector/botframework/connector/token_api/models/_models_py3.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/token_api/models/_models_py3.py",
"repo_id": "botbuilder-python",
"token_count": 2806
}
| 420 |
interactions:
- request:
body: '{"recipient": {"id": "U19KH8EHJ:T03CWQ0QB"}, "channelId": "slack", "attachments":
[{"content": {"title": "A static image", "text": "JPEG image", "images": [{"url":
"https://docs.microsoft.com/en-us/bot-framework/media/designing-bots/core/dialogs-screens.png"}]},
"contentType": "application/vnd.microsoft.card.hero"}, {"content": {"title":
"An animation", "subtitle": "GIF image", "images": [{"url": "http://i.giphy.com/Ki55RUbOV5njy.gif"}]},
"contentType": "application/vnd.microsoft.card.hero"}], "from": {"id": "B21UTEF8S:T03CWQ0QB"},
"type": "message"}'
headers:
Accept: [application/json]
Accept-Encoding: ['gzip, deflate']
Connection: [keep-alive]
Content-Length: ['556']
Content-Type: [application/json; charset=utf-8]
User-Agent: [python/3.5.3 (Linux-4.11.0-041100-generic-x86_64-with-Ubuntu-17.04-zesty)
requests/2.18.1 msrest/0.4.23 azure-botframework-connector/v3.0]
method: POST
uri: https://slack.botframework.com/v3/conversations/B21UTEF8S%3AT03CWQ0QB%3AD2369CT7C/activities
response:
body: {string: "{\r\n \"id\": \"1514296292.000128\"\r\n}"}
headers:
cache-control: [no-cache]
content-length: ['33']
content-type: [application/json; charset=utf-8]
date: ['Tue, 26 Dec 2017 13:51:32 GMT']
expires: ['-1']
pragma: [no-cache]
request-context: ['appId=cid-v1:6814484e-c0d5-40ea-9dba-74ff29ca4f62']
server: [Microsoft-IIS/10.0]
strict-transport-security: [max-age=31536000]
vary: [Accept-Encoding]
x-powered-by: [ASP.NET]
status: {code: 200, message: OK}
version: 1
|
botbuilder-python/libraries/botframework-connector/tests/recordings/test_conversations_send_to_conversation_with_attachment.yaml/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/tests/recordings/test_conversations_send_to_conversation_with_attachment.yaml",
"repo_id": "botbuilder-python",
"token_count": 768
}
| 421 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .cancel_disassembler import CancelDisassembler
from .payload_disassembler import PayloadDisassembler
from .request_disassembler import RequestDisassembler
from .response_disassembler import ResponseDisassembler
from .response_message_stream_disassembler import ResponseMessageStreamDisassembler
__all__ = [
"CancelDisassembler",
"PayloadDisassembler",
"RequestDisassembler",
"ResponseDisassembler",
"ResponseMessageStreamDisassembler",
]
|
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/disassemblers/__init__.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/disassemblers/__init__.py",
"repo_id": "botbuilder-python",
"token_count": 159
}
| 422 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import uuid4, UUID
class ResponseMessageStream:
# pylint: disable=invalid-name
def __init__(self, *, id: UUID = None, content: object = None):
self.id = id or uuid4()
self.content = content
|
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/response_message_stream.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/response_message_stream.py",
"repo_id": "botbuilder-python",
"token_count": 112
}
| 423 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .web_socket import WebSocketMessage
from .web_socket import WebSocket
from .web_socket_close_status import WebSocketCloseStatus
from .web_socket_server import WebSocketServer
from .web_socket_message_type import WebSocketMessageType
from .web_socket_transport import WebSocketTransport
from .web_socket_state import WebSocketState
__all__ = [
"WebSocketMessage",
"WebSocket",
"WebSocketCloseStatus",
"WebSocketMessageType",
"WebSocketServer",
"WebSocketTransport",
"WebSocketState",
]
|
botbuilder-python/libraries/botframework-streaming/botframework/streaming/transport/web_socket/__init__.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/transport/web_socket/__init__.py",
"repo_id": "botbuilder-python",
"token_count": 183
}
| 424 |
import json
import aiounittest
from botbuilder.schema import Activity
from botframework.streaming import ReceiveRequest, StreamingRequest
from botframework.streaming.payloads import ResponseMessageStream
class TestRequests(aiounittest.AsyncTestCase):
async def test_receive_request_empty_streams(self):
sut = ReceiveRequest()
self.assertIsNotNone(sut.streams)
self.assertEqual(0, len(sut.streams))
async def test_receive_request_null_properties(self):
sut = ReceiveRequest()
self.assertIsNone(sut.verb)
self.assertIsNone(sut.path)
async def test_streaming_request_null_properties(self):
sut = StreamingRequest()
self.assertIsNone(sut.verb)
self.assertIsNone(sut.path)
async def test_streaming_request_add_stream_null_throws(self):
sut = StreamingRequest()
with self.assertRaises(TypeError):
sut.add_stream(None)
async def test_streaming_request_add_stream_success(self):
sut = StreamingRequest()
content = "hi"
sut.add_stream(content)
self.assertIsNotNone(sut.streams)
self.assertEqual(1, len(sut.streams))
self.assertEqual(content, sut.streams[0].content)
async def test_streaming_request_add_stream_existing_list_success(self):
sut = StreamingRequest()
content = "hi"
content_2 = "hello"
sut.streams = [ResponseMessageStream(content=content_2)]
sut.add_stream(content)
self.assertIsNotNone(sut.streams)
self.assertEqual(2, len(sut.streams))
self.assertEqual(content_2, sut.streams[0].content)
self.assertEqual(content, sut.streams[1].content)
async def test_streaming_request_create_get_success(self):
sut = StreamingRequest.create_get()
self.assertEqual(StreamingRequest.GET, sut.verb)
self.assertIsNone(sut.path)
self.assertIsNone(sut.streams)
async def test_streaming_request_create_post_success(self):
sut = StreamingRequest.create_post()
self.assertEqual(StreamingRequest.POST, sut.verb)
self.assertIsNone(sut.path)
self.assertIsNone(sut.streams)
async def test_streaming_request_create_delete_success(self):
sut = StreamingRequest.create_delete()
self.assertEqual(StreamingRequest.DELETE, sut.verb)
self.assertIsNone(sut.path)
self.assertIsNone(sut.streams)
async def test_streaming_request_create_put_success(self):
sut = StreamingRequest.create_put()
self.assertEqual(StreamingRequest.PUT, sut.verb)
self.assertIsNone(sut.path)
self.assertIsNone(sut.streams)
async def test_streaming_request_create_with_body_success(self):
content = "hi"
sut = StreamingRequest.create_request(StreamingRequest.POST, "123", content)
self.assertEqual(StreamingRequest.POST, sut.verb)
self.assertEqual("123", sut.path)
self.assertIsNotNone(sut.streams)
self.assertEqual(1, len(sut.streams))
self.assertEqual(content, sut.streams[0].content)
async def test_streaming_request_set_body_string_success(self):
sut = StreamingRequest()
sut.set_body("123")
self.assertIsNotNone(sut.streams)
self.assertEqual(1, len(sut.streams))
self.assertIsInstance(sut.streams[0].content, list)
self.assertIsInstance(sut.streams[0].content[0], int)
self.assertEqual("123", bytes(sut.streams[0].content).decode("utf-8-sig"))
async def test_streaming_request_set_body_none_does_not_throw(self):
sut = StreamingRequest()
sut.set_body(None)
async def test_streaming_request_set_body_success(self):
sut = StreamingRequest()
activity = Activity(text="hi", type="message")
sut.set_body(activity)
self.assertIsNotNone(sut.streams)
self.assertEqual(1, len(sut.streams))
self.assertIsInstance(sut.streams[0].content, list)
self.assertIsInstance(sut.streams[0].content[0], int)
assert_activity = Activity.deserialize(
json.loads(bytes(sut.streams[0].content).decode("utf-8-sig"))
)
self.assertEqual(activity.text, assert_activity.text)
self.assertEqual(activity.type, assert_activity.type)
|
botbuilder-python/libraries/botframework-streaming/tests/test_requests.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/tests/test_requests.py",
"repo_id": "botbuilder-python",
"token_count": 1838
}
| 425 |
# BotFramework Token
> see https://aka.ms/autorest
Configuration for generating the BotFramework Token SDK.
``` yaml
add-credentials: true
openapi-type: data-plane
```
The current release for the BotFramework Token is v3.0.
# Releases
## Token API 3.0
``` yaml
input-file: TokenAPI.json
```
### Token API 3.0 - Python Settings
These settings apply only when `--python` is specified on the command line.
DO NOT use `--basic-setup-py` as this will overwrite the existing setup.py files.
If you upgrade autorest from npm you may need to run `autorest --reset` before continuing.
``` yaml $(python)
python:
license-header: MICROSOFT_MIT_NO_VERSION
add-credentials: true
payload-flattening-threshold: 2
  namespace: botframework.tokenapi
package-name: botframework-Token
override-client-name: TokenApiClient
clear-output-folder: true
output-folder: ./tokenApi
```
|
botbuilder-python/swagger/tokenAPI.md/0
|
{
"file_path": "botbuilder-python/swagger/tokenAPI.md",
"repo_id": "botbuilder-python",
"token_count": 278
}
| 426 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.dialogs import (
ComponentDialog,
DialogTurnResult,
WaterfallDialog,
WaterfallStepContext
)
from botbuilder.dialogs.prompts import (
OAuthPrompt,
OAuthPromptSettings
)
from botbuilder.schema import TokenResponse
from botbuilder.core import MessageFactory
from config import DefaultConfig
class MainDialog(ComponentDialog):
def __init__(self, configuration: DefaultConfig):
super().__init__(MainDialog.__name__)
self._connection_name = configuration.CONNECTION_NAME
self.add_dialog(
OAuthPrompt(
OAuthPrompt.__name__,
OAuthPromptSettings(
connection_name=self._connection_name,
text=f"Sign In to AAD",
title="Sign In",
),
)
)
self.add_dialog(
WaterfallDialog(
WaterfallDialog.__name__, [self._sign_in_step, self._show_token_response]
)
)
self.initial_dialog_id = WaterfallDialog.__name__
async def _sign_in_step(self, context: WaterfallStepContext) -> DialogTurnResult:
return await context.begin_dialog(OAuthPrompt.__name__)
async def _show_token_response(self, context: WaterfallStepContext) -> DialogTurnResult:
result: TokenResponse = context.result
if not result:
await context.context.send_activity(MessageFactory.text("No token response from OAuthPrompt"))
else:
await context.context.send_activity(MessageFactory.text(f"Your token is {result.token}"))
return await context.end_dialog()
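# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# One common way to drive this dialog from a bot's message handler, assuming a
# ConversationState instance is available; DialogExtensions.run_dialog creates or
# continues the dialog stack as needed. Names here are illustrative.
from botbuilder.core import ConversationState, TurnContext
from botbuilder.dialogs import DialogExtensions


async def _example_run_main_dialog(
    turn_context: TurnContext,
    conversation_state: ConversationState,
    dialog: MainDialog,
):
    await DialogExtensions.run_dialog(
        dialog,
        turn_context,
        conversation_state.create_property("DialogState"),
    )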
|
botbuilder-python/tests/experimental/sso/parent/dialogs/main_dialog.py/0
|
{
"file_path": "botbuilder-python/tests/experimental/sso/parent/dialogs/main_dialog.py",
"repo_id": "botbuilder-python",
"token_count": 721
}
| 427 |