content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def write_tube_sample(radius):
"""Saves the image of the tube with selected radius"""
tube_input = files.slices.get_tube_sample()
circ = get_bound_circ(tube_input, radius)
x, y = circ[:2]
tube_file = tube_input.copy()
cv2.circle(tube_file, (x, y), radius, (255, 0, 0), 1)
cv2.imwrite("src/gui/img/tube_sample.png", tube_file)
| 5,354,700 |
def evaluate_score_batch(
predicted_classes=[], # list, len(num_classes), str(code)
predicted_labels=[], # shape (num_examples, num_classes), T/F for each code
predicted_probabilities=[], # shape (num_examples, num_classes), prob. [0-1] for each code
raw_ground_truth_labels=[], # list(('dx1', 'dx2'), ('dx1', 'dx3'), ...)
weights_file="evaluation-2020/weights.csv",
normal_class="426783006",
equivalent_classes=[
["713427006", "59118001"],
["284470004", "63593006"],
["427172004", "17338001"],
],
):
"""This is a helper function for getting
auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric
without needing the directories of labels and prediction outputs.
It is useful for directly calculating the scores given the
classes, predicted labels, and predicted probabilities.
"""
label_classes, labels = _load_labels(
raw_ground_truth_labels,
normal_class=normal_class,
equivalent_classes_collection=equivalent_classes,
)
output_classes, binary_outputs, scalar_outputs = _load_outputs(
predicted_classes,
predicted_labels,
predicted_probabilities,
normal_class=normal_class,
equivalent_classes_collection=equivalent_classes,
)
classes, labels, binary_outputs, scalar_outputs = organize_labels_outputs(
label_classes, output_classes, labels, binary_outputs, scalar_outputs
)
weights = load_weights(weights_file, classes)
# Only consider classes that are scored with the Challenge metric.
indices = np.any(weights, axis=0) # Find indices of classes in weight matrix.
classes = [x for i, x in enumerate(classes) if indices[i]]
labels = labels[:, indices]
scalar_outputs = scalar_outputs[:, indices]
binary_outputs = binary_outputs[:, indices]
weights = weights[np.ix_(indices, indices)]
auroc, auprc = compute_auc(labels, scalar_outputs)
accuracy = compute_accuracy(labels, binary_outputs)
f_measure = compute_f_measure(labels, binary_outputs)
f_beta_measure, g_beta_measure = compute_beta_measures(
labels, binary_outputs, beta=2
)
challenge_metric = compute_challenge_metric(
weights, labels, binary_outputs, classes, normal_class
)
return (
auroc,
auprc,
accuracy,
f_measure,
f_beta_measure,
g_beta_measure,
challenge_metric,
)
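# Hedged usage sketch: the class codes, labels, and probabilities below are
# illustrative only, and the call assumes the evaluation-2020 weights file
# referenced above is available on disk.
classes = ["426783006", "164889003", "270492004"]      # SNOMED codes (made-up selection)
labels = [[True, False, False], [False, True, True]]   # per-record binary predictions
probs = [[0.9, 0.2, 0.1], [0.1, 0.8, 0.7]]             # per-record probabilities
truth = [("426783006",), ("164889003", "270492004")]   # raw ground-truth label tuples
(auroc, auprc, accuracy, f_measure,
 f_beta_measure, g_beta_measure, challenge_metric) = evaluate_score_batch(
    predicted_classes=classes,
    predicted_labels=labels,
    predicted_probabilities=probs,
    raw_ground_truth_labels=truth,
)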
| 5,354,701 |
def pair(data, color=None, tooltip=None, mark='point', width=150, height=150):
"""
Create pairwise scatter plots of all column combinations.
In contrast to many other pairplot tools,
this function creates a single scatter plot per column pair,
and no distribution plots along the diagonal.
Parameters
----------
data : DataFrame
pandas DataFrame with input data.
color : str
Column in **data** used for the color encoding.
tooltip: str
Column in **data** used for the tooltip encoding.
mark: str
Shape of the points. Passed to Chart.
One of "circle", "square", "tick", or "point".
width: int or float
Chart width.
height: int or float
Chart height.
Returns
-------
ConcatChart
Concatenated Chart of pairwise column scatter plots.
"""
# TODO support categorical?
col_dtype = 'number'
    # color = 'species:N' # must be passed with a type, enh: autodetect
# tooltip = alt.Tooltip('species')
cols = data.select_dtypes(col_dtype).columns
# Setting a non-existing column with specified type passes through without effect
    # and eliminates the need to have a separate plotting section for colored bars below.
if color is None:
color = ':Q'
if tooltip is None:
tooltip = ':Q'
# Infer color data type if not specified
if color[-2:] in [':Q', ':T', ':N', ':O']:
color_alt = alt.Color(color, title=None, legend=alt.Legend(orient='left', offset=width * -1.6))
    # The selection fields parameter does not work with the suffix
legend_color = color.split(':')[0]
else:
color_alt = alt.Color(color, title=None, type=alt.utils.infer_vegalite_type(data[color]))
legend_color = color
# Set up interactions
brush = alt.selection_interval()
color = alt.condition(brush, color_alt, alt.value('lightgrey'))
legend_click = alt.selection_multi(fields=[legend_color], bind='legend')
opacity = alt.condition(legend_click, alt.value(0.8), alt.value(0.2))
hidden_axis = alt.Axis(domain=False, title='', labels=False, ticks=False)
# Create corner of pair-wise scatters
i = 0
exclude_zero = alt.Scale(zero=False)
col_combos = list(combinations(cols, 2))[::-1]
subplot_row = []
while i < len(cols) - 1:
plot_column = []
for num, (y, x) in enumerate(col_combos[:i+1]):
if num == 0 and i == len(cols) - 2:
subplot = alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero),
alt.Y(y, scale=exclude_zero))
elif num == 0:
subplot = (
alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero, axis=hidden_axis),
alt.Y(y, scale=exclude_zero)))
elif i == len(cols) - 2:
subplot = (
alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero),
alt.Y(y, scale=exclude_zero, axis=hidden_axis)))
else:
subplot = (
alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero, axis=hidden_axis),
alt.Y(y, scale=exclude_zero, axis=hidden_axis)))
plot_column.append(
subplot
.encode(opacity=opacity, color=color, tooltip=tooltip)
.properties(width=width, height=height))
subplot_row.append(alt.hconcat(*plot_column))
i += 1
col_combos = col_combos[i:]
return (
alt.vconcat(*subplot_row)
.add_selection(brush, legend_click))
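# Hedged usage sketch: assumes altair is imported as `alt`, `combinations` comes
# from itertools, and the DataFrame columns below are made up for illustration.
import pandas as pd

df = pd.DataFrame({
    'a': [1, 2, 3, 4],
    'b': [4, 3, 2, 1],
    'c': [2, 5, 3, 6],
    'kind': ['x', 'x', 'y', 'y'],   # categorical column used for the color encoding
})
chart = pair(df, color='kind:N', tooltip='kind:N', width=120, height=120)
# chart.save('pairplot.html')  # or display `chart` directly in a notebook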
| 5,354,702 |
def gauss_kernel(model_cell, x, y, z, sigma=1):
"""
    Convolve aligned pixels given coordinates `x`, `y` and values `z` with a Gaussian kernel to form the final image.
Parameters
----------
model_cell : :class:`~colicoords.cell.Cell`
Model cell defining output shape.
x : :class:`~numpy.ndarray`
Array with combined x-coordinates of aligned pixels.
y : :class:`~numpy.ndarray`
Array with combined y-coordinates of aligned pixels.
z : :class:`~numpy.ndarray`
Array with pixel values of aligned pixels.
sigma : :obj:`float`
Sigma of the gaussian kernel.
Returns
-------
output : :class:`~numpy.ndarray`
Output aligned image.
"""
output = np.empty(model_cell.data.shape)
coords = np.array([x, y])
for index in tqdm(np.ndindex(model_cell.data.shape), desc='Gaussian kernel', total=np.product(model_cell.data.shape)):
xi, yi = index
xp, yp = model_cell.coords.x_coords[xi, yi], model_cell.coords.y_coords[xi, yi]
dist = distance.cdist(np.array([[xp, yp]]), coords.T).squeeze()
bools = dist < 5*sigma
weights = gauss_2d(x[bools], y[bools], xp, yp, sigma=sigma)
avg = np.average(z[bools], weights=weights)
output[xi, yi] = avg
return output
| 5,354,703 |
def main(argv):
"""
Creates a parquet file with tissue data.
:param list argv: the list elements should be:
[1]: Parquet file path with raw sample data
[2]: Output file
"""
raw_source_parquet_path = argv[1]
output_path = argv[2]
spark = SparkSession.builder.getOrCreate()
raw_source_df = spark.read.parquet(raw_source_parquet_path)
project_group_df = transform_project_group(raw_source_df)
project_group_df.write.mode("overwrite").parquet(output_path)
| 5,354,704 |
def create_file2four(file_list):
"""
Function to merge ascii files.
Parameters
-------------
    file_list : list of str
        Paths to the files. Taken automatically.
Returns
-------------
Merged file: output_file4.txt
"""
with open('output_file4.txt', 'w') as file3:
        readers = [open(file) for file in file_list]
        for lines in zip(*readers):
            print(' '.join(line.strip() for line in lines), file=file3)
        for reader in readers:
            reader.close()
| 5,354,705 |
def get_configs_from_multiple_files():
"""Reads training configuration from multiple config files.
Reads the training config from the following files:
model_config: Read from --model_config_path
train_config: Read from --train_config_path
input_config: Read from --input_config_path
Returns:
model_config: model_pb2.DetectionModel
train_config: train_pb2.TrainConfig
input_config: input_reader_pb2.InputReader
"""
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(FLAGS.train_config_path, 'r') as f:
text_format.Merge(f.read(), train_config)
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(FLAGS.model_config_path, 'r') as f:
text_format.Merge(f.read(), model_config)
input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(FLAGS.input_config_path, 'r') as f:
text_format.Merge(f.read(), input_config)
return model_config, train_config, input_config
| 5,354,706 |
def from_package(package_name, extensions=('.py',)):
"""Generate *.py file names available in given package."""
extensions = tuple(extensions) # .endswith doesn't like list
process = Popen("/usr/bin/dpkg -L %s" % package_name,
shell=True, stdout=PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise Exception("cannot get content of %s" % package_name)
stdout = str(stdout, 'utf-8')
for line in stdout.splitlines():
if line.endswith(extensions):
yield line
| 5,354,707 |
def line_to_numbers(line: str) -> t.List[int]:
"""Split a spreadsneet line into a list of numbers.
raises:
ValueError
"""
return list(map(int, line.split()))
| 5,354,708 |
def bits_to_amps(bits):
"""helper function to convert raw data from usb device to amps"""
return bits*BITS_TO_AMPS_SLOPE + BITS_TO_AMPS_Y_INTERCEPT
| 5,354,709 |
def fields_to_dict(fields):
""" FIXME:
https://www.debuggex.com/r/24QPqzm5EsR0e2bt
https://www.debuggex.com/r/0SjmBL55ySna0kFF
https://www.debuggex.com/r/Vh9qvHkCV4ZquS14
"""
result = {}
if not fields or len(fields.strip()) == 0:
return result
# look_behind_keys = re.findall('{(\w*?),', fields)
# look_behind_pattern_list = ['(?<!{' + k + ')' for k in look_behind_keys]
# # FIXME: '(?<!{[^,]*),<look_forward_pattern>' will trigger "look-behind requires
# # fixed-width pattern"
# look_behind_pattern = ''.join(look_behind_pattern_list)
# # FIXME: not support nested bracket: field{id,name,description{abc,def}}
# look_forward_pattern = '(?![a-zA-Z0-9,\}:\[\]]*?})'
# # sample pattern: '(?<!{id)(?<!{email),<look_forward_pattern>'
# re_pattern = look_behind_pattern + ',' + look_forward_pattern
splited_fields = []
word_block = ''
bracket_counter = 0
field_len = len(fields)
for index, word in enumerate(fields):
if word == '{':
bracket_counter = bracket_counter + 1
if word == '}':
bracket_counter = bracket_counter - 1
# move to new word block
if word == ',' and bracket_counter == 0:
splited_fields.append(word_block)
word_block = ''
else:
word_block += word
# add remaining word_block
if word_block != '' and index==field_len-1:
splited_fields.append(word_block)
for key in splited_fields:
key = key.strip()
value = {}
if key.find('{') > -1:
# get sub fields: field{<sub_fields>} and assign its value
sub_field = re.findall('{(.*)}', key)
value = fields_to_dict(sub_field[0])
# clean key
key = re.sub('{(.*)}', '', key)
if key.find('[') > -1:
# get & set slide range: [a:b]
            value['__slice'] = re.findall(r'\[(.*)\]', key)[0]
# clean key
            key = re.sub(r'\[(.*)\]', '', key)
result[key] = value
return result
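# Minimal sketch of the field-selection syntax this parser accepts
# (the field names are hypothetical):
parsed = fields_to_dict('id,name,address{city,street},items[0:10]')
# parsed == {'id': {}, 'name': {},
#            'address': {'city': {}, 'street': {}},
#            'items': {'__slice': '0:10'}}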
| 5,354,710 |
def get_font_paths(fonts_dir):
"""
Load font path recursively from a folder
    :param fonts_dir: folder containing fonts in ttf, otf or ttc format
:return: path of all fonts
"""
print('Load fonts from %s' % os.path.abspath(fonts_dir))
fonts = glob.glob(fonts_dir + '/**/*', recursive=True)
fonts = list(filter(lambda x: os.path.isfile(x), fonts))
print("Total fonts num: %d" % len(fonts))
if len(fonts) == 0:
print("Not found fonts in fonts_dir")
exit(-1)
return fonts
| 5,354,711 |
def create(arguments):
""" Create a document """
if arguments.attack_log is None:
raise CmdlineArgumentException("Creating a new document requires an attack_log")
doc_get = DocGenerator()
doc_get.generate(arguments.attack_log, arguments.outfile)
| 5,354,712 |
def time_difference(t_early, t_later):
"""
    Compute the time difference between t_early and t_later, in hours.
Parameters:
t_early: np.datetime64, list or pandas series.
t_later: np.datetime64, list or pandas series.
"""
if type(t_early) == list:
t1 = np.array(t_early)
elif type(t_early) == pd.Series:
t1 = np.array(t_early.tolist())
else:
t1 = np.array([t_early])
if type(t_later) == list:
t2 = np.array(t_later)
elif type(t_later) == pd.Series:
t2 = np.array(t_later.tolist())
else:
t2 = np.array([t_later])
timedelta2float = np.vectorize(lambda x: x / np.timedelta64(3600, 's'))
t_diff = timedelta2float(t2 - t1)
return t_diff
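# Quick sketch of the expected units: the function divides by
# np.timedelta64(3600, 's'), so the result is expressed in hours.
import numpy as np

t1 = np.datetime64('2021-01-01T00:00')
t2 = np.datetime64('2021-01-01T06:30')
time_difference(t1, t2)   # -> array([6.5]), i.e. 6.5 hours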
| 5,354,713 |
def test_get_id_info_fuzzy_min_ratio():
"""
Tests the get_id_info function fuzzy matching with a specified minimum ratio.
"""
sa_id_book = SAIDBook()
in_str = (
'edn0 7101135111011\n'
'Suriname\n'
'Doe\n'
'Forenames\n'
'John-Michael\n'
'Robert\n'
'Sex\n'
'M\n'
'County o Bnth\n'
'South Africa\n'
)
assert sa_id_book.get_id_info(in_str, fuzzy_min_ratio=90.00) == {
'identity_number': None,
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': None,
'country_of_birth': None,
'status': None
}
| 5,354,714 |
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
""" Convolutional block with two convolutions followed by batch normalisation (if True) and with ReLU activations.
input_tensor: A tensor. Input tensor on which the convolutional block acts.
n_filters: An integer. Number of filters in this block.
kernel_size: An integer. Size of convolutional kernel.
batchnorm: A bool. Perform batch normalisation after each convolution if True.
:return: A tensor. The output of the operation.
"""
# first convolutional layer
x = layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal",
padding="same")(input_tensor)
if batchnorm:
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
# second convolutional layer
x = layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal",
padding="same")(x)
if batchnorm:
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
return x
| 5,354,715 |
def check_credentials(username):
"""
    Check whether a Credentials object with the given username exists and return True or False
"""
return Credentials.if_credential_exist(username)
| 5,354,716 |
def rpc_category_to_super_category(category_id, num_classes):
"""Map category to super-category id
Args:
category_id: list of category ids, 1-based
num_classes: 1, 17, 200
Returns:
super-category id, 0-based
"""
cat_id = -1
assert num_classes in RPC_SUPPORT_CATEGORIES, \
        'Unsupported number of density categories: {}'.format(num_classes)
if num_classes == 17:
cat_id = _categories[category_id]
elif num_classes == 1:
cat_id = 0
elif num_classes == 200:
cat_id = category_id - 1
assert 199 >= cat_id >= 0
return cat_id
| 5,354,717 |
def setup_integration():
"""Set up a test resource."""
print('Setting up a test integration for an API')
return Integration(name='myapi',
base_url='https://jsonplaceholder.typicode.com')
| 5,354,718 |
def secondsToHMS(intervalInSeconds):
"""converts time in seconds to a string representing time in hours, minutes, and seconds
:param intervalInSeconds: a time measured in seconds
:returns: time in HH:MM:SS format
"""
    hours, remainder = divmod(intervalInSeconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    intervalString = '{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(hours, minutes, seconds)
    return intervalString
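# Quick check of the formatting:
secondsToHMS(3725)    # -> '01:02:05'
secondsToHMS(86399)   # -> '23:59:59'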
| 5,354,719 |
def build_rdn(coords, r, **kwargs):
"""
Reconstruct edges between nodes by radial distance neighbors (rdn) method.
An edge is drawn between each node and the nodes closer
than a threshold distance (within a radius).
Parameters
----------
coords : ndarray
Coordinates of points where each column corresponds to an axis (x, y, ...)
    r : float
        Radius within which nodes are connected.
Examples
--------
>>> coords = make_simple_coords()
>>> pairs = build_rdn(coords, r=60)
Returns
-------
pairs : ndarray
The (n_pairs x 2) matrix of neighbors indices.
"""
tree = BallTree(coords, **kwargs)
ind = tree.query_radius(coords, r=r)
# clean arrays of neighbors from self referencing neighbors
# and aggregate at the same time
source_nodes = []
target_nodes = []
for i, arr in enumerate(ind):
neigh = arr[arr != i]
source_nodes.append([i]*(neigh.size))
target_nodes.append(neigh)
# flatten arrays of arrays
source_nodes = np.fromiter(itertools.chain.from_iterable(source_nodes), int).reshape(-1,1)
target_nodes = np.fromiter(itertools.chain.from_iterable(target_nodes), int).reshape(-1,1)
# remove duplicate pairs
pairs = np.hstack((source_nodes, target_nodes))
pairs = np.sort(pairs, axis=1)
pairs = np.unique(pairs, axis=0)
return pairs
| 5,354,720 |
def find_layer(model, type, order=0):
"""
Given a model, find the Nth layer of the specified type.
:param model: the model that will be searched
:param type: the lowercase type, as it is automatically saved by keras in the layer's name (e.g. conv2d, dense)
:param order: 0 by default (the first matching layer will be returned)
    :return: The matching layer, or None if it was not found.
"""
num_found = 0
for layer in model.layers:
if type + '_' in layer.get_config()['name']:
if order == num_found:
return layer
num_found += 1
return None
| 5,354,721 |
def _interpretable(model):
# type: (Union[str, h2o.model.ModelBase]) -> bool
"""
Returns True if model_id is easily interpretable.
:param model: model or a string containing a model_id
:returns: bool
"""
return _get_algorithm(model) in ["glm", "gam", "rulefit"]
| 5,354,722 |
def uploaded_mapping(mimic_mapping, destroy_mimic_source):
"""Impots the mimic mapping to river-api
Args:
mimic_mapping (dict): the mimic mapping fixture loaded as dict
Raises:
Exception: when the mapping could not be uploaded
Yields:
dict: The uploaded mapping
"""
try:
# send a batch request
response = requests.post(f"{settings.RIVER_API_URL}/sources/import/", json=mimic_mapping)
except requests.exceptions.ConnectionError:
raise Exception("Could not connect to the api service")
assert response.status_code == 201, f"api POST /sources/import/ returned an error: {response.text}"
created_mapping = response.json()
assert (
len([resource["id"] for resource in created_mapping["resources"]]) > 0
), f"no resource ids in mapping: {created_mapping}"
yield created_mapping
| 5,354,723 |
def apply_pb_correction(obs,
pb_sensitivity_curve,
cutoff_radius):
"""
Updates the primary beam response maps for cleaned images in an ObsInfo object.
Args:
obs (ObsInfo): Observation to generate maps for.
pb_sensitivity_curve: Primary beam sensitivity as a function of radius
in units of image pixels. (Should be 1.0 at the exact centre).
cutoff_radius: Radius at which to mask the output image (avoids
extremely high corrected values for noise fluctuations at large
radii). Units: image pixels.
"""
assert isinstance(obs, ObsInfo)
def update_pb_map_for_img(flux_map_path):
pbmap = generate_primary_beam_response_map(flux_map_path,
pb_sensitivity_curve,
cutoff_radius)
return pbmap
def process_clean_maps(clean_maps):
pbmap = update_pb_map_for_img(clean_maps.flux)
img_path = clean_maps.image
pb_img_path = img_path+'.pbcor'
generate_pb_corrected_image(img_path, pb_img_path,
pbmap)
clean_maps.pbcor = pb_img_path
if obs.maps_masked.ms.image:
process_clean_maps(obs.maps_masked.ms)
if obs.maps_open.ms.image:
process_clean_maps(obs.maps_open.ms)
if obs.maps_hybrid.ms.image:
process_clean_maps(obs.maps_hybrid.ms)
| 5,354,724 |
def makeLoadParams(args):
"""
Create load parameters for start load request out of command line arguments.
Args:
args (dict): Parsed command line arguments.
"""
load_params = {'target': {},
'format': {'date_time': {},
'boolean': {}},
'load_options': {},
'advanced_options': {}}
add_param(load_params['target'], 'database', args.target_database)
add_param(load_params['target'], 'schema', args.target_schema)
add_param(load_params['target'], 'table', args.target_table)
if len(load_params['target']) == 0:
del load_params['target']
add_param(load_params['format'], 'type', args.type)
add_param(load_params['format'], 'field_separator', args.field_separator)
add_param(load_params['format'], 'trailing_field_separator',
args.trailing_field_separator, False)
add_param(load_params['format'], 'enclosing_character',
args.enclosing_character)
add_param(load_params['format'], 'escape_character', args.escape_character)
add_param(load_params['format'], 'null_value', args.null_value)
add_param(load_params['format'], 'has_header_row',
args.has_header_row, False)
add_param(load_params['format'], 'flexible', args.flexible, False)
add_param(load_params['format']['date_time'], 'converted_to_epoch',
args.date_converted_to_epoch, False)
add_param(load_params['format']['date_time'], 'date_format',
args.date_format)
add_param(load_params['format']['date_time'], 'time_format',
args.time_format)
add_param(load_params['format']['date_time'], 'date_time_format',
args.date_time_format)
add_param(load_params['format']['date_time'], 'second_fraction_start',
args.second_fraction_start)
add_param(load_params['format']['date_time'], 'skip_second_fraction',
args.skip_second_fraction, False)
if len(load_params['format']['date_time']) == 0:
del load_params['format']['date_time']
add_param(load_params['format']['boolean'], 'use_bit_values',
args.use_bit_boolean_values, False)
add_param(load_params['format']['boolean'], 'true_format', args.true_format)
add_param(load_params['format']['boolean'], 'false_format',
args.false_format)
if len(load_params['format']['boolean']) == 0:
del load_params['format']['boolean']
if len(load_params['format']) == 0:
del load_params['format']
add_param(load_params['load_options'], 'empty_target',
args.empty_target, False)
add_param(load_params['load_options'], 'max_ignored_rows',
args.max_ignored_rows)
if len(load_params['load_options']) == 0:
del load_params['load_options']
add_param(load_params['advanced_options'], 'validate_only',
args.validate_only, False)
add_param(load_params['advanced_options'], 'file_target_dir',
args.file_target_dir)
if len(load_params['advanced_options']) == 0:
del load_params['advanced_options']
print('Created load params: ', load_params)
return load_params
| 5,354,725 |
def main(argv=None):
"""
Steps if script is run directly
"""
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
prog='getawscreds.py',
description=HELP_MESSAGE,
)
# Determine verbosity (optional argument)
parser.add_argument(
"-v", "--verbose",
help="increase output verbosity",
action="store_true",
default=False,
)
parser.add_argument(
"-u", "--username",
help="ldap username",
required=True,
)
parser.add_argument(
"-P", "--password-file",
help="file which contains password",
        type=argparse.FileType('r'),
)
parser.add_argument(
"-a", "--address",
help="address of the vault server (e.g. http://localhost:9000)",
default=VAULT_ADDR,
)
parser.add_argument(
"-n", "--account-name",
help="name of aws account (e.g. engineering)",
dest='account',
required=True,
)
parser.add_argument(
"-t", "--access-type",
help="type of access needed (e.g. read, XXX, or superuser)",
required=True,
)
parser.add_argument(
"-o", "--output-credential-type",
help="where to output credential from vault's results",
choices=["shellenv", "credfile"],
default="credfile",
dest="output_to",
)
args = parser.parse_args()
# Change log level if using verbose
if args.verbose:
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
logging.info("Verbose logging.")
logging.debug("Supplied Arguments: %s", args)
logging.debug("Version: %s", VERINFO)
logging.debug("DEFAULT_PASSWORD_FILE: %s" % DEFAULT_PASSWORD_FILE)
logging.debug("VAULT_ADDR: %s" % VAULT_ADDR)
logging.debug("VAULT_TOKEN: %s" % VAULT_TOKEN)
logging.debug("AWS_CONFIG_FILE: %s" % AWS_CONFIG_FILE)
else:
logging.basicConfig(format="%(message)s", level=logging.INFO)
logging.debug(args)
if args.password_file:
if not has_valid_perms(args.password_file.name):
            logging.warning('password file has wrong permissions (needs 0600): %s' % args.password_file.name)
sys.exit(5)
elif os.path.isfile(os.path.expanduser(VAULT_TOKEN)):
if not has_valid_perms(VAULT_TOKEN):
            logging.warning('vault token file has wrong permissions (needs 0600): %s' % VAULT_TOKEN)
sys.exit(7)
client_data = {
'username': args.username,
'address': args.address,
'password': 'password-goes-here-if-collected',
}
client_data['path'] = ''.join([
args.account,
'/creds/',
args.access_type,
])
logging.debug("client_data: %s", client_data)
    logging.debug('initialize vault client')
client = hvac.Client(url=client_data['address'])
vtoken = get_session_token(VAULT_TOKEN)
client.token = vtoken
logging.debug('vtoken: %s' % vtoken)
if client.is_authenticated():
logging.debug('client is authenticated, getting data')
awscreds_from_vault = client.read(client_data['path'])
vault_auth_output = 'successfully used existing token: %s' % client.token
else:
logging.debug('authenticating client')
client_data['password'] = get_password(args.password_file)
vault_auth_output = client.auth_ldap(client_data['username'], client_data['password'])
if client.token != vtoken:
write_session_token(VAULT_TOKEN, client.token)
logging.debug('wrote token to file %s' % VAULT_TOKEN)
logging.debug('token info (detailed): %s' % client.lookup_token())
logging.debug('getting data')
awscreds_from_vault = client.read(client_data['path'])
logging.debug("awscreds_from_vault: %s", awscreds_from_vault)
logging.debug("vault_auth_output: %s", vault_auth_output)
if args.output_to == "shellenv":
update_results = output_aws_credentials(awscreds_from_vault, args.account)
print(update_results)
elif args.output_to == "credfile":
update_results = update_aws_cred_file(awscreds_from_vault, args.account, AWS_CONFIG_FILE)
logging.info(update_results)
| 5,354,726 |
def empty_hash():
"""Initialize empty hash table."""
from hash import HashTable
test_hash = HashTable()
return test_hash
| 5,354,727 |
def denoise_sim(image, std, denoiser):
"""Simulate denoising problem
Args:
image (torch.Tensor): image tensor with shape (C, H, W).
std (float): standard deviation of additive Gaussian noise
on the scale [0., 1.].
denoiser: a denoiser instance (as in algorithms.denoiser).
The std argument for this denoiser is already specified
if applicable.
Returns:
denoised_image (torch.Tensor): tensor of denoised image
noisy_image (torch.Tensor): tensor of noisy image
"""
print('deploy.sim.denoise_sim: Simulating noisy image...')
noisy_image = gutil.add_noise(image, std)
print('deploy.sim.denoise_sim: Begin image denoising...')
denoised_image = denoiser(noisy_image, std=std)
return denoised_image, noisy_image
| 5,354,728 |
def _find_weektime(datetime, time_type='min'):
"""
    Finds the minutes/seconds away from midnight between Sunday and Monday.
Parameters
----------
datetime : datetime
The date and time that needs to be converted.
time_type : 'min' or 'sec'
States whether the time difference should be specified in seconds or minutes.
"""
if time_type == 'sec':
return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second
elif time_type == 'min':
return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute
else:
raise ValueError("Invalid time type specified.")
| 5,354,729 |
def test_calculator_get_result_method():
"""Testing the Calculator"""
calculator = Calculator()
assert calculator.get_result() == 0
| 5,354,730 |
def linear_to_image_array(pixels:List[List[int]], size:Tuple[int,int]) -> np.ndarray:
"""\
Converts a linear array ( shape=(width*height, channels) ) into an array
usable by PIL ( shape=(height, width, channels) )."""
a = np.array(pixels, dtype=np.uint8)
    split = np.split(a, [i*size[0] for i in range(1,size[1])])
return np.array(split, dtype=np.uint8)
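# Hedged sketch: turn a flat RGB pixel list into a 2x2 image usable by PIL
# (assumes Pillow is installed; the pixel values are arbitrary).
from PIL import Image

flat_pixels = [[255, 0, 0], [0, 255, 0],       # first row
               [0, 0, 255], [255, 255, 255]]   # second row
arr = linear_to_image_array(flat_pixels, (2, 2))  # shape (height, width, channels) == (2, 2, 3)
img = Image.fromarray(arr, mode='RGB')
# img.save('tiny.png')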
| 5,354,731 |
def stop_processes(hosts, pattern, verbose=True, timeout=60):
"""Stop the processes on each hosts that match the pattern.
Args:
hosts (list): hosts on which to stop the processes
pattern (str): regular expression used to find process names to stop
verbose (bool, optional): display command output. Defaults to True.
timeout (int, optional): command timeout in seconds. Defaults to 60
seconds.
Returns:
dict: a dictionary of return codes keys and accompanying NodeSet
values indicating which hosts yielded the return code.
Return code keys:
0 No processes matched the criteria / No processes killed.
1 One or more processes matched the criteria and a kill was
attempted.
"""
result = {}
log = getLogger()
log.info("Killing any processes on %s that match: %s", hosts, pattern)
if hosts is not None:
commands = [
"rc=0",
"if pgrep --list-full {}".format(pattern),
"then rc=1",
"sudo pkill {}".format(pattern),
"if pgrep --list-full {}".format(pattern),
"then sleep 5",
"pkill --signal KILL {}".format(pattern),
"fi",
"fi",
"exit $rc",
]
result = pcmd(hosts, "; ".join(commands), verbose, timeout, None)
return result
| 5,354,732 |
def test_user_edit_post_minimal_values(client, logged_in_dummy_user):
"""Test posting to the user edit page: /user/<username>/settings/profile/
with the bare minimum of values """
with fml_testing.mock_sends(
UserUpdateV1(
{
"msg": {
"agent": "dummy",
"user": "dummy",
"fields": ['timezone', 'locale'],
}
}
)
):
result = client.post('/user/dummy/settings/profile/', data=POST_CONTENTS_MIN)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/profile/",
expected_message="Profile Updated: <a href=\"/user/dummy/\">view your profile</a>",
expected_category="success",
)
| 5,354,733 |
def field_as_table_row(field):
"""Prints a newforms field as a table row.
This function actually does very little, simply passing the supplied
form field instance in a simple context used by the _field_as_table_row.html
template (which is actually doing all of the work).
See soc/templates/soc/templatetags/_field_as_table_row.html for the CSS
styles used by this template tag.
Usage:
{% load forms_helpers %}
...
<table>
{% field_as_table_row form.fieldname %}
...
</table>
Args:
field: a Django newforms field instance
Returns:
a simple context containing the supplied newforms field instance:
{ 'field': field }
"""
return {'field': field}
| 5,354,734 |
def get_samples(select_samples: list, avail_samples: list) -> list:
"""Get while checking the validity of the requested samples
:param select_samples: The selected samples
:param avail_samples: The list of all available samples based on the range
:return: The selected samples, verified
"""
# Sample number has to be positive
if True in [_ < 0 for _ in select_samples]:
raise ValueError(
"Number of samples with -ns has to be strictly positive!")
# Sample number has to be within the available sample
elif False in [_ in avail_samples for _ in select_samples]:
raise ValueError(
"Some or all selected samples are not available in the design")
return select_samples
| 5,354,735 |
def createMemoLayer(type="", crs=4326, name="", fields={"id":"integer"}, index="no"):
"""
    Create an in-memory layer according to the given parameters
    :param type (string): the geometry type: "point", "linestring",
    "polygon", "multipoint","multilinestring","multipolygon"
    :param crs (int): CRS projection system
    :param fields (dict): {field_name : field_type(length)} field=name : type(length,precision)
    types : "integer", "double", "string(length)"
    :param name (string): the name of the layer as it will appear in the legend
    :param index (string): indicates whether to create a spatial index
    :return (QgsVectorLayer): returns a QgsVectorLayer object
    """
    # build the uri and add all the fields
uri="%s?crs=epsg:%s"%(type,crs)
for key, value in fields.items():
uri="%s&field=%s:%s"%(uri,key, value)
uri="%s&index=%s"%(uri,index)
    # create the QgsVectorLayer object
memLayer = QgsVectorLayer(uri, name, "memory")
return memLayer
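# Hedged usage sketch inside a QGIS Python console; the field names and the
# Lambert-93 CRS (EPSG:2154) below are only examples.
layer = createMemoLayer(type="Point", crs=2154, name="sampling_points",
                        fields={"id": "integer", "label": "string(50)"},
                        index="yes")
QgsProject.instance().addMapLayer(layer)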
| 5,354,736 |
def draw_arc(arc):
"""draw arc"""
xy = (arc.center.x, arc.center.y)
start = 0
end = 0
if arc.start_angle < arc.end_angle:
start = arc.start_angle / math.pi * 180
end = arc.end_angle / math.pi * 180
else:
end = arc.start_angle / math.pi * 180
start = arc.end_angle / math.pi * 180
pac = mpatches.Arc(
xy, arc.radius * 2, arc.radius * 2, angle=0, theta1=start, theta2=end)
plt.gca().add_patch(pac)
| 5,354,737 |
def load_templates(package):
"""
Returns a dictionary {name: template} for the given instrument.
Templates are defined as JSON objects, with stored in a file named
"<instrument>.<name>.json". All templates for an instrument should
be stored in a templates subdirectory, made into a package by inclusion
of an empty __init__.py file. They can then be loaded using::
from dataflow import core as df
from . import templates
...
instrument = df.Instrument(
...
templates=df.load_templates(templates),
)
"""
templates = {}
for filename in resources.contents(package):
if filename.endswith('.json'):
name = filename.split('.')[-2]
template = json.loads(resources.read_text(package, filename))
templates[name] = template
return templates
| 5,354,738 |
def get_count_matrix(args):
"""首先获取数据库中全部文档的id,然后遍历id获取文档内容,再逐文档
进行分词,生成计数矩阵。"""
global DOC2IDX
with DocDB(args.db_path) as doc_db:
doc_ids = doc_db.get_doc_ids()
DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
row, col, data = [], [], []
_count = partial(count, args)
for i in doc_ids:
b_row, b_col, b_data = _count(i)
row.extend(b_row)
col.extend(b_col)
data.extend(b_data)
    # Build the sparse matrix using the Compressed Sparse Row (CSR) format.
    # For background on csr_matrix, see:
# https://www.pianshen.com/article/7967656077/
# https://zhuanlan.zhihu.com/p/342942385
count_matrix = sp.csr_matrix((data, (row, col)), shape=(args.hash_size, len(doc_ids)))
count_matrix.sum_duplicates()
return count_matrix, (DOC2IDX, doc_ids)
| 5,354,739 |
def communication_round(model, clients, train_data, train_labels, train_people, val_data, val_labels, val_people,
val_all_labels, local_epochs, weights_accountant, individual_validation, local_operation):
"""
One round of communication between a 'server' and the 'clients'. Each client 'downloads' a global model and trains
a local model, updating its weights locally. When all clients have updated their weights, they are 'uploaded' to
the server and averaged.
:param model: Tensorflow Graph
:param clients: numpy array, array of unique client IDs
:param train_data: numpy array
:param train_labels: numpy array
:param train_people: numpy array
:param val_data: numpy array
:param val_labels: numpy array
:param val_people: numpy array
:param val_all_labels: numpy array
:param local_epochs: int, local epochs to be trained
:param weights_accountant: WeightsAccountant object
:param individual_validation: bool, if true, validation history for every local epoch in a federated setting
is stored (typically not necessary)
:param local_operation: string, valid arguments are "global_averaging", "localized_learning",
and "local_models"
:return:
Pandas DataFrame, training history
"""
# Split train and validation data into clients
train_data, train_labels = dL.split_data_into_clients_dict(train_people, train_data, train_labels)
if val_data is not None:
val_data, val_labels, val_people, val_all_labels = \
dL.split_data_into_clients_dict(val_people, val_data, val_labels, val_people, val_all_labels)
# Train each client
history = {}
for client in clients:
Output.print_client_id(client)
results = client_learning(model, client, local_epochs, train_data, train_labels, val_data, val_labels,
val_people, val_all_labels, weights_accountant, individual_validation)
# Append each client's results to the history dictionary
for key, val in results.items():
history.setdefault(key, []).extend(val)
# Pop general metrics from history as these are duplicated with client metrics, e.g. 'loss' == 'subject_43_loss'
for metric in model.metrics_names:
history.pop(metric, None)
history.pop("val_" + metric, None)
# If there is localization (e.g. the last layer of the model is not being averaged, indicated by less "shared
# weights" compared to total "default weights"), then we adapt local models to the new shared layers
if local_operation == 'localized_learning':
# Average all updates marked as "global"
weights_accountant.federated_averaging(layer_type='global')
# Decrease the learning rate for local adaptation only
K.set_value(model.optimizer.lr, K.get_value(model.optimizer.lr) / LR_FACTOR)
# Freeze the global layers
change_layer_status(model, 'global', 'freeze')
# Reconnect the Convolutional layers
for client in clients:
Output.print_client_id(client)
client_learning(model, client, local_epochs, train_data, train_labels, val_data, val_labels,
val_people, val_all_labels, weights_accountant, individual_validation)
# Unfreeze the global layers
change_layer_status(model, 'global', 'unfreeze')
# Increase the learning rate again
K.set_value(model.optimizer.lr, K.get_value(model.optimizer.lr) * LR_FACTOR)
elif local_operation == 'local_models':
print("No federated averaging.")
pass
elif local_operation == 'global_averaging':
weights_accountant.federated_averaging()
else:
raise ValueError('local_operation only accepts "global_averaging", "localized_learning", and "local_models"'
' as arguments. "{}" was given.'.format(local_operation))
return history
| 5,354,740 |
def _find_stop_area_mode(query_result, ref):
""" Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order.
:param ref: Name of the reference column.
:returns: Two lists; one to be to be used with `bulk_update_mappings`
and the other strings for invalid areas.
"""
# Group by stop area and reference
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
# Check each area and find mode matching reference
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas
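# Small sketch of the expected query shape; the rows and the "locality_ref"
# column name are hypothetical.
rows = [
    ("A1", "E0034964", 3),   # (stop area key, reference, matching stop count)
    ("A1", "E0034965", 1),
    ("A2", "E0051234", 2),
    ("A2", "E0051235", 2),   # tie: A2 cannot be resolved
]
update_areas, invalid_areas = _find_stop_area_mode(rows, "locality_ref")
# update_areas == [{"code": "A1", "locality_ref": "E0034964"}]
# invalid_areas == {"A2": ["E0051234", "E0051235"]}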
| 5,354,741 |
def get_all_funds_ranking(fund_type: str = 'all',
start_date: str = '-1y',
end_date: str = arrow.now(),
sort: str = 'desc',
subopts: str = '',
available: str = 1):
"""Get all funds ranking from 'fund.eastmoney.com'. (基金排行)
:param fund_type: (optional) fund type, default is `all`.
value: ct场内 gp股票 hh混合 zq债券 zs指数 bb保本 qdii lof fof
:param start_date: (optional) start date of the custom return, default is `-1y`.
value: -nd -nw -nm -ny cyear or YYYY-MM-DD
:param end_date: (optional) the end date of the results, default is `now`.
:param sort: (optional) results order, default is `desc`.
:param subopts: (optional) some suboptions. format is a list of options(`first,second`).
Suboptions for bonds(有关债券的子选项):
- first option is bonds type(债券类型).
value: cz长债 dz短债 hz混债 dkz定开债 kzz可转债
- second option is leverage ratio(杠杆比例).
value: 0-100 100-150 150-200 200+
Suboptions for stock index(有关指数的子选项):
- first option is index type(标的).
value: hs沪深 hy行业 dp大盘 zxp中小盘 gz股指 zz债指
- second option is stock index operation(运作方式).
value: bd被动 zq增强
Suboptions for QDII fonds.
- first option is fond type(基金类型).
vaule: qqgp全球股票 ytgp亚太股票 dzh大中华区 xxsc新兴市场 jzgj金砖国家
cssc成熟市场 us美国股票 qqidx全球指数 etf hh股债混合 zq债券 sp商品
:param available: (optional) `1` can buy, `0` including both, default is `1`.
:return: a list of the funds.
:rtype: `pd.DataFrame`.
"""
dtype = fund_type == 'ct' and 'fb' or 'kf'
begin = str2date(start_date).format('YYYY-MM-DD')
end = arrow.get(end_date).format('YYYY-MM-DD')
opt1, opt2 = _funds_ranking_subopts(fund_type, subopts)
    params = dict(op='ph',dt=dtype,ft=fund_type,rs='',gs=0,sc='zzf',st=sort,pi=1,pn=10000) # exchange-traded funds
fund_type != 'ct' and params.update(dict(sd=begin,ed=end,qdii=opt1,tabSubtype=opt2,dx=available))
resp = sess.get(api.all_funds_rank, params=params)
obj = js2obj(resp.text, 'rankData')
# dataframe
    if fund_type == 'ct': # exchange-traded funds
cols = 'code,name,1,date,nav,cnav,-1week,-1month,-3month,-6month,-1year,-2year,'\
'-3year,current_year,since_create,issue_date,,,,,,type'
newcols = cols.replace('1','type,issue_date',1).split(',issue_date,,')[0]
    else: # fund ranking
cols = 'code,name,1,date,nav,cnav,percent,-1week,-1month,-3month,-6month,-1year,-2year,'\
'-3year,current_year,since_create,issue_date,,custom,2,,,,'
newcols = cols.replace('1','issue_date',1).replace('issue_date,,','').split(',2')[0]
df = pd.DataFrame([i.split(',')[:-1] for i in obj['datas']],
columns=cols.split(',')).ffill(None)[newcols.split(',')]
df['date'] = pd.to_datetime(df['date'])
df['issue_date'] = pd.to_datetime(df['issue_date'])
df[['nav','cnav']] = df[['nav','cnav']].applymap(lambda x:x and float(x) or None)
colnum = fund_type == 'ct'\
and range(df.columns.get_loc('-1week'), len(df.columns))\
or range(df.columns.get_loc('percent'), len(df.columns))
df.iloc[:,colnum] = df.iloc[:,colnum].applymap(lambda x:x and float(x)/100 or None)
return df
| 5,354,742 |
def transfer_from_iterable(
grm: util.PathLike,
data: Iterable[str],
**kwargs: Any) -> Iterator[interface.Response]:
"""
Transfer from each MRS in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): source MRSs as SimpleMRS strings
**kwargs: additional keyword arguments to pass to the
ACETransferer
Yields:
:class:`~delphin.interface.Response`
"""
with ACETransferer(grm, **kwargs) as transferer:
for datum in data:
yield transferer.interact(datum)
| 5,354,743 |
def recovermarks():
"""Walk through the tags made by ``colormarks`` and re-create the marks that were found.
This is useful if any marks were accidentally deleted and can be used for
    recovering them as long as they were initially tagged properly.
"""
# collect
result = []
for fn, l in database.select('marks'):
m = set( (l['marks']) if hasattr(l['marks'], '__iter__') else [int(x, 16) for x in l['marks'].split(',')] if type(l['marks']) is str else [l['marks']])
res = [(ea, d['mark']) for ea, d in func.select(fn, 'mark')]
if m != { a for a, _ in res }:
logging.warning("{:s} : Ignoring the function tag \"{:s}\" for function {:#x} due to its value being out-of-sync with the contents values ({!s} <> {!s}).".format('.'.join((__name__, 'recovermarks')), fn, builtins.map(hex, m), builtins.map(hex, set(a for a, _ in res))))
result.extend(res)
    result.sort(key=lambda item: item[1])
# discovered marks versus database marks
result = dict(result)
current = {ea : descr for ea, descr in database.marks()}
# create tags
for x, y in result.items():
if x in current:
logging.warning("{:#x}: skipping already existing mark : {!r}".format(x, current[x]))
continue
# x not in current
if x not in current:
logging.info("{:#x}: adding missing mark due to tag : {!r}".format(x, result[x]))
elif current[x] != result[x]:
logging.info("{:#x}: database tag is different than mark description : {!r}".format(x, result[x]))
else:
assert current[x] == result[x]
database.mark(x, y)
# marks that aren't reachable in the database
    for ea in set(current.keys()).difference(result.keys()):
logging.warning("{:#x}: unreachable mark (global) : {!r}".format(ea, current[ea]))
# color them
colormarks()
| 5,354,744 |
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
)
| 5,354,745 |
def exec_anaconda():
"""Re-execute the current Python script using the Anaconda Python
interpreter included with Splunk_SA_Scientific_Python.
After executing this function, you can safely import the Python
libraries included in Splunk_SA_Scientific_Python (e.g. numpy).
Canonical usage is to put the following at the *top* of your
Python script (before any other imports):
import exec_anaconda
exec_anaconda.exec_anaconda()
# Your other imports should now work.
import numpy as np
import pandas as pd
...
"""
if PSC_PATH_PREFIX in sys.executable:
from imp import reload
fix_sys_path()
reload(json)
reload(os)
reload(platform)
reload(stat)
reload(subprocess)
reload(sys)
return
check_python_version()
system = (platform.system(), platform.machine())
if system not in SUPPORTED_SYSTEMS:
raise Exception('Unsupported platform: %s %s' % (system))
sa_scipy = '%s%s' % (PSC_PATH_PREFIX, SUPPORTED_SYSTEMS[system])
sa_path = os.path.join(get_apps_path(), sa_scipy)
if not os.path.isdir(sa_path):
raise Exception('Failed to find Python for Scientific Computing Add-on (%s)' % sa_scipy)
system_path = os.path.join(sa_path, 'bin', '%s' % (SUPPORTED_SYSTEMS[system]))
if system[0] == 'Windows':
python_path = os.path.join(system_path, 'python.exe')
# MLA-564: Windows need the DLLs to be in the PATH
dllpath = os.path.join(system_path, 'Library', 'bin')
pathsep = os.pathsep if 'PATH' in os.environ else ''
os.environ['PATH'] = os.environ.get('PATH', '') + pathsep + dllpath
else:
python_path = os.path.join(system_path, 'bin', 'python')
# MLA-996: Unset PYTHONHOME
# XXX: After migration to Python3 PYTHONPATH is not set anymore so this will
# be unnecessary. SPL-170875
os.environ.pop('PYTHONHOME', None)
# Ensure that execute bit is set on <system_path>/bin/python
if system[0] != 'Windows':
mode = os.stat(python_path).st_mode
os.chmod(python_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
print('INFO Running %s' % " ".join([python_path] + sys.argv), sys.stderr)
sys.stderr.flush()
# In Quake and later PYTHONPATH is removed or not set.
# So after shelling into PSC Python interpreter will lose
# information about what Splunk core's Python path is. So we
# stash it into an environment variable to retrieve it after
# switching into conda.
os.environ['SPLUNK_CORE_PYTHONPATH'] = json.dumps(sys.path)
try:
if system[0] == "Windows":
os.environ['MKL_NUM_THREADS'] = '1'
# os.exec* broken on Windows: http://bugs.python.org/issue19066
subprocess.check_call([python_path] + sys.argv)
os._exit(0)
else:
os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.execl(python_path, python_path, *sys.argv)
except Exception:
traceback.print_exc(None, sys.stderr)
sys.stderr.flush()
time.sleep(0.1)
raise RuntimeError(
'Error encountered while loading Python for Scientific Computing, see search.log.'
)
| 5,354,746 |
def _conform_list(li: List[Any]) -> List[T]:
"""
Ensures that every element in *li* can conform to one type
:param li: list to conform
:return: conformed list
"""
conform_type = li[0].__class__
for i in li:
if isinstance(i, StrictType):
conform_type = i.__class__
break
base_type = (
conform_type.__base__ if conform_type.__base__ != object else None
) # do not let base_type be 'object'
if not all(type(i) == conform_type or type(i) == base_type for i in li):
raise Exception(f"{li} can not be conformed to the {conform_type}")
return [i if isinstance(i, conform_type) else conform_type(i) for i in li]
| 5,354,747 |
def _255_to_tanh(x):
"""
range [0, 255] to range [-1, 1]
:param x:
:return:
"""
return (x - 127.5) / 127.5
| 5,354,748 |
def read_content(filename):
"""Read content and metadata from file into a dictionary."""
# Read file content.
text = fread(filename)
# Read metadata and save it in a dictionary.
date_slug = os.path.basename(filename).split('.')[0]
match = re.search('^(?:(\\d\\d\\d\\d-\\d\\d-\\d\\d)-)?(.+)$', date_slug)
content = {
'date': format_date(match.group(1) or '1970-01-01'),
'date_ymd': match.group(1) or '1970-01-01',
'date_rfc_2822': format_date(match.group(1) or '1970-01-01', date_format_override='%a, %d %b %Y %H:%M:%S +0000'),
'slug': match.group(2),
}
# Convert Markdown content to HTML.
if filename.endswith(('.md', '.mkd', '.mkdn', '.mdown', '.markdown')):
# Separate text and template variables
variables, text = separate_content_and_variables(text)
text = variables + "{% include 'md_header.html' %}" + \
commonmark.commonmark(text) + "{% include 'md_footer.html' %}"
# Optional additional parsing
if 'add_parser' in sys.modules:
text = add_parser.parse(text, filename)
# Update the dictionary with content text and summary text.
content.update({
'content': text,
})
return content
| 5,354,749 |
def helm_commands():
"""Helm commands group."""
| 5,354,750 |
def train_one_epoch(train_loader, model, criterion, optimizer, epoch, opt, num_train_samples, no_acc_eval=False):
""" model training
:param train_loader: train dataset loader
:param model: model
:param criterion: loss criterion
:param optimizer:
:param epoch: current epoch
:param num_train_samples: total number of samples in train_loader
    :param no_acc_eval (bool): if True, skip accuracy evaluation during model training
:return:
"""
info = {}
losses = AverageMeter('Loss ', ':6.4g')
top1 = AverageMeter('Acc@1 ', ':6.2f')
top5 = AverageMeter('Acc@5 ', ':6.2f')
# switch to train mode
model.train()
lr_scheduler = global_utils.LearningRateScheduler(mode=opt.lr_mode,
lr=opt.lr,
num_training_instances=num_train_samples,
target_lr=opt.target_lr,
stop_epoch=opt.epochs,
warmup_epoch=opt.warmup,
stage_list=opt.lr_stage_list,
stage_decay=opt.lr_stage_decay)
lr_scheduler.update_lr(batch_size=epoch * num_train_samples)
optimizer.zero_grad()
batches_per_allreduce_count = 0
for i, (input_, target) in enumerate(train_loader):
if not opt.independent_training:
lr_scheduler.update_lr(batch_size=input_.shape[0] * opt.world_size)
else:
lr_scheduler.update_lr(batch_size=input_.shape[0])
current_lr = lr_scheduler.get_lr()
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr * opt.batches_per_allreduce
bool_label_smoothing = False
bool_mixup = False
if not opt.dist_mode == 'cpu':
input_ = input_.cuda(opt.gpu, non_blocking=True)
target = target.cuda(opt.gpu, non_blocking=True)
transformed_target = target
with torch.no_grad():
if hasattr(opt, 'label_smoothing') and opt.label_smoothing:
bool_label_smoothing = True
if hasattr(opt, 'mixup') and opt.mixup:
bool_mixup = True
if bool_label_smoothing and not bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
if not bool_label_smoothing and bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes)
input_, transformed_target = mixup(input_, transformed_target)
if bool_label_smoothing and bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
input_, transformed_target = mixup(input_, transformed_target)
# compute output
output = model(input_)
model_saved = model.module if hasattr(model, 'module') else model
logit_loss = criterion(output, transformed_target)
ts_feature_loss, ts_logit_loss = model_saved.compute_ts_distill_loss()
loss = logit_loss + opt.teacher_feature_weight * ts_feature_loss + opt.teacher_logit_weight * ts_logit_loss
# measure accuracy and record loss
input_size = int(input_.size(0))
if not no_acc_eval:
# pylint: disable=unbalanced-tuple-unpacking
acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
top1.update(float(acc1[0]), input_size)
top5.update(float(acc5[0]), input_size)
else:
acc1 = [0]
acc5 = [0]
losses.update(float(loss), input_size)
if opt.apex:
if opt.dist_mode == 'horovod':
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
batches_per_allreduce_count += 1
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
else:
# if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
# if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.step()
else:
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
loss.backward()
batches_per_allreduce_count += 1
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.step()
if i % opt.print_freq == 0:
print(
f'<rank {opt.rank}> Train epoch={epoch}, i={i}, loss={float(loss):4g}, \
logit_loss={float(logit_loss):4g}, ts_feature_loss={float(ts_feature_loss):4g}, \
ts_logit_loss={float(ts_logit_loss):4g}, \
acc1={float(acc1[0]):4g}%, acc5={float(acc5[0]):4g}%, lr={current_lr:4g}')
top1_acc_avg = top1.avg
top5_acc_avg = top5.avg
losses_acc_avg = losses.avg
# if distributed, sync
if opt.dist_mode == 'horovod' and (not opt.independent_training):
sync_tensor = torch.tensor([top1.sum, top1.count, top5.sum, top5.count,
losses.sum, losses.count], dtype=torch.float32)
hvd.allreduce(sync_tensor, name='sync_tensor_topk_acc')
top1_acc_avg = (sync_tensor[0] / sync_tensor[1]).item()
top5_acc_avg = (sync_tensor[2] / sync_tensor[3]).item()
losses_acc_avg = (sync_tensor[4] / sync_tensor[5]).item()
elif opt.dist_mode == 'apex' and opt.distributed:
sync_tensor = torch.tensor([top1.sum, top1.count, top5.sum, top5.count,
losses.sum, losses.count], dtype=torch.float32).cuda()
dist.all_reduce(sync_tensor, op=dist.ReduceOp.SUM)
top1_acc_avg = (sync_tensor[0] / sync_tensor[1]).item()
top5_acc_avg = (sync_tensor[2] / sync_tensor[3]).item()
losses_acc_avg = (sync_tensor[4] / sync_tensor[5]).item()
else:
pass
info['losses_acc'] = losses_acc_avg
info['top1_acc'] = top1_acc_avg
info['top5_acc'] = top5_acc_avg
return info
| 5,354,751 |
def _d3hw_id_map(item, id_map, id_counter, is_unit):
"""
Assigns numeric ID for each unit, port and edge
Fills in string ID to numeric ID map
Replaces string ID with numeric
Sets maxID for unit meta
:param item: unit, port or edge
:param id_map: dict with mapping string ids to numeric ids
:param id_counter: list with single item - id counter (list is used to 'pass by ref')
:param is_unit: True if item is unit, otherwise False
:return:
"""
id_map[item["id"]] = id_counter[0]
DEBUG = 0
item["id"] = str(id_counter[0]) + ["", ":" + item["id"]][DEBUG]
id_counter[0] += 1
if not is_unit:
return
for i in item.get("ports", []):
_d3hw_id_map(i, id_map, id_counter, is_unit=False)
for i in item.get("edges", []):
_d3hw_id_map(i, id_map, id_counter, is_unit=False)
for i in item.get("children", []):
_d3hw_id_map(i, id_map, id_counter, is_unit=True)
item["hwMeta"]["maxId"] = id_counter[0]
| 5,354,752 |
def Write(Variable, f):
"""Function to Convert None Strings to Strings and Format to write to file with ,"""
import datetime
if isinstance(Variable, str) == False:
if isinstance(Variable, datetime.datetime) == True:
return f.write(f"{Variable.strftime('%Y-%m-%d')},")
else:
Variable = round(Variable, 2)
return f.write(f"{str(Variable)},")
elif isinstance(Variable, str) == True:
return f.write(f"{(Variable)},")
| 5,354,753 |
def graphs_infos():
"""
Build and return a JSON file containing some information on all the graphs.
The json file is built with the following format:
[
For each graph in the database :
{
'graph_id': the id of the graph,
'name': the name of the graph,
'iso': the string 'true' or 'false' depending if the graph belongs to J or not
}
]
:return: a JSON file containing some information on all the graphs.
"""
return jsonify(gdb.get_graph_infos())
| 5,354,754 |
def collect_compare(left, right):
"""
returns a tuple of four lists describing the file paths that have
been (in order) added, removed, altered, or left the same
"""
return collect_compare_into(left, right, [], [], [], [])
| 5,354,755 |
def runSuite(suiteparam, testrun, testid):
""" Runs the whole test suite, main entry
Args:
suiteparam : RunSuiteParam container
testrun: following values possible: one, all or from
testid: test case to run (for 'one' or 'from')
Returns:
nothing
Raises:
Exception in case of any error
"""
try:
he = None
if suiteparam.testprop != None:
he = _readListParam(suiteparam.testprop, suiteparam)
param = TestCaseHelper.TestParam(he, suiteparam)
list = _readTests(param, testrun, testid)
list.sort()
suite = unittest.TestSuite()
for (testid, num) in list:
logging.info("Run: " + testid + " " + str(num))
te = _getTestCase(suiteparam.factory, param, testid)
if te != None: suite.addTests(te)
res = unittest.TestResult()
suite.run(res)
_resReport(res)
    except TestCaseHelper.TestException as e:
e.draw()
| 5,354,756 |
def test_response_failure_initialisation_with_exception(response_type: str) -> None:
"""It builds a ResponseFailure from exception."""
response = res.ResponseFailure(response_type, Exception("Just an error message"))
assert bool(response) is False
assert response.type == response_type
assert response.message == "Exception: Just an error message"
| 5,354,757 |
def _get_smallest_vectors(supercell, primitive, symprec):
"""
shortest_vectors:
Shortest vectors from an atom in primitive cell to an atom in
supercell in the fractional coordinates. If an atom in supercell
is on the border centered at an atom in primitive and there are
multiple vectors that have the same distance and different
directions, several shortest vectors are stored. The
multiplicity is stored in another array, "multiplicity".
[atom_super, atom_primitive, multiple-vectors, 3]
multiplicity:
Number of multiple shortest vectors (third index of "shortest_vectors")
[atom_super, atom_primitive]
"""
p2s_map = primitive.get_primitive_to_supercell_map()
size_super = supercell.get_number_of_atoms()
size_prim = primitive.get_number_of_atoms()
shortest_vectors = np.zeros((size_super, size_prim, 27, 3), dtype='double')
multiplicity = np.zeros((size_super, size_prim), dtype='intc')
reduced_bases = get_reduced_bases(supercell.get_cell(), symprec)
reduced_bases_inv = np.linalg.inv(reduced_bases)
primitive_lattice = primitive.get_cell()
primitive_lattice_inv = np.linalg.inv(primitive_lattice)
# matrix that converts fractional positions in the reduced bases into
# fractional positions in the primitive lattice
supercell_to_primitive_frac = reduced_bases.dot(primitive_lattice_inv)
# all positions are reduced into the cell formed by the reduced bases
supercell_fracs = np.dot(supercell.get_positions(), reduced_bases_inv)
supercell_fracs -= np.rint(supercell_fracs)
for s_index, s_pos in enumerate(supercell_fracs): # run in supercell
for j, p_index in enumerate(p2s_map): # run in primitive
p_pos = supercell_fracs[p_index]
# find smallest vectors equivalent under the supercell lattice
vectors = _get_equivalent_smallest_vectors_simple(s_pos - p_pos,
reduced_bases,
symprec)
# return primitive-cell-fractional vectors rather than supercell-fractional
vectors = [np.dot(v, supercell_to_primitive_frac) for v in vectors]
multiplicity[s_index][j] = len(vectors)
for k, elem in enumerate(vectors):
shortest_vectors[s_index][j][k] = elem
return shortest_vectors, multiplicity
| 5,354,758 |
def root():
"""Root endpoint that only checks if the server is running."""
return 'Server is running...'
| 5,354,759 |
def _PGProperty_SetAttributes(self, attributes):
"""
Set the property's attributes from a Python dictionary.
"""
for name,value in attributes.items():
self.SetAttribute(name, value)
| 5,354,760 |
def clone_model(model, **new_values):
"""Clones the entity, adding or overriding constructor attributes.
The cloned entity will have exactly the same property values as the
original entity, except where overridden. By default, it will have no
parent entity or key name, unless supplied.
Args:
model: datastore_services.Model. Model to clone.
**new_values: dict(str: *). Keyword arguments to override when
invoking the cloned entity's constructor.
Returns:
datastore_services.Model. A cloned, and possibly modified, copy of self.
Subclasses of BaseModel will return a clone with the same type.
"""
# Reference implementation: https://stackoverflow.com/a/2712401/4859885.
cls = model.__class__
model_id = new_values.pop('id', model.id)
props = {k: v.__get__(model, cls) for k, v in cls._properties.items()} # pylint: disable=protected-access
props.update(new_values)
return cls(id=model_id, **props)
| 5,354,761 |
def describe_project(projectId=None, syncFromResources=None):
"""
Gets details about a project in AWS Mobile Hub.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_project(
projectId='string',
syncFromResources=True|False
)
:type projectId: string
:param projectId: [REQUIRED]\nUnique project identifier.\n
:type syncFromResources: boolean
:param syncFromResources: If set to true, causes AWS Mobile Hub to synchronize information from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub project.
:rtype: dict
ReturnsResponse Syntax
{
'details': {
'name': 'string',
'projectId': 'string',
'region': 'string',
'state': 'NORMAL'|'SYNCING'|'IMPORTING',
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1),
'consoleUrl': 'string',
'resources': [
{
'type': 'string',
'name': 'string',
'arn': 'string',
'feature': 'string',
'attributes': {
'string': 'string'
}
},
]
}
}
Response Structure
(dict) --
Result structure used for requests of project details.
details (dict) --
Detailed information about an AWS Mobile Hub project.
name (string) --
Name of the project.
projectId (string) --
Unique project identifier.
region (string) --
Default region to use for AWS resource creation in the AWS Mobile Hub project.
state (string) --
Synchronization state for a project.
createdDate (datetime) --
Date the project was created.
lastUpdatedDate (datetime) --
Date of the last modification of the project.
consoleUrl (string) --
Website URL for this project in the AWS Mobile Hub console.
resources (list) --
List of AWS resources associated with a project.
(dict) --
Information about an instance of an AWS resource associated with a project.
type (string) --
Simplified name for type of AWS resource (e.g., bucket is an Amazon S3 bucket).
name (string) --
Name of the AWS resource (e.g., for an Amazon S3 bucket this is the name of the bucket).
arn (string) --
AWS resource name which uniquely identifies the resource in AWS systems.
feature (string) --
Identifies which feature in AWS Mobile Hub is associated with this AWS resource.
attributes (dict) --
Key-value attribute pairs.
(string) --
Key part of key-value attribute pairs.
(string) --
Value part of key-value attribute pairs.
Exceptions
Mobile.Client.exceptions.InternalFailureException
Mobile.Client.exceptions.ServiceUnavailableException
Mobile.Client.exceptions.UnauthorizedException
Mobile.Client.exceptions.TooManyRequestsException
Mobile.Client.exceptions.BadRequestException
Mobile.Client.exceptions.NotFoundException
:return: {
'details': {
'name': 'string',
'projectId': 'string',
'region': 'string',
'state': 'NORMAL'|'SYNCING'|'IMPORTING',
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1),
'consoleUrl': 'string',
'resources': [
{
'type': 'string',
'name': 'string',
'arn': 'string',
'feature': 'string',
'attributes': {
'string': 'string'
}
},
]
}
}
:returns:
Mobile.Client.exceptions.InternalFailureException
Mobile.Client.exceptions.ServiceUnavailableException
Mobile.Client.exceptions.UnauthorizedException
Mobile.Client.exceptions.TooManyRequestsException
Mobile.Client.exceptions.BadRequestException
Mobile.Client.exceptions.NotFoundException
"""
pass
| 5,354,762 |
def learn_laterals(frcs, bu_msg, perturb_factor, use_adjacency_graph=False):
"""Given the sparse representation of each training example,
learn perturbation laterals. See train_image for parameters and returns.
"""
    if use_adjacency_graph:
graph = make_adjacency_graph(frcs, bu_msg)
graph = adjust_edge_perturb_radii(frcs, graph, perturb_factor=perturb_factor)
else:
graph = nx.Graph()
graph.add_nodes_from(range(frcs.shape[0]))
graph = add_underconstraint_edges(frcs, graph, perturb_factor=perturb_factor)
graph = adjust_edge_perturb_radii(frcs, graph, perturb_factor=perturb_factor)
edge_factors = np.array(
[(edge_source, edge_target, edge_attrs['perturb_radius'])
for edge_source, edge_target, edge_attrs in graph.edges_iter(data=True)])
return graph, edge_factors
| 5,354,763 |
def get_info(ingest_ldd_src_dir):
"""Get LDD version and namespace id."""
# look in src directory for ingest LDD
ingest_ldd = find_primary_ingest_ldd(ingest_ldd_src_dir)
# get ingest ldd version
tree = ETree.parse(ingest_ldd[0])
root = tree.getroot()
ldd_version = root.findall(f'.//{{{PDS_NS}}}ldd_version_id')[0].text
ns_id = root.findall(f'.//{{{PDS_NS}}}namespace_id')[0].text
return ingest_ldd, ns_id, ldd_version
| 5,354,764 |
def generate_tfidf(corpus_df, dictionary):
"""Generates TFIDF matrix for the given corpus.
Parameters
----------
corpus_df : pd.DataFrame
The corpus dataframe.
dictionary : gensim.corpora.dictionary.Dictionary
Dictionary defining the vocabulary of the TFIDF.
Returns
-------
X : np.ndarray
TFIDF matrix with documents as rows and vocabulary as the columns.
"""
tfidf_model = TfidfModel(
corpus_df.bag_of_words.apply(lambda x: dictionary.doc2bow(x)))
model = tfidf_model[
corpus_df.bag_of_words.apply(lambda x: dictionary.doc2bow(x))]
X = corpus2csc(model, len(dictionary)).T
return X
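
# Example usage (a sketch, assuming gensim and pandas are installed and that
# TfidfModel/corpus2csc are imported at module level as this function expects).
import pandas as pd
from gensim.corpora import Dictionary

example_corpus = pd.DataFrame({
    "bag_of_words": [["data", "science"], ["open", "data"], ["science", "rocks"]]
})
example_dictionary = Dictionary(example_corpus.bag_of_words)
X = generate_tfidf(example_corpus, example_dictionary)
print(X.shape)  # (3, 5): 3 documents by vocabulary size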
| 5,354,765 |
def load_config_file(file_path, fallback_file_path):
"""Load YAML format configuration file
:param file_path: The path to config file
:type file_path: `str`
:param fallback_file_path: The fallback path to config file
:type fallback_file_path: `str`
:return: config_map
:rtype: dict
"""
file_path = file_path if os.path.isfile(file_path) else fallback_file_path
logging.info("Loading %s..", file_path)
if not os.path.isfile(file_path):
# check if the config file exists
abort_framework("Config file not found at: {}".format(file_path))
try:
config_map = yaml.load(FileOperations.open(file_path, "r"))
return config_map
except yaml.YAMLError:
abort_framework("Error parsing config file at: {}".format(file_path))
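
# Example usage (illustrative, hypothetical paths): prefer the user's config file
# and fall back to the framework default when it is missing.
settings = load_config_file("owtf_config.yaml", "defaults/owtf_config.yaml")
logging.info("Loaded %d config entries", len(settings))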
| 5,354,766 |
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    np.int64,
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
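
# Example usage (illustrative): random span masks for a batch of 2 sequences of
# length 20, masking roughly half the timesteps with fixed spans of length 3.
example_mask = compute_mask_indices(
    shape=(2, 20),
    padding_mask=None,
    mask_prob=0.5,
    mask_length=3,
    mask_type="static",
)
print(example_mask.shape)        # (2, 20)
print(example_mask.sum(axis=1))  # masked positions per sequence (overlaps reduce the count)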
| 5,354,767 |
def main():
"""Queries PanelApp, checks Moka database and imports Moka PanelApp panels.
Args:
args (list): A list of command-line arguments. e.g. ['-c', 'config.ini']
"""
# Read server details from config file
_args = lib.cli(sys.argv[1:])
db_config = _args.config['mokadb']
# Setup logging
mlogger.log_setup(_args.logfile)
logger = logging.getLogger('mokapapp')
# Get a list of MokaPanel objects, each containing a unique PanelApp panel + colour combination
logger.info('Getting PanelApp panels as MokaPanel objects')
panels = query.main(head=_args.head)
logger.debug(f'Retrieved {len(panels)} MokaPanels: {[panel.name for panel in panels]}')
# Check Moka is ready for import:
# - Update panels in Item table
# - Update panel versions in Item table
# - Raise error if panel HGNCID missing from Moka
check.main(db_config, panels)
# Initialise Moka database object for updating panels
logger.info('Initialising MokaDB connection')
mpu = db.MokaPanelUpdater(
server=db_config['server'],
db=db_config['db'],
user=db_config['user'],
password=db_config['password']
)
# Deprecated panels are absent from PanelApp. Deactivate these panels in Moka.
logger.info('Deactivating all Moka Panels missing from PanelApp API')
mpu.activator.deactivate_deprecated(panels)
# For each panel,
for panel in panels:
# If the panel is new to Moka, import it. New panels have no entry in the NGS Panel table.
if not mpu.in_ngs_panel(panel.hash):
logger.info(f'New panel. Inserting into Moka: {panel}')
mpu.insert_into_moka(panel)
# Else if the panel is in Moka
else:
# Check if the latest panel version is in NGSPanel
if mpu.version_in_ngs_panel(panel.hash, panel.version):
# Moka contains the latest panel so no import is required.
# Ensure this is the only version visible to users by deactivating older versions
logger.info(f'Panel is present in NGSPanel. Setting only active version: {panel}')
mpu.activator.set_only_active(panel.hash, panel.version)
else:
# This is a new version of a panel in the NGSPanel Table.
# Insert into Moka, deactivating old panels first
logger.info(f'Updated panel. Inserting into Moka & Setting Active: {panel}')
mpu.insert_into_moka(panel, deactivate_old=True)
logger.info('Moka Panel import complete')
| 5,354,768 |
async def test_single_get(currencies: Currencies, symbol_from: str, symbol_to: str):
"""
>>> from aiocrypto_prices import currencies
>>> await currencies.ETH.prices.get('USD')
1053.28
"""
resp = await getattr(currencies, symbol_from).prices.get(symbol_to)
assert isinstance(resp, float)
| 5,354,769 |
def test_double_linked_list_pop_shifts_head_properly(dll_fixture):
"""Test pop shifts head."""
dll_fixture.push('potato')
dll_fixture.push('cabbage')
dll_fixture.pop()
assert dll_fixture.head.data == 'potato'
| 5,354,770 |
def _rankingmap_mpl(countrymasksnc, ranking, x, scenario=None, method='number', title='', label=''):
"""
countrymasksnc : nc.Dataset instance of countrymasks.nc
ranking: Ranking instance
method: "number" (default) or "value"
"""
import matplotlib.pyplot as plt
import numpy as np
if method not in ['number', 'value']:
raise ValueError('method must be "number" or "value"')
areas = ranking.areas
ds = countrymasksnc
lat, lon = ds['lat'][:], ds['lon'][:]
ni, nj = lat.size, lon.size
data = np.empty((ni, nj), dtype=int if method == 'number' else float)
mask = np.ones((ni, nj), dtype=bool) # undefined areas
for area in areas:
if 'm_'+area not in ds.variables:
            print('! rankingmap::', area, 'not found in country masks')
continue
value = getattr(ranking, method)(area, x, scenario)
if value == 'undefined':
print('! rankingmap::value', area, 'undefined')
continue
m = ds['m_'+area][:] > 0
data[m] = value
mask[m] = False
fig, ax = plt.subplots(1,1)
h = ax.imshow(np.ma.array(data, mask=mask), extent=[-180, 180, -90, 90],
cmap=plt.cm.viridis_r if method == "number" else plt.cm.viridis,
vmax=len(areas) if method=='number' else None)
# default_title = getattr(ranking, 'plot_label_y','')+' :: ranking: '+method
if ranking.plot_type == 'indicator_vs_temperature':
details = 'warming level: {} {}'.format(x, ranking.plot_unit_x)
else:
details = 'period: {}, scenario: {}'.format(x, {'rcp26':'RCP 2.6', 'rcp45':'RCP 4.5', 'rcp60':'RCP 6', 'rcp85':'RCP 8.5'}.get(scenario, scenario))
default_title = getattr(ranking, 'plot_label_y','') + '\n' + details
default_label = 'ranking number' if method == 'number' else ('ranking value ({})'.format(getattr(ranking, 'plot_unit_y')))
ax.set_title(title or default_title)
plt.colorbar(h, ax=ax, orientation='horizontal', label=label or default_label)
return fig
| 5,354,771 |
def test_sent_entities(sent_loader):
"""Test getting Entities from a Sentinel Incident."""
responses.add(
responses.POST,
re.compile("https://management.azure.com/.*"),
json={"entities": [{"kind": "ipv4", "properties": "13.67.128.10"}]},
status=200,
)
ents = sent_loader.get_entities("0c7d4a60-46b3-45d0-a966-3b51373faef0")
assert isinstance(ents, List)
assert ents[0][0] == "ipv4"
assert ents[0][1] == "13.67.128.10"
| 5,354,772 |
def pathpatch_2d_to_3d_affine(pathpatch, mat_rot=np.array([[1,0,0],[0,1,0],[0,0,1]]), vec_trans=np.array([0,0,0])):
"""
    Transforms a 2D Patch to a 3D patch using the affine transform
of the given rotation matrix and translation vector.
The pathpatch is assumed to be on the plane Z = 0.
"""
import mpl_toolkits.mplot3d.art3d as art3d
path = pathpatch.get_path() #Get the path and the associated transform
trans = pathpatch.get_patch_transform()
path = trans.transform_path(path) #Apply the transform
pathpatch.__class__ = art3d.PathPatch3D #Change the class
pathpatch._code3d = path.codes #Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color
verts = path.vertices #Get the vertices in 2D
M = np.array([
[mat_rot[0, 0], mat_rot[0, 1], mat_rot[0, 2], vec_trans[0]],
[mat_rot[1, 0], mat_rot[1, 1], mat_rot[1, 2], vec_trans[1]],
[mat_rot[2, 0], mat_rot[2, 1], mat_rot[2, 2], vec_trans[2]],
])
pathpatch._segment3d = np.array([np.dot(M, (x, y, 0, 1)) for x, y in verts])
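
# Example usage (a sketch): add a 2D circle to a 3D axes, then rotate it 90 degrees
# about the x-axis and lift it to z = 0.5 with the affine transform above.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
circle = Circle((0, 0), 1, facecolor="steelblue", alpha=0.5)
ax.add_patch(circle)
rot_x_90 = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])  # maps the z=0 plane onto the y=0 plane
pathpatch_2d_to_3d_affine(circle, mat_rot=rot_x_90, vec_trans=np.array([0, 0, 0.5]))
ax.set_xlim(-2, 2); ax.set_ylim(-2, 2); ax.set_zlim(-2, 2)
plt.savefig("circle3d.png")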
| 5,354,773 |
def fetch_data(
property: Property,
start_date: dt.date,
*,
end_date: Optional[dt.date] = None,
dimensions: Optional[List[Dimension]] = None,
) -> List[Dict[str, Any]]:
"""Query Google Search Console API for data.
Args:
property (Property): Property to request data for.
start_date (dt.date): Earliest day to request information for.
end_date (Optional[dt.date]): Latest day to request information for. Default to
``None``. Will be set to ``start_date`` if ``None``.
dimensions (Optional[List[Dimension]], optional): Dimensions to request from
API. Defaults to ``None``. Will be set to ``["page", "device"]`` if
``None``.
Returns:
List[Dict[str, Any]]: Response from API.
"""
if end_date is None:
end_date = start_date
if dimensions is None:
dimensions = ["page", "device"]
results = []
start_row = 0
ROW_LIMIT = 25000
while True:
request = {
"startDate": start_date.isoformat(),
"endDate": end_date.isoformat(),
"dimensions": dimensions,
"rowLimit": ROW_LIMIT,
"startRow": start_row,
"dataState": "all",
}
response = (
searchconsole_service.searchanalytics()
.query(siteUrl=property.url, body=request)
.execute()
)
start_row += ROW_LIMIT
result = response.get("rows", [])
results.extend(result)
if len(result) == 0:
break
return results
| 5,354,774 |
def fileOpenDlg(tryFilePath="",
tryFileName="",
prompt=_translate("Select file to open"),
allowed=None):
"""A simple dialogue allowing read access to the file system.
:parameters:
tryFilePath: string
default file path on which to open the dialog
tryFileName: string
default file name, as suggested file
prompt: string (default "Select file to open")
can be set to custom prompts
allowed: string (available since v1.62.01)
a string to specify file filters.
e.g. "Text files (\\*.txt) ;; Image files (\\*.bmp \\*.gif)"
See http://pyqt.sourceforge.net/Docs/PyQt4/qfiledialog.html
#getOpenFileNames
for further details
If tryFilePath or tryFileName are empty or invalid then
current path and empty names are used to start search.
If user cancels, then None is returned.
"""
ensureQtApp()
if allowed is None:
allowed = ("All files (*.*);;"
"PsychoPy Data (*.psydat);;"
"txt (*.txt *.dlm *.csv);;"
"pickled files (*.pickle *.pkl);;"
"shelved files (*.shelf)")
fdir = os.path.join(tryFilePath, tryFileName)
filesToOpen = QtWidgets.QFileDialog.getOpenFileNames(parent=None,
caption=prompt,
directory=fdir,
filter=allowed)
if type(filesToOpen) == tuple: # some versions(?) of PyQt return (files, filter)
filesToOpen = filesToOpen[0]
filesToOpen = [str(fpath) for fpath in filesToOpen
if os.path.exists(fpath)]
if len(filesToOpen) == 0:
return None
return filesToOpen
| 5,354,775 |
def inport(port_type, disconnected_value):
"""Marks this field as an inport"""
assert port_type in port_types, \
"Got %r, expected one of %s" % (port_type, port_types)
tag = "inport:%s:%s" % (port_type, disconnected_value)
return tag
| 5,354,776 |
def Align(samInHandle, fa, id, position, varId, refseq, altseq, mapq = 20):
"""
    `position` is the left breakpoint of the variant and should be 1-based,
    because expressions like fa[id][position-1] are used to fetch bases from
    the reference sequence string.
"""
if position < 1:
raise ValueError('[ERROR] The reference position is not 1-base: %r' % position)
if id not in fa:
raise ValueError('#[ERROR] The reference did not contain %s' % id)
rr,aa,com,diff = 0,0,0,0
for pileup in samInHandle.pileup(id, position-1, position):
pos = pileup.pos + 1 # 0-base index to 1-base index
if pos != position: continue
for read in [al for al in pileup.pileups if al.alignment.mapq >= mapq]:
refPos = read.alignment.pos - read.alignment.qstart # 0-base
# Next if the position is 2bp near the end of the reads
if position > refPos + read.alignment.rlen - 2: continue
q = Ref2QryPos(read.alignment.pos, position, read.alignment.cigar)
if q > read.alignment.rlen:
raise ValueError('#[BUG] The query position(%r) is > read length(%r)'
% (q, read.alignment.rlen))
if q == read.alignment.rlen: continue
refSeq = fa[id][position:refPos+read.alignment.rlen]
qrySeq = altseq + fa[id][position+len(refseq)-1:position+len(refseq)+read.alignment.rlen-q]
# [Debug]
# print '[POS]', id, pos, read.alignment.pos+1, '\n[QRY]', fa[id][refPos:position], qrySeq, read.alignment.qstart, q,'\n[TAR]',fa[id][refPos:position],refSeq,'\n[SEQ]', read.alignment.seq, read.alignment.cigar, read.alignment.cigarstring, read.alignment.is_secondary, '\n'
zr, _ = SumMismatchQuality(read.alignment.seq[q:], read.alignment.qual[q:], refSeq) # Reference
za, _ = SumMismatchQuality(read.alignment.seq[q:], read.alignment.qual[q:], qrySeq) # Alternate
if zr == 0 and za == 0:
com += 1 # Common perfect
elif zr == 0 and za > 0:
rr += 1 # Reference perfect
elif zr > 0 and za == 0:
aa += 1 # Alternate perfect
else:
diff += 1 # All im-perfect
#read.alignment.tags += [('ZJ', varId)] + [('ZR', zr)] + [('ZA', za)] # Not output to save the store
#samOutHandle.write(read.alignment) # Not output to save the store
return rr,aa,com,diff
| 5,354,777 |
def _empty_aggregate(*args: npt.ArrayLike, **kwargs) -> npt.ArrayLike:
"""Return unchaged array."""
return args[0]
| 5,354,778 |
def kfpartial(fun, *args, **kwargs):
""" Allows to create partial functions with arbitrary arguments/keywords """
return partial(keywords_first(fun), *args, **kwargs)
| 5,354,779 |
def test_post_an_margin_order_without_symbol():
"""Tests the API endpoint to post a new margin order without symbol"""
client = Client(key, secret)
client.new_margin_order.when.called_with(
symbol="", side="SELL", type="LIMIT", quantity=0.02
).should.throw(ParameterRequiredError)
| 5,354,780 |
def test_extract_subgraph_default_edge_weight(property_graph_instance):
"""
Ensure the default_edge_weight value is added to edges with missing
properties used for weights.
"""
pG = property_graph_instance
selection = pG.select_edges("_TYPE_=='transactions'")
G = pG.extract_subgraph(create_using=DiGraph_inst,
selection=selection,
edge_weight_property="volume",
default_edge_weight=99)
# last item is the DataFrame rows
transactions = dataset1["transactions"][-1]
(srcs, dsts, weights) = zip(*[(t[0], t[1], t[2])
for t in transactions])
# replace None with the expected value (convert to a list to replace)
weights_list = list(weights)
weights_list[weights.index(None)] = 99.
weights = tuple(weights_list)
expected_edgelist = cudf.DataFrame({"src": srcs, "dst": dsts,
"weights": weights})
expected_edgelist = expected_edgelist.sort_values(by="src",
ignore_index=True)
actual_edgelist = G.unrenumber(G.edgelist.edgelist_df, "src",
preserve_order=True)
actual_edgelist = G.unrenumber(actual_edgelist, "dst",
preserve_order=True)
actual_edgelist = actual_edgelist.sort_values(by="src",
ignore_index=True)
assert G.is_directed()
assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
| 5,354,781 |
def power_state_update(system_id, state):
"""Report to the region about a node's power state.
:param system_id: The system ID for the node.
:param state: Typically "on", "off", or "error".
"""
client = getRegionClient()
return client(
UpdateNodePowerState,
system_id=system_id,
power_state=state)
| 5,354,782 |
def load_apigateway_locations_tx(
tx: neo4j.Transaction, locations: List[Dict],
project_id: str, gcp_update_tag: int,
) -> None:
"""
Ingest GCP Project Locations into Neo4j
:type neo4j_session: Neo4j session object
:param neo4j session: The Neo4j session object
:type locations: Dict
:param locations: A GCP Project Locations
:type project_id: str
:param project_id: Current Google Project Id
:type gcp_update_tag: timestamp
:param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
:rtype: NoneType
:return: Nothing
"""
ingest_project_locations = """
UNWIND {locations} as loc
MERGE (location:GCPLocation{id:loc.id})
ON CREATE SET
location.firstseen = timestamp()
SET
location.name = loc.name,
location.locationId = loc.locationId,
location.displayName = loc.displayName,
location.lastupdated = {gcp_update_tag}
WITH location
MATCH (owner:GCPProject{id:{ProjectId}})
MERGE (owner)-[r:RESOURCE]->(location)
ON CREATE SET
r.firstseen = timestamp(),
r.lastupdated = {gcp_update_tag}
"""
tx.run(
ingest_project_locations,
locations=locations,
ProjectId=project_id,
gcp_update_tag=gcp_update_tag,
)
| 5,354,783 |
def draw_piechart(question_info, explode, path):
"""Draw pie chart of each question.
:param: question_info is a list of users. eg: [12, 23, 43, 13]
means 12 people select A, 23 select B, 43 C, 13 D.
"""
labels = 'A', 'B', 'C', 'D'
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
#explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(question_info, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
#plt.show()
plt.savefig(path)
plt.clf() # note: remember plt.clf() to clear buffer
plt.close()
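
# Example usage (illustrative, hypothetical output path): 12/23/43/13 respondents
# chose A/B/C/D; explode the most popular slice slightly.
draw_piechart([12, 23, 43, 13], (0, 0, 0.1, 0), "question1_pie.png")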
| 5,354,784 |
def build_post307_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
"""Post redirected with 307, resulting in a 200 after redirect.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Simple boolean value true.
:paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Simple boolean value true.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = True # Optional. Default value is True.
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/http/redirect/307")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=url, headers=header_parameters, json=json, content=content, **kwargs)
| 5,354,785 |
def rm_ssp_storage(ssp_wrap, lus, del_unused_images=True):
"""Remove some number of LogicalUnits from a SharedStoragePool.
The changes are flushed back to the REST server.
:param ssp_wrap: SSP EntryWrapper representing the SharedStoragePool to
modify.
:param lus: Iterable of LU ElementWrappers or LUEnt EntryWrappers
representing the LogicalUnits to delete.
:param del_unused_images: If True, and a removed Disk LU was the last one
linked to its backing Image LU, the backing Image
LU is also removed.
:return: The (possibly) modified SSP wrapper.
"""
if _rm_lus(ssp_wrap.logical_units, lus,
del_unused_images=del_unused_images):
# Flush changes
ssp_wrap = ssp_wrap.update()
return ssp_wrap
| 5,354,786 |
def urp_detail_view(request, pk):
"""Renders the URP detail page
"""
urp = get_object_or_404(URP, pk=pk)
ctx = {
'urp': urp,
}
# if user is logged in as a student, check if user has already applied
if request.user.is_authenticated:
if request.user.uapuser.is_student:
ctx['applied'] = Application.objects.filter(applicant=request.user, urp=urp).exists()
else:
ctx['applied'] = True
return render(request, 'post/urp_detail.html', context=ctx)
| 5,354,787 |
def squeeze_excite(input_name, squeeze_factor):
"""Returns a squeeze-excite block."""
ops = []
append = functools.partial(append_op, ops)
append(op_name="se/pool0",
op_type=OpType.AVG_POOL,
input_kwargs={"window_shape": 0},
input_names=[input_name])
append(op_name="se/dense1",
op_type=OpType.DENSE,
op_kwargs={"features": f"S:-1%{squeeze_factor}"})
append(op_name="se/swish2",
op_type=OpType.SWISH)
append(op_name="se/dense3",
op_type=OpType.DENSE,
op_kwargs={"features": f"S:-1*{squeeze_factor}"})
append(op_name="se/sigmoid4",
op_type=OpType.SIGMOID)
append(op_name="se/mul5",
op_type=OpType.MUL,
input_names=[input_name, ops[-1].name])
return ops
| 5,354,788 |
def get_code_v2(fl = r'C:\Users\bogdan\code_seurat\WholeGenome_MERFISH\Coordinates_code_1000region.csv'):
"""
Given a .csv file with header this returns 2 dictionaries: tad_to_PR,PR_to_tad
"""
lst = [(ln[:-1].split(',')[0].replace('__','_'),['R'+R for R in ln[:-1].split(',')[3].split('--')])
for ln in open(fl,'r')][1:]
tad_to_PR = dict(lst)
PR_to_tad = {Rs_to_Rnm(Rs):nm for nm,Rs in lst}
return tad_to_PR,PR_to_tad
| 5,354,789 |
def print_update_decks_help(fd):
"""Print the 'update-decks' command usage to the file descriptor."""
w = fd.write
w("Command 'update-decks':\n")
w(" Check if any decks are out-of-date (using etags)")
w(" and pull in the changes as needed.\n")
w("\n")
exe = os.path.basename(sys.argv[0])
w("Usage:\n")
w(" %s update-decks\n" % exe)
| 5,354,790 |
def run_source_lsq(vars, vs_list=vs_list):
"""
Script used to run_source and return the output file.
The function is called by AdaptiveLejaPCE.
"""
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
import spotpy as sp
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
Obtain the sum of timeseries of different temporal scale.
        temp_scale: str, 'annual' (default) or 'monthly'
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2017/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2017-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float')
obs_din = pd.DataFrame(obs_din,dtype='float')
resid = (obs_din - din_126001A).values
lsq = np.sum(resid ** 2, axis=0)
lsq = lsq.reshape(lsq.shape[0], 1)
print(f'Finish {lsq.shape[0]} run')
return lsq
| 5,354,791 |
def finnegans_wake_unicode_chars():
"""Data fixture that returns a string of all unicode characters in Finnegan's Wake."""
return '¤·àáãéìóôþŒŠŸˆ–—‘’‚“”‡…‹'
| 5,354,792 |
def get_upload(upload_key: UploadPath = Path(..., description="Location of the uploaded file chunk")):
"""
    Get the upload directory for the given upload key.
    :param upload_key: key identifying the upload location
    :return: a helper that joins a folder under the root path, creating it if needed
"""
root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key])
def func(folder):
path = security.safe_join(root_path, folder)
os.makedirs(path, exist_ok=True)
return path
return func
| 5,354,793 |
def kit(): # simpler version
"""Open communication with the dev-kit once for all tests."""
return usp.Devkit()
| 5,354,794 |
def run_batch_import(jobs: Dict[Future, str], impl, ctx, db):
"""
Run a batch of import jobs using threading and process the results
"""
# Run the threads
with ThreadPoolExecutor(max_workers=config['batch_size']) as executor:
# Dictionary of {future: accession}
# Following this example:
# https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor-example
futures = dict()
# Start all the threads
for (accession, params) in jobs:
future = executor.submit(impl.run_single_import, ctx, params)
futures[future] = accession
# Process all the results
for future in as_completed(futures):
accession = futures[future]
try:
data = future.result()
except Exception as exc:
print(f"{accession} had an error")
db_set_error(db, accession, str(exc))
continue
result = data[0]
if 'error' in result:
print(f"{accession} had an error")
db_set_error(db, accession, result['error'])
elif 'accession' in result:
print(f"{accession} successfully imported")
db_set_done(db, accession)
| 5,354,795 |
def givens_rotation(A):
"""Perform QR decomposition of matrix A using Givens rotation."""
(num_rows, num_cols) = np.shape(A)
# Initialize orthogonal matrix Q and upper triangular matrix R.
Q = np.identity(num_rows)
R = np.copy(A)
# Iterate over lower triangular matrix.
(rows, cols) = np.tril_indices(num_rows, -1, num_cols)
for (row, col) in zip(rows, cols):
# Compute Givens rotation matrix and
# zero-out lower triangular matrix entries.
if R[row, col] != 0:
(c, s) = _givens_rotation_matrix_entries(R[col, col], R[row, col])
G = np.identity(num_rows)
G[[col, row], [col, row]] = c
G[row, col] = s
G[col, row] = -s
R = np.dot(G, R)
Q = np.dot(Q, G.T)
return (Q, R)
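
# Example usage (illustrative, assuming the module-level helper
# _givens_rotation_matrix_entries is available as the function requires):
# factor a small matrix and verify Q is orthogonal, R upper triangular, Q @ R == A.
A = np.array([[6.0, 5.0, 0.0],
              [5.0, 1.0, 4.0],
              [0.0, 4.0, 3.0]])
Q, R = givens_rotation(A)
print(np.allclose(Q @ R, A))             # True
print(np.allclose(Q.T @ Q, np.eye(3)))   # True
print(np.allclose(np.tril(R, -1), 0.0))  # True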
| 5,354,796 |
def test_suggest_add_no_netixlan(entities, capsys):
"""
There isn't any netixlan between ix and network.
Network does not have automatic updates.
There isn't a local-ixf that matches the remote-ixf.
We create local-ixf[as,ip4,ip6] and email the network
but don't create a ticket or email the IX.
"""
data = setup_test_data("ixf.member.1") # asn1001
network = entities["net"]["UPDATE_DISABLED"] # asn1001
ixlan = entities["ixlan"][0]
importer = ixf.Importer()
importer.update(ixlan, data=data)
assert IXFMemberData.objects.count() == 1
assert NetworkIXLan.objects.count() == 0
log = importer.log["data"][0]
assert log["action"] == "suggest-add"
stdout = capsys.readouterr().out
assert_email_sent(
stdout, (network.asn, "195.69.147.250", "2001:7f8:1::a500:2906:1")
)
assert_no_ticket_exists()
# Test idempotent
importer.update(ixlan, data=data)
assert IXFMemberData.objects.count() == 1
assert NetworkIXLan.objects.count() == 0
assert_no_ticket_exists()
| 5,354,797 |
async def test_ssdp_discovery_confirm_abort(hass: HomeAssistantType) -> None:
"""Test we handle SSDP confirm cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data={ATTR_SSDP_LOCATION: SSDP_LOCATION, ATTR_UPNP_SERIAL: UPNP_SERIAL},
)
with patch(
"tests.components.directv.test_config_flow.MockDirectvClass.get_version",
side_effect=RequestException,
) as mock_validate_input:
result = await async_configure_flow(hass, result["flow_id"], {})
assert result["type"] == RESULT_TYPE_ABORT
await hass.async_block_till_done()
assert len(mock_validate_input.mock_calls) == 1
| 5,354,798 |
def lex_from_str(
*,
in_str: Union[str, Path],
grammar: str = "standard",
ir_file: Optional[Union[str, Path]] = None,
) -> JSONDict:
"""Run grammar of choice on input string.
Parameters
----------
in_str : Union[str, Path]
The string to be parsed.
grammar : str
Grammar to be used. Defaults to "standard".
ir_file : Optional[Union[str, Path]]
File to write intermediate representation to (JSON format).
None by default, which means file is not written out.
Returns
-------
The contents of the input string as a dictionary.
Raises
------
:exc:`ParselglossyError`
"""
try:
lexer = dispatch_grammar(grammar)
except KeyError:
raise ParselglossyError(f"Grammar {grammar} not available.")
ir = parse_string_to_dict(lexer, in_str)
if ir_file is not None:
ir_file = path_resolver(ir_file)
with ir_file.open("w") as out:
json.dump(ir, out, cls=ComplexEncoder, indent=4)
return ir
| 5,354,799 |